def parallel_pf(lclist,
outdir,
timecols=None,
magcols=None,
errcols=None,
lcformat='hat-sql',
lcformatdir=None,
pfmethods=('gls','pdm','mav','win'),
pfkwargs=({},{},{},{}),
sigclip=10.0,
getblssnr=False,
nperiodworkers=NCPUS,
ncontrolworkers=1,
liststartindex=None,
listmaxobjects=None,
minobservations=500,
excludeprocessed=True):
'''This drives the overall parallel period processing for a list of LCs.
As a rough benchmark, 25000 HATNet light curves with up to 50000 points per
LC take about 26 days in total for an invocation of this function using
GLS+PDM+BLS, 10 periodworkers, and 4 controlworkers (so all 40 'cores') on a
2 x Xeon E5-2660v3 machine.
Parameters
----------
lclist : list of str
The list of light curve files to process.
outdir : str
The output directory where the period-finding result pickles will go.
timecols : list of str or None
The timecol keys to use from the lcdict in calculating the features.
magcols : list of str or None
The magcol keys to use from the lcdict in calculating the features.
errcols : list of str or None
The errcol keys to use from the lcdict in calculating the features.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `lclist`.
lcformatdir : str or None
If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
pfmethods : list of str
This is a list of period finding methods to run. Each element is a
string matching the keys of the `PFMETHODS` dict above. By default, this
runs GLS, PDM, AoVMH, and the spectral window Lomb-Scargle periodogram.
pfkwargs : list of dicts
This is used to provide any special kwargs as dicts to each
period-finding method function specified in `pfmethods`.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
getblssnr : bool
If this is True and BLS is one of the methods specified in `pfmethods`,
will also calculate the stats for each best period in the BLS results:
transit depth, duration, ingress duration, refit period and epoch, and
the SNR of the transit.
nperiodworkers : int
The number of parallel period-finding workers to launch per object task.
ncontrolworkers : int
The number of controlling processes to launch. This effectively sets how
many objects from `lclist` will be processed in parallel.
liststartindex : int or None
This sets the index from where to start in `lclist`.
listmaxobjects : int or None
This sets the maximum number of objects in `lclist` to run
period-finding for in this invocation. Together with `liststartindex`,
`listmaxobjects` can be used to distribute processing over several
independent machines if the number of light curves is very large.
minobservations : int
The minimum number of finite LC points required to process a light
curve.
excludeprocessed : bool
If this is True, light curves that have existing period-finding result
pickles in `outdir` will not be processed.
FIXME: currently, this uses a dumb method of excluding already-processed
files. A smarter way to do this is to (i) generate a SHA512 cachekey
based on a repr of `{'lcfile', 'timecols', 'magcols', 'errcols',
'lcformat', 'pfmethods', 'sigclip', 'getblssnr', 'pfkwargs'}`, (ii) make
sure all list kwargs in the dict are sorted, (iii) check if the output
file has the same cachekey in its filename (last 8 chars of cachekey
should work), so the result was processed in exactly the same way as
specified in the input to this function, and can therefore be
ignored. Will implement this later.
Returns
-------
list of str
A list of the period-finding pickles created for all of the input LCs
processed.
'''
# make the output directory if it doesn't exist
if not os.path.exists(outdir):
os.makedirs(outdir)
if (liststartindex is not None) and (listmaxobjects is None):
lclist = lclist[liststartindex:]
elif (liststartindex is None) and (listmaxobjects is not None):
lclist = lclist[:listmaxobjects]
elif (liststartindex is not None) and (listmaxobjects is not None):
lclist = lclist[liststartindex:liststartindex+listmaxobjects]
tasklist = [(x, outdir, timecols, magcols, errcols, lcformat, lcformatdir,
pfmethods, pfkwargs, getblssnr, sigclip, nperiodworkers,
minobservations,
excludeprocessed)
for x in lclist]
with ProcessPoolExecutor(max_workers=ncontrolworkers) as executor:
resultfutures = executor.map(_runpf_worker, tasklist)
results = [x for x in resultfutures]
return results
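# Usage sketch (an illustration, not part of the library): drive period-finding
# over a hypothetical list of light curves. The paths below are made up, the
# 'hat-sql' format is assumed to have been registered via
# lcproc.register_lcformat, parallel_pf (defined above) is assumed to be in
# scope, and 'bls' is assumed to be a valid key in the PFMETHODS dict.
import glob

lcfiles = sorted(glob.glob('/data/hatnet/lightcurves/*-hatlc.sqlite.gz'))
pf_pickles = parallel_pf(
    lcfiles,
    '/data/hatnet/pf-results',
    lcformat='hat-sql',
    pfmethods=('gls', 'pdm', 'bls'),
    pfkwargs=({}, {}, {}),
    getblssnr=True,
    nperiodworkers=4,
    ncontrolworkers=2,
)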
def parallel_pf_lcdir(lcdir,
outdir,
fileglob=None,
recursive=True,
timecols=None,
magcols=None,
errcols=None,
lcformat='hat-sql',
lcformatdir=None,
pfmethods=('gls','pdm','mav','win'),
pfkwargs=({},{},{},{}),
sigclip=10.0,
getblssnr=False,
nperiodworkers=NCPUS,
ncontrolworkers=1,
liststartindex=None,
listmaxobjects=None,
minobservations=500,
excludeprocessed=True):
'''This runs parallel light curve period finding for directory of LCs.
Parameters
----------
lcdir : str
The directory containing the LCs to process.
outdir : str
The directory where the resulting period-finding pickles will go.
fileglob : str or None
The UNIX file glob to use to search for LCs in `lcdir`. If None, the
default file glob associated with the registered LC format will be used
instead.
recursive : bool
If True, will search recursively in `lcdir` for light curves to process.
timecols : list of str or None
The timecol keys to use from the lcdict in calculating the features.
magcols : list of str or None
The magcol keys to use from the lcdict in calculating the features.
errcols : list of str or None
The errcol keys to use from the lcdict in calculating the features.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves found in `lcdir`.
lcformatdir : str or None
If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
pfmethods : list of str
This is a list of period finding methods to run. Each element is a
string matching the keys of the `PFMETHODS` dict above. By default, this
runs GLS, PDM, AoVMH, and the spectral window Lomb-Scargle periodogram.
pfkwargs : list of dicts
This is used to provide any special kwargs as dicts to each
period-finding method function specified in `pfmethods`.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
getblssnr : bool
If this is True and BLS is one of the methods specified in `pfmethods`,
will also calculate the stats for each best period in the BLS results:
transit depth, duration, ingress duration, refit period and epoch, and
the SNR of the transit.
nperiodworkers : int
The number of parallel period-finding workers to launch per object task.
ncontrolworkers : int
The number of controlling processes to launch. This effectively sets how
many objects from `lclist` will be processed in parallel.
liststartindex : int or None
This sets the index from where to start in `lclist`.
listmaxobjects : int or None
This sets the maximum number of objects in `lclist` to run
period-finding for in this invocation. Together with `liststartindex`,
`listmaxobjects` can be used to distribute processing over several
independent machines if the number of light curves is very large.
minobservations : int
The minimum number of finite LC points required to process a light
curve.
excludeprocessed : bool
If this is True, light curves that have existing period-finding result
pickles in `outdir` will not be processed.
FIXME: currently, this uses a dumb method of excluding already-processed
files. A smarter way to do this is to (i) generate a SHA512 cachekey
based on a repr of `{'lcfile', 'timecols', 'magcols', 'errcols',
'lcformat', 'pfmethods', 'sigclip', 'getblssnr', 'pfkwargs'}`, (ii) make
sure all list kwargs in the dict are sorted, (iii) check if the output
file has the same cachekey in its filename (last 8 chars of cachekey
should work), so the result was processed in exactly the same way as
specified in the input to this function, and can therefore be
ignored. Will implement this later.
Returns
-------
list of str
A list of the period-finding pickles created for all of the input LCs
processed.
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
if not fileglob:
fileglob = dfileglob
# now find the files
LOGINFO('searching for %s light curves in %s ...' % (lcformat, lcdir))
if recursive is False:
matching = glob.glob(os.path.join(lcdir, fileglob))
else:
# use recursive glob for Python 3.5+
if sys.version_info[:2] > (3,4):
matching = glob.glob(os.path.join(lcdir,
'**',
fileglob),recursive=True)
# otherwise, use os.walk and glob
else:
# use os.walk to go through the directories
walker = os.walk(lcdir)
matching = []
for root, dirs, _files in walker:
for sdir in dirs:
searchpath = os.path.join(root,
sdir,
fileglob)
foundfiles = glob.glob(searchpath)
if foundfiles:
matching.extend(foundfiles)
# now that we have all the files, process them
if matching and len(matching) > 0:
# this helps us process things in deterministic order when we distribute
# processing over several machines
matching = sorted(matching)
LOGINFO('found %s light curves, running pf...' % len(matching))
return parallel_pf(matching,
outdir,
timecols=timecols,
magcols=magcols,
errcols=errcols,
lcformat=lcformat,
lcformatdir=lcformatdir,
pfmethods=pfmethods,
pfkwargs=pfkwargs,
getblssnr=getblssnr,
sigclip=sigclip,
nperiodworkers=nperiodworkers,
ncontrolworkers=ncontrolworkers,
liststartindex=liststartindex,
listmaxobjects=listmaxobjects,
minobservations=minobservations,
excludeprocessed=excludeprocessed)
else:
LOGERROR('no light curve files in %s format found in %s' % (lcformat,
lcdir))
return None
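# Usage sketch (an illustration, not part of the library): the same run driven
# from a directory of light curves instead of an explicit file list. The paths
# and file glob are hypothetical; liststartindex/listmaxobjects show how one
# large collection could be split across several machines.
pf_pickles = parallel_pf_lcdir(
    '/data/hatnet/lightcurves',
    '/data/hatnet/pf-results',
    fileglob='*-hatlc.sqlite.gz',
    recursive=True,
    lcformat='hat-sql',
    liststartindex=0,
    listmaxobjects=5000,
)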
def collect_nonperiodic_features(
featuresdir,
magcol,
outfile,
pklglob='varfeatures-*.pkl',
featurestouse=NONPERIODIC_FEATURES_TO_COLLECT,
maxobjects=None,
labeldict=None,
labeltype='binary',
):
'''This collects variability features into arrays for use with the classifier.
Parameters
----------
featuresdir : str
This is the directory where all the varfeatures pickles are. Use
`pklglob` to specify the glob to search for. The `varfeatures` pickles
contain objectids, a light curve magcol, and features as dict
key-vals. The :py:mod:`astrobase.lcproc.lcvfeatures` module can be used
to produce these.
magcol : str
This is the key in each varfeatures pickle corresponding to the magcol
of the light curve the variability features were extracted from.
outfile : str
This is the filename of the output pickle that will be written
containing a dict of all the features extracted into np.arrays.
pklglob : str
This is the UNIX file glob to use to search for varfeatures pickle files
in `featuresdir`.
featurestouse : list of str
Each varfeatures pickle can contain any combination of non-periodic,
stellar, and periodic features; these must have the same names as
elements in the list of strings provided in `featurestouse`. This tries
to get all the features listed in NONPERIODIC_FEATURES_TO_COLLECT by
default. If `featurestouse` is provided as a list, gets only the
features listed in this kwarg instead.
maxobjects : int or None
This controls how many pickles from `featuresdir` to process. If None,
will process all varfeatures pickles.
labeldict : dict or None
If this is provided, it must be a dict with the following key:val list::
'<objectid>':<label value>
for each objectid collected from the varfeatures pickles. This will turn
the collected information into a training set for classifiers.
Example: to carry out non-periodic variable feature collection of fake
LCs prepared by :py:mod:`astrobase.fakelcs.generation`, use the value
of the 'isvariable' dict elem from the `fakelcs-info.pkl` here, like
so::
labeldict={x:y for x,y in zip(fakelcinfo['objectid'],
fakelcinfo['isvariable'])}
labeltype : {'binary', 'classes'}
This is either 'binary' or 'classes' for binary/multi-class
classification respectively.
Returns
-------
dict
This returns a dict with all of the features collected into np.arrays,
ready to use as input to a scikit-learn classifier.
'''
# list of input pickles generated by varfeatures in lcproc.py
pklist = glob.glob(os.path.join(featuresdir, pklglob))
if maxobjects:
pklist = pklist[:maxobjects]
# fancy progress bar with tqdm if present
if TQDM:
listiterator = tqdm(pklist)
else:
listiterator = pklist
# go through all the varfeatures arrays
feature_dict = {'objectids':[],'magcol':magcol, 'availablefeatures':[]}
LOGINFO('collecting features for magcol: %s' % magcol)
for pkl in listiterator:
with open(pkl,'rb') as infd:
varf = pickle.load(infd)
# update the objectid list
objectid = varf['objectid']
if objectid not in feature_dict['objectids']:
feature_dict['objectids'].append(objectid)
thisfeatures = varf[magcol]
if featurestouse and len(featurestouse) > 0:
featurestoget = featurestouse
else:
featurestoget = NONPERIODIC_FEATURES_TO_COLLECT
# collect all the features for this magcol/objectid combination
for feature in featurestoget:
# update the global feature list if necessary
if ((feature not in feature_dict['availablefeatures']) and
(feature in thisfeatures)):
feature_dict['availablefeatures'].append(feature)
feature_dict[feature] = []
if feature in thisfeatures:
feature_dict[feature].append(
thisfeatures[feature]
)
# now that we've collected all the objects and their features, turn the list
# into arrays, and then concatenate them
for feat in feature_dict['availablefeatures']:
feature_dict[feat] = np.array(feature_dict[feat])
feature_dict['objectids'] = np.array(feature_dict['objectids'])
feature_array = np.column_stack([feature_dict[feat] for feat in
feature_dict['availablefeatures']])
feature_dict['features_array'] = feature_array
# if there's a labeldict available, use it to generate a label array. this
# feature collection is now a training set.
if isinstance(labeldict, dict):
labelarray = np.zeros(feature_dict['objectids'].size, dtype=np.int64)
# populate the labels for each object in the training set
for ind, objectid in enumerate(feature_dict['objectids']):
if objectid in labeldict:
# if this is a binary classifier training set, convert bools to
# ones and zeros
if labeltype == 'binary':
if labeldict[objectid]:
labelarray[ind] = 1
# otherwise, use the actual class label integer
elif labeltype == 'classes':
labelarray[ind] = labeldict[objectid]
feature_dict['labels_array'] = labelarray
feature_dict['kwargs'] = {'pklglob':pklglob,
'featurestouse':featurestouse,
'maxobjects':maxobjects,
'labeltype':labeltype}
# write the info to the output pickle
with open(outfile,'wb') as outfd:
pickle.dump(feature_dict, outfd, pickle.HIGHEST_PROTOCOL)
# return the feature_dict
return feature_dict
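# Usage sketch (an illustration, not part of the library): build a labeled
# training set from varfeatures pickles, following the fake-LC recipe in the
# docstring above. All paths and the 'sdssr' magcol name are hypothetical.
import pickle

with open('/data/fakelcs/fakelcs-info.pkl', 'rb') as infd:
    fakelcinfo = pickle.load(infd)

labeldict = {objid: isvar for objid, isvar in zip(fakelcinfo['objectid'],
                                                  fakelcinfo['isvariable'])}

fdict = collect_nonperiodic_features(
    '/data/fakelcs/varfeatures',
    'sdssr',
    '/data/fakelcs/collected-features.pkl',
    labeldict=labeldict,
    labeltype='binary',
)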
def train_rf_classifier(
collected_features,
test_fraction=0.25,
n_crossval_iterations=20,
n_kfolds=5,
crossval_scoring_metric='f1',
classifier_to_pickle=None,
nworkers=-1,
):
'''This gets the best RF classifier after running cross-validation.
- splits the training set into test/train samples
- does `KFold` stratified cross-validation using `RandomizedSearchCV`
- gets the `RandomForestClassifier` with the best performance after CV
- gets the confusion matrix for the test set
Runs on the output dict from functions that produce dicts similar to that
produced by `collect_nonperiodic_features` above.
Parameters
----------
collected_features : dict or str
This is either the dict produced by a `collect_*_features` function or
the pickle produced by the same.
test_fraction : float
This sets the fraction of the input set that will be used as the
test set after training.
n_crossval_iterations : int
This sets the number of iterations to use when running the
cross-validation.
n_kfolds : int
This sets the number of K-folds to use on the data when doing a
test-train split.
crossval_scoring_metric : str
This is a string that describes how the cross-validation score is
calculated for each iteration. See the URL below for how to specify this
parameter:
http://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter
By default, this is tuned for binary classification and uses the F1
scoring metric. Change the `crossval_scoring_metric` to another metric
(probably 'accuracy') for multi-class classification, e.g. for periodic
variable classification.
classifier_to_pickle : str
If this is a string indicating the name of a pickle file to write, will
write the trained classifier to the pickle that can be later loaded and
used to classify data.
nworkers : int
This is the number of parallel workers to use in the
RandomForestClassifier. Set to -1 to use all CPUs on your machine.
Returns
-------
dict
A dict containing the trained classifier, the cross-validation results
and score metrics, the input data set, and all of the input kwargs used.
'''
if (isinstance(collected_features,str) and
os.path.exists(collected_features)):
with open(collected_features,'rb') as infd:
fdict = pickle.load(infd)
elif isinstance(collected_features, dict):
fdict = collected_features
else:
LOGERROR("can't figure out the input collected_features arg")
return None
tfeatures = fdict['features_array']
tlabels = fdict['labels_array']
tfeaturenames = fdict['availablefeatures']
tmagcol = fdict['magcol']
tobjectids = fdict['objectids']
# split the training set into training/test samples using stratification
# to keep the same fraction of variable/nonvariables in each
training_features, testing_features, training_labels, testing_labels = (
train_test_split(
tfeatures,
tlabels,
test_size=test_fraction,
random_state=RANDSEED,
stratify=tlabels
)
)
# get a random forest classifier
clf = RandomForestClassifier(n_jobs=nworkers,
random_state=RANDSEED)
# this is the grid def for hyperparam optimization
rf_hyperparams = {
"max_depth": [3,4,5,None],
"n_estimators":sp_randint(100,2000),
"max_features": sp_randint(1, 5),
"min_samples_split": sp_randint(2, 11),
"min_samples_leaf": sp_randint(2, 11),
}
# run the stratified kfold cross-validation on training features using our
# random forest classifier object
cvsearch = RandomizedSearchCV(
clf,
param_distributions=rf_hyperparams,
n_iter=n_crossval_iterations,
scoring=crossval_scoring_metric,
cv=StratifiedKFold(n_splits=n_kfolds,
shuffle=True,
random_state=RANDSEED),
random_state=RANDSEED
)
LOGINFO('running grid-search CV to optimize RF hyperparameters...')
cvsearch_classifiers = cvsearch.fit(training_features,
training_labels)
# report on the classifiers' performance
_gridsearch_report(cvsearch_classifiers.cv_results_)
# get the best classifier after CV is done
bestclf = cvsearch_classifiers.best_estimator_
bestclf_score = cvsearch_classifiers.best_score_
bestclf_hyperparams = cvsearch_classifiers.best_params_
# test this classifier on the testing set
test_predicted_labels = bestclf.predict(testing_features)
recscore = recall_score(testing_labels, test_predicted_labels)
precscore = precision_score(testing_labels,test_predicted_labels)
f1score = f1_score(testing_labels, test_predicted_labels)
confmatrix = confusion_matrix(testing_labels, test_predicted_labels)
# write the classifier, its training/testing set, and its stats to the
# pickle if requested
outdict = {'features':tfeatures,
'labels':tlabels,
'feature_names':tfeaturenames,
'magcol':tmagcol,
'objectids':tobjectids,
'kwargs':{'test_fraction':test_fraction,
'n_crossval_iterations':n_crossval_iterations,
'n_kfolds':n_kfolds,
'crossval_scoring_metric':crossval_scoring_metric,
'nworkers':nworkers},
'collect_kwargs':fdict['kwargs'],
'testing_features':testing_features,
'testing_labels':testing_labels,
'training_features':training_features,
'training_labels':training_labels,
'best_classifier':bestclf,
'best_score':bestclf_score,
'best_hyperparams':bestclf_hyperparams,
'best_recall':recscore,
'best_precision':precscore,
'best_f1':f1score,
'best_confmatrix':confmatrix}
if classifier_to_pickle:
with open(classifier_to_pickle,'wb') as outfd:
pickle.dump(outdict, outfd, pickle.HIGHEST_PROTOCOL)
# return this classifier and accompanying info
return outdict
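# Usage sketch (an illustration, not part of the library): train the random
# forest classifier on a feature collection like the `fdict` produced in the
# previous sketch (a path to its output pickle would also work), holding out
# 25% of the objects as a test set. The output pickle path is hypothetical.
clfdict = train_rf_classifier(
    fdict,
    test_fraction=0.25,
    n_crossval_iterations=20,
    n_kfolds=5,
    classifier_to_pickle='/data/fakelcs/rf-classifier.pkl',
    nworkers=-1,
)
print('best CV score: %.3f, test-set F1: %.3f' %
      (clfdict['best_score'], clfdict['best_f1']))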
def apply_rf_classifier(classifier,
varfeaturesdir,
outpickle,
maxobjects=None):
'''This applies an RF classifier trained using `train_rf_classifier`
to varfeatures pickles in `varfeaturesdir`.
Parameters
----------
classifier : dict or str
This is the output dict or pickle created by `train_rf_classifier`. This
will contain a `feature_names` key that will be used to collect the same
features used to train the classifier from the varfeatures pickles in
varfeaturesdir.
varfeaturesdir : str
The directory containing the varfeatures pickles for objects that will
be classified by the trained `classifier`.
outpickle : str
This is a filename for the pickle that will be written containing the
result dict from this function.
maxobjects : int or None
This sets the maximum number of objects to process in `varfeaturesdir`. If
None, all objects will be processed.
Returns
-------
dict
The classification results after running the trained `classifier`,
returned as a dict. This contains the predicted labels and their prediction
probabilities.
'''
if isinstance(classifier,str) and os.path.exists(classifier):
with open(classifier,'rb') as infd:
clfdict = pickle.load(infd)
elif isinstance(classifier, dict):
clfdict = classifier
else:
LOGERROR("can't figure out the input classifier arg")
return None
# get the features to extract from clfdict
if 'feature_names' not in clfdict:
LOGERROR("feature_names not present in classifier input, "
"can't figure out which ones to extract from "
"varfeature pickles in %s" % varfeaturesdir)
return None
# get the feature labeltype, pklglob, and maxobjects from classifier's
# collect_kwargs elem.
featurestouse = clfdict['feature_names']
pklglob = clfdict['collect_kwargs']['pklglob']
magcol = clfdict['magcol']
# extract the features used by the classifier from the varfeatures pickles
# in varfeaturesdir using the pklglob provided
featfile = os.path.join(
os.path.dirname(outpickle),
'actual-collected-features.pkl'
)
features = collect_nonperiodic_features(
varfeaturesdir,
magcol,
featfile,
pklglob=pklglob,
featurestouse=featurestouse,
maxobjects=maxobjects
)
# now use the trained classifier on these features
bestclf = clfdict['best_classifier']
predicted_labels = bestclf.predict(features['features_array'])
# FIXME: do we need to use the probability calibration curves to fix these
# probabilities? probably. figure out how to do this.
predicted_label_probs = bestclf.predict_proba(
features['features_array']
)
outdict = {
'features':features,
'featfile':featfile,
'classifier':clfdict,
'predicted_labels':predicted_labels,
'predicted_label_probs':predicted_label_probs,
}
with open(outpickle,'wb') as outfd:
pickle.dump(outdict, outfd, pickle.HIGHEST_PROTOCOL)
return outdict
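# Usage sketch (an illustration, not part of the library): apply the trained
# classifier to a directory of new varfeatures pickles. The paths are
# hypothetical; for a binary classifier, predicted label 1 marks the positive
# (variable) class.
results = apply_rf_classifier(
    '/data/fakelcs/rf-classifier.pkl',
    '/data/survey/varfeatures',
    '/data/survey/rf-classification.pkl',
)
variable_candidates = results['features']['objectids'][
    results['predicted_labels'] == 1
]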
def plot_training_results(classifier,
classlabels,
outfile):
'''This plots the training results from the classifier run on the training
set.
- plots the confusion matrix
- plots the feature importances
- FIXME: plot the learning curves too, see:
http://scikit-learn.org/stable/modules/learning_curve.html
Parameters
----------
classifier : dict or str
This is the output dict or pickle created by `train_rf_classifier`
containing the trained classifier.
classlabels : list of str
This contains all of the class labels for the current classification
problem.
outfile : str
This is the filename where the plots will be written.
Returns
-------
str
The path to the generated plot file.
'''
if isinstance(classifier,str) and os.path.exists(classifier):
with open(classifier,'rb') as infd:
clfdict = pickle.load(infd)
elif isinstance(classifier, dict):
clfdict = classifier
else:
LOGERROR("can't figure out the input classifier arg")
return None
confmatrix = clfdict['best_confmatrix']
overall_feature_importances = clfdict[
'best_classifier'
].feature_importances_
feature_importances_per_tree = np.array([
tree.feature_importances_
for tree in clfdict['best_classifier'].estimators_
])
stdev_feature_importances = np.std(feature_importances_per_tree,axis=0)
feature_names = np.array(clfdict['feature_names'])
plt.figure(figsize=(6.4*3.0,4.8))
# confusion matrix
plt.subplot(121)
classes = np.array(classlabels)
plt.imshow(confmatrix, interpolation='nearest', cmap=plt.cm.Blues)
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes)
plt.yticks(tick_marks, classes)
plt.title('evaluation set confusion matrix')
plt.ylabel('actual class')
plt.xlabel('predicted class')
thresh = confmatrix.max() / 2.
for i, j in itertools.product(range(confmatrix.shape[0]),
range(confmatrix.shape[1])):
plt.text(j, i, confmatrix[i, j],
horizontalalignment="center",
color="white" if confmatrix[i, j] > thresh else "black")
# feature importances
plt.subplot(122)
features = np.array(feature_names)
sorted_ind = np.argsort(overall_feature_importances)[::-1]
features = features[sorted_ind]
feature_names = feature_names[sorted_ind]
overall_feature_importances = overall_feature_importances[sorted_ind]
stdev_feature_importances = stdev_feature_importances[sorted_ind]
plt.bar(np.arange(0,features.size),
overall_feature_importances,
yerr=stdev_feature_importances,
width=0.8,
color='grey')
plt.xticks(np.arange(0,features.size),
features,
rotation=90)
plt.yticks([0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0])
plt.xlim(-0.75, features.size - 1.0 + 0.75)
plt.ylim(0.0,0.9)
plt.ylabel('relative importance')
plt.title('relative importance of features')
plt.subplots_adjust(wspace=0.1)
plt.savefig(outfile,
bbox_inches='tight',
dpi=100)
plt.close('all')
return outfile
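# Usage sketch (an illustration, not part of the library): plot the confusion
# matrix and feature importances for the binary classifier trained above. The
# class labels and output path are hypothetical.
plot_training_results(
    '/data/fakelcs/rf-classifier.pkl',
    ['not variable', 'variable'],
    '/data/fakelcs/rf-classifier-results.png',
)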
def _fourier_func(fourierparams, phase, mags):
'''This returns a summed Fourier cosine series.
Parameters
----------
fourierparams : list
This MUST be a flat list of Fourier coefficients of the following form::
[amplitude_1, amplitude_2, ..., amplitude_X,
phase_1, phase_2, ..., phase_X]
where X is the Fourier order: the amplitude coefficients come first,
followed by the phase coefficients.
phase,mags : np.array
The input phase and magnitude arrays to use as the basis for the cosine
series. The phases are used directly to generate the values of the
function, while the mags array is used to generate the zeroth order
amplitude coefficient.
Returns
-------
np.array
The Fourier cosine series function evaluated over `phase`.
'''
# figure out the order from the length of the Fourier param list
order = int(len(fourierparams)/2)
# get the amplitude and phase coefficients
f_amp = fourierparams[:order]
f_pha = fourierparams[order:]
# calculate all the individual terms of the series
f_orders = [f_amp[x]*npcos(2.0*pi_value*x*phase + f_pha[x])
for x in range(order)]
# this is the zeroth order coefficient - a constant equal to median mag
total_f = npmedian(mags)
# sum the series
for fo in f_orders:
total_f += fo
return total_f
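# A small self-contained check of _fourier_func (an illustration, not part of
# the library): evaluate an order-2 cosine series on a phase grid. This assumes
# the module-level aliases npcos, npmedian, and pi_value used above point at
# np.cos, np.median, and np.pi.
import numpy as np

phase = np.linspace(0.0, 1.0, 200)
mags = np.full_like(phase, 12.0)
# flat parameter list: [amp_1, amp_2, phase_1, phase_2]
params = [0.05, 0.02, 0.0, 0.3]
model = _fourier_func(params, phase, mags)
# the zeroth-order term is the median mag, so the series oscillates around 12.0
assert model.shape == phase.shape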
def _fourier_chisq(fourierparams,
phase,
mags,
errs):
'''This is the chisq objective function to be minimized by `scipy.minimize`.
The parameters are the same as `_fourier_func` above. `errs` is used to
calculate the chisq value.
'''
f = _fourier_func(fourierparams, phase, mags)
chisq = npsum(((mags - f)*(mags - f))/(errs*errs))
return chisq
def _fourier_residual(fourierparams,
phase,
mags):
'''
This is the residual objective function to be minimized by `scipy.leastsq`.
The parameters are the same as `_fourier_func` above.
'''
f = _fourier_func(fourierparams, phase, mags)
residual = mags - f
return residual
def fourier_fit_magseries(times, mags, errs, period,
fourierorder=None,
fourierparams=None,
sigclip=3.0,
magsarefluxes=False,
plotfit=False,
ignoreinitfail=True,
verbose=True):
'''This fits a Fourier series to a mag/flux time series.
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to fit a Fourier cosine series to.
period : float
The period to use for the Fourier fit.
fourierorder : None or int
If this is an int, will be interpreted as the Fourier order of the
series to fit to the input mag/flux times-series. If this is None and
`fourierparams` is specified, `fourierparams` will be used directly to
generate the fit Fourier series. If `fourierparams` is also None, this
function will try to fit a Fourier cosine series of order 3 to the
mag/flux time-series.
fourierparams : list of floats or None
If this is specified as a list of floats, it must be of the form below::
[fourier_amp1, fourier_amp2, fourier_amp3,...,fourier_ampN,
fourier_phase1, fourier_phase2, fourier_phase3,...,fourier_phaseN]
to specify a Fourier cosine series of order N. If this is None and
`fourierorder` is specified, the Fourier order specified there will be
used to construct the Fourier cosine series used to fit the input
mag/flux time-series. If both are None, this function will try to fit a
Fourier cosine series of order 3 to the input mag/flux time-series.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
If True, will treat the input values of `mags` as fluxes for purposes of
plotting the fit and sig-clipping.
plotfit : str or False
If this is a string, this function will make a plot for the fit to the
mag/flux time-series and write the plot to the path specified here.
ignoreinitfail : bool
If this is True, ignores the initial failure to find a set of optimized
Fourier parameters using the global optimization function and proceeds
to do a least-squares fit anyway.
verbose : bool
If True, will indicate progress and warn of any problems.
Returns
-------
dict
This function returns a dict containing the model fit parameters, the
minimized chi-sq value and the reduced chi-sq value. The form of this
dict is mostly standardized across all functions in this module::
{
'fittype':'fourier',
'fitinfo':{
'finalparams': the list of final model fit params,
'leastsqfit':the full tuple returned by scipy.leastsq,
'fitmags': the model fit mags,
'fitepoch': the epoch of minimum light for the fit,
... other fit function specific keys ...
},
'fitchisq': the minimized value of the fit's chi-sq,
'fitredchisq':the reduced chi-sq value,
'fitplotfile': the output fit plot file if `plotfit` is not None,
'magseries':{
'times':input times in phase order of the model,
'phase':the phases of the model mags,
'mags':input mags/fluxes in the phase order of the model,
'errs':errs in the phase order of the model,
'magsarefluxes':input value of magsarefluxes kwarg
}
}
NOTE: the returned value of 'fitepoch' in the 'fitinfo' dict returned by
this function is the time value of the first observation since this is
where the LC is folded for the fit procedure. To get the actual time of
minimum light as determined from the Fourier fit to the phased LC, use the
key 'actual_fitepoch' in the 'fitinfo' dict.
'''
stimes, smags, serrs = sigclip_magseries(times, mags, errs,
sigclip=sigclip,
magsarefluxes=magsarefluxes)
# get rid of zero errs
nzind = npnonzero(serrs)
stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]
phase, pmags, perrs, ptimes, mintime = (
get_phased_quantities(stimes, smags, serrs, period)
)
# get the fourier order either from the scalar order kwarg...
if fourierorder and fourierorder > 0 and not fourierparams:
fourieramps = [0.6] + [0.2]*(fourierorder - 1)
fourierphas = [0.1] + [0.1]*(fourierorder - 1)
fourierparams = fourieramps + fourierphas
# or from the fully specified coeffs vector
elif not fourierorder and fourierparams:
fourierorder = int(len(fourierparams)/2)
else:
LOGWARNING('specified both/neither Fourier order AND Fourier coeffs, '
'using default Fourier order of 3')
fourierorder = 3
fourieramps = [0.6] + [0.2]*(fourierorder - 1)
fourierphas = [0.1] + [0.1]*(fourierorder - 1)
fourierparams = fourieramps + fourierphas
if verbose:
LOGINFO('fitting Fourier series of order %s to '
'mag series with %s observations, '
'using period %.6f, folded at %.6f' % (fourierorder,
len(phase),
period,
mintime))
# initial minimize call to find global minimum in chi-sq
initialfit = spminimize(_fourier_chisq,
fourierparams,
method='BFGS',
args=(phase, pmags, perrs))
# make sure this initial fit succeeds before proceeding
if initialfit.success or ignoreinitfail:
if verbose:
LOGINFO('initial fit done, refining...')
leastsqparams = initialfit.x
try:
leastsqfit = spleastsq(_fourier_residual,
leastsqparams,
args=(phase, pmags))
except Exception as e:
leastsqfit = None
# if the fit succeeded, then we can return the final parameters
if leastsqfit and leastsqfit[-1] in (1,2,3,4):
finalparams = leastsqfit[0]
# calculate the chisq and reduced chisq
fitmags = _fourier_func(finalparams, phase, pmags)
fitchisq = npsum(
((fitmags - pmags)*(fitmags - pmags)) / (perrs*perrs)
)
fitredchisq = fitchisq/(len(pmags) - len(finalparams) - 1)
if verbose:
LOGINFO(
'final fit done. chisq = %.5f, reduced chisq = %.5f' %
(fitchisq,fitredchisq)
)
# figure out the time of light curve minimum (i.e. the fit epoch)
# this is when the fit mag is maximum (i.e. the faintest)
# or if magsarefluxes = True, then this is when fit flux is minimum
if not magsarefluxes:
fitmagminind = npwhere(fitmags == npmax(fitmags))
else:
fitmagminind = npwhere(fitmags == npmin(fitmags))
if len(fitmagminind[0]) > 1:
fitmagminind = (fitmagminind[0][0],)
# assemble the returndict
returndict = {
'fittype':'fourier',
'fitinfo':{
'fourierorder':fourierorder,
'finalparams':finalparams,
'initialfit':initialfit,
'leastsqfit':leastsqfit,
'fitmags':fitmags,
'fitepoch':mintime,
'actual_fitepoch':ptimes[fitmagminind]
},
'fitchisq':fitchisq,
'fitredchisq':fitredchisq,
'fitplotfile':None,
'magseries':{
'times':ptimes,
'phase':phase,
'mags':pmags,
'errs':perrs,
'magsarefluxes':magsarefluxes
},
}
# make the fit plot if required
if plotfit and isinstance(plotfit, str):
make_fit_plot(phase, pmags, perrs, fitmags,
period, mintime, mintime,
plotfit,
magsarefluxes=magsarefluxes)
returndict['fitplotfile'] = plotfit
return returndict
# if the leastsq fit did not succeed, return a result dict with None fit values
else:
LOGERROR('fourier-fit: least-squared fit to the light curve failed')
return {
'fittype':'fourier',
'fitinfo':{
'fourierorder':fourierorder,
'finalparams':None,
'initialfit':initialfit,
'leastsqfit':None,
'fitmags':None,
'fitepoch':None
},
'fitchisq':npnan,
'fitredchisq':npnan,
'fitplotfile':None,
'magseries':{
'times':ptimes,
'phase':phase,
'mags':pmags,
'errs':perrs,
'magsarefluxes':magsarefluxes
}
}
# if the fit didn't succeed, we can't proceed
else:
LOGERROR('initial Fourier fit did not succeed, '
'reason: %s, returning scipy OptimizeResult'
% initialfit.message)
return {
'fittype':'fourier',
'fitinfo':{
'fourierorder':fourierorder,
'finalparams':None,
'initialfit':initialfit,
'leastsqfit':None,
'fitmags':None,
'fitepoch':None
},
'fitchisq':npnan,
'fitredchisq':npnan,
'fitplotfile':None,
'magseries':{
'times':ptimes,
'phase':phase,
'mags':pmags,
'errs':perrs,
'magsarefluxes':magsarefluxes
}
}
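# Usage sketch (an illustration, not part of the library): fit an order-2
# Fourier series to a synthetic sinusoidal light curve at its known period and
# write a diagnostic plot. Real times/mags/errs arrays would replace the
# synthetic ones below.
import numpy as np

rng = np.random.RandomState(42)
times = np.sort(rng.uniform(0.0, 30.0, size=2000))
period = 1.2356
mags = (12.0 + 0.1*np.cos(2.0*np.pi*times/period) +
        rng.normal(0.0, 0.01, times.size))
errs = np.full_like(mags, 0.01)

fitres = fourier_fit_magseries(times, mags, errs, period,
                               fourierorder=2,
                               sigclip=3.0,
                               plotfit='fourier-fit.png')
if fitres['fitinfo']['finalparams'] is not None:
    print('reduced chi-sq of the fit: %.3f' % fitres['fitredchisq'])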
def plot_magseries(times,
mags,
magsarefluxes=False,
errs=None,
out=None,
sigclip=30.0,
normto='globalmedian',
normmingap=4.0,
timebin=None,
yrange=None,
segmentmingap=100.0,
plotdpi=100):
'''This plots a magnitude/flux time-series.
Parameters
----------
times,mags : np.array
The mag/flux time-series to plot as a function of time.
magsarefluxes : bool
Indicates if the input `mags` array is actually an array of flux
measurements instead of magnitude measurements. If this is set to True,
then the plot y-axis will be set as appropriate for mag or fluxes. In
addition:
- if `normto` is 'zero', then each observation's flux value is divided by
the median flux to yield normalized fluxes with 1.0 as the global median.
- if `normto` is 'globalmedian', then each measurement is multiplied by the
global median flux value across the entire time-series.
- if `normto` is set to a `float`, then each measurement's flux value is
multiplied by this number.
errs : np.array or None
If this is provided, contains the measurement errors associated with
each measurement of flux/mag in time-series. Providing this kwarg will
add errbars to the output plot.
out : str or StringIO/BytesIO object or None
Sets the output type and target:
- If `out` is a string, will save the plot to the specified file name.
- If `out` is a StringIO/BytesIO object, will save the plot to that file
handle. This can be useful to carry out additional operations on the
output binary stream, or convert it to base64 text for embedding in
HTML pages.
- If `out` is None, will save the plot to a file called
'magseries-plot.png' in the current working directory.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
normto : {'globalmedian', 'zero'} or a float
Sets the normalization target::
'globalmedian' -> norms each mag to the global median of the LC column
'zero' -> norms each mag to zero
a float -> norms each mag to this specified float value.
normmingap : float
This defines how large the gap between consecutive measurements must be
for them to be considered as parts of different timegroups. By default,
it is set to 4.0 days.
timebin : float or None
The bin size to use to group together measurements closer than this
amount in time. This is in seconds. If this is None, no time-binning
will be performed.
yrange : list of two floats or None
This is used to provide a custom y-axis range to the plot. If None, will
automatically determine y-axis range.
segmentmingap : float or None
This controls the minimum length of time (in days) required to consider
a timegroup in the light curve as a separate segment. This is useful
when the light curve consists of measurements taken over several
seasons, so there's lots of dead space in the plot that can be cut out
to zoom in on the interesting stuff. If `segmentmingap` is not None, the
magseries plot will be cut in this way and the x-axis will show these
breaks.
plotdpi : int
Sets the resolution in DPI for PNG plots (default = 100).
Returns
-------
str or BytesIO/StringIO object
Returns based on the input:
- If `out` is a str or None, the path to the generated plot file is
returned.
- If `out` is a StringIO/BytesIO object, will return the
StringIO/BytesIO object to which the plot was written.
'''
# sigclip the magnitude timeseries
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
# now we proceed to binning
if timebin and errs is not None:
binned = time_bin_magseries_with_errs(stimes, smags, serrs,
binsize=timebin)
btimes, bmags, berrs = (binned['binnedtimes'],
binned['binnedmags'],
binned['binnederrs'])
elif timebin and errs is None:
binned = time_bin_magseries(stimes, smags,
binsize=timebin)
btimes, bmags, berrs = binned['binnedtimes'], binned['binnedmags'], None
else:
btimes, bmags, berrs = stimes, smags, serrs
# check if we need to normalize
if normto is not False:
btimes, bmags = normalize_magseries(btimes, bmags,
normto=normto,
magsarefluxes=magsarefluxes,
mingap=normmingap)
btimeorigin = btimes.min()
btimes = btimes - btimeorigin
##################################
## FINALLY PLOT THE LIGHT CURVE ##
##################################
# if we're going to plot with segment gaps highlighted, then find the gaps
if segmentmingap is not None:
ntimegroups, timegroups = find_lc_timegroups(btimes,
mingap=segmentmingap)
# get the yrange for all the plots if it's given
if yrange and isinstance(yrange,(list,tuple)) and len(yrange) == 2:
ymin, ymax = yrange
# if it's not given, figure it out
else:
# the plot y limits are just 0.05 mags on each side if mags are used
if not magsarefluxes:
ymin, ymax = (bmags.min() - 0.05,
bmags.max() + 0.05)
# if we're dealing with fluxes, limits are 2% of the flux range per side
else:
ycov = bmags.max() - bmags.min()
ymin = bmags.min() - 0.02*ycov
ymax = bmags.max() + 0.02*ycov
# if we're supposed to make the plot segment-aware (i.e. gaps longer than
# segmentmingap will be cut out)
if segmentmingap and ntimegroups > 1:
LOGINFO('%s time groups found' % ntimegroups)
# our figure is now a multiple axis plot
# the aspect ratio is a bit wider
fig, axes = plt.subplots(1,ntimegroups,sharey=True)
fig.set_size_inches(10,4.8)
axes = np.ravel(axes)
# now go through each axis and make the plots for each timegroup
for timegroup, ax, axind in zip(timegroups, axes, range(len(axes))):
tgtimes = btimes[timegroup]
tgmags = bmags[timegroup]
if berrs is not None:
tgerrs = berrs[timegroup]
else:
tgerrs = None
LOGINFO('axes: %s, timegroup %s: JD %.3f to %.3f' % (
axind,
axind+1,
btimeorigin + tgtimes.min(),
btimeorigin + tgtimes.max())
)
ax.errorbar(tgtimes, tgmags, fmt='go', yerr=tgerrs,
markersize=2.0, markeredgewidth=0.0, ecolor='grey',
capsize=0)
# don't use offsets on any xaxis
ax.get_xaxis().get_major_formatter().set_useOffset(False)
# fix the ticks to use no yoffsets and remove right spines for first
# axes instance
if axind == 0:
ax.get_yaxis().get_major_formatter().set_useOffset(False)
ax.spines['right'].set_visible(False)
ax.yaxis.tick_left()
# remove the right and left spines for the other axes instances
elif 0 < axind < (len(axes)-1):
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.tick_params(right='off', labelright='off',
left='off',labelleft='off')
# make the left spines invisible for the last axes instance
elif axind == (len(axes)-1):
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(True)
ax.yaxis.tick_right()
# set the yaxis limits
if not magsarefluxes:
ax.set_ylim(ymax, ymin)
else:
ax.set_ylim(ymin, ymax)
# now figure out the xaxis ticklabels and ranges
tgrange = tgtimes.max() - tgtimes.min()
if tgrange < 10.0:
ticklocations = [tgrange/2.0]
ax.set_xlim(npmin(tgtimes) - 0.5, npmax(tgtimes) + 0.5)
elif 10.0 < tgrange < 30.0:
ticklocations = np.linspace(tgtimes.min()+5.0,
tgtimes.max()-5.0,
num=2)
ax.set_xlim(npmin(tgtimes) - 2.0, npmax(tgtimes) + 2.0)
elif 30.0 < tgrange < 100.0:
ticklocations = np.linspace(tgtimes.min()+10.0,
tgtimes.max()-10.0,
num=3)
ax.set_xlim(npmin(tgtimes) - 2.5, npmax(tgtimes) + 2.5)
else:
ticklocations = np.linspace(tgtimes.min()+20.0,
tgtimes.max()-20.0,
num=3)
ax.set_xlim(npmin(tgtimes) - 3.0, npmax(tgtimes) + 3.0)
ax.xaxis.set_ticks([int(x) for x in ticklocations])
# done with plotting all the sub axes
# make the distance between sub plots smaller
plt.subplots_adjust(wspace=0.07)
# make the overall x and y labels
fig.text(0.5, 0.00, 'JD - %.3f (not showing gaps > %.2f d)' %
(btimeorigin, segmentmingap), ha='center')
if not magsarefluxes:
fig.text(0.02, 0.5, 'magnitude', va='center', rotation='vertical')
else:
fig.text(0.02, 0.5, 'flux', va='center', rotation='vertical')
# make normal figure otherwise
else:
fig = plt.figure()
fig.set_size_inches(7.5,4.8)
plt.errorbar(btimes, bmags, fmt='go', yerr=berrs,
markersize=2.0, markeredgewidth=0.0, ecolor='grey',
capsize=0)
# make a grid
plt.grid(color='#a9a9a9',
alpha=0.9,
zorder=0,
linewidth=1.0,
linestyle=':')
# fix the ticks to use no offsets
plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)
plt.xlabel('JD - %.3f' % btimeorigin)
# set the yaxis limits and labels
if not magsarefluxes:
plt.ylim(ymax, ymin)
plt.ylabel('magnitude')
else:
plt.ylim(ymin, ymax)
plt.ylabel('flux')
# check if the output filename is actually an instance of StringIO
if sys.version_info[:2] < (3,0):
is_Strio = isinstance(out, cStringIO.InputType)
else:
is_Strio = isinstance(out, Strio)
# write the plot out to a file if requested
if out and not is_Strio:
if out.endswith('.png'):
plt.savefig(out,bbox_inches='tight',dpi=plotdpi)
else:
plt.savefig(out,bbox_inches='tight')
plt.close()
return os.path.abspath(out)
elif out and is_Strio:
plt.savefig(out, bbox_inches='tight', dpi=plotdpi, format='png')
return out
elif not out and dispok:
plt.show()
plt.close()
return
else:
LOGWARNING('no output file specified and no $DISPLAY set, '
'saving to magseries-plot.png in current directory')
outfile = 'magseries-plot.png'
plt.savefig(outfile,bbox_inches='tight',dpi=plotdpi)
plt.close()
return os.path.abspath(outfile)
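# Usage sketch (an illustration, not part of the library): plot a synthetic
# unphased light curve binned to 10-minute cadence. With real multi-season
# data, segmentmingap cuts long gaps out of the x-axis.
import numpy as np

rng = np.random.RandomState(7)
times = np.sort(rng.uniform(2455000.0, 2455090.0, size=3000))
mags = 12.0 + rng.normal(0.0, 0.02, times.size)
errs = np.full_like(mags, 0.02)

plotfile = plot_magseries(times, mags,
                          errs=errs,
                          sigclip=30.0,
                          timebin=600.0,
                          segmentmingap=100.0,
                          out='lightcurve.png')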
def plot_phased_magseries(times,
mags,
period,
epoch='min',
fitknotfrac=0.01,
errs=None,
magsarefluxes=False,
normto='globalmedian',
normmingap=4.0,
sigclip=30.0,
phasewrap=True,
phasesort=True,
phasebin=None,
plotphaselim=(-0.8,0.8),
yrange=None,
xtimenotphase=False,
xaxlabel='phase',
yaxlabel=None,
modelmags=None,
modeltimes=None,
modelerrs=None,
outfile=None,
plotdpi=100):
'''Plots a phased magnitude/flux time-series using the period provided.
Parameters
----------
times,mags : np.array
The mag/flux time-series to plot as a function of phase given `period`.
period : float
The period to use to phase-fold the time-series. Should be the same unit
as `times` (usually in days)
epoch : 'min' or float or None
This indicates how to get the epoch to use for phasing the light curve:
- If None, uses the `min(times)` as the epoch for phasing.
- If epoch is the string 'min', then fits a cubic spline to the phased
light curve using `min(times)` as the initial epoch, finds the
magnitude/flux minimum of this phased light curve fit, and finally
uses that time value as the epoch. This is useful for plotting
planetary transits and eclipsing binary phased light curves so that
phase 0.0 corresponds to the mid-center time of primary eclipse (or
transit).
- If epoch is a float, then uses that directly to phase the light
curve and as the epoch of the phased mag series plot.
fitknotfrac : float
If `epoch='min'`, this function will attempt to fit a cubic spline to
the phased light curve to find a time of light minimum as phase
0.0. This kwarg sets the number of knots to generate the spline as a
fraction of the total number of measurements in the input
time-series. By default, this is set so that 100 knots are used to
generate a spline for fitting the phased light curve consisting of 10000
measurements.
errs : np.array or None
If this is provided, contains the measurement errors associated with
each measurement of flux/mag in time-series. Providing this kwarg will
add errbars to the output plot.
magsarefluxes : bool
Indicates if the input `mags` array is actually an array of flux
measurements instead of magnitude measurements. If this is set to True,
then the plot y-axis will be set as appropriate for mag or fluxes.
normto : {'globalmedian', 'zero'} or a float
Sets the normalization target::
'globalmedian' -> norms each mag to the global median of the LC column
'zero' -> norms each mag to zero
a float -> norms each mag to this specified float value.
normmingap : float
This defines how large the gap between consecutive measurements must be
for them to be considered as parts of different timegroups. By default,
it is set to 4.0 days.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
phasewrap : bool
If this is True, the phased time-series will be wrapped around phase
0.0.
phasesort : bool
If this is True, the phased time-series will be sorted in phase.
phasebin : float or None
If this is provided, indicates the bin size to use to group together
measurements closer than this amount in phase. This is in units of
phase. The binned phased light curve will be overplotted on top of the
phased light curve. Useful for when one has many measurement points and
needs to pick out a small trend in an otherwise noisy phased light
curve.
plotphaselim : sequence of two floats or None
The x-axis limits to use when making the phased light curve plot. By
default, this is (-0.8, 0.8), which places phase 0.0 at the center of
the plot and covers approximately two cycles in phase to make any trends
clear.
yrange : list of two floats or None
This is used to provide a custom y-axis range to the plot. If None, will
automatically determine y-axis range.
xtimenotphase : bool
If True, the x-axis gets units of time (multiplies phase by period).
xaxlabel : str
Sets the label for the x-axis.
yaxlabel : str or None
Sets the label for the y-axis. If this is None, the appropriate label
will be used based on the value of the `magsarefluxes` kwarg.
modeltimes,modelmags,modelerrs : np.array or None
If all of these are provided, then this function will overplot the
values of modeltimes and modelmags on top of the actual phased light
curve. This is useful for plotting variability models on top of the
light curve (e.g. plotting a Mandel-Agol transit model over the actual
phased light curve). These arrays will be phased using the already
provided period and epoch.
outfile : str or StringIO/BytesIO or matplotlib.axes.Axes or None
- a string filename for the file where the plot will be written.
- a StringIO/BytesIO object to where the plot will be written.
- a matplotlib.axes.Axes object to where the plot will be written.
- if None, plots to 'magseries-phased-plot.png' in current dir.
plotdpi : int
Sets the resolution in DPI for PNG plots (default = 100).
Returns
-------
str or StringIO/BytesIO or matplotlib.axes.Axes
This returns based on the input:
- If `outfile` is a str or None, the path to the generated plot file is
returned.
- If `outfile` is a StringIO/BytesIO object, will return the
StringIO/BytesIO object to which the plot was written.
- If `outfile` is a matplotlib.axes.Axes object, will return the Axes
object with the plot elements added to it. One can then directly
include this Axes object in some other Figure.
'''
# sigclip the magnitude timeseries
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
# check if we need to normalize
if normto is not False:
stimes, smags = normalize_magseries(stimes, smags,
normto=normto,
magsarefluxes=magsarefluxes,
mingap=normmingap)
if ( isinstance(modelmags, np.ndarray) and
isinstance(modeltimes, np.ndarray) ):
modeltimes, modelmags = normalize_magseries(modeltimes, modelmags,
normto=normto,
magsarefluxes=magsarefluxes,
mingap=normmingap)
# figure out the epoch, if it's None, use the min of the time
if epoch is None:
epoch = stimes.min()
# if the epoch is 'min', then fit a spline to the light curve phased
# using the min of the time, find the fit mag minimum and use the time for
# that as the epoch
elif isinstance(epoch, str) and epoch == 'min':
try:
spfit = spline_fit_magseries(stimes, smags, serrs, period,
knotfraction=fitknotfrac)
epoch = spfit['fitinfo']['fitepoch']
if len(epoch) != 1:
epoch = epoch[0]
except Exception as e:
LOGEXCEPTION('spline fit failed, using min(times) as epoch')
epoch = npmin(stimes)
# now phase the data light curve (and optionally, phase bin the light curve)
if errs is not None:
phasedlc = phase_magseries_with_errs(stimes, smags, serrs, period,
epoch, wrap=phasewrap,
sort=phasesort)
plotphase = phasedlc['phase']
plotmags = phasedlc['mags']
ploterrs = phasedlc['errs']
# if we're supposed to bin the phases, do so
if phasebin:
binphasedlc = phase_bin_magseries_with_errs(plotphase, plotmags,
ploterrs,
binsize=phasebin)
binplotphase = binphasedlc['binnedphases']
binplotmags = binphasedlc['binnedmags']
binploterrs = binphasedlc['binnederrs']
else:
phasedlc = phase_magseries(stimes, smags, period, epoch,
wrap=phasewrap, sort=phasesort)
plotphase = phasedlc['phase']
plotmags = phasedlc['mags']
ploterrs = None
# if we're supposed to bin the phases, do so
if phasebin:
binphasedlc = phase_bin_magseries(plotphase,
plotmags,
binsize=phasebin)
binplotphase = binphasedlc['binnedphases']
binplotmags = binphasedlc['binnedmags']
binploterrs = None
# phase the model light curve
modelplotphase, modelplotmags = None, None
if ( isinstance(modelerrs,np.ndarray) and
isinstance(modeltimes,np.ndarray) and
isinstance(modelmags,np.ndarray) ):
modelphasedlc = phase_magseries_with_errs(modeltimes, modelmags,
modelerrs, period, epoch,
wrap=phasewrap,
sort=phasesort)
modelplotphase = modelphasedlc['phase']
modelplotmags = modelphasedlc['mags']
# note that we never will phase-bin the model (no point).
elif ( not isinstance(modelerrs,np.ndarray) and
isinstance(modeltimes,np.ndarray) and
isinstance(modelmags,np.ndarray) ):
modelphasedlc = phase_magseries(modeltimes, modelmags, period, epoch,
wrap=phasewrap, sort=phasesort)
modelplotphase = modelphasedlc['phase']
modelplotmags = modelphasedlc['mags']
# finally, make the plots
# check if the outfile is actually an Axes object
if isinstance(outfile, matplotlib.axes.Axes):
ax = outfile
# otherwise, it's just a normal file or StringIO/BytesIO
else:
fig = plt.figure()
fig.set_size_inches(7.5,4.8)
ax = plt.gca()
if xtimenotphase:
plotphase *= period
if phasebin:
ax.errorbar(plotphase, plotmags, fmt='o',
color='#B2BEB5',
yerr=ploterrs,
markersize=3.0,
markeredgewidth=0.0,
ecolor='#B2BEB5',
capsize=0)
if xtimenotphase:
binplotphase *= period
ax.errorbar(binplotphase, binplotmags, fmt='bo', yerr=binploterrs,
markersize=5.0, markeredgewidth=0.0, ecolor='#B2BEB5',
capsize=0)
else:
ax.errorbar(plotphase, plotmags, fmt='ko', yerr=ploterrs,
markersize=3.0, markeredgewidth=0.0, ecolor='#B2BEB5',
capsize=0)
if (isinstance(modelplotphase, np.ndarray) and
isinstance(modelplotmags, np.ndarray)):
if xtimenotphase:
modelplotphase *= period
ax.plot(modelplotphase, modelplotmags, zorder=5, linewidth=2,
alpha=0.9, color='#181c19')
# make a grid
ax.grid(color='#a9a9a9',
alpha=0.9,
zorder=0,
linewidth=1.0,
linestyle=':')
# make lines for phase 0.0, 0.5, and -0.5
ax.axvline(0.0,alpha=0.9,linestyle='dashed',color='g')
if not xtimenotphase:
ax.axvline(-0.5,alpha=0.9,linestyle='dashed',color='g')
ax.axvline(0.5,alpha=0.9,linestyle='dashed',color='g')
else:
ax.axvline(-period*0.5,alpha=0.9,linestyle='dashed',color='g')
ax.axvline(period*0.5,alpha=0.9,linestyle='dashed',color='g')
# fix the ticks to use no offsets
ax.get_yaxis().get_major_formatter().set_useOffset(False)
ax.get_xaxis().get_major_formatter().set_useOffset(False)
# get the yrange
if yrange and isinstance(yrange,(list,tuple)) and len(yrange) == 2:
ymin, ymax = yrange
else:
ymin, ymax = ax.get_ylim()
# set the y axis labels and range
if not yaxlabel:
if not magsarefluxes:
ax.set_ylim(ymax, ymin)
yaxlabel = 'magnitude'
else:
ax.set_ylim(ymin, ymax)
yaxlabel = 'flux'
# set the x axis limit
if not plotphaselim:
ax.set_xlim((npmin(plotphase)-0.1,
npmax(plotphase)+0.1))
else:
if xtimenotphase:
ax.set_xlim((period*plotphaselim[0],period*plotphaselim[1]))
else:
ax.set_xlim((plotphaselim[0],plotphaselim[1]))
# set up the axis labels and plot title
ax.set_xlabel(xaxlabel)
ax.set_ylabel(yaxlabel)
ax.set_title('period: %.6f d - epoch: %.6f' % (period, epoch))
LOGINFO('using period: %.6f d and epoch: %.6f' % (period, epoch))
# check if the output filename is actually an instance of StringIO
if sys.version_info[:2] < (3,0):
is_Strio = isinstance(outfile, cStringIO.InputType)
else:
is_Strio = isinstance(outfile, Strio)
# make the figure
if (outfile and
not is_Strio and
not isinstance(outfile, matplotlib.axes.Axes)):
if outfile.endswith('.png'):
fig.savefig(outfile, bbox_inches='tight', dpi=plotdpi)
else:
fig.savefig(outfile, bbox_inches='tight')
plt.close()
return period, epoch, os.path.abspath(outfile)
elif outfile and is_Strio:
fig.savefig(outfile, bbox_inches='tight', dpi=plotdpi, format='png')
return outfile
elif outfile and isinstance(outfile, matplotlib.axes.Axes):
return outfile
elif not outfile and dispok:
plt.show()
plt.close()
return period, epoch
else:
LOGWARNING('no output file specified and no $DISPLAY set, '
'saving to magseries-phased-plot.png in current directory')
outfile = 'magseries-phased-plot.png'
plt.savefig(outfile, bbox_inches='tight', dpi=plotdpi)
plt.close()
return period, epoch, os.path.abspath(outfile)
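# The phased-LC plotting above is built on a small set of helpers: sigma-clip,
# optional normalization, then phase-folding about an epoch. The sketch below
# is an illustrative demo only (not part of the pipeline); it phases a
# synthetic sinusoid with the same sigclip_magseries and
# phase_magseries_with_errs helpers used above, assuming the usual
# module-level imports (np, etc.) are available.
def _demo_phasefold_sketch():
    '''Minimal sketch: phase-fold a fake sinusoidal light curve.'''
    rng = np.random.RandomState(42)
    times = np.sort(rng.uniform(0.0, 30.0, size=2000))
    period = 1.2345
    epoch = times.min()
    mags = 12.0 + 0.05*np.sin(2.0*np.pi*(times - epoch)/period)
    errs = np.full_like(mags, 0.005)
    # sigma-clip, then phase at the known period and epoch
    stimes, smags, serrs = sigclip_magseries(times, mags, errs,
                                             magsarefluxes=False,
                                             sigclip=5.0)
    phasedlc = phase_magseries_with_errs(stimes, smags, serrs,
                                         period, epoch,
                                         wrap=True, sort=True)
    return phasedlc['phase'], phasedlc['mags'], phasedlc['errs']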
def skyview_stamp(ra, decl,
survey='DSS2 Red',
scaling='Linear',
flip=True,
convolvewith=None,
forcefetch=False,
cachedir='~/.astrobase/stamp-cache',
timeout=10.0,
retry_failed=False,
savewcsheader=True,
verbose=False):
'''This downloads a DSS FITS stamp centered on the coordinates specified.
This wraps the function :py:func:`astrobase.services.skyview.get_stamp`,
which downloads Digitized Sky Survey stamps in FITS format from the NASA
SkyView service:
https://skyview.gsfc.nasa.gov/current/cgi/query.pl
Also adds some useful operations on top of the FITS file returned.
Parameters
----------
ra,decl : float
The center coordinates for the stamp in decimal degrees.
survey : str
The survey name to get the stamp from. This is one of the
values in the 'SkyView Surveys' option boxes on the SkyView
webpage. Currently, we've only tested using 'DSS2 Red' as the value for
this kwarg, but the other ones should work in principle.
scaling : str
This is the pixel value scaling function to use.
flip : bool
Will flip the downloaded image top to bottom. This should usually be
True because matplotlib and FITS have different image coord origin
conventions. Alternatively, set this to False and use the
`origin='lower'` in any call to `matplotlib.pyplot.imshow` when plotting
this image.
convolvewith : astropy.convolution Kernel object or None
If `convolvewith` is an astropy.convolution Kernel object from:
http://docs.astropy.org/en/stable/convolution/kernels.html
then, this function will return the stamp convolved with that
kernel. This can be useful to see effects of wide-field telescopes (like
the HATNet and HATSouth lenses) degrading the nominal 1 arcsec/px of
DSS, causing blending of targets and any variability.
forcefetch : bool
If True, will disregard any existing cached copies of the stamp already
downloaded corresponding to the requested center coordinates and
redownload the FITS from the SkyView service.
cachedir : str
This is the path to the astrobase cache directory. All downloaded FITS
stamps are stored here as .fits.gz files so we can immediately respond
with the cached copy when a request is made for a coordinate center
that's already been downloaded.
timeout : float
Sets the timeout in seconds to wait for a response from the NASA SkyView
service.
retry_failed : bool
If the initial request to SkyView fails, and this is True, will retry
until it succeeds.
savewcsheader : bool
If this is True, also returns the WCS header of the downloaded FITS
stamp in addition to the FITS image itself. Useful for projecting object
coordinates onto image xy coordinates for visualization.
verbose : bool
If True, indicates progress.
Returns
-------
tuple or array or None
This returns based on the value of `savewcsheader`:
- If `savewcsheader=True`, returns a tuple:
(FITS stamp image as a numpy array, FITS header)
- If `savewcsheader=False`, returns only the FITS stamp image as numpy
array.
- If the stamp retrieval fails, returns None.
'''
stampdict = get_stamp(ra, decl,
survey=survey,
scaling=scaling,
forcefetch=forcefetch,
cachedir=cachedir,
timeout=timeout,
retry_failed=retry_failed,
verbose=verbose)
#
# DONE WITH FETCHING STUFF
#
if stampdict:
# open the frame
stampfits = pyfits.open(stampdict['fitsfile'])
header = stampfits[0].header
frame = stampfits[0].data
stampfits.close()
# finally, we can process the frame
if flip:
frame = np.flipud(frame)
if verbose:
LOGINFO('fetched stamp successfully for (%.3f, %.3f)'
% (ra, decl))
if convolvewith:
convolved = aconv.convolve(frame, convolvewith)
if savewcsheader:
return convolved, header
else:
return convolved
else:
if savewcsheader:
return frame, header
else:
return frame
else:
LOGERROR('could not fetch the requested stamp for '
'coords: (%.3f, %.3f) from survey: %s and scaling: %s'
% (ra, decl, survey, scaling))
return None
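# Illustrative usage sketch for skyview_stamp: fetch a DSS2 Red stamp for some
# placeholder coordinates and write it out with matplotlib. Assumes the usual
# module-level plt import used elsewhere in this module.
def _demo_skyview_stamp_sketch():
    '''Minimal sketch: fetch and save a DSS2 Red stamp image.'''
    result = skyview_stamp(290.0, 45.0,
                           survey='DSS2 Red',
                           savewcsheader=True,
                           verbose=True)
    if result is None:
        return None
    stampimg, stampheader = result
    plt.imshow(stampimg, cmap='gray_r')
    plt.title('DSS2 Red stamp')
    plt.savefig('demo-stamp.png', dpi=100)
    plt.close('all')
    return stampheader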
def fits_finder_chart(
fitsfile,
outfile,
fitsext=0,
wcsfrom=None,
scale=ZScaleInterval(),
stretch=LinearStretch(),
colormap=plt.cm.gray_r,
findersize=None,
finder_coordlimits=None,
overlay_ra=None,
overlay_decl=None,
overlay_pltopts={'marker':'o',
'markersize':10.0,
'markerfacecolor':'none',
'markeredgewidth':2.0,
'markeredgecolor':'red'},
overlay_zoomcontain=False,
grid=False,
gridcolor='k'
):
'''This makes a finder chart for a given FITS with an optional object
position overlay.
Parameters
----------
fitsfile : str
`fitsfile` is the FITS file to use to make the finder chart.
outfile : str
`outfile` is the name of the output file. This can be a png or pdf or
whatever else matplotlib can write given a filename and extension.
fitsext : int
Sets the FITS extension in `fitsfile` to use to extract the image array
from.
wcsfrom : str or None
If `wcsfrom` is None, the WCS to transform the RA/Dec to pixel x/y will
be taken from the FITS header of `fitsfile`. If this is not None, it
must be a FITS or similar file that contains a WCS header in its first
extension.
scale : astropy.visualization.Interval object
`scale` sets the normalization for the FITS pixel values. This is an
astropy.visualization Interval object.
See http://docs.astropy.org/en/stable/visualization/normalization.html
for details on `scale` and `stretch` objects.
stretch : astropy.visualization.Stretch object
`stretch` sets the stretch function for mapping FITS pixel values to
output pixel values. This is an astropy.visualization Stretch object.
See http://docs.astropy.org/en/stable/visualization/normalization.html
for details on `scale` and `stretch` objects.
colormap : matplotlib Colormap object
`colormap` is a matplotlib color map object to use for the output image.
findersize : None or tuple of two ints
If `findersize` is None, the output image size will be set by the NAXIS1
and NAXIS2 keywords in the input `fitsfile` FITS header. Otherwise,
`findersize` must be a tuple with the intended x and y size of the image
in inches (all output images will use a DPI = 100).
finder_coordlimits : list of four floats or None
If not None, `finder_coordlimits` sets x and y limits for the plot,
effectively zooming it in if these are smaller than the dimensions of
the FITS image. This should be a list of the form: [minra, maxra,
mindecl, maxdecl] all in decimal degrees.
overlay_ra, overlay_decl : np.array or None
`overlay_ra` and `overlay_decl` are ndarrays containing the RA and Dec
values to overplot on the image as an overlay. If these are both None,
then no overlay will be plotted.
overlay_pltopts : dict
        `overlay_pltopts` controls how the overlay points will be plotted. This
        is a dict with standard matplotlib marker, etc. kwargs as key-val pairs,
e.g. 'markersize', 'markerfacecolor', etc. The default options make red
outline circles at the location of each object in the overlay.
overlay_zoomcontain : bool
`overlay_zoomcontain` controls if the finder chart will be zoomed to
just contain the overlayed points. Everything outside the footprint of
these points will be discarded.
grid : bool
`grid` sets if a grid will be made on the output image.
gridcolor : str
        `gridcolor` sets the color of the grid lines. This is a standard matplotlib
color spec string.
Returns
-------
str or None
The filename of the generated output image if successful. None
otherwise.
'''
# read in the FITS file
if wcsfrom is None:
hdulist = pyfits.open(fitsfile)
img, hdr = hdulist[fitsext].data, hdulist[fitsext].header
hdulist.close()
frameshape = (hdr['NAXIS1'], hdr['NAXIS2'])
w = WCS(hdr)
elif os.path.exists(wcsfrom):
hdulist = pyfits.open(fitsfile)
img, hdr = hdulist[fitsext].data, hdulist[fitsext].header
hdulist.close()
frameshape = (hdr['NAXIS1'], hdr['NAXIS2'])
w = WCS(wcsfrom)
else:
LOGERROR('could not determine WCS info for input FITS: %s' %
fitsfile)
return None
# use the frame shape to set the output PNG's dimensions
if findersize is None:
fig = plt.figure(figsize=(frameshape[0]/100.0,
frameshape[1]/100.0))
else:
fig = plt.figure(figsize=findersize)
# set the coord limits if zoomcontain is True
# we'll leave 30 arcseconds of padding on each side
if (overlay_zoomcontain and
overlay_ra is not None and
overlay_decl is not None):
finder_coordlimits = [overlay_ra.min()-30.0/3600.0,
overlay_ra.max()+30.0/3600.0,
overlay_decl.min()-30.0/3600.0,
overlay_decl.max()+30.0/3600.0]
# set the coordinate limits if provided
if finder_coordlimits and isinstance(finder_coordlimits, (list,tuple)):
minra, maxra, mindecl, maxdecl = finder_coordlimits
cntra, cntdecl = (minra + maxra)/2.0, (mindecl + maxdecl)/2.0
pixelcoords = w.all_world2pix([[minra, mindecl],
[maxra, maxdecl],
[cntra, cntdecl]],1)
x1, y1, x2, y2 = (int(pixelcoords[0,0]),
int(pixelcoords[0,1]),
int(pixelcoords[1,0]),
int(pixelcoords[1,1]))
xmin = x1 if x1 < x2 else x2
xmax = x2 if x2 > x1 else x1
ymin = y1 if y1 < y2 else y2
ymax = y2 if y2 > y1 else y1
# create a new WCS with the same transform but new center coordinates
whdr = w.to_header()
whdr['CRPIX1'] = (xmax - xmin)/2
whdr['CRPIX2'] = (ymax - ymin)/2
whdr['CRVAL1'] = cntra
whdr['CRVAL2'] = cntdecl
whdr['NAXIS1'] = xmax - xmin
whdr['NAXIS2'] = ymax - ymin
w = WCS(whdr)
else:
        # NAXIS1 is the image x size, NAXIS2 is the y size
        xmin, xmax, ymin, ymax = 0, hdr['NAXIS1'], 0, hdr['NAXIS2']
# add the axes with the WCS projection
# this should automatically handle subimages because we fix the WCS
# appropriately above for these
fig.add_subplot(111,projection=w)
if scale is not None and stretch is not None:
norm = ImageNormalize(img,
interval=scale,
stretch=stretch)
plt.imshow(img[ymin:ymax,xmin:xmax],
origin='lower',
cmap=colormap,
norm=norm)
else:
plt.imshow(img[ymin:ymax,xmin:xmax],
origin='lower',
cmap=colormap)
# handle additional options
if grid:
plt.grid(color=gridcolor,ls='solid',lw=1.0)
# handle the object overlay
if overlay_ra is not None and overlay_decl is not None:
our_pltopts = dict(
transform=plt.gca().get_transform('fk5'),
marker='o',
markersize=10.0,
markerfacecolor='none',
markeredgewidth=2.0,
markeredgecolor='red',
rasterized=True,
linestyle='none'
)
if overlay_pltopts is not None and isinstance(overlay_pltopts,
dict):
our_pltopts.update(overlay_pltopts)
plt.gca().set_autoscale_on(False)
plt.gca().plot(overlay_ra, overlay_decl,
**our_pltopts)
plt.xlabel('Right Ascension [deg]')
plt.ylabel('Declination [deg]')
# get the x and y axes objects to fix the ticks
xax = plt.gca().coords[0]
yax = plt.gca().coords[1]
yax.set_major_formatter('d.ddd')
xax.set_major_formatter('d.ddd')
# save the figure
plt.savefig(outfile, dpi=100.0)
plt.close('all')
return outfile
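# Illustrative usage sketch for fits_finder_chart: make a finder chart from a
# (hypothetical) calibrated frame and overlay two target positions, zooming to
# contain them. The file names and coordinates below are placeholders.
def _demo_finder_chart_sketch():
    '''Minimal sketch: finder chart with an RA/Dec overlay.'''
    overlay_ra = np.array([120.1213, 120.3456])      # placeholder coords
    overlay_decl = np.array([-23.4567, -23.5678])
    return fits_finder_chart(
        'example-frame.fits',       # hypothetical input FITS
        'example-finder.png',       # output image
        overlay_ra=overlay_ra,
        overlay_decl=overlay_decl,
        overlay_zoomcontain=True,
        grid=True,
    )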
def plot_periodbase_lsp(lspinfo, outfile=None, plotdpi=100):
'''Makes a plot of periodograms obtained from `periodbase` functions.
This takes the output dict produced by any `astrobase.periodbase`
period-finder function or a pickle filename containing such a dict and makes
a periodogram plot.
Parameters
----------
lspinfo : dict or str
If lspinfo is a dict, it must be a dict produced by an
`astrobase.periodbase` period-finder function or a dict from your own
period-finder function or routine that is of the form below with at
least these keys::
{'periods': np.array of all periods searched by the period-finder,
'lspvals': np.array of periodogram power value for each period,
'bestperiod': a float value that is the period with the highest
peak in the periodogram, i.e. the most-likely actual
period,
'method': a three-letter code naming the period-finder used; must
be one of the keys in the `METHODLABELS` dict above,
'nbestperiods': a list of the periods corresponding to periodogram
peaks (`nbestlspvals` below) to annotate on the
periodogram plot so they can be called out
visually,
'nbestlspvals': a list of the power values associated with
periodogram peaks to annotate on the periodogram
plot so they can be called out visually; should be
the same length as `nbestperiods` above}
If lspinfo is a str, then it must be a path to a pickle file that
contains a dict of the form described above.
outfile : str or None
If this is a str, will write the periodogram plot to the file specified
by this string. If this is None, will write to a file called
'lsp-plot.png' in the current working directory.
plotdpi : int
Sets the resolution in DPI of the output periodogram plot PNG file.
Returns
-------
str
Absolute path to the periodogram plot file created.
'''
# get the lspinfo from a pickle file transparently
if isinstance(lspinfo,str) and os.path.exists(lspinfo):
LOGINFO('loading LSP info from pickle %s' % lspinfo)
with open(lspinfo,'rb') as infd:
lspinfo = pickle.load(infd)
try:
# get the things to plot out of the data
periods = lspinfo['periods']
lspvals = lspinfo['lspvals']
bestperiod = lspinfo['bestperiod']
lspmethod = lspinfo['method']
# make the LSP plot on the first subplot
plt.plot(periods, lspvals)
plt.xscale('log',basex=10)
plt.xlabel('Period [days]')
plt.ylabel(PLOTYLABELS[lspmethod])
plottitle = '%s best period: %.6f d' % (METHODSHORTLABELS[lspmethod],
bestperiod)
plt.title(plottitle)
# show the best five peaks on the plot
for bestperiod, bestpeak in zip(lspinfo['nbestperiods'],
lspinfo['nbestlspvals']):
plt.annotate('%.6f' % bestperiod,
xy=(bestperiod, bestpeak), xycoords='data',
xytext=(0.0,25.0), textcoords='offset points',
arrowprops=dict(arrowstyle="->"),fontsize='x-small')
# make a grid
plt.grid(color='#a9a9a9',
alpha=0.9,
zorder=0,
linewidth=1.0,
linestyle=':')
# make the figure
if outfile and isinstance(outfile, str):
if outfile.endswith('.png'):
plt.savefig(outfile,bbox_inches='tight',dpi=plotdpi)
else:
plt.savefig(outfile,bbox_inches='tight')
plt.close()
return os.path.abspath(outfile)
elif dispok:
plt.show()
plt.close()
return
else:
LOGWARNING('no output file specified and no $DISPLAY set, '
'saving to lsp-plot.png in current directory')
outfile = 'lsp-plot.png'
plt.savefig(outfile,bbox_inches='tight',dpi=plotdpi)
plt.close()
return os.path.abspath(outfile)
except Exception as e:
LOGEXCEPTION('could not plot this LSP, appears to be empty')
return
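# Illustrative sketch for plot_periodbase_lsp: build a minimal lspinfo dict
# with exactly the keys documented above and plot it. The synthetic
# periodogram values are placeholders, and 'gls' is assumed to be a valid key
# of the METHODSHORTLABELS/PLOTYLABELS dicts used above.
def _demo_lsp_plot_sketch():
    '''Minimal sketch: plot a fake periodogram dict.'''
    periods = np.linspace(0.5, 10.0, 2000)
    lspvals = np.exp(-0.5*((periods - 2.5)/0.05)**2)   # fake peak at 2.5 d
    lspinfo = {'periods': periods,
               'lspvals': lspvals,
               'bestperiod': 2.5,
               'method': 'gls',
               'nbestperiods': [2.5],
               'nbestlspvals': [1.0]}
    return plot_periodbase_lsp(lspinfo, outfile='demo-lsp.png', plotdpi=100)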
def read_hatpi_textlc(lcfile):
'''
This reads in a textlc that is complete up to the TFA stage.
'''
    if 'TF1' in lcfile:
        thiscoldefs = COLDEFS + [('itf1',float)]
    elif 'TF2' in lcfile:
        thiscoldefs = COLDEFS + [('itf2',float)]
    elif 'TF3' in lcfile:
        thiscoldefs = COLDEFS + [('itf3',float)]
    else:
        # not a recognized TFA LC; fall back to the base column definitions
        # so we don't hit a NameError below
        thiscoldefs = COLDEFS
LOGINFO('reading %s' % lcfile)
    # open in binary mode so the .decode() below works in Python 3 for both
    # gzipped and plain-text light curves
    if lcfile.endswith('.gz'):
        infd = gzip.open(lcfile,'rb')
    else:
        infd = open(lcfile,'rb')
with infd:
lclines = infd.read().decode().split('\n')
lclines = [x.split() for x in lclines if ('#' not in x and len(x) > 0)]
ndet = len(lclines)
if ndet > 0:
lccols = list(zip(*lclines))
lcdict = {x[0]:y for (x,y) in zip(thiscoldefs, lccols)}
# convert to ndarray
for col in thiscoldefs:
lcdict[col[0]] = np.array([col[1](x) for x in lcdict[col[0]]])
else:
lcdict = {}
LOGWARNING('no detections in %s' % lcfile)
# convert to empty ndarrays
for col in thiscoldefs:
lcdict[col[0]] = np.array([])
# add the object's name to the lcdict
hatid = HATIDREGEX.findall(lcfile)
lcdict['objectid'] = hatid[0] if hatid else 'unknown object'
# add the columns to the lcdict
lcdict['columns'] = [x[0] for x in thiscoldefs]
# add some basic info similar to usual HATLCs
lcdict['objectinfo'] = {
'ndet':ndet,
'hatid':hatid[0] if hatid else 'unknown object',
'network':'HP',
}
# break out the {stationid}-{framenum}{framesub}_{ccdnum} framekey
# into separate columns
framekeyelems = FRAMEREGEX.findall('\n'.join(lcdict['frk']))
    lcdict['stf'] = np.array([(int(x[0]) if x[0].isdigit() else np.nan)
                              for x in framekeyelems])
    lcdict['cfn'] = np.array([(int(x[1]) if x[1].isdigit() else np.nan)
                              for x in framekeyelems])
    lcdict['cfs'] = np.array([x[2] for x in framekeyelems])
    lcdict['ccd'] = np.array([(int(x[3]) if x[3].isdigit() else np.nan)
                              for x in framekeyelems])
# update the column list with these columns
lcdict['columns'].extend(['stf','cfn','cfs','ccd'])
# add more objectinfo: 'stations', etc.
lcdict['objectinfo']['network'] = 'HP'
lcdict['objectinfo']['stations'] = [
'HP%s' % x for x in np.unique(lcdict['stf']).tolist()
]
return lcdict
def lcdict_to_pickle(lcdict, outfile=None):
'''This just writes the lcdict to a pickle.
    If outfile is None, then will try to get the name from
    lcdict['objectid'] and write to <objectid>-hplc.pkl. If that fails, will
    write to a file named hplc.pkl.
'''
if not outfile and lcdict['objectid']:
outfile = '%s-hplc.pkl' % lcdict['objectid']
elif not outfile and not lcdict['objectid']:
outfile = 'hplc.pkl'
with open(outfile,'wb') as outfd:
pickle.dump(lcdict, outfd, protocol=pickle.HIGHEST_PROTOCOL)
if os.path.exists(outfile):
LOGINFO('lcdict for object: %s -> %s OK' % (lcdict['objectid'],
outfile))
return outfile
else:
LOGERROR('could not make a pickle for this lcdict!')
return None
def read_hatpi_pklc(lcfile):
'''
This just reads a pickle LC. Returns an lcdict.
'''
try:
if lcfile.endswith('.gz'):
infd = gzip.open(lcfile,'rb')
else:
infd = open(lcfile,'rb')
lcdict = pickle.load(infd)
infd.close()
return lcdict
except UnicodeDecodeError:
if lcfile.endswith('.gz'):
infd = gzip.open(lcfile,'rb')
else:
infd = open(lcfile,'rb')
LOGWARNING('pickle %s was probably from Python 2 '
'and failed to load without using "latin1" encoding. '
'This is probably a numpy issue: '
'http://stackoverflow.com/q/11305790' % lcfile)
lcdict = pickle.load(infd, encoding='latin1')
infd.close()
return lcdict
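# Illustrative round-trip sketch for the readers/writers above: read a
# (hypothetical) HATPI TFA text LC, pickle it, then load the pickle back as an
# lcdict. The input filename is a placeholder.
def _demo_hatpi_roundtrip_sketch():
    '''Minimal sketch: text LC -> pickle -> lcdict.'''
    lcdict = read_hatpi_textlc('HAT-123-0001234.tfalc.TF1.gz')   # placeholder
    pklpath = lcdict_to_pickle(lcdict)
    return read_hatpi_pklc(pklpath) if pklpath else None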
def concatenate_textlcs(lclist,
sortby='rjd',
normalize=True):
'''This concatenates a list of light curves.
Does not care about overlaps or duplicates. The light curves must all be
from the same aperture.
The intended use is to concatenate light curves across CCDs or instrument
changes for a single object. These can then be normalized later using
    standard astrobase tools to search for variability and/or periodicity.
sortby is a column to sort the final concatenated light curve by in
ascending order.
If normalize is True, then each light curve's magnitude columns are
normalized to zero.
The returned lcdict has an extra column: 'lcn' that tracks which measurement
belongs to which input light curve. This can be used with
lcdict['concatenated'] which relates input light curve index to input light
curve filepath. Finally, there is an 'nconcatenated' key in the lcdict that
contains the total number of concatenated light curves.
'''
# read the first light curve
lcdict = read_hatpi_textlc(lclist[0])
# track which LC goes where
# initial LC
lccounter = 0
lcdict['concatenated'] = {lccounter: os.path.abspath(lclist[0])}
lcdict['lcn'] = np.full_like(lcdict['rjd'], lccounter)
# normalize if needed
if normalize:
for col in MAGCOLS:
if col in lcdict:
thismedval = np.nanmedian(lcdict[col])
# handle fluxes
if col in ('ifl1','ifl2','ifl3'):
lcdict[col] = lcdict[col] / thismedval
# handle mags
else:
lcdict[col] = lcdict[col] - thismedval
# now read the rest
for lcf in lclist[1:]:
thislcd = read_hatpi_textlc(lcf)
# if the columns don't agree, skip this LC
if thislcd['columns'] != lcdict['columns']:
LOGERROR('file %s does not have the '
'same columns as first file %s, skipping...'
% (lcf, lclist[0]))
continue
# otherwise, go ahead and start concatenatin'
else:
LOGINFO('adding %s (ndet: %s) to %s (ndet: %s)'
% (lcf,
thislcd['objectinfo']['ndet'],
lclist[0],
lcdict[lcdict['columns'][0]].size))
# update LC tracking
lccounter = lccounter + 1
lcdict['concatenated'][lccounter] = os.path.abspath(lcf)
lcdict['lcn'] = np.concatenate((
lcdict['lcn'],
np.full_like(thislcd['rjd'],lccounter)
))
# concatenate the columns
for col in lcdict['columns']:
# handle normalization for magnitude columns
if normalize and col in MAGCOLS:
thismedval = np.nanmedian(thislcd[col])
# handle fluxes
if col in ('ifl1','ifl2','ifl3'):
thislcd[col] = thislcd[col] / thismedval
# handle mags
else:
thislcd[col] = thislcd[col] - thismedval
# concatenate the values
lcdict[col] = np.concatenate((lcdict[col], thislcd[col]))
#
# now we're all done concatenatin'
#
# make sure to add up the ndet
lcdict['objectinfo']['ndet'] = lcdict[lcdict['columns'][0]].size
# update the stations
lcdict['objectinfo']['stations'] = [
'HP%s' % x for x in np.unique(lcdict['stf']).tolist()
]
# update the total LC count
lcdict['nconcatenated'] = lccounter + 1
# if we're supposed to sort by a column, do so
if sortby and sortby in [x[0] for x in COLDEFS]:
LOGINFO('sorting concatenated light curve by %s...' % sortby)
sortind = np.argsort(lcdict[sortby])
# sort all the measurement columns by this index
for col in lcdict['columns']:
lcdict[col] = lcdict[col][sortind]
# make sure to sort the lcn index as well
lcdict['lcn'] = lcdict['lcn'][sortind]
LOGINFO('done. concatenated light curve has %s detections' %
lcdict['objectinfo']['ndet'])
return lcdict
def concatenate_textlcs_for_objectid(lcbasedir,
objectid,
aperture='TF1',
postfix='.gz',
sortby='rjd',
normalize=True,
recursive=True):
'''This concatenates all text LCs for an objectid with the given aperture.
Does not care about overlaps or duplicates. The light curves must all be
from the same aperture.
The intended use is to concatenate light curves across CCDs or instrument
changes for a single object. These can then be normalized later using
    standard astrobase tools to search for variability and/or periodicity.
lcbasedir is the directory to start searching in.
objectid is the object to search for.
aperture is the aperture postfix to use: (TF1 = aperture 1,
TF2 = aperture 2,
TF3 = aperture 3)
sortby is a column to sort the final concatenated light curve by in
ascending order.
If normalize is True, then each light curve's magnitude columns are
normalized to zero, and the whole light curve is then normalized to the
global median magnitude for each magnitude column.
If recursive is True, then the function will search recursively in lcbasedir
for any light curves matching the specified criteria. This may take a while,
especially on network filesystems.
The returned lcdict has an extra column: 'lcn' that tracks which measurement
belongs to which input light curve. This can be used with
lcdict['concatenated'] which relates input light curve index to input light
curve filepath. Finally, there is an 'nconcatenated' key in the lcdict that
contains the total number of concatenated light curves.
'''
LOGINFO('looking for light curves for %s, aperture %s in directory: %s'
% (objectid, aperture, lcbasedir))
if recursive is False:
matching = glob.glob(os.path.join(lcbasedir,
'*%s*%s*%s' % (objectid,
aperture,
postfix)))
else:
# use recursive glob for Python 3.5+
if sys.version_info[:2] > (3,4):
matching = glob.glob(os.path.join(lcbasedir,
'**',
'*%s*%s*%s' % (objectid,
aperture,
postfix)),
recursive=True)
LOGINFO('found %s files: %s' % (len(matching), repr(matching)))
# otherwise, use os.walk and glob
else:
# use os.walk to go through the directories
walker = os.walk(lcbasedir)
matching = []
for root, dirs, _files in walker:
for sdir in dirs:
searchpath = os.path.join(root,
sdir,
'*%s*%s*%s' % (objectid,
aperture,
postfix))
foundfiles = glob.glob(searchpath)
if foundfiles:
matching.extend(foundfiles)
LOGINFO(
'found %s in dir: %s' % (repr(foundfiles),
os.path.join(root,sdir))
)
# now that we have all the files, concatenate them
# a single file will be returned as normalized
if matching and len(matching) > 0:
clcdict = concatenate_textlcs(matching,
sortby=sortby,
normalize=normalize)
return clcdict
else:
LOGERROR('did not find any light curves for %s and aperture %s' %
(objectid, aperture))
return None
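# Illustrative sketch: concatenate all TF1 text LCs found under a base
# directory for a single object, then check the bookkeeping keys added by
# concatenate_textlcs. The directory and object ID below are placeholders.
def _demo_concat_sketch():
    '''Minimal sketch: concatenate per-CCD/per-station text LCs.'''
    clcdict = concatenate_textlcs_for_objectid('/data/hatpi/lcs',   # placeholder
                                               'HAT-123-0001234',   # placeholder
                                               aperture='TF1',
                                               sortby='rjd',
                                               normalize=True)
    if clcdict is not None:
        LOGINFO('concatenated %s LCs, total ndet: %s' %
                (clcdict['nconcatenated'], clcdict['objectinfo']['ndet']))
    return clcdict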
def concat_write_pklc(lcbasedir,
objectid,
aperture='TF1',
postfix='.gz',
sortby='rjd',
normalize=True,
outdir=None,
recursive=True):
'''This concatenates all text LCs for the given object and writes to a pklc.
Basically a rollup for the concatenate_textlcs_for_objectid and
lcdict_to_pickle functions.
'''
concatlcd = concatenate_textlcs_for_objectid(lcbasedir,
objectid,
aperture=aperture,
sortby=sortby,
normalize=normalize,
recursive=recursive)
    if concatlcd is None:
        LOGERROR('could not concatenate any light curves for %s, '
                 'nothing to write' % objectid)
        return None
    if not outdir:
outdir = 'pklcs'
if not os.path.exists(outdir):
os.mkdir(outdir)
outfpath = os.path.join(outdir, '%s-%s-pklc.pkl' % (concatlcd['objectid'],
aperture))
pklc = lcdict_to_pickle(concatlcd, outfile=outfpath)
return pklc
def parallel_concat_worker(task):
'''
This is a worker for the function below.
task[0] = lcbasedir
task[1] = objectid
task[2] = {'aperture','postfix','sortby','normalize','outdir','recursive'}
'''
lcbasedir, objectid, kwargs = task
try:
return concat_write_pklc(lcbasedir, objectid, **kwargs)
except Exception as e:
LOGEXCEPTION('failed LC concatenation for %s in %s'
% (objectid, lcbasedir))
return None
def parallel_concat_lcdir(lcbasedir,
objectidlist,
aperture='TF1',
postfix='.gz',
sortby='rjd',
normalize=True,
outdir=None,
recursive=True,
nworkers=32,
maxworkertasks=1000):
'''This concatenates all text LCs for the given objectidlist.
'''
if not outdir:
outdir = 'pklcs'
if not os.path.exists(outdir):
os.mkdir(outdir)
tasks = [(lcbasedir, x, {'aperture':aperture,
'postfix':postfix,
'sortby':sortby,
'normalize':normalize,
'outdir':outdir,
'recursive':recursive}) for x in objectidlist]
pool = mp.Pool(nworkers, maxtasksperchild=maxworkertasks)
results = pool.map(parallel_concat_worker, tasks)
pool.close()
pool.join()
return {x:y for (x,y) in zip(objectidlist, results)}
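# Illustrative sketch: drive the parallel concatenation above for a couple of
# object IDs. The base directory and object IDs are placeholders; nworkers is
# kept small for a laptop-scale test.
def _demo_parallel_concat_sketch():
    '''Minimal sketch: parallel LC concatenation for a few objects.'''
    objectids = ['HAT-123-0001234', 'HAT-123-0005678']   # placeholders
    return parallel_concat_lcdir('/data/hatpi/lcs',      # placeholder path
                                 objectids,
                                 aperture='TF1',
                                 outdir='pklcs',
                                 nworkers=2)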
def merge_hatpi_textlc_apertures(lclist):
'''This merges all TFA text LCs with separate apertures for a single object.
The framekey column will be used as the join column across all light curves
in lclist. Missing values will be filled in with nans. This function assumes
all light curves are in the format specified in COLDEFS above and readable
by read_hatpi_textlc above (i.e. have a single column for TFA mags for a
specific aperture at the end).
'''
lcaps = {}
framekeys = []
for lc in lclist:
lcd = read_hatpi_textlc(lc)
# figure what aperture this is and put it into the lcdict. if two LCs
# with the same aperture (i.e. TF1 and TF1) are provided, the later one
# in the lclist will overwrite the previous one,
for col in lcd['columns']:
if col.startswith('itf'):
lcaps[col] = lcd
thisframekeys = lcd['frk'].tolist()
framekeys.extend(thisframekeys)
# uniqify the framekeys
    framekeys = sorted(list(set(framekeys)))
    # FIXME: the join on framekey described in the docstring (with NaN-filling
    # for missing values) is not implemented here yet; so far this function
    # only collects the per-aperture lcdicts and the unique framekeys.
def read_hatpi_binnedlc(binnedpklf, textlcf, timebinsec):
'''This reads a binnedlc pickle produced by the HATPI prototype pipeline.
Converts it into a standard lcdict as produced by the read_hatpi_textlc
function above by using the information in unbinnedtextlc for the same
object.
Adds a 'binned' key to the standard lcdict containing the binned mags, etc.
'''
LOGINFO('reading binned LC %s' % binnedpklf)
# read the textlc
lcdict = read_hatpi_textlc(textlcf)
# read the binned LC
if binnedpklf.endswith('.gz'):
infd = gzip.open(binnedpklf,'rb')
else:
infd = open(binnedpklf,'rb')
try:
binned = pickle.load(infd)
except Exception as e:
infd.seek(0)
binned = pickle.load(infd, encoding='latin1')
infd.close()
# now that we have both, pull out the required columns from the binnedlc
blckeys = binned.keys()
lcdict['binned'] = {}
for key in blckeys:
# get EPD stuff
if (key == 'epdlc' and
'AP0' in binned[key] and
'AP1' in binned[key] and
'AP2' in binned[key]):
# we'll have to generate errors because we don't have any in the
# generated binned LC.
ap0mad = np.nanmedian(np.abs(binned[key]['AP0'] -
np.nanmedian(binned[key]['AP0'])))
ap1mad = np.nanmedian(np.abs(binned[key]['AP1'] -
np.nanmedian(binned[key]['AP1'])))
ap2mad = np.nanmedian(np.abs(binned[key]['AP2'] -
np.nanmedian(binned[key]['AP2'])))
lcdict['binned']['iep1'] = {'times':binned[key]['RJD'],
'mags':binned[key]['AP0'],
'errs':np.full_like(binned[key]['AP0'],
ap0mad),
'nbins':binned[key]['nbins'],
'timebins':binned[key]['jdbins'],
'timebinsec':timebinsec}
lcdict['binned']['iep2'] = {'times':binned[key]['RJD'],
'mags':binned[key]['AP1'],
'errs':np.full_like(binned[key]['AP1'],
ap1mad),
'nbins':binned[key]['nbins'],
'timebins':binned[key]['jdbins'],
'timebinsec':timebinsec}
lcdict['binned']['iep3'] = {'times':binned[key]['RJD'],
'mags':binned[key]['AP2'],
'errs':np.full_like(binned[key]['AP2'],
ap2mad),
'nbins':binned[key]['nbins'],
'timebins':binned[key]['jdbins'],
'timebinsec':timebinsec}
# get TFA stuff for aperture 1
if ((key == 'tfalc.TF1' or key == 'tfalc.TF1.gz') and
'AP0' in binned[key]):
# we'll have to generate errors because we don't have any in the
# generated binned LC.
ap0mad = np.nanmedian(np.abs(binned[key]['AP0'] -
np.nanmedian(binned[key]['AP0'])))
lcdict['binned']['itf1'] = {'times':binned[key]['RJD'],
'mags':binned[key]['AP0'],
'errs':np.full_like(binned[key]['AP0'],
ap0mad),
'nbins':binned[key]['nbins'],
'timebins':binned[key]['jdbins'],
'timebinsec':timebinsec}
        # get TFA stuff for aperture 2
if ((key == 'tfalc.TF2' or key == 'tfalc.TF2.gz') and
'AP0' in binned[key]):
# we'll have to generate errors because we don't have any in the
# generated binned LC.
ap0mad = np.nanmedian(np.abs(binned[key]['AP0'] -
np.nanmedian(binned[key]['AP0'])))
lcdict['binned']['itf2'] = {'times':binned[key]['RJD'],
'mags':binned[key]['AP0'],
'errs':np.full_like(binned[key]['AP0'],
ap0mad),
'nbins':binned[key]['nbins'],
'timebins':binned[key]['jdbins'],
'timebinsec':timebinsec}
        # get TFA stuff for aperture 3
if ((key == 'tfalc.TF3' or key == 'tfalc.TF3.gz') and
'AP0' in binned[key]):
# we'll have to generate errors because we don't have any in the
# generated binned LC.
ap0mad = np.nanmedian(np.abs(binned[key]['AP0'] -
np.nanmedian(binned[key]['AP0'])))
lcdict['binned']['itf3'] = {'times':binned[key]['RJD'],
'mags':binned[key]['AP0'],
'errs':np.full_like(binned[key]['AP0'],
ap0mad),
'nbins':binned[key]['nbins'],
'timebins':binned[key]['jdbins'],
'timebinsec':timebinsec}
# all done, check if we succeeded
if lcdict['binned']:
return lcdict
else:
LOGERROR('no binned measurements found in %s!' % binnedpklf)
return None
def generate_hatpi_binnedlc_pkl(binnedpklf, textlcf, timebinsec,
outfile=None):
'''
This reads the binned LC and writes it out to a pickle.
'''
binlcdict = read_hatpi_binnedlc(binnedpklf, textlcf, timebinsec)
if binlcdict:
if outfile is None:
outfile = os.path.join(
os.path.dirname(binnedpklf),
'%s-hplc.pkl' % (
os.path.basename(binnedpklf).replace('sec-lc.pkl.gz','')
)
)
return lcdict_to_pickle(binlcdict, outfile=outfile)
else:
LOGERROR('could not read binned HATPI LC: %s' % binnedpklf)
return None
def parallel_gen_binnedlc_pkls(binnedpkldir,
textlcdir,
timebinsec,
binnedpklglob='*binned*sec*.pkl',
textlcglob='*.tfalc.TF1*'):
'''
This generates the binnedlc pkls for a directory of such files.
FIXME: finish this
'''
binnedpkls = sorted(glob.glob(os.path.join(binnedpkldir, binnedpklglob)))
# find all the textlcs associated with these
textlcs = []
for bpkl in binnedpkls:
objectid = HATIDREGEX.findall(bpkl)
        # HATIDREGEX.findall returns a list; an empty list means we could not
        # parse an object ID out of this filename
        if objectid:
            objectid = objectid[0]
searchpath = os.path.join(textlcdir, '%s-%s' % (objectid, textlcglob))
textlcf = glob.glob(searchpath)
if textlcf:
textlcs.append(textlcf)
else:
textlcs.append(None)
def pklc_fovcatalog_objectinfo(
pklcdir,
fovcatalog,
fovcatalog_columns=[0,1,2,
6,7,
8,9,
10,11,
13,14,15,16,
17,18,19,
20,21],
fovcatalog_colnames=['objectid','ra','decl',
'jmag','jmag_err',
'hmag','hmag_err',
'kmag','kmag_err',
'bmag','vmag','rmag','imag',
'sdssu','sdssg','sdssr',
'sdssi','sdssz'],
fovcatalog_colformats=('U20,f8,f8,'
'f8,f8,'
'f8,f8,'
'f8,f8,'
'f8,f8,f8,f8,'
'f8,f8,f8,'
'f8,f8')
):
'''Adds catalog info to objectinfo key of all pklcs in lcdir.
If fovcatalog, fovcatalog_columns, fovcatalog_colnames are provided, uses
them to find all the additional information listed in the fovcatalog_colname
keys, and writes this info to the objectinfo key of each lcdict. This makes
    it easier for astrobase tools to work on these light curves.
The default set up for fovcatalog is to use a text file generated by the
HATPI pipeline before auto-calibrating a field. The format is specified as
above in _columns, _colnames, and _colformats.
'''
if fovcatalog.endswith('.gz'):
catfd = gzip.open(fovcatalog)
else:
catfd = open(fovcatalog)
# read the catalog using the colformats, etc.
fovcat = np.genfromtxt(catfd,
usecols=fovcatalog_columns,
names=fovcatalog_colnames,
dtype=fovcatalog_colformats)
catfd.close()
pklclist = sorted(glob.glob(os.path.join(pklcdir, '*HAT*-pklc.pkl')))
updatedpklcs, failedpklcs = [], []
for pklc in pklclist:
lcdict = read_hatpi_pklc(pklc)
objectid = lcdict['objectid']
catind = np.where(fovcat['objectid'] == objectid)
# if we found catalog info for this object, put it into objectinfo
        if len(catind) > 0 and catind[0].size > 0:
lcdict['objectinfo'].update(
{x:y for x,y in zip(
fovcatalog_colnames,
[np.asscalar(fovcat[z][catind]) for
z in fovcatalog_colnames]
)
}
)
# write the LC back to the pickle (tempfile for atomicity)
with open(pklc+'-tmp','wb') as outfd:
pickle.dump(lcdict, outfd, pickle.HIGHEST_PROTOCOL)
# overwrite previous once we know it exists
if os.path.exists(pklc+'-tmp'):
shutil.move(pklc+'-tmp',pklc)
LOGINFO('updated %s with catalog info for %s at %.3f, %.3f OK' %
(pklc, objectid,
lcdict['objectinfo']['ra'],
lcdict['objectinfo']['decl']))
updatedpklcs.append(pklc)
# otherwise, do nothing
else:
failedpklcs.append(pklc)
# end of pklclist processing
return updatedpklcs, failedpklcs
def _base64_to_file(b64str, outfpath, writetostrio=False):
'''This converts the base64 encoded string to a file.
Parameters
----------
b64str : str
        A base64-encoded string that is the output of `base64.b64encode`.
outfpath : str
The path to where the file will be written. This should include an
appropriate extension for the file (e.g. a base64 encoded string that
represents a PNG should have its `outfpath` end in a '.png') so the OS
can open these files correctly.
writetostrio : bool
If this is True, will return a StringIO object with the binary stream
decoded from the base64-encoded input string `b64str`. This can be
useful to embed these into other files without having to write them to
disk.
Returns
-------
str or StringIO object
If `writetostrio` is False, will return the output file's path as a
str. If it is True, will return a StringIO object directly. If writing
the file fails in either case, will return None.
'''
try:
filebytes = base64.b64decode(b64str)
# if we're writing back to a stringio object
if writetostrio:
outobj = StrIO(filebytes)
return outobj
# otherwise, we're writing to an actual file
else:
with open(outfpath,'wb') as outfd:
outfd.write(filebytes)
if os.path.exists(outfpath):
return outfpath
else:
LOGERROR('could not write output file: %s' % outfpath)
return None
except Exception as e:
LOGEXCEPTION('failed while trying to convert '
'b64 string to file %s' % outfpath)
return None
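# Illustrative round-trip sketch for _base64_to_file: base64-encode a small
# byte string and write it back out, once to disk and once to an in-memory
# stream. Only the stdlib base64 module (already used above) is needed; the
# output filenames are placeholders.
def _demo_base64_roundtrip_sketch():
    '''Minimal sketch: bytes -> base64 -> file and in-memory stream.'''
    payload = b'hello, astrobase'
    b64str = base64.b64encode(payload)
    ondisk = _base64_to_file(b64str, 'demo-payload.bin')
    instream = _base64_to_file(b64str, 'unused.bin', writetostrio=True)
    return ondisk, instream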
def _read_checkplot_picklefile(checkplotpickle):
'''This reads a checkplot gzipped pickle file back into a dict.
NOTE: the try-except is for Python 2 pickles that have numpy arrays in
them. Apparently, these aren't compatible with Python 3. See here:
http://stackoverflow.com/q/11305790
The workaround is noted in this answer:
http://stackoverflow.com/a/41366785
Parameters
----------
checkplotpickle : str
The path to a checkplot pickle file. This can be a gzipped file (in
which case the file extension should end in '.gz')
Returns
-------
dict
This returns a checkplotdict.
'''
if checkplotpickle.endswith('.gz'):
try:
with gzip.open(checkplotpickle,'rb') as infd:
cpdict = pickle.load(infd)
except UnicodeDecodeError:
with gzip.open(checkplotpickle,'rb') as infd:
cpdict = pickle.load(infd, encoding='latin1')
else:
try:
with open(checkplotpickle,'rb') as infd:
cpdict = pickle.load(infd)
except UnicodeDecodeError:
with open(checkplotpickle,'rb') as infd:
cpdict = pickle.load(infd, encoding='latin1')
return cpdict
def _write_checkplot_picklefile(checkplotdict,
outfile=None,
protocol=None,
outgzip=False):
'''This writes the checkplotdict to a (gzipped) pickle file.
Parameters
----------
checkplotdict : dict
This the checkplotdict to write to the pickle file.
outfile : None or str
The path to the output pickle file to write. If `outfile` is None,
writes a (gzipped) pickle file of the form:
checkplot-{objectid}.pkl(.gz)
to the current directory.
protocol : int
This sets the pickle file protocol to use when writing the pickle:
If None, will choose a protocol using the following rules:
- 4 -> default in Python >= 3.4 - fast but incompatible with Python 2
- 3 -> default in Python 3.0-3.3 - mildly fast
- 2 -> default in Python 2 - very slow, but compatible with Python 2/3
The default protocol kwarg is None, this will make an automatic choice
for pickle protocol that's best suited for the version of Python in
use. Note that this will make pickles generated by Py3 incompatible with
Py2.
outgzip : bool
If this is True, will gzip the output file. Note that if the `outfile`
str ends in a gzip, this will be automatically turned on.
Returns
-------
str
The absolute path to the written checkplot pickle file. None if writing
fails.
'''
    # figure out which protocol to use if it wasn't specified
    if protocol is None:
        # 4 -> default in Python >= 3.4 - fast but incompatible with Python 2
        if sys.version_info[0:2] >= (3,4):
            protocol = 4
        # 3 -> default in Python 3.0-3.3 - mildly fast
        elif sys.version_info[0:2] >= (3,0):
            protocol = 3
        # 2 -> default in Python 2.7 - very slow, but compatible with Python 2/3
        elif sys.version_info[0:2] == (2,7):
            protocol = 2
        # otherwise, use the slowest but most compatible protocol. this will
        # be readable by all (most?) Pythons
        else:
            protocol = 0
if outgzip:
if not outfile:
outfile = (
'checkplot-{objectid}.pkl.gz'.format(
objectid=squeeze(checkplotdict['objectid']).replace(' ','-')
)
)
with gzip.open(outfile,'wb') as outfd:
pickle.dump(checkplotdict,outfd,protocol=protocol)
else:
if not outfile:
outfile = (
'checkplot-{objectid}.pkl'.format(
objectid=squeeze(checkplotdict['objectid']).replace(' ','-')
)
)
# make sure to do the right thing if '.gz' is in the filename but
# outgzip was False
if outfile.endswith('.gz'):
LOGWARNING('output filename ends with .gz but kwarg outgzip=False. '
'will use gzip to compress the output pickle')
with gzip.open(outfile,'wb') as outfd:
pickle.dump(checkplotdict,outfd,protocol=protocol)
else:
with open(outfile,'wb') as outfd:
pickle.dump(checkplotdict,outfd,protocol=protocol)
return os.path.abspath(outfile)
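# Illustrative round-trip sketch for the checkplot pickle I/O above: write a
# tiny placeholder checkplotdict to a gzipped pickle and read it back. Real
# checkplotdicts carry many more keys; only 'objectid' is required for the
# default output filename.
def _demo_checkplot_pickle_sketch():
    '''Minimal sketch: checkplotdict -> gzipped pickle -> checkplotdict.'''
    cpd = {'objectid': 'HAT-123-0001234',          # placeholder object ID
           'comments': 'demo checkplotdict only'}
    cpf = _write_checkplot_picklefile(cpd, outgzip=True)
    return _read_checkplot_picklefile(cpf)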
def get_phased_quantities(stimes, smags, serrs, period):
'''Does phase-folding for the mag/flux time-series given a period.
Given finite and sigma-clipped times, magnitudes, and errors, along with the
period at which to phase-fold the data, perform the phase-folding and
return the phase-folded values.
Parameters
----------
stimes,smags,serrs : np.array
The sigma-clipped and finite input mag/flux time-series arrays to
operate on.
period : float
The period to phase the mag/flux time-series at. stimes.min() is used as
the epoch value to fold the times-series around.
Returns
-------
(phase, pmags, perrs, ptimes, mintime) : tuple
The tuple returned contains the following items:
- `phase`: phase-sorted values of phase at each of stimes
- `pmags`: phase-sorted magnitudes at each phase
- `perrs`: phase-sorted errors
- `ptimes`: phase-sorted times
- `mintime`: earliest time in stimes.
'''
# phase the mag series using the given period and faintest mag time
# mintime = stimes[npwhere(smags == npmax(smags))]
# phase the mag series using the given period and epoch = min(stimes)
mintime = np.min(stimes)
# calculate the unsorted phase, then sort it
iphase = (stimes - mintime)/period - np.floor((stimes - mintime)/period)
phasesortind = np.argsort(iphase)
# these are the final quantities to use for the Fourier fits
phase = iphase[phasesortind]
pmags = smags[phasesortind]
perrs = serrs[phasesortind]
# get the times sorted in phase order (useful to get the fit mag minimum
# with respect to phase -- the light curve minimum)
ptimes = stimes[phasesortind]
return phase, pmags, perrs, ptimes, mintime
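# A worked example for get_phased_quantities (illustrative only): fold a
# noiseless sinusoid at its true period. The returned phases lie in [0, 1)
# and the magnitudes come back sorted by phase, as described above.
def _demo_get_phased_quantities_sketch():
    '''Minimal sketch: phase-fold a synthetic sinusoid.'''
    period = 3.21
    stimes = np.linspace(0.0, 50.0, 5000)
    smags = 10.0 + 0.1*np.cos(2.0*np.pi*stimes/period)
    serrs = np.full_like(smags, 0.01)
    phase, pmags, perrs, ptimes, mintime = get_phased_quantities(
        stimes, smags, serrs, period
    )
    # mintime is the epoch used for the fold (min of the input times)
    return phase, pmags, mintime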
def make_fit_plot(phase, pmags, perrs, fitmags,
period, mintime, magseriesepoch,
plotfit,
magsarefluxes=False,
wrap=False,
model_over_lc=False):
'''This makes a plot of the LC model fit.
Parameters
----------
phase,pmags,perrs : np.array
The actual mag/flux time-series.
fitmags : np.array
The model fit time-series.
period : float
The period at which the phased LC was generated.
mintime : float
The minimum time value.
magseriesepoch : float
The value of time around which the phased LC was folded.
plotfit : str
The name of a file to write the plot to.
magsarefluxes : bool
Set this to True if the values in `pmags` and `fitmags` are actually
fluxes.
wrap : bool
If True, will wrap the phased LC around 0.0 to make some phased LCs
easier to look at.
model_over_lc : bool
Usually, this function will plot the actual LC over the model LC. Set
this to True to plot the model over the actual LC; this is most useful
when you have a very dense light curve and want to be able to see how it
follows the model.
Returns
-------
Nothing.
'''
# set up the figure
plt.close('all')
plt.figure(figsize=(8,4.8))
if model_over_lc:
model_z = 100
lc_z = 0
else:
model_z = 0
lc_z = 100
if not wrap:
plt.plot(phase, fitmags, linewidth=3.0, color='red',zorder=model_z)
plt.plot(phase,pmags,
marker='o',
markersize=1.0,
linestyle='none',
rasterized=True, color='k',zorder=lc_z)
# set the x axis ticks and label
plt.gca().set_xticks(
[0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0]
)
else:
plt.plot(np.concatenate([phase-1.0,phase]),
np.concatenate([fitmags,fitmags]),
linewidth=3.0,
color='red',zorder=model_z)
plt.plot(np.concatenate([phase-1.0,phase]),
np.concatenate([pmags,pmags]),
marker='o',
markersize=1.0,
linestyle='none',
rasterized=True, color='k',zorder=lc_z)
plt.gca().set_xlim((-0.8,0.8))
# set the x axis ticks and label
plt.gca().set_xticks(
[-0.8,-0.7,-0.6,-0.5,-0.4,-0.3,-0.2,-0.1,
0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8]
)
# set the y axis limit and label
ymin, ymax = plt.ylim()
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('flux')
plt.xlabel('phase')
plt.title('period: %.6f, folded at %.6f, fit epoch: %.6f' %
(period, mintime, magseriesepoch))
plt.savefig(plotfit)
plt.close()
def objectlist_conesearch(racenter,
declcenter,
searchradiusarcsec,
gaia_mirror=None,
columns=('source_id',
'ra','dec',
'phot_g_mean_mag',
'l','b',
'parallax', 'parallax_error',
'pmra','pmra_error',
'pmdec','pmdec_error'),
extra_filter=None,
returnformat='csv',
forcefetch=False,
cachedir='~/.astrobase/gaia-cache',
verbose=True,
timeout=15.0,
refresh=2.0,
maxtimeout=300.0,
maxtries=3,
complete_query_later=True):
'''This queries the GAIA TAP service for a list of objects near the coords.
Runs a conesearch around `(racenter, declcenter)` with radius in arcsec of
`searchradiusarcsec`.
Parameters
----------
racenter,declcenter : float
The center equatorial coordinates in decimal degrees.
searchradiusarcsec : float
The search radius of the cone-search in arcseconds.
gaia_mirror : {'gaia','heidelberg','vizier'} or None
This is the key used to select a GAIA catalog mirror from the
`GAIA_URLS` dict above. If set, the specified mirror will be used. If
None, a random mirror chosen from that dict will be used.
columns : sequence of str
This indicates which columns from the GAIA table to request for the
objects found within the search radius.
extra_filter: str or None
If this is provided, must be a valid ADQL filter string that is used to
further filter the cone-search results.
returnformat : {'csv','votable','json'}
The returned file format to request from the GAIA catalog service.
forcefetch : bool
If this is True, the query will be retried even if cached results for
it exist.
cachedir : str
This points to the directory where results will be downloaded.
verbose : bool
If True, will indicate progress and warn of any issues.
timeout : float
This sets the amount of time in seconds to wait for the service to
respond to our initial request.
refresh : float
This sets the amount of time in seconds to wait before checking if the
result file is available. If the results file isn't available after
`refresh` seconds have elapsed, the function will wait for `refresh`
seconds continuously, until `maxtimeout` is reached or the results file
becomes available.
maxtimeout : float
The maximum amount of time in seconds to wait for a result to become
available after submitting our query request.
maxtries : int
The maximum number of tries (across all mirrors tried) to make to either
submit the request or download the results, before giving up.
    complete_query_later : bool
If set to True, a submitted query that does not return a result before
`maxtimeout` has passed will be cancelled but its input request
parameters and the result URL provided by the service will be saved. If
this function is then called later with these same input request
parameters, it will check if the query finally finished and a result is
available. If so, will download the results instead of submitting a new
query. If it's not done yet, will start waiting for results again. To
force launch a new query with the same request parameters, set the
`forcefetch` kwarg to True.
Returns
-------
dict
This returns a dict of the following form::
{'params':dict of the input params used for the query,
'provenance':'cache' or 'new download',
'result':path to the file on disk with the downloaded data table}
'''
# this was generated using the awesome query generator at:
# https://gea.esac.esa.int/archive/
# NOTE: here we don't resolve the table name right away. this is because
# some of the GAIA mirrors use different table names, so we leave the table
# name to be resolved by the lower level tap_query function. this is done by
# the {{table}} construct.
query = (
"select {columns}, "
"(DISTANCE(POINT('ICRS', "
"{{table}}.ra, {{table}}.dec), "
"POINT('ICRS', {ra_center:.5f}, {decl_center:.5f})))*3600.0 "
"AS dist_arcsec "
"from {{table}} where "
"CONTAINS(POINT('ICRS',{{table}}.ra, {{table}}.dec),"
"CIRCLE('ICRS',{ra_center:.5f},{decl_center:.5f},"
"{search_radius:.6f}))=1 "
"{extra_filter_str}"
"ORDER by dist_arcsec asc "
)
if extra_filter is not None:
extra_filter_str = ' and %s ' % extra_filter
else:
extra_filter_str = ''
formatted_query = query.format(ra_center=racenter,
decl_center=declcenter,
search_radius=searchradiusarcsec/3600.0,
extra_filter_str=extra_filter_str,
columns=', '.join(columns))
return tap_query(formatted_query,
gaia_mirror=gaia_mirror,
returnformat=returnformat,
forcefetch=forcefetch,
cachedir=cachedir,
verbose=verbose,
timeout=timeout,
refresh=refresh,
maxtimeout=maxtimeout,
maxtries=maxtries,
complete_query_later=complete_query_later)
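# Illustrative usage sketch for objectlist_conesearch: run a 30 arcsec GAIA
# cone-search around placeholder coordinates and report where the results
# table was cached. The keys used below ('result', 'provenance') follow the
# return dict documented above.
def _demo_gaia_conesearch_sketch():
    '''Minimal sketch: GAIA cone-search around placeholder coordinates.'''
    res = objectlist_conesearch(290.0, 45.0, 30.0,
                                returnformat='csv',
                                verbose=False)
    if res is not None:
        LOGINFO('GAIA cone-search results table: %s (provenance: %s)' %
                (res['result'], res['provenance']))
    return res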
def objectlist_radeclbox(radeclbox,
gaia_mirror=None,
columns=('source_id',
'ra','dec',
'phot_g_mean_mag',
'l','b',
                                  'parallax','parallax_error',
'pmra','pmra_error',
'pmdec','pmdec_error'),
extra_filter=None,
returnformat='csv',
forcefetch=False,
cachedir='~/.astrobase/gaia-cache',
verbose=True,
timeout=15.0,
refresh=2.0,
maxtimeout=300.0,
maxtries=3,
complete_query_later=True):
'''This queries the GAIA TAP service for a list of objects in an equatorial
coordinate box.
Parameters
----------
radeclbox : sequence of four floats
This defines the box to search in::
[ra_min, ra_max, decl_min, decl_max]
gaia_mirror : {'gaia','heidelberg','vizier'} or None
This is the key used to select a GAIA catalog mirror from the
`GAIA_URLS` dict above. If set, the specified mirror will be used. If
None, a random mirror chosen from that dict will be used.
columns : sequence of str
This indicates which columns from the GAIA table to request for the
objects found within the search radius.
extra_filter: str or None
If this is provided, must be a valid ADQL filter string that is used to
        further filter the box search results.
returnformat : {'csv','votable','json'}
The returned file format to request from the GAIA catalog service.
forcefetch : bool
If this is True, the query will be retried even if cached results for
it exist.
cachedir : str
This points to the directory where results will be downloaded.
verbose : bool
If True, will indicate progress and warn of any issues.
timeout : float
This sets the amount of time in seconds to wait for the service to
respond to our initial request.
refresh : float
This sets the amount of time in seconds to wait before checking if the
result file is available. If the results file isn't available after
`refresh` seconds have elapsed, the function will wait for `refresh`
seconds continuously, until `maxtimeout` is reached or the results file
becomes available.
maxtimeout : float
The maximum amount of time in seconds to wait for a result to become
available after submitting our query request.
maxtries : int
The maximum number of tries (across all mirrors tried) to make to either
submit the request or download the results, before giving up.
    complete_query_later : bool
If set to True, a submitted query that does not return a result before
`maxtimeout` has passed will be cancelled but its input request
parameters and the result URL provided by the service will be saved. If
this function is then called later with these same input request
parameters, it will check if the query finally finished and a result is
available. If so, will download the results instead of submitting a new
query. If it's not done yet, will start waiting for results again. To
force launch a new query with the same request parameters, set the
`forcefetch` kwarg to True.
Returns
-------
dict
This returns a dict of the following form::
{'params':dict of the input params used for the query,
'provenance':'cache' or 'new download',
'result':path to the file on disk with the downloaded data table}
'''
# this was generated using the awesome query generator at:
# https://gea.esac.esa.int/archive/
# NOTE: here we don't resolve the table name right away. this is because
# some of the GAIA mirrors use different table names, so we leave the table
# name to be resolved by the lower level tap_query function. this is done by
# the {{table}} construct.
query = (
"select {columns} from {{table}} where "
"CONTAINS(POINT('ICRS',{{table}}.ra, {{table}}.dec),"
"BOX('ICRS',{ra_center:.5f},{decl_center:.5f},"
"{ra_width:.5f},{decl_height:.5f}))=1"
"{extra_filter_str}"
)
ra_min, ra_max, decl_min, decl_max = radeclbox
ra_center = (ra_max + ra_min)/2.0
decl_center = (decl_max + decl_min)/2.0
ra_width = ra_max - ra_min
decl_height = decl_max - decl_min
if extra_filter is not None:
extra_filter_str = ' and %s ' % extra_filter
else:
extra_filter_str = ''
formatted_query = query.format(columns=', '.join(columns),
extra_filter_str=extra_filter_str,
ra_center=ra_center,
decl_center=decl_center,
ra_width=ra_width,
decl_height=decl_height)
return tap_query(formatted_query,
gaia_mirror=gaia_mirror,
returnformat=returnformat,
forcefetch=forcefetch,
cachedir=cachedir,
verbose=verbose,
timeout=timeout,
refresh=refresh,
maxtimeout=maxtimeout,
maxtries=maxtries,
complete_query_later=complete_query_later)
def objectid_search(gaiaid,
gaia_mirror=None,
columns=('source_id',
'ra','dec',
'phot_g_mean_mag',
'phot_bp_mean_mag',
'phot_rp_mean_mag',
'l','b',
                             'parallax','parallax_error',
'pmra','pmra_error',
'pmdec','pmdec_error'),
returnformat='csv',
forcefetch=False,
cachedir='~/.astrobase/gaia-cache',
verbose=True,
timeout=15.0,
refresh=2.0,
maxtimeout=300.0,
maxtries=3,
complete_query_later=True):
'''This queries the GAIA TAP service for a single GAIA source ID.
Parameters
----------
gaiaid : str
The source ID of the object whose info will be collected.
gaia_mirror : {'gaia','heidelberg','vizier'} or None
This is the key used to select a GAIA catalog mirror from the
`GAIA_URLS` dict above. If set, the specified mirror will be used. If
None, a random mirror chosen from that dict will be used.
columns : sequence of str
This indicates which columns from the GAIA table to request for the
objects found within the search radius.
returnformat : {'csv','votable','json'}
The returned file format to request from the GAIA catalog service.
forcefetch : bool
If this is True, the query will be retried even if cached results for
it exist.
cachedir : str
This points to the directory where results will be downloaded.
verbose : bool
If True, will indicate progress and warn of any issues.
timeout : float
This sets the amount of time in seconds to wait for the service to
respond to our initial request.
refresh : float
This sets the amount of time in seconds to wait before checking if the
result file is available. If the results file isn't available after
`refresh` seconds have elapsed, the function will wait for `refresh`
seconds continuously, until `maxtimeout` is reached or the results file
becomes available.
maxtimeout : float
The maximum amount of time in seconds to wait for a result to become
available after submitting our query request.
maxtries : int
The maximum number of tries (across all mirrors tried) to make to either
submit the request or download the results, before giving up.
    complete_query_later : bool
If set to True, a submitted query that does not return a result before
`maxtimeout` has passed will be cancelled but its input request
parameters and the result URL provided by the service will be saved. If
this function is then called later with these same input request
parameters, it will check if the query finally finished and a result is
available. If so, will download the results instead of submitting a new
query. If it's not done yet, will start waiting for results again. To
force launch a new query with the same request parameters, set the
`forcefetch` kwarg to True.
Returns
-------
dict
This returns a dict of the following form::
{'params':dict of the input params used for the query,
'provenance':'cache' or 'new download',
'result':path to the file on disk with the downloaded data table}
'''
# NOTE: here we don't resolve the table name right away. this is because
# some of the GAIA mirrors use different table names, so we leave the table
# name to be resolved by the lower level tap_query function. this is done by
# the {{table}} construct.
query = (
"select {columns} from {{table}} where "
"source_id = {gaiaid}"
)
formatted_query = query.format(columns=', '.join(columns),
gaiaid=gaiaid)
return tap_query(formatted_query,
gaia_mirror=gaia_mirror,
returnformat=returnformat,
forcefetch=forcefetch,
cachedir=cachedir,
verbose=verbose,
timeout=timeout,
refresh=refresh,
maxtimeout=maxtimeout,
maxtries=maxtries,
complete_query_later=complete_query_later)
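
# A minimal usage sketch for `objectid_search` above (not part of the original
# module). The GAIA source ID used here is hypothetical; the returned dict's
# 'result' key points at the downloaded table file on disk.
def _example_objectid_search():
    res = objectid_search(
        '4295806720',        # hypothetical GAIA source_id
        gaia_mirror=None,    # let the function pick a random mirror
        returnformat='csv',
    )
    if res is not None:
        print('provenance:', res['provenance'])
        print('table downloaded to:', res['result'])
    return res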
def generalized_lsp_value(times, mags, errs, omega):
'''Generalized LSP value for a single omega.
The relations used are::
P(w) = (1/YY) * (YC*YC/CC + YS*YS/SS)
        where: YC, YS, CC, and SS are all calculated at the offset time tau
        and where: tan(2*omega*tau) = 2*CS/(CC - SS)
and where:
Y = sum( w_i*y_i )
C = sum( w_i*cos(wT_i) )
S = sum( w_i*sin(wT_i) )
YY = sum( w_i*y_i*y_i ) - Y*Y
YC = sum( w_i*y_i*cos(wT_i) ) - Y*C
YS = sum( w_i*y_i*sin(wT_i) ) - Y*S
CpC = sum( w_i*cos(w_T_i)*cos(w_T_i) )
CC = CpC - C*C
SS = (1 - CpC) - S*S
CS = sum( w_i*cos(w_T_i)*sin(w_T_i) ) - C*S
Parameters
----------
times,mags,errs : np.array
The time-series to calculate the periodogram value for.
omega : float
The frequency to calculate the periodogram value at.
Returns
-------
periodogramvalue : float
The normalized periodogram at the specified test frequency `omega`.
'''
one_over_errs2 = 1.0/(errs*errs)
W = npsum(one_over_errs2)
wi = one_over_errs2/W
sin_omegat = npsin(omega*times)
cos_omegat = npcos(omega*times)
sin2_omegat = sin_omegat*sin_omegat
cos2_omegat = cos_omegat*cos_omegat
sincos_omegat = sin_omegat*cos_omegat
# calculate some more sums and terms
Y = npsum( wi*mags )
C = npsum( wi*cos_omegat )
S = npsum( wi*sin_omegat )
CpS = npsum( wi*sincos_omegat )
CpC = npsum( wi*cos2_omegat )
CS = CpS - C*S
CC = CpC - C*C
SS = 1 - CpC - S*S # use SpS = 1 - CpC
# calculate tau
tan_omega_tau_top = 2.0*CS
tan_omega_tau_bottom = CC - SS
tan_omega_tau = tan_omega_tau_top/tan_omega_tau_bottom
tau = nparctan(tan_omega_tau)/(2.0*omega)
YpY = npsum( wi*mags*mags)
YpC = npsum( wi*mags*cos_omegat )
YpS = npsum( wi*mags*sin_omegat )
# SpS = npsum( wi*sin2_omegat )
# the final terms
YY = YpY - Y*Y
YC = YpC - Y*C
YS = YpS - Y*S
periodogramvalue = (YC*YC/CC + YS*YS/SS)/YY
return periodogramvalue
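
# A hedged sketch (not from the original module) of evaluating
# `generalized_lsp_value` at a single test frequency for a synthetic noisy
# sinusoid. All signal parameters are arbitrary; the power at the injected
# angular frequency should come out close to 1.
def _example_generalized_lsp_value():
    import numpy as np
    rng = np.random.RandomState(42)
    times = np.sort(rng.uniform(0.0, 100.0, size=500))
    period = 3.2
    mags = (12.0 + 0.05*np.sin(2.0*np.pi*times/period) +
            rng.normal(0.0, 0.01, times.size))
    errs = np.full_like(mags, 0.01)
    omega = 2.0*np.pi/period
    return generalized_lsp_value(times, mags, errs, omega)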
def generalized_lsp_value_withtau(times, mags, errs, omega):
'''Generalized LSP value for a single omega.
This uses tau to provide an arbitrary time-reference point.
The relations used are::
P(w) = (1/YY) * (YC*YC/CC + YS*YS/SS)
        where: YC, YS, CC, and SS are all calculated at the offset time tau
        and where: tan(2*omega*tau) = 2*CS/(CC - SS)
and where:
Y = sum( w_i*y_i )
C = sum( w_i*cos(wT_i) )
S = sum( w_i*sin(wT_i) )
YY = sum( w_i*y_i*y_i ) - Y*Y
YC = sum( w_i*y_i*cos(wT_i) ) - Y*C
YS = sum( w_i*y_i*sin(wT_i) ) - Y*S
CpC = sum( w_i*cos(w_T_i)*cos(w_T_i) )
CC = CpC - C*C
SS = (1 - CpC) - S*S
CS = sum( w_i*cos(w_T_i)*sin(w_T_i) ) - C*S
Parameters
----------
times,mags,errs : np.array
The time-series to calculate the periodogram value for.
omega : float
The frequency to calculate the periodogram value at.
Returns
-------
periodogramvalue : float
The normalized periodogram at the specified test frequency `omega`.
'''
one_over_errs2 = 1.0/(errs*errs)
W = npsum(one_over_errs2)
wi = one_over_errs2/W
sin_omegat = npsin(omega*times)
cos_omegat = npcos(omega*times)
sin2_omegat = sin_omegat*sin_omegat
cos2_omegat = cos_omegat*cos_omegat
sincos_omegat = sin_omegat*cos_omegat
# calculate some more sums and terms
Y = npsum( wi*mags )
C = npsum( wi*cos_omegat )
S = npsum( wi*sin_omegat )
CpS = npsum( wi*sincos_omegat )
CpC = npsum( wi*cos2_omegat )
CS = CpS - C*S
CC = CpC - C*C
SS = 1 - CpC - S*S # use SpS = 1 - CpC
# calculate tau
tan_omega_tau_top = 2.0*CS
tan_omega_tau_bottom = CC - SS
tan_omega_tau = tan_omega_tau_top/tan_omega_tau_bottom
tau = nparctan(tan_omega_tau)/(2.0*omega)
# now we need to calculate all the bits at tau
sin_omega_tau = npsin(omega*(times - tau))
cos_omega_tau = npcos(omega*(times - tau))
sin2_omega_tau = sin_omega_tau*sin_omega_tau
cos2_omega_tau = cos_omega_tau*cos_omega_tau
sincos_omega_tau = sin_omega_tau*cos_omega_tau
C_tau = npsum(wi*cos_omega_tau)
S_tau = npsum(wi*sin_omega_tau)
CpS_tau = npsum( wi*sincos_omega_tau )
CpC_tau = npsum( wi*cos2_omega_tau )
CS_tau = CpS_tau - C_tau*S_tau
CC_tau = CpC_tau - C_tau*C_tau
SS_tau = 1 - CpC_tau - S_tau*S_tau # use SpS = 1 - CpC
YpY = npsum( wi*mags*mags)
YpC_tau = npsum( wi*mags*cos_omega_tau )
YpS_tau = npsum( wi*mags*sin_omega_tau )
# SpS = npsum( wi*sin2_omegat )
# the final terms
YY = YpY - Y*Y
YC_tau = YpC_tau - Y*C_tau
YS_tau = YpS_tau - Y*S_tau
periodogramvalue = (YC_tau*YC_tau/CC_tau + YS_tau*YS_tau/SS_tau)/YY
return periodogramvalue
def generalized_lsp_value_notau(times, mags, errs, omega):
'''
This is the simplified version not using tau.
The relations used are::
W = sum (1.0/(errs*errs) )
w_i = (1/W)*(1/(errs*errs))
Y = sum( w_i*y_i )
C = sum( w_i*cos(wt_i) )
S = sum( w_i*sin(wt_i) )
YY = sum( w_i*y_i*y_i ) - Y*Y
YC = sum( w_i*y_i*cos(wt_i) ) - Y*C
YS = sum( w_i*y_i*sin(wt_i) ) - Y*S
CpC = sum( w_i*cos(w_t_i)*cos(w_t_i) )
CC = CpC - C*C
SS = (1 - CpC) - S*S
CS = sum( w_i*cos(w_t_i)*sin(w_t_i) ) - C*S
D(omega) = CC*SS - CS*CS
P(omega) = (SS*YC*YC + CC*YS*YS - 2.0*CS*YC*YS)/(YY*D)
Parameters
----------
times,mags,errs : np.array
The time-series to calculate the periodogram value for.
omega : float
The frequency to calculate the periodogram value at.
Returns
-------
periodogramvalue : float
The normalized periodogram at the specified test frequency `omega`.
'''
one_over_errs2 = 1.0/(errs*errs)
W = npsum(one_over_errs2)
wi = one_over_errs2/W
sin_omegat = npsin(omega*times)
cos_omegat = npcos(omega*times)
sin2_omegat = sin_omegat*sin_omegat
cos2_omegat = cos_omegat*cos_omegat
sincos_omegat = sin_omegat*cos_omegat
# calculate some more sums and terms
Y = npsum( wi*mags )
C = npsum( wi*cos_omegat )
S = npsum( wi*sin_omegat )
YpY = npsum( wi*mags*mags)
YpC = npsum( wi*mags*cos_omegat )
YpS = npsum( wi*mags*sin_omegat )
CpC = npsum( wi*cos2_omegat )
# SpS = npsum( wi*sin2_omegat )
CpS = npsum( wi*sincos_omegat )
# the final terms
YY = YpY - Y*Y
YC = YpC - Y*C
YS = YpS - Y*S
CC = CpC - C*C
SS = 1 - CpC - S*S # use SpS = 1 - CpC
CS = CpS - C*S
# P(omega) = (SS*YC*YC + CC*YS*YS - 2.0*CS*YC*YS)/(YY*D)
# D(omega) = CC*SS - CS*CS
Domega = CC*SS - CS*CS
lspval = (SS*YC*YC + CC*YS*YS - 2.0*CS*YC*YS)/(YY*Domega)
return lspval
def specwindow_lsp_value(times, mags, errs, omega):
'''This calculates the peak associated with the spectral window function
for times and at the specified omega.
NOTE: this is classical Lomb-Scargle, not the Generalized
Lomb-Scargle. `mags` and `errs` are silently ignored since we're calculating
the periodogram of the observing window function. These are kept to present
a consistent external API so the `pgen_lsp` function below can call this
transparently.
Parameters
----------
times,mags,errs : np.array
The time-series to calculate the periodogram value for.
omega : float
The frequency to calculate the periodogram value at.
Returns
-------
periodogramvalue : float
The normalized periodogram at the specified test frequency `omega`.
'''
norm_times = times - times.min()
tau = (
(1.0/(2.0*omega)) *
nparctan( npsum(npsin(2.0*omega*norm_times)) /
npsum(npcos(2.0*omega*norm_times)) )
)
lspval_top_cos = (npsum(1.0 * npcos(omega*(norm_times-tau))) *
npsum(1.0 * npcos(omega*(norm_times-tau))))
lspval_bot_cos = npsum( (npcos(omega*(norm_times-tau))) *
(npcos(omega*(norm_times-tau))) )
lspval_top_sin = (npsum(1.0 * npsin(omega*(norm_times-tau))) *
npsum(1.0 * npsin(omega*(norm_times-tau))))
lspval_bot_sin = npsum( (npsin(omega*(norm_times-tau))) *
(npsin(omega*(norm_times-tau))) )
lspval = 0.5 * ( (lspval_top_cos/lspval_bot_cos) +
(lspval_top_sin/lspval_bot_sin) )
return lspval
def pgen_lsp(
times,
mags,
errs,
magsarefluxes=False,
startp=None,
endp=None,
stepsize=1.0e-4,
autofreq=True,
nbestpeaks=5,
periodepsilon=0.1,
sigclip=10.0,
nworkers=None,
workchunksize=None,
glspfunc=_glsp_worker_withtau,
verbose=True
):
'''This calculates the generalized Lomb-Scargle periodogram.
Uses the algorithm from Zechmeister and Kurster (2009).
Parameters
----------
times,mags,errs : np.array
The mag/flux time-series with associated measurement errors to run the
period-finding on.
magsarefluxes : bool
If the input measurement values in `mags` and `errs` are in fluxes, set
this to True.
    startp,endp : float or None
        The minimum and maximum periods to consider for the period search.
stepsize : float
The step-size in frequency to use when constructing a frequency grid for
the period search.
autofreq : bool
If this is True, the value of `stepsize` will be ignored and the
:py:func:`astrobase.periodbase.get_frequency_grid` function will be used
to generate a frequency grid based on `startp`, and `endp`. If these are
None as well, `startp` will be set to 0.1 and `endp` will be set to
`times.max() - times.min()`.
nbestpeaks : int
The number of 'best' peaks to return from the periodogram results,
starting from the global maximum of the periodogram peak values.
periodepsilon : float
The fractional difference between successive values of 'best' periods
when sorting by periodogram power to consider them as separate periods
(as opposed to part of the same periodogram peak). This is used to avoid
broad peaks in the periodogram and make sure the 'best' periods returned
are all actually independent.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
nworkers : int
The number of parallel workers to use when calculating the periodogram.
    workchunksize : None or int
        If this is an int, will use chunks of the given size to break up the
        work for the parallel workers. If None, the chunk size is determined
        automatically by `multiprocessing.Pool.map`.
glspfunc : Python function
The worker function to use to calculate the periodogram. This can be
used to make this function calculate the time-series sampling window
function instead of the time-series measurements' GLS periodogram by
passing in `_glsp_worker_specwindow` instead of the default
`_glsp_worker_withtau` function.
verbose : bool
If this is True, will indicate progress and details about the frequency
grid used for the period search.
Returns
-------
dict
This function returns a dict, referred to as an `lspinfo` dict in other
astrobase functions that operate on periodogram results. This is a
standardized format across all astrobase period-finders, and is of the
form below::
{'bestperiod': the best period value in the periodogram,
'bestlspval': the periodogram peak associated with the best period,
'nbestpeaks': the input value of nbestpeaks,
'nbestlspvals': nbestpeaks-size list of best period peak values,
'nbestperiods': nbestpeaks-size list of best periods,
'lspvals': the full array of periodogram powers,
'periods': the full array of periods considered,
'method':'gls' -> the name of the period-finder method,
'kwargs':{ dict of all of the input kwargs for record-keeping}}
'''
# get rid of nans first and sigclip
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
# get rid of zero errs
nzind = npnonzero(serrs)
stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]
# make sure there are enough points to calculate a spectrum
if len(stimes) > 9 and len(smags) > 9 and len(serrs) > 9:
# get the frequencies to use
if startp:
endf = 1.0/startp
else:
# default start period is 0.1 day
endf = 1.0/0.1
if endp:
startf = 1.0/endp
else:
# default end period is length of time series
startf = 1.0/(stimes.max() - stimes.min())
# if we're not using autofreq, then use the provided frequencies
if not autofreq:
omegas = 2*pi_value*nparange(startf, endf, stepsize)
if verbose:
LOGINFO(
'using %s frequency points, start P = %.3f, end P = %.3f' %
(omegas.size, 1.0/endf, 1.0/startf)
)
else:
# this gets an automatic grid of frequencies to use
freqs = get_frequency_grid(stimes,
minfreq=startf,
maxfreq=endf)
omegas = 2*pi_value*freqs
if verbose:
LOGINFO(
'using autofreq with %s frequency points, '
'start P = %.3f, end P = %.3f' %
(omegas.size, 1.0/freqs.max(), 1.0/freqs.min())
)
# map to parallel workers
if (not nworkers) or (nworkers > NCPUS):
nworkers = NCPUS
if verbose:
LOGINFO('using %s workers...' % nworkers)
pool = Pool(nworkers)
tasks = [(stimes, smags, serrs, x) for x in omegas]
if workchunksize:
lsp = pool.map(glspfunc, tasks, chunksize=workchunksize)
else:
lsp = pool.map(glspfunc, tasks)
pool.close()
pool.join()
del pool
lsp = nparray(lsp)
periods = 2.0*pi_value/omegas
        # find the nbestpeaks for the periodogram: 1. sort the lsp array by
        # highest value first 2. go down the values until we find nbestpeaks
        # values that are separated by at least periodepsilon in period
        # make sure to filter out non-finite values of lsp
finitepeakind = npisfinite(lsp)
finlsp = lsp[finitepeakind]
finperiods = periods[finitepeakind]
# make sure that finlsp has finite values before we work on it
try:
bestperiodind = npargmax(finlsp)
except ValueError:
LOGERROR('no finite periodogram values '
'for this mag series, skipping...')
return {'bestperiod':npnan,
'bestlspval':npnan,
'nbestpeaks':nbestpeaks,
'nbestlspvals':None,
'nbestperiods':None,
'lspvals':None,
'omegas':omegas,
'periods':None,
'method':'gls',
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip}}
sortedlspind = npargsort(finlsp)[::-1]
sortedlspperiods = finperiods[sortedlspind]
sortedlspvals = finlsp[sortedlspind]
# now get the nbestpeaks
nbestperiods, nbestlspvals, peakcount = (
[finperiods[bestperiodind]],
[finlsp[bestperiodind]],
1
)
prevperiod = sortedlspperiods[0]
# find the best nbestpeaks in the lsp and their periods
for period, lspval in zip(sortedlspperiods, sortedlspvals):
if peakcount == nbestpeaks:
break
perioddiff = abs(period - prevperiod)
bestperiodsdiff = [abs(period - x) for x in nbestperiods]
# print('prevperiod = %s, thisperiod = %s, '
# 'perioddiff = %s, peakcount = %s' %
# (prevperiod, period, perioddiff, peakcount))
# this ensures that this period is different from the last
# period and from all the other existing best periods by
# periodepsilon to make sure we jump to an entire different peak
# in the periodogram
if (perioddiff > (periodepsilon*prevperiod) and
all(x > (periodepsilon*period) for x in bestperiodsdiff)):
nbestperiods.append(period)
nbestlspvals.append(lspval)
peakcount = peakcount + 1
prevperiod = period
return {'bestperiod':finperiods[bestperiodind],
'bestlspval':finlsp[bestperiodind],
'nbestpeaks':nbestpeaks,
'nbestlspvals':nbestlspvals,
'nbestperiods':nbestperiods,
'lspvals':lsp,
'omegas':omegas,
'periods':periods,
'method':'gls',
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip}}
else:
LOGERROR('no good detections for these times and mags, skipping...')
return {'bestperiod':npnan,
'bestlspval':npnan,
'nbestpeaks':nbestpeaks,
'nbestlspvals':None,
'nbestperiods':None,
'lspvals':None,
'omegas':None,
'periods':None,
'method':'gls',
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip}}
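
# A hedged usage sketch (not part of the original module) for `pgen_lsp` above
# on a synthetic sinusoidal light curve. The injected period and noise level
# are arbitrary; the recovered 'bestperiod' should land close to the injected
# value.
def _example_pgen_lsp():
    import numpy as np
    rng = np.random.RandomState(7)
    times = np.sort(rng.uniform(0.0, 200.0, size=2000))
    true_period = 1.7
    mags = (13.0 + 0.1*np.sin(2.0*np.pi*times/true_period) +
            rng.normal(0.0, 0.02, times.size))
    errs = np.full_like(mags, 0.02)
    lspinfo = pgen_lsp(times, mags, errs,
                       startp=0.5, endp=10.0,
                       autofreq=True, nworkers=2, verbose=False)
    print('injected period: %.3f, recovered period: %.3f' %
          (true_period, lspinfo['bestperiod']))
    return lspinfo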
def specwindow_lsp(
times,
mags,
errs,
magsarefluxes=False,
startp=None,
endp=None,
stepsize=1.0e-4,
autofreq=True,
nbestpeaks=5,
periodepsilon=0.1,
sigclip=10.0,
nworkers=None,
glspfunc=_glsp_worker_specwindow,
verbose=True
):
'''This calculates the spectral window function.
Wraps the `pgen_lsp` function above to use the specific worker for
calculating the window-function.
Parameters
----------
times,mags,errs : np.array
The mag/flux time-series with associated measurement errors to run the
period-finding on.
magsarefluxes : bool
If the input measurement values in `mags` and `errs` are in fluxes, set
this to True.
    startp,endp : float or None
        The minimum and maximum periods to consider for the period search.
stepsize : float
The step-size in frequency to use when constructing a frequency grid for
the period search.
autofreq : bool
If this is True, the value of `stepsize` will be ignored and the
:py:func:`astrobase.periodbase.get_frequency_grid` function will be used
to generate a frequency grid based on `startp`, and `endp`. If these are
None as well, `startp` will be set to 0.1 and `endp` will be set to
`times.max() - times.min()`.
nbestpeaks : int
The number of 'best' peaks to return from the periodogram results,
starting from the global maximum of the periodogram peak values.
periodepsilon : float
The fractional difference between successive values of 'best' periods
when sorting by periodogram power to consider them as separate periods
(as opposed to part of the same periodogram peak). This is used to avoid
broad peaks in the periodogram and make sure the 'best' periods returned
are all actually independent.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
nworkers : int
The number of parallel workers to use when calculating the periodogram.
    glspfunc : Python function
        The worker function to use to calculate the periodogram. This is used
        to make the `pgen_lsp` function calculate the time-series sampling
        window function instead of the time-series measurements' GLS
        periodogram by passing in `_glsp_worker_specwindow` instead of the
        default `_glsp_worker_withtau` function.
verbose : bool
If this is True, will indicate progress and details about the frequency
grid used for the period search.
Returns
-------
dict
This function returns a dict, referred to as an `lspinfo` dict in other
astrobase functions that operate on periodogram results. This is a
standardized format across all astrobase period-finders, and is of the
form below::
{'bestperiod': the best period value in the periodogram,
'bestlspval': the periodogram peak associated with the best period,
'nbestpeaks': the input value of nbestpeaks,
'nbestlspvals': nbestpeaks-size list of best period peak values,
'nbestperiods': nbestpeaks-size list of best periods,
'lspvals': the full array of periodogram powers,
'periods': the full array of periods considered,
'method':'win' -> the name of the period-finder method,
'kwargs':{ dict of all of the input kwargs for record-keeping}}
'''
# run the LSP using glsp_worker_specwindow as the worker
lspres = pgen_lsp(
times,
mags,
errs,
magsarefluxes=magsarefluxes,
startp=startp,
endp=endp,
autofreq=autofreq,
nbestpeaks=nbestpeaks,
periodepsilon=periodepsilon,
stepsize=stepsize,
nworkers=nworkers,
sigclip=sigclip,
glspfunc=glspfunc,
verbose=verbose
)
# update the resultdict to indicate we're a spectral window function
lspres['method'] = 'win'
if lspres['lspvals'] is not None:
# renormalize the periodogram to between 0 and 1 like the usual GLS.
lspmax = npnanmax(lspres['lspvals'])
if npisfinite(lspmax):
lspres['lspvals'] = lspres['lspvals']/lspmax
lspres['nbestlspvals'] = [
x/lspmax for x in lspres['nbestlspvals']
]
lspres['bestlspval'] = lspres['bestlspval']/lspmax
return lspres
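
# A hedged sketch (not part of the original module) showing how
# `specwindow_lsp` can be used to check whether a candidate period from the
# GLS periodogram is an alias of the observing cadence: run the window
# function over the same period range and compare its strongest peaks with
# the GLS peaks. The input arrays are assumed to come from a real light curve.
def _example_specwindow_check(times, mags, errs):
    wininfo = specwindow_lsp(times, mags, errs,
                             startp=0.5, endp=100.0,
                             nworkers=2, verbose=False)
    print('strongest window-function periods:', wininfo['nbestperiods'])
    return wininfo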
def analytic_false_alarm_probability(lspinfo,
times,
conservative_nfreq_eff=True,
peakvals=None,
inplace=True):
'''This returns the analytic false alarm probabilities for periodogram
peak values.
The calculation follows that on page 3 of Zechmeister & Kurster (2009)::
        FAP = 1 - [1 - Prob(z > z0)]**M
where::
M is the number of independent frequencies
Prob(z > z0) is the probability of peak with value > z0
z0 is the peak value we're evaluating
Parameters
----------
lspinfo : dict
The dict returned by the :py:func:`~astrobase.periodbase.zgls.pgen_lsp`
function.
times : np.array
The times for which the periodogram result in ``lspinfo`` was
calculated.
conservative_nfreq_eff : bool
If True, will follow the prescription given in Schwarzenberg-Czerny
(2003):
http://adsabs.harvard.edu/abs/2003ASPC..292..383S
        and estimate the effective number of independent frequencies M_eff as::
min(N_obs, N_freq, DELTA_f/delta_f)
    peakvals : sequence or None
        The peak values for which to evaluate the false-alarm probability. If
        None, will calculate this for each of the peak values in the
        ``nbestlspvals`` key of the ``lspinfo`` dict.
inplace : bool
If True, puts the results of the FAP calculation into the ``lspinfo``
dict as a list available as ``lspinfo['falsealarmprob']``.
Returns
-------
list
The calculated false alarm probabilities for each of the peak values in
``peakvals``.
'''
frequencies = 1.0/lspinfo['periods']
M = independent_freq_count(frequencies,
times,
conservative=conservative_nfreq_eff)
if peakvals is None:
peakvals = lspinfo['nbestlspvals']
prob_exceed_vals = [
probability_peak_exceeds_value(times, p) for p in peakvals
]
false_alarm_probs = [
1.0 - (1.0 - prob_exc)**M for prob_exc in prob_exceed_vals
]
if inplace:
lspinfo['falsealarmprob'] = false_alarm_probs
return false_alarm_probs
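
# A hedged sketch (not part of the original module) chaining `pgen_lsp` and
# `analytic_false_alarm_probability`. The input arrays are assumed to be a
# real mag/flux time-series; with the default inplace=True the FAPs also end
# up in lspinfo['falsealarmprob'].
def _example_gls_with_fap(times, mags, errs):
    lspinfo = pgen_lsp(times, mags, errs, verbose=False)
    faps = analytic_false_alarm_probability(lspinfo, times)
    for period, fap in zip(lspinfo['nbestperiods'], faps):
        print('period: %.6f -> analytic FAP: %.3g' % (period, fap))
    return faps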
def check_existing_apikey(lcc_server):
'''This validates if an API key for the specified LCC-Server is available.
API keys are stored using the following file scheme::
        ~/.astrobase/lccs/apikey-[http|https]-domain.of.lccserver.org
e.g. for the HAT LCC-Server at https://data.hatsurveys.org::
~/.astrobase/lccs/apikey-https-data.hatsurveys.org
Parameters
----------
lcc_server : str
The base URL of the LCC-Server for which the existence of API keys will
be checked.
Returns
-------
(apikey_ok, apikey_str, expiry) : tuple
The returned tuple contains the status of the API key, the API key
itself if present, and its expiry date if present.
'''
USERHOME = os.path.expanduser('~')
APIKEYFILE = os.path.join(USERHOME,
'.astrobase',
'lccs',
'apikey-%s' % lcc_server.replace(
'https://',
'https-'
).replace(
'http://',
'http-'
))
if os.path.exists(APIKEYFILE):
# check if this file is readable/writeable by user only
fileperm = oct(os.stat(APIKEYFILE)[stat.ST_MODE])
if fileperm == '0100600' or fileperm == '0o100600':
with open(APIKEYFILE) as infd:
apikey, expires = infd.read().strip('\n').split()
# get today's datetime
now = datetime.now(utc)
if sys.version_info[:2] < (3,7):
# this hideous incantation is required for lesser Pythons
expdt = datetime.strptime(
expires.replace('Z',''),
'%Y-%m-%dT%H:%M:%S.%f'
).replace(tzinfo=utc)
else:
expdt = datetime.fromisoformat(expires.replace('Z','+00:00'))
if now > expdt:
LOGERROR('API key has expired. expiry was on: %s' % expires)
return False, apikey, expires
else:
return True, apikey, expires
else:
LOGWARNING('The API key file %s has bad permissions '
'and is insecure, not reading it.\n'
'(you need to chmod 600 this file)'
% APIKEYFILE)
return False, None, None
else:
LOGWARNING('No LCC-Server API key '
'found in: {apikeyfile}'.format(apikeyfile=APIKEYFILE))
return False, None, None
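
# A hedged sketch (not part of the original module) of the usual API key
# workflow: reuse a cached key if it is present and valid, otherwise fetch a
# new anonymous key with `get_new_apikey` (defined below). The server URL is
# the public HAT LCC-Server used as an example elsewhere in this module.
def _example_get_or_make_apikey(lcc_server='https://data.hatsurveys.org'):
    apikey_ok, apikey, expires = check_existing_apikey(lcc_server)
    if not apikey_ok:
        apikey, expires = get_new_apikey(lcc_server)
    print('using API key that expires on: %s' % expires)
    return apikey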
def get_new_apikey(lcc_server):
'''This gets a new API key from the specified LCC-Server.
NOTE: this only gets an anonymous API key. To get an API key tied to a user
account (and associated privilege level), see the `import_apikey` function
below.
Parameters
----------
lcc_server : str
The base URL of the LCC-Server from where the API key will be fetched.
Returns
-------
(apikey, expiry) : tuple
This returns a tuple with the API key and its expiry date.
'''
USERHOME = os.path.expanduser('~')
APIKEYFILE = os.path.join(USERHOME,
'.astrobase',
'lccs',
'apikey-%s' % lcc_server.replace(
'https://',
'https-'
).replace(
'http://',
'http-'
))
# url for getting an API key
url = '%s/api/key' % lcc_server
# get the API key
resp = urlopen(url)
if resp.code == 200:
respdict = json.loads(resp.read())
else:
LOGERROR('could not fetch the API key from LCC-Server at: %s' %
lcc_server)
        LOGERROR('the HTTP status code was: %s' % resp.code)
return None
#
# now that we have an API key dict, get the API key out of it and write it
# to the APIKEYFILE
#
apikey = respdict['result']['apikey']
expires = respdict['result']['expires']
# write this to the apikey file
if not os.path.exists(os.path.dirname(APIKEYFILE)):
os.makedirs(os.path.dirname(APIKEYFILE))
with open(APIKEYFILE,'w') as outfd:
outfd.write('%s %s\n' % (apikey, expires))
# chmod it to the correct value
os.chmod(APIKEYFILE, 0o100600)
LOGINFO('key fetched successfully from: %s. expires on: %s' % (lcc_server,
expires))
LOGINFO('written to: %s' % APIKEYFILE)
return apikey, expires
def import_apikey(lcc_server, apikey_text_json):
'''This imports an API key from text and writes it to the cache dir.
Use this with the JSON text copied from the API key text box on your
LCC-Server user home page. The API key will thus be tied to the privileges
of that user account and can then access objects, datasets, and collections
marked as private for the user only or shared with that user.
Parameters
----------
lcc_server : str
The base URL of the LCC-Server to get the API key for.
apikey_text_json : str
The JSON string from the API key text box on the user's LCC-Server home
page at `lcc_server/users/home`.
Returns
-------
(apikey, expiry) : tuple
This returns a tuple with the API key and its expiry date.
'''
USERHOME = os.path.expanduser('~')
APIKEYFILE = os.path.join(USERHOME,
'.astrobase',
'lccs',
'apikey-%s' % lcc_server.replace(
'https://',
'https-'
).replace(
'http://',
'http-'
))
respdict = json.loads(apikey_text_json)
#
# now that we have an API key dict, get the API key out of it and write it
# to the APIKEYFILE
#
apikey = respdict['apikey']
expires = respdict['expires']
# write this to the apikey file
if not os.path.exists(os.path.dirname(APIKEYFILE)):
os.makedirs(os.path.dirname(APIKEYFILE))
with open(APIKEYFILE,'w') as outfd:
outfd.write('%s %s\n' % (apikey, expires))
# chmod it to the correct value
os.chmod(APIKEYFILE, 0o100600)
    LOGINFO('API key imported successfully for: %s. expires on: %s' %
            (lcc_server, expires))
LOGINFO('written to: %s' % APIKEYFILE)
return apikey, expires
def submit_post_searchquery(url, data, apikey):
'''This submits a POST query to an LCC-Server search API endpoint.
Handles streaming of the results, and returns the final JSON stream. Also
handles results that time out.
Parameters
----------
url : str
The URL of the search API endpoint to hit. This is something like
`https://data.hatsurveys.org/api/conesearch`
data : dict
A dict of the search query parameters to pass to the search service.
apikey : str
The API key to use to access the search service. API keys are required
        for all POST requests made to an LCC-Server's API endpoints.
Returns
-------
(status_flag, data_dict, dataset_id) : tuple
This returns a tuple containing the status of the request: ('complete',
'failed', 'background', etc.), a dict parsed from the JSON result of the
request, and a dataset ID, which can be used to reconstruct the URL on
the LCC-Server where the results can be browsed.
'''
# first, we need to convert any columns and collections items to broken out
# params
postdata = {}
for key in data:
if key == 'columns':
postdata['columns[]'] = data[key]
elif key == 'collections':
postdata['collections[]'] = data[key]
else:
postdata[key] = data[key]
# do the urlencode with doseq=True
# we also need to encode to bytes
encoded_postdata = urlencode(postdata, doseq=True).encode()
# if apikey is not None, add it in as an Authorization: Bearer [apikey]
# header
if apikey:
headers = {'Authorization':'Bearer: %s' % apikey}
else:
headers = {}
LOGINFO('submitting search query to LCC-Server API URL: %s' % url)
try:
# hit the server with a POST request
req = Request(url, data=encoded_postdata, headers=headers)
resp = urlopen(req)
if resp.code == 200:
# we'll iterate over the lines in the response
# this works super-well for ND-JSON!
for line in resp:
data = json.loads(line)
msg = data['message']
status = data['status']
if status != 'failed':
LOGINFO('status: %s, %s' % (status, msg))
else:
LOGERROR('status: %s, %s' % (status, msg))
# here, we'll decide what to do about the query
# completed query or query sent to background...
if status in ('ok','background'):
setid = data['result']['setid']
# save the data pickle to astrobase lccs directory
outpickle = os.path.join(os.path.expanduser('~'),
'.astrobase',
'lccs',
'query-%s.pkl' % setid)
if not os.path.exists(os.path.dirname(outpickle)):
os.makedirs(os.path.dirname(outpickle))
with open(outpickle,'wb') as outfd:
pickle.dump(data, outfd, pickle.HIGHEST_PROTOCOL)
LOGINFO('saved query info to %s, use this to '
'download results later with '
'retrieve_dataset_files' % outpickle)
# we're done at this point, return
return status, data, data['result']['setid']
# the query probably failed...
elif status == 'failed':
# we're done at this point, return
return status, data, None
# if the response was not OK, then we probably failed
else:
try:
data = json.load(resp)
msg = data['message']
LOGERROR(msg)
return 'failed', None, None
except Exception as e:
LOGEXCEPTION('failed to submit query to %s' % url)
return 'failed', None, None
except HTTPError as e:
LOGERROR('could not submit query to LCC API at: %s' % url)
LOGERROR('HTTP status code was %s, reason: %s' % (e.code, e.reason))
return 'failed', None, None
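
# A hedged sketch (not part of the original module) of calling
# `submit_post_searchquery` directly for a cone search. The higher-level
# `cone_search` function below wraps all of this; the coordinates here are
# arbitrary examples.
def _example_submit_conesearch(lcc_server='https://data.hatsurveys.org'):
    apikey_ok, apikey, expires = check_existing_apikey(lcc_server)
    if not apikey_ok:
        apikey, expires = get_new_apikey(lcc_server)
    api_url = '%s/api/conesearch' % lcc_server
    params = {'coords': '290.00000 45.00000 5.0'}  # RA, Dec, radius in arcmin
    status, result, setid = submit_post_searchquery(api_url, params, apikey)
    print('query status: %s, dataset setid: %s' % (status, setid))
    return status, result, setid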
def retrieve_dataset_files(searchresult,
getpickle=False,
outdir=None,
apikey=None):
'''This retrieves a search result dataset's CSV and any LC zip files.
Takes the output from the `submit_post_searchquery` function above or a
pickle file generated from that function's output if the query timed out.
Parameters
----------
searchresult : str or tuple
If provided as a str, points to the pickle file created using the output
from the `submit_post_searchquery` function. If provided as a tuple,
this is the result tuple from the `submit_post_searchquery` function.
    getpickle : bool
If this is True, will also download the dataset's pickle. Note that
LCC-Server is a Python 3.6+ package (while lccs.py still works with
Python 2.7) and it saves its pickles in pickle.HIGHEST_PROTOCOL for
efficiency, so these pickles may be unreadable in lower Pythons. As an
alternative, the dataset CSV contains the full data table and all the
information about the dataset in its header, which is JSON
parseable. You can also use the function `get_dataset` below to get the
dataset pickle information in JSON form.
outdir : None or str
If this is a str, points to the output directory where the results will
be placed. If it's None, they will be placed in the current directory.
apikey : str or None
If this is a str, uses the given API key to authenticate the download
request. This is useful when you have a private dataset you want to get
products for.
Returns
-------
(local_dataset_csv, local_dataset_lczip, local_dataset_pickle) : tuple
This returns a tuple containing paths to the dataset CSV, LC zipfile,
and the dataset pickle if getpickle was set to True (None otherwise).
'''
# this handles the direct result case from submit_*_query functions
if isinstance(searchresult, tuple):
info, setid = searchresult[1:]
    # handles the case where we give the function an existing query pickle
elif isinstance(searchresult, str) and os.path.exists(searchresult):
with open(searchresult,'rb') as infd:
info = pickle.load(infd)
setid = info['result']['setid']
else:
LOGERROR('could not understand input, '
'we need a searchresult from the '
'lccs.submit_post_searchquery function or '
'the path to an existing query pickle')
return None, None, None
# now that we have everything, let's download some files!
dataset_pickle = 'dataset-%s.pkl.gz' % setid
dataset_csv = 'dataset-%s.csv' % setid
dataset_lczip = 'lightcurves-%s.zip' % setid
if outdir is None:
localdir = os.getcwd()
else:
localdir = outdir
server_scheme, server_netloc = urlparse(info['result']['seturl'])[:2]
dataset_pickle_link = '%s://%s/d/%s' % (server_scheme,
server_netloc,
dataset_pickle)
dataset_csv_link = '%s://%s/d/%s' % (server_scheme,
server_netloc,
dataset_csv)
dataset_lczip_link = '%s://%s/p/%s' % (server_scheme,
server_netloc,
dataset_lczip)
if getpickle:
# get the dataset pickle
LOGINFO('getting %s...' % dataset_pickle_link)
try:
if os.path.exists(os.path.join(localdir, dataset_pickle)):
LOGWARNING('dataset pickle already exists, '
'not downloading again..')
local_dataset_pickle = os.path.join(localdir,
dataset_pickle)
else:
# if apikey is not None, add it in as an Authorization: Bearer
# [apikey] header
if apikey:
headers = {'Authorization':'Bearer: %s' % apikey}
else:
headers = {}
req = Request(
dataset_pickle_link,
data=None,
headers=headers
)
resp = urlopen(req)
# save the file
LOGINFO('saving %s' % dataset_pickle)
localf = os.path.join(localdir, dataset_pickle)
with open(localf, 'wb') as outfd:
with resp:
data = resp.read()
outfd.write(data)
LOGINFO('OK -> %s' % localf)
local_dataset_pickle = localf
except HTTPError as e:
LOGERROR('could not download %s, '
'HTTP status code was: %s, reason: %s' %
(dataset_pickle_link, e.code, e.reason))
local_dataset_pickle = None
else:
local_dataset_pickle = None
# get the dataset CSV
LOGINFO('getting %s...' % dataset_csv_link)
try:
if os.path.exists(os.path.join(localdir, dataset_csv)):
LOGWARNING('dataset CSV already exists, not downloading again...')
local_dataset_csv = os.path.join(localdir, dataset_csv)
else:
# if apikey is not None, add it in as an Authorization: Bearer
# [apikey] header
if apikey:
headers = {'Authorization':'Bearer: %s' % apikey}
else:
headers = {}
req = Request(
dataset_csv_link,
data=None,
headers=headers
)
resp = urlopen(req)
# save the file
LOGINFO('saving %s' % dataset_csv)
localf = os.path.join(localdir, dataset_csv)
with open(localf, 'wb') as outfd:
with resp:
data = resp.read()
outfd.write(data)
LOGINFO('OK -> %s' % localf)
local_dataset_csv = localf
except HTTPError as e:
LOGERROR('could not download %s, HTTP status code was: %s, reason: %s' %
(dataset_csv_link, e.code, e.reason))
local_dataset_csv = None
# get the dataset LC zip
LOGINFO('getting %s...' % dataset_lczip_link)
try:
if os.path.exists(os.path.join(localdir, dataset_lczip)):
LOGWARNING('dataset LC ZIP already exists, '
'not downloading again...')
local_dataset_lczip = os.path.join(localdir, dataset_lczip)
else:
# if apikey is not None, add it in as an Authorization: Bearer
# [apikey] header
if apikey:
headers = {'Authorization':'Bearer: %s' % apikey}
else:
headers = {}
req = Request(
dataset_lczip_link,
data=None,
headers=headers
)
resp = urlopen(req)
# save the file
LOGINFO('saving %s' % dataset_lczip)
localf = os.path.join(localdir, dataset_lczip)
with open(localf, 'wb') as outfd:
with resp:
data = resp.read()
outfd.write(data)
LOGINFO('OK -> %s' % localf)
local_dataset_lczip = localf
except HTTPError as e:
LOGERROR('could not download %s, HTTP status code was: %s, reason: %s' %
(dataset_lczip_link, e.code, e.reason))
local_dataset_lczip = None
return local_dataset_csv, local_dataset_lczip, local_dataset_pickle
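
# A hedged sketch (not part of the original module) of picking up the products
# of an earlier query from its saved query pickle. The pickle path below is
# hypothetical; it is the kind of file `submit_post_searchquery` writes for
# queries that were sent to the background.
def _example_retrieve_later(
        query_pickle='~/.astrobase/lccs/query-exampleset.pkl'
):
    csv, lczip, pkl = retrieve_dataset_files(
        os.path.expanduser(query_pickle),
        getpickle=False,
        outdir=None,   # download into the current directory
    )
    print('dataset CSV: %s' % csv)
    print('light curve ZIP: %s' % lczip)
    return csv, lczip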
def cone_search(lcc_server,
center_ra,
center_decl,
radiusarcmin=5.0,
result_visibility='unlisted',
email_when_done=False,
collections=None,
columns=None,
filters=None,
sortspec=None,
samplespec=None,
limitspec=None,
download_data=True,
outdir=None,
maxtimeout=300.0,
refresh=15.0):
'''This runs a cone-search query.
Parameters
----------
lcc_server : str
This is the base URL of the LCC-Server to talk to. (e.g. for HAT, use:
https://data.hatsurveys.org)
center_ra,center_decl : float
These are the central coordinates of the search to conduct. These can be
either decimal degrees of type float, or sexagesimal coordinates of type
str:
- OK: 290.0, 45.0
- OK: 15:00:00 +45:00:00
- OK: 15 00 00.0 -45 00 00.0
- NOT OK: 290.0 +45:00:00
- NOT OK: 15:00:00 45.0
radiusarcmin : float
This is the search radius to use for the cone-search. This is in
arcminutes. The maximum radius you can use is 60 arcminutes = 1 degree.
result_visibility : {'private', 'unlisted', 'public'}
This sets the visibility of the dataset produced from the search
result::
'private' -> the dataset and its products are not visible or
accessible by any user other than the one that
created the dataset.
'unlisted' -> the dataset and its products are not visible in the
list of public datasets, but can be accessed if the
dataset URL is known
'public' -> the dataset and its products are visible in the list
of public datasets and can be accessed by anyone.
email_when_done : bool
If True, the LCC-Server will email you when the search is complete. This
will also set `download_data` to False. Using this requires an
LCC-Server account and an API key tied to that account.
collections : list of str or None
This is a list of LC collections to search in. If this is None, all
collections will be searched.
columns : list of str or None
This is a list of columns to return in the results. Matching objects'
object IDs, RAs, DECs, and links to light curve files will always be
returned so there is no need to specify these columns. If None, only
these columns will be returned: 'objectid', 'ra', 'decl', 'lcfname'
filters : str or None
This is an SQL-like string to use to filter on database columns in the
LCC-Server's collections. To see the columns available for a search,
visit the Collections tab in the LCC-Server's browser UI. The filter
operators allowed are::
lt -> less than
gt -> greater than
ge -> greater than or equal to
le -> less than or equal to
eq -> equal to
ne -> not equal to
ct -> contains text
isnull -> column value is null
notnull -> column value is not null
You may use the `and` and `or` operators between filter specifications
to chain them together logically.
Example filter strings::
"(propermotion gt 200.0) and (sdssr lt 11.0)"
"(dered_jmag_kmag gt 2.0) and (aep_000_stetsonj gt 10.0)"
"(gaia_status ct 'ok') and (propermotion gt 300.0)"
"(simbad_best_objtype ct 'RR') and (dered_sdssu_sdssg lt 0.5)"
sortspec : tuple of two strs or None
If not None, this should be a tuple of two items::
('column to sort by', 'asc|desc')
This sets the column to sort the results by. For cone_search, the
default column and sort order are 'dist_arcsec' and 'asc', meaning the
distance from the search center in ascending order.
samplespec : int or None
If this is an int, will indicate how many rows from the initial search
result will be uniformly random sampled and returned.
limitspec : int or None
If this is an int, will indicate how many rows from the initial search
result to return in total.
`sortspec`, `samplespec`, and `limitspec` are applied in this order:
sample -> sort -> limit
download_data : bool
This sets if the accompanying data from the search results will be
downloaded automatically. This includes the data table CSV, the dataset
pickle file, and a light curve ZIP file. Note that if the search service
indicates that your query is still in progress, this function will block
until the light curve ZIP file becomes available. The maximum wait time
in seconds is set by maxtimeout and the refresh interval is set by
refresh.
To avoid the wait block, set download_data to False and the function
will write a pickle file to `~/.astrobase/lccs/query-[setid].pkl`
containing all the information necessary to retrieve these data files
later when the query is done. To do so, call the
`retrieve_dataset_files` with the path to this pickle file (it will be
returned).
outdir : str or None
If this is provided, sets the output directory of the downloaded dataset
files. If None, they will be downloaded to the current directory.
maxtimeout : float
The maximum time in seconds to wait for the LCC-Server to respond with a
result before timing out. You can use the `retrieve_dataset_files`
function to get results later as needed.
refresh : float
The time to wait in seconds before pinging the LCC-Server to see if a
search query has completed and dataset result files can be downloaded.
Returns
-------
tuple
Returns a tuple with the following elements::
(search result status dict,
search result CSV file path,
search result LC ZIP path)
'''
    # turn the input into a param dict; the center coordinates may be given
    # either as decimal-degree floats or as sexagesimal strings (see the
    # docstring above), so only apply float formatting when we get floats
    if isinstance(center_ra, str) or isinstance(center_decl, str):
        coords = '%s %s %.1f' % (center_ra, center_decl, radiusarcmin)
    else:
        coords = '%.5f %.5f %.1f' % (center_ra, center_decl, radiusarcmin)
params = {
'coords':coords
}
if collections:
params['collections'] = collections
if columns:
params['columns'] = columns
if filters:
params['filters'] = filters
if sortspec:
params['sortspec'] = json.dumps([sortspec])
if samplespec:
params['samplespec'] = int(samplespec)
if limitspec:
params['limitspec'] = int(limitspec)
params['visibility'] = result_visibility
params['emailwhendone'] = email_when_done
# we won't wait for the LC ZIP to complete if email_when_done = True
if email_when_done:
download_data = False
# check if we have an API key already
have_apikey, apikey, expires = check_existing_apikey(lcc_server)
# if not, get a new one
if not have_apikey:
apikey, expires = get_new_apikey(lcc_server)
# hit the server
api_url = '%s/api/conesearch' % lcc_server
searchresult = submit_post_searchquery(api_url, params, apikey)
# check the status of the search
status = searchresult[0]
# now we'll check if we want to download the data
if download_data:
if status == 'ok':
LOGINFO('query complete, downloading associated data...')
csv, lczip, pkl = retrieve_dataset_files(searchresult,
outdir=outdir,
apikey=apikey)
if pkl:
return searchresult[1], csv, lczip, pkl
else:
return searchresult[1], csv, lczip
elif status == 'background':
LOGINFO('query is not yet complete, '
'waiting up to %.1f minutes, '
'updates every %s seconds (hit Ctrl+C to cancel)...' %
(maxtimeout/60.0, refresh))
timewaited = 0.0
while timewaited < maxtimeout:
try:
time.sleep(refresh)
csv, lczip, pkl = retrieve_dataset_files(searchresult,
outdir=outdir,
apikey=apikey)
if (csv and os.path.exists(csv) and
lczip and os.path.exists(lczip)):
LOGINFO('all dataset products collected')
return searchresult[1], csv, lczip
timewaited = timewaited + refresh
except KeyboardInterrupt:
LOGWARNING('abandoned wait for downloading data')
return searchresult[1], None, None
LOGERROR('wait timed out.')
return searchresult[1], None, None
else:
LOGERROR('could not download the data for this query result')
return searchresult[1], None, None
else:
return searchresult[1], None, None
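
# A hedged end-to-end sketch (not part of the original module) for
# `cone_search` above. The coordinates and filter string are arbitrary
# examples; with download_data=True the call blocks until the dataset
# products are ready or `maxtimeout` is reached.
def _example_cone_search(lcc_server='https://data.hatsurveys.org'):
    results = cone_search(
        lcc_server,
        290.0, 45.0,                  # center RA, Dec in decimal degrees
        radiusarcmin=10.0,
        filters="(sdssr lt 13.0)",    # example column filter
        download_data=True,
    )
    # results[0] is the search result info dict, results[1] and results[2]
    # are the local paths of the dataset CSV and the light curve ZIP (or None)
    return results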
def xmatch_search(lcc_server,
file_to_upload,
xmatch_dist_arcsec=3.0,
result_visibility='unlisted',
email_when_done=False,
collections=None,
columns=None,
filters=None,
sortspec=None,
limitspec=None,
samplespec=None,
download_data=True,
outdir=None,
maxtimeout=300.0,
refresh=15.0):
'''This runs a cross-match search query.
Parameters
----------
lcc_server : str
This is the base URL of the LCC-Server to talk to. (e.g. for HAT, use:
https://data.hatsurveys.org)
file_to_upload : str
This is the path to a text file containing objectid, RA, declination
rows for the objects to cross-match against the LCC-Server
collections. This should follow the format of the following example::
# example object and coordinate list
# objectid ra dec
aaa 289.99698 44.99839
bbb 293.358 -23.206
ccc 294.197 +23.181
ddd 19 25 27.9129 +42 47 03.693
eee 19:25:27 -42:47:03.21
# .
# .
# .
# etc. lines starting with '#' will be ignored
# (max 5000 objects)
xmatch_dist_arcsec : float
This is the maximum distance in arcseconds to consider when
cross-matching objects in the uploaded file to the LCC-Server's
collections. The maximum allowed distance is 30 arcseconds. Multiple
matches to an uploaded object are possible and will be returned in order
of increasing distance grouped by input `objectid`.
result_visibility : {'private', 'unlisted', 'public'}
This sets the visibility of the dataset produced from the search
result::
'private' -> the dataset and its products are not visible or
accessible by any user other than the one that
created the dataset.
'unlisted' -> the dataset and its products are not visible in the
list of public datasets, but can be accessed if the
dataset URL is known
'public' -> the dataset and its products are visible in the list
of public datasets and can be accessed by anyone.
email_when_done : bool
If True, the LCC-Server will email you when the search is complete. This
will also set `download_data` to False. Using this requires an
LCC-Server account and an API key tied to that account.
collections : list of str or None
This is a list of LC collections to search in. If this is None, all
collections will be searched.
columns : list of str or None
This is a list of columns to return in the results. Matching objects'
object IDs, RAs, DECs, and links to light curve files will always be
returned so there is no need to specify these columns. If None, only
these columns will be returned: 'objectid', 'ra', 'decl', 'lcfname'
filters : str or None
This is an SQL-like string to use to filter on database columns in the
LCC-Server's collections. To see the columns available for a search,
visit the Collections tab in the LCC-Server's browser UI. The filter
operators allowed are::
lt -> less than
gt -> greater than
ge -> greater than or equal to
le -> less than or equal to
eq -> equal to
ne -> not equal to
ct -> contains text
isnull -> column value is null
notnull -> column value is not null
You may use the `and` and `or` operators between filter specifications
to chain them together logically.
Example filter strings::
"(propermotion gt 200.0) and (sdssr lt 11.0)"
"(dered_jmag_kmag gt 2.0) and (aep_000_stetsonj gt 10.0)"
"(gaia_status ct 'ok') and (propermotion gt 300.0)"
"(simbad_best_objtype ct 'RR') and (dered_sdssu_sdssg lt 0.5)"
sortspec : tuple of two strs or None
If not None, this should be a tuple of two items::
('column to sort by', 'asc|desc')
        This sets the column to sort the results by. For xmatch_search, the
        default column and sort order are 'dist_arcsec' and 'asc', meaning the
        distance from each matched input object in ascending order.
samplespec : int or None
If this is an int, will indicate how many rows from the initial search
result will be uniformly random sampled and returned.
limitspec : int or None
If this is an int, will indicate how many rows from the initial search
result to return in total.
`sortspec`, `samplespec`, and `limitspec` are applied in this order:
sample -> sort -> limit
download_data : bool
This sets if the accompanying data from the search results will be
downloaded automatically. This includes the data table CSV, the dataset
pickle file, and a light curve ZIP file. Note that if the search service
indicates that your query is still in progress, this function will block
until the light curve ZIP file becomes available. The maximum wait time
in seconds is set by maxtimeout and the refresh interval is set by
refresh.
To avoid the wait block, set download_data to False and the function
will write a pickle file to `~/.astrobase/lccs/query-[setid].pkl`
containing all the information necessary to retrieve these data files
later when the query is done. To do so, call the
`retrieve_dataset_files` with the path to this pickle file (it will be
returned).
outdir : str or None
If this is provided, sets the output directory of the downloaded dataset
files. If None, they will be downloaded to the current directory.
maxtimeout : float
The maximum time in seconds to wait for the LCC-Server to respond with a
result before timing out. You can use the `retrieve_dataset_files`
function to get results later as needed.
refresh : float
The time to wait in seconds before pinging the LCC-Server to see if a
search query has completed and dataset result files can be downloaded.
Returns
-------
tuple
Returns a tuple with the following elements::
(search result status dict,
search result CSV file path,
search result LC ZIP path)
'''
with open(file_to_upload) as infd:
xmq = infd.read()
# check the number of lines in the input
xmqlines = len(xmq.split('\n')[:-1])
if xmqlines > 5000:
LOGERROR('you have more than 5000 lines in the file to upload: %s' %
file_to_upload)
return None, None, None
# turn the input into a param dict
params = {'xmq':xmq,
'xmd':xmatch_dist_arcsec}
if collections:
params['collections'] = collections
if columns:
params['columns'] = columns
if filters:
params['filters'] = filters
if sortspec:
params['sortspec'] = json.dumps([sortspec])
if samplespec:
params['samplespec'] = int(samplespec)
if limitspec:
params['limitspec'] = int(limitspec)
params['visibility'] = result_visibility
params['emailwhendone'] = email_when_done
# we won't wait for the LC ZIP to complete if email_when_done = True
if email_when_done:
download_data = False
# check if we have an API key already
have_apikey, apikey, expires = check_existing_apikey(lcc_server)
# if not, get a new one
if not have_apikey:
apikey, expires = get_new_apikey(lcc_server)
# hit the server
api_url = '%s/api/xmatch' % lcc_server
searchresult = submit_post_searchquery(api_url, params, apikey)
# check the status of the search
status = searchresult[0]
# now we'll check if we want to download the data
if download_data:
if status == 'ok':
LOGINFO('query complete, downloading associated data...')
csv, lczip, pkl = retrieve_dataset_files(searchresult,
outdir=outdir,
apikey=apikey)
if pkl:
return searchresult[1], csv, lczip, pkl
else:
return searchresult[1], csv, lczip
elif status == 'background':
LOGINFO('query is not yet complete, '
'waiting up to %.1f minutes, '
'updates every %s seconds (hit Ctrl+C to cancel)...' %
(maxtimeout/60.0, refresh))
timewaited = 0.0
while timewaited < maxtimeout:
try:
time.sleep(refresh)
csv, lczip, pkl = retrieve_dataset_files(searchresult,
outdir=outdir,
apikey=apikey)
if (csv and os.path.exists(csv) and
lczip and os.path.exists(lczip)):
LOGINFO('all dataset products collected')
return searchresult[1], csv, lczip
timewaited = timewaited + refresh
except KeyboardInterrupt:
LOGWARNING('abandoned wait for downloading data')
return searchresult[1], None, None
LOGERROR('wait timed out.')
return searchresult[1], None, None
else:
LOGERROR('could not download the data for this query result')
return searchresult[1], None, None
else:
return searchresult[1], None, None
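
# A hedged sketch (not part of the original module) for `xmatch_search`
# above. The upload file path is hypothetical; the file must follow the
# objectid/ra/dec format described in the docstring (at most 5000 objects).
def _example_xmatch_search(lcc_server='https://data.hatsurveys.org',
                           upload_file='my-targets.txt'):
    results = xmatch_search(
        lcc_server,
        upload_file,
        xmatch_dist_arcsec=3.0,
        download_data=True,
    )
    return results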
def get_dataset(lcc_server,
dataset_id,
strformat=False,
page=1):
'''This downloads a JSON form of a dataset from the specified lcc_server.
If the dataset contains more than 1000 rows, it will be paginated, so you
must use the `page` kwarg to get the page you want. The dataset JSON will
contain the keys 'npages', 'currpage', and 'rows_per_page' to help with
this. The 'rows' key contains the actual data rows as a list of tuples.
The JSON contains metadata about the query that produced the dataset,
information about the data table's columns, and links to download the
dataset's products including the light curve ZIP and the dataset CSV.
Parameters
----------
lcc_server : str
This is the base URL of the LCC-Server to talk to.
dataset_id : str
This is the unique setid of the dataset you want to get. In the results
from the `*_search` functions above, this is the value of the
`infodict['result']['setid']` key in the first item (the infodict) in
the returned tuple.
strformat : bool
This sets if you want the returned data rows to be formatted in their
string representations already. This can be useful if you're piping the
returned JSON straight into some sort of UI and you don't want to deal
with formatting floats, etc. To do this manually when strformat is set
to False, look at the `coldesc` item in the returned dict, which gives
the Python and Numpy string format specifiers for each column in the
data table.
page : int
This sets which page of the dataset should be retrieved.
Returns
-------
dict
This returns the dataset JSON loaded into a dict.
'''
urlparams = {'strformat':1 if strformat else 0,
'page':page,
'json':1}
urlqs = urlencode(urlparams)
dataset_url = '%s/set/%s?%s' % (lcc_server, dataset_id, urlqs)
    LOGINFO('retrieving dataset %s from %s, using URL: %s ...' % (dataset_id,
                                                                  lcc_server,
                                                                  dataset_url))
try:
# check if we have an API key already
have_apikey, apikey, expires = check_existing_apikey(lcc_server)
# if not, get a new one
if not have_apikey:
apikey, expires = get_new_apikey(lcc_server)
# if apikey is not None, add it in as an Authorization: Bearer [apikey]
# header
if apikey:
headers = {'Authorization':'Bearer: %s' % apikey}
else:
headers = {}
# hit the server
req = Request(dataset_url, data=None, headers=headers)
resp = urlopen(req)
dataset = json.loads(resp.read())
return dataset
except Exception as e:
LOGEXCEPTION('could not retrieve the dataset JSON!')
return None
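
# A hedged sketch (not part of the original module) of paging through a large
# dataset with `get_dataset`. The setid is hypothetical; the 'npages' key in
# the first page (see the docstring above) tells us how many more pages to
# fetch.
def _example_get_all_dataset_rows(lcc_server='https://data.hatsurveys.org',
                                  setid='exampleset'):
    firstpage = get_dataset(lcc_server, setid, page=1)
    if firstpage is None:
        return None
    rows = list(firstpage['rows'])
    for page in range(2, firstpage['npages'] + 1):
        nextpage = get_dataset(lcc_server, setid, page=page)
        if nextpage is not None:
            rows.extend(nextpage['rows'])
    return rows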
def object_info(lcc_server, objectid, db_collection_id):
'''This gets information on a single object from the LCC-Server.
Returns a dict with all of the available information on an object, including
finding charts, comments, object type and variability tags, and
period-search results (if available).
If you have an LCC-Server API key present in `~/.astrobase/lccs/` that is
associated with an LCC-Server user account, objects that are visible to this
user will be returned, even if they are not visible to the public. Use this
to look up objects that have been marked as 'private' or 'shared'.
NOTE: you can pass the result dict returned by this function directly into
the `astrobase.checkplot.checkplot_pickle_to_png` function, e.g.::
astrobase.checkplot.checkplot_pickle_to_png(result_dict,
'object-%s-info.png' %
result_dict['objectid'])
to generate a quick PNG overview of the object information.
Parameters
----------
lcc_server : str
This is the base URL of the LCC-Server to talk to.
objectid : str
This is the unique database ID of the object to retrieve info for. This
is always returned as the `db_oid` column in LCC-Server search results.
db_collection_id : str
This is the collection ID which will be searched for the object. This is
always returned as the `collection` column in LCC-Server search results.
Returns
-------
dict
A dict containing the object info is returned. Some important items in
the result dict:
- `objectinfo`: all object magnitude, color, GAIA cross-match, and
object type information available for this object
- `objectcomments`: comments on the object's variability if available
- `varinfo`: variability comments, variability features, type tags,
period and epoch information if available
- `neighbors`: information on the neighboring objects of this object in
its parent light curve collection
- `xmatch`: information on any cross-matches to external catalogs
(e.g. KIC, EPIC, TIC, APOGEE, etc.)
- `finderchart`: a base-64 encoded PNG image of the object's DSS2 RED
finder chart. To convert this to an actual PNG, try the function:
`astrobase.checkplot.pkl_io._b64_to_file`.
- `magseries`: a base-64 encoded PNG image of the object's light
curve. To convert this to an actual PNG, try the function:
`astrobase.checkplot.pkl_io._b64_to_file`.
- `pfmethods`: a list of period-finding methods applied to the object if
any. If this list is present, use the keys in it to get to the actual
period-finding results for each method. These will contain base-64
encoded PNGs of the periodogram and phased light curves using the best
three peaks in the periodogram, as well as period and epoch
information.
'''
urlparams = {
'objectid':objectid,
'collection':db_collection_id
}
urlqs = urlencode(urlparams)
url = '%s/api/object?%s' % (lcc_server, urlqs)
try:
LOGINFO(
'getting info for %s in collection %s from %s' % (
objectid,
db_collection_id,
lcc_server
)
)
# check if we have an API key already
have_apikey, apikey, expires = check_existing_apikey(lcc_server)
# if not, get a new one
if not have_apikey:
apikey, expires = get_new_apikey(lcc_server)
# if apikey is not None, add it in as an Authorization: Bearer [apikey]
# header
if apikey:
headers = {'Authorization':'Bearer: %s' % apikey}
else:
headers = {}
# hit the server
req = Request(url, data=None, headers=headers)
resp = urlopen(req)
objectinfo = json.loads(resp.read())['result']
return objectinfo
except HTTPError as e:
if e.code == 404:
LOGERROR(
'additional info for object %s not '
'found in collection: %s' % (objectid,
db_collection_id)
)
else:
LOGERROR('could not retrieve object info, '
'URL used: %s, error code: %s, reason: %s' %
(url, e.code, e.reason))
return None
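# A minimal usage sketch for object_info -- not part of the original module.
# It assumes the function above is in scope; the server URL, object ID, and
# collection ID below are placeholders to be replaced with values taken from
# an actual LCC-Server search result.
example_lcc_server = 'https://lcc-server.example.org'
example_objinfo = object_info(example_lcc_server,
                              'HAT-215-0001809',
                              'hatnet_keplerfield')
if example_objinfo is not None:
    # the finder chart and light curve PNGs come back base-64 encoded
    print(example_objinfo['objectinfo'])
    print(example_objinfo.get('pfmethods'))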
|
def list_recent_datasets(lcc_server, nrecent=25):
'''This lists recent publicly visible datasets available on the LCC-Server.
If you have an LCC-Server API key present in `~/.astrobase/lccs/` that is
associated with an LCC-Server user account, datasets that belong to this
user will be returned as well, even if they are not visible to the public.
Parameters
----------
lcc_server : str
This is the base URL of the LCC-Server to talk to.
nrecent : int
This indicates how many recent public datasets you want to list. This is
always capped at 1000.
Returns
-------
list of dicts
Returns a list of dicts, with each dict containing info on each dataset.
'''
urlparams = {'nsets':nrecent}
urlqs = urlencode(urlparams)
url = '%s/api/datasets?%s' % (lcc_server, urlqs)
try:
LOGINFO(
'getting list of recent publicly '
'visible and owned datasets from %s' % (
lcc_server,
)
)
# check if we have an API key already
have_apikey, apikey, expires = check_existing_apikey(lcc_server)
# if not, get a new one
if not have_apikey:
apikey, expires = get_new_apikey(lcc_server)
# if apikey is not None, add it in as an Authorization: Bearer [apikey]
# header
if apikey:
headers = {'Authorization':'Bearer: %s' % apikey}
else:
headers = {}
# hit the server
req = Request(url, data=None, headers=headers)
resp = urlopen(req)
recent_datasets = json.loads(resp.read())['result']
return recent_datasets
except HTTPError as e:
LOGERROR('could not retrieve recent datasets list, '
'URL used: %s, error code: %s, reason: %s' %
(url, e.code, e.reason))
return None
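# A short usage sketch for list_recent_datasets -- not part of the original
# module; the server URL is a placeholder and the function above is assumed
# to be in scope.
example_recent = list_recent_datasets('https://lcc-server.example.org',
                                      nrecent=10)
if example_recent is not None:
    print('%s recent datasets returned' % len(example_recent))
    for example_dset in example_recent:
        print(example_dset)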
|
def list_lc_collections(lcc_server):
'''This lists all light curve collections made available on the LCC-Server.
If you have an LCC-Server API key present in `~/.astrobase/lccs/` that is
associated with an LCC-Server user account, light curve collections visible
to this user will be returned as well, even if they are not visible to the
public.
Parameters
----------
lcc_server : str
The base URL of the LCC-Server to talk to.
Returns
-------
dict
Returns a dict containing lists of info items per collection. This
includes collection_ids, lists of columns, lists of indexed columns,
lists of full-text indexed columns, detailed column descriptions, number
of objects in each collection, collection sky coverage, etc.
'''
url = '%s/api/collections' % lcc_server
try:
LOGINFO(
'getting list of recent publicly visible '
'and owned LC collections from %s' % (
lcc_server,
)
)
# check if we have an API key already
have_apikey, apikey, expires = check_existing_apikey(lcc_server)
# if not, get a new one
if not have_apikey:
apikey, expires = get_new_apikey(lcc_server)
# if apikey is not None, add it in as an Authorization: Bearer [apikey]
# header
if apikey:
headers = {'Authorization':'Bearer: %s' % apikey}
else:
headers = {}
# hit the server
req = Request(url, data=None, headers=headers)
resp = urlopen(req)
lcc_list = json.loads(resp.read())['result']['collections']
return lcc_list
except HTTPError as e:
LOGERROR('could not retrieve list of collections, '
'URL used: %s, error code: %s, reason: %s' %
(url, e.code, e.reason))
return None
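# A short usage sketch for list_lc_collections -- not part of the original
# module; the server URL is a placeholder and the function above is assumed
# to be in scope.
example_collections = list_lc_collections('https://lcc-server.example.org')
if example_collections is not None:
    # the exact keys depend on the LCC-Server; list them before drilling down
    print(sorted(example_collections.keys()))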
|
def stetson_jindex(ftimes, fmags, ferrs, weightbytimediff=False):
'''This calculates the Stetson index for the magseries, based on consecutive
pairs of observations.
Based on Nicole Loncke's work for her Planets and Life certificate at
Princeton in 2014.
Parameters
----------
ftimes,fmags,ferrs : np.array
The input mag/flux time-series with all non-finite elements removed.
weightbytimediff : bool
If this is True, the Stetson index for any pair of mags will be
reweighted by the difference in times between them using the scheme in
        Fruth+ 2012 and Zhang+ 2003 (as seen in Sokolovsky+ 2017)::
            w_i = exp( -(t_(i+1) - t_i) / delta_t )
Returns
-------
float
The calculated Stetson J variability index.
'''
ndet = len(fmags)
if ndet > 9:
# get the median and ndet
medmag = npmedian(fmags)
# get the stetson index elements
delta_prefactor = (ndet/(ndet - 1))
sigma_i = delta_prefactor*(fmags - medmag)/ferrs
# Nicole's clever trick to advance indices by 1 and do x_i*x_(i+1)
sigma_j = nproll(sigma_i,1)
if weightbytimediff:
difft = npdiff(ftimes)
deltat = npmedian(difft)
weights_i = npexp(- difft/deltat )
products = (weights_i*sigma_i[1:]*sigma_j[1:])
else:
# ignore first elem since it's actually x_0*x_n
products = (sigma_i*sigma_j)[1:]
stetsonj = (
npsum(npsign(products) * npsqrt(npabs(products)))
) / ndet
return stetsonj
else:
LOGERROR('not enough detections in this magseries '
'to calculate stetson J index')
return npnan
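# A minimal sketch showing stetson_jindex on synthetic data -- not part of the
# original module. A noisy sinusoid should give a noticeably larger J than
# pure noise; the numbers below are illustrative only.
import numpy as np

_ex_rng = np.random.RandomState(42)
_ex_times = np.arange(0.0, 100.0, 0.05)
_ex_mags = (12.0 + 0.1*np.sin(2.0*np.pi*_ex_times/3.217) +
            0.01*_ex_rng.randn(_ex_times.size))
_ex_errs = np.full_like(_ex_mags, 0.01)
print('Stetson J (variable):',
      stetson_jindex(_ex_times, _ex_mags, _ex_errs, weightbytimediff=True))
print('Stetson J (noise only):',
      stetson_jindex(_ex_times, 12.0 + 0.01*_ex_rng.randn(_ex_times.size),
                     _ex_errs, weightbytimediff=True))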
|
def stetson_kindex(fmags, ferrs):
'''This calculates the Stetson K index (a robust measure of the kurtosis).
Parameters
----------
fmags,ferrs : np.array
The input mag/flux time-series to process. Must have no non-finite
elems.
Returns
-------
float
The Stetson K variability index.
'''
# use a fill in value for the errors if they're none
if ferrs is None:
ferrs = npfull_like(fmags, 0.005)
ndet = len(fmags)
if ndet > 9:
# get the median and ndet
medmag = npmedian(fmags)
# get the stetson index elements
delta_prefactor = (ndet/(ndet - 1))
sigma_i = delta_prefactor*(fmags - medmag)/ferrs
stetsonk = (
npsum(npabs(sigma_i))/(npsqrt(npsum(sigma_i*sigma_i))) *
(ndet**(-0.5))
)
return stetsonk
else:
LOGERROR('not enough detections in this magseries '
'to calculate stetson K index')
return npnan
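# A short sketch for stetson_kindex on synthetic data -- not part of the
# original module. For a purely Gaussian mag series, K is expected to be close
# to sqrt(2/pi) ~ 0.798; the values here are illustrative only.
import numpy as np

_exk_rng = np.random.RandomState(0)
_exk_mags = 12.0 + 0.01*_exk_rng.randn(2000)
_exk_errs = np.full_like(_exk_mags, 0.01)
print('Stetson K:', stetson_kindex(_exk_mags, _exk_errs))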
|
def lightcurve_moments(ftimes, fmags, ferrs):
'''This calculates the weighted mean, stdev, median, MAD, percentiles, skew,
kurtosis, fraction of LC beyond 1-stdev, and IQR.
Parameters
----------
ftimes,fmags,ferrs : np.array
The input mag/flux time-series with all non-finite elements removed.
Returns
-------
dict
A dict with all of the light curve moments calculated.
'''
ndet = len(fmags)
if ndet > 9:
# now calculate the various things we need
series_median = npmedian(fmags)
series_wmean = (
npsum(fmags*(1.0/(ferrs*ferrs)))/npsum(1.0/(ferrs*ferrs))
)
series_mad = npmedian(npabs(fmags - series_median))
series_stdev = 1.483*series_mad
series_skew = spskew(fmags)
series_kurtosis = spkurtosis(fmags)
# get the beyond1std fraction
series_above1std = len(fmags[fmags > (series_median + series_stdev)])
series_below1std = len(fmags[fmags < (series_median - series_stdev)])
# this is the fraction beyond 1 stdev
series_beyond1std = (series_above1std + series_below1std)/float(ndet)
# get the magnitude percentiles
series_mag_percentiles = nppercentile(
fmags,
[5.0,10,17.5,25,32.5,40,60,67.5,75,82.5,90,95]
)
return {
'median':series_median,
'wmean':series_wmean,
'mad':series_mad,
'stdev':series_stdev,
'skew':series_skew,
'kurtosis':series_kurtosis,
'beyond1std':series_beyond1std,
'mag_percentiles':series_mag_percentiles,
'mag_iqr': series_mag_percentiles[8] - series_mag_percentiles[3],
}
else:
LOGERROR('not enough detections in this magseries '
'to calculate light curve moments')
return None
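# A minimal sketch for lightcurve_moments on synthetic data -- not part of the
# original module; the values printed are illustrative only.
import numpy as np

_exm_rng = np.random.RandomState(1)
_exm_times = np.arange(0.0, 50.0, 0.05)
_exm_mags = (12.0 + 0.05*np.sin(2.0*np.pi*_exm_times/1.7) +
             0.01*_exm_rng.randn(_exm_times.size))
_exm_errs = np.full_like(_exm_mags, 0.01)
_exm_moments = lightcurve_moments(_exm_times, _exm_mags, _exm_errs)
print(_exm_moments['median'], _exm_moments['mad'], _exm_moments['beyond1std'])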
|
def lightcurve_flux_measures(ftimes, fmags, ferrs, magsarefluxes=False):
'''This calculates percentiles and percentile ratios of the flux.
Parameters
----------
ftimes,fmags,ferrs : np.array
The input mag/flux time-series with all non-finite elements removed.
magsarefluxes : bool
If the `fmags` array actually contains fluxes, will not convert `mags`
to fluxes before calculating the percentiles.
Returns
-------
dict
A dict with all of the light curve flux percentiles and percentile
ratios calculated.
'''
ndet = len(fmags)
if ndet > 9:
# get the fluxes
if magsarefluxes:
series_fluxes = fmags
else:
series_fluxes = 10.0**(-0.4*fmags)
series_flux_median = npmedian(series_fluxes)
# get the percent_amplitude for the fluxes
series_flux_percent_amplitude = (
npmax(npabs(series_fluxes))/series_flux_median
)
# get the flux percentiles
series_flux_percentiles = nppercentile(
series_fluxes,
[5.0,10,17.5,25,32.5,40,60,67.5,75,82.5,90,95]
)
series_frat_595 = (
series_flux_percentiles[-1] - series_flux_percentiles[0]
)
series_frat_1090 = (
series_flux_percentiles[-2] - series_flux_percentiles[1]
)
series_frat_175825 = (
series_flux_percentiles[-3] - series_flux_percentiles[2]
)
series_frat_2575 = (
series_flux_percentiles[-4] - series_flux_percentiles[3]
)
series_frat_325675 = (
series_flux_percentiles[-5] - series_flux_percentiles[4]
)
series_frat_4060 = (
series_flux_percentiles[-6] - series_flux_percentiles[5]
)
# calculate the flux percentile ratios
series_flux_percentile_ratio_mid20 = series_frat_4060/series_frat_595
series_flux_percentile_ratio_mid35 = series_frat_325675/series_frat_595
series_flux_percentile_ratio_mid50 = series_frat_2575/series_frat_595
series_flux_percentile_ratio_mid65 = series_frat_175825/series_frat_595
series_flux_percentile_ratio_mid80 = series_frat_1090/series_frat_595
# calculate the ratio of F595/median flux
series_percent_difference_flux_percentile = (
series_frat_595/series_flux_median
)
series_percentile_magdiff = -2.5*nplog10(
series_percent_difference_flux_percentile
)
return {
'flux_median':series_flux_median,
'flux_percent_amplitude':series_flux_percent_amplitude,
'flux_percentiles':series_flux_percentiles,
'flux_percentile_ratio_mid20':series_flux_percentile_ratio_mid20,
'flux_percentile_ratio_mid35':series_flux_percentile_ratio_mid35,
'flux_percentile_ratio_mid50':series_flux_percentile_ratio_mid50,
'flux_percentile_ratio_mid65':series_flux_percentile_ratio_mid65,
'flux_percentile_ratio_mid80':series_flux_percentile_ratio_mid80,
'percent_difference_flux_percentile':series_percentile_magdiff,
}
else:
LOGERROR('not enough detections in this magseries '
'to calculate flux measures')
return None
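# A short sketch for lightcurve_flux_measures -- not part of the original
# module. With magsarefluxes=False below, the magnitudes are converted to
# fluxes internally before the percentile ratios are computed; the numbers
# are illustrative only.
import numpy as np

_exf_rng = np.random.RandomState(2)
_exf_times = np.arange(0.0, 50.0, 0.05)
_exf_mags = 12.0 + 0.02*_exf_rng.randn(_exf_times.size)
_exf_errs = np.full_like(_exf_mags, 0.01)
_exf_res = lightcurve_flux_measures(_exf_times, _exf_mags, _exf_errs,
                                    magsarefluxes=False)
print(_exf_res['flux_percentile_ratio_mid50'])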
|
def lightcurve_ptp_measures(ftimes, fmags, ferrs):
'''This calculates various point-to-point measures (`eta` in Kim+ 2014).
Parameters
----------
ftimes,fmags,ferrs : np.array
The input mag/flux time-series with all non-finite elements removed.
Returns
-------
dict
A dict with values of the point-to-point measures, including the `eta`
variability index (often used as its inverse `inveta` to have the same
sense as increasing variability index -> more likely a variable star).
'''
ndet = len(fmags)
if ndet > 9:
timediffs = npdiff(ftimes)
# get rid of stuff with time diff = 0.0
nzind = npnonzero(timediffs)
ftimes, fmags, ferrs = ftimes[nzind], fmags[nzind], ferrs[nzind]
# recalculate ndet and diffs
ndet = ftimes.size
timediffs = npdiff(ftimes)
# calculate the point to point measures
p2p_abs_magdiffs = npabs(npdiff(fmags))
p2p_squared_magdiffs = npdiff(fmags)*npdiff(fmags)
robstd = npmedian(npabs(fmags - npmedian(fmags)))*1.483
robvar = robstd*robstd
# these are eta from the Kim+ 2014 paper - ratio of point-to-point
# difference to the variance of the entire series
# this is the robust version
eta_robust = npmedian(p2p_abs_magdiffs)/robvar
eta_robust = eta_robust/(ndet - 1.0)
# this is the usual version
eta_normal = npsum(p2p_squared_magdiffs)/npvar(fmags)
eta_normal = eta_normal/(ndet - 1.0)
timeweights = 1.0/(timediffs*timediffs)
# this is eta_e modified for uneven sampling from the Kim+ 2014 paper
eta_uneven_normal = (
(npsum(timeweights*p2p_squared_magdiffs) /
(npvar(fmags) * npsum(timeweights)) ) *
npmean(timeweights) *
(ftimes.max() - ftimes.min())*(ftimes.max() - ftimes.min())
)
# this is robust eta_e modified for uneven sampling from the Kim+ 2014
# paper
eta_uneven_robust = (
(npsum(timeweights*p2p_abs_magdiffs) /
(robvar * npsum(timeweights)) ) *
npmedian(timeweights) *
(ftimes[-1] - ftimes[0])*(ftimes[-1] - ftimes[0])
)
return {
'eta_normal':eta_normal,
'eta_robust':eta_robust,
'eta_uneven_normal':eta_uneven_normal,
'eta_uneven_robust':eta_uneven_robust
}
else:
return None
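# A short sketch for lightcurve_ptp_measures -- not part of the original
# module. Larger eta values are expected for light curves closer to
# uncorrelated noise, smaller values for smoothly varying signals; the
# synthetic data below is illustrative only.
import numpy as np

_exp_rng = np.random.RandomState(3)
_exp_times = np.arange(0.0, 50.0, 0.05)
_exp_mags = (12.0 + 0.05*np.sin(2.0*np.pi*_exp_times/2.3) +
             0.01*_exp_rng.randn(_exp_times.size))
_exp_errs = np.full_like(_exp_mags, 0.01)
print(lightcurve_ptp_measures(_exp_times, _exp_mags, _exp_errs))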
|
def nonperiodic_lightcurve_features(times, mags, errs, magsarefluxes=False):
'''This calculates the following nonperiodic features of the light curve,
    listed in Richards et al. (2011):
- amplitude
- beyond1std
- flux_percentile_ratio_mid20
- flux_percentile_ratio_mid35
- flux_percentile_ratio_mid50
- flux_percentile_ratio_mid65
- flux_percentile_ratio_mid80
- linear_trend
- max_slope
- median_absolute_deviation
- median_buffer_range_percentage
- pair_slope_trend
- percent_amplitude
- percent_difference_flux_percentile
- skew
- stdev
- timelength
- mintime
- maxtime
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to process.
magsarefluxes : bool
If True, will treat values in `mags` as fluxes instead of magnitudes.
Returns
-------
dict
A dict containing all of the features listed above.
'''
# remove nans first
finiteind = npisfinite(times) & npisfinite(mags) & npisfinite(errs)
ftimes, fmags, ferrs = times[finiteind], mags[finiteind], errs[finiteind]
# remove zero errors
nzind = npnonzero(ferrs)
ftimes, fmags, ferrs = ftimes[nzind], fmags[nzind], ferrs[nzind]
ndet = len(fmags)
if ndet > 9:
# calculate the moments
moments = lightcurve_moments(ftimes, fmags, ferrs)
# calculate the flux measures
fluxmeasures = lightcurve_flux_measures(ftimes, fmags, ferrs,
magsarefluxes=magsarefluxes)
# calculate the point-to-point measures
ptpmeasures = lightcurve_ptp_measures(ftimes, fmags, ferrs)
# get the length in time
mintime, maxtime = npmin(ftimes), npmax(ftimes)
timelength = maxtime - mintime
# get the amplitude
series_amplitude = 0.5*(npmax(fmags) - npmin(fmags))
# calculate the linear fit to the entire mag series
        fitcoeffs = nppolyfit(ftimes, fmags, 1, w=1.0/(ferrs*ferrs))
        # np.polyfit returns coefficients with the highest power first, so the
        # slope of the deg = 1 fit is the first element, not the second
        series_linear_slope = fitcoeffs[0]
# roll fmags by 1
rolled_fmags = nproll(fmags,1)
# calculate the magnitude ratio (from the WISE paper)
series_magratio = (
(npmax(fmags) - moments['median']) / (npmax(fmags) - npmin(fmags) )
)
# this is the dictionary returned containing all the measures
measures = {
'ndet':fmags.size,
'mintime':mintime,
'maxtime':maxtime,
'timelength':timelength,
'amplitude':series_amplitude,
'ndetobslength_ratio':ndet/timelength,
'linear_fit_slope':series_linear_slope,
'magnitude_ratio':series_magratio,
}
if moments:
measures.update(moments)
if ptpmeasures:
measures.update(ptpmeasures)
if fluxmeasures:
measures.update(fluxmeasures)
return measures
else:
LOGERROR('not enough detections in this magseries '
'to calculate non-periodic features')
return None
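# A minimal sketch for nonperiodic_lightcurve_features -- not part of the
# original module. This rolls the moments, flux, and point-to-point measures
# above into one dict; the synthetic data and printed keys are illustrative.
import numpy as np

_exn_rng = np.random.RandomState(4)
_exn_times = np.arange(0.0, 50.0, 0.05)
_exn_mags = (12.0 + 0.05*np.sin(2.0*np.pi*_exn_times/2.3) +
             0.01*_exn_rng.randn(_exn_times.size))
_exn_errs = np.full_like(_exn_mags, 0.01)
_exn_feats = nonperiodic_lightcurve_features(_exn_times, _exn_mags, _exn_errs)
print(_exn_feats['amplitude'], _exn_feats['beyond1std'], _exn_feats['stdev'])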
|
def gilliland_cdpp(times, mags, errs,
windowlength=97,
polyorder=2,
binsize=23400, # in seconds: 6.5 hours for classic CDPP
sigclip=5.0,
magsarefluxes=False,
**kwargs):
'''This calculates the CDPP of a timeseries using the method in the paper:
Gilliland, R. L., Chaplin, W. J., Dunham, E. W., et al. 2011, ApJS, 197, 6
(http://adsabs.harvard.edu/abs/2011ApJS..197....6G)
The steps are:
    - pass the time-series through a Savitzky-Golay filter.
- we use `scipy.signal.savgol_filter`, `**kwargs` are passed to this.
- also see: http://scipy.github.io/old-wiki/pages/Cookbook/SavitzkyGolay.
- the `windowlength` is the number of LC points to use (Kepler uses 2 days
= (1440 minutes/day / 30 minutes/LC point) x 2 days = 96 -> 97 LC
points).
- the `polyorder` is a quadratic by default.
- subtract the smoothed time-series from the actual light curve.
- sigma clip the remaining LC.
- get the binned mag series by averaging over 6.5 hour bins, only retaining
bins with at least 7 points.
- the standard deviation of the binned averages is the CDPP.
- multiply this by 1.168 to correct for over-subtraction of white-noise.
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to calculate CDPP for.
windowlength : int
The smoothing window size to use.
polyorder : int
        The polynomial order to use in the Savitzky-Golay smoothing.
binsize : int
The bin size to use for binning the light curve.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
If True, indicates the input time-series is fluxes and not mags.
kwargs : additional kwargs
These are passed directly to `scipy.signal.savgol_filter`.
Returns
-------
float
The calculated CDPP value.
'''
# if no errs are given, assume 0.1% errors
if errs is None:
errs = 0.001*mags
# get rid of nans first
find = npisfinite(times) & npisfinite(mags) & npisfinite(errs)
ftimes = times[find]
fmags = mags[find]
ferrs = errs[find]
if ftimes.size < (3*windowlength):
LOGERROR('not enough LC points to calculate CDPP')
return npnan
# now get the smoothed mag series using the filter
# kwargs are provided to the savgol_filter function
smoothed = savgol_filter(fmags, windowlength, polyorder, **kwargs)
subtracted = fmags - smoothed
# sigclip the subtracted light curve
stimes, smags, serrs = sigclip_magseries(ftimes, subtracted, ferrs,
magsarefluxes=magsarefluxes)
# bin over 6.5 hour bins and throw away all bins with less than 7 elements
binned = time_bin_magseries_with_errs(stimes, smags, serrs,
binsize=binsize,
minbinelems=7)
bmags = binned['binnedmags']
# stdev of bin mags x 1.168 -> CDPP
cdpp = npstd(bmags) * 1.168
return cdpp
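# A minimal sketch for gilliland_cdpp on synthetic ~30-minute cadence data --
# not part of the original module. For pure white noise, the result should be
# roughly the per-point scatter divided by sqrt(points per 6.5 hr bin), times
# the 1.168 correction; the numbers here are illustrative only.
import numpy as np

_exc_rng = np.random.RandomState(5)
_exc_times = np.arange(0.0, 30.0, 0.0204)        # ~29.4 min cadence, 30 days
_exc_flux = 1.0 + 2.0e-4*_exc_rng.randn(_exc_times.size)
_exc_errs = np.full_like(_exc_flux, 2.0e-4)
print('CDPP:', gilliland_cdpp(_exc_times, _exc_flux, _exc_errs,
                              magsarefluxes=True))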
|
def all_nonperiodic_features(times, mags, errs,
magsarefluxes=False,
stetson_weightbytimediff=True):
'''This rolls up the feature functions above and returns a single dict.
NOTE: this doesn't calculate the CDPP to save time since binning and
smoothing takes a while for dense light curves.
Parameters
----------
times,mags,errs : np.array
        The input mag/flux time-series to process.
magsarefluxes : bool
If True, indicates `mags` is actually an array of flux values.
stetson_weightbytimediff : bool
If this is True, the Stetson index for any pair of mags will be
reweighted by the difference in times between them using the scheme in
        Fruth+ 2012 and Zhang+ 2003 (as seen in Sokolovsky+ 2017)::
            w_i = exp( -(t_(i+1) - t_i) / delta_t )
Returns
-------
dict
Returns a dict with all of the variability features.
'''
# remove nans first
finiteind = npisfinite(times) & npisfinite(mags) & npisfinite(errs)
ftimes, fmags, ferrs = times[finiteind], mags[finiteind], errs[finiteind]
# remove zero errors
nzind = npnonzero(ferrs)
ftimes, fmags, ferrs = ftimes[nzind], fmags[nzind], ferrs[nzind]
xfeatures = nonperiodic_lightcurve_features(times, mags, errs,
magsarefluxes=magsarefluxes)
stetj = stetson_jindex(ftimes, fmags, ferrs,
weightbytimediff=stetson_weightbytimediff)
stetk = stetson_kindex(fmags, ferrs)
xfeatures.update({'stetsonj':stetj,
'stetsonk':stetk})
return xfeatures
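# A short sketch for all_nonperiodic_features -- not part of the original
# module; the synthetic data is illustrative only.
import numpy as np

_exa_rng = np.random.RandomState(6)
_exa_times = np.arange(0.0, 50.0, 0.05)
_exa_mags = (12.0 + 0.05*np.sin(2.0*np.pi*_exa_times/2.3) +
             0.01*_exa_rng.randn(_exa_times.size))
_exa_errs = np.full_like(_exa_mags, 0.01)
_exa_all = all_nonperiodic_features(_exa_times, _exa_mags, _exa_errs)
print(_exa_all['stetsonj'], _exa_all['stetsonk'], _exa_all['eta_normal'])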
|
def _bls_runner(times,
mags,
nfreq,
freqmin,
stepsize,
nbins,
minduration,
maxduration):
'''This runs the pyeebls.eebls function using the given inputs.
Parameters
----------
times,mags : np.array
The input magnitude time-series to search for transits.
nfreq : int
The number of frequencies to use when searching for transits.
freqmin : float
The minimum frequency of the period-search -> max period that will be
used for the search.
stepsize : float
The step-size in frequency to use to generate a frequency-grid.
nbins : int
The number of phase bins to use.
minduration : float
The minimum fractional transit duration that will be considered.
maxduration : float
The maximum fractional transit duration that will be considered.
Returns
-------
dict
Returns a dict of the form::
{
'power': the periodogram power array,
'bestperiod': the best period found,
'bestpower': the highest peak of the periodogram power,
'transdepth': transit depth found by eebls.f,
'transduration': transit duration found by eebls.f,
'transingressbin': transit ingress bin found by eebls.f,
'transegressbin': transit egress bin found by eebls.f,
}
'''
workarr_u = npones(times.size)
workarr_v = npones(times.size)
blsresult = eebls(times, mags,
workarr_u, workarr_v,
nfreq, freqmin, stepsize,
nbins, minduration, maxduration)
return {'power':blsresult[0],
'bestperiod':blsresult[1],
'bestpower':blsresult[2],
'transdepth':blsresult[3],
'transduration':blsresult[4],
'transingressbin':blsresult[5],
'transegressbin':blsresult[6]}
|
def _parallel_bls_worker(task):
'''
This wraps the BLS function for the parallel driver below.
Parameters
----------
tasks : tuple
This is of the form::
task[0] = times
task[1] = mags
task[2] = nfreq
task[3] = freqmin
task[4] = stepsize
task[5] = nbins
task[6] = minduration
task[7] = maxduration
Returns
-------
dict
Returns a dict of the form::
{
'power': the periodogram power array,
'bestperiod': the best period found,
'bestpower': the highest peak of the periodogram power,
'transdepth': transit depth found by eebls.f,
'transduration': transit duration found by eebls.f,
'transingressbin': transit ingress bin found by eebls.f,
'transegressbin': transit egress bin found by eebls.f,
}
'''
try:
return _bls_runner(*task)
except Exception as e:
LOGEXCEPTION('BLS failed for task %s' % repr(task[2:]))
return {
'power':nparray([npnan for x in range(task[2])]),
'bestperiod':npnan,
'bestpower':npnan,
'transdepth':npnan,
'transduration':npnan,
'transingressbin':npnan,
'transegressbin':npnan
}
|
def bls_serial_pfind(
times, mags, errs,
magsarefluxes=False,
startp=0.1, # search from 0.1 d to...
endp=100.0, # ... 100.0 d -- don't search full timebase
stepsize=5.0e-4,
mintransitduration=0.01, # minimum transit length in phase
maxtransitduration=0.4, # maximum transit length in phase
nphasebins=200,
autofreq=True, # figure out f0, nf, and df automatically
periodepsilon=0.1,
nbestpeaks=5,
sigclip=10.0,
verbose=True,
get_stats=True,
):
'''Runs the Box Least Squares Fitting Search for transit-shaped signals.
Based on eebls.f from Kovacs et al. 2002 and python-bls from Foreman-Mackey
et al. 2015. This is the serial version (which is good enough in most cases
because BLS in Fortran is fairly fast). If nfreq > 5e5, this will take a
while.
Parameters
----------
times,mags,errs : np.array
The magnitude/flux time-series to search for transits.
magsarefluxes : bool
If the input measurement values in `mags` and `errs` are in fluxes, set
this to True.
startp,endp : float
The minimum and maximum periods to consider for the transit search.
stepsize : float
The step-size in frequency to use when constructing a frequency grid for
the period search.
mintransitduration,maxtransitduration : float
The minimum and maximum transitdurations (in units of phase) to consider
for the transit search.
nphasebins : int
The number of phase bins to use in the period search.
autofreq : bool
If this is True, the values of `stepsize` and `nphasebins` will be
ignored, and these, along with a frequency-grid, will be determined
based on the following relations::
nphasebins = int(ceil(2.0/mintransitduration))
if nphasebins > 3000:
nphasebins = 3000
stepsize = 0.25*mintransitduration/(times.max()-times.min())
minfreq = 1.0/endp
maxfreq = 1.0/startp
nfreq = int(ceil((maxfreq - minfreq)/stepsize))
periodepsilon : float
The fractional difference between successive values of 'best' periods
when sorting by periodogram power to consider them as separate periods
(as opposed to part of the same periodogram peak). This is used to avoid
broad peaks in the periodogram and make sure the 'best' periods returned
are all actually independent.
nbestpeaks : int
The number of 'best' peaks to return from the periodogram results,
starting from the global maximum of the periodogram peak values.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
verbose : bool
If this is True, will indicate progress and details about the frequency
grid used for the period search.
get_stats : bool
If True, runs :py:func:`.bls_stats_singleperiod` for each of the best
periods in the output and injects the output into the output dict so you
only have to run this function to get the periods and their stats.
Returns
-------
dict
This function returns a dict, referred to as an `lspinfo` dict in other
astrobase functions that operate on periodogram results. This is a
standardized format across all astrobase period-finders, and is of the
form below::
{'bestperiod': the best period value in the periodogram,
'bestlspval': the periodogram peak associated with the best period,
'nbestpeaks': the input value of nbestpeaks,
'nbestlspvals': nbestpeaks-size list of best period peak values,
'nbestperiods': nbestpeaks-size list of best periods,
'stats': BLS stats for each best period,
'lspvals': the full array of periodogram powers,
'frequencies': the full array of frequencies considered,
'periods': the full array of periods considered,
'blsresult': the result dict from the eebls.f wrapper function,
'stepsize': the actual stepsize used,
'nfreq': the actual nfreq used,
'nphasebins': the actual nphasebins used,
'mintransitduration': the input mintransitduration,
         'maxtransitduration': the input maxtransitduration,
'method':'bls' -> the name of the period-finder method,
'kwargs':{ dict of all of the input kwargs for record-keeping}}
'''
# get rid of nans first and sigclip
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
# make sure there are enough points to calculate a spectrum
if len(stimes) > 9 and len(smags) > 9 and len(serrs) > 9:
# if we're setting up everything automatically
if autofreq:
# figure out the best number of phasebins to use
nphasebins = int(npceil(2.0/mintransitduration))
if nphasebins > 3000:
nphasebins = 3000
# use heuristic to figure out best timestep
stepsize = 0.25*mintransitduration/(stimes.max()-stimes.min())
# now figure out the frequencies to use
minfreq = 1.0/endp
maxfreq = 1.0/startp
nfreq = int(npceil((maxfreq - minfreq)/stepsize))
# say what we're using
if verbose:
LOGINFO('min P: %s, max P: %s, nfreq: %s, '
'minfreq: %s, maxfreq: %s' % (startp, endp, nfreq,
minfreq, maxfreq))
LOGINFO('autofreq = True: using AUTOMATIC values for '
'freq stepsize: %s, nphasebins: %s, '
'min transit duration: %s, max transit duration: %s' %
(stepsize, nphasebins,
mintransitduration, maxtransitduration))
else:
minfreq = 1.0/endp
maxfreq = 1.0/startp
nfreq = int(npceil((maxfreq - minfreq)/stepsize))
# say what we're using
if verbose:
LOGINFO('min P: %s, max P: %s, nfreq: %s, '
'minfreq: %s, maxfreq: %s' % (startp, endp, nfreq,
minfreq, maxfreq))
LOGINFO('autofreq = False: using PROVIDED values for '
'freq stepsize: %s, nphasebins: %s, '
'min transit duration: %s, max transit duration: %s' %
(stepsize, nphasebins,
mintransitduration, maxtransitduration))
if nfreq > 5.0e5:
if verbose:
LOGWARNING('more than 5.0e5 frequencies to go through; '
'this will take a while. '
'you might want to use the '
'periodbase.bls_parallel_pfind function instead')
if minfreq < (1.0/(stimes.max() - stimes.min())):
if verbose:
LOGWARNING('the requested max P = %.3f is larger than '
'the time base of the observations = %.3f, '
' will make minfreq = 2 x 1/timebase'
% (endp, stimes.max() - stimes.min()))
minfreq = 2.0/(stimes.max() - stimes.min())
if verbose:
LOGINFO('new minfreq: %s, maxfreq: %s' %
(minfreq, maxfreq))
# run BLS
try:
blsresult = _bls_runner(stimes,
smags,
nfreq,
minfreq,
stepsize,
nphasebins,
mintransitduration,
maxtransitduration)
# find the peaks in the BLS. this uses wavelet transforms to
# smooth the spectrum and find peaks. a similar thing would be
# to do a convolution with a gaussian kernel or a tophat
# function, calculate d/dx(result), then get indices where this
# is zero
# blspeakinds = find_peaks_cwt(blsresults['power'],
# nparray([2.0,3.0,4.0,5.0]))
frequencies = minfreq + nparange(nfreq)*stepsize
periods = 1.0/frequencies
lsp = blsresult['power']
# find the nbestpeaks for the periodogram: 1. sort the lsp array
# by highest value first 2. go down the values until we find
# five values that are separated by at least periodepsilon in
# period
# make sure to get only the finite peaks in the periodogram
# this is needed because BLS may produce infs for some peaks
finitepeakind = npisfinite(lsp)
finlsp = lsp[finitepeakind]
finperiods = periods[finitepeakind]
# make sure that finlsp has finite values before we work on it
try:
bestperiodind = npargmax(finlsp)
except ValueError:
LOGERROR('no finite periodogram values '
'for this mag series, skipping...')
return {'bestperiod':npnan,
'bestlspval':npnan,
'nbestpeaks':nbestpeaks,
'nbestlspvals':None,
'nbestperiods':None,
'lspvals':None,
'periods':None,
'method':'bls',
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'nphasebins':nphasebins,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip,
'magsarefluxes':magsarefluxes}}
sortedlspind = npargsort(finlsp)[::-1]
sortedlspperiods = finperiods[sortedlspind]
sortedlspvals = finlsp[sortedlspind]
# now get the nbestpeaks
nbestperiods, nbestlspvals, peakcount = (
[finperiods[bestperiodind]],
[finlsp[bestperiodind]],
1
)
prevperiod = sortedlspperiods[0]
# find the best nbestpeaks in the lsp and their periods
for period, lspval in zip(sortedlspperiods, sortedlspvals):
if peakcount == nbestpeaks:
break
perioddiff = abs(period - prevperiod)
bestperiodsdiff = [abs(period - x) for x in nbestperiods]
# print('prevperiod = %s, thisperiod = %s, '
# 'perioddiff = %s, peakcount = %s' %
# (prevperiod, period, perioddiff, peakcount))
# this ensures that this period is different from the last
# period and from all the other existing best periods by
# periodepsilon to make sure we jump to an entire different
# peak in the periodogram
if (perioddiff > (periodepsilon*prevperiod) and
all(x > (periodepsilon*period)
for x in bestperiodsdiff)):
nbestperiods.append(period)
nbestlspvals.append(lspval)
peakcount = peakcount + 1
prevperiod = period
# generate the return dict
resultdict = {
'bestperiod':finperiods[bestperiodind],
'bestlspval':finlsp[bestperiodind],
'nbestpeaks':nbestpeaks,
'nbestlspvals':nbestlspvals,
'nbestperiods':nbestperiods,
'lspvals':lsp,
'frequencies':frequencies,
'periods':periods,
'blsresult':blsresult,
'stepsize':stepsize,
'nfreq':nfreq,
'nphasebins':nphasebins,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'method':'bls',
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'nphasebins':nphasebins,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip,
'magsarefluxes':magsarefluxes}
}
# get stats if requested
if get_stats:
resultdict['stats'] = []
for bp in nbestperiods:
if verbose:
LOGINFO("Getting stats for best period: %.6f" % bp)
this_pstats = bls_stats_singleperiod(
times, mags, errs, bp,
magsarefluxes=resultdict['kwargs']['magsarefluxes'],
sigclip=resultdict['kwargs']['sigclip'],
nphasebins=resultdict['nphasebins'],
mintransitduration=resultdict['mintransitduration'],
maxtransitduration=resultdict['maxtransitduration'],
verbose=verbose,
)
resultdict['stats'].append(this_pstats)
return resultdict
except Exception as e:
LOGEXCEPTION('BLS failed!')
return {'bestperiod':npnan,
'bestlspval':npnan,
'nbestpeaks':nbestpeaks,
'nbestlspvals':None,
'nbestperiods':None,
'lspvals':None,
'periods':None,
'blsresult':None,
'stepsize':stepsize,
'nfreq':nfreq,
'nphasebins':nphasebins,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'method':'bls',
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'nphasebins':nphasebins,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip,
'magsarefluxes':magsarefluxes}}
else:
LOGERROR('no good detections for these times and mags, skipping...')
return {'bestperiod':npnan,
'bestlspval':npnan,
'nbestpeaks':nbestpeaks,
'nbestlspvals':None,
'nbestperiods':None,
'lspvals':None,
'periods':None,
'blsresult':None,
'stepsize':stepsize,
'nfreq':None,
'nphasebins':None,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'method':'bls',
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'nphasebins':nphasebins,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip,
'magsarefluxes':magsarefluxes}}
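# A minimal sketch running bls_serial_pfind on a synthetic box transit -- not
# part of the original module. The injected 2.7 d period, 1.1 d epoch, and
# 1% depth are arbitrary; with ~30-minute cadence and a 1-10 day search this
# runs quickly.
import numpy as np

_bls_rng = np.random.RandomState(7)
_bls_times = np.arange(0.0, 27.4, 0.0204)
_bls_flux = 1.0 + 2.0e-3*_bls_rng.randn(_bls_times.size)
_bls_phase = ((_bls_times - 1.1)/2.7) % 1.0
_bls_flux[(_bls_phase < 0.015) | (_bls_phase > 0.985)] -= 0.01
_bls_errs = np.full_like(_bls_flux, 2.0e-3)
_bls_res = bls_serial_pfind(_bls_times, _bls_flux, _bls_errs,
                            magsarefluxes=True,
                            startp=1.0, endp=10.0,
                            get_stats=False, verbose=False)
print(_bls_res['bestperiod'], _bls_res['nbestperiods'])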
|
def bls_parallel_pfind(
times, mags, errs,
magsarefluxes=False,
startp=0.1, # by default, search from 0.1 d to...
endp=100.0, # ... 100.0 d -- don't search full timebase
stepsize=1.0e-4,
mintransitduration=0.01, # minimum transit length in phase
maxtransitduration=0.4, # maximum transit length in phase
nphasebins=200,
autofreq=True, # figure out f0, nf, and df automatically
nbestpeaks=5,
periodepsilon=0.1, # 0.1
sigclip=10.0,
verbose=True,
nworkers=None,
get_stats=True,
):
'''Runs the Box Least Squares Fitting Search for transit-shaped signals.
Based on eebls.f from Kovacs et al. 2002 and python-bls from Foreman-Mackey
et al. 2015. Breaks up the full frequency space into chunks and passes them
to parallel BLS workers.
NOTE: the combined BLS spectrum produced by this function is not identical
to that produced by running BLS in one shot for the entire frequency
space. There are differences on the order of 1.0e-3 or so in the respective
peak values, but peaks appear at the same frequencies for both methods. This
is likely due to different aliasing caused by smaller chunks of the
frequency space used by the parallel workers in this function. When in
doubt, confirm results for this parallel implementation by comparing to
those from the serial implementation above.
Parameters
----------
times,mags,errs : np.array
The magnitude/flux time-series to search for transits.
magsarefluxes : bool
If the input measurement values in `mags` and `errs` are in fluxes, set
this to True.
startp,endp : float
The minimum and maximum periods to consider for the transit search.
stepsize : float
The step-size in frequency to use when constructing a frequency grid for
the period search.
mintransitduration,maxtransitduration : float
The minimum and maximum transitdurations (in units of phase) to consider
for the transit search.
nphasebins : int
The number of phase bins to use in the period search.
autofreq : bool
If this is True, the values of `stepsize` and `nphasebins` will be
ignored, and these, along with a frequency-grid, will be determined
based on the following relations::
nphasebins = int(ceil(2.0/mintransitduration))
if nphasebins > 3000:
nphasebins = 3000
stepsize = 0.25*mintransitduration/(times.max()-times.min())
minfreq = 1.0/endp
maxfreq = 1.0/startp
nfreq = int(ceil((maxfreq - minfreq)/stepsize))
periodepsilon : float
The fractional difference between successive values of 'best' periods
when sorting by periodogram power to consider them as separate periods
(as opposed to part of the same periodogram peak). This is used to avoid
broad peaks in the periodogram and make sure the 'best' periods returned
are all actually independent.
nbestpeaks : int
The number of 'best' peaks to return from the periodogram results,
starting from the global maximum of the periodogram peak values.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
verbose : bool
If this is True, will indicate progress and details about the frequency
grid used for the period search.
nworkers : int or None
The number of parallel workers to launch for period-search. If None,
nworkers = NCPUS.
get_stats : bool
If True, runs :py:func:`.bls_stats_singleperiod` for each of the best
periods in the output and injects the output into the output dict so you
only have to run this function to get the periods and their stats.
Returns
-------
dict
This function returns a dict, referred to as an `lspinfo` dict in other
astrobase functions that operate on periodogram results. This is a
standardized format across all astrobase period-finders, and is of the
form below::
{'bestperiod': the best period value in the periodogram,
'bestlspval': the periodogram peak associated with the best period,
'nbestpeaks': the input value of nbestpeaks,
'nbestlspvals': nbestpeaks-size list of best period peak values,
'nbestperiods': nbestpeaks-size list of best periods,
'stats': list of stats dicts returned for each best period,
'lspvals': the full array of periodogram powers,
'frequencies': the full array of frequencies considered,
'periods': the full array of periods considered,
'blsresult': list of result dicts from eebls.f wrapper functions,
'stepsize': the actual stepsize used,
'nfreq': the actual nfreq used,
'nphasebins': the actual nphasebins used,
'mintransitduration': the input mintransitduration,
         'maxtransitduration': the input maxtransitduration,
'method':'bls' -> the name of the period-finder method,
'kwargs':{ dict of all of the input kwargs for record-keeping}}
'''
# get rid of nans first and sigclip
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
# make sure there are enough points to calculate a spectrum
if len(stimes) > 9 and len(smags) > 9 and len(serrs) > 9:
# if we're setting up everything automatically
if autofreq:
# figure out the best number of phasebins to use
nphasebins = int(npceil(2.0/mintransitduration))
if nphasebins > 3000:
nphasebins = 3000
# use heuristic to figure out best timestep
stepsize = 0.25*mintransitduration/(stimes.max()-stimes.min())
# now figure out the frequencies to use
minfreq = 1.0/endp
maxfreq = 1.0/startp
nfreq = int(npceil((maxfreq - minfreq)/stepsize))
# say what we're using
if verbose:
LOGINFO('min P: %s, max P: %s, nfreq: %s, '
'minfreq: %s, maxfreq: %s' % (startp, endp, nfreq,
minfreq, maxfreq))
LOGINFO('autofreq = True: using AUTOMATIC values for '
'freq stepsize: %s, nphasebins: %s, '
'min transit duration: %s, max transit duration: %s' %
(stepsize, nphasebins,
mintransitduration, maxtransitduration))
else:
minfreq = 1.0/endp
maxfreq = 1.0/startp
nfreq = int(npceil((maxfreq - minfreq)/stepsize))
# say what we're using
if verbose:
LOGINFO('min P: %s, max P: %s, nfreq: %s, '
'minfreq: %s, maxfreq: %s' % (startp, endp, nfreq,
minfreq, maxfreq))
LOGINFO('autofreq = False: using PROVIDED values for '
'freq stepsize: %s, nphasebins: %s, '
'min transit duration: %s, max transit duration: %s' %
(stepsize, nphasebins,
mintransitduration, maxtransitduration))
# check the minimum frequency
if minfreq < (1.0/(stimes.max() - stimes.min())):
minfreq = 2.0/(stimes.max() - stimes.min())
if verbose:
LOGWARNING('the requested max P = %.3f is larger than '
'the time base of the observations = %.3f, '
' will make minfreq = 2 x 1/timebase'
% (endp, stimes.max() - stimes.min()))
LOGINFO('new minfreq: %s, maxfreq: %s' %
(minfreq, maxfreq))
#############################
## NOW RUN BLS IN PARALLEL ##
#############################
# fix number of CPUs if needed
if not nworkers or nworkers > NCPUS:
nworkers = NCPUS
if verbose:
LOGINFO('using %s workers...' % nworkers)
# the frequencies array to be searched
frequencies = minfreq + nparange(nfreq)*stepsize
# break up the tasks into chunks
csrem = int(fmod(nfreq, nworkers))
csint = int(float(nfreq/nworkers))
chunk_minfreqs, chunk_nfreqs = [], []
for x in range(nworkers):
this_minfreqs = frequencies[x*csint]
# handle usual nfreqs
if x < (nworkers - 1):
this_nfreqs = frequencies[x*csint:x*csint+csint].size
else:
this_nfreqs = frequencies[x*csint:x*csint+csint+csrem].size
chunk_minfreqs.append(this_minfreqs)
chunk_nfreqs.append(this_nfreqs)
        # populate the tasks list; each task follows the _bls_runner argument
        # order: (times, mags, nfreq, freqmin, stepsize, nbins, minduration,
        # maxduration)
        tasks = [(stimes, smags,
                  chunk_nf, chunk_minf,
                  stepsize, nphasebins,
                  mintransitduration, maxtransitduration)
                 for (chunk_minf, chunk_nf)
                 in zip(chunk_minfreqs, chunk_nfreqs)]
if verbose:
for ind, task in enumerate(tasks):
LOGINFO('worker %s: minfreq = %.6f, nfreqs = %s' %
(ind+1, task[3], task[2]))
LOGINFO('running...')
# return tasks
# start the pool
pool = Pool(nworkers)
results = pool.map(_parallel_bls_worker, tasks)
pool.close()
pool.join()
del pool
# now concatenate the output lsp arrays
lsp = npconcatenate([x['power'] for x in results])
periods = 1.0/frequencies
# find the nbestpeaks for the periodogram: 1. sort the lsp array
# by highest value first 2. go down the values until we find
# five values that are separated by at least periodepsilon in
# period
# make sure to get only the finite peaks in the periodogram
# this is needed because BLS may produce infs for some peaks
finitepeakind = npisfinite(lsp)
finlsp = lsp[finitepeakind]
finperiods = periods[finitepeakind]
# make sure that finlsp has finite values before we work on it
try:
bestperiodind = npargmax(finlsp)
except ValueError:
LOGERROR('no finite periodogram values '
'for this mag series, skipping...')
return {'bestperiod':npnan,
'bestlspval':npnan,
'nbestpeaks':nbestpeaks,
'nbestlspvals':None,
'nbestperiods':None,
'lspvals':None,
'periods':None,
'blsresult':None,
'method':'bls',
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'nphasebins':nphasebins,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip,
'magsarefluxes':magsarefluxes}}
sortedlspind = npargsort(finlsp)[::-1]
sortedlspperiods = finperiods[sortedlspind]
sortedlspvals = finlsp[sortedlspind]
# now get the nbestpeaks
nbestperiods, nbestlspvals, peakcount = (
[finperiods[bestperiodind]],
[finlsp[bestperiodind]],
1
)
prevperiod = sortedlspperiods[0]
# find the best nbestpeaks in the lsp and their periods
for period, lspval in zip(sortedlspperiods, sortedlspvals):
if peakcount == nbestpeaks:
break
perioddiff = abs(period - prevperiod)
bestperiodsdiff = [abs(period - x) for x in nbestperiods]
# this ensures that this period is different from the last
# period and from all the other existing best periods by
# periodepsilon to make sure we jump to an entire different
# peak in the periodogram
if (perioddiff > (periodepsilon*prevperiod) and
all(x > (periodepsilon*period) for x in bestperiodsdiff)):
nbestperiods.append(period)
nbestlspvals.append(lspval)
peakcount = peakcount + 1
prevperiod = period
# generate the return dict
resultdict = {
'bestperiod':finperiods[bestperiodind],
'bestlspval':finlsp[bestperiodind],
'nbestpeaks':nbestpeaks,
'nbestlspvals':nbestlspvals,
'nbestperiods':nbestperiods,
'lspvals':lsp,
'frequencies':frequencies,
'periods':periods,
'blsresult':results,
'stepsize':stepsize,
'nfreq':nfreq,
'nphasebins':nphasebins,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'method':'bls',
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'nphasebins':nphasebins,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip,
'magsarefluxes':magsarefluxes}
}
# get stats if requested
if get_stats:
resultdict['stats'] = []
for bp in nbestperiods.copy():
if verbose:
LOGINFO("Getting stats for best period: %.6f" % bp)
this_pstats = bls_stats_singleperiod(
times, mags, errs, bp,
magsarefluxes=resultdict['kwargs']['magsarefluxes'],
sigclip=resultdict['kwargs']['sigclip'],
nphasebins=resultdict['nphasebins'],
mintransitduration=resultdict['mintransitduration'],
maxtransitduration=resultdict['maxtransitduration'],
verbose=verbose,
)
resultdict['stats'].append(this_pstats)
return resultdict
else:
LOGERROR('no good detections for these times and mags, skipping...')
return {'bestperiod':npnan,
'bestlspval':npnan,
'nbestpeaks':nbestpeaks,
'nbestlspvals':None,
'nbestperiods':None,
'lspvals':None,
'periods':None,
'blsresult':None,
'stepsize':stepsize,
'nfreq':None,
'nphasebins':None,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'method':'bls',
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'nphasebins':nphasebins,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip,
'magsarefluxes':magsarefluxes}}
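# A short sketch for bls_parallel_pfind -- not part of the original module.
# The synthetic transit mirrors the serial example above; nworkers=2 is an
# arbitrary choice. The __main__ guard avoids re-import trouble with
# multiprocessing on spawn-based platforms.
if __name__ == '__main__':
    import numpy as np
    _blsp_rng = np.random.RandomState(8)
    _blsp_times = np.arange(0.0, 27.4, 0.0204)
    _blsp_flux = 1.0 + 2.0e-3*_blsp_rng.randn(_blsp_times.size)
    _blsp_flux[(((_blsp_times - 1.1)/2.7) % 1.0) < 0.02] -= 0.01
    _blsp_errs = np.full_like(_blsp_flux, 2.0e-3)
    _blsp_res = bls_parallel_pfind(_blsp_times, _blsp_flux, _blsp_errs,
                                   magsarefluxes=True,
                                   startp=1.0, endp=10.0,
                                   nworkers=2, get_stats=False, verbose=False)
    print(_blsp_res['bestperiod'])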
|
def _get_bls_stats(stimes,
smags,
serrs,
thistransdepth,
thistransduration,
ingressdurationfraction,
nphasebins,
thistransingressbin,
thistransegressbin,
thisbestperiod,
thisnphasebins,
magsarefluxes=False,
verbose=False):
'''
Actually calculates the stats.
'''
try:
# try getting the minimum light epoch using the phase bin method
me_epochbin = int((thistransegressbin +
thistransingressbin)/2.0)
me_phases = (
(stimes - stimes.min())/thisbestperiod -
npfloor((stimes - stimes.min())/thisbestperiod)
)
me_phases_sortind = npargsort(me_phases)
me_sorted_phases = me_phases[me_phases_sortind]
me_sorted_times = stimes[me_phases_sortind]
me_bins = nplinspace(0.0, 1.0, thisnphasebins)
me_bininds = npdigitize(me_sorted_phases, me_bins)
me_centertransit_ind = me_bininds == me_epochbin
me_centertransit_phase = (
npmedian(me_sorted_phases[me_centertransit_ind])
)
me_centertransit_timeloc = npwhere(
npabs(me_sorted_phases - me_centertransit_phase) ==
npmin(npabs(me_sorted_phases - me_centertransit_phase))
)
me_centertransit_time = me_sorted_times[
me_centertransit_timeloc
]
if me_centertransit_time.size > 1:
LOGWARNING('multiple possible times-of-center transits '
'found for period %.7f, picking the first '
'one from: %s' %
(thisbestperiod, repr(me_centertransit_time)))
thisminepoch = me_centertransit_time[0]
except Exception as e:
LOGEXCEPTION(
'could not determine the center time of transit for '
'the phased LC, trying SavGol fit instead...'
)
        # fit a Savitzky-Golay model instead and get its minimum
savfit = savgol_fit_magseries(stimes, smags, serrs,
thisbestperiod,
magsarefluxes=magsarefluxes,
verbose=verbose,
sigclip=None)
thisminepoch = savfit['fitinfo']['fitepoch']
if isinstance(thisminepoch, npndarray):
if verbose:
LOGWARNING('minimum epoch is actually an array:\n'
'%s\n'
'instead of a float, '
'are there duplicate time values '
'in the original input? '
'will use the first value in this array.'
% repr(thisminepoch))
thisminepoch = thisminepoch[0]
# set up trapezoid transit model to fit for this LC
transitparams = [
thisbestperiod,
thisminepoch,
thistransdepth,
thistransduration,
ingressdurationfraction*thistransduration
]
modelfit = traptransit_fit_magseries(
stimes,
smags,
serrs,
transitparams,
sigclip=None,
magsarefluxes=magsarefluxes,
verbose=verbose
)
# if the model fit succeeds, calculate SNR using the trapezoid model fit
if modelfit and modelfit['fitinfo']['finalparams'] is not None:
fitparams = modelfit['fitinfo']['finalparams']
fiterrs = modelfit['fitinfo']['finalparamerrs']
modelmags, actualmags, modelphase = (
modelfit['fitinfo']['fitmags'],
modelfit['magseries']['mags'],
modelfit['magseries']['phase']
)
subtractedmags = actualmags - modelmags
subtractedrms = npstd(subtractedmags)
fit_period, fit_epoch, fit_depth, fit_duration, fit_ingress_dur = (
fitparams
)
npts_in_transit = modelfit['fitinfo']['ntransitpoints']
transit_snr = (
npsqrt(npts_in_transit) * npabs(fit_depth/subtractedrms)
)
if verbose:
LOGINFO('refit best period: %.6f, '
'refit center of transit: %.5f' %
(fit_period, fit_epoch))
LOGINFO('npoints in transit: %s' % npts_in_transit)
LOGINFO('transit depth (delta): %.5f, '
'frac transit length (q): %.3f, '
' SNR: %.3f' %
(fit_depth,
fit_duration,
transit_snr))
return {'period':fit_period,
'epoch':fit_epoch,
'snr':transit_snr,
'transitdepth':fit_depth,
'transitduration':fit_duration,
'nphasebins':nphasebins,
'transingressbin':thistransingressbin,
'transegressbin':thistransegressbin,
'npoints_in_transit':npts_in_transit,
'blsmodel':modelmags,
'subtractedmags':subtractedmags,
'phasedmags':actualmags,
'phases':modelphase,
'fitparams':fitparams,
'fiterrs':fiterrs,
'fitinfo':modelfit}
# if the model fit doesn't work, then do the SNR calculation the old way
else:
# phase using this epoch
phased_magseries = phase_magseries_with_errs(stimes,
smags,
serrs,
thisbestperiod,
thisminepoch,
wrap=False,
sort=True)
tphase = phased_magseries['phase']
tmags = phased_magseries['mags']
# use the transit depth and duration to subtract the BLS transit
# model from the phased mag series. we're centered about 0.0 as the
# phase of the transit minimum so we need to look at stuff from
# [0.0, transitphase] and [1.0-transitphase, 1.0]
transitphase = thistransduration/2.0
transitindices = ((tphase < transitphase) |
(tphase > (1.0 - transitphase)))
        # this is the BLS model:
        # constant = median(tmags) outside transit
        # constant = median(tmags) - thistransdepth inside transit
blsmodel = npfull_like(tmags, npmedian(tmags))
if magsarefluxes:
# eebls.f returns +ve transit depth for fluxes
# so we need to subtract here to get fainter fluxes in transit
blsmodel[transitindices] = (
blsmodel[transitindices] - thistransdepth
)
else:
# eebls.f returns -ve transit depth for magnitudes
# so we need to subtract here to get fainter mags in transits
blsmodel[transitindices] = (
blsmodel[transitindices] - thistransdepth
)
# see __init__/get_snr_of_dip docstring for description of transit
# SNR equation, which is what we use for `thissnr`.
subtractedmags = tmags - blsmodel
subtractedrms = npstd(subtractedmags)
npts_in_transit = len(tmags[transitindices])
thissnr = (
npsqrt(npts_in_transit) * npabs(thistransdepth/subtractedrms)
)
# tell user about stuff if verbose = True
if verbose:
LOGINFO('refit best period: %.6f, '
'refit center of transit: %.5f' %
(thisbestperiod, thisminepoch))
LOGINFO('transit ingress phase = %.3f to %.3f' % (1.0 -
transitphase,
1.0))
LOGINFO('transit egress phase = %.3f to %.3f' % (0.0,
transitphase))
LOGINFO('npoints in transit: %s' % tmags[transitindices].size)
LOGINFO('transit depth (delta): %.5f, '
'frac transit length (q): %.3f, '
' SNR: %.3f' %
(thistransdepth,
thistransduration,
thissnr))
return {'period':thisbestperiod,
'epoch':thisminepoch,
'snr':thissnr,
'transitdepth':thistransdepth,
'transitduration':thistransduration,
'nphasebins':nphasebins,
'transingressbin':thistransingressbin,
'transegressbin':thistransegressbin,
'blsmodel':blsmodel,
'subtractedmags':subtractedmags,
'phasedmags':tmags,
'phases':tphase}
|
def bls_stats_singleperiod(times, mags, errs, period,
magsarefluxes=False,
sigclip=10.0,
perioddeltapercent=10,
nphasebins=200,
mintransitduration=0.01,
maxtransitduration=0.4,
ingressdurationfraction=0.1,
verbose=True):
'''This calculates the SNR, depth, duration, a refit period, and time of
center-transit for a single period.
The equation used for SNR is::
SNR = (transit model depth / RMS of LC with transit model subtracted)
* sqrt(number of points in transit)
NOTE: you should set the kwargs `sigclip`, `nphasebins`,
`mintransitduration`, `maxtransitduration` to what you used for an initial
BLS run to detect transits in the input light curve to match those input
conditions.
Parameters
----------
times,mags,errs : np.array
These contain the magnitude/flux time-series and any associated errors.
period : float
The period to search around and refit the transits. This will be used to
calculate the start and end periods of a rerun of BLS to calculate the
stats.
magsarefluxes : bool
Set to True if the input measurements in `mags` are actually fluxes and
not magnitudes.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
perioddeltapercent : float
The fraction of the period provided to use to search around this
value. This is a percentage. The period range searched will then be::
[period - (perioddeltapercent/100.0)*period,
period + (perioddeltapercent/100.0)*period]
nphasebins : int
The number of phase bins to use in the BLS run.
mintransitduration : float
The minimum transit duration in phase to consider.
maxtransitduration : float
The maximum transit duration to consider.
ingressdurationfraction : float
The fraction of the transit duration to use to generate an initial value
of the transit ingress duration for the BLS model refit. This will be
fit by this function.
verbose : bool
If True, will indicate progress and any problems encountered.
Returns
-------
dict
A dict of the following form is returned::
{'period': the refit best period,
'epoch': the refit epoch (i.e. mid-transit time),
'snr':the SNR of the transit,
'transitdepth':the depth of the transit,
'transitduration':the duration of the transit,
'nphasebins':the input value of nphasebins,
'transingressbin':the phase bin containing transit ingress,
'transegressbin':the phase bin containing transit egress,
'blsmodel':the full BLS model used along with its parameters,
         'subtractedmags':the phased light curve with the BLS model subtracted,
         'phasedmags':the phased light curve,
'phases': the phase values}
'''
# get rid of nans first and sigclip
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
# make sure there are enough points to calculate a spectrum
if len(stimes) > 9 and len(smags) > 9 and len(serrs) > 9:
# get the period interval
startp = period - perioddeltapercent*period/100.0
if startp < 0:
startp = period
endp = period + perioddeltapercent*period/100.0
# rerun BLS in serial mode around the specified period to get the
# transit depth, duration, ingress and egress bins
blsres = bls_serial_pfind(stimes, smags, serrs,
verbose=verbose,
startp=startp,
endp=endp,
nphasebins=nphasebins,
mintransitduration=mintransitduration,
maxtransitduration=maxtransitduration,
magsarefluxes=magsarefluxes,
get_stats=False,
sigclip=None)
if (not blsres or
'blsresult' not in blsres or
blsres['blsresult'] is None):
LOGERROR("BLS failed during a period-search "
"performed around the input best period: %.6f. "
"Can't continue. " % period)
return None
thistransdepth = blsres['blsresult']['transdepth']
thistransduration = blsres['blsresult']['transduration']
thisbestperiod = blsres['bestperiod']
thistransingressbin = blsres['blsresult']['transingressbin']
thistransegressbin = blsres['blsresult']['transegressbin']
thisnphasebins = nphasebins
stats = _get_bls_stats(stimes,
smags,
serrs,
thistransdepth,
thistransduration,
ingressdurationfraction,
nphasebins,
thistransingressbin,
thistransegressbin,
thisbestperiod,
thisnphasebins,
magsarefluxes=magsarefluxes,
verbose=verbose)
return stats
# if there aren't enough points in the mag series, bail out
else:
LOGERROR('no good detections for these times and mags, skipping...')
return None
|
def bls_snr(blsdict,
times,
mags,
errs,
assumeserialbls=False,
magsarefluxes=False,
sigclip=10.0,
npeaks=None,
perioddeltapercent=10,
ingressdurationfraction=0.1,
verbose=True):
'''Calculates the signal to noise ratio for each best peak in the BLS
periodogram, along with transit depth, duration, and refit period and epoch.
The following equation is used for SNR::
SNR = (transit model depth / RMS of LC with transit model subtracted)
* sqrt(number of points in transit)
Parameters
----------
blsdict : dict
This is an lspinfo dict produced by either `bls_parallel_pfind` or
`bls_serial_pfind` in this module, or by your own BLS function. If you
provide results in a dict from an external BLS function, make sure this
matches the form below::
{'bestperiod': the best period value in the periodogram,
'bestlspval': the periodogram peak associated with the best period,
'nbestpeaks': the input value of nbestpeaks,
'nbestlspvals': nbestpeaks-size list of best period peak values,
'nbestperiods': nbestpeaks-size list of best periods,
'lspvals': the full array of periodogram powers,
'frequencies': the full array of frequencies considered,
'periods': the full array of periods considered,
'blsresult': list of result dicts from eebls.f wrapper functions,
'stepsize': the actual stepsize used,
'nfreq': the actual nfreq used,
'nphasebins': the actual nphasebins used,
'mintransitduration': the input mintransitduration,
'maxtransitduration': the input maxtransitduration,
'method':'bls' -> the name of the period-finder method,
'kwargs':{ dict of all of the input kwargs for record-keeping}}
times,mags,errs : np.array
These contain the magnitude/flux time-series and any associated errors.
assumeserialbls : bool
If this is True, this function will not rerun BLS around each best peak
in the input lspinfo dict to refit the periods and epochs. Rerunning is
usually required for results from `bls_parallel_pfind`, so set this to
False if you use results from that function. The parallel method breaks
up the frequency space into chunks for speed, and the results may not
exactly match those from a regular BLS run.
magsarefluxes : bool
Set to True if the input measurements in `mags` are actually fluxes and
not magnitudes.
npeaks : int or None
This controls how many of the periods in `blsdict['nbestperiods']` to
find the SNR for. If it's None, then this will calculate the SNR for all
of them. If it's an integer between 1 and
`len(blsdict['nbestperiods'])`, will calculate for only the specified
number of peak periods, starting from the best period.
perioddeltapercent : float
The percentage of the provided period to use as the half-width of the
period range searched around that value. The period range searched will
then be::
[period - (perioddeltapercent/100.0)*period,
period + (perioddeltapercent/100.0)*period]
ingressdurationfraction : float
The fraction of the transit duration to use to generate an initial value
of the transit ingress duration for the BLS model refit. This will be
fit by this function.
verbose : bool
If True, will indicate progress and any problems encountered.
Returns
-------
dict
A dict of the following form is returned::
{'npeaks': the number of periodogram peaks requested to get SNR for,
'period': list of refit best periods for each requested peak,
'epoch': list of refit epochs (i.e. mid-transit times),
'snr':list of SNRs of the transit for each requested peak,
'transitdepth':list of depths of the transits,
'transitduration':list of durations of the transits,
'nphasebins':the input value of nphasebins,
'transingressbin':the phase bin containing transit ingress,
'transegressbin':the phase bin containing transit egress,
'allblsmodels':the full BLS models used along with its parameters,
'allsubtractedmags':BLS models - phased light curves,
'allphasedmags':the phased light curves,
'allphases': the phase values}
'''
# figure out how many periods to work on
if (npeaks and (0 < npeaks < len(blsdict['nbestperiods']))):
nperiods = npeaks
else:
if verbose:
LOGWARNING('npeaks not specified or invalid, '
'getting SNR for all %s BLS peaks' %
len(blsdict['nbestperiods']))
nperiods = len(blsdict['nbestperiods'])
nbestperiods = blsdict['nbestperiods'][:nperiods]
# get rid of nans first and sigclip
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
# make sure there are enough points to calculate a spectrum
if len(stimes) > 9 and len(smags) > 9 and len(serrs) > 9:
nbestsnrs = []
transitdepth, transitduration = [], []
nphasebins, transingressbin, transegressbin = [], [], []
# keep these around for diagnostics
allsubtractedmags = []
allphasedmags = []
allphases = []
allblsmodels = []
# these are refit periods and epochs
refitperiods = []
refitepochs = []
for period in nbestperiods:
# get the period interval
startp = period - perioddeltapercent*period/100.0
if startp < 0:
startp = period
endp = period + perioddeltapercent*period/100.0
# see if we need to rerun bls_serial_pfind
if not assumeserialbls:
# run bls_serial_pfind with the kwargs copied over from the
# initial run. replace only the startp, endp, verbose, sigclip
# kwarg values
prevkwargs = blsdict['kwargs'].copy()
prevkwargs['verbose'] = verbose
prevkwargs['startp'] = startp
prevkwargs['endp'] = endp
prevkwargs['sigclip'] = None
blsres = bls_serial_pfind(stimes,
smags,
serrs,
**prevkwargs)
else:
blsres = blsdict
thistransdepth = blsres['blsresult']['transdepth']
thistransduration = blsres['blsresult']['transduration']
thisbestperiod = blsres['bestperiod']
thistransingressbin = blsres['blsresult']['transingressbin']
thistransegressbin = blsres['blsresult']['transegressbin']
thisnphasebins = blsdict['kwargs']['nphasebins']
stats = _get_bls_stats(stimes,
smags,
serrs,
thistransdepth,
thistransduration,
ingressdurationfraction,
nphasebins,
thistransingressbin,
thistransegressbin,
thisbestperiod,
thisnphasebins,
magsarefluxes=magsarefluxes,
verbose=verbose)
# update the lists with results from this peak
nbestsnrs.append(stats['snr'])
transitdepth.append(stats['transitdepth'])
transitduration.append(stats['transitduration'])
transingressbin.append(stats['transingressbin'])
transegressbin.append(stats['transegressbin'])
nphasebins.append(stats['nphasebins'])
# update the refit periods and epochs
refitperiods.append(stats['period'])
refitepochs.append(stats['epoch'])
# update the diagnostics
allsubtractedmags.append(stats['subtractedmags'])
allphasedmags.append(stats['phasedmags'])
allphases.append(stats['phases'])
allblsmodels.append(stats['blsmodel'])
# done with working on each peak
# if there aren't enough points in the mag series, bail out
else:
LOGERROR('no good detections for these times and mags, skipping...')
nbestsnrs = None
transitdepth, transitduration = None, None
nphasebins, transingressbin, transegressbin = None, None, None
allsubtractedmags, allphases, allphasedmags = None, None, None
allblsmodels, refitperiods, refitepochs = None, None, None
return {'npeaks':npeaks,
'period':refitperiods,
'epoch':refitepochs,
'snr':nbestsnrs,
'transitdepth':transitdepth,
'transitduration':transitduration,
'nphasebins':nphasebins,
'transingressbin':transingressbin,
'transegressbin':transegressbin,
'allblsmodels':allblsmodels,
'allsubtractedmags':allsubtractedmags,
'allphasedmags':allphasedmags,
'allphases':allphases}
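# ----------------------------------------------------------------------------
# Illustrative usage sketch (hypothetical, not part of the library): run the
# parallel BLS period-finder and then compute per-peak transit SNRs with
# `bls_snr` above. The import path matches the released astrobase package; the
# synthetic box transit is only there to make the sketch self-contained.
def _demo_bls_snr():
    import numpy as np
    from astrobase.periodbase.kbls import bls_parallel_pfind

    # fake light curve: ~27 days at 30-minute cadence with a 3.1-day box dip
    times = np.arange(0.0, 27.0, 30.0/1440.0)
    mags = np.random.normal(12.0, 0.005, times.size)
    mags[(times % 3.1)/3.1 < 0.02] += 0.01   # 1% deep dip over 2% of the phase
    errs = np.full_like(times, 0.005)

    blsdict = bls_parallel_pfind(times, mags, errs,
                                 startp=1.0, endp=10.0,
                                 magsarefluxes=False)
    snrres = bls_snr(blsdict, times, mags, errs,
                     assumeserialbls=False,   # rerun BLS around each peak
                     npeaks=3)
    print(snrres['period'], snrres['snr'])
    return snrres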
|
def massradius(age, planetdist, coremass,
mass='massjupiter',
radius='radiusjupiter'):
'''This function gets the Fortney mass-radius relation for planets.
Parameters
----------
age : float
This should be one of: 0.3, 1.0, 4.5 [in Gyr].
planetdist : float
This should be one of: 0.02, 0.045, 0.1, 1.0, 9.5 [in AU]
coremass : int
This should be one of: 0, 10, 25, 50, 100 [in Mearth]
mass : {'massjupiter','massearth'}
Sets the mass units.
radius : str
Sets the radius units. Only 'radiusjupiter' is used for now.
Returns
-------
dict
A dict of the following form is returned::
{'mass': an array containing the masses to plot,
'radius': an array containing the radii to plot}
These can be passed to a plotting routine to make a mass-radius plot for
the specified age, planet-star distance, and core mass.
'''
MR = {0.3:MASSESRADII_0_3GYR,
1.0:MASSESRADII_1_0GYR,
4.5:MASSESRADII_4_5GYR}
if age not in MR:
print('given age not in Fortney 2007, returning...')
return
massradius = MR[age]
if (planetdist in massradius) and (coremass in massradius[planetdist]):
print('getting %s Gyr M-R for planet dist %s AU, '
'core mass %s Mearth...' % (age, planetdist, coremass))
massradrelation = massradius[planetdist][coremass]
outdict = {'mass':array(massradrelation[mass]),
'radius':array(massradrelation[radius])}
return outdict
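# ----------------------------------------------------------------------------
# Illustrative usage sketch (hypothetical helper, not part of the library):
# pull the Fortney et al. (2007) mass-radius curve for a 1 Gyr planet at
# 0.045 AU with a 10 Mearth core and plot it. Assumes matplotlib is available;
# the output filename is a placeholder.
def _demo_massradius_plot():
    import matplotlib.pyplot as plt
    # age [Gyr], planet-star distance [AU], and core mass [Mearth] must match
    # the tabulated grid values listed in the docstring above
    mr = massradius(1.0, 0.045, 10)
    if mr is not None:
        plt.plot(mr['mass'], mr['radius'])
        plt.xlabel('planet mass [Mjup]')
        plt.ylabel('planet radius [Rjup]')
        plt.savefig('fortney-massradius-1gyr-0.045au-10mearth.png',
                    bbox_inches='tight')
        plt.close('all')
    return mr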
|
def _collect_tfa_stats(task):
'''
This is a parallel worker to gather LC stats.
task[0] = lcfile
task[1] = lcformat
task[2] = lcformatdir
task[3] = timecols
task[4] = magcols
task[5] = errcols
task[6] = custom_bandpasses
'''
try:
(lcfile, lcformat, lcformatdir,
timecols, magcols, errcols,
custom_bandpasses) = task
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
# override the default timecols, magcols, and errcols
# using the ones provided to the function
if timecols is None:
timecols = dtimecols
if magcols is None:
magcols = dmagcols
if errcols is None:
errcols = derrcols
# get the LC into a dict
lcdict = readerfunc(lcfile)
# this should handle lists/tuples being returned by readerfunc
# we assume that the first element is the actual lcdict
# FIXME: figure out how to not need this assumption
if ( (isinstance(lcdict, (list, tuple))) and
(isinstance(lcdict[0], dict)) ):
lcdict = lcdict[0]
#
# collect the necessary stats for this light curve
#
# 1. number of observations
# 2. median mag
# 3. eta_normal
# 4. MAD
# 5. objectid
# 6. get mags and colors from objectinfo if there's one in lcdict
if 'objectid' in lcdict:
objectid = lcdict['objectid']
elif 'objectinfo' in lcdict and 'objectid' in lcdict['objectinfo']:
objectid = lcdict['objectinfo']['objectid']
elif 'objectinfo' in lcdict and 'hatid' in lcdict['objectinfo']:
objectid = lcdict['objectinfo']['hatid']
else:
LOGERROR('no objectid present in lcdict for LC %s, '
'using filename prefix as objectid' % lcfile)
objectid = os.path.splitext(os.path.basename(lcfile))[0]
if 'objectinfo' in lcdict:
colorfeat = starfeatures.color_features(
lcdict['objectinfo'],
deredden=False,
custom_bandpasses=custom_bandpasses
)
else:
LOGERROR('no objectinfo dict in lcdict, '
'could not get magnitudes for LC %s, '
'cannot use for TFA template ensemble' %
lcfile)
return None
# this is the initial dict
resultdict = {'objectid':objectid,
'ra':lcdict['objectinfo']['ra'],
'decl':lcdict['objectinfo']['decl'],
'colorfeat':colorfeat,
'lcfpath':os.path.abspath(lcfile),
'lcformat':lcformat,
'lcformatdir':lcformatdir,
'timecols':timecols,
'magcols':magcols,
'errcols':errcols}
for tcol, mcol, ecol in zip(timecols, magcols, errcols):
try:
# dereference the columns and get them from the lcdict
if '.' in tcol:
tcolget = tcol.split('.')
else:
tcolget = [tcol]
times = _dict_get(lcdict, tcolget)
if '.' in mcol:
mcolget = mcol.split('.')
else:
mcolget = [mcol]
mags = _dict_get(lcdict, mcolget)
if '.' in ecol:
ecolget = ecol.split('.')
else:
ecolget = [ecol]
errs = _dict_get(lcdict, ecolget)
# normalize here if not using special normalization
if normfunc is None:
ntimes, nmags = normalize_magseries(
times, mags,
magsarefluxes=magsarefluxes
)
times, mags, errs = ntimes, nmags, errs
# get the variability features for this object
varfeat = varfeatures.all_nonperiodic_features(
times, mags, errs
)
resultdict[mcol] = varfeat
except Exception as e:
LOGEXCEPTION('%s, magcol: %s, probably ran into all-nans' %
(lcfile, mcol))
resultdict[mcol] = {'ndet':0,
'mad':np.nan,
'eta_normal':np.nan}
return resultdict
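# ----------------------------------------------------------------------------
# Illustrative sketch (hypothetical, not part of the library): call the stats
# worker directly on a single light curve to inspect what it collects before
# launching the full parallel TFA template selection. The file path and the
# 'hat-sql' format key are placeholders; substitute your own registered
# lcformat and column names.
def _demo_collect_tfa_stats_single():
    task = ('/path/to/some-lightcurve.pkl',   # lcfile (placeholder path)
            'hat-sql',                        # lcformat (placeholder key)
            None,                             # lcformatdir
            None, None, None,                 # timecols, magcols, errcols -> defaults
            None)                             # custom_bandpasses
    stats = _collect_tfa_stats(task)
    if stats is not None:
        print(stats['objectid'], stats['ra'], stats['decl'])
        print(list(stats.keys()))
    return stats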
except Exception as e:
LOGEXCEPTION('could not execute get_tfa_stats for task: %s' %
repr(task))
return None
|
def _reform_templatelc_for_tfa(task):
'''
This is a parallel worker that reforms light curves for TFA.
task[0] = lcfile
task[1] = lcformat
task[2] = lcformatdir
task[3] = timecol
task[4] = magcol
task[5] = errcol
task[6] = timebase
task[7] = interpolate_type
task[8] = sigclip
'''
try:
(lcfile, lcformat, lcformatdir,
tcol, mcol, ecol,
timebase, interpolate_type, sigclip) = task
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
# get the LC into a dict
lcdict = readerfunc(lcfile)
# this should handle lists/tuples being returned by readerfunc
# we assume that the first element is the actual lcdict
# FIXME: figure out how to not need this assumption
if ( (isinstance(lcdict, (list, tuple))) and
(isinstance(lcdict[0], dict)) ):
lcdict = lcdict[0]
outdict = {}
# dereference the columns and get them from the lcdict
if '.' in tcol:
tcolget = tcol.split('.')
else:
tcolget = [tcol]
times = _dict_get(lcdict, tcolget)
if '.' in mcol:
mcolget = mcol.split('.')
else:
mcolget = [mcol]
mags = _dict_get(lcdict, mcolget)
if '.' in ecol:
ecolget = ecol.split('.')
else:
ecolget = [ecol]
errs = _dict_get(lcdict, ecolget)
# normalize here if not using special normalization
if normfunc is None:
ntimes, nmags = normalize_magseries(
times, mags,
magsarefluxes=magsarefluxes
)
times, mags, errs = ntimes, nmags, errs
#
# now we'll do: 1. sigclip, 2. reform to timebase, 3. renorm to zero
#
# 1. sigclip as requested
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
sigclip=sigclip)
# 2. now, we'll renorm to the timebase
mags_interpolator = spi.interp1d(stimes, smags,
kind=interpolate_type,
fill_value='extrapolate')
errs_interpolator = spi.interp1d(stimes, serrs,
kind=interpolate_type,
fill_value='extrapolate')
interpolated_mags = mags_interpolator(timebase)
interpolated_errs = errs_interpolator(timebase)
# 3. renorm to zero
magmedian = np.median(interpolated_mags)
renormed_mags = interpolated_mags - magmedian
# update the dict
outdict = {'mags':renormed_mags,
'errs':interpolated_errs,
'origmags':interpolated_mags}
#
# done with this magcol
#
return outdict
except Exception as e:
LOGEXCEPTION('reform LC task failed: %s' % repr(task))
return None
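# ----------------------------------------------------------------------------
# Self-contained sketch (illustrative only) of the reform steps used in the
# worker above: interpolate a magnitude series onto a common timebase and
# renormalize it to zero by subtracting the median. Uses only numpy and scipy
# with made-up numbers, so it can run independently of any light curve format.
def _demo_reform_to_timebase():
    import numpy as np
    from scipy import interpolate as spi

    # original, unevenly sampled series
    stimes = np.sort(np.random.uniform(0.0, 10.0, 200))
    smags = 12.0 + 0.01*np.sin(stimes) + np.random.normal(0.0, 0.005, 200)
    serrs = np.full_like(smags, 0.005)

    # common timebase to reform to (in the real code this comes from the
    # template LC with the most observations)
    timebase = np.linspace(0.0, 10.0, 500)

    mags_interpolator = spi.interp1d(stimes, smags, kind='nearest',
                                     fill_value='extrapolate')
    errs_interpolator = spi.interp1d(stimes, serrs, kind='nearest',
                                     fill_value='extrapolate')
    interpolated_mags = mags_interpolator(timebase)
    interpolated_errs = errs_interpolator(timebase)

    # renorm to zero so all template LCs share a common baseline
    renormed_mags = interpolated_mags - np.median(interpolated_mags)
    return {'mags': renormed_mags,
            'errs': interpolated_errs,
            'origmags': interpolated_mags}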
|
def tfa_templates_lclist(
lclist,
lcinfo_pkl=None,
outfile=None,
target_template_frac=0.1,
max_target_frac_obs=0.25,
min_template_number=10,
max_template_number=1000,
max_rms=0.15,
max_mult_above_magmad=1.5,
max_mult_above_mageta=1.5,
xieta_bins=20,
mag_bandpass='sdssr',
custom_bandpasses=None,
mag_bright_limit=10.0,
mag_faint_limit=12.0,
process_template_lcs=True,
template_sigclip=5.0,
template_interpolate='nearest',
lcformat='hat-sql',
lcformatdir=None,
timecols=None,
magcols=None,
errcols=None,
nworkers=NCPUS,
maxworkertasks=1000,
):
'''This selects template objects for TFA.
Selection criteria for TFA template ensemble objects:
- not variable: use a poly fit to the mag-MAD relation and eta-normal
variability index to get nonvar objects
- not more than 10% of the total number of objects in the field or
`max_template_number` at most
- allow shuffling of the templates if the target ends up in them
- nothing with less than the median number of observations in the field
- sigma-clip the input time series observations
- TODO: uniform sampling in tangent plane coordinates (we'll need ra and
decl)
This function also determines the effective time base that all TFA LCs will
be reformed to, using the template LC with the largest number of non-nan
observations. All template LCs will be renormalized to zero.
Parameters
----------
lclist : list of str
This is a list of light curves to use as input to generate the template
set.
lcinfo_pkl : str or None
If provided, is a file path to a pickle file created by this function on
a previous run containing the LC information. This will be loaded
directly instead of having to re-run LC info collection.
outfile : str or None
This is the pickle filename to which the TFA template list will be
written to. If None, a default file name will be used for this.
target_template_frac : float
This is the fraction of total objects in lclist to use for the number of
templates.
max_target_frac_obs : float
This sets the number of templates to generate if the number of
observations for the light curves is smaller than the number of objects
in the collection. The number of templates will be set to this fraction
of the number of observations if this is the case.
min_template_number : int
This is the minimum number of templates to generate.
max_template_number : int
This is the maximum number of templates to generate. If
`target_template_frac` times the number of objects is greater than
`max_template_number`, only `max_template_number` templates will be
used.
max_rms : float
This is the maximum light curve RMS for an object to consider it as a
possible template ensemble member.
max_mult_above_magmad : float
This is the maximum multiplier above the mag-RMS fit to consider an
object as variable and thus not part of the template ensemble.
max_mult_above_mageta : float
This is the maximum multiplier above the mag-eta (variable index) fit to
consider an object as variable and thus not part of the template
ensemble.
mag_bandpass : str
This sets the key in the light curve dict's objectinfo dict to use as
the canonical magnitude for the object and apply any magnitude limits
to.
custom_bandpasses : dict or None
This can be used to provide any custom band name keys to the star
feature collection function.
mag_bright_limit : float or list of floats
This sets the brightest mag (in the `mag_bandpass` filter) for a
potential member of the TFA template ensemble. If this is a single
float, the value will be used for all magcols. If this is a list of
floats with len = len(magcols), the specific bright limits will be used
for each magcol individually.
mag_faint_limit : float or list of floats
This sets the faintest mag (in the `mag_bandpass` filter) for a
potential member of the TFA template ensemble. If this is a single
float, the value will be used for all magcols. If this is a list of
floats with len = len(magcols), the specific faint limits will be used
for each magcol individually.
process_template_lcs : bool
If True, will reform the template light curves to the chosen
time-base. If False, will only select light curves for templates but not
process them. This is useful for initial exploration of how the template
LC are selected.
template_sigclip : float or sequence of floats or None
This sets the sigma-clip to be applied to the template light curves.
template_interpolate : str
This sets the kwarg to pass to `scipy.interpolate.interp1d` to set the
kind of interpolation to use when reforming light curves to the TFA
template timebase.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
timecols : list of str or None
The timecol keys to use from the lcdict in calculating the features.
magcols : list of str or None
The magcol keys to use from the lcdict in calculating the features.
errcols : list of str or None
The errcol keys to use from the lcdict in calculating the features.
nworkers : int
The number of parallel workers to launch.
maxworkertasks : int
The maximum number of tasks to run per worker before it is replaced by a
fresh one.
Returns
-------
dict
This function returns a dict that can be passed directly to
`apply_tfa_magseries` below. It can optionally produce a pickle with the
same dict, which can also be passed to that function.
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
# override the default timecols, magcols, and errcols
# using the ones provided to the function
if timecols is None:
timecols = dtimecols
if magcols is None:
magcols = dmagcols
if errcols is None:
errcols = derrcols
LOGINFO('collecting light curve information for %s objects in list...' %
len(lclist))
#
# check if we have cached results for this run
#
# case where we provide a cache info pkl directly
if lcinfo_pkl and os.path.exists(lcinfo_pkl):
with open(lcinfo_pkl,'rb') as infd:
results = pickle.load(infd)
# case where we don't have an info pickle or an outfile
elif ((not outfile) and
os.path.exists('tfa-collected-lcinfo-%s.pkl' % lcformat)):
with open('tfa-collected-lcinfo-%s.pkl' % lcformat, 'rb') as infd:
results = pickle.load(infd)
# case where we don't have an info pickle but do have an outfile
elif (outfile and os.path.exists('tfa-collected-lcinfo-%s-%s' %
(lcformat, os.path.basename(outfile)))):
with open(
'tfa-collected-lcinfo-%s-%s' %
(lcformat, os.path.basename(outfile)),
'rb'
) as infd:
results = pickle.load(infd)
# case where we have to redo the LC info collection
else:
# first, we'll collect the light curve info
tasks = [(x, lcformat, lcformatdir,
timecols, magcols, errcols,
custom_bandpasses) for x in lclist]
pool = mp.Pool(nworkers, maxtasksperchild=maxworkertasks)
results = pool.map(_collect_tfa_stats, tasks)
pool.close()
pool.join()
# save these results so we don't have to redo if something breaks here
if not outfile:
with open('tfa-collected-lcinfo-%s.pkl' % lcformat,'wb') as outfd:
pickle.dump(results, outfd, pickle.HIGHEST_PROTOCOL)
else:
with open(
'tfa-collected-lcinfo-%s-%s' %
(lcformat, os.path.basename(outfile)),
'wb'
) as outfd:
pickle.dump(results, outfd, pickle.HIGHEST_PROTOCOL)
#
# now, go through the light curve information
#
# find the center RA and center DEC -> median of all LC RAs and DECs
all_ras = np.array([res['ra'] for res in results])
all_decls = np.array([res['decl'] for res in results])
center_ra = np.nanmedian(all_ras)
center_decl = np.nanmedian(all_decls)
outdict = {
'timecols':[],
'magcols':[],
'errcols':[],
'center_ra':center_ra,
'center_decl':center_decl,
}
# for each magcol, we'll generate a separate template list
for tcol, mcol, ecol in zip(timecols, magcols, errcols):
if '.' in tcol:
tcolget = tcol.split('.')
else:
tcolget = [tcol]
# these are the containers for possible template collection LC info
(lcmag, lcmad, lceta,
lcndet, lcobj, lcfpaths,
lcra, lcdecl) = [], [], [], [], [], [], [], []
outdict['timecols'].append(tcol)
outdict['magcols'].append(mcol)
outdict['errcols'].append(ecol)
# add to the collection of all light curves
outdict[mcol] = {'collection':{'mag':[],
'mad':[],
'eta':[],
'ndet':[],
'obj':[],
'lcf':[],
'ra':[],
'decl':[]}}
LOGINFO('magcol: %s, collecting prospective template LC info...' %
mcol)
# collect the template LCs for this magcol
for result in results:
# we'll only append objects that have all of these elements
try:
thismag = result['colorfeat'][mag_bandpass]
thismad = result[mcol]['mad']
thiseta = result[mcol]['eta_normal']
thisndet = result[mcol]['ndet']
thisobj = result['objectid']
thislcf = result['lcfpath']
thisra = result['ra']
thisdecl = result['decl']
outdict[mcol]['collection']['mag'].append(thismag)
outdict[mcol]['collection']['mad'].append(thismad)
outdict[mcol]['collection']['eta'].append(thiseta)
outdict[mcol]['collection']['ndet'].append(thisndet)
outdict[mcol]['collection']['obj'].append(thisobj)
outdict[mcol]['collection']['lcf'].append(thislcf)
outdict[mcol]['collection']['ra'].append(thisra)
outdict[mcol]['collection']['decl'].append(thisdecl)
# check if we have more than one bright or faint limit elem
if isinstance(mag_bright_limit, (list, tuple)):
use_bright_maglim = mag_bright_limit[
magcols.index(mcol)
]
else:
use_bright_maglim = mag_bright_limit
if isinstance(mag_faint_limit, (list, tuple)):
use_faint_maglim = mag_faint_limit[
magcols.index(mcol)
]
else:
use_faint_maglim = mag_faint_limit
# make sure the object lies in the mag limits and RMS limits we
# set before to try to accept it into the TFA ensemble
if ((use_bright_maglim < thismag < use_faint_maglim) and
(1.4826*thismad < max_rms)):
lcmag.append(thismag)
lcmad.append(thismad)
lceta.append(thiseta)
lcndet.append(thisndet)
lcobj.append(thisobj)
lcfpaths.append(thislcf)
lcra.append(thisra)
lcdecl.append(thisdecl)
except Exception as e:
pass
# make sure we have enough LCs to work on
if len(lcobj) >= min_template_number:
LOGINFO('magcol: %s, %s objects eligible for '
'template selection after filtering on mag '
'limits (%s, %s) and max RMS (%s)' %
(mcol, len(lcobj),
mag_bright_limit, mag_faint_limit, max_rms))
lcmag = np.array(lcmag)
lcmad = np.array(lcmad)
lceta = np.array(lceta)
lcndet = np.array(lcndet)
lcobj = np.array(lcobj)
lcfpaths = np.array(lcfpaths)
lcra = np.array(lcra)
lcdecl = np.array(lcdecl)
sortind = np.argsort(lcmag)
lcmag = lcmag[sortind]
lcmad = lcmad[sortind]
lceta = lceta[sortind]
lcndet = lcndet[sortind]
lcobj = lcobj[sortind]
lcfpaths = lcfpaths[sortind]
lcra = lcra[sortind]
lcdecl = lcdecl[sortind]
# 1. get the mag-MAD relation
# this is needed for spline fitting
# should take care of the pesky 'x must be strictly increasing' bit
splfit_ind = np.diff(lcmag) > 0.0
splfit_ind = np.concatenate((np.array([True]), splfit_ind))
fit_lcmag = lcmag[splfit_ind]
fit_lcmad = lcmad[splfit_ind]
fit_lceta = lceta[splfit_ind]
magmadfit = np.poly1d(np.polyfit(
fit_lcmag,
fit_lcmad,
2
))
magmadind = lcmad/magmadfit(lcmag) < max_mult_above_magmad
# 2. get the mag-eta relation
magetafit = np.poly1d(np.polyfit(
fit_lcmag,
fit_lceta,
2
))
magetaind = magetafit(lcmag)/lceta < max_mult_above_mageta
# 3. get the median ndet
median_ndet = np.median(lcndet)
ndetind = lcndet >= median_ndet
# form the final template ensemble
templateind = magmadind & magetaind & ndetind
# check again if we have enough LCs in the template
if templateind.sum() >= min_template_number:
LOGINFO('magcol: %s, %s objects selectable for TFA templates' %
(mcol, templateind.sum()))
templatemag = lcmag[templateind]
templatemad = lcmad[templateind]
templateeta = lceta[templateind]
templatendet = lcndet[templateind]
templateobj = lcobj[templateind]
templatelcf = lcfpaths[templateind]
templatera = lcra[templateind]
templatedecl = lcdecl[templateind]
# now, check if we have no more than the required fraction of
# TFA templates
target_number_templates = int(target_template_frac*len(results))
if target_number_templates > max_template_number:
target_number_templates = max_template_number
LOGINFO('magcol: %s, selecting %s TFA templates randomly' %
(mcol, target_number_templates))
# FIXME: how do we select uniformly in xi-eta?
# 1. 2D histogram the data into binsize (nx, ny)
# 2. random uniform select from 0 to nx-1, 0 to ny-1
# 3. pick object from selected bin
# 4. continue until we have target_number_templates
# 5. make sure the same object isn't picked twice
# get the xi-eta
template_cxi, template_ceta = coordutils.xieta_from_radecl(
templatera,
templatedecl,
center_ra,
center_decl
)
cxi_bins = np.linspace(template_cxi.min(),
template_cxi.max(),
num=xieta_bins)
ceta_bins = np.linspace(template_ceta.min(),
template_ceta.max(),
num=xieta_bins)
digitized_cxi_inds = np.digitize(template_cxi, cxi_bins)
digitized_ceta_inds = np.digitize(template_ceta, ceta_bins)
# NOTE: the xi-eta binning above is placeholder scaffolding for the
# uniform-in-xi-eta selection described in the FIXME; it isn't used yet.
# For now, fall through to a simple random uniform selection of template
# objects below.
# select random uniform objects from the template candidates
targetind = npr.choice(templateobj.size,
target_number_templates,
replace=False)
templatemag = templatemag[targetind]
templatemad = templatemad[targetind]
templateeta = templateeta[targetind]
templatendet = templatendet[targetind]
templateobj = templateobj[targetind]
templatelcf = templatelcf[targetind]
templatera = templatera[targetind]
templatedecl = templatedecl[targetind]
# get the max ndet so far to use that LC as the timebase
maxndetind = templatendet == templatendet.max()
timebaselcf = templatelcf[maxndetind][0]
timebasendet = templatendet[maxndetind][0]
LOGINFO('magcol: %s, selected %s as template time '
'base LC with %s observations' %
(mcol, timebaselcf, timebasendet))
if process_template_lcs:
timebaselcdict = readerfunc(timebaselcf)
if ( (isinstance(timebaselcdict, (list, tuple))) and
(isinstance(timebaselcdict[0], dict)) ):
timebaselcdict = timebaselcdict[0]
# this is the timebase to use for all of the templates
timebase = _dict_get(timebaselcdict, tcolget)
else:
timebase = None
# also check if the number of templates is longer than the
# actual timebase of the observations. this will cause issues
# with overcorrections and will probably break TFA
if target_number_templates > timebasendet:
LOGWARNING('The number of TFA templates (%s) is '
'larger than the number of observations '
'of the time base (%s). This will likely '
'overcorrect all light curves to a '
'constant level. '
'Will use up to %s x timebase ndet '
'templates instead' %
(target_number_templates,
timebasendet,
max_target_frac_obs))
# regen the templates based on the new number
newmaxtemplates = int(max_target_frac_obs*timebasendet)
# choose this number out of the already chosen templates
# randomly
LOGWARNING('magcol: %s, re-selecting %s TFA '
'templates randomly' %
(mcol, newmaxtemplates))
# FIXME: how do we select uniformly in ra-decl?
# 1. 2D histogram the data into binsize (nx, ny)
# 2. random uniform select from 0 to nx-1, 0 to ny-1
# 3. pick object from selected bin
# 4. continue until we have target_number_templates
# 5. make sure the same object isn't picked twice
# select random uniform objects from the template candidates
targetind = npr.choice(templateobj.size,
newmaxtemplates,
replace=False)
templatemag = templatemag[targetind]
templatemad = templatemad[targetind]
templateeta = templateeta[targetind]
templatendet = templatendet[targetind]
templateobj = templateobj[targetind]
templatelcf = templatelcf[targetind]
templatera = templatera[targetind]
templatedecl = templatedecl[targetind]
# get the max ndet so far to use that LC as the timebase
maxndetind = templatendet == templatendet.max()
timebaselcf = templatelcf[maxndetind][0]
timebasendet = templatendet[maxndetind][0]
LOGWARNING('magcol: %s, re-selected %s as template time '
'base LC with %s observations' %
(mcol, timebaselcf, timebasendet))
if process_template_lcs:
timebaselcdict = readerfunc(timebaselcf)
if ( (isinstance(timebaselcdict, (list, tuple))) and
(isinstance(timebaselcdict[0], dict)) ):
timebaselcdict = timebaselcdict[0]
# this is the timebase to use for all of the templates
timebase = _dict_get(timebaselcdict, tcolget)
else:
timebase = None
#
# end of check for ntemplates > timebase ndet
#
if process_template_lcs:
LOGINFO('magcol: %s, reforming TFA template LCs to '
'the chosen timebase...' % mcol)
# reform all template LCs to this time base, normalize to
# zero, and sigclip as requested. this is a parallel op
# first, we'll collect the light curve info
tasks = [(x, lcformat, lcformatdir,
tcol, mcol, ecol,
timebase, template_interpolate,
template_sigclip) for x
in templatelcf]
pool = mp.Pool(nworkers, maxtasksperchild=maxworkertasks)
reform_results = pool.map(_reform_templatelc_for_tfa, tasks)
pool.close()
pool.join()
# generate a 2D array for the template magseries with
# dimensions = (n_objects, n_lcpoints)
template_magseries = np.array([x['mags']
for x in reform_results])
template_errseries = np.array([x['errs']
for x in reform_results])
else:
template_magseries = None
template_errseries = None
# put everything into a templateinfo dict for this magcol
outdict[mcol].update({
'timebaselcf':timebaselcf,
'timebase':timebase,
'trendfits':{'mag-mad':magmadfit,
'mag-eta':magetafit},
'template_objects':templateobj,
'template_ra':templatera,
'template_decl':templatedecl,
'template_mag':templatemag,
'template_mad':templatemad,
'template_eta':templateeta,
'template_ndet':templatendet,
'template_magseries':template_magseries,
'template_errseries':template_errseries
})
# make a KDTree on the template coordinates
outdict[mcol]['template_radecl_kdtree'] = (
coordutils.make_kdtree(
templatera, templatedecl
)
)
# if we don't have enough, return nothing for this magcol
else:
LOGERROR('not enough objects meeting requested '
'MAD, eta, ndet conditions to '
'select templates for magcol: %s' % mcol)
continue
else:
LOGERROR('nobjects: %s, not enough in requested mag range to '
'select templates for magcol: %s' % (len(lcobj),mcol))
continue
# make the plots for mag-MAD/mag-eta relation and fits used
plt.plot(lcmag, lcmad, marker='o', linestyle='none', ms=1.0)
modelmags = np.linspace(lcmag.min(), lcmag.max(), num=1000)
plt.plot(modelmags, outdict[mcol]['trendfits']['mag-mad'](modelmags))
plt.yscale('log')
plt.xlabel('catalog magnitude')
plt.ylabel('light curve MAD')
plt.title('catalog mag vs. light curve MAD and fit')
plt.savefig('catmag-%s-lcmad-fit.png' % mcol,
bbox_inches='tight')
plt.close('all')
plt.plot(lcmag, lceta, marker='o', linestyle='none', ms=1.0)
modelmags = np.linspace(lcmag.min(), lcmag.max(), num=1000)
plt.plot(modelmags, outdict[mcol]['trendfits']['mag-eta'](modelmags))
plt.yscale('log')
plt.xlabel('catalog magnitude')
plt.ylabel('light curve eta variable index')
plt.title('catalog mag vs. light curve eta and fit')
plt.savefig('catmag-%s-lceta-fit.png' % mcol,
bbox_inches='tight')
plt.close('all')
#
# end of operating on each magcol
#
# save the templateinfo dict to a pickle if requested
if outfile:
if outfile.endswith('.gz'):
outfd = gzip.open(outfile,'wb')
else:
outfd = open(outfile,'wb')
with outfd:
pickle.dump(outdict, outfd, protocol=pickle.HIGHEST_PROTOCOL)
# return the templateinfo dict
return outdict
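# ----------------------------------------------------------------------------
# Illustrative usage sketch (hypothetical, not part of the library): select a
# TFA template ensemble from all light curves matching a glob and save the
# result to a pickle. The glob, magnitude limits, and the 'hat-sql' format key
# are placeholders for whatever your registered LC format uses.
def _demo_build_tfa_templates():
    import glob
    lclist = sorted(glob.glob('/path/to/lcs/*-lc.pkl'))  # placeholder glob
    templateinfo = tfa_templates_lclist(
        lclist,
        outfile='tfa-templates.pkl',
        target_template_frac=0.1,
        mag_bright_limit=9.0,
        mag_faint_limit=13.0,
        lcformat='hat-sql',        # placeholder format key
        process_template_lcs=True
    )
    return templateinfo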
|
def apply_tfa_magseries(lcfile,
timecol,
magcol,
errcol,
templateinfo,
mintemplatedist_arcmin=10.0,
lcformat='hat-sql',
lcformatdir=None,
interp='nearest',
sigclip=5.0):
'''This applies the TFA correction to an LC given TFA template information.
Parameters
----------
lcfile : str
This is the light curve file to apply the TFA correction to.
timecol,magcol,errcol : str
These are the column keys in the lcdict for the LC file to apply the TFA
correction to.
templateinfo : dict or str
This is either the dict produced by `tfa_templates_lclist` or the pickle
produced by the same function.
mintemplatedist_arcmin : float
This sets the minimum distance required from the target object for
objects in the TFA template ensemble. Objects closer than this distance
will be removed from the ensemble.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
interp : str
This is passed to scipy.interpolate.interp1d as the kind of
interpolation to use when reforming this light curve to the timebase of
the TFA templates.
sigclip : float or sequence of two floats or None
This is the sigma clip to apply to this light curve before running TFA
on it.
Returns
-------
str
This returns the filename of the light curve file generated after TFA
applications. This is a pickle (that can be read by `lcproc.read_pklc`)
in the same directory as `lcfile`. The `magcol` will be encoded in the
filename, so each `magcol` in `lcfile` gets its own output file.
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
# get the templateinfo from a pickle if necessary
if isinstance(templateinfo,str) and os.path.exists(templateinfo):
with open(templateinfo,'rb') as infd:
templateinfo = pickle.load(infd)
lcdict = readerfunc(lcfile)
if ((isinstance(lcdict, (tuple, list))) and
isinstance(lcdict[0], dict)):
lcdict = lcdict[0]
objectid = lcdict['objectid']
# this is the initial template array
tmagseries = templateinfo[magcol][
'template_magseries'
][::]
# if the object itself is in the template ensemble, remove it
if objectid in templateinfo[magcol]['template_objects']:
LOGWARNING('object %s found in the TFA template ensemble, removing...' %
objectid)
templateind = templateinfo[magcol]['template_objects'] == objectid
# get the objects in the tmagseries not corresponding to the current
# object's index
tmagseries = tmagseries[~templateind,:]
# check if there are close matches to the current object in the templates
object_matches = coordutils.conesearch_kdtree(
templateinfo[magcol]['template_radecl_kdtree'],
lcdict['objectinfo']['ra'], lcdict['objectinfo']['decl'],
mintemplatedist_arcmin/60.0
)
if len(object_matches) > 0:
LOGWARNING(
"object %s is within %.1f arcminutes of %s "
"template objects. Will remove these objects "
"from the template applied to this object." %
(objectid, mintemplatedist_arcmin, len(object_matches))
)
removalind = np.full(
templateinfo[magcol]['template_objects'].size,
False, dtype=bool
)
removalind[np.array(object_matches)] = True
tmagseries = tmagseries[~removalind,:]
#
# finally, proceed to TFA
#
# this is the normal matrix
normal_matrix = np.dot(tmagseries, tmagseries.T)
# get the pseudoinverse of the normal matrix (scipy.linalg.pinv2 was
# removed in newer SciPy releases; pinv computes the same Moore-Penrose
# pseudoinverse)
normal_matrix_inverse = spla.pinv(normal_matrix)
# get the timebase from the template
timebase = templateinfo[magcol]['timebase']
# use this to reform the target lc in the same manner as that for a TFA
# template LC
reformed_targetlc = _reform_templatelc_for_tfa((
lcfile,
lcformat,
lcformatdir,
timecol,
magcol,
errcol,
timebase,
interp,
sigclip
))
# calculate the scalar products of the target and template magseries
scalar_products = np.dot(tmagseries, reformed_targetlc['mags'])
# calculate the corrections
corrections = np.dot(normal_matrix_inverse, scalar_products)
# finally, get the corrected time series for the target object
corrected_magseries = (
reformed_targetlc['origmags'] -
np.dot(tmagseries.T, corrections)
)
outdict = {
'times':timebase,
'mags':corrected_magseries,
'errs':reformed_targetlc['errs'],
'mags_median':np.median(corrected_magseries),
'mags_mad': np.median(np.abs(corrected_magseries -
np.median(corrected_magseries))),
'work':{'tmagseries':tmagseries,
'normal_matrix':normal_matrix,
'normal_matrix_inverse':normal_matrix_inverse,
'scalar_products':scalar_products,
'corrections':corrections,
'reformed_targetlc':reformed_targetlc},
}
# we'll write back the tfa times and mags to the lcdict
lcdict['tfa'] = outdict
outfile = os.path.join(
os.path.dirname(lcfile),
'%s-tfa-%s-pklc.pkl' % (
squeeze(objectid).replace(' ','-'),
magcol
)
)
with open(outfile,'wb') as outfd:
pickle.dump(lcdict, outfd, pickle.HIGHEST_PROTOCOL)
return outfile
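# ----------------------------------------------------------------------------
# Self-contained sketch (illustrative only) of the linear algebra used above:
# given an (n_templates x n_points) template matrix T and a target light curve
# y, TFA solves the least-squares problem T^T c ~ y for the coefficients c via
# the normal equations, then subtracts the fitted trend T^T c from the target.
# Uses only numpy with synthetic data.
def _demo_tfa_linear_algebra():
    import numpy as np

    npoints, ntemplates = 500, 20
    rng = np.random.default_rng(42)

    # shared systematic trend plus per-template noise
    trend = 0.02*np.sin(np.linspace(0.0, 6.0, npoints))
    tmagseries = trend + rng.normal(0.0, 0.005, (ntemplates, npoints))

    # target LC = same trend + its own photometric noise
    target = trend + rng.normal(0.0, 0.005, npoints)

    normal_matrix = np.dot(tmagseries, tmagseries.T)
    normal_matrix_inverse = np.linalg.pinv(normal_matrix)
    scalar_products = np.dot(tmagseries, target)
    corrections = np.dot(normal_matrix_inverse, scalar_products)
    corrected = target - np.dot(tmagseries.T, corrections)

    # the corrected series should show less scatter than the input
    print(np.std(target), np.std(corrected))
    return corrected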
|
def _parallel_tfa_worker(task):
'''
This is a parallel worker for the function below.
task[0] = lcfile
task[1] = timecol
task[2] = magcol
task[3] = errcol
task[4] = templateinfo
task[5] = lcformat
task[6] = lcformatdir
task[7] = interp
task[8] = sigclip
task[9] = mintemplatedist_arcmin
'''
(lcfile, timecol, magcol, errcol,
templateinfo, lcformat, lcformatdir,
interp, sigclip, mintemplatedist_arcmin) = task
try:
res = apply_tfa_magseries(
lcfile, timecol, magcol, errcol,
templateinfo,
lcformat=lcformat,
lcformatdir=lcformatdir,
interp=interp,
sigclip=sigclip,
mintemplatedist_arcmin=mintemplatedist_arcmin
)
if res:
LOGINFO('%s -> %s TFA OK' % (lcfile, res))
return res
except Exception as e:
LOGEXCEPTION('TFA failed for %s' % lcfile)
return None
|
def parallel_tfa_lclist(lclist,
templateinfo,
timecols=None,
magcols=None,
errcols=None,
lcformat='hat-sql',
lcformatdir=None,
interp='nearest',
sigclip=5.0,
mintemplatedist_arcmin=10.0,
nworkers=NCPUS,
maxworkertasks=1000):
'''This applies TFA in parallel to all LCs in the given list of file names.
Parameters
----------
lclist : str
This is a list of light curve files to apply TFA correction to.
templateinfo : dict or str
This is either the dict produced by `tfa_templates_lclist` or the pickle
produced by the same function.
timecols : list of str or None
The timecol keys to use from the lcdict in applying TFA corrections.
magcols : list of str or None
The magcol keys to use from the lcdict in applying TFA corrections.
errcols : list of str or None
The errcol keys to use from the lcdict in applying TFA corrections.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
interp : str
This is passed to scipy.interpolate.interp1d as the kind of
interpolation to use when reforming the light curves to the timebase of
the TFA templates.
sigclip : float or sequence of two floats or None
This is the sigma clip to apply to the light curves before running TFA
on it.
mintemplatedist_arcmin : float
This sets the minimum distance required from the target object for
objects in the TFA template ensemble. Objects closer than this distance
will be removed from the ensemble.
nworkers : int
The number of parallel workers to launch
maxworkertasks : int
The maximum number of tasks per worker allowed before it's replaced by a
fresh one.
Returns
-------
dict
Contains the input file names and output TFA light curve filenames per
input file organized by each `magcol` in `magcols`.
'''
# open the templateinfo first
if isinstance(templateinfo,str) and os.path.exists(templateinfo):
with open(templateinfo,'rb') as infd:
templateinfo = pickle.load(infd)
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
# override the default timecols, magcols, and errcols
# using the ones provided to the function
# we'll get the defaults from the templateinfo object
if timecols is None:
timecols = templateinfo['timecols']
if magcols is None:
magcols = templateinfo['magcols']
if errcols is None:
errcols = templateinfo['errcols']
outdict = {}
# run by magcol
for t, m, e in zip(timecols, magcols, errcols):
tasks = [(x, t, m, e, templateinfo,
lcformat, lcformatdir,
interp, sigclip, mintemplatedist_arcmin) for
x in lclist]
pool = mp.Pool(nworkers, maxtasksperchild=maxworkertasks)
results = pool.map(_parallel_tfa_worker, tasks)
pool.close()
pool.join()
outdict[m] = results
return outdict
|
def parallel_tfa_lcdir(lcdir,
templateinfo,
lcfileglob=None,
timecols=None,
magcols=None,
errcols=None,
lcformat='hat-sql',
lcformatdir=None,
interp='nearest',
sigclip=5.0,
mintemplatedist_arcmin=10.0,
nworkers=NCPUS,
maxworkertasks=1000):
'''This applies TFA in parallel to all LCs in a directory.
Parameters
----------
lcdir : str
This is the directory containing the light curve files to process.
templateinfo : dict or str
This is either the dict produced by `tfa_templates_lclist` or the pickle
produced by the same function.
lcfileglob : str or None
The UNIX file glob to use when searching for light curve files in
`lcdir`. If None, the default file glob associated with the registered LC
format is used.
timecols : list of str or None
The timecol keys to use from the lcdict in applying TFA corrections.
magcols : list of str or None
The magcol keys to use from the lcdict in applying TFA corrections.
errcols : list of str or None
The errcol keys to use from the lcdict in applying TFA corrections.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
interp : str
This is passed to scipy.interpolate.interp1d as the kind of
interpolation to use when reforming the light curves to the timebase of
the TFA templates.
sigclip : float or sequence of two floats or None
This is the sigma clip to apply to the light curves before running TFA
on it.
mintemplatedist_arcmin : float
This sets the minimum distance required from the target object for
objects in the TFA template ensemble. Objects closer than this distance
will be removed from the ensemble.
nworkers : int
The number of parallel workers to launch
maxworkertasks : int
The maximum number of tasks per worker allowed before it's replaced by a
fresh one.
Returns
-------
dict
Contains the input file names and output TFA light curve filenames per
input file organized by each `magcol` in `magcols`.
'''
# open the templateinfo first
if isinstance(templateinfo,str) and os.path.exists(templateinfo):
with open(templateinfo,'rb') as infd:
templateinfo = pickle.load(infd)
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
# find all the files matching the lcglob in lcdir
if lcfileglob is None:
lcfileglob = dfileglob
lclist = sorted(glob.glob(os.path.join(lcdir, lcfileglob)))
return parallel_tfa_lclist(
lclist,
templateinfo,
timecols=timecols,
magcols=magcols,
errcols=errcols,
lcformat=lcformat,
lcformatdir=lcformatdir,
interp=interp,
sigclip=sigclip,
mintemplatedist_arcmin=mintemplatedist_arcmin,
nworkers=nworkers,
maxworkertasks=maxworkertasks
)
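# ----------------------------------------------------------------------------
# Illustrative usage sketch (hypothetical, not part of the library): apply TFA
# to every light curve in a directory using a previously generated template
# pickle (e.g. from `tfa_templates_lclist` above, which wraps the same
# machinery as `parallel_tfa_lclist`). The directory and the 'hat-sql' format
# key are placeholders.
def _demo_parallel_tfa_lcdir():
    results = parallel_tfa_lcdir(
        '/path/to/lcs',          # placeholder directory
        'tfa-templates.pkl',     # pickle written by tfa_templates_lclist
        lcformat='hat-sql',      # placeholder format key
        interp='nearest',
        sigclip=5.0,
        nworkers=4
    )
    # results is a dict keyed by magcol; each value is a list of output TFA
    # light curve pickle paths (or None for objects that failed)
    return results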
|
def update_checkplot_objectinfo(cpf,
fast_mode=False,
findercmap='gray_r',
finderconvolve=None,
deredden_object=True,
custom_bandpasses=None,
gaia_submit_timeout=10.0,
gaia_submit_tries=3,
gaia_max_timeout=180.0,
gaia_mirror=None,
complete_query_later=True,
lclistpkl=None,
nbrradiusarcsec=60.0,
maxnumneighbors=5,
plotdpi=100,
findercachedir='~/.astrobase/stamp-cache',
verbose=True):
'''This updates a checkplot objectinfo dict.
Useful in cases where a previous round of GAIA/finderchart/external catalog
acquisition failed. This will preserve the following keys in the checkplot
if they exist::
comments
varinfo
objectinfo.objecttags
Parameters
----------
cpf : str
The path to the checkplot pickle to update.
fast_mode : bool or float
This runs the external catalog operations in a "fast" mode, with short
timeouts and not trying to hit external catalogs that take a long time
to respond. See the docstring for
:py:func:`astrobase.checkplot.pkl_utils._pkl_finder_objectinfo` for
details on how this works. If this is True, will run in "fast" mode with
default timeouts (5 seconds in most cases). If this is a float, will run
in "fast" mode with the provided timeout value in seconds.
findercmap : str or matplotlib.cm.ColorMap object
The Colormap object to use for the finder chart image.
finderconvolve : astropy.convolution.Kernel object or None
If not None, the Kernel object to use for convolving the finder image.
deredden_object : bool
If this is True, will use the 2MASS DUST service to get extinction
coefficients in various bands, and then try to deredden the magnitudes
and colors of the object already present in the checkplot's objectinfo
dict.
custom_bandpasses : dict
This is a dict used to provide custom bandpass definitions for any
magnitude measurements in the objectinfo dict that are not automatically
recognized by the `varclass.starfeatures.color_features` function. See
its docstring for details on the required format.
gaia_submit_timeout : float
Sets the timeout in seconds to use when submitting a request to look up
the object's information to the GAIA service. Note that if `fast_mode`
is set, this is ignored.
gaia_submit_tries : int
Sets the maximum number of times the GAIA services will be contacted to
obtain this object's information. If `fast_mode` is set, this is
ignored, and the services will be contacted only once (meaning that a
failure to respond will be silently ignored and no GAIA data will be
added to the checkplot's objectinfo dict).
gaia_max_timeout : float
Sets the timeout in seconds to use when waiting for the GAIA service to
respond to our request for the object's information. Note that if
`fast_mode` is set, this is ignored.
gaia_mirror : str
This sets the GAIA mirror to use. This is a key in the
:py:data:`astrobase.services.gaia.GAIA_URLS` dict which defines the URLs
to hit for each mirror.
complete_query_later : bool
If this is True, saves the state of GAIA queries that are not yet
complete when `gaia_max_timeout` is reached while waiting for the GAIA
service to respond to our request. A later call for GAIA info on the
same object will attempt to pick up the results from the existing query
if it's completed. If `fast_mode` is True, this is ignored.
lclistpkl : dict or str
If this is provided, must be a dict resulting from reading a catalog
produced by the `lcproc.catalogs.make_lclist` function or a str path
pointing to the pickle file produced by that function. This catalog is
used to find neighbors of the current object in the current light curve
collection. Looking at neighbors of the object within the radius
specified by `nbrradiusarcsec` is useful for light curves produced by
instruments that have a large pixel scale, so are susceptible to
blending of variability and potential confusion of neighbor variability
with that of the actual object being looked at. If this is None, no
neighbor lookups will be performed.
nbrradiusarcsec : float
The radius in arcseconds to use for a search conducted around the
coordinates of this object to look for any potential confusion and
blending of variability amplitude caused by their proximity.
maxnumneighbors : int
The maximum number of neighbors that will have their light curves and
magnitudes noted in this checkplot as potential blends with the target
object.
plotdpi : int
The resolution in DPI of the plots to generate in this function
(e.g. the finder chart, etc.)
findercachedir : str
The path to the astrobase cache directory for finder chart downloads
from the NASA SkyView service.
verbose : bool
If True, will indicate progress and warn about potential problems.
Returns
-------
str
Path to the updated checkplot pickle file.
'''
cpd = _read_checkplot_picklefile(cpf)
if cpd['objectinfo']['objecttags'] is not None:
objecttags = cpd['objectinfo']['objecttags'][::]
else:
objecttags = None
varinfo = deepcopy(cpd['varinfo'])
if 'comments' in cpd and cpd['comments'] is not None:
comments = cpd['comments'][::]
else:
comments = None
newcpd = _pkl_finder_objectinfo(cpd['objectinfo'],
varinfo,
findercmap,
finderconvolve,
cpd['sigclip'],
cpd['normto'],
cpd['normmingap'],
fast_mode=fast_mode,
deredden_object=deredden_object,
custom_bandpasses=custom_bandpasses,
gaia_submit_timeout=gaia_submit_timeout,
gaia_submit_tries=gaia_submit_tries,
gaia_max_timeout=gaia_max_timeout,
gaia_mirror=gaia_mirror,
complete_query_later=complete_query_later,
lclistpkl=lclistpkl,
nbrradiusarcsec=nbrradiusarcsec,
maxnumneighbors=maxnumneighbors,
plotdpi=plotdpi,
findercachedir=findercachedir,
verbose=verbose)
#
# don't update neighbors or finder chart if the new one is bad
#
if (newcpd['finderchart'] is None and
cpd['finderchart'] is not None):
newcpd['finderchart'] = deepcopy(
cpd['finderchart']
)
if (newcpd['neighbors'] is None and
cpd['neighbors'] is not None):
newcpd['neighbors'] = deepcopy(
cpd['neighbors']
)
#
# if there's existing GAIA info, don't overwrite if the new objectinfo dict
# doesn't have any
#
if (('failed' in newcpd['objectinfo']['gaia_status'] or
('gaiaid' in newcpd['objectinfo'] and
newcpd['objectinfo']['gaiaid'] is None)) and
'ok' in cpd['objectinfo']['gaia_status']):
newcpd['objectinfo']['gaia_status'] = deepcopy(
cpd['objectinfo']['gaia_status']
)
if 'gaiaid' in cpd['objectinfo']:
newcpd['objectinfo']['gaiaid'] = deepcopy(
cpd['objectinfo']['gaiaid']
)
newcpd['objectinfo']['gaiamag'] = deepcopy(
cpd['objectinfo']['gaiamag']
)
newcpd['objectinfo']['gaia_absmag'] = deepcopy(
cpd['objectinfo']['gaia_absmag']
)
newcpd['objectinfo']['gaia_parallax'] = deepcopy(
cpd['objectinfo']['gaia_parallax']
)
newcpd['objectinfo']['gaia_parallax_err'] = deepcopy(
cpd['objectinfo']['gaia_parallax_err']
)
newcpd['objectinfo']['gaia_pmra'] = deepcopy(
cpd['objectinfo']['gaia_pmra']
)
newcpd['objectinfo']['gaia_pmra_err'] = deepcopy(
cpd['objectinfo']['gaia_pmra_err']
)
newcpd['objectinfo']['gaia_pmdecl'] = deepcopy(
cpd['objectinfo']['gaia_pmdecl']
)
newcpd['objectinfo']['gaia_pmdecl_err'] = deepcopy(
cpd['objectinfo']['gaia_pmdecl_err']
)
if (not np.isfinite(newcpd['objectinfo']['gaia_neighbors']) and
np.isfinite(cpd['objectinfo']['gaia_neighbors'])):
newcpd['objectinfo']['gaia_neighbors'] = deepcopy(
cpd['objectinfo']['gaia_neighbors']
)
if (not np.isfinite(newcpd['objectinfo']['gaia_closest_distarcsec']) and
np.isfinite(cpd['objectinfo']['gaia_closest_distarcsec'])):
newcpd['objectinfo']['gaia_closest_distarcsec'] = deepcopy(
cpd['objectinfo']['gaia_closest_distarcsec']
)
if (not np.isfinite(newcpd['objectinfo']['gaia_closest_gmagdiff']) and
np.isfinite(cpd['objectinfo']['gaia_closest_gmagdiff'])):
newcpd['objectinfo']['gaia_closest_gmagdiff'] = deepcopy(
cpd['objectinfo']['gaia_closest_gmagdiff']
)
if (newcpd['objectinfo']['gaia_ids'] is None and
cpd['objectinfo']['gaia_ids'] is not None):
newcpd['objectinfo']['gaia_ids'] = deepcopy(
cpd['objectinfo']['gaia_ids']
)
if (newcpd['objectinfo']['gaia_xypos'] is None and
cpd['objectinfo']['gaia_xypos'] is not None):
newcpd['objectinfo']['gaia_xypos'] = deepcopy(
cpd['objectinfo']['gaia_xypos']
)
if (newcpd['objectinfo']['gaia_mags'] is None and
cpd['objectinfo']['gaia_mags'] is not None):
newcpd['objectinfo']['gaia_mags'] = deepcopy(
cpd['objectinfo']['gaia_mags']
)
if (newcpd['objectinfo']['gaia_parallaxes'] is None and
cpd['objectinfo']['gaia_parallaxes'] is not None):
newcpd['objectinfo']['gaia_parallaxes'] = deepcopy(
cpd['objectinfo']['gaia_parallaxes']
)
if (newcpd['objectinfo']['gaia_parallax_errs'] is None and
cpd['objectinfo']['gaia_parallax_errs'] is not None):
newcpd['objectinfo']['gaia_parallax_errs'] = deepcopy(
cpd['objectinfo']['gaia_parallax_errs']
)
if (newcpd['objectinfo']['gaia_pmras'] is None and
cpd['objectinfo']['gaia_pmras'] is not None):
newcpd['objectinfo']['gaia_pmras'] = deepcopy(
cpd['objectinfo']['gaia_pmras']
)
if (newcpd['objectinfo']['gaia_pmra_errs'] is None and
cpd['objectinfo']['gaia_pmra_errs'] is not None):
newcpd['objectinfo']['gaia_pmra_errs'] = deepcopy(
cpd['objectinfo']['gaia_pmra_errs']
)
if (newcpd['objectinfo']['gaia_pmdecls'] is None and
cpd['objectinfo']['gaia_pmdecls'] is not None):
newcpd['objectinfo']['gaia_pmdecls'] = deepcopy(
cpd['objectinfo']['gaia_pmdecls']
)
if (newcpd['objectinfo']['gaia_pmdecl_errs'] is None and
cpd['objectinfo']['gaia_pmdecl_errs'] is not None):
newcpd['objectinfo']['gaia_pmdecl_errs'] = deepcopy(
cpd['objectinfo']['gaia_pmdecl_errs']
)
if (newcpd['objectinfo']['gaia_absolute_mags'] is None and
cpd['objectinfo']['gaia_absolute_mags'] is not None):
newcpd['objectinfo']['gaia_absolute_mags'] = deepcopy(
cpd['objectinfo']['gaia_absolute_mags']
)
if (newcpd['objectinfo']['gaiak_colors'] is None and
cpd['objectinfo']['gaiak_colors'] is not None):
newcpd['objectinfo']['gaiak_colors'] = deepcopy(
cpd['objectinfo']['gaiak_colors']
)
if (newcpd['objectinfo']['gaia_dists'] is None and
cpd['objectinfo']['gaia_dists'] is not None):
newcpd['objectinfo']['gaia_dists'] = deepcopy(
cpd['objectinfo']['gaia_dists']
)
#
# don't overwrite good SIMBAD info with bad
#
if ('failed' in newcpd['objectinfo']['simbad_status'] and
'ok' in cpd['objectinfo']['simbad_status']):
newcpd['objectinfo']['simbad_status'] = deepcopy(
cpd['objectinfo']['simbad_status']
)
if (newcpd['objectinfo']['simbad_nmatches'] is None and
cpd['objectinfo']['simbad_nmatches'] is not None):
newcpd['objectinfo']['simbad_nmatches'] = deepcopy(
cpd['objectinfo']['simbad_nmatches']
)
if (newcpd['objectinfo']['simbad_mainid'] is None and
cpd['objectinfo']['simbad_mainid'] is not None):
newcpd['objectinfo']['simbad_mainid'] = deepcopy(
cpd['objectinfo']['simbad_mainid']
)
if (newcpd['objectinfo']['simbad_objtype'] is None and
cpd['objectinfo']['simbad_objtype'] is not None):
newcpd['objectinfo']['simbad_objtype'] = deepcopy(
cpd['objectinfo']['simbad_objtype']
)
if (newcpd['objectinfo']['simbad_allids'] is None and
cpd['objectinfo']['simbad_allids'] is not None):
newcpd['objectinfo']['simbad_allids'] = deepcopy(
cpd['objectinfo']['simbad_allids']
)
if (newcpd['objectinfo']['simbad_distarcsec'] is None and
cpd['objectinfo']['simbad_distarcsec'] is not None):
newcpd['objectinfo']['simbad_distarcsec'] = deepcopy(
cpd['objectinfo']['simbad_distarcsec']
)
if (newcpd['objectinfo']['simbad_best_mainid'] is None and
cpd['objectinfo']['simbad_best_mainid'] is not None):
newcpd['objectinfo']['simbad_best_mainid'] = deepcopy(
cpd['objectinfo']['simbad_best_mainid']
)
if (newcpd['objectinfo']['simbad_best_objtype'] is None and
cpd['objectinfo']['simbad_best_objtype'] is not None):
newcpd['objectinfo']['simbad_best_objtype'] = deepcopy(
cpd['objectinfo']['simbad_best_objtype']
)
if (newcpd['objectinfo']['simbad_best_allids'] is None and
cpd['objectinfo']['simbad_best_allids'] is not None):
newcpd['objectinfo']['simbad_best_allids'] = deepcopy(
cpd['objectinfo']['simbad_best_allids']
)
if (newcpd['objectinfo']['simbad_best_distarcsec'] is None and
cpd['objectinfo']['simbad_best_distarcsec'] is not None):
newcpd['objectinfo']['simbad_best_distarcsec'] = deepcopy(
cpd['objectinfo']['simbad_best_distarcsec']
)
#
# update the objectinfo dict
#
cpd.update(newcpd)
cpd['objectinfo']['objecttags'] = objecttags
cpd['comments'] = comments
newcpf = _write_checkplot_picklefile(cpd, outfile=cpf)
return newcpf
|
def _read_pklc(lcfile):
'''
This just reads a light curve pickle file.
Parameters
----------
lcfile : str
The file name of the pickle to open.
Returns
-------
dict
This returns an lcdict.
'''
if lcfile.endswith('.gz'):
try:
with gzip.open(lcfile,'rb') as infd:
lcdict = pickle.load(infd)
except UnicodeDecodeError:
with gzip.open(lcfile,'rb') as infd:
lcdict = pickle.load(infd, encoding='latin1')
else:
try:
with open(lcfile,'rb') as infd:
lcdict = pickle.load(infd)
except UnicodeDecodeError:
with open(lcfile,'rb') as infd:
lcdict = pickle.load(infd, encoding='latin1')
return lcdict
|
def _check_extmodule(module, formatkey):
'''This imports the module specified.
Used to dynamically import Python modules that are needed to support LC
formats not natively supported by astrobase.
Parameters
----------
module : str
This is either:
- a Python module import path, e.g. 'astrobase.lcproc.catalogs' or
- a path to a Python file, e.g. '/astrobase/hatsurveys/hatlc.py'
that contains the Python module that contains functions used to open
(and optionally normalize) a custom LC format that's not natively
supported by astrobase.
formatkey : str
A str used as the unique ID of this LC format for all lcproc functions
and can be used to look it up later and import the correct functions
needed to support it for lcproc operations. For example, we use
        'kep-fits' as the specifier for Kepler FITS light curves, which can be
read by the `astrobase.astrokep.read_kepler_fitslc` function as
specified by the `<astrobase install path>/data/lcformats/kep-fits.json`
LC format specification JSON.
Returns
-------
Python module
This returns a Python module if it's able to successfully import it.
'''
try:
if os.path.exists(module):
sys.path.append(os.path.dirname(module))
importedok = importlib.import_module(
os.path.basename(module.replace('.py',''))
)
else:
importedok = importlib.import_module(module)
except Exception as e:
LOGEXCEPTION('could not import the module: %s for LC format: %s. '
'check the file path or fully qualified module name?'
% (module, formatkey))
importedok = False
return importedok
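# Usage sketch (illustrative, not part of the module): _check_extmodule can
# import a reader module by its dotted path; 'hat-csv' is just an example
# format key here.
hatlc_module = _check_extmodule('astrobase.hatsurveys.hatlc', 'hat-csv')
if hatlc_module:
    print('imported LC reader module:', hatlc_module.__name__)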
|
def register_lcformat(formatkey,
fileglob,
timecols,
magcols,
errcols,
readerfunc_module,
readerfunc,
readerfunc_kwargs=None,
normfunc_module=None,
normfunc=None,
normfunc_kwargs=None,
magsarefluxes=False,
overwrite_existing=False,
lcformat_dir='~/.astrobase/lcformat-jsons'):
'''This adds a new LC format to the astrobase LC format registry.
Allows handling of custom format light curves for astrobase lcproc
drivers. Once the format is successfully registered, light curves should
work transparently with all of the functions in this module, by simply
calling them with the `formatkey` in the `lcformat` keyword argument.
LC format specifications are generated as JSON files. astrobase comes with
several of these in `<astrobase install path>/data/lcformats`. LC formats
you add by using this function will have their specifiers written to the
`~/.astrobase/lcformat-jsons` directory in your home directory.
Parameters
----------
formatkey : str
A str used as the unique ID of this LC format for all lcproc functions
and can be used to look it up later and import the correct functions
needed to support it for lcproc operations. For example, we use
        'kep-fits' as the specifier for Kepler FITS light curves, which can be
read by the `astrobase.astrokep.read_kepler_fitslc` function as
specified by the `<astrobase install path>/data/lcformats/kep-fits.json`
LC format specification JSON produced by `register_lcformat`.
fileglob : str
The default UNIX fileglob to use to search for light curve files in this
LC format. This is a string like '*-whatever-???-*.*??-.lc'.
timecols,magcols,errcols : list of str
These are all lists of strings indicating which keys in the lcdict
produced by your `lcreader_func` that will be extracted and used by
lcproc functions for processing. The lists must all have the same
dimensions, e.g. if timecols = ['timecol1','timecol2'], then magcols
must be something like ['magcol1','magcol2'] and errcols must be
something like ['errcol1', 'errcol2']. This allows you to process
multiple apertures or multiple types of measurements in one go.
Each element in these lists can be a simple key, e.g. 'time' (which
would correspond to lcdict['time']), or a composite key,
e.g. 'aperture1.times.rjd' (which would correspond to
lcdict['aperture1']['times']['rjd']). See the examples in the lcformat
specification JSON files in `<astrobase install path>/data/lcformats`.
readerfunc_module : str
This is either:
- a Python module import path, e.g. 'astrobase.lcproc.catalogs' or
- a path to a Python file, e.g. '/astrobase/hatsurveys/hatlc.py'
that contains the Python module that contains functions used to open
(and optionally normalize) a custom LC format that's not natively
supported by astrobase.
readerfunc : str
This is the function name in `readerfunc_module` to use to read light
curves in the custom format. This MUST always return a dictionary (the
'lcdict') with the following signature (the keys listed below are
required, but others are allowed)::
{'objectid': this object's identifier as a string,
'objectinfo':{'ra': this object's right ascension in decimal deg,
'decl': this object's declination in decimal deg,
'ndet': the number of observations in this LC,
'objectid': the object ID again for legacy reasons},
...other time columns, mag columns go in as their own keys}
    readerfunc_kwargs : dict or None
        This is a dictionary containing any kwargs to pass through to
        the light curve reader function.
normfunc_module : str or None
This is either:
- a Python module import path, e.g. 'astrobase.lcproc.catalogs' or
- a path to a Python file, e.g. '/astrobase/hatsurveys/hatlc.py'
- None, in which case we'll use default normalization
that contains the Python module that contains functions used to
normalize a custom LC format that's not natively supported by astrobase.
normfunc : str or None
This is the function name in `normfunc_module` to use to normalize light
curves in the custom format. If None, the default normalization method
used by lcproc is to find gaps in the time-series, normalize
measurements grouped by these gaps to zero, then normalize the entire
magnitude time series to global time series median using the
`astrobase.lcmath.normalize_magseries` function.
If this is provided, the normalization function should take and return
an lcdict of the same form as that produced by `readerfunc` above. For
an example of a specific normalization function, see
`normalize_lcdict_by_inst` in the `astrobase.hatsurveys.hatlc` module.
normfunc_kwargs : dict or None
This is a dictionary containing any kwargs to pass through to
the light curve normalization function.
magsarefluxes : bool
If this is True, then all lcproc functions will treat the measurement
columns in the lcdict produced by your `readerfunc` as flux instead of
mags, so things like default normalization and sigma-clipping will be
done correctly. If this is False, magnitudes will be treated as
magnitudes.
overwrite_existing : bool
If this is True, this function will overwrite any existing LC format
specification JSON with the same name as that provided in the
`formatkey` arg. This can be used to update LC format specifications
while keeping the `formatkey` the same.
lcformat_dir : str
        This specifies the directory where the LC format specification JSON
produced by this function will be written. By default, this goes to the
`.astrobase/lcformat-jsons` directory in your home directory.
Returns
-------
str
Returns the file path to the generated LC format specification JSON
file.
'''
LOGINFO('adding %s to LC format registry...' % formatkey)
# search for the lcformat_dir and create it if it doesn't exist
lcformat_dpath = os.path.abspath(
os.path.expanduser(lcformat_dir)
)
if not os.path.exists(lcformat_dpath):
os.makedirs(lcformat_dpath)
lcformat_jsonpath = os.path.join(lcformat_dpath,'%s.json' % formatkey)
if os.path.exists(lcformat_jsonpath) and not overwrite_existing:
LOGERROR('There is an existing lcformat JSON: %s '
'for this formatkey: %s and '
'overwrite_existing = False, skipping...'
% (lcformat_jsonpath, formatkey))
return None
# see if we can import the reader module
readermodule = _check_extmodule(readerfunc_module, formatkey)
if not readermodule:
LOGERROR("could not import the required "
"module: %s to read %s light curves" %
(readerfunc_module, formatkey))
return None
# then, get the function we need to read the light curve
try:
getattr(readermodule, readerfunc)
readerfunc_in = readerfunc
except AttributeError:
        LOGEXCEPTION('Could not get the specified reader '
                     'function: %s for lcformat: %s '
                     'from module: %s'
                     % (readerfunc, formatkey, readerfunc_module))
raise
# see if we can import the normalization module
if normfunc_module:
normmodule = _check_extmodule(normfunc_module, formatkey)
if not normmodule:
LOGERROR("could not import the required "
"module: %s to normalize %s light curves" %
(normfunc_module, formatkey))
return None
else:
normmodule = None
# finally, get the function we need to normalize the light curve
if normfunc_module and normfunc:
try:
getattr(normmodule, normfunc)
normfunc_in = normfunc
except AttributeError:
LOGEXCEPTION('Could not get the specified norm '
'function: %s for lcformat: %s '
'from module: %s'
% (normfunc, formatkey, normfunc_module))
raise
else:
normfunc_in = None
# if we made it to here, then everything's good. generate the JSON
# structure
formatdict = {'fileglob':fileglob,
'timecols':timecols,
'magcols':magcols,
'errcols':errcols,
'magsarefluxes':magsarefluxes,
'lcreader_module':readerfunc_module,
'lcreader_func':readerfunc_in,
'lcreader_kwargs':readerfunc_kwargs,
'lcnorm_module':normfunc_module,
'lcnorm_func':normfunc_in,
'lcnorm_kwargs':normfunc_kwargs}
# write this to the lcformat directory
with open(lcformat_jsonpath,'w') as outfd:
json.dump(formatdict, outfd, indent=4)
return lcformat_jsonpath
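# A minimal registration sketch (illustrative): the module path
# 'mypackage.mylcreader', the function names 'read_my_lc' / 'normalize_my_lc',
# and the 'my-csv' format key below are hypothetical placeholders for your own
# reader/normalizer implementation.
my_lcformat_json = register_lcformat(
    'my-csv',
    '*-mylc.csv',
    timecols=['time'],
    magcols=['mag'],
    errcols=['err'],
    readerfunc_module='mypackage.mylcreader',
    readerfunc='read_my_lc',
    normfunc_module='mypackage.mylcreader',
    normfunc='normalize_my_lc',
    magsarefluxes=False,
    overwrite_existing=True,
)
print('wrote LC format JSON to:', my_lcformat_json)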
|
def get_lcformat(formatkey, use_lcformat_dir=None):
'''This loads an LC format description from a previously-saved JSON file.
Parameters
----------
formatkey : str
The key used to refer to the LC format. This is part of the JSON file's
name, e.g. the format key 'hat-csv' maps to the format JSON file:
'<astrobase install path>/data/lcformats/hat-csv.json'.
use_lcformat_dir : str or None
If provided, must be the path to a directory that contains the
        corresponding lcformat JSON file for `formatkey`. This function will
        look for the lcformat JSON corresponding to the given `formatkey`:
        - first, in the directory specified in this kwarg (if provided),
- if not found there, in the home directory: ~/.astrobase/lcformat-jsons
- if not found there, in: <astrobase install path>/data/lcformats
Returns
-------
tuple
A tuple of the following form is returned::
(fileglob : the file glob of the associated LC files,
readerfunc_in : the imported Python function for reading LCs,
timecols : list of time col keys to get from the lcdict,
         magcols : list of mag col keys to get from the lcdict,
errcols : list of err col keys to get from the lcdict,
magsarefluxes : True if the measurements are fluxes not mags,
normfunc_in : the imported Python function for normalizing LCs)
All `astrobase.lcproc` functions can then use this tuple to dynamically
import your LC reader and normalization functions to work with your LC
format transparently.
'''
if isinstance(use_lcformat_dir, str):
# look for the lcformat JSON
lcformat_jsonpath = os.path.join(
use_lcformat_dir,
'%s.json' % formatkey
)
if not os.path.exists(lcformat_jsonpath):
lcformat_jsonpath = os.path.join(
os.path.expanduser('~/.astrobase/lcformat-jsons'),
'%s.json' % formatkey
)
if not os.path.exists(lcformat_jsonpath):
install_path = os.path.dirname(__file__)
install_path = os.path.abspath(
os.path.join(install_path, '..', 'data','lcformats')
)
lcformat_jsonpath = os.path.join(
install_path,
'%s.json' % formatkey
)
if not os.path.exists(lcformat_jsonpath):
LOGERROR('could not find an lcformat JSON '
'for formatkey: %s in any of: '
'use_lcformat_dir, home directory, '
'astrobase installed data directory'
% formatkey)
return None
else:
lcformat_jsonpath = os.path.join(
os.path.expanduser('~/.astrobase/lcformat-jsons'),
'%s.json' % formatkey
)
if not os.path.exists(lcformat_jsonpath):
install_path = os.path.dirname(__file__)
install_path = os.path.abspath(
os.path.join(install_path, '..', 'data','lcformats')
)
lcformat_jsonpath = os.path.join(
install_path,
'%s.json' % formatkey
)
if not os.path.exists(lcformat_jsonpath):
LOGERROR('could not find an lcformat JSON '
'for formatkey: %s in any of: '
'use_lcformat_dir, home directory, '
'astrobase installed data directory'
% formatkey)
return None
# load the found lcformat JSON
with open(lcformat_jsonpath) as infd:
lcformatdict = json.load(infd)
readerfunc_module = lcformatdict['lcreader_module']
readerfunc = lcformatdict['lcreader_func']
readerfunc_kwargs = lcformatdict['lcreader_kwargs']
normfunc_module = lcformatdict['lcnorm_module']
normfunc = lcformatdict['lcnorm_func']
normfunc_kwargs = lcformatdict['lcnorm_kwargs']
fileglob = lcformatdict['fileglob']
timecols = lcformatdict['timecols']
magcols = lcformatdict['magcols']
errcols = lcformatdict['errcols']
magsarefluxes = lcformatdict['magsarefluxes']
# import all the required bits
# see if we can import the reader module
readermodule = _check_extmodule(readerfunc_module, formatkey)
if not readermodule:
LOGERROR("could not import the required "
"module: %s to read %s light curves" %
(readerfunc_module, formatkey))
return None
# then, get the function we need to read the light curve
try:
readerfunc_in = getattr(readermodule, readerfunc)
except AttributeError:
        LOGEXCEPTION('Could not get the specified reader '
                     'function: %s for lcformat: %s '
                     'from module: %s'
                     % (readerfunc, formatkey, readerfunc_module))
raise
# see if we can import the normalization module
if normfunc_module:
normmodule = _check_extmodule(normfunc_module, formatkey)
if not normmodule:
LOGERROR("could not import the required "
"module: %s to normalize %s light curves" %
(normfunc_module, formatkey))
return None
else:
normmodule = None
# finally, get the function we need to normalize the light curve
if normfunc_module and normfunc:
try:
normfunc_in = getattr(normmodule, normfunc)
except AttributeError:
            LOGEXCEPTION('Could not get the specified norm '
                         'function: %s for lcformat: %s '
                         'from module: %s'
                         % (normfunc, formatkey, normfunc_module))
raise
else:
normfunc_in = None
# add in any optional kwargs that need to be there for readerfunc
if isinstance(readerfunc_kwargs, dict):
readerfunc_in = partial(readerfunc_in, **readerfunc_kwargs)
# add in any optional kwargs that need to be there for normfunc
if normfunc_in is not None:
if isinstance(normfunc_kwargs, dict):
normfunc_in = partial(normfunc_in, **normfunc_kwargs)
# assemble the return tuple
# this can be used directly by other lcproc functions
returntuple = (
fileglob,
readerfunc_in,
timecols,
magcols,
errcols,
magsarefluxes,
normfunc_in,
)
return returntuple
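# Usage sketch: load the format tuple for the (hypothetical) 'my-csv' format
# registered above and read a light curve with it. The LC path below is a
# placeholder.
formatinfo = get_lcformat('my-csv')
if formatinfo is not None:
    (fileglob, readerfunc,
     timecols, magcols, errcols,
     magsarefluxes, normfunc) = formatinfo
    lcdict = readerfunc('/path/to/object1-mylc.csv')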
|
def ec2_ssh(ip_address,
keypem_file,
username='ec2-user',
raiseonfail=False):
"""This opens an SSH connection to the EC2 instance at `ip_address`.
Parameters
----------
ip_address : str
IP address of the AWS EC2 instance to connect to.
keypem_file : str
The path to the keypair PEM file generated by AWS to allow SSH
connections.
username : str
The username to use to login to the EC2 instance.
raiseonfail : bool
If True, will re-raise whatever Exception caused the operation to fail
and break out immediately.
Returns
-------
paramiko.SSHClient
This has all the usual `paramiko` functionality:
- Use `SSHClient.exec_command(command, environment=None)` to exec a
shell command.
- Use `SSHClient.open_sftp()` to get a `SFTPClient` for the server. Then
call SFTPClient.get() and .put() to copy files from and to the server.
"""
c = paramiko.client.SSHClient()
c.load_system_host_keys()
c.set_missing_host_key_policy(paramiko.client.AutoAddPolicy)
# load the private key from the AWS keypair pem
privatekey = paramiko.RSAKey.from_private_key_file(keypem_file)
# connect to the server
try:
        c.connect(ip_address,
                  pkey=privatekey,
                  username=username)
return c
except Exception as e:
LOGEXCEPTION('could not connect to EC2 instance at %s '
'using keyfile: %s and user: %s' %
(ip_address, keypem_file, username))
if raiseonfail:
raise
return None
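# Usage sketch: connect to a worker instance and run a command over SSH. The
# IP address and PEM file path below are placeholders.
ssh_client = ec2_ssh('203.0.113.10', '/path/to/aws-keypair.pem')
if ssh_client is not None:
    stdin, stdout, stderr = ssh_client.exec_command('uname -a')
    print(stdout.read().decode())
    ssh_client.close()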
|
def s3_get_file(bucket,
filename,
local_file,
altexts=None,
client=None,
raiseonfail=False):
"""This gets a file from an S3 bucket.
Parameters
----------
bucket : str
The AWS S3 bucket name.
filename : str
The full filename of the file to get from the bucket
local_file : str
Path to where the downloaded file will be stored.
altexts : None or list of str
If not None, this is a list of alternate extensions to try for the file
other than the one provided in `filename`. For example, to get anything
that's an .sqlite where .sqlite.gz is expected, use altexts=[''] to
strip the .gz.
client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its operations. Alternatively, pass in an existing `boto3.Client`
instance to re-use it here.
raiseonfail : bool
If True, will re-raise whatever Exception caused the operation to fail
and break out immediately.
Returns
-------
str
Path to the downloaded filename or None if the download was
unsuccessful.
"""
if not client:
client = boto3.client('s3')
try:
client.download_file(bucket, filename, local_file)
return local_file
except Exception as e:
if altexts is not None:
for alt_extension in altexts:
split_ext = os.path.splitext(filename)
check_file = split_ext[0] + alt_extension
try:
client.download_file(
bucket,
check_file,
local_file.replace(split_ext[-1],
alt_extension)
)
return local_file.replace(split_ext[-1],
alt_extension)
except Exception as e:
pass
else:
LOGEXCEPTION('could not download s3://%s/%s' % (bucket, filename))
if raiseonfail:
raise
return None
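# Usage sketch: download a (hypothetical) pickle from S3, falling back to the
# uncompressed key if the .gz version is missing.
local_copy = s3_get_file('my-lcproc-bucket',
                         'checkplots/checkplot-object1.pkl.gz',
                         'checkplot-object1.pkl.gz',
                         altexts=[''])
if local_copy is not None:
    print('downloaded to:', local_copy)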
|
def s3_get_url(url,
altexts=None,
client=None,
raiseonfail=False):
"""This gets a file from an S3 bucket based on its s3:// URL.
Parameters
----------
url : str
S3 URL to download. This should begin with 's3://'.
altexts : None or list of str
If not None, this is a list of alternate extensions to try for the file
other than the one provided in `filename`. For example, to get anything
that's an .sqlite where .sqlite.gz is expected, use altexts=[''] to
strip the .gz.
client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its operations. Alternatively, pass in an existing `boto3.Client`
instance to re-use it here.
raiseonfail : bool
If True, will re-raise whatever Exception caused the operation to fail
and break out immediately.
Returns
-------
str
Path to the downloaded filename or None if the download was
unsuccessful. The file will be downloaded into the current working
directory and will have a filename == basename of the file on S3.
"""
bucket_item = url.replace('s3://','')
bucket_item = bucket_item.split('/')
bucket = bucket_item[0]
filekey = '/'.join(bucket_item[1:])
return s3_get_file(bucket,
filekey,
bucket_item[-1],
altexts=altexts,
client=client,
raiseonfail=raiseonfail)
|
def s3_put_file(local_file, bucket, client=None, raiseonfail=False):
"""This uploads a file to S3.
Parameters
----------
local_file : str
Path to the file to upload to S3.
bucket : str
The AWS S3 bucket to upload the file to.
client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its operations. Alternatively, pass in an existing `boto3.Client`
instance to re-use it here.
raiseonfail : bool
If True, will re-raise whatever Exception caused the operation to fail
and break out immediately.
Returns
-------
str or None
If the file upload is successful, returns the s3:// URL of the uploaded
file. If it failed, will return None.
"""
if not client:
client = boto3.client('s3')
try:
client.upload_file(local_file, bucket, os.path.basename(local_file))
return 's3://%s/%s' % (bucket, os.path.basename(local_file))
except Exception as e:
LOGEXCEPTION('could not upload %s to bucket: %s' % (local_file,
bucket))
if raiseonfail:
raise
return None
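# Usage sketch: upload a local result file to a (hypothetical) bucket; the
# returned s3:// URL can later be passed to s3_get_url to retrieve it.
uploaded_url = s3_put_file('periodfinding-result.pkl', 'my-lcproc-bucket')
if uploaded_url is not None:
    print('uploaded to:', uploaded_url)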
|
def s3_delete_file(bucket, filename, client=None, raiseonfail=False):
"""This deletes a file from S3.
Parameters
----------
bucket : str
The AWS S3 bucket to delete the file from.
filename : str
The full file name of the file to delete, including any prefixes.
client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its operations. Alternatively, pass in an existing `boto3.Client`
instance to re-use it here.
raiseonfail : bool
If True, will re-raise whatever Exception caused the operation to fail
and break out immediately.
Returns
-------
str or None
If the file was successfully deleted, will return the delete-marker
(https://docs.aws.amazon.com/AmazonS3/latest/dev/DeleteMarker.html). If
it wasn't, returns None
"""
if not client:
client = boto3.client('s3')
try:
resp = client.delete_object(Bucket=bucket, Key=filename)
if not resp:
LOGERROR('could not delete file %s from bucket %s' % (filename,
bucket))
else:
return resp['DeleteMarker']
except Exception as e:
LOGEXCEPTION('could not delete file %s from bucket %s' % (filename,
bucket))
if raiseonfail:
raise
return None
|
def sqs_create_queue(queue_name, options=None, client=None):
"""
This creates an SQS queue.
Parameters
----------
queue_name : str
The name of the queue to create.
options : dict or None
A dict of options indicate extra attributes the queue should have.
See the SQS docs for details. If None, no custom attributes will be
attached to the queue.
client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its operations. Alternatively, pass in an existing `boto3.Client`
instance to re-use it here.
Returns
-------
dict
This returns a dict of the form::
{'url': SQS URL of the queue,
'name': name of the queue}
"""
if not client:
client = boto3.client('sqs')
try:
if isinstance(options, dict):
resp = client.create_queue(QueueName=queue_name, Attributes=options)
else:
resp = client.create_queue(QueueName=queue_name)
if resp is not None:
return {'url':resp['QueueUrl'],
'name':queue_name}
else:
LOGERROR('could not create the specified queue: %s with options: %s'
% (queue_name, options))
return None
except Exception as e:
LOGEXCEPTION('could not create the specified queue: %s with options: %s'
% (queue_name, options))
return None
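# Usage sketch: create a work queue following the lcproc_queue_<action> naming
# convention described in sqs_get_item below.
queue_info = sqs_create_queue('lcproc_queue_runpf')
if queue_info is not None:
    print('queue URL:', queue_info['url'])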
|
def sqs_delete_queue(queue_url, client=None):
"""This deletes an SQS queue given its URL
Parameters
----------
queue_url : str
The SQS URL of the queue to delete.
client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its operations. Alternatively, pass in an existing `boto3.Client`
instance to re-use it here.
Returns
-------
bool
True if the queue was deleted successfully. False otherwise.
"""
if not client:
client = boto3.client('sqs')
try:
client.delete_queue(QueueUrl=queue_url)
return True
except Exception as e:
LOGEXCEPTION('could not delete the specified queue: %s'
% (queue_url,))
return False
|
def sqs_put_item(queue_url,
item,
delay_seconds=0,
client=None,
raiseonfail=False):
"""This pushes a dict serialized to JSON to the specified SQS queue.
Parameters
----------
queue_url : str
The SQS URL of the queue to push the object to.
item : dict
The dict passed in here will be serialized to JSON.
delay_seconds : int
The amount of time in seconds the pushed item will be held before going
'live' and being visible to all queue consumers.
client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its operations. Alternatively, pass in an existing `boto3.Client`
instance to re-use it here.
raiseonfail : bool
If True, will re-raise whatever Exception caused the operation to fail
and break out immediately.
Returns
-------
boto3.Response or None
If the item was successfully put on the queue, will return the response
from the service. If it wasn't, will return None.
"""
if not client:
client = boto3.client('sqs')
try:
json_msg = json.dumps(item)
resp = client.send_message(
QueueUrl=queue_url,
MessageBody=json_msg,
DelaySeconds=delay_seconds,
)
if not resp:
LOGERROR('could not send item to queue: %s' % queue_url)
return None
else:
return resp
except Exception as e:
LOGEXCEPTION('could not send item to queue: %s' % queue_url)
if raiseonfail:
raise
return None
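# Producer-side sketch: push a (hypothetical) work item onto the queue created
# above; the S3 target and bucket names are placeholders.
work_item = {
    'target': 's3://my-lcproc-bucket/lightcurves/object1-lc.pkl',
    'action': 'runpf',
    'args': (),
    'kwargs': {'pfmethods': ['gls', 'pdm']},
    'outbucket': 'my-lcproc-results',
    'outqueue': 'lcproc_queue_runpf_results',
}
put_resp = sqs_put_item(queue_info['url'], work_item)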
|
def sqs_get_item(queue_url,
max_items=1,
wait_time_seconds=5,
client=None,
raiseonfail=False):
"""This gets a single item from the SQS queue.
The `queue_url` is composed of some internal SQS junk plus a
`queue_name`. For our purposes (`lcproc_aws.py`), the queue name will be
something like::
lcproc_queue_<action>
where action is one of::
runcp
runpf
The item is always a JSON object::
{'target': S3 bucket address of the file to process,
'action': the action to perform on the file ('runpf', 'runcp', etc.)
'args': the action's args as a tuple (not including filename, which is
generated randomly as a temporary local file),
'kwargs': the action's kwargs as a dict,
     'outbucket': S3 bucket to write the result to,
'outqueue': SQS queue to write the processed item's info to (optional)}
The action MUST match the <action> in the queue name for this item to be
processed.
Parameters
----------
queue_url : str
The SQS URL of the queue to get messages from.
max_items : int
The number of items to pull from the queue in this request.
wait_time_seconds : int
This specifies how long the function should block until a message is
received on the queue. If the timeout expires, an empty list will be
returned. If the timeout doesn't expire, the function will return a list
of items received (up to `max_items`).
client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its operations. Alternatively, pass in an existing `boto3.Client`
instance to re-use it here.
raiseonfail : bool
If True, will re-raise whatever Exception caused the operation to fail
and break out immediately.
Returns
-------
list of dicts or None
For each item pulled from the queue in this request (up to `max_items`),
a dict will be deserialized from the retrieved JSON, containing the
message items and various metadata. The most important item of the
metadata is the `receipt_handle`, which can be used to acknowledge
receipt of all items in this request (see `sqs_delete_item` below).
If the queue pull fails outright, returns None. If no messages are
available for this queue pull, returns an empty list.
"""
if not client:
client = boto3.client('sqs')
try:
resp = client.receive_message(
QueueUrl=queue_url,
AttributeNames=['All'],
MaxNumberOfMessages=max_items,
WaitTimeSeconds=wait_time_seconds
)
if not resp:
LOGERROR('could not receive messages from queue: %s' %
queue_url)
else:
messages = []
for msg in resp.get('Messages',[]):
try:
messages.append({
'id':msg['MessageId'],
'receipt_handle':msg['ReceiptHandle'],
'md5':msg['MD5OfBody'],
'attributes':msg['Attributes'],
'item':json.loads(msg['Body']),
})
except Exception as e:
LOGEXCEPTION(
'could not deserialize message ID: %s, body: %s' %
(msg['MessageId'], msg['Body'])
)
continue
return messages
except Exception as e:
LOGEXCEPTION('could not get items from queue: %s' % queue_url)
if raiseonfail:
raise
return None
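# Consumer-side sketch: pull an item from the queue, act on it, and only then
# acknowledge it by deleting the message (see sqs_delete_item below).
messages = sqs_get_item(queue_info['url'], max_items=1, wait_time_seconds=5)
if messages:
    for message in messages:
        item = message['item']
        print('processing target:', item['target'])
        # ... run the requested action ('runpf', 'runcp', etc.) here ...
        sqs_delete_item(queue_info['url'], message['receipt_handle'])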
|
def sqs_delete_item(queue_url,
receipt_handle,
client=None,
raiseonfail=False):
"""This deletes a message from the queue, effectively acknowledging its
receipt.
Call this only when all messages retrieved from the queue have been
processed, since this will prevent redelivery of these messages to other
    queue workers pulling from the same queue channel.
Parameters
----------
queue_url : str
The SQS URL of the queue where we got the messages from. This should be
the same queue used to retrieve the messages in `sqs_get_item`.
receipt_handle : str
The receipt handle of the queue message that we're responding to, and
will acknowledge receipt of. This will be present in each message
retrieved using `sqs_get_item`.
client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its operations. Alternatively, pass in an existing `boto3.Client`
instance to re-use it here.
raiseonfail : bool
If True, will re-raise whatever Exception caused the operation to fail
and break out immediately.
Returns
-------
Nothing.
"""
if not client:
client = boto3.client('sqs')
try:
client.delete_message(
QueueUrl=queue_url,
ReceiptHandle=receipt_handle
)
except Exception as e:
LOGEXCEPTION(
'could not delete message with receipt handle: '
'%s from queue: %s' % (receipt_handle, queue_url)
)
if raiseonfail:
raise
|
def make_ec2_nodes(
security_groupid,
subnet_id,
keypair_name,
iam_instance_profile_arn,
launch_instances=1,
ami='ami-04681a1dbd79675a5',
instance='t3.micro',
ebs_optimized=True,
user_data=None,
wait_until_up=True,
client=None,
raiseonfail=False,
):
"""This makes new EC2 worker nodes.
This requires a security group ID attached to a VPC config and subnet, a
keypair generated beforehand, and an IAM role ARN for the instance. See:
https://docs.aws.amazon.com/cli/latest/userguide/tutorial-ec2-ubuntu.html
Use `user_data` to launch tasks on instance launch.
Parameters
----------
security_groupid : str
The security group ID of the AWS VPC where the instances will be
launched.
subnet_id : str
The subnet ID of the AWS VPC where the instances will be
launched.
keypair_name : str
The name of the keypair to be used to allow SSH access to all instances
launched here. This corresponds to an already downloaded AWS keypair PEM
file.
iam_instance_profile_arn : str
The ARN string corresponding to the AWS instance profile that describes
the permissions the launched instances have to access other AWS
resources. Set this up in AWS IAM.
launch_instances : int
The number of instances to launch in this request.
ami : str
The Amazon Machine Image ID that describes the OS the instances will use
after launch. The default ID is Amazon Linux 2 in the US East region.
instance : str
The instance type to launch. See the following URL for a list of IDs:
https://aws.amazon.com/ec2/pricing/on-demand/
ebs_optimized : bool
If True, will enable EBS optimization to speed up IO. This is usually
True for all instances made available in the last couple of years.
user_data : str or None
This is either the path to a file on disk that contains a shell-script
or a string containing a shell-script that will be executed by root
right after the instance is launched. Use to automatically set up
workers and queues. If None, will not execute anything at instance
start up.
wait_until_up : bool
If True, will not return from this function until all launched instances
are verified as running by AWS.
client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its operations. Alternatively, pass in an existing `boto3.Client`
instance to re-use it here.
raiseonfail : bool
If True, will re-raise whatever Exception caused the operation to fail
and break out immediately.
Returns
-------
dict
Returns launched instance info as a dict, keyed by instance ID.
"""
if not client:
client = boto3.client('ec2')
# get the user data from a string or a file
# note: boto3 will base64 encode this itself
if isinstance(user_data, str) and os.path.exists(user_data):
with open(user_data,'r') as infd:
udata = infd.read()
elif isinstance(user_data, str):
udata = user_data
else:
udata = (
'#!/bin/bash\necho "No user data provided. '
'Launched instance at: %s UTC"' % datetime.utcnow().isoformat()
)
# fire the request
try:
resp = client.run_instances(
ImageId=ami,
InstanceType=instance,
SecurityGroupIds=[
security_groupid,
],
SubnetId=subnet_id,
UserData=udata,
IamInstanceProfile={'Arn':iam_instance_profile_arn},
InstanceInitiatedShutdownBehavior='terminate',
KeyName=keypair_name,
MaxCount=launch_instances,
MinCount=launch_instances,
EbsOptimized=ebs_optimized,
)
if not resp:
LOGERROR('could not launch requested instance')
return None
else:
instance_dict = {}
instance_list = resp.get('Instances',[])
if len(instance_list) > 0:
for instance in instance_list:
LOGINFO('launched instance ID: %s of type: %s at: %s. '
'current state: %s'
% (instance['InstanceId'],
instance['InstanceType'],
instance['LaunchTime'].isoformat(),
instance['State']['Name']))
instance_dict[instance['InstanceId']] = {
'type':instance['InstanceType'],
'launched':instance['LaunchTime'],
'state':instance['State']['Name'],
'info':instance
}
# if we're waiting until we're up, then do so
if wait_until_up:
ready_instances = []
LOGINFO('waiting until launched instances are up...')
ntries = 5
curr_try = 0
                # loop until we run out of tries or all instances are running
                while ( (curr_try < ntries) and
                        (len(ready_instances) <
                         len(list(instance_dict.keys()))) ):
resp = client.describe_instances(
InstanceIds=list(instance_dict.keys()),
)
if len(resp['Reservations']) > 0:
for resv in resp['Reservations']:
if len(resv['Instances']) > 0:
for instance in resv['Instances']:
if instance['State']['Name'] == 'running':
ready_instances.append(
instance['InstanceId']
)
instance_dict[
instance['InstanceId']
]['state'] = 'running'
instance_dict[
instance['InstanceId']
]['ip'] = instance['PublicIpAddress']
instance_dict[
instance['InstanceId']
]['info'] = instance
# sleep for a bit so we don't hit the API too often
curr_try = curr_try + 1
time.sleep(5.0)
if len(ready_instances) == len(list(instance_dict.keys())):
LOGINFO('all instances now up.')
else:
LOGWARNING(
'reached maximum number of tries for instance status, '
'not all instances may be up.'
)
return instance_dict
except ClientError as e:
LOGEXCEPTION('could not launch requested instance')
if raiseonfail:
raise
return None
except Exception as e:
LOGEXCEPTION('could not launch requested instance')
if raiseonfail:
raise
return None
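# Usage sketch: launch two worker nodes. The security group, subnet, keypair,
# and instance profile ARN below are placeholders; the user_data script runs
# as root at first boot.
worker_startup_script = '''#!/bin/bash
echo "starting lcproc worker at $(date -u)"
'''
ec2_nodes = make_ec2_nodes(
    'sg-0123456789abcdef0',
    'subnet-0123456789abcdef0',
    'my-aws-keypair',
    'arn:aws:iam::123456789012:instance-profile/my-lcproc-worker',
    launch_instances=2,
    instance='t3.micro',
    user_data=worker_startup_script,
    wait_until_up=True,
)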
|
def delete_ec2_nodes(
instance_id_list,
client=None
):
"""This deletes EC2 nodes and terminates the instances.
Parameters
----------
instance_id_list : list of str
A list of EC2 instance IDs to terminate.
client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its operations. Alternatively, pass in an existing `boto3.Client`
instance to re-use it here.
Returns
-------
Nothing.
"""
if not client:
client = boto3.client('ec2')
resp = client.terminate_instances(
InstanceIds=instance_id_list
)
return resp
|
def make_spot_fleet_cluster(
security_groupid,
subnet_id,
keypair_name,
iam_instance_profile_arn,
spot_fleet_iam_role,
target_capacity=20,
spot_price=0.4,
expires_days=7,
allocation_strategy='lowestPrice',
instance_types=SPOT_INSTANCE_TYPES,
instance_weights=None,
instance_ami='ami-04681a1dbd79675a5',
instance_user_data=None,
instance_ebs_optimized=True,
wait_until_up=True,
client=None,
raiseonfail=False
):
"""This makes an EC2 spot-fleet cluster.
This requires a security group ID attached to a VPC config and subnet, a
keypair generated beforehand, and an IAM role ARN for the instance. See:
https://docs.aws.amazon.com/cli/latest/userguide/tutorial-ec2-ubuntu.html
Use `user_data` to launch tasks on instance launch.
Parameters
----------
security_groupid : str
The security group ID of the AWS VPC where the instances will be
launched.
subnet_id : str
The subnet ID of the AWS VPC where the instances will be
launched.
keypair_name : str
The name of the keypair to be used to allow SSH access to all instances
launched here. This corresponds to an already downloaded AWS keypair PEM
file.
iam_instance_profile_arn : str
The ARN string corresponding to the AWS instance profile that describes
the permissions the launched instances have to access other AWS
resources. Set this up in AWS IAM.
spot_fleet_iam_role : str
This is the name of AWS IAM role that allows the Spot Fleet Manager to
scale up and down instances based on demand and instances failing,
etc. Set this up in IAM.
target_capacity : int
The number of instances to target in the fleet request. The fleet
manager service will attempt to maintain this number over the lifetime
of the Spot Fleet Request.
spot_price : float
The bid price in USD for the instances. This is per hour. Keep this at
about half the hourly on-demand price of the desired instances to make
sure your instances aren't taken away by AWS when it needs capacity.
expires_days : int
The number of days this request is active for. All instances launched by
this request will live at least this long and will be terminated
automatically after.
allocation_strategy : {'lowestPrice', 'diversified'}
The allocation strategy used by the fleet manager.
instance_types : list of str
List of the instance type to launch. See the following URL for a list of
IDs: https://aws.amazon.com/ec2/pricing/on-demand/
instance_weights : list of float or None
If `instance_types` is a list of different instance types, this is the
relative weight applied towards launching each instance type. This can
be used to launch a mix of instances in a defined ratio among their
types. Doing this can make the spot fleet more resilient to AWS taking
back the instances if it runs out of capacity.
instance_ami : str
The Amazon Machine Image ID that describes the OS the instances will use
after launch. The default ID is Amazon Linux 2 in the US East region.
instance_user_data : str or None
This is either the path to a file on disk that contains a shell-script
or a string containing a shell-script that will be executed by root
right after the instance is launched. Use to automatically set up
workers and queues. If None, will not execute anything at instance
start up.
instance_ebs_optimized : bool
If True, will enable EBS optimization to speed up IO. This is usually
True for all instances made available in the last couple of years.
wait_until_up : bool
If True, will not return from this function until the spot fleet request
is acknowledged by AWS.
client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its operations. Alternatively, pass in an existing `boto3.Client`
instance to re-use it here.
raiseonfail : bool
If True, will re-raise whatever Exception caused the operation to fail
and break out immediately.
Returns
-------
str or None
This is the spot fleet request ID if successful. Otherwise, returns
None.
"""
fleetconfig = copy.deepcopy(SPOT_FLEET_CONFIG)
fleetconfig['IamFleetRole'] = spot_fleet_iam_role
fleetconfig['AllocationStrategy'] = allocation_strategy
fleetconfig['TargetCapacity'] = target_capacity
fleetconfig['SpotPrice'] = str(spot_price)
fleetconfig['ValidUntil'] = (
datetime.utcnow() + timedelta(days=expires_days)
).strftime(
'%Y-%m-%dT%H:%M:%SZ'
)
# get the user data from a string or a file
# we need to base64 encode it here
if (isinstance(instance_user_data, str) and
os.path.exists(instance_user_data)):
with open(instance_user_data,'rb') as infd:
udata = base64.b64encode(infd.read()).decode()
elif isinstance(instance_user_data, str):
udata = base64.b64encode(instance_user_data.encode()).decode()
else:
udata = (
'#!/bin/bash\necho "No user data provided. '
'Launched instance at: %s UTC"' % datetime.utcnow().isoformat()
)
udata = base64.b64encode(udata.encode()).decode()
for ind, itype in enumerate(instance_types):
        # deep-copy the per-instance template so its nested dicts aren't shared
        thisinstance = copy.deepcopy(SPOT_PERINSTANCE_CONFIG)
thisinstance['InstanceType'] = itype
thisinstance['ImageId'] = instance_ami
thisinstance['SubnetId'] = subnet_id
thisinstance['KeyName'] = keypair_name
thisinstance['IamInstanceProfile']['Arn'] = iam_instance_profile_arn
thisinstance['SecurityGroups'][0] = {'GroupId':security_groupid}
thisinstance['UserData'] = udata
thisinstance['EbsOptimized'] = instance_ebs_optimized
# get the instance weights
if isinstance(instance_weights, list):
thisinstance['WeightedCapacity'] = instance_weights[ind]
fleetconfig['LaunchSpecifications'].append(thisinstance)
#
# launch the fleet
#
if not client:
client = boto3.client('ec2')
try:
resp = client.request_spot_fleet(
SpotFleetRequestConfig=fleetconfig,
)
if not resp:
LOGERROR('spot fleet request failed.')
return None
else:
spot_fleet_reqid = resp['SpotFleetRequestId']
LOGINFO('spot fleet requested successfully. request ID: %s' %
spot_fleet_reqid)
if not wait_until_up:
return spot_fleet_reqid
else:
ntries = 10
curr_try = 0
while curr_try < ntries:
resp = client.describe_spot_fleet_requests(
SpotFleetRequestIds=[
spot_fleet_reqid
]
)
curr_state = resp.get('SpotFleetRequestConfigs',[])
if len(curr_state) > 0:
curr_state = curr_state[0]['SpotFleetRequestState']
if curr_state == 'active':
LOGINFO('spot fleet with reqid: %s is now active' %
spot_fleet_reqid)
break
LOGINFO(
'spot fleet not yet active, waiting 15 seconds. '
'try %s/%s' % (curr_try, ntries)
)
curr_try = curr_try + 1
time.sleep(15.0)
return spot_fleet_reqid
except ClientError as e:
LOGEXCEPTION('could not launch spot fleet')
if raiseonfail:
raise
return None
except Exception as e:
LOGEXCEPTION('could not launch spot fleet')
if raiseonfail:
raise
return None
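# Usage sketch: request a small spot fleet with the same placeholder IDs/ARNs
# as in the make_ec2_nodes example above, the IAM fleet role name as a
# placeholder, and a mix of two instance types.
fleet_reqid = make_spot_fleet_cluster(
    'sg-0123456789abcdef0',
    'subnet-0123456789abcdef0',
    'my-aws-keypair',
    'arn:aws:iam::123456789012:instance-profile/my-lcproc-worker',
    'my-spot-fleet-iam-role',
    target_capacity=5,
    spot_price=0.2,
    instance_types=['c5.xlarge', 'm5.xlarge'],
    instance_user_data=worker_startup_script,
)
if fleet_reqid is not None:
    print('spot fleet request ID:', fleet_reqid)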
|
def delete_spot_fleet_cluster(
spot_fleet_reqid,
client=None,
):
"""
This deletes a spot-fleet cluster.
Parameters
----------
spot_fleet_reqid : str
The fleet request ID returned by `make_spot_fleet_cluster`.
client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its operations. Alternatively, pass in an existing `boto3.Client`
instance to re-use it here.
Returns
-------
Nothing.
"""
if not client:
client = boto3.client('ec2')
resp = client.cancel_spot_fleet_requests(
SpotFleetRequestIds=[spot_fleet_reqid],
TerminateInstances=True
)
return resp
|
def gcs_get_file(bucketname,
filename,
local_file,
altexts=None,
client=None,
service_account_json=None,
raiseonfail=False):
"""This gets a single file from a Google Cloud Storage bucket.
Parameters
----------
bucketname : str
The name of the GCS bucket to download the file from.
filename : str
The full name of the file to download, including all prefixes.
local_file : str
Path to where the downloaded file will be stored.
altexts : None or list of str
If not None, this is a list of alternate extensions to try for the file
other than the one provided in `filename`. For example, to get anything
that's an .sqlite where .sqlite.gz is expected, use altexts=[''] to
strip the .gz.
client : google.cloud.storage.Client instance
The instance of the Client to use to perform the download operation. If
this is None, a new Client will be used. If this is None and
`service_account_json` points to a downloaded JSON file with GCS
credentials, a new Client with the provided credentials will be used. If
this is not None, the existing Client instance will be used.
service_account_json : str
Path to a downloaded GCS credentials JSON file.
raiseonfail : bool
If True, will re-raise whatever Exception caused the operation to fail
and break out immediately.
Returns
-------
str
Path to the downloaded filename or None if the download was
unsuccessful.
"""
if not client:
if (service_account_json is not None and
os.path.exists(service_account_json)):
client = storage.Client.from_service_account_json(
service_account_json
)
else:
client = storage.Client()
try:
bucket = client.get_bucket(bucketname)
blob = bucket.get_blob(filename)
blob.download_to_filename(local_file)
return local_file
    except Exception as e:
        # try the alternate extensions (if any) before giving up, mirroring
        # the behavior of s3_get_file above
        if altexts is not None:
            for alt_extension in altexts:
                split_ext = os.path.splitext(filename)
                check_file = split_ext[0] + alt_extension
                try:
                    bucket = client.get_bucket(bucketname)
                    blob = bucket.get_blob(check_file)
                    blob.download_to_filename(
                        local_file.replace(split_ext[-1],
                                           alt_extension)
                    )
                    return local_file.replace(split_ext[-1],
                                              alt_extension)
                except Exception as e:
                    pass
        else:
            LOGEXCEPTION('could not download gs://%s/%s' % (bucketname,
                                                            filename))
            if raiseonfail:
                raise
        return None
|
def gcs_get_url(url,
altexts=None,
client=None,
service_account_json=None,
raiseonfail=False):
"""This gets a single file from a Google Cloud Storage bucket.
This uses the gs:// URL instead of a bucket name and key.
Parameters
----------
url : str
GCS URL to download. This should begin with 'gs://'.
altexts : None or list of str
If not None, this is a list of alternate extensions to try for the file
other than the one provided in `filename`. For example, to get anything
that's an .sqlite where .sqlite.gz is expected, use altexts=[''] to
strip the .gz.
client : google.cloud.storage.Client instance
The instance of the Client to use to perform the download operation. If
this is None, a new Client will be used. If this is None and
`service_account_json` points to a downloaded JSON file with GCS
credentials, a new Client with the provided credentials will be used. If
this is not None, the existing Client instance will be used.
service_account_json : str
Path to a downloaded GCS credentials JSON file.
raiseonfail : bool
If True, will re-raise whatever Exception caused the operation to fail
and break out immediately.
Returns
-------
str
Path to the downloaded filename or None if the download was
unsuccessful.
"""
bucket_item = url.replace('gs://','')
bucket_item = bucket_item.split('/')
bucket = bucket_item[0]
filekey = '/'.join(bucket_item[1:])
return gcs_get_file(bucket,
filekey,
bucket_item[-1],
altexts=altexts,
client=client,
service_account_json=service_account_json,
raiseonfail=raiseonfail)
|
def gcs_put_file(local_file,
bucketname,
service_account_json=None,
client=None,
raiseonfail=False):
"""This puts a single file into a Google Cloud Storage bucket.
Parameters
----------
local_file : str
Path to the file to upload to GCS.
    bucketname : str
        The GCS bucket to upload the file to.
service_account_json : str
Path to a downloaded GCS credentials JSON file.
client : google.cloud.storage.Client instance
The instance of the Client to use to perform the download operation. If
this is None, a new Client will be used. If this is None and
`service_account_json` points to a downloaded JSON file with GCS
credentials, a new Client with the provided credentials will be used. If
this is not None, the existing Client instance will be used.
raiseonfail : bool
If True, will re-raise whatever Exception caused the operation to fail
and break out immediately.
Returns
-------
str or None
If the file upload is successful, returns the gs:// URL of the uploaded
file. If it failed, will return None.
"""
if not client:
if (service_account_json is not None and
os.path.exists(service_account_json)):
client = storage.Client.from_service_account_json(
service_account_json
)
else:
client = storage.Client()
try:
bucket = client.get_bucket(bucketname)
remote_blob = bucket.blob(local_file)
remote_blob.upload_from_filename(local_file)
return 'gs://%s/%s' % (bucketname, local_file.lstrip('/'))
except Exception as e:
        LOGEXCEPTION('could not upload %s to bucket %s' % (local_file,
                                                           bucketname))
if raiseonfail:
raise
return None
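# Usage sketch: upload a local file to a (hypothetical) GCS bucket using a
# downloaded service-account credentials JSON; both names below are
# placeholders.
gcs_url = gcs_put_file('periodfinding-result.pkl',
                       'my-lcproc-gcs-bucket',
                       service_account_json='/path/to/gcs-credentials.json')
if gcs_url is not None:
    print('uploaded to:', gcs_url)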
|
def read_hatlc(hatlc):
'''
This reads a consolidated HAT LC written by the functions above.
Returns a dict.
'''
lcfname = os.path.basename(hatlc)
# unzip the files first
if '.gz' in lcfname:
lcf = gzip.open(hatlc,'rb')
elif '.bz2' in lcfname:
lcf = bz2.BZ2File(hatlc, 'rb')
else:
lcf = open(hatlc,'rb')
if '.fits' in lcfname and HAVEPYFITS:
hdulist = pyfits.open(lcf)
objectinfo = hdulist[0].header
objectlc = hdulist[1].data
lccols = objectlc.columns.names
hdulist.close()
lcf.close()
lcdict = {}
for col in lccols:
lcdict[col] = np.array(objectlc[col])
lcdict['hatid'] = objectinfo['hatid']
lcdict['twomassid'] = objectinfo['2massid']
lcdict['ra'] = objectinfo['ra']
lcdict['dec'] = objectinfo['dec']
lcdict['mags'] = [objectinfo[x] for x in ('vmag','rmag','imag',
'jmag','hmag','kmag')]
lcdict['ndet'] = objectinfo['ndet']
lcdict['hatstations'] = objectinfo['hats']
lcdict['filters'] = objectinfo['filters']
lcdict['columns'] = lccols
return lcdict
elif '.fits' in lcfname and not HAVEPYFITS:
print("can't read %s since we don't have the pyfits module" % lcfname)
return
elif '.csv' in lcfname or '.hatlc' in lcfname:
lcflines = lcf.read().decode().split('\n')
lcf.close()
# now process the read-in LC
objectdata = [x for x in lcflines if x.startswith('#')]
objectlc = [x for x in lcflines if not x.startswith('#')]
objectlc = [x for x in objectlc if len(x) > 1]
if '.csv' in lcfname:
objectlc = [x.split(',') for x in objectlc]
else:
objectlc = [x.split() for x in objectlc]
# transpose split rows to get columns
objectlc = list(zip(*objectlc))
# read the header to figure out the object's info and column names
objectdata = [x.strip('#') for x in objectdata]
objectdata = [x.strip() for x in objectdata]
objectdata = [x for x in objectdata if len(x) > 0]
hatid, twomassid = objectdata[0].split(' - ')
ra, dec = objectdata[1].split(', ')
ra = float(ra.split(' = ')[-1].strip(' deg'))
dec = float(dec.split(' = ')[-1].strip(' deg'))
vmag, rmag, imag, jmag, hmag, kmag = objectdata[2].split(', ')
vmag = float(vmag.split(' = ')[-1])
rmag = float(rmag.split(' = ')[-1])
imag = float(imag.split(' = ')[-1])
jmag = float(jmag.split(' = ')[-1])
hmag = float(hmag.split(' = ')[-1])
kmag = float(kmag.split(' = ')[-1])
ndet = int(objectdata[3].split(': ')[-1])
hatstations = objectdata[4].split(': ')[-1]
filterhead_ind = objectdata.index('Filters used:')
columnhead_ind = objectdata.index('Columns:')
filters = objectdata[filterhead_ind:columnhead_ind]
columndefs = objectdata[columnhead_ind+1:]
columns = []
for line in columndefs:
colnum, colname, coldesc = line.split(' - ')
columns.append(colname)
lcdict = {}
# now write all the columns to the output dictionary
for ind, col in enumerate(columns):
# this formats everything nicely using our existing column
# definitions
lcdict[col] = np.array([TEXTLC_OUTPUT_COLUMNS[col][3](x)
for x in objectlc[ind]])
# write the object metadata to the output dictionary
lcdict['hatid'] = hatid
lcdict['twomassid'] = twomassid.replace('2MASS J','')
lcdict['ra'] = ra
lcdict['dec'] = dec
lcdict['mags'] = [vmag, rmag, imag, jmag, hmag, kmag]
lcdict['ndet'] = ndet
lcdict['hatstations'] = hatstations.split(', ')
lcdict['filters'] = filters[1:]
lcdict['cols'] = columns
return lcdict
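# Usage sketch: read a (hypothetical) gzipped consolidated HAT light curve and
# inspect its metadata and available columns.
hat_lcdict = read_hatlc('HAT-123-0001234-hatlc.csv.gz')
if hat_lcdict:
    print('object:', hat_lcdict['hatid'], 'ndet:', hat_lcdict['ndet'])
    print('columns:', hat_lcdict['cols'])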
|