| prompt (string, 19 - 1.03M chars) | completion (string, 4 - 2.12k chars) | api (string, 8 - 90 chars) |
|---|---|---|
#!/usr/bin/env python3
import gc
import os
import pickle
import fire
import h5py
import matplotlib.pyplot as plt
import seaborn as sns
from hyperopt.fmin import generate_trials_to_calculate
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import precision_recall_curve
from numpy import linalg as LA
import sklearn.metrics as metrics
import json
import lightgbm as lgb
import numpy as np
import pandas as pd
import glob
from sklearn.preprocessing import QuantileTransformer
import yaml
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
from sklearn.metrics import average_precision_score
from early_stopping_avg import early_stopping
from hyperopt import STATUS_OK
from hyperopt import hp
from timeit import default_timer as timer
from hyperopt import tpe
from hyperopt import Trials
from hyperopt import fmin
def focal_isoform_binary_object(pred, dtrain, alpha=0.5, beta=0.0, gamma=2.0):
# alpha controls the weight given to positive samples:
#   values in (0, 0.5) down-weight positives relative to negatives,
#   values in (0.5, 1) up-weight them.
# beta controls the horizontal shift of the loss function:
#   beta > 0 shifts it left, giving less weight to already well-fit samples.
# gamma controls the steepness of the loss function (gamma > 0).
label = dtrain.get_label()
x = beta + (2.0 * label - 1) * gamma * pred
p = 1. / (1. + np.exp(-x))
# grad = (1 + (alpha - 1) * label) * (2 * label - 1) * (p - 1)
grad = (1 - label + (label * 2 - 1) * alpha) * (2 * label - 1) * (p - 1)
# hess = (1 + (alpha - 1) * label) * gamma * (1 - p) * p
hess = (1 - label + (label * 2 - 1) * alpha) * gamma * (1 - p) * p
return grad, hess
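# --- Illustrative sketch (not part of the original script) ------------------
# focal_isoform_binary_object only needs an object exposing get_label(), so a
# tiny stand-in dataset is enough to inspect the gradient/hessian it returns.
# The _ToyDataset class and the numbers below are assumptions for this demo.
def _demo_focal_objective():
    class _ToyDataset:
        def __init__(self, label):
            self._label = np.asarray(label, dtype=float)

        def get_label(self):
            return self._label

    label = np.array([1.0, 1.0, 0.0, 0.0])
    raw_pred = np.array([2.0, -0.5, 0.3, -2.0])  # raw (pre-sigmoid) scores
    grad, hess = focal_isoform_binary_object(raw_pred, _ToyDataset(label),
                                             alpha=0.5, beta=0.0, gamma=2.0)
    # Positives get negative gradients (scores pushed up), negatives positive
    # ones (scores pushed down), and the hessian is non-negative, as LightGBM
    # expects from a custom objective.
    print("grad:", np.round(grad, 4))
    print("hess:", np.round(hess, 4))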
def lgb_auprc_score(y_hat, data):
y_true = data.get_label()
# TODO try not to round yhat
# y_hat = np.round(y_hat) # scikits f1 doesn't like probabilities
return 'auprc', average_precision_score(y_true, y_hat), True
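# --- Hedged usage sketch (assumption, not part of the original pipeline) ----
# Shows how the two helpers above plug into lgb.train as a custom objective
# (fobj) and a custom metric (feval), mirroring how the training methods of
# LightGBMModel below use them. This assumes the pre-4.0 LightGBM API
# (fobj/feval keyword arguments) that the rest of this script is written
# against; the synthetic, imbalanced data here is purely illustrative.
def _demo_train_with_focal_objective(num_rows=2000, num_cols=20, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.rand(num_rows, num_cols)
    y = (X[:, 0] + 0.1 * rng.randn(num_rows) > 0.9).astype(int)  # rare positives
    dtrain = lgb.Dataset(X, label=y)
    params = {'objective': 'binary', 'metric': ['auc'], 'num_leaves': 15,
              'learning_rate': 0.1, 'boost_from_average': False, 'verbose': -1}
    booster = lgb.train(params, dtrain,
                        num_boost_round=20,
                        fobj=lambda preds, data: focal_isoform_binary_object(
                            preds, data, alpha=0.5, beta=-1.5, gamma=1.01),
                        feval=lgb_auprc_score,
                        valid_sets=[dtrain],
                        valid_names=['train'])
    return booster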
class LightGBMModel(object):
def __init__(self, config_file, training_tf_name=None,
cofactor_motif_set_file=None, quantile_transformer_path=None, dnase_feature_path=None,
motif_feature_path=None, selected_motif_feature_path=None, step=120):
with open(config_file, "r") as infile:
config = yaml.load(infile, Loader=Loader)
self.config = config
self.chrom_all = config['chrom_all']
self.region_topic_model_h5 = config['region_topic_model_h5']
self.dic_chrom_length = {}
self.chrom_sets = config['chrom_sets']
self.training_tf_name = training_tf_name
with open(config['chrom_size_file'], "r") as infile:
for line in infile:
line = line.strip().split("\t")
if line[0] in self.chrom_all:
self.dic_chrom_length[line[0]] = int(line[1])
# if regions_all_file is not None:
# self.df_all_regions = pd.read_csv(regions_all_file, sep="\t", header=None)
# self.df_all_regions.columns = ['chr', 'start', 'stop']
# else:
# self.df_all_regions = None
if training_tf_name is not None:
self.df_all_regions_label = pd.read_csv(
"%s/%s.%s" % (
config['training_cell_types_regions_label_path'], training_tf_name,
config['training_cell_types_regions_label_name']),
sep="\t", header=0)
else:
self.df_all_regions_label = None
if cofactor_motif_set_file is not None:
with open(cofactor_motif_set_file, "r") as infile:
self.cofactor_motif_set = json.load(infile)
else:
self.cofactor_motif_set = None
if quantile_transformer_path is None:
self.quantile_transformer_path = "./train/quantile_transformer"
else:
self.quantile_transformer_path = quantile_transformer_path
if dnase_feature_path is None:
self.dnase_feature_path = "./hdf5s/DNase"
else:
self.dnase_feature_path = dnase_feature_path
if motif_feature_path is None:
self.motif_feature_path = "./hdf5s/motif"
else:
self.motif_feature_path = motif_feature_path
if selected_motif_feature_path is None:
self.selected_motif_feature_path = "./hdf5s/motif"
else:
self.selected_motif_feature_path = selected_motif_feature_path
self.step = step
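# `step` is the number of selected-motif features bundled into each LightGBM
# binary-file subset (see prepare_lightgbm_binary_data_motif_feature_subset
# and merge_lightgbm_binary_data below, which iterate over these chunks).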
def prepare_motif_h5_data(self, chrom):
df_temp = self.df_all_regions_label[self.df_all_regions_label['chr'] == chrom].copy()
df_temp = df_temp.iloc[:, :3]
with h5py.File("%s/%s_motifs_top4_scores.h5" % (self.motif_feature_path, chrom), "r") as infile:
motif_names = infile['motif_names'][...]
motif_names = list(map(lambda x: x.decode('UTF-8'), motif_names))
# if selected_tfs is None:
# selected_tfs = motif_names
# selected_tfs=["EGR","KLF","SPI",'ETV','ZNF','GABP']
# row_indexs = [i for i, v in enumerate(motif_names) if any([tf_name in v for tf_name in selected_tfs])]
# selected_tfs_names = [v for i, v in enumerate(motif_names) if
# any([tf_name in v for tf_name in selected_tfs])]
row_index = [i for i, v in enumerate(motif_names) if v in self.cofactor_motif_set]
selected_motifs = [motif_names[i] for i in row_index]
# print(row_index)
scores = infile["scores"][row_index, :, :]
# for i in [-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13]:
for i in [-7, -5, -3, -1, 0, 1, 3, 5, 7]:
# print("%s %d" % (chrom, i))
region_index = np.array(list(map(lambda x: x / 50 + i, df_temp["start"])))
region_index = np.clip(region_index, 0, scores.shape[1] - 1)
scores_region = scores[:, region_index.astype(int), :]
for ind, j in enumerate(selected_motifs):
# for ind, j in enumerate(self.cofactor_motif_set):
for k in range(4):
df_temp["%s_offset_%d_top_%d" % (j, i, k)] = scores_region[ind, :, k]
with h5py.File('%s/%s_motif_features_lightGBM.h5' % (self.selected_motif_feature_path, chrom), "w") as outfile:
outfile.create_dataset("feature_names", data=np.array(df_temp.iloc[:, 3:].columns, dtype='S'),
shape=(df_temp.shape[1] - 3,),
dtype='S200', compression='gzip', shuffle=True, fletcher32=True, compression_opts=9)
outfile.create_dataset("starts", data=df_temp['start'].tolist(), shape=(df_temp.shape[0],),
compression='gzip', shuffle=True, fletcher32=True, compression_opts=9)
outfile.create_dataset("scores", data=df_temp.iloc[:, 3:].values, dtype=np.float32,
shape=(df_temp.shape[0], df_temp.shape[1] - 3),
compression='gzip', shuffle=True, fletcher32=True, compression_opts=4)
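# The HDF5 file written above (datasets: feature_names, starts, scores) is the
# per-chromosome motif-feature store that prepare_lightgbm_binary_data_motif_feature_subset
# and the make_prediction*/evaluation* methods read back later in this class.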
def prepare_dnase_autoencoder_h5_data(self, cell_line, chrom, outfile_path):
df_temp = self.df_all_regions_label[self.df_all_regions_label['chr'] == chrom].copy()
df_temp = df_temp.iloc[:, :3]
# for cell_line in self.config['cell_types']:
with h5py.File(
"%s/DNASE.%s.merge.binSize.1.corrected_sorted_hg19_25bpbin_bwaverage_transformed_%s_scanned_with_autoencoder_v4.hdf5" % (
'/n/scratchlfs/xiaoleliu_lab/cchen/Cistrome_imputation/encode/data/DNase_scanning/scan_result',
cell_line, chrom), "r") as infile:
# print(row_index)
scores = infile["DNase_feature_scanning"][:, :]
# for i in [-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13]:
for i in [-12, -8, -4, 0, 4, 8, 12]:
# print("%s %d" % (chrom, i))
region_index = np.array(list(map(lambda x: x / 50 + i, df_temp["start"])))
region_index = np.clip(region_index, 0, scores.shape[0] - 1)
scores_region = scores[region_index.astype(int), :]
for k in range(32):
df_temp["DNase_autoencoder_offset_%d_%d" % (i, k)] = scores_region[:, k]
with h5py.File('%s/DNASE_autoencoder_lightGBM.%s.%s.h5' % (outfile_path, chrom, cell_line), "w") as outfile:
outfile.create_dataset("feature_names", data=np.array(df_temp.iloc[:, 3:].columns, dtype='S'),
shape=(df_temp.shape[1] - 3,),
dtype='S200', compression='gzip', shuffle=True, fletcher32=True, compression_opts=9)
outfile.create_dataset("starts", data=df_temp['start'].tolist(), shape=(df_temp.shape[0],),
compression='gzip', shuffle=True, fletcher32=True, compression_opts=9)
outfile.create_dataset("scores", data=df_temp.iloc[:, 3:].values, dtype=np.float32,
shape=(df_temp.shape[0], df_temp.shape[1] - 3),
compression='gzip', shuffle=True, fletcher32=True, compression_opts=4)
def get_dnase_features(self, cell_line, chrom, dir_dnase_feature_median, selected_bin_index_file=None):
with h5py.File("%s/DNASE_bam_5_mer_variable_bp_all_samples_lightGBM_%s_all_cell_types.h5" % (
self.dnase_feature_path, chrom), "r") as infile:
samples = list(infile['samples'][...])
cell_line = str(cell_line)
samples = list(map(lambda x: x.decode('UTF-8'), samples))
cell_line_index = np.where(np.array(samples) == cell_line)[0][0]
if selected_bin_index_file is None:
cell_line_scores = infile[chrom][cell_line_index, :, :]
else:
selected_bin_index = np.load(selected_bin_index_file)
cell_line_scores = infile[chrom][cell_line_index, selected_bin_index, :]
# with h5py.File("%s/DNASE_bam_5_mer_variable_bp_all_samples_lightGBM_%s_median.h5" % (
# dir_dnase_feature_median, chrom), "r") as infile:
# if selected_bin_index_file is None:
# median_scores = infile[chrom][:, :]
# else:
# selected_bin_index = np.load(selected_bin_index_file)
# median_scores = infile[chrom][selected_bin_index, :]
# scores = np.hstack((cell_line_scores, median_scores))
# return scores
return cell_line_scores
def get_dnase_features_autoencoder(self, cell_line, chrom, feature_path, selected_bin_index_file=None):
with h5py.File("%s/DNASE_bam_5_mer_variable_bp_all_samples_lightGBM_%s_all_cell_types.h5" % (
self.dnase_feature_path, chrom), "r") as infile:
size = infile[chrom].shape[1]
with h5py.File("%s/DNASE_autoencoder_lightGBM.%s.%s.h5" % (feature_path, chrom, cell_line), "r") as infile:
if selected_bin_index_file is None:
cell_line_scores = infile['scores'][:size, :]
else:
selected_bin_index = np.load(selected_bin_index_file)
cell_line_scores = infile['scores'][selected_bin_index, :]
# with h5py.File("%s/DNASE_bam_5_mer_variable_bp_all_samples_lightGBM_%s_median.h5" % (
# dir_dnase_feature_median, chrom), "r") as infile:
# if selected_bin_index_file is None:
# median_scores = infile[chrom][:, :]
# else:
# selected_bin_index = np.load(selected_bin_index_file)
# median_scores = infile[chrom][selected_bin_index, :]
# scores = np.hstack((cell_line_scores, median_scores))
# return scores
return cell_line_scores
def prepare_lightgbm_binary_dnase_feature(self, cell_line, chrom_set_name, dir_dnase_feature_median, dir_out,
reference=None, selected_bin_index_file=None, ATAC_long_short=False):
cell_line = str(cell_line)
chrom = "chr19"
# TODO change to 50bp or 100bp
with h5py.File("%s/DNASE_bam_5_mer_variable_bp_all_samples_lightGBM_%s_all_cell_types.h5" % (
self.dnase_feature_path, chrom), "r") as infile:
needed_feature_names = list(infile['feature_names'][...])
needed_feature_names = list(map(lambda x: x.decode('UTF-8'), needed_feature_names))
# with h5py.File("%s/DNASE_bam_5_mer_variable_bp_all_samples_lightGBM_%s_median.h5" % (
# # dir_dnase_feature_median, chrom), "r") as infile:
# # median_feature_names = list(infile['feature_names'][...])
# # median_feature_names = list(map(lambda x: x.decode('UTF-8'), median_feature_names))
# # needed_feature_names += median_feature_names
list_scores = []
chrom_set = self.chrom_sets[chrom_set_name]
if not ATAC_long_short:
for chrom in chrom_set:
scores = self.get_dnase_features(cell_line, chrom, dir_dnase_feature_median, selected_bin_index_file)
list_scores.append(scores)
# print(cell_line, chrom, subset_index)
all_score = np.vstack(list_scores)
# TODO change to 50bp or 100bp
with open("%s/%s_variable_bp_quantile_map.pkl" % (self.quantile_transformer_path, cell_line),
'rb') as fin:
qt = pickle.load(fin, encoding='latin1')
_ = qt.transform(all_score)
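# Note: the returned array is discarded here; this call only updates all_score
# in place if the pickled QuantileTransformer was constructed with copy=False,
# which this pipeline appears to rely on.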
# _ = qt.transform(all_score[:, :int(all_score.shape[1] / 2)])
# _ = qt.transform(all_score[:, :cell_line_scores.shape[1]])
if reference is not None:
# reference = lgb.Dataset(glob.glob("%s/lightGBM.dnase.*.*.bin" % reference)[0])
reference = lgb.Dataset(reference)
train_data = lgb.Dataset(all_score, feature_name=list(needed_feature_names), reference=reference)
else:
list_scores_short_long = []
for frag_size in ['short','long']:
list_scores = []
for chrom in chrom_set:
scores = self.get_dnase_features("%s_%s" % (cell_line, frag_size), chrom, dir_dnase_feature_median,
selected_bin_index_file)
list_scores.append(scores)
# print(cell_line, chrom, subset_index)
all_score = np.vstack(list_scores)
# TODO change to 50bp or 100bp
with open("%s/%s_variable_bp_quantile_map.pkl" % (self.quantile_transformer_path, "%s_%s" % (cell_line, frag_size)),
'rb') as fin:
qt = pickle.load(fin, encoding='latin1')
_ = qt.transform(all_score)
# _ = qt.transform(all_score[:, :int(all_score.shape[1] / 2)])
# _ = qt.transform(all_score[:, :cell_line_scores.shape[1]])
list_scores_short_long.append(all_score)
all_score_short_long = np.hstack(list_scores_short_long)
if reference is not None:
# reference = lgb.Dataset(glob.glob("%s/lightGBM.dnase.*.*.bin" % reference)[0])
reference = lgb.Dataset(reference)
needed_feature_names_short_long = ['%s_%s' % (feature_name, frag_size) for frag_size in ['short','long']
for feature_name in needed_feature_names
]
train_data = lgb.Dataset(all_score_short_long, feature_name=list(needed_feature_names_short_long), reference=reference)
train_data.save_binary("%s/lightGBM.dnase.%s.%s.bin" % (dir_out, cell_line, chrom_set_name))
def prepare_lightgbm_binary_dnase_feature_autoencoder(self, cell_line, chrom_set_name, feature_path,
dir_out,
reference=None, selected_bin_index_file=None):
list_scores = []
chrom_set = self.chrom_sets[chrom_set_name]
for chrom in chrom_set:
scores = self.get_dnase_features_autoencoder(cell_line, chrom, feature_path,
selected_bin_index_file)
list_scores.append(scores)
# print(cell_line, chrom, subset_index)
all_score = np.vstack(list_scores)
if reference is not None:
reference = lgb.Dataset(glob.glob("%s/lightGBM.autoencoder.dnase.*.*.bin" % reference)[0])
needed_feature_names = []
for i in [-12, -8, -4, 0, 4, 8, 12]:
for k in range(32):
needed_feature_names.append("DNase_autoencoder_offset_%d_%d" % (i, k))
train_data = lgb.Dataset(all_score, feature_name=list(needed_feature_names), reference=reference)
train_data.save_binary("%s/lightGBM.autoencoder.dnase.%s.%s.bin" % (dir_out, cell_line, chrom_set_name))
def prepare_lightgbm_binary_data_motif_feature_subset(self, chrom_set_name, subset_index, dir_out,
selected_bin_index_file=None, reference=None):
chrom = "chr19"
with h5py.File("%s/%s_motif_features_lightGBM.h5" % (self.selected_motif_feature_path, chrom),
"r") as infile:
all_feature_names = list(infile['feature_names'][...])
all_feature_names = list(map(lambda x: x.decode('UTF-8'), all_feature_names))
needed_feature_names = [all_feature_names[i:i + self.step]
for i in range(0, len(all_feature_names), self.step)][subset_index - 1]
feature_index = [list(range(i, min(i + self.step, len(all_feature_names)))) for i in
range(0, len(all_feature_names), self.step)][subset_index - 1]
# needed_feature_names = list(map(lambda x: x.decode('UTF-8'), needed_feature_names))
list_scores = []
chrom_set = self.chrom_sets[chrom_set_name]
with h5py.File(self.region_topic_model_h5, "r") as region_topic_infile:
for chrom in chrom_set:
with h5py.File("%s/%s_motif_features_lightGBM.h5" % (self.selected_motif_feature_path, chrom),
"r") as infile:
# feature_names = list(infile['feature_names'][...])
# feature_names = list(map(lambda x: x.decode('UTF-8'), feature_names))
# feature_index = [i for i, v in enumerate(feature_names) if (v in needed_feature_names)]
if selected_bin_index_file is None:
scores = infile['scores'][:, feature_index]
if subset_index == 1:
scores = np.hstack([region_topic_infile[chrom][:, :], scores])
else:
selected_bin_index = np.load(selected_bin_index_file)
scores = infile['scores'][selected_bin_index, feature_index]
if subset_index == 1:
scores = np.hstack([region_topic_infile[chrom][selected_bin_index, :], scores])
list_scores.append(scores)
# print(cell_line, chrom, subset_index)
all_score = np.vstack(list_scores)
if reference is not None:
reference = lgb.Dataset(glob.glob("%s/lightGBM.motif.*.%d.bin" % (reference, subset_index))[0])
if subset_index == 1:
# needed_feature_names = ["topic_%d" % topic_id for topic_id in range(9)] \
# + needed_feature_names
# train_data = lgb.Dataset(all_score, categorical_feature=[8],
# feature_name=list(needed_feature_names), reference=reference)
needed_feature_names = ["topic_%d" % topic_id for topic_id in range(1)] \
+ needed_feature_names
train_data = lgb.Dataset(all_score[:, 8:],
categorical_feature=[0],
feature_name=list(needed_feature_names), reference=reference)
else:
train_data = lgb.Dataset(all_score, feature_name=list(needed_feature_names), reference=reference)
train_data.save_binary("%s/lightGBM.motif.%s.%d.bin" % (dir_out, chrom_set_name, subset_index))
# def merge_lightgbm_binary_data(self, cell_line, chrom_set_name, dir_out):
# all_feature_names = []
# chrom = "chr22"
# # TODO change to 50bp or 100bp
# # with h5py.File("%s/DNASE_bam_5_mer_variable_bp_all_samples_lightGBM_%s_all_cell_types.h5" % (
# # self.dnase_feature_path, chrom), "r") as infile:
# # all_feature_names += list(infile['feature_names'][...])
# # chrom = "chr22"
# with h5py.File("%s/%s_motif_features_lightGBM.h5" % (self.selected_motif_feature_path, chrom),
# "r") as infile:
# all_feature_names += list(infile['feature_names'][...])
# all_feature_names = list(map(lambda x: x.decode('UTF-8'), all_feature_names))
# # for cell_line in self.df_all_regions_label.columns.tolist()[3:]:
# for cell_line in [cell_line]:
# train_data_all = None
# for subset_index in range(int(np.ceil(len(all_feature_names) / self.step) + 1)):
# train_data = lgb.Dataset("%s/lightGBM.%s.%s.%d.bin" %
# (dir_out, cell_line, chrom_set_name, subset_index)).construct()
# if train_data_all is None:
# train_data_all = train_data
# else:
# # train_data_all=train_data_all.add_features_from(train_data)
# train_data_all.add_features_from(train_data)
# # print(subset_index)
# train_data_all.save_binary("%s/lightGBM_all.%s.%s.bin" % (dir_out, cell_line, chrom_set_name))
# print(cell_line, chrom_set_name)
def merge_lightgbm_binary_data(self, cell_line, chrom_set_name, dir_out=None, lightgbm_dnase_binary_files_path=None,
lightgbm_motif_binary_files_path=None):
if dir_out is None:
dir_out = "./train/%s/binary_files" % self.training_tf_name
if lightgbm_motif_binary_files_path is None:
lightgbm_motif_binary_files_path = "./train/%s/binary_files" % self.training_tf_name
if lightgbm_dnase_binary_files_path is None:
lightgbm_dnase_binary_files_path = "./train/data/dnase_feature_binary_files"
cell_line = str(cell_line)
all_feature_names = []
chrom = "chr19"
# TODO change to 50bp or 100bp
with h5py.File("%s/%s_motif_features_lightGBM.h5" % (self.selected_motif_feature_path, chrom),
"r") as infile:
all_feature_names += list(infile['feature_names'][...])
all_feature_names = list(map(lambda x: x.decode('UTF-8'), all_feature_names))
train_data_all = lgb.Dataset("%s/lightGBM.dnase.%s.%s.bin" %
(lightgbm_dnase_binary_files_path, cell_line, chrom_set_name)).construct()
for subset_index in range(int(np.ceil(len(all_feature_names) / self.step))):
train_data = lgb.Dataset("%s/lightGBM.motif.%s.%d.bin" %
(lightgbm_motif_binary_files_path, chrom_set_name, subset_index + 1)).construct()
train_data_all.add_features_from(train_data)
temp = []
chrom_set = self.chrom_sets[chrom_set_name]
for chrom in chrom_set:
df_temp = self.df_all_regions_label.loc[self.df_all_regions_label['chr'] == chrom, :]
temp.append(df_temp)
df_all_temp = pd.concat(temp, ignore_index=True)
# selected_index = np.where(df_all_temp[cell_line] != "A")[0]
# ignore_index = np.where(df_all_temp[cell_line] == "A")[0]
# label_b_u = np.delete(np.array(df_all_temp[cell_line]), ignore_index, axis=0)
# labels = list(map(lambda x: 1 if x == "B" else 0, label_b_u))
# train_data_all_subset = train_data_all.subset(selected_index)
# train_data_all_subset.set_label(labels)
# return train_data_all_subset
weight = (np.array(df_all_temp[cell_line]) != "A").astype(int)
train_data_all.set_weight(weight)
labels = (np.array(df_all_temp[cell_line]) == "B").astype(int)
train_data_all.set_label(labels)
# return train_data_all
train_data_all.save_binary("%s/lightGBM.all.%s.%s.bin" % (dir_out, cell_line, chrom_set_name))
def merge_lightgbm_binary_data_autoencoder(self, cell_line, chrom_set_name, dir_out=None,
lightgbm_dnase_binary_files_path=None,
lightgbm_motif_binary_files_path=None):
if dir_out is None:
dir_out = "./train/%s/binary_files" % self.training_tf_name
if lightgbm_motif_binary_files_path is None:
lightgbm_motif_binary_files_path = "./train/%s/binary_files" % self.training_tf_name
if lightgbm_dnase_binary_files_path is None:
lightgbm_dnase_binary_files_path = "./train/data/dnase_feature_binary_files"
all_feature_names = []
chrom = "chr19"
# TODO change to 50bp or 100bp
with h5py.File("%s/%s_motif_features_lightGBM.h5" % (self.selected_motif_feature_path, chrom),
"r") as infile:
all_feature_names += list(infile['feature_names'][...])
all_feature_names = list(map(lambda x: x.decode('UTF-8'), all_feature_names))
train_data_all = lgb.Dataset("%s/lightGBM.autoencoder.dnase.%s.%s.bin" %
(lightgbm_dnase_binary_files_path, cell_line, chrom_set_name)).construct()
# train_data = lgb.Dataset("%s/lightGBM.dnase.%s.%s.bin" %
# (lightgbm_dnase_binary_files_path, cell_line, chrom_set_name)).construct()
# train_data_all.add_features_from(train_data)
for subset_index in range(int(np.ceil(len(all_feature_names) / self.step))):
train_data = lgb.Dataset("%s/lightGBM.motif.%s.%d.bin" %
(lightgbm_motif_binary_files_path, chrom_set_name, subset_index + 1)).construct()
train_data_all.add_features_from(train_data)
temp = []
chrom_set = self.chrom_sets[chrom_set_name]
for chrom in chrom_set:
df_temp = self.df_all_regions_label.loc[self.df_all_regions_label['chr'] == chrom, :]
temp.append(df_temp)
df_all_temp = pd.concat(temp, ignore_index=True)
# selected_index = np.where(df_all_temp[cell_line] != "A")[0]
# ignore_index = np.where(df_all_temp[cell_line] == "A")[0]
# label_b_u = np.delete(np.array(df_all_temp[cell_line]), ignore_index, axis=0)
# labels = list(map(lambda x: 1 if x == "B" else 0, label_b_u))
# train_data_all_subset = train_data_all.subset(selected_index)
# train_data_all_subset.set_label(labels)
# return train_data_all_subset
weight = (np.array(df_all_temp[cell_line]) != "A").astype(int)
train_data_all.set_weight(weight)
labels = (np.array(df_all_temp[cell_line]) == "B").astype(int)
train_data_all.set_label(labels)
# return train_data_all
train_data_all.save_binary("%s/lightGBM.autoencoder.all.%s.%s.bin" % (dir_out, cell_line, chrom_set_name))
def train_models(self, cell_line, chrom_set_name, lightgbm_dnase_binary_files_path=None,
lightgbm_motif_binary_files_path=None, dir_out=None, num_threads=16):
if lightgbm_motif_binary_files_path is None:
lightgbm_motif_binary_files_path = "./train/%s/binary_files" % self.training_tf_name
if lightgbm_dnase_binary_files_path is None:
lightgbm_dnase_binary_files_path = "./train/data/dnase_feature_binary_files"
if dir_out is None:
dir_out = "./train/%s/models/" % self.training_tf_name
cell_line = str(cell_line)
params = {
'boosting_type': 'gbdt',
# 'boosting_type': 'dart',
# 'drop_rate': 0.3,
# 'max_drop': 50,
# 'skip_drop': 0.5,
# 'drop_seed': 6,
# 'pos_bagging_fraction': 1,
# 'neg_bagging_fraction': 0.01,
# 'bagging_freq': 10000,
# 'bagging_seed': 6,
'objective': 'binary',
# 'objective': focal_binary_object,
# 'metric': ['binary_error', 'binary_logloss', "auc"],
'metric': ["auc"],
# 'is_unbalance': True,
# "scale_pos_weight": 100,
'metric_freq': 10,
'num_leaves': 63,
# 'num_leaves': 7,
# 'max_bin': 255,
'num_threads': num_threads,
'learning_rate': 0.05,
'feature_fraction': 1,
'boost_from_average': False,
'verbose': 1
}
# other_set_type = list(set(self.chrom_sets.keys()) - {chrom_set_name})[0]
if len(self.df_all_regions_label.columns[3:]) > 1:
other_cell_lines = list(set(self.df_all_regions_label.columns[3:]) - {cell_line})
else:
other_cell_lines = [self.df_all_regions_label.columns[3]]
# train_data = self.merge_lightgbm_binary_data(cell_line, chrom_set_name, lightgbm_dnase_binary_files_path,
# lightgbm_motif_binary_files_path)
train_data = lgb.Dataset(
"%s/lightGBM.all.%s.%s.bin" % (lightgbm_motif_binary_files_path, cell_line, chrom_set_name))
list_validation_data = []
for other_cell_line in other_cell_lines:
# validation_data = self.merge_lightgbm_binary_data(other_cell_line, "chrom_set_test",
# lightgbm_dnase_binary_files_path,
# lightgbm_motif_binary_files_path)
validation_data = lgb.Dataset("%s/lightGBM.all.%s.%s.bin" % (
lightgbm_motif_binary_files_path, other_cell_line, "chrom_set_test"), reference=train_data)
list_validation_data.append(validation_data)
evals_result = {}
train_data = train_data.construct()
# see: https://arxiv.org/pdf/1909.04868.pdf
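# beta is initialized from the class balance of the training labels (the ratio
# of all training regions to positively labeled regions), following the
# prior-based initialization idea in the paper linked above.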
beta = - np.log10(2 * train_data.num_data()/np.where(train_data.get_label() > 0)[0].shape[0] - 1)
gbm = lgb.train(params=params,
train_set=train_data,
fobj=lambda x, y: focal_isoform_binary_object(x, y, alpha=0.5, beta=beta, gamma=1),
# fobj=lambda x,y:logistic_obj(x,y,imbalance_alpha=1.0),
valid_sets=[train_data] + list_validation_data,
valid_names=['train'] + ["%s_%s" % (other_cell_line, "set_test") \
for other_cell_line in other_cell_lines],
feval=lgb_auprc_score,
# early_stopping_rounds=20,
evals_result=evals_result,
num_boost_round=200,
keep_training_booster=False,
callbacks=[early_stopping(20, first_metric_only=False, verbose=True)]
)
with open("%s/%s.%s.%s_model.pkl" % (
dir_out, self.training_tf_name, cell_line, chrom_set_name), 'wb') as fout:
pickle.dump(gbm, fout)
with open("%s/%s.%s.%s_evals_result.pkl" % (
dir_out, self.training_tf_name, cell_line, chrom_set_name),
'wb') as outfile_evals_result:
pickle.dump(evals_result, outfile_evals_result, pickle.HIGHEST_PROTOCOL)
def train_models_hyperopt(self, cell_line, chrom_set_name, lightgbm_dnase_binary_files_path=None,
lightgbm_motif_binary_files_path=None, dir_out=None, num_threads=16):
if lightgbm_motif_binary_files_path is None:
lightgbm_motif_binary_files_path = "./train/%s/binary_files" % self.training_tf_name
if lightgbm_dnase_binary_files_path is None:
lightgbm_dnase_binary_files_path = "./train/data/dnase_feature_binary_files"
if dir_out is None:
dir_out = "./train/%s/models/" % self.training_tf_name
# other_set_type = list(set(self.chrom_sets.keys()) - {chrom_set_name})[0]
if len(self.df_all_regions_label.columns[3:]) > 1:
other_cell_lines = list(set(self.df_all_regions_label.columns[3:]) - {cell_line})
else:
other_cell_lines = [self.df_all_regions_label.columns[3]]
# train_data = self.merge_lightgbm_binary_data(cell_line, chrom_set_name, lightgbm_dnase_binary_files_path,
# lightgbm_motif_binary_files_path)
train_data = lgb.Dataset(
"%s/lightGBM.all.%s.%s.bin" % (lightgbm_motif_binary_files_path, cell_line, chrom_set_name))
list_validation_data = []
for other_cell_line in other_cell_lines:
# validation_data = self.merge_lightgbm_binary_data(other_cell_line, "chrom_set_test",
# lightgbm_dnase_binary_files_path,
# lightgbm_motif_binary_files_path)
validation_data = lgb.Dataset("%s/lightGBM.all.%s.%s.bin" % (
lightgbm_motif_binary_files_path, other_cell_line, "chrom_set_test"), reference=train_data)
list_validation_data.append(validation_data)
def hyperopt_objective(argsDict):
"""Objective function for Gradient Boosting Machine Hyperparameter Optimization"""
# Keep track of evals
global ITERATION
ITERATION += 1
global bayes_trials
with open("%s/%s.%s.%s_bayes_trials.pkl" % (
dir_out, self.training_tf_name, cell_line, chrom_set_name), 'wb') as fout:
pickle.dump(bayes_trials, fout)
# Make sure parameters that need to be integers are integers
for parameter_name in ['num_leaves',
'min_data_in_leaf',
# 'max_depth'
]:
argsDict[parameter_name] = int(argsDict[parameter_name])
start = timer()
params = {
'boosting_type': 'gbdt',
# 'ignore_column': list(range(500)),
# 'boosting_type': 'dart',
# 'drop_rate': 0.3,
# 'max_drop': 50,
# 'skip_drop': 0.5,
# 'drop_seed': 6,
# 'pos_bagging_fraction': 0.001,
# 'neg_bagging_fraction': 0.001,
# 'bagging_freq': 10000,
# 'bagging_seed': 6,
'objective': 'binary',
# 'objective': focal_binary_object,
# 'metric': ['binary_error', 'binary_logloss', "auc"],
'metric': ["auc"],
# 'first_metric_only': True,
# 'is_unbalance': True,
# "scale_pos_weight": 100,
# 'feature_fraction_bynode': True,
'metric_freq': 10,
'num_leaves': argsDict['num_leaves'],
'min_data_in_leaf': argsDict['min_data_in_leaf'],
# 'min_data_in_leaf': 20,
# 'max_depth': argsDict['max_depth'],
# 'min_sum_hessian_in_leaf': argsDict['min_sum_hessian_in_leaf'],
# 'bagging_fraction': argsDict['bagging_fraction'],
# 'feature_fraction': argsDict['feature_fraction'],
# 'lambda_l1': argsDict['lambda_l1'],
# 'lambda_l2': argsDict['lambda_l2'],
# 'max_bin': 255,
'num_threads': num_threads,
# 'learning_rate': argsDict['learning_rate'],
'learning_rate': 0.1,
'bagging_freq': 1,
'boost_from_average': False,
'verbose': 1
}
evals_result = {}
valid_names = ['train'] + ["%s_%s" % (other_cell_line, "set_test") \
for other_cell_line in other_cell_lines]
gbm = lgb.train(params,
train_set=train_data,
fobj=lambda x, y: focal_isoform_binary_object(x, y,
# alpha=float(
# np.clip(argsDict['alpha'], 0.001, 0.999)),
alpha=1. / (1. + np.exp(
-argsDict['alpha_isoform'])),
beta=argsDict['beta'],
gamma=argsDict['gamma']),
valid_sets=[train_data] + list_validation_data,
valid_names=valid_names,
feval=lgb_auprc_score,
num_boost_round=300,
# early_stopping_rounds=20,
evals_result=evals_result,
keep_training_booster=False,
callbacks=[early_stopping(20, first_metric_only=False, verbose=True)],
)
run_time = timer() - start
# Extract the best score
# best_score = np.max(cv_results['auprc-mean'])
auprc_sum = None
n = 0
for valid_name in valid_names:
if valid_name != "train":
if auprc_sum is None:
auprc_sum = np.array(evals_result[valid_name]['auprc'])
else:
auprc_sum += np.array(evals_result[valid_name]['auprc'])
n += 1
best_score = np.max(auprc_sum / n)
# Loss must be minimized
loss = 1 - best_score
# Boosting rounds that returned the highest cv score
# n_estimators = int(np.argmax(cv_results['auprc-mean']) + 1)
n_estimators = int(np.argmax(auprc_sum) + 1)
print('auprc:{} ITERATION:{} n_estimators:{} run_time:{}'.format(best_score, ITERATION, n_estimators,
run_time),
end="\n")
# Dictionary with information for evaluation
return {'loss': loss,
'params': argsDict,
'iteration': ITERATION,
'estimators': n_estimators,
'gbm': gbm,
'evals_result': evals_result,
'train_time': run_time,
'status': STATUS_OK}
# return loss
# Define the search space
space = {
# 'class_weight': hp.choice('class_weight', [None, 'balanced']),
'num_leaves': hp.qloguniform('num_leaves', np.log(15), np.log(1023), 5),
# 'max_depth': hp.quniform('max_depth', 3, 63, 1),
# 'min_sum_hessian_in_leaf': hp.loguniform('min_sum_hessian_in_leaf', np.log(0.001), np.log(1)),
# 'learning_rate': hp.loguniform('learning_rate', np.log(0.001), np.log(0.2)),
'min_data_in_leaf': hp.quniform('min_data_in_leaf', 20, 1000, 5),
# 'lambda_l1': hp.uniform('lambda_l1', 0.0, 1.0),
# 'lambda_l2': hp.uniform('lambda_l2', 0.0, 1.0),
# 'bagging_fraction': hp.uniform('bagging_fraction', 0.4, 1.0),
# 'feature_fraction': hp.uniform('feature_fraction', 0.4, 1.0),
# 'alpha': hp.loguniform('alpha', np.log(1), np.log(100)),
# 'alpha': hp.normal('alpha', 0.5, 0.15),
'alpha_isoform': hp.normal('alpha_isoform', 0, 3),
'beta': hp.uniform('beta', -10, 10),
'gamma': hp.loguniform('gamma', np.log(1), np.log(20)),
}
# Keep track of results
global bayes_trials
# bayes_trials = Trials()
bayes_trials_file_path = "%s/%s.%s.%s_bayes_trials.pkl" % (
dir_out, self.training_tf_name, cell_line, chrom_set_name)
if os.path.exists(bayes_trials_file_path):
with open(bayes_trials_file_path, 'rb') as fin:
bayes_trials = pickle.load(fin, encoding='latin1')
else:
bayes_trials = generate_trials_to_calculate(
[{'num_leaves': 63, 'min_data_in_leaf': 20, 'alpha_isoform': 0, 'beta': -1.5, 'gamma': 1.01}])
# Global variable
global ITERATION
ITERATION = 0
# Run optimization
best = fmin(fn=hyperopt_objective, space=space, algo=tpe.suggest,
max_evals=len(bayes_trials.tids)+30, trials=bayes_trials, rstate=np.random.RandomState(6))
# Sort the trials with lowest loss (highest AUC) first
bayes_trials_results = sorted(bayes_trials.results, key=lambda x: x['loss'])
# bayes_trials_results[:10]
with open("%s/%s.%s.%s_model.hyperopt.pkl" % (
dir_out, self.training_tf_name, cell_line, chrom_set_name), 'wb') as fout:
pickle.dump(bayes_trials_results[0]['gbm'], fout)
with open("%s/%s.%s.%s_evals_result.hyperopt.pkl" % (
dir_out, self.training_tf_name, cell_line, chrom_set_name),
'wb') as outfile_evals_result:
pickle.dump(bayes_trials_results[0]['evals_result'], outfile_evals_result, pickle.HIGHEST_PROTOCOL)
with open("%s/%s.%s.%s_bayes_trials.pkl" % (
dir_out, self.training_tf_name, cell_line, chrom_set_name), 'wb') as fout:
pickle.dump(bayes_trials, fout)
def train_models_autoencoder(self, cell_line, chrom_set_name, lightgbm_dnase_binary_files_path=None,
lightgbm_motif_binary_files_path=None, dir_out=None, num_threads=16):
if lightgbm_motif_binary_files_path is None:
lightgbm_motif_binary_files_path = "./train/%s/binary_files" % self.training_tf_name
if lightgbm_dnase_binary_files_path is None:
lightgbm_dnase_binary_files_path = "./train/data/dnase_feature_binary_files"
if dir_out is None:
dir_out = "./train/%s/models/" % self.training_tf_name
params = {
'boosting_type': 'gbdt',
# 'boosting_type': 'dart',
# 'drop_rate': 0.3,
# 'max_drop': 50,
# 'skip_drop': 0.5,
# 'drop_seed': 6,
# 'pos_bagging_fraction': 1,
# 'neg_bagging_fraction': 0.01,
# 'bagging_freq': 10000,
# 'bagging_seed': 6,
'objective': 'binary',
# 'objective': focal_binary_object,
# 'metric': ['binary_error', 'binary_logloss', "auc"],
'metric': ["auc"],
# 'is_unbalance': True,
# "scale_pos_weight": 100,
'metric_freq': 10,
'num_leaves': 63,
# 'max_bin': 255,
'num_threads': num_threads,
'learning_rate': 0.1,
'feature_fraction': 1,
'boost_from_average': False,
'verbose': 1
}
# other_set_type = list(set(self.chrom_sets.keys()) - {chrom_set_name})[0]
if len(self.df_all_regions_label.columns[3:]) > 1:
other_cell_lines = list(set(self.df_all_regions_label.columns[3:]) - {cell_line})
else:
other_cell_lines = [self.df_all_regions_label.columns[3]]
# train_data = self.merge_lightgbm_binary_data(cell_line, chrom_set_name, lightgbm_dnase_binary_files_path,
# lightgbm_motif_binary_files_path)
train_data = lgb.Dataset(
"%s/lightGBM.autoencoder.all.%s.%s.bin" % (lightgbm_motif_binary_files_path, cell_line, chrom_set_name))
list_validation_data = []
for other_cell_line in other_cell_lines:
# validation_data = self.merge_lightgbm_binary_data(other_cell_line, "chrom_set_test",
# lightgbm_dnase_binary_files_path,
# lightgbm_motif_binary_files_path)
validation_data = lgb.Dataset("%s/lightGBM.autoencoder.all.%s.%s.bin" % (
lightgbm_motif_binary_files_path, other_cell_line, "chrom_set_test"), reference=train_data)
list_validation_data.append(validation_data)
evals_result = {}
gbm = lgb.train(params=params,
train_set=train_data,
fobj=lambda x, y: focal_isoform_binary_object(x, y, alpha=0.5, beta=-1.5, gamma=1.01),
# fobj=lambda x,y:logistic_obj(x,y,imbalance_alpha=1.0),
valid_sets=[train_data] + list_validation_data,
valid_names=['train'] + ["%s_%s" % (other_cell_line, "set_test") \
for other_cell_line in other_cell_lines],
feval=lgb_auprc_score,
early_stopping_rounds=20,
evals_result=evals_result,
keep_training_booster=False)
with open("%s/%s.%s.%s_model.autoencoder.pkl" % (
dir_out, self.training_tf_name, cell_line, chrom_set_name), 'wb') as fout:
pickle.dump(gbm, fout)
with open("%s/%s.%s.%s_evals_result.autoencoder.pkl" % (
dir_out, self.training_tf_name, cell_line, chrom_set_name),
'wb') as outfile_evals_result:
pickle.dump(evals_result, outfile_evals_result, pickle.HIGHEST_PROTOCOL)
def make_prediction(self, cell_line, chrom, dir_dnase_feature_median=None, lightgbm_model_files_path=None,
dir_out=None):
if dir_dnase_feature_median is None:
dir_dnase_feature_median = "./hdf5s/DNase/median"
if lightgbm_model_files_path is None:
lightgbm_model_files_path = "./train/%s/models/" % self.training_tf_name
if dir_out is None:
dir_out = "./train/%s/predictions/" % self.training_tf_name
cell_line = str(cell_line)
scores = self.get_dnase_features(cell_line, chrom, dir_dnase_feature_median)
with open("%s/%s_variable_bp_quantile_map.pkl" % (self.quantile_transformer_path, cell_line),
'rb') as fin:
qt = pickle.load(fin, encoding='latin1')
# _ = qt.transform(scores[:, :int(scores.shape[1] / 2)])
_ = qt.transform(scores)
with h5py.File(self.region_topic_model_h5, "r") as region_topic_infile:
scores_topic = region_topic_infile[chrom][...]
with h5py.File("%s/%s_motif_features_lightGBM.h5" % (self.selected_motif_feature_path, chrom),
"r") as infile:
scores_motif = infile['scores'][...]
scores_all = np.hstack((scores, scores_topic[:, 8:], scores_motif))
# model_files = glob.glob("%s/%s_*_model.pkl" % (lightgbm_model_files_path, self.training_tf_name))
model_files = ["%s/%s.%s.%s_model.pkl" % (
lightgbm_model_files_path, self.training_tf_name, training_cell_line, training_chrom_set_name)
for training_cell_line in list(self.df_all_regions_label.columns[3:])
for training_chrom_set_name in sorted(list(set(self.chrom_sets.keys()) - {'chrom_set_test'}))]
model_files = np.array(model_files, dtype='S')
preds = np.zeros((scores_all.shape[0], len(model_files)))
for ind_model_file, model_file in enumerate(model_files):
with open(model_file, 'rb') as fin:
gbm = pickle.load(fin, encoding='latin1')
ypred = gbm.predict(scores_all)
preds[:, ind_model_file] = ypred
with h5py.File('%s/%s.%s.%s_preds.h5' % (dir_out, self.training_tf_name, cell_line, chrom),
"w") as outfile:
outfile.create_dataset("model_files", data=model_files,
shape=(len(model_files),),
compression='gzip', shuffle=True, fletcher32=True, compression_opts=9)
outfile.create_dataset("preds", data=preds,
shape=preds.shape,
compression='gzip', shuffle=True, fletcher32=True, compression_opts=9)
def make_prediction_hyperopt(self, cell_line, chrom, dir_dnase_feature_median=None, lightgbm_model_files_path=None,
dir_out=None):
if dir_dnase_feature_median is None:
dir_dnase_feature_median = "./hdf5s/DNase/median"
if lightgbm_model_files_path is None:
lightgbm_model_files_path = "./train/%s/models/" % self.training_tf_name
if dir_out is None:
dir_out = "./train/%s/predictions/" % self.training_tf_name
scores = self.get_dnase_features(cell_line, chrom, dir_dnase_feature_median)
with open("%s/%s_variable_bp_quantile_map.pkl" % (self.quantile_transformer_path, cell_line),
'rb') as fin:
qt = pickle.load(fin, encoding='latin1')
# _ = qt.transform(scores[:, :int(scores.shape[1] / 2)])
_ = qt.transform(scores)
with h5py.File("%s/%s_motif_features_lightGBM.h5" % (self.selected_motif_feature_path, chrom),
"r") as infile:
scores_motif = infile['scores'][...]
scores_all = np.hstack((scores, scores_motif))
# model_files = glob.glob("%s/%s_*_model.pkl" % (lightgbm_model_files_path, self.training_tf_name))
model_files = ["%s/%s.%s.%s_model.hyperopt.pkl" % (
lightgbm_model_files_path, self.training_tf_name, training_cell_line, training_chrom_set_name)
for training_cell_line in list(self.df_all_regions_label.columns[3:])
for training_chrom_set_name in sorted(list(set(self.chrom_sets.keys()) - {'chrom_set_test'}))]
model_files = np.array(model_files, dtype='S')
preds = np.zeros((scores_all.shape[0], len(model_files)))
for ind_model_file, model_file in enumerate(model_files):
with open(model_file, 'rb') as fin:
gbm = pickle.load(fin, encoding='latin1')
ypred = gbm.predict(scores_all)
preds[:, ind_model_file] = ypred
with h5py.File('%s/%s.%s.%s_preds.hyperopt.h5' % (dir_out, self.training_tf_name, cell_line, chrom),
"w") as outfile:
outfile.create_dataset("model_files", data=model_files,
shape=(len(model_files),),
compression='gzip', shuffle=True, fletcher32=True, compression_opts=9)
outfile.create_dataset("preds", data=preds,
shape=preds.shape,
compression='gzip', shuffle=True, fletcher32=True, compression_opts=9)
def make_prediction_leaf(self, cell_line, chrom, dir_dnase_feature_median=None, lightgbm_model_files_path=None,
dir_out=None):
if dir_dnase_feature_median is None:
dir_dnase_feature_median = "./hdf5s/DNase/median"
if lightgbm_model_files_path is None:
lightgbm_model_files_path = "./train/%s/models/" % self.training_tf_name
if dir_out is None:
dir_out = "./train/%s/predictions/" % self.training_tf_name
scores = self.get_dnase_features(cell_line, chrom, dir_dnase_feature_median)
with open("%s/%s_variable_bp_quantile_map.pkl" % (self.quantile_transformer_path, cell_line),
'rb') as fin:
qt = pickle.load(fin, encoding='latin1')
# _ = qt.transform(scores[:, :int(scores.shape[1] / 2)])
_ = qt.transform(scores)
with h5py.File("%s/%s_motif_features_lightGBM.h5" % (self.selected_motif_feature_path, chrom),
"r") as infile:
scores_motif = infile['scores'][...]
scores_all = np.hstack((scores, scores_motif))
# model_files = glob.glob("%s/%s_*_model.pkl" % (lightgbm_model_files_path, self.training_tf_name))
model_files = ["%s/%s.%s.%s_model.pkl" % (
lightgbm_model_files_path, self.training_tf_name, training_cell_line, training_chrom_set_name)
for training_cell_line in list(self.df_all_regions_label.columns[3:])
for training_chrom_set_name in sorted(list(set(self.chrom_sets.keys()) - {'chrom_set_test'}))]
with h5py.File('%s/%s.%s.%s_pred_leafs.h5' % (dir_out, self.training_tf_name, cell_line, chrom),
"w") as outfile:
for ind_model_file, model_file in enumerate(model_files):
with open(model_file, 'rb') as fin:
gbm = pickle.load(fin, encoding='latin1')
leafs = gbm.predict(scores_all, raw_score=False, pred_leaf=True, pred_contrib=False)
leaf_outputs = np.zeros(leafs.shape)
for i in range(leafs.shape[0]):
for j in range(leafs.shape[1]):
leaf_outputs[i, j] = gbm.get_leaf_output(j, leafs[i, j])
gc.collect()
outfile.create_dataset("%s/%s" % (model_file.split("/")[-1], cell_line),
data=leaf_outputs,
shape=leaf_outputs.shape,
compression='gzip', shuffle=True, fletcher32=True, compression_opts=9)
outfile.flush()
training_cell_line = model_file.split("/")[-1].split('.')[1]
source_scores = self.get_dnase_features(training_cell_line, chrom, dir_dnase_feature_median)
with open("%s/%s_variable_bp_quantile_map.pkl" % (self.quantile_transformer_path, training_cell_line),
'rb') as fin:
qt = pickle.load(fin, encoding='latin1')
# _ = qt.transform(scores[:, :int(scores.shape[1] / 2)])
_ = qt.transform(source_scores)
# source_scores_all = np.hstack((source_scores, scores_motif))
gc.collect()
leafs = gbm.predict(np.hstack((source_scores, scores_motif)), raw_score=False, pred_leaf=True,
pred_contrib=False)
leaf_outputs = np.zeros(leafs.shape)
for i in range(leafs.shape[0]):
for j in range(leafs.shape[1]):
leaf_outputs[i, j] = gbm.get_leaf_output(j, leafs[i, j])
outfile.create_dataset("%s/%s" % (model_file.split("/")[-1], training_cell_line),
data=leaf_outputs,
shape=leaf_outputs.shape,
compression='gzip', shuffle=True, fletcher32=True, compression_opts=9)
outfile.flush()
model_files = np.array(model_files, dtype='S')
outfile.create_dataset("model_files", data=model_files,
shape=(len(model_files),),
compression='gzip', shuffle=True, fletcher32=True, compression_opts=9)
def make_prediction_autoencoder(self, cell_line, chrom, dir_dnase_feature_median=None,
lightgbm_model_files_path=None,
dir_out=None):
if dir_dnase_feature_median is None:
dir_dnase_feature_median = "./hdf5s/DNase/median"
if lightgbm_model_files_path is None:
lightgbm_model_files_path = "./train/%s/models/" % self.training_tf_name
if dir_out is None:
dir_out = "./train/%s/predictions/" % self.training_tf_name
scores_autoencoder = self.get_dnase_features_autoencoder(cell_line, chrom, './hdf5s/DNase')
# scores = self.get_dnase_features(cell_line, chrom, dir_dnase_feature_median)
# with open("%s/%s_variable_bp_quantile_map.pkl" % (self.quantile_transformer_path, cell_line),
# 'rb') as fin:
# qt = pickle.load(fin, encoding='latin1')
# # _ = qt.transform(scores[:, :int(scores.shape[1] / 2)])
# _ = qt.transform(scores)
with h5py.File("%s/%s_motif_features_lightGBM.h5" % (self.selected_motif_feature_path, chrom),
"r") as infile:
scores_motif = infile['scores'][...]
# scores_all = np.hstack((scores_autoencoder, scores, scores_motif))
scores_all = np.hstack((scores_autoencoder, scores_motif))
# model_files = glob.glob("%s/%s_*_model.pkl" % (lightgbm_model_files_path, self.training_tf_name))
model_files = ["%s/%s.%s.%s_model.autoencoder.pkl" % (
lightgbm_model_files_path, self.training_tf_name, training_cell_line, training_chrom_set_name)
for training_cell_line in list(self.df_all_regions_label.columns[3:])
for training_chrom_set_name in sorted(list(set(self.chrom_sets.keys()) - {'chrom_set_test'}))]
model_files = np.array(model_files, dtype='S')
preds = np.zeros((scores_all.shape[0], len(model_files)))
for ind_model_file, model_file in enumerate(model_files):
with open(model_file, 'rb') as fin:
gbm = pickle.load(fin, encoding='latin1')
ypred = gbm.predict(scores_all)
preds[:, ind_model_file] = ypred
with h5py.File('%s/%s.%s.%s_preds.autoencoder.h5' % (dir_out, self.training_tf_name, cell_line, chrom),
"w") as outfile:
outfile.create_dataset("model_files", data=model_files,
shape=(len(model_files),),
compression='gzip', shuffle=True, fletcher32=True, compression_opts=9)
outfile.create_dataset("preds", data=preds,
shape=preds.shape,
compression='gzip', shuffle=True, fletcher32=True, compression_opts=9)
def evaluation(self, cell_line, lightgbm_preds_files_path=None, dir_out=None):
if dir_out is None:
dir_out = "./train/%s/evaluations/" % self.training_tf_name
cell_line = str(cell_line)
df_test_regions_label = pd.read_csv(
"%s/%s.%s" % (
self.config['test_cell_types_regions_label_path'], self.training_tf_name,
self.config['test_cell_types_regions_label_name']), sep="\t", header=0)
list_preds_binary = []
# list_preds_binary_2 = []
list_labels = []
list_preds_matrix = []
list_chroms = []
list_starts = []
for chrom in self.chrom_all:
with h5py.File(
'%s/%s.%s.%s_preds.h5' % (lightgbm_preds_files_path, self.training_tf_name, cell_line, chrom),
"r") as infile:
model_files = infile['model_files'][...]
preds = infile['preds'][...]
labels = np.array(df_test_regions_label.loc[df_test_regions_label['chr'] == chrom, :][cell_line])
list_preds_matrix.append(preds)
preds_binary = np.mean(1. / (1. + np.exp(-preds)), axis=1)
# preds_binary_2 = 1. / (1. + np.exp(-np.mean(preds, axis=1)))
list_preds_binary.append(preds_binary)
# list_preds_binary_2.append(preds_binary_2)
list_labels.append(labels)
with h5py.File("%s/%s_motif_features_lightGBM.h5" % (self.selected_motif_feature_path, chrom),
"r") as infile:
list_starts.append(infile['starts'][...])
list_chroms.append(np.array([chrom] * infile['starts'].shape[0]))
labels = np.hstack(list_labels)
preds = np.hstack(list_preds_binary)
# preds_2 = np.hstack(list_preds_binary_2)
preds_matrix = np.vstack(list_preds_matrix)
starts = np.hstack(list_starts)
chroms = np.hstack(list_chroms)
ignore_index = np.where(labels == "A")[0]
preds_matrix = np.delete(preds_matrix, ignore_index, axis=0)
preds = np.delete(preds, ignore_index, axis=0)
label_b_u = np.delete(labels, ignore_index, axis=0)
starts = np.delete(starts, ignore_index, axis=0)
chroms = np.delete(chroms, ignore_index, axis=0)
label_b_u = np.array(list(map(lambda x: 1 if x == "B" else 0, label_b_u)))
with open("%s/%s.%s_performance.txt" % (dir_out, self.training_tf_name, cell_line), "w") as outfile:
fpr, tpr, thresholds = metrics.roc_curve(label_b_u, preds, pos_label=1)
auc = metrics.auc(fpr, tpr)
auprc = average_precision_score(label_b_u, preds)
outfile.write("average model: auc:%.6f auprc:%.6f\n" % (auc, auprc))
temp = []
for i in range(preds_matrix.shape[1]):
fpr, tpr, thresholds = metrics.roc_curve(label_b_u, preds_matrix[:, i], pos_label=1)
auc = metrics.auc(fpr, tpr)
# auprc = average_precision_score(label_b_u, preds_matrix[:, i])
auprc = average_precision_score(label_b_u, 1. / (1. + np.exp(-preds_matrix[:, i])))
outfile.write("%s model: auc:%.6f auprc:%.6f\n" % (
model_files[i].decode().split("/")[-1].replace('_model.pkl', ''), auc, auprc))
precision, recall, thresholds = precision_recall_curve(label_b_u,
1. / (1. + np.exp(-preds_matrix[:, i])),
pos_label=1)
df_temp = pd.DataFrame(None)
df_temp["precision"] = precision
df_temp["recall"] = recall
df_temp["model"] = model_files[i].decode().split("/")[-1]
temp.append(df_temp.sample(n=min(100000, df_temp.shape[0])))
df_plot = pd.concat(temp, ignore_index=True)
plt.figure(figsize=(8, 6))
ax = sns.lineplot(x="recall", y="precision", data=df_plot, hue='model', palette="tab10")
ax.set_title("%s in %s" % (self.training_tf_name, cell_line))
ax.set_xlabel("Recall")
ax.set_ylabel("Precision")
ax.get_figure().savefig("%s/%s_%s_PRC.pdf" % (dir_out, self.training_tf_name, cell_line))
df_plot.to_csv(
"%s/df_plot.PRC.%s.xls" % (
dir_out, cell_line),
sep="\t", header=True, index=False)
with open("%s/%s.%s_confusion_matrix.txt" % (dir_out, self.training_tf_name, cell_line), "w") as outfile:
for i in range(preds_matrix.shape[1]):
one_preds = 1. / (1. + np.exp(-preds_matrix[:, i]))
cutoff = 0.5
true_positive = np.where((one_preds >= cutoff) & (label_b_u == 1))[0]
false_positive = np.where((one_preds >= cutoff) & (label_b_u == 0))[0]
false_negative = np.where((one_preds < cutoff) & (label_b_u == 1))[0]
outfile.write("%s model: all_regions:%d true_positive:%d false_positive:%d false_negative:%d\n" % (
model_files[i].decode().split("/")[-1].replace('_model.pkl', ''), len(one_preds),
len(true_positive), len(false_positive), len(false_negative)))
df = pd.DataFrame(None)
df["chrom"] = np.hstack((chroms[true_positive], chroms[false_positive], chroms[false_negative]))
df["start"] = np.hstack((starts[true_positive], starts[false_positive], starts[false_negative]))
df["preds"] = np.hstack(
(one_preds[true_positive], one_preds[false_positive], one_preds[false_negative]))
df["label"] = np.hstack(
(label_b_u[true_positive], label_b_u[false_positive], label_b_u[false_negative]))
df["class"] = ['true_positive'] * len(true_positive) + ['false_positive'] * len(false_positive) + [
'false_negative'] * len(false_negative)
df.to_csv(
"%s/df.%s.regions.%s.xls" % (
dir_out, model_files[i].decode().split("/")[-1].replace('_model.pkl', ''), cell_line),
sep="\t", header=True, index=False)
def evaluation_hyperopt(self, cell_line, lightgbm_preds_files_path=None, dir_out=None):
if dir_out is None:
dir_out = "./train/%s/evaluations/" % self.training_tf_name
df_test_regions_label = pd.read_csv(
"%s/%s.%s" % (
self.config['test_cell_types_regions_label_path'], self.training_tf_name,
self.config['test_cell_types_regions_label_name']), sep="\t", header=0)
list_preds_binary = []
# list_preds_binary_2 = []
list_labels = []
list_preds_matrix = []
list_chroms = []
list_starts = []
for chrom in self.chrom_all:
with h5py.File(
'%s/%s.%s.%s_preds.hyperopt.h5' % (lightgbm_preds_files_path, self.training_tf_name, cell_line, chrom),
"r") as infile:
model_files = infile['model_files'][...]
preds = infile['preds'][...]
labels = np.array(df_test_regions_label.loc[df_test_regions_label['chr'] == chrom, :][cell_line])
list_preds_matrix.append(preds)
preds_binary = np.mean(1. / (1. + np.exp(-preds)), axis=1)
# preds_binary_2 = 1. / (1. + np.exp(-np.mean(preds, axis=1)))
list_preds_binary.append(preds_binary)
# list_preds_binary_2.append(preds_binary_2)
list_labels.append(labels)
with h5py.File("%s/%s_motif_features_lightGBM.h5" % (self.selected_motif_feature_path, chrom),
"r") as infile:
list_starts.append(infile['starts'][...])
list_chroms.append(np.array([chrom] * infile['starts'].shape[0]))
labels = np.hstack(list_labels)
preds = np.hstack(list_preds_binary)
# preds_2 = np.hstack(list_preds_binary_2)
preds_matrix = np.vstack(list_preds_matrix)
starts = np.hstack(list_starts)
chroms = np.hstack(list_chroms)
ignore_index = np.where(labels == "A")[0]
preds_matrix = np.delete(preds_matrix, ignore_index, axis=0)
preds = np.delete(preds, ignore_index, axis=0)
label_b_u = np.delete(labels, ignore_index, axis=0)
starts = np.delete(starts, ignore_index, axis=0)
chroms = np.delete(chroms, ignore_index, axis=0)
label_b_u = np.array(list(map(lambda x: 1 if x == "B" else 0, label_b_u)))
with open("%s/%s.%s_performance.hyperopt.txt" % (dir_out, self.training_tf_name, cell_line), "w") as outfile:
fpr, tpr, thresholds = metrics.roc_curve(label_b_u, preds, pos_label=1)
auc = metrics.auc(fpr, tpr)
auprc = average_precision_score(label_b_u, preds)
outfile.write("average model: auc:%.6f auprc:%.6f\n" % (auc, auprc))
temp = []
for i in range(preds_matrix.shape[1]):
fpr, tpr, thresholds = metrics.roc_curve(label_b_u, preds_matrix[:, i], pos_label=1)
auc = metrics.auc(fpr, tpr)
# auprc = average_precision_score(label_b_u, preds_matrix[:, i])
auprc = average_precision_score(label_b_u, 1. / (1. + np.exp(-preds_matrix[:, i])))
outfile.write("%s model: auc:%.6f auprc:%.6f\n" % (
model_files[i].decode().split("/")[-1].replace('_model.pkl', ''), auc, auprc))
precision, recall, thresholds = precision_recall_curve(label_b_u, preds, pos_label=1)
df_temp = pd.DataFrame(None)
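# Hedged note (assumption): this module imports `fire` but its command-line
# entry point is not included in this excerpt. A typical wrapper would be:
#
#     if __name__ == '__main__':
#         fire.Fire(LightGBMModel)
#
# which would expose the prepare_*, train_models*, make_prediction* and
# evaluation* methods above as subcommands.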
#!/usr/bin/env python3
import sys
import json
import glob
import os.path
import matplotlib as mpl
import pandas as pd
import numpy as np
import scipy.sparse.csgraph as csg
usage = "usage: {} <folder containing *-analyzed.csv files>".format(sys.argv[0])
if len(sys.argv) < 2:
print(usage)
sys.exit(1)
folder = sys.argv[1].rstrip('/')
tables = glob.glob('{}/*-analyzed.csv'.format(folder))
print('loading {}'.format(', '.join(t.rsplit('/')[-1] for t in tables)))
frames = [pd.read_csv(t) for t in tables]
#!/home/jmframe/programs/anaconda3/bin/python3
import mannkendal as mk
"""
Run the Mann-Kendall trend test across many sites.
"""
import numpy as np
import pandas as pd
from glob import glob
from tqdm import tqdm
att_path = "/home/NearingLab/data/camels_attributes_v2.0/camels_all.txt"
attributes = pd.read_csv(att_path, sep=";")
qp = pd.read_csv('results-mk-runoff-ratio.txt', sep=" ")
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import zipfile
import os
import geopy.distance
import random
import pandas as pd
import numpy as np
import csv
from enum import Enum
from yaml import safe_load
from maro.cli.data_pipeline.utils import download_file, StaticParameter
from maro.utils.logger import CliLogger
from maro.cli.data_pipeline.base import DataPipeline, DataTopology
logger = CliLogger(name=__name__)
class CitiBikePipeline(DataPipeline):
_download_file_name = "trips.zip"
_station_info_file_name = "full_station.json"
_clean_file_name = "trips.csv"
_build_file_name = "trips.bin"
_station_meta_file_name = "station_meta.csv"
_distance_file_name = "distance_adj.csv"
_meta_file_name = "trips.yml"
def __init__(self, topology: str, source: str, station_info: str, is_temp: bool = False):
"""
Generate citi_bike data bin and other necessary files for the specified topology from specified source.
They will be generated in ~/.maro/data/citi_bike/[topology]/_build.
Folder structure:
~/.maro
/data/citi_bike/[topology]
/_build bin data file and other necessary files
/source
/_download original data files
/_clean cleaned data files
/temp download temp files
Args:
topology(str): topology name of the data files
source(str): source url of original data file
station_info(str): source url of station info file
is_temp(bool): (optional) if the data file is temporary
"""
super().__init__("citi_bike", topology, source, is_temp)
self._station_info = station_info
self._station_info_file = os.path.join(self._download_folder, self._station_info_file_name)
self._distance_file = os.path.join(self._build_folder, self._distance_file_name)
self._station_meta_file = os.path.join(self._build_folder, self._station_meta_file_name)
self._common_data = {}
def download(self, is_force: bool = False):
"""download the zip file"""
super().download(is_force)
self._new_file_list.append(self._station_info_file)
if (not is_force) and os.path.exists(self._station_info_file):
logger.info_green("File already exists, skipping download.")
else:
logger.info_green(f"Downloading trip data from {self._station_info} to {self._station_info_file}")
download_file(source=self._station_info, destination=self._station_info_file)
def clean(self):
"""unzip the csv file and process it for building binary file"""
super().clean()
logger.info_green("Cleaning trip data")
if os.path.exists(self._download_file):
# unzip
logger.info_green("Unzip start")
with zipfile.ZipFile(self._download_file, "r") as zip_ref:
for filename in zip_ref.namelist():
# Only one csv file is expected.
if (
filename.endswith(".csv") and
(not (filename.startswith("__MACOSX") or filename.startswith(".")))
):
logger.info_green(f"Unzip {filename} from {self._download_file}")
zip_ref.extractall(self._clean_folder, [filename])
unzip_file = os.path.join(self._clean_folder, filename)
self._new_file_list.append(unzip_file)
self._preprocess(unzipped_file=unzip_file)
break
else:
logger.warning(f"Not found downloaded trip data: {self._download_file}")
def _read_common_data(self):
"""read and full init data and existed stations"""
full_stations = None
with open(self._station_info_file, mode="r", encoding="utf-8") as station_file:
# read station to station file
raw_station_data = pd.DataFrame.from_dict(pd.read_json(station_file)["data"]["stations"])
station_data = raw_station_data.rename(columns={
"lon": "station_longitude",
"lat": "station_latitude",
"region_id": "region"})
# group by station to generate station init info
full_stations = station_data[
["station_id", "capacity", "station_longitude", "station_latitude"]
].reset_index(drop=True)
# generate station id by index
full_stations["station_id"] = pd.to_numeric(full_stations["station_id"], downcast="integer")
full_stations["capacity"] = pd.to_numeric(full_stations["capacity"], downcast="integer")
full_stations["station_longitude"] = pd.to_numeric(full_stations["station_longitude"], downcast="float")
full_stations["station_latitude"] = pd.to_numeric(full_stations["station_latitude"], downcast="float")
full_stations.drop(full_stations[full_stations["capacity"] == 0].index, axis=0, inplace=True)
full_stations.dropna(
subset=["station_id", "capacity", "station_longitude", "station_latitude"], inplace=True
)
self._common_data["full_stations"] = full_stations
self._common_data["full_station_num"] = len(self._common_data["full_stations"])
self._common_data["full_dock_num"] = self._common_data["full_stations"]["capacity"].sum()
def _read_src_file(self, file: str):
"""read and return processed rows"""
ret = []
if os.path.exists(file):
            # Ignore minor encoding issues in the source file.
with open(file, "r", encoding="utf-8", errors="ignore") as fp:
ret = pd.read_csv(fp)
ret = ret[[
"tripduration", "starttime", "start station id", "end station id", "start station latitude",
"start station longitude", "end station latitude", "end station longitude", "gender", "usertype",
"bikeid"
]]
ret["tripduration"] = pd.to_numeric(
pd.to_numeric(ret["tripduration"], downcast="integer") / 60, downcast="integer"
)
ret["starttime"] = pd.to_datetime(ret["starttime"])
ret["start station id"] = pd.to_numeric(ret["start station id"], errors="coerce", downcast="integer")
ret["end station id"] = pd.to_numeric(ret["end station id"], errors="coerce", downcast="integer")
ret["start station latitude"] = pd.to_numeric(ret["start station latitude"], downcast="float")
ret["start station longitude"] = pd.to_numeric(ret["start station longitude"], downcast="float")
ret["end station latitude"] = pd.to_numeric(ret["end station latitude"], downcast="float")
ret["end station longitude"] = pd.to_numeric(ret["end station longitude"], downcast="float")
ret["bikeid"] = pd.to_numeric(ret["bikeid"], errors="coerce", downcast="integer")
ret["gender"] = pd.to_numeric(ret["gender"], errors="coerce", downcast="integer")
ret["usertype"] = ret["usertype"].apply(str).apply(
lambda x: 0 if x in ["Subscriber", "subscriber"] else 1 if x in ["Customer", "customer"] else 2
)
ret.dropna(subset=[
"start station id", "end station id", "start station latitude", "end station latitude",
"start station longitude", "end station longitude"
], inplace=True)
ret.drop(
ret[
(ret["tripduration"] <= 1) |
(ret["start station latitude"] == 0) |
(ret["start station longitude"] == 0) |
(ret["end station latitude"] == 0) |
(ret["end station longitude"] == 0)
].index,
axis=0,
inplace=True
)
ret = ret.sort_values(by="starttime", ascending=True)
return ret
def _process_src_file(self, src_data: pd.DataFrame):
used_bikes = len(src_data[["bikeid"]].drop_duplicates(subset=["bikeid"]))
trip_data = src_data[
(src_data["start station latitude"] > 40.689960) &
(src_data["start station latitude"] < 40.768334) &
(src_data["start station longitude"] > -74.019623) &
(src_data["start station longitude"] < -73.909760)
]
trip_data = trip_data[
(trip_data["end station latitude"] > 40.689960) &
(trip_data["end station latitude"] < 40.768334) &
(trip_data["end station longitude"] > -74.019623) &
(trip_data["end station longitude"] < -73.909760)
]
trip_data["start_station_id"] = trip_data["start station id"]
trip_data["end_station_id"] = trip_data["end station id"]
# get new stations
used_stations = []
used_stations.append(
trip_data[["start_station_id", "start station latitude", "start station longitude", ]].drop_duplicates(
subset=["start_station_id"]).rename(
columns={
"start_station_id": "station_id",
"start station latitude": "latitude",
"start station longitude": "longitude"
}))
used_stations.append(
trip_data[["end_station_id", "end station latitude", "end station longitude", ]].drop_duplicates(
subset=["end_station_id"]).rename(
columns={
"end_station_id": "station_id",
"end station latitude": "latitude",
"end station longitude": "longitude"
}))
in_data_station = pd.concat(used_stations, ignore_index=True).drop_duplicates(
subset=["station_id"]
).sort_values(by=["station_id"]).reset_index(drop=True)
stations_existed = pd.DataFrame(in_data_station[["station_id"]])
stations_existed["station_index"] = pd.to_numeric(stations_existed.index, downcast="integer")
# get start station id and end station id
trip_data = trip_data.join(
stations_existed.set_index("station_id"),
on="start_station_id"
).rename(columns={"station_index": "start_station_index"})
trip_data = trip_data.join(
stations_existed.set_index("station_id"),
on="end_station_id"
).rename(columns={"station_index": "end_station_index"})
trip_data = trip_data.rename(columns={"starttime": "start_time", "tripduration": "duration"})
trip_data = trip_data[
["start_time", "start_station_id", "end_station_id", "duration", "start_station_index", "end_station_index"]
]
return trip_data, used_bikes, in_data_station, stations_existed
def _process_current_topo_station_info(
self, stations_existed: pd.DataFrame, used_bikes: int, loc_ref: pd.DataFrame
):
data_station_init = stations_existed.join(
self._common_data["full_stations"][["station_id", "capacity"]].set_index("station_id"),
on="station_id"
).join(
loc_ref[["station_id", "latitude", "longitude"]].set_index("station_id"),
on="station_id"
)
# data_station_init.rename(columns={"station_id": "station_index"}, inplace=True)
avg_capacity = int(self._common_data["full_dock_num"] / self._common_data["full_station_num"])
        available_bike_rate = used_bikes / self._common_data["full_dock_num"]
values = {"capacity": avg_capacity}
data_station_init.fillna(value=values, inplace=True)
data_station_init["init"] = (data_station_init["capacity"] * avalible_bike_rate).round().apply(int)
data_station_init["capacity"] = pd.to_numeric(data_station_init["capacity"],
errors="coerce", downcast="integer")
data_station_init["station_id"] = pd.to_numeric(data_station_init["station_id"],
errors="coerce", downcast="integer")
return data_station_init
def _process_distance(self, station_info: pd.DataFrame):
        distance_adj = pd.DataFrame(
            0,
            index=station_info["station_index"],
            columns=station_info["station_index"],
            dtype=np.float64,
        )
look_up_df = station_info[["latitude", "longitude"]]
return distance_adj.apply(lambda x: pd.DataFrame(x).apply(lambda y: geopy.distance.distance(
(look_up_df.at[x.name, "latitude"], look_up_df.at[x.name, "longitude"]),
(look_up_df.at[y.name, "latitude"], look_up_df.at[y.name, "longitude"])
).km, axis=1), axis=1)
def _preprocess(self, unzipped_file: str):
self._read_common_data()
logger.info_green("Reading raw data")
org_data = self._read_src_file(file=unzipped_file)
logger.info_green("Processing trip data")
trip_data, used_bikes, in_data_station, stations_existed = self._process_src_file(src_data=org_data)
self._new_file_list.append(self._clean_file)
self._new_file_list.append(self._station_meta_file)
self._new_file_list.append(self._distance_file)
with open(self._clean_file, mode="w", encoding="utf-8", newline="") as f:
trip_data.to_csv(f, index=False, header=True)
logger.info_green("Processing init data")
station_info = self._process_current_topo_station_info(
stations_existed=stations_existed, used_bikes=used_bikes, loc_ref=in_data_station
)
with open(self._station_meta_file, mode="w", encoding="utf-8", newline="") as f:
station_info.to_csv(f, index=False, header=True)
logger.info_green("Processing distance data")
station_distance = self._process_distance(station_info=station_info)
with open(self._distance_file, mode="w", encoding="utf-8", newline="") as f:
station_distance.to_csv(f, index=False, header=True)
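# Hedged usage sketch (not part of the MARO source): a CitiBikePipeline can be
# driven directly like this; the topology name and URLs below are placeholders,
# not real endpoints.
def _example_run_citibike_pipeline():
    pipeline = CitiBikePipeline(
        topology="ny.201912",                                          # hypothetical topology name
        source="https://example.com/trips.zip",                        # placeholder trip-data URL
        station_info="https://example.com/station_information.json",   # placeholder station-info URL
        is_temp=True,
    )
    pipeline.download()   # fetches trips.zip and the station info file
    pipeline.clean()      # unzips, filters and writes trips.csv, station_meta.csv, distance_adj.csv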
class WeatherPipeline(DataPipeline):
_last_day_temp = None # used to fill the temp for days which have no temp info
_download_file_name = "weather.csv"
_clean_file_name = "weather.csv"
_build_file_name = "KNYC_daily.bin"
_meta_file_name = "weather.yml"
class WeatherEnum(Enum):
SUNNY = 0
RAINY = 1
SNOWY = 2
SLEET = 3
def __init__(self, topology: str, source: str, is_temp: bool = False):
"""
Generate weather data bin for the specified topology from frontierweather.com.
        Generated files will be placed in ~/.maro/data/citi_bike/[topology]/_build.
Folder structure:
~/.maro
/data/citi_bike/[topology]
/_build bin data file
/source
/_download original data file
/_clean cleaned data file
/temp download temp file
Args:
topology(str): topology name of the data file
source(str): source url of original data file
is_temp(bool): (optional) if the data file is temporary
"""
super().__init__("citi_bike", topology, source, is_temp)
self._common_data = {}
def clean(self):
super().clean()
if os.path.exists(self._download_file):
self._new_file_list.append(self._clean_file)
logger.info_green("Cleaning weather data")
self._preprocess(input_file=self._download_file, output_file=self._clean_file)
else:
logger.warning(f"Not found downloaded weather data: {self._download_file}")
def _weather(self, row: dict):
water_str = row["Precipitation Water Equiv"]
water = round(float(water_str), 2) if water_str != "" else 0.0
snow_str = row["Snowfall"]
snow = round(float(snow_str), 2) if snow_str != "" else 0.0
if snow > 0.0 and water > 0:
return WeatherPipeline.WeatherEnum.SLEET.value
elif water > 0.0:
return WeatherPipeline.WeatherEnum.RAINY.value
elif snow > 0.0:
return WeatherPipeline.WeatherEnum.SNOWY.value
else:
return WeatherPipeline.WeatherEnum.SUNNY.value
def _parse_date(self, row: dict):
dstr = row.get("Date", None)
return dstr
def _parse_row(self, row: dict):
date = self._parse_date(row=row)
wh = self._weather(row=row)
temp_str = row["Avg Temp"]
temp = round(float(temp_str), 2) if temp_str != "" and temp_str is not None else self._last_day_temp
self._last_day_temp = temp
return {"date": date, "weather": wh, "temp": temp} if date is not None else None
def _preprocess(self, input_file: str, output_file: str):
data: list = None
with open(input_file, "rt") as fp:
reader = csv.DictReader(fp)
data = [self._parse_row(row=row) for row in reader]
data = filter(None, data)
with open(output_file, "w+") as fp:
writer = csv.DictWriter(fp, ["date", "weather", "temp"])
writer.writeheader()
writer.writerows(data)
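# Minimal standalone sketch of the classification rule used in _weather above:
# any snowfall combined with precipitation maps to SLEET, precipitation alone to
# RAINY, snowfall alone to SNOWY, otherwise SUNNY. Shown here on plain floats so
# it can be tried without a downloaded CSV.
def _classify_weather_example(water: float, snow: float) -> int:
    if snow > 0.0 and water > 0.0:
        return WeatherPipeline.WeatherEnum.SLEET.value
    elif water > 0.0:
        return WeatherPipeline.WeatherEnum.RAINY.value
    elif snow > 0.0:
        return WeatherPipeline.WeatherEnum.SNOWY.value
    return WeatherPipeline.WeatherEnum.SUNNY.value

# e.g. _classify_weather_example(0.3, 0.0) -> 1 (RAINY), _classify_weather_example(0.2, 1.5) -> 3 (SLEET)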
class CitiBikeTopology(DataTopology):
"""
Data topology for a predefined topology of citi_bike scenario.
Args:
topology(str): topology name of the data file
trip_source(str): original source url of citi_bike data
station_info(str): current status station info of the stations
weather_source(str): original source url of weather data
is_temp(bool): (optional) if the data file is temporary
"""
def __init__(self, topology: str, trip_source: str, station_info: str, weather_source: str, is_temp: bool = False):
super().__init__()
self._data_pipeline["trip"] = CitiBikePipeline(topology, trip_source, station_info, is_temp)
self._data_pipeline["weather"] = NOAAWeatherPipeline(topology, weather_source, is_temp)
self._is_temp = is_temp
def __del__(self):
if self._is_temp:
self.remove()
class CitiBikeToyPipeline(DataPipeline):
_clean_file_name = "trips.csv"
_build_file_name = "trips.bin"
_station_meta_file_name = "station_meta.csv"
_distance_file_name = "distance_adj.csv"
_meta_file_name = "trips.yml"
def __init__(
self, start_time: str, end_time: str, stations: list, trips: list, topology: str, is_temp: bool = False
):
"""
Generate synthetic business events and station initialization distribution for Citi Bike scenario,
from the predefined toy topologies.
Folder structure:
~/.maro
/data/citi_bike/[topology]
/_build bin data file and other necessary files
Args:
start_time(str): start time of the toy data
end_time(str): end time of the toy data
stations(list): list of stations info
trips(list): list of trips probability
topology(str): topology name of the data files
is_temp(bool): (optional) if the data file is temporary
"""
super().__init__("citi_bike", topology, "", is_temp)
self._start_time = start_time
self._end_time = end_time
self._stations = stations
self._trips = trips
self._distance_file = os.path.join(self._build_folder, self._distance_file_name)
self._station_meta_file = os.path.join(self._build_folder, self._station_meta_file_name)
def download(self, is_force: bool):
pass
def _station_dict_to_pd(self, station_dict):
"""convert dictionary of station information to pd series"""
return pd.Series(
[
station_dict["id"],
station_dict["capacity"],
station_dict["init"],
station_dict["lat"],
station_dict["lon"],
],
index=["station_index", "capacity", "init", "latitude", "longitude"])
def _gen_stations(self):
"""generate station meta csv"""
self._new_file_list.append(self._station_meta_file)
stations = pd.Series(self._stations).apply(self._station_dict_to_pd)
stations["station_index"] = pd.to_numeric(stations["station_index"], errors="coerce", downcast="integer")
stations["station_id"] = pd.to_numeric(stations["station_index"], errors="coerce", downcast="integer")
stations["capacity"] = pd.to_numeric(stations["capacity"], errors="coerce", downcast="integer")
stations["init"] = pd.to_numeric(stations["init"], errors="coerce", downcast="integer")
with open(self._station_meta_file, "w", encoding="utf-8", newline="") as f:
stations.to_csv(f, index=False, header=True)
return stations
def _gen_trip(self, tick):
"""generate trip record"""
ret_list = []
cur_probability = random.uniform(0, 1)
for trip in self._trips:
if trip["probability"] >= cur_probability:
ret = {}
ret["start_time"] = tick
ret["start_station_id"] = trip["start_id"]
ret["end_station_id"] = trip["end_id"]
ret["start_station_index"] = trip["start_id"]
ret["end_station_index"] = trip["end_id"]
ret["duration"] = random.uniform(0, 120)
ret_list.append(ret)
return ret_list
def _gen_trips(self):
"""generate trip records csv files"""
cur_tick = pd.to_datetime(self._start_time)
end_tick = pd.to_datetime(self._end_time)
trips = []
while cur_tick < end_tick:
new_trips = self._gen_trip(cur_tick)
trips.extend(new_trips)
cur_tick += pd.Timedelta(120, unit="second")
trips_df = pd.DataFrame.from_dict(trips)
trips_df["start_station_index"] = pd.to_numeric(trips_df["start_station_index"],
errors="coerce", downcast="integer")
trips_df["end_station_index"] = pd.to_numeric(trips_df["end_station_index"],
errors="coerce", downcast="integer")
self._new_file_list.append(self._clean_file)
with open(self._clean_file, "w", encoding="utf-8", newline="") as f:
trips_df.to_csv(f, index=False, header=True)
return trips_df
def _gen_distance(self, station_init: pd.DataFrame):
"""generate distance metrix csv file"""
distance_adj = pd.DataFrame(
0,
index=station_init["station_index"],
columns=station_init["station_index"],
            dtype=np.float64
)
look_up_df = station_init[["latitude", "longitude"]]
distance_df = distance_adj.apply(lambda x: pd.DataFrame(x).apply(lambda y: geopy.distance.distance(
(look_up_df.at[x.name, "latitude"], look_up_df.at[x.name, "longitude"]),
(look_up_df.at[y.name, "latitude"], look_up_df.at[y.name, "longitude"])
).km, axis=1), axis=1)
self._new_file_list.append(self._distance_file)
with open(self._distance_file, "w", encoding="utf-8", newline="") as f:
distance_df.to_csv(f, index=False, header=True)
return distance_df
def clean(self):
logger.info_green(f"Generating trip data for topology {self._topology} .")
super().clean()
stations = self._gen_stations()
self._gen_trips()
self._gen_distance(stations)
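# Hedged usage sketch (not part of the MARO source): the toy pipeline only needs
# in-memory station and trip descriptions; the concrete values below are
# illustrative only.
def _example_run_toy_pipeline():
    stations = [
        {"id": 0, "capacity": 20, "init": 10, "lat": 40.75, "lon": -73.99},
        {"id": 1, "capacity": 30, "init": 15, "lat": 40.73, "lon": -73.98},
    ]
    trips = [
        {"probability": 0.5, "start_id": 0, "end_id": 1},
        {"probability": 0.2, "start_id": 1, "end_id": 0},
    ]
    pipeline = CitiBikeToyPipeline(
        start_time="2019-01-01 00:00:00",
        end_time="2019-01-02 00:00:00",
        stations=stations,
        trips=trips,
        topology="toy.example",   # hypothetical topology name
        is_temp=True,
    )
    pipeline.clean()  # writes station_meta.csv, trips.csv and distance_adj.csv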
class WeatherToyPipeline(WeatherPipeline):
def __init__(self, topology: str, start_time: str, end_time: str, is_temp: bool = False):
"""
        Generate a synthetic weather data bin for the specified topology (toy data, no external source).
It will be generated in ~/.maro/data/citi_bike/[topology]/_build.
folder structure:
~/.maro
/data/citi_bike/[topology]
/_build bin data file
/source
/_download original data file
/_clean cleaned data file
/temp download temp file
Args:
topology(str): topology name of the data file
start_time(str): start time of the toy data
end_time(str): end time of the toy data
is_temp(bool): (optional) if the data file is temporary
"""
super().__init__(topology, "", is_temp)
self._start_time = start_time
self._end_time = end_time
def download(self, is_force: bool):
pass
def clean(self):
logger.info_green("Cleaning weather data")
DataPipeline.clean(self)
self._new_file_list.append(self._clean_file)
self._preprocess(output_file=self._clean_file)
def _weather(self):
water = round(float(random.uniform(-1, 1)), 2)
snow = round(float(random.uniform(-1, 1)), 2)
if snow > 0.0 and water > 0:
return WeatherPipeline.WeatherEnum.SLEET.value
elif water > 0.0:
return WeatherPipeline.WeatherEnum.RAINY.value
elif snow > 0.0:
return WeatherPipeline.WeatherEnum.SNOWY.value
else:
return WeatherPipeline.WeatherEnum.SUNNY.value
def _gen_weather(self, tick):
date = tick.strftime("%m/%d/%Y %H:%M:%S")
wh = self._weather()
temp = round(float(random.uniform(-1, 1) * 40), 2)
return {"date": date, "weather": wh, "temp": temp}
def _preprocess(self, output_file: str):
data: list = []
cur_tick = pd.to_datetime(self._start_time)
end_tick = pd.to_datetime(self._end_time)
while cur_tick <= end_tick:
new_weather = self._gen_weather(cur_tick)
data.append(new_weather)
cur_tick += pd.Timedelta(1, unit="day")
with open(output_file, "w+") as fp:
writer = csv.DictWriter(fp, ["date", "weather", "temp"])
writer.writeheader()
writer.writerows(data)
class CitiBikeToyTopology(DataTopology):
"""
Data topology for a predefined toy topology of citi_bike scenario.
Args:
topology(str): Topology name of the data file.
config_path(str): Config file path of the topology.
is_temp(bool): (optional) If the data file is temporary.
"""
def __init__(self, topology: str, config_path: str, is_temp: bool = False):
super().__init__()
self._is_temp = is_temp
if config_path.startswith("~"):
config_path = os.path.expanduser(config_path)
if os.path.exists(config_path):
with open(config_path) as fp:
cfg = safe_load(fp)
self._data_pipeline["trip"] = CitiBikeToyPipeline(
start_time=cfg["start_time"],
end_time=cfg["end_time"],
stations=cfg["stations"],
trips=cfg["trips"],
topology=topology,
is_temp=is_temp
)
self._data_pipeline["weather"] = WeatherToyPipeline(
topology=topology,
start_time=cfg["start_time"],
end_time=cfg["end_time"],
is_temp=is_temp
)
else:
logger.warning(f"Config file {config_path} for toy topology {topology} not found.")
def download(self, is_force: bool = False):
pass
def __del__(self):
if self._is_temp:
self.remove()
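# Hedged sketch (not part of the MARO source) of the toy config consumed above:
# CitiBikeToyTopology expects a YAML file exposing start_time, end_time,
# stations and trips keys; the concrete values below are illustrative only.
_EXAMPLE_TOY_CONFIG = """
start_time: "2019-01-01 00:00:00"
end_time: "2019-01-02 00:00:00"
stations:
  - {id: 0, capacity: 20, init: 10, lat: 40.75, lon: -73.99}
  - {id: 1, capacity: 30, init: 15, lat: 40.73, lon: -73.98}
trips:
  - {probability: 0.5, start_id: 0, end_id: 1}
"""

def _example_load_toy_config():
    # Yields the dict unpacked in CitiBikeToyTopology.__init__.
    return safe_load(_EXAMPLE_TOY_CONFIG)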
class CitiBikeProcess:
"""
Contains all predefined data topologies of citi_bike scenario.
Args:
is_temp(bool): (optional) if the data file is temporary
"""
meta_file_name = "source_urls.yml"
meta_root = os.path.join(StaticParameter.data_root, "citi_bike/meta")
def __init__(self, is_temp: bool = False):
self.topologies = {}
self.meta_root = os.path.expanduser(self.meta_root)
self._meta_path = os.path.join(self.meta_root, self.meta_file_name)
with open(self._meta_path) as fp:
self._conf = safe_load(fp)
for topology in self._conf["trips"].keys():
if topology.startswith("toy"):
self.topologies[topology] = CitiBikeToyTopology(
topology=topology,
config_path=self._conf["trips"][topology]["toy_meta_path"],
is_temp=is_temp
)
else:
self.topologies[topology] = CitiBikeTopology(
topology=topology,
trip_source=self._conf["trips"][topology]["trip_remote_url"],
station_info=self._conf["station_info"]["ny_station_info_url"],
weather_source=self._conf["weather"][topology]["noaa_weather_url"],
is_temp=is_temp
)
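# Hedged sketch (not part of the MARO source): CitiBikeProcess exposes its
# predefined topologies as a plain dict, so they can be enumerated and fetched
# in a loop. download() is assumed to come from the DataTopology base class
# (the toy topology above overrides it with the same signature).
def _example_build_all_topologies():
    process = CitiBikeProcess(is_temp=True)
    for name, topology in process.topologies.items():
        print(f"building citi_bike topology: {name}")
        topology.download()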
class NOAAWeatherPipeline(WeatherPipeline):
def __init__(self, topology: str, source: str, is_temp: bool = False):
"""
Generate weather data bin for the specified topology from ncei.noaa.gov.
        Generated files will be placed in ~/.maro/data/citi_bike/[topology]/_build.
Folder structure:
~/.maro
/data/citi_bike/[topology]
/_build bin data file
/source
/_download original data file
/_clean cleaned data file
/temp download temp file
Args:
topology(str): topology name of the data file
source(str): source url of original data file
is_temp(bool): (optional) if the data file is temporary
"""
super().__init__(topology, source, is_temp)
def clean(self):
super().clean()
if os.path.exists(self._download_file):
self._new_file_list.append(self._clean_file)
logger.info_green("Cleaning weather data")
self._preprocess(input_file=self._download_file, output_file=self._clean_file)
else:
logger.warning(f"Not found downloaded weather data: {self._download_file}")
def _weather(self, row):
water = row["PRCP"] if row["PRCP"] is not None else 0.0
snow = row["SNOW"] if row["SNOW"] is not None else 0.0
if snow > 0.0 and water > 0:
return WeatherPipeline.WeatherEnum.SLEET.value
elif water > 0.0:
return WeatherPipeline.WeatherEnum.RAINY.value
elif snow > 0.0:
return WeatherPipeline.WeatherEnum.SNOWY.value
else:
return WeatherPipeline.WeatherEnum.SUNNY.value
def _preprocess(self, input_file: str, output_file: str):
        data: pd.DataFrame = pd.DataFrame()
import pandas as pd
# import re
def processFE_df(df):
"""Function to process Pandas dataframe from Funds Explorer site:
'https://www.fundsexplorer.com.br/ranking'
    After this function the DataFrame can be filtered for analysis.
Args:
df ([type]): pandas.core.frame.DataFrame
Returns:
[type]: pandas.core.frame.DataFrame
"""
df.columns = ['codigo', 'setor', 'precoatualR$', 'liqdiariaNeg',
'dividR$', 'divyield%', 'dy3macum%', 'dy6macum%',
'dy12macum%', 'dy3mmedia%', 'dy6mmedia%', 'dy12mmedia%',
'dyano%', 'varpreco%', 'rentper%', 'rentacum%',
'patrliqR$', 'vpaR$', 'p/vpaN', 'dypatr%', 'varpatr%',
'rentpatrper%', 'rentpatracum%', 'vacfisica%',
'vacfinan%', 'qtdativosN']
df = df.applymap(lambda x: str(x).replace('R$', ''))
df = df.applymap(lambda x: str(x).replace('%', ''))
df['precoatualR$'] = df['precoatualR$'].apply(lambda x:
str(x).replace('.', ''))
df['patrliqR$'] = df['patrliqR$'].apply(lambda x:
str(x).replace('.', ''))
df['vpaR$'] = df['vpaR$'].apply(lambda x: str(x).replace('.', ''))
df = df.applymap(lambda x: str(x).replace(',', '.'))
df['setor'] = df['setor'].apply(lambda x: str(x).replace('Ã', 'i'))
# df['setor'] = df['setor'].apply(lambda x: re.sub(r'Ã ', 'i', x))
df['codigo'] = df['codigo'].astype('string')
df['setor'] = df['setor'].astype('string')
df['precoatualR$'] = pd.to_numeric(df['precoatualR$'], errors='coerce')
df['liqdiariaNeg'] = pd.to_numeric(df['liqdiariaNeg'], errors='coerce')
df['dividR$'] = pd.to_numeric(df['dividR$'], errors='coerce')
df['divyield%'] = pd.to_numeric(df['divyield%'], errors='coerce')
df['dy3macum%'] = pd.to_numeric(df['dy3macum%'], errors='coerce')
df['dy6macum%'] = pd.to_numeric(df['dy6macum%'], errors='coerce')
df['dy12macum%'] = pd.to_numeric(df['dy12macum%'], errors='coerce')
df['dy3mmedia%'] = pd.to_numeric(df['dy3mmedia%'], errors='coerce')
df['dy6mmedia%'] = pd.to_numeric(df['dy6mmedia%'], errors='coerce')
df['dy12mmedia%'] = pd.to_numeric(df['dy12mmedia%'], errors='coerce')
df['dyano%'] = pd.to_numeric(df['dyano%'], errors='coerce')
df['varpreco%'] = pd.to_numeric(df['varpreco%'], errors='coerce')
df['rentper%'] = pd.to_numeric(df['rentper%'], errors='coerce')
df['rentacum%'] = pd.to_numeric(df['rentacum%'], errors='coerce')
df['patrliqR$'] = pd.to_numeric(df['patrliqR$'], errors='coerce')
    df['vpaR$'] = pd.to_numeric(df['vpaR$'], errors='coerce')
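    # Assumed completion: return the cleaned frame, as stated in the docstring above.
    return df


# Hedged usage sketch (not part of the original script): the Funds Explorer
# ranking page serves an HTML table that pandas.read_html can scrape directly;
# the first table found on the page is then passed through the cleaner above.
def load_funds_explorer_ranking(url: str = 'https://www.fundsexplorer.com.br/ranking') -> pd.DataFrame:
    raw = pd.read_html(url)[0]  # first table found on the ranking page
    return processFE_df(raw)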
# -*- coding: utf-8 -*-
import unittest
import pandas as pd
import pandas.testing as tm
import numpy as np
from pandas_xyz import algorithms as algs
class TestAlgorithms(unittest.TestCase):
def test_displacement(self):
"""Test out my distance algorithm with hand calcs."""
lon = pd.Series([0.0, 0.0, 0.0])
lon_ew = pd.Series([0.0, 1.0, 2.0])
lat = pd.Series([0.0, 0.0, 0.0])
lat_ns = pd.Series([0.0, 1.0, 2.0])
disp_ew = algs.ds_from_xy(lat, lon_ew)
self.assertIsInstance(disp_ew, pd.Series)
tm.assert_series_equal(
disp_ew,
6371000 * 1.0 * np.pi / 180 * pd.Series([0, 1, 1]),
)
disp_ns = algs.ds_from_xy(lat_ns, lon)
self.assertIsInstance(disp_ns, pd.Series)
tm.assert_series_equal(
disp_ns,
6371000 * 1.0 * np.pi / 180 * pd.Series([0, 1, 1]),
)
def test_clean_series(self):
lat = pd.Series([np.nan, 40.0, np.nan, np.nan, 41.0, np.nan])
lat_clean = algs._clean_series(lat)
self.assertFalse(lat_clean.isna().any())
with self.assertRaises(ValueError):
            algs._clean_series(pd.Series([np.nan, np.nan, np.nan]))
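# Worked check (a sketch, independent of the pandas_xyz internals) of the
# expected values used in test_displacement above: one degree of arc on a
# sphere of radius 6371 km is R * pi / 180 ≈ 111195 m, which is exactly the
# per-step displacement the reference series encodes.
def one_degree_arc_length_m(radius_m: float = 6371000.0) -> float:
    return radius_m * np.pi / 180.0  # ≈ 111194.9 metres per degree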
# *****************************************************************************
# Copyright (c) 2019-2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import itertools
import os
import platform
import string
import unittest
from copy import deepcopy
from itertools import product
import numpy as np
import pandas as pd
from numba.core.errors import TypingError
from sdc.hiframes.rolling import supported_rolling_funcs
from sdc.tests.test_base import TestCase
from sdc.tests.test_series import gen_frand_array
from sdc.tests.test_utils import (count_array_REPs, count_parfor_REPs,
skip_numba_jit, skip_sdc_jit,
test_global_input_data_float64)
LONG_TEST = (int(os.environ['SDC_LONG_ROLLING_TEST']) != 0
if 'SDC_LONG_ROLLING_TEST' in os.environ else False)
test_funcs = ('mean', 'max',)
if LONG_TEST:
# all functions except apply, cov, corr
test_funcs = supported_rolling_funcs[:-3]
def rolling_std_usecase(obj, window, min_periods, ddof):
return obj.rolling(window, min_periods).std(ddof)
def rolling_var_usecase(obj, window, min_periods, ddof):
return obj.rolling(window, min_periods).var(ddof)
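# Quick illustrative sketch (not part of the test suite) of what the two
# usecases above compute: pandas evaluates std/var over a fixed-size rolling
# window, and ddof switches the divisor between the population (ddof=0) and
# sample (ddof=1) estimators.
def _rolling_std_var_demo():
    s = pd.Series([1.0, 2.0, 4.0, 7.0, 11.0])
    print(rolling_std_usecase(s, window=3, min_periods=1, ddof=1))  # sample std over each window
    print(rolling_var_usecase(s, window=3, min_periods=1, ddof=0))  # population variance over each window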
class TestRolling(TestCase):
@skip_numba_jit
def test_series_rolling1(self):
def test_impl(S):
return S.rolling(3).sum()
hpat_func = self.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@skip_numba_jit
def test_fixed1(self):
# test sequentially with manually created dfs
wins = (3,)
if LONG_TEST:
wins = (2, 3, 5)
centers = (False, True)
for func_name in test_funcs:
func_text = "def test_impl(df, w, c):\n return df.rolling(w, center=c).{}()\n".format(func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for args in itertools.product(wins, centers):
df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
df = pd.DataFrame({'B': [0, 1, 2, -2, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
@skip_numba_jit
def test_fixed2(self):
# test sequentially with generated dfs
sizes = (121,)
wins = (3,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 3, 5)
centers = (False, True)
for func_name in test_funcs:
func_text = "def test_impl(df, w, c):\n return df.rolling(w, center=c).{}()\n".format(func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for n, w, c in itertools.product(sizes, wins, centers):
df = pd.DataFrame({'B': np.arange(n)})
pd.testing.assert_frame_equal(hpat_func(df, w, c), test_impl(df, w, c))
@skip_numba_jit
def test_fixed_apply1(self):
# test sequentially with manually created dfs
def test_impl(df, w, c):
return df.rolling(w, center=c).apply(lambda a: a.sum())
hpat_func = self.jit(test_impl)
wins = (3,)
if LONG_TEST:
wins = (2, 3, 5)
centers = (False, True)
for args in itertools.product(wins, centers):
df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
df = pd.DataFrame({'B': [0, 1, 2, -2, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
@skip_numba_jit
def test_fixed_apply2(self):
# test sequentially with generated dfs
def test_impl(df, w, c):
return df.rolling(w, center=c).apply(lambda a: a.sum())
hpat_func = self.jit(test_impl)
sizes = (121,)
wins = (3,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 3, 5)
centers = (False, True)
for n, w, c in itertools.product(sizes, wins, centers):
df = pd.DataFrame({'B': np.arange(n)})
pd.testing.assert_frame_equal(hpat_func(df, w, c), test_impl(df, w, c))
@skip_numba_jit
def test_fixed_parallel1(self):
def test_impl(n, w, center):
df = pd.DataFrame({'B': np.arange(n)})
R = df.rolling(w, center=center).sum()
return R.B.sum()
hpat_func = self.jit(test_impl)
sizes = (121,)
wins = (5,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 4, 5, 10, 11)
centers = (False, True)
for args in itertools.product(sizes, wins, centers):
self.assertEqual(hpat_func(*args), test_impl(*args),
"rolling fixed window with {}".format(args))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_fixed_parallel_apply1(self):
def test_impl(n, w, center):
df = pd.DataFrame({'B': np.arange(n)})
R = df.rolling(w, center=center).apply(lambda a: a.sum())
return R.B.sum()
hpat_func = self.jit(test_impl)
sizes = (121,)
wins = (5,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 4, 5, 10, 11)
centers = (False, True)
for args in itertools.product(sizes, wins, centers):
self.assertEqual(hpat_func(*args), test_impl(*args),
"rolling fixed window with {}".format(args))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_variable1(self):
# test sequentially with manually created dfs
df1 = pd.DataFrame({'B': [0, 1, 2, np.nan, 4],
'time': [pd.Timestamp('20130101 09:00:00'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03'),
pd.Timestamp('20130101 09:00:05'),
pd.Timestamp('20130101 09:00:06')]})
df2 = pd.DataFrame({'B': [0, 1, 2, -2, 4],
'time': [pd.Timestamp('20130101 09:00:01'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03'),
pd.Timestamp('20130101 09:00:04'),
pd.Timestamp('20130101 09:00:09')]})
wins = ('2s',)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
# all functions except apply
for w, func_name in itertools.product(wins, test_funcs):
func_text = "def test_impl(df):\n return df.rolling('{}', on='time').{}()\n".format(w, func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
# XXX: skipping min/max for this test since the behavior of Pandas
# is inconsistent: it assigns NaN to last output instead of 4!
if func_name not in ('min', 'max'):
pd.testing.assert_frame_equal(hpat_func(df1), test_impl(df1))
pd.testing.assert_frame_equal(hpat_func(df2), test_impl(df2))
@skip_numba_jit
def test_variable2(self):
# test sequentially with generated dfs
wins = ('2s',)
sizes = (121,)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
sizes = (1, 2, 10, 11, 121, 1000)
# all functions except apply
for w, func_name in itertools.product(wins, test_funcs):
func_text = "def test_impl(df):\n return df.rolling('{}', on='time').{}()\n".format(w, func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for n in sizes:
time = pd.date_range(start='1/1/2018', periods=n, freq='s')
df = pd.DataFrame({'B': np.arange(n), 'time': time})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
@skip_numba_jit
def test_variable_apply1(self):
# test sequentially with manually created dfs
df1 = pd.DataFrame({'B': [0, 1, 2, np.nan, 4],
'time': [pd.Timestamp('20130101 09:00:00'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03'),
pd.Timestamp('20130101 09:00:05'),
pd.Timestamp('20130101 09:00:06')]})
df2 = pd.DataFrame({'B': [0, 1, 2, -2, 4],
'time': [pd.Timestamp('20130101 09:00:01'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03'),
pd.Timestamp('20130101 09:00:04'),
pd.Timestamp('20130101 09:00:09')]})
wins = ('2s',)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
# all functions except apply
for w in wins:
func_text = "def test_impl(df):\n return df.rolling('{}', on='time').apply(lambda a: a.sum())\n".format(w)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(df1), test_impl(df1))
pd.testing.assert_frame_equal(hpat_func(df2), test_impl(df2))
@skip_numba_jit
def test_variable_apply2(self):
# test sequentially with generated dfs
wins = ('2s',)
sizes = (121,)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
# TODO: this crashes on Travis (3 process config) with size 1
sizes = (2, 10, 11, 121, 1000)
# all functions except apply
for w in wins:
func_text = "def test_impl(df):\n return df.rolling('{}', on='time').apply(lambda a: a.sum())\n".format(w)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for n in sizes:
time = pd.date_range(start='1/1/2018', periods=n, freq='s')
df = pd.DataFrame({'B': np.arange(n), 'time': time})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
@skip_numba_jit
@unittest.skipIf(platform.system() == 'Windows', "ValueError: time must be monotonic")
def test_variable_parallel1(self):
wins = ('2s',)
sizes = (121,)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
# XXX: Pandas returns time = [np.nan] for size==1 for some reason
sizes = (2, 10, 11, 121, 1000)
# all functions except apply
for w, func_name in itertools.product(wins, test_funcs):
func_text = "def test_impl(n):\n"
func_text += " df = pd.DataFrame({'B': np.arange(n), 'time': "
func_text += " pd.DatetimeIndex(np.arange(n) * 1000000000)})\n"
func_text += " res = df.rolling('{}', on='time').{}()\n".format(w, func_name)
func_text += " return res.B.sum()\n"
loc_vars = {}
exec(func_text, {'pd': pd, 'np': np}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for n in sizes:
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
@unittest.skipIf(platform.system() == 'Windows', "ValueError: time must be monotonic")
def test_variable_apply_parallel1(self):
wins = ('2s',)
sizes = (121,)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
# XXX: Pandas returns time = [np.nan] for size==1 for some reason
sizes = (2, 10, 11, 121, 1000)
# all functions except apply
for w in wins:
func_text = "def test_impl(n):\n"
func_text += " df = pd.DataFrame({'B': np.arange(n), 'time': "
func_text += " pd.DatetimeIndex(np.arange(n) * 1000000000)})\n"
func_text += " res = df.rolling('{}', on='time').apply(lambda a: a.sum())\n".format(w)
func_text += " return res.B.sum()\n"
loc_vars = {}
exec(func_text, {'pd': pd, 'np': np}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for n in sizes:
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_series_fixed1(self):
# test series rolling functions
# all functions except apply
S1 = pd.Series([0, 1, 2, np.nan, 4])
S2 = pd.Series([0, 1, 2, -2, 4])
wins = (3,)
if LONG_TEST:
wins = (2, 3, 5)
centers = (False, True)
for func_name in test_funcs:
func_text = "def test_impl(S, w, c):\n return S.rolling(w, center=c).{}()\n".format(func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for args in itertools.product(wins, centers):
pd.testing.assert_series_equal(hpat_func(S1, *args), test_impl(S1, *args))
pd.testing.assert_series_equal(hpat_func(S2, *args), test_impl(S2, *args))
# test apply
def apply_test_impl(S, w, c):
return S.rolling(w, center=c).apply(lambda a: a.sum())
hpat_func = self.jit(apply_test_impl)
for args in itertools.product(wins, centers):
pd.testing.assert_series_equal(hpat_func(S1, *args), apply_test_impl(S1, *args))
pd.testing.assert_series_equal(hpat_func(S2, *args), apply_test_impl(S2, *args))
@skip_numba_jit
def test_series_cov1(self):
# test series rolling functions
# all functions except apply
S1 = pd.Series([0, 1, 2, np.nan, 4])
S2 = pd.Series([0, 1, 2, -2, 4])
wins = (3,)
if LONG_TEST:
wins = (2, 3, 5)
centers = (False, True)
def test_impl(S, S2, w, c):
return S.rolling(w, center=c).cov(S2)
hpat_func = self.jit(test_impl)
for args in itertools.product([S1, S2], [S1, S2], wins, centers):
pd.testing.assert_series_equal(hpat_func(*args), test_impl(*args))
pd.testing.assert_series_equal(hpat_func(*args), test_impl(*args))
def test_impl2(S, S2, w, c):
return S.rolling(w, center=c).corr(S2)
hpat_func = self.jit(test_impl2)
for args in itertools.product([S1, S2], [S1, S2], wins, centers):
pd.testing.assert_series_equal(hpat_func(*args), test_impl2(*args))
pd.testing.assert_series_equal(hpat_func(*args), test_impl2(*args))
@skip_numba_jit
def test_df_cov1(self):
# test series rolling functions
# all functions except apply
df1 = pd.DataFrame({'A': [0, 1, 2, np.nan, 4], 'B': np.ones(5)})
df2 = pd.DataFrame({'A': [0, 1, 2, -2, 4], 'C': np.ones(5)})
wins = (3,)
if LONG_TEST:
wins = (2, 3, 5)
centers = (False, True)
def test_impl(df, df2, w, c):
return df.rolling(w, center=c).cov(df2)
hpat_func = self.jit(test_impl)
for args in itertools.product([df1, df2], [df1, df2], wins, centers):
pd.testing.assert_frame_equal(hpat_func(*args), test_impl(*args))
pd.testing.assert_frame_equal(hpat_func(*args), test_impl(*args))
def test_impl2(df, df2, w, c):
return df.rolling(w, center=c).corr(df2)
hpat_func = self.jit(test_impl2)
for args in itertools.product([df1, df2], [df1, df2], wins, centers):
pd.testing.assert_frame_equal(hpat_func(*args), test_impl2(*args))
pd.testing.assert_frame_equal(hpat_func(*args), test_impl2(*args))
def _get_assert_equal(self, obj):
if isinstance(obj, pd.Series):
return pd.testing.assert_series_equal
elif isinstance(obj, pd.DataFrame):
return pd.testing.assert_frame_equal
elif isinstance(obj, np.ndarray):
return np.testing.assert_array_equal
return self.assertEqual
def _test_rolling_unsupported_values(self, obj):
def test_impl(obj, window, min_periods, center,
win_type, on, axis, closed):
return obj.rolling(window, min_periods, center,
win_type, on, axis, closed).min()
hpat_func = self.jit(test_impl)
with self.assertRaises(ValueError) as raises:
hpat_func(obj, -1, None, False, None, None, 0, None)
self.assertIn('window must be non-negative', str(raises.exception))
with self.assertRaises(ValueError) as raises:
hpat_func(obj, 1, -1, False, None, None, 0, None)
self.assertIn('min_periods must be >= 0', str(raises.exception))
with self.assertRaises(ValueError) as raises:
hpat_func(obj, 1, 2, False, None, None, 0, None)
self.assertIn('min_periods must be <= window', str(raises.exception))
with self.assertRaises(ValueError) as raises:
hpat_func(obj, 1, 2, False, None, None, 0, None)
self.assertIn('min_periods must be <= window', str(raises.exception))
msg_tmpl = 'Method rolling(). The object {}\n expected: {}'
with self.assertRaises(ValueError) as raises:
hpat_func(obj, 1, None, True, None, None, 0, None)
msg = msg_tmpl.format('center', 'False')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(ValueError) as raises:
hpat_func(obj, 1, None, False, 'None', None, 0, None)
msg = msg_tmpl.format('win_type', 'None')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(ValueError) as raises:
hpat_func(obj, 1, None, False, None, 'None', 0, None)
msg = msg_tmpl.format('on', 'None')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(ValueError) as raises:
hpat_func(obj, 1, None, False, None, None, 1, None)
msg = msg_tmpl.format('axis', '0')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(ValueError) as raises:
hpat_func(obj, 1, None, False, None, None, 0, 'None')
msg = msg_tmpl.format('closed', 'None')
self.assertIn(msg, str(raises.exception))
def _test_rolling_unsupported_types(self, obj):
def test_impl(obj, window, min_periods, center,
win_type, on, axis, closed):
return obj.rolling(window, min_periods, center,
win_type, on, axis, closed).min()
hpat_func = self.jit(test_impl)
msg_tmpl = 'Method rolling(). The object {}\n given: {}\n expected: {}'
with self.assertRaises(TypingError) as raises:
hpat_func(obj, '1', None, False, None, None, 0, None)
msg = msg_tmpl.format('window', 'unicode_type', 'int')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(TypingError) as raises:
hpat_func(obj, 1, '1', False, None, None, 0, None)
msg = msg_tmpl.format('min_periods', 'unicode_type', 'None, int')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(TypingError) as raises:
hpat_func(obj, 1, None, 0, None, None, 0, None)
msg = msg_tmpl.format('center', 'int64', 'bool')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(TypingError) as raises:
hpat_func(obj, 1, None, False, -1, None, 0, None)
msg = msg_tmpl.format('win_type', 'int64', 'str')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(TypingError) as raises:
hpat_func(obj, 1, None, False, None, -1, 0, None)
msg = msg_tmpl.format('on', 'int64', 'str')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(TypingError) as raises:
hpat_func(obj, 1, None, False, None, None, None, None)
msg = msg_tmpl.format('axis', 'none', 'int, str')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(TypingError) as raises:
hpat_func(obj, 1, None, False, None, None, 0, -1)
msg = msg_tmpl.format('closed', 'int64', 'str')
self.assertIn(msg, str(raises.exception))
def _test_rolling_apply_mean(self, obj):
def test_impl(obj, window, min_periods):
def func(x):
if len(x) == 0:
return np.nan
return x.mean()
return obj.rolling(window, min_periods).apply(func)
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
for window in range(0, len(obj) + 3, 2):
for min_periods in range(0, window + 1, 2):
with self.subTest(obj=obj, window=window,
min_periods=min_periods):
jit_result = hpat_func(obj, window, min_periods)
ref_result = test_impl(obj, window, min_periods)
assert_equal(jit_result, ref_result)
def _test_rolling_apply_unsupported_types(self, obj):
def test_impl(obj, raw):
def func(x):
if len(x) == 0:
return np.nan
return np.median(x)
return obj.rolling(3).apply(func, raw=raw)
hpat_func = self.jit(test_impl)
with self.assertRaises(TypingError) as raises:
hpat_func(obj, 1)
msg = 'Method rolling.apply(). The object raw\n given: int64\n expected: bool'
self.assertIn(msg, str(raises.exception))
def _test_rolling_apply_args(self, obj):
def test_impl(obj, window, min_periods, q):
def func(x, q):
if len(x) == 0:
return np.nan
return np.quantile(x, q)
return obj.rolling(window, min_periods).apply(func, raw=None, args=(q,))
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
for window in range(0, len(obj) + 3, 2):
for min_periods in range(0, window + 1, 2):
for q in [0.25, 0.5, 0.75]:
with self.subTest(obj=obj, window=window,
min_periods=min_periods, q=q):
jit_result = hpat_func(obj, window, min_periods, q)
ref_result = test_impl(obj, window, min_periods, q)
assert_equal(jit_result, ref_result)
def _test_rolling_corr(self, obj, other):
def test_impl(obj, window, min_periods, other):
return obj.rolling(window, min_periods).corr(other)
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
for window in range(0, len(obj) + 3, 2):
for min_periods in range(0, window, 2):
with self.subTest(obj=obj, other=other,
window=window, min_periods=min_periods):
jit_result = hpat_func(obj, window, min_periods, other)
ref_result = test_impl(obj, window, min_periods, other)
assert_equal(jit_result, ref_result)
def _test_rolling_corr_with_no_other(self, obj):
def test_impl(obj, window, min_periods):
return obj.rolling(window, min_periods).corr(pairwise=False)
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
for window in range(0, len(obj) + 3, 2):
for min_periods in range(0, window, 2):
with self.subTest(obj=obj, window=window,
min_periods=min_periods):
jit_result = hpat_func(obj, window, min_periods)
ref_result = test_impl(obj, window, min_periods)
assert_equal(jit_result, ref_result)
def _test_rolling_corr_unsupported_types(self, obj):
def test_impl(obj, pairwise):
return obj.rolling(3, 3).corr(pairwise=pairwise)
hpat_func = self.jit(test_impl)
with self.assertRaises(TypingError) as raises:
hpat_func(obj, 1)
msg = 'Method rolling.corr(). The object pairwise\n given: int64\n expected: bool'
self.assertIn(msg, str(raises.exception))
def _test_rolling_count(self, obj):
def test_impl(obj, window, min_periods):
return obj.rolling(window, min_periods).count()
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
for window in range(0, len(obj) + 3, 2):
for min_periods in range(0, window + 1, 2):
with self.subTest(obj=obj, window=window,
min_periods=min_periods):
jit_result = hpat_func(obj, window, min_periods)
ref_result = test_impl(obj, window, min_periods)
assert_equal(jit_result, ref_result)
def _test_rolling_cov(self, obj, other):
def test_impl(obj, window, min_periods, other, ddof):
return obj.rolling(window, min_periods).cov(other, ddof=ddof)
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
for window in range(0, len(obj) + 3, 2):
for min_periods, ddof in product(range(0, window, 2), [0, 1]):
with self.subTest(obj=obj, other=other, window=window,
min_periods=min_periods, ddof=ddof):
jit_result = hpat_func(obj, window, min_periods, other, ddof)
ref_result = test_impl(obj, window, min_periods, other, ddof)
assert_equal(jit_result, ref_result)
def _test_rolling_cov_with_no_other(self, obj):
def test_impl(obj, window, min_periods):
return obj.rolling(window, min_periods).cov(pairwise=False)
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
for window in range(0, len(obj) + 3, 2):
for min_periods in range(0, window, 2):
with self.subTest(obj=obj, window=window,
min_periods=min_periods):
jit_result = hpat_func(obj, window, min_periods)
ref_result = test_impl(obj, window, min_periods)
assert_equal(jit_result, ref_result)
def _test_rolling_cov_unsupported_types(self, obj):
def test_impl(obj, pairwise, ddof):
return obj.rolling(3, 3).cov(pairwise=pairwise, ddof=ddof)
hpat_func = self.jit(test_impl)
msg_tmpl = 'Method rolling.cov(). The object {}\n given: {}\n expected: {}'
with self.assertRaises(TypingError) as raises:
hpat_func(obj, 1, 1)
msg = msg_tmpl.format('pairwise', 'int64', 'bool')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(TypingError) as raises:
hpat_func(obj, None, '1')
msg = msg_tmpl.format('ddof', 'unicode_type', 'int')
self.assertIn(msg, str(raises.exception))
def _test_rolling_kurt(self, obj):
def test_impl(obj, window, min_periods):
return obj.rolling(window, min_periods).kurt()
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
for window in range(4, len(obj) + 1):
for min_periods in range(window + 1):
with self.subTest(obj=obj, window=window,
min_periods=min_periods):
ref_result = test_impl(obj, window, min_periods)
jit_result = hpat_func(obj, window, min_periods)
assert_equal(jit_result, ref_result)
def _test_rolling_max(self, obj):
def test_impl(obj, window, min_periods):
return obj.rolling(window, min_periods).max()
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
# python implementation crashes if window = 0, jit works correctly
for window in range(1, len(obj) + 2):
for min_periods in range(window + 1):
with self.subTest(obj=obj, window=window,
min_periods=min_periods):
jit_result = hpat_func(obj, window, min_periods)
ref_result = test_impl(obj, window, min_periods)
assert_equal(jit_result, ref_result)
def _test_rolling_mean(self, obj):
def test_impl(obj, window, min_periods):
return obj.rolling(window, min_periods).mean()
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
for window in range(len(obj) + 2):
for min_periods in range(window):
with self.subTest(obj=obj, window=window,
min_periods=min_periods):
jit_result = hpat_func(obj, window, min_periods)
ref_result = test_impl(obj, window, min_periods)
assert_equal(jit_result, ref_result)
def _test_rolling_median(self, obj):
def test_impl(obj, window, min_periods):
return obj.rolling(window, min_periods).median()
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
for window in range(0, len(obj) + 3, 2):
for min_periods in range(0, window + 1, 2):
with self.subTest(obj=obj, window=window,
min_periods=min_periods):
jit_result = hpat_func(obj, window, min_periods)
ref_result = test_impl(obj, window, min_periods)
assert_equal(jit_result, ref_result)
def _test_rolling_min(self, obj):
def test_impl(obj, window, min_periods):
return obj.rolling(window, min_periods).min()
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
# python implementation crashes if window = 0, jit works correctly
for window in range(1, len(obj) + 2):
for min_periods in range(window + 1):
with self.subTest(obj=obj, window=window,
min_periods=min_periods):
jit_result = hpat_func(obj, window, min_periods)
ref_result = test_impl(obj, window, min_periods)
assert_equal(jit_result, ref_result)
def _test_rolling_quantile(self, obj):
def test_impl(obj, window, min_periods, quantile):
return obj.rolling(window, min_periods).quantile(quantile)
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
quantiles = [0, 0.25, 0.5, 0.75, 1]
for window in range(0, len(obj) + 3, 2):
for min_periods, q in product(range(0, window, 2), quantiles):
with self.subTest(obj=obj, window=window,
min_periods=min_periods, quantiles=q):
jit_result = hpat_func(obj, window, min_periods, q)
ref_result = test_impl(obj, window, min_periods, q)
assert_equal(jit_result, ref_result)
def _test_rolling_quantile_exception_unsupported_types(self, obj):
def test_impl(obj, quantile, interpolation):
return obj.rolling(3, 2).quantile(quantile, interpolation)
hpat_func = self.jit(test_impl)
msg_tmpl = 'Method rolling.quantile(). The object {}\n given: {}\n expected: {}'
with self.assertRaises(TypingError) as raises:
hpat_func(obj, '0.5', 'linear')
msg = msg_tmpl.format('quantile', 'unicode_type', 'float')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(TypingError) as raises:
hpat_func(obj, 0.5, None)
msg = msg_tmpl.format('interpolation', 'none', 'str')
self.assertIn(msg, str(raises.exception))
def _test_rolling_quantile_exception_unsupported_values(self, obj):
def test_impl(obj, quantile, interpolation):
return obj.rolling(3, 2).quantile(quantile, interpolation)
hpat_func = self.jit(test_impl)
with self.assertRaises(ValueError) as raises:
hpat_func(obj, 2, 'linear')
self.assertIn('quantile value not in [0, 1]', str(raises.exception))
with self.assertRaises(ValueError) as raises:
hpat_func(obj, 0.5, 'lower')
self.assertIn('interpolation value not "linear"', str(raises.exception))
def _test_rolling_skew(self, obj):
def test_impl(obj, window, min_periods):
return obj.rolling(window, min_periods).skew()
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
for window in range(3, len(obj) + 1):
for min_periods in range(window + 1):
with self.subTest(obj=obj, window=window,
min_periods=min_periods):
ref_result = test_impl(obj, window, min_periods)
jit_result = hpat_func(obj, window, min_periods)
assert_equal(jit_result, ref_result)
def _test_rolling_std(self, obj):
test_impl = rolling_std_usecase
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
for window in range(0, len(obj) + 3, 2):
for min_periods, ddof in product(range(0, window, 2), [0, 1]):
with self.subTest(obj=obj, window=window,
min_periods=min_periods, ddof=ddof):
jit_result = hpat_func(obj, window, min_periods, ddof)
ref_result = test_impl(obj, window, min_periods, ddof)
assert_equal(jit_result, ref_result)
def _test_rolling_std_exception_unsupported_ddof(self, obj):
test_impl = rolling_std_usecase
hpat_func = self.jit(test_impl)
window, min_periods, invalid_ddof = 3, 2, '1'
with self.assertRaises(TypingError) as raises:
hpat_func(obj, window, min_periods, invalid_ddof)
msg = 'Method rolling.std(). The object ddof\n given: unicode_type\n expected: int'
self.assertIn(msg, str(raises.exception))
def _test_rolling_sum(self, obj):
def test_impl(obj, window, min_periods):
return obj.rolling(window, min_periods).sum()
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
for window in range(len(obj) + 2):
for min_periods in range(window):
with self.subTest(obj=obj, window=window,
min_periods=min_periods):
jit_result = hpat_func(obj, window, min_periods)
ref_result = test_impl(obj, window, min_periods)
assert_equal(jit_result, ref_result)
def _test_rolling_var(self, obj):
test_impl = rolling_var_usecase
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
for window in range(0, len(obj) + 3, 2):
for min_periods, ddof in product(range(0, window, 2), [0, 1]):
with self.subTest(obj=obj, window=window,
min_periods=min_periods, ddof=ddof):
jit_result = hpat_func(obj, window, min_periods, ddof)
ref_result = test_impl(obj, window, min_periods, ddof)
assert_equal(jit_result, ref_result)
def _test_rolling_var_exception_unsupported_ddof(self, obj):
test_impl = rolling_var_usecase
hpat_func = self.jit(test_impl)
window, min_periods, invalid_ddof = 3, 2, '1'
with self.assertRaises(TypingError) as raises:
hpat_func(obj, window, min_periods, invalid_ddof)
msg = 'Method rolling.var(). The object ddof\n given: unicode_type\n expected: int'
self.assertIn(msg, str(raises.exception))
@skip_sdc_jit('DataFrame.rolling.min() unsupported exceptions')
def test_df_rolling_unsupported_values(self):
all_data = test_global_input_data_float64
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_unsupported_values(df)
@skip_sdc_jit('DataFrame.rolling.min() unsupported exceptions')
def test_df_rolling_unsupported_types(self):
all_data = test_global_input_data_float64
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_unsupported_types(df)
@skip_sdc_jit('DataFrame.rolling.apply() unsupported')
def test_df_rolling_apply_mean(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_apply_mean(df)
@skip_sdc_jit('DataFrame.rolling.apply() unsupported exceptions')
def test_df_rolling_apply_unsupported_types(self):
all_data = [[1., -1., 0., 0.1, -0.1], [-1., 1., 0., -0.1, 0.1]]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_apply_unsupported_types(df)
@unittest.skip('DataFrame.rolling.apply() unsupported args')
def test_df_rolling_apply_args(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_apply_args(df)
@skip_sdc_jit('DataFrame.rolling.corr() unsupported')
def test_df_rolling_corr(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
for d in all_data:
other = pd.Series(d)
self._test_rolling_corr(df, other)
other_all_data = deepcopy(all_data) + [list(range(10))[::-1]]
other_all_data[1] = [-1., 1., 0., -0.1, 0.1, 0.]
other_length = min(len(d) for d in other_all_data)
other_data = {n: d[:other_length] for n, d in zip(string.ascii_uppercase, other_all_data)}
other = pd.DataFrame(other_data)
self._test_rolling_corr(df, other)
@skip_sdc_jit('DataFrame.rolling.corr() unsupported')
def test_df_rolling_corr_no_other(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_corr_with_no_other(df)
@skip_sdc_jit('DataFrame.rolling.corr() unsupported exceptions')
def test_df_rolling_corr_unsupported_types(self):
all_data = [[1., -1., 0., 0.1, -0.1], [-1., 1., 0., -0.1, 0.1]]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_corr_unsupported_types(df)
@skip_sdc_jit('DataFrame.rolling.corr() unsupported exceptions')
def test_df_rolling_corr_unsupported_values(self):
def test_impl(df, other, pairwise):
return df.rolling(3, 3).corr(other=other, pairwise=pairwise)
hpat_func = self.jit(test_impl)
msg_tmpl = 'Method rolling.corr(). The object pairwise\n expected: {}'
df = pd.DataFrame({'A': [1., -1., 0., 0.1, -0.1],
'B': [-1., 1., 0., -0.1, 0.1]})
for pairwise in [None, True]:
with self.assertRaises(ValueError) as raises:
hpat_func(df, None, pairwise)
self.assertIn(msg_tmpl.format('False'), str(raises.exception))
other = pd.DataFrame({'A': [-1., 1., 0., -0.1, 0.1],
'C': [1., -1., 0., 0.1, -0.1]})
with self.assertRaises(ValueError) as raises:
hpat_func(df, other, True)
self.assertIn(msg_tmpl.format('False, None'), str(raises.exception))
@skip_sdc_jit('DataFrame.rolling.count() unsupported')
def test_df_rolling_count(self):
all_data = test_global_input_data_float64
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_count(df)
@skip_sdc_jit('DataFrame.rolling.cov() unsupported')
def test_df_rolling_cov(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
for d in all_data:
other = pd.Series(d)
self._test_rolling_cov(df, other)
other_all_data = deepcopy(all_data) + [list(range(10))[::-1]]
other_all_data[1] = [-1., 1., 0., -0.1, 0.1]
other_length = min(len(d) for d in other_all_data)
other_data = {n: d[:other_length] for n, d in zip(string.ascii_uppercase, other_all_data)}
other = pd.DataFrame(other_data)
self._test_rolling_cov(df, other)
@skip_sdc_jit('DataFrame.rolling.cov() unsupported')
def test_df_rolling_cov_no_other(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_cov_with_no_other(df)
@skip_sdc_jit('DataFrame.rolling.cov() unsupported exceptions')
def test_df_rolling_cov_unsupported_types(self):
all_data = [[1., -1., 0., 0.1, -0.1], [-1., 1., 0., -0.1, 0.1]]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_cov_unsupported_types(df)
@skip_sdc_jit('DataFrame.rolling.cov() unsupported exceptions')
def test_df_rolling_cov_unsupported_values(self):
def test_impl(df, other, pairwise):
return df.rolling(3, 3).cov(other=other, pairwise=pairwise)
hpat_func = self.jit(test_impl)
msg_tmpl = 'Method rolling.cov(). The object pairwise\n expected: {}'
df = pd.DataFrame({'A': [1., -1., 0., 0.1, -0.1],
'B': [-1., 1., 0., -0.1, 0.1]})
for pairwise in [None, True]:
with self.assertRaises(ValueError) as raises:
hpat_func(df, None, pairwise)
self.assertIn(msg_tmpl.format('False'), str(raises.exception))
other = pd.DataFrame({'A': [-1., 1., 0., -0.1, 0.1],
'C': [1., -1., 0., 0.1, -0.1]})
with self.assertRaises(ValueError) as raises:
hpat_func(df, other, True)
self.assertIn(msg_tmpl.format('False, None'), str(raises.exception))
@skip_sdc_jit('Series.rolling.cov() unsupported Series index')
@unittest.expectedFailure
def test_df_rolling_cov_issue_floating_point_rounding(self):
"""
Cover issue of different float rounding in Python and SDC/Numba:
        s = pd.Series([1., -1., 0., 0.1, -0.1])
s.rolling(2, 0).mean()
Python: SDC/Numba:
0 1.000000e+00 0 1.00
1 0.000000e+00 1 0.00
2 -5.000000e-01 2 -0.50
3 5.000000e-02 3 0.05
4 -1.387779e-17 4 0.00
dtype: float64 dtype: float64
BTW: cov uses mean inside itself
"""
def test_impl(df, window, min_periods, other, ddof):
return df.rolling(window, min_periods).cov(other, ddof=ddof)
hpat_func = self.jit(test_impl)
df = pd.DataFrame({'A': [1., -1., 0., 0.1, -0.1]})
other = pd.DataFrame({'A': [-1., 1., 0., -0.1, 0.1, 0.]})
jit_result = hpat_func(df, 2, 0, other, 1)
ref_result = test_impl(df, 2, 0, other, 1)
pd.testing.assert_frame_equal(jit_result, ref_result)
@skip_sdc_jit('DataFrame.rolling.kurt() unsupported')
def test_df_rolling_kurt(self):
all_data = test_global_input_data_float64
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_kurt(df)
@skip_sdc_jit('DataFrame.rolling.max() unsupported')
def test_df_rolling_max(self):
all_data = test_global_input_data_float64
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_max(df)
@skip_sdc_jit('DataFrame.rolling.mean() unsupported')
def test_df_rolling_mean(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_mean(df)
@skip_sdc_jit('DataFrame.rolling.median() unsupported')
def test_df_rolling_median(self):
all_data = test_global_input_data_float64
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_median(df)
@skip_sdc_jit('DataFrame.rolling.min() unsupported')
def test_df_rolling_min(self):
all_data = test_global_input_data_float64
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_min(df)
@unittest.expectedFailure
@unittest.skipIf(platform.system() == 'Darwin', 'Segmentation fault on Mac')
@skip_sdc_jit('DataFrame.rolling.min() unsupported')
def test_df_rolling_min_exception_many_columns(self):
def test_impl(df):
return df.rolling(3).min()
hpat_func = self.jit(test_impl)
# more than 19 columns raise SystemError: CPUDispatcher() returned a result with an error set
all_data = test_global_input_data_float64 * 5
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
@skip_sdc_jit('DataFrame.rolling.quantile() unsupported')
def test_df_rolling_quantile(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_quantile(df)
@skip_sdc_jit('DataFrame.rolling.quantile() unsupported exceptions')
def test_df_rolling_quantile_exception_unsupported_types(self):
all_data = [[1., -1., 0., 0.1, -0.1], [-1., 1., 0., -0.1, 0.1]]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_quantile_exception_unsupported_types(df)
@skip_sdc_jit('DataFrame.rolling.quantile() unsupported exceptions')
def test_df_rolling_quantile_exception_unsupported_values(self):
all_data = [[1., -1., 0., 0.1, -0.1], [-1., 1., 0., -0.1, 0.1]]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_quantile_exception_unsupported_values(df)
@skip_sdc_jit('DataFrame.rolling.skew() unsupported')
def test_df_rolling_skew(self):
all_data = test_global_input_data_float64
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_skew(df)
@skip_sdc_jit('DataFrame.rolling.std() unsupported')
def test_df_rolling_std(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_std(df)
@skip_sdc_jit('DataFrame.rolling.std() unsupported exceptions')
def test_df_rolling_std_exception_unsupported_ddof(self):
all_data = [[1., -1., 0., 0.1, -0.1], [-1., 1., 0., -0.1, 0.1]]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_std_exception_unsupported_ddof(df)
@skip_sdc_jit('DataFrame.rolling.sum() unsupported')
def test_df_rolling_sum(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_sum(df)
@skip_sdc_jit('DataFrame.rolling.var() unsupported')
def test_df_rolling_var(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_var(df)
@skip_sdc_jit('DataFrame.rolling.var() unsupported exceptions')
def test_df_rolling_var_exception_unsupported_ddof(self):
all_data = [[1., -1., 0., 0.1, -0.1], [-1., 1., 0., -0.1, 0.1]]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_var_exception_unsupported_ddof(df)
@skip_sdc_jit('Series.rolling.min() unsupported exceptions')
def test_series_rolling_unsupported_values(self):
series = pd.Series(test_global_input_data_float64[0])
self._test_rolling_unsupported_values(series)
@skip_sdc_jit('Series.rolling.min() unsupported exceptions')
def test_series_rolling_unsupported_types(self):
series = pd.Series(test_global_input_data_float64[0])
self._test_rolling_unsupported_types(series)
@skip_sdc_jit('Series.rolling.apply() unsupported Series index')
def test_series_rolling_apply_mean(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
indices = [list(range(len(data)))[::-1] for data in all_data]
for data, index in zip(all_data, indices):
series = pd.Series(data, index, name='A')
self._test_rolling_apply_mean(series)
@skip_sdc_jit('Series.rolling.apply() unsupported exceptions')
def test_series_rolling_apply_unsupported_types(self):
series = pd.Series([1., -1., 0., 0.1, -0.1])
self._test_rolling_apply_unsupported_types(series)
@unittest.skip('Series.rolling.apply() unsupported args')
def test_series_rolling_apply_args(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
indices = [list(range(len(data)))[::-1] for data in all_data]
for data, index in zip(all_data, indices):
series = pd.Series(data, index, name='A')
self._test_rolling_apply_args(series)
@skip_sdc_jit('Series.rolling.corr() unsupported Series index')
def test_series_rolling_corr(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[-1., 1., 0., -0.1, 0.1, 0.],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
for main_data, other_data in product(all_data, all_data):
series = pd.Series(main_data)
other = pd.Series(other_data)
self._test_rolling_corr(series, other)
@skip_sdc_jit('Series.rolling.corr() unsupported Series index')
def test_series_rolling_corr_diff_length(self):
def test_impl(series, window, other):
return series.rolling(window).corr(other)
hpat_func = self.jit(test_impl)
series = pd.Series([1., -1., 0., 0.1, -0.1])
other = pd.Series(gen_frand_array(40))
window = 5
jit_result = hpat_func(series, window, other)
ref_result = test_impl(series, window, other)
pd.testing.assert_series_equal(jit_result, ref_result)
@skip_sdc_jit('Series.rolling.corr() unsupported Series index')
def test_series_rolling_corr_with_no_other(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
for data in all_data:
series = pd.Series(data)
self._test_rolling_corr_with_no_other(series)
@skip_sdc_jit('Series.rolling.corr() unsupported exceptions')
def test_series_rolling_corr_unsupported_types(self):
series = pd.Series([1., -1., 0., 0.1, -0.1])
self._test_rolling_corr_unsupported_types(series)
@skip_sdc_jit('Series.rolling.corr() unsupported Series index')
@unittest.expectedFailure # https://jira.devtools.intel.com/browse/SAT-2377
def test_series_rolling_corr_index(self):
def test_impl(S1, S2):
return S1.rolling(window=3).corr(S2)
hpat_func = self.jit(test_impl)
n = 11
np.random.seed(0)
index_values = np.arange(n)
np.random.shuffle(index_values)
S1 = pd.Series(np.arange(n), index=index_values, name='A')
np.random.shuffle(index_values)
S2 = pd.Series(2 * np.arange(n) - 5, index=index_values, name='B')
result = hpat_func(S1, S2)
result_ref = test_impl(S1, S2)
pd.testing.assert_series_equal(result, result_ref)
@skip_sdc_jit('Series.rolling.count() unsupported Series index')
def test_series_rolling_count(self):
all_data = test_global_input_data_float64
indices = [list(range(len(data)))[::-1] for data in all_data]
for data, index in zip(all_data, indices):
series = pd.Series(data, index, name='A')
self._test_rolling_count(series)
@skip_sdc_jit('Series.rolling.cov() unsupported Series index')
def test_series_rolling_cov(self):
all_data = [
list(range(5)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
for main_data, other_data in product(all_data, all_data):
series = pd.Series(main_data)
other = pd.Series(other_data)
self._test_rolling_cov(series, other)
@skip_sdc_jit('Series.rolling.cov() unsupported Series index')
def test_series_rolling_cov_diff_length(self):
def test_impl(series, window, other):
return series.rolling(window).cov(other)
hpat_func = self.jit(test_impl)
series = pd.Series([1., -1., 0., 0.1, -0.1])
other = pd.Series(gen_frand_array(40))
window = 5
jit_result = hpat_func(series, window, other)
ref_result = test_impl(series, window, other)
pd.testing.assert_series_equal(jit_result, ref_result)
@skip_sdc_jit('Series.rolling.cov() unsupported Series index')
def test_series_rolling_cov_no_other(self):
all_data = [
list(range(5)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
for data in all_data:
series = pd.Series(data)
self._test_rolling_cov_with_no_other(series)
@skip_sdc_jit('Series.rolling.cov() unsupported Series index')
@unittest.expectedFailure
def test_series_rolling_cov_issue_floating_point_rounding(self):
"""Cover issue of different float rounding in Python and SDC/Numba"""
def test_impl(series, window, min_periods, other, ddof):
return series.rolling(window, min_periods).cov(other, ddof=ddof)
hpat_func = self.jit(test_impl)
series = pd.Series(list(range(10)))
other = pd.Series([1., -1., 0., 0.1, -0.1])
jit_result = hpat_func(series, 6, 0, other, 1)
ref_result = test_impl(series, 6, 0, other, 1)
pd.testing.assert_series_equal(jit_result, ref_result)
@skip_sdc_jit('Series.rolling.cov() unsupported exceptions')
def test_series_rolling_cov_unsupported_types(self):
series = pd.Series([1., -1., 0., 0.1, -0.1])
self._test_rolling_cov_unsupported_types(series)
@skip_sdc_jit('Series.rolling.kurt() unsupported Series index')
def test_series_rolling_kurt(self):
all_data = test_global_input_data_float64
indices = [list(range(len(data)))[::-1] for data in all_data]
for data, index in zip(all_data, indices):
series = pd.Series(data, index, name='A')
self._test_rolling_kurt(series)
@skip_sdc_jit('Series.rolling.max() unsupported Series index')
def test_series_rolling_max(self):
all_data = test_global_input_data_float64
indices = [list(range(len(data)))[::-1] for data in all_data]
for data, index in zip(all_data, indices):
series = pd.Series(data, index, name='A')
self._test_rolling_max(series)
@skip_sdc_jit('Series.rolling.mean() unsupported Series index')
def test_series_rolling_mean(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
indices = [list(range(len(data)))[::-1] for data in all_data]
for data, index in zip(all_data, indices):
series = pd.Series(data, index, name='A')
self._test_rolling_mean(series)
@skip_sdc_jit('Series.rolling.median() unsupported Series index')
def test_series_rolling_median(self):
all_data = test_global_input_data_float64
indices = [list(range(len(data)))[::-1] for data in all_data]
for data, index in zip(all_data, indices):
series = pd.Series(data, index, name='A')
self._test_rolling_median(series)
@skip_sdc_jit('Series.rolling.min() unsupported Series index')
def test_series_rolling_min(self):
all_data = test_global_input_data_float64
indices = [list(range(len(data)))[::-1] for data in all_data]
for data, index in zip(all_data, indices):
series = pd.Series(data, index, name='A')
self._test_rolling_min(series)
@skip_sdc_jit('Series.rolling.quantile() unsupported Series index')
def test_series_rolling_quantile(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
indices = [list(range(len(data)))[::-1] for data in all_data]
for data, index in zip(all_data, indices):
series = pd.Series(data, index, name='A')
self._test_rolling_quantile(series)
@skip_sdc_jit('Series.rolling.quantile() unsupported exceptions')
def test_series_rolling_quantile_exception_unsupported_types(self):
series = pd.Series([1., -1., 0., 0.1, -0.1])
self._test_rolling_quantile_exception_unsupported_types(series)
@skip_sdc_jit('Series.rolling.quantile() unsupported exceptions')
def test_series_rolling_quantile_exception_unsupported_values(self):
        series = pd.Series([1., -1., 0., 0.1, -0.1])
        self._test_rolling_quantile_exception_unsupported_values(series)
import numpy as np
import pytest
from pandas import (
DataFrame,
Series,
)
import pandas._testing as tm
from pandas.core.base import DataError
# gh-12373 : rolling functions error on float32 data
# make sure rolling functions works for different dtypes
#
# further note that we are only checking rolling for fully dtype
# compliance (though both expanding and ewm inherit)
def get_dtype(dtype, coerce_int=None):
if coerce_int is False and "int" in dtype:
return None
if dtype != "category":
return np.dtype(dtype)
return dtype
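# A few illustrative calls (the results follow directly from the branches above):
#   get_dtype("float32")                 -> np.dtype("float32")
#   get_dtype("int64", coerce_int=False) -> None (int dtypes cannot hold NaN)
#   get_dtype("category")                -> "category" (kept as a plain string)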
@pytest.mark.parametrize(
"method, data, expected_data, coerce_int, min_periods",
[
("count", np.arange(5), [1, 2, 2, 2, 2], True, 0),
("count", np.arange(10, 0, -2), [1, 2, 2, 2, 2], True, 0),
("count", [0, 1, 2, np.nan, 4], [1, 2, 2, 1, 1], False, 0),
("max", np.arange(5), [np.nan, 1, 2, 3, 4], True, None),
("max", np.arange(10, 0, -2), [np.nan, 10, 8, 6, 4], True, None),
("max", [0, 1, 2, np.nan, 4], [np.nan, 1, 2, np.nan, np.nan], False, None),
("min", np.arange(5), [np.nan, 0, 1, 2, 3], True, None),
("min", np.arange(10, 0, -2), [np.nan, 8, 6, 4, 2], True, None),
("min", [0, 1, 2, np.nan, 4], [np.nan, 0, 1, np.nan, np.nan], False, None),
("sum", np.arange(5), [np.nan, 1, 3, 5, 7], True, None),
("sum", np.arange(10, 0, -2), [np.nan, 18, 14, 10, 6], True, None),
("sum", [0, 1, 2, np.nan, 4], [np.nan, 1, 3, np.nan, np.nan], False, None),
("mean", np.arange(5), [np.nan, 0.5, 1.5, 2.5, 3.5], True, None),
("mean", np.arange(10, 0, -2), [np.nan, 9, 7, 5, 3], True, None),
("mean", [0, 1, 2, np.nan, 4], [np.nan, 0.5, 1.5, np.nan, np.nan], False, None),
("std", np.arange(5), [np.nan] + [np.sqrt(0.5)] * 4, True, None),
("std", np.arange(10, 0, -2), [np.nan] + [np.sqrt(2)] * 4, True, None),
(
"std",
[0, 1, 2, np.nan, 4],
[np.nan] + [np.sqrt(0.5)] * 2 + [np.nan] * 2,
False,
None,
),
("var", np.arange(5), [np.nan, 0.5, 0.5, 0.5, 0.5], True, None),
("var", np.arange(10, 0, -2), [np.nan, 2, 2, 2, 2], True, None),
("var", [0, 1, 2, np.nan, 4], [np.nan, 0.5, 0.5, np.nan, np.nan], False, None),
("median", np.arange(5), [np.nan, 0.5, 1.5, 2.5, 3.5], True, None),
("median", np.arange(10, 0, -2), [np.nan, 9, 7, 5, 3], True, None),
(
"median",
[0, 1, 2, np.nan, 4],
[np.nan, 0.5, 1.5, np.nan, np.nan],
False,
None,
),
],
)
def test_series_dtypes(method, data, expected_data, coerce_int, dtypes, min_periods):
s = Series(data, dtype=get_dtype(dtypes, coerce_int=coerce_int))
if dtypes in ("m8[ns]", "M8[ns]") and method != "count":
msg = "No numeric types to aggregate"
with pytest.raises(DataError, match=msg):
getattr(s.rolling(2, min_periods=min_periods), method)()
else:
result = getattr(s.rolling(2, min_periods=min_periods), method)()
expected = Series(expected_data, dtype="float64")
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize(
"method, expected_data, min_periods",
[
("count", {0: Series([1, 2, 2, 2, 2]), 1: Series([1, 2, 2, 2, 2])}, 0),
(
"max",
{0: Series([np.nan, 2, 4, 6, 8]), 1: Series([np.nan, 3, 5, 7, 9])},
None,
),
(
"min",
{0: Series([np.nan, 0, 2, 4, 6]), 1: Series([np.nan, 1, 3, 5, 7])},
None,
),
(
"sum",
            {0: Series([np.nan, 2, 6, 10, 14]), 1: Series([np.nan, 4, 8, 12, 16])},
import pandas as pd
import numpy as np
###SCRIPT DESCRIPTION###
# This script provides statistical analysis for LSTM labeled data.
###SCRIPT INPUT###
# The .csv file given to this script should be equivalent to the labeled output generated by the corresponding
# data-generation.py script. This means a "Format" column must be present with a unique identifier for each
# unique format. The column "true_label" should hold the integer value of the true class of the sample, the
# column "label" should hold the integer value of the predicted class of the sample.
###SCRIPT OUTPUT###
# This script provides the following values in a .csv file
# - Classwise precision, accuracy and recall for each format
# - Formatwise accuracy
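###EXAMPLE INPUT###
# A minimal illustration of the expected columns (the values below are hypothetical):
# Date,Format,true_label,label
# 2019-01-31,F001,0,0
# 31/01/2019,F002,1,1
# Jan 31 2019,F002,1,0
# Only 'Format', 'true_label' and 'label' feed the metrics; 'Date' is used to show
# one example string per format.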
###SCRIPT BEGIN####
input_file = input("Path to test results file: ")
output_file = input("Path to output file: ")
# Load data
data = pd.read_csv(input_file)
# Initial column names and row list
rows = []
cols = ['Format ID', 'Format Example', 'Sample Count']
#each class has a column for precision, recall and accuracy
for cl in data['true_label'].unique():
cols.append(str(cl) + "_precision")
cols.append(str(cl) + "_recall")
cols.append(str(cl) + "_accuracy")
#add format accuracy at the end
cols.append("Format Accuracy")
#for each unique format ID
for format in data['Format'].unique():
#create a subset containing only entries from this format
subset = data[data['Format'] == format]
#find the number of rows this format has
n = subset.shape[0]
#get one example of the format
example = subset['Date'].iloc[0]
row = [format, example, n]
# for each class that truly exists
for cl in data['true_label'].unique():
#create subset with all samples in the format that have this class
class_subset = subset[subset['true_label'] == cl]
#create subset with all samples in the format that are predicted as this class
predicted_subset = subset[subset['label'] == cl]
#create subset with all samples in the format that are not predicted as this class
negative_subset = subset[subset['label'] != cl]
#get indices of rows where this class was correctly classified
correct = np.where(class_subset['true_label'] == class_subset['label'])
#get amount of real, predicted and correctly predicted values of this class
real = class_subset.shape[0]
predicted = predicted_subset.shape[0]
correctly_predicted = len(correct[0])
true_negatives = negative_subset[negative_subset['true_label'] != cl].shape[0]
#precision = True Positives / Predicted
precision = (correctly_predicted / predicted) if predicted > 0 else "N/A" if real == 0 else 0
#recall = True Positives / Real Samples
recall = (correctly_predicted / real) if real > 0 else "N/A"
        #accuracy = (True Positives + True Negatives) / All Format Samples
accuracy = (correctly_predicted + true_negatives) / n
#Add formatwise precision, recall and accuracy to the row
row += [precision, recall, accuracy]
#Add format accuracy to the row (all matching entries / all entries)
acc = subset[subset['label'] == subset['true_label']]
row.append(acc.shape[0] / n)
rows.append(row)
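#Worked example (hypothetical numbers): for a format with n = 10 samples where class c
#has real = 4, predicted = 5, correctly_predicted = 3 and true_negatives = 4:
# precision = 3 / 5 = 0.6
# recall = 3 / 4 = 0.75
# accuracy = (3 + 4) / 10 = 0.7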
#output dataframe
df = pd.DataFrame(rows, columns=cols)
df.to_csv(output_file, index=False)
from flask import Flask, jsonify, request, json
import joblib
import pandas as pd
app = Flask(__name__)
app.config['SECRET_KEY'] = 'KEY'
#rutas raiz
@app.route('/movies', methods=['POST'])
def recommended():
try:
cosine_sim = joblib.load("./models/modelCosine.pkl")
        df_movies = pd.read_csv('./data/df_movies.csv')
from backlight.strategies import filter as module
import pytest
import pandas as pd
import numpy as np
import backlight
from backlight.strategies.amount_based import simple_entry_and_exit
@pytest.fixture
def signal():
symbol = "usdjpy"
periods = 22
df = pd.DataFrame(
        index=pd.date_range(start="2018-06-06", freq="1min", periods=periods)
#getting data from the internet
import sys
import csv
import pandas as pd
import requests
from bs4 import BeautifulSoup
pd.set_option('max_columns', 50)
def get_all(weeknum):
print('getting stats')
print('pulling ESPN lines')
get_espn_lines(weeknum)
#print('pulling ESPN team stats')
#get_espn_stats(weeknum)
print('pulling ESPN team standings')
get_espn_standings(weeknum)
print('pulling NFL injury data')
get_injuries_stats(weeknum)
print('finished pulling data')
#get the NFL lines from espn (point favorites)
def get_espn_lines(weeknum):
url = "http://www.espn.com/nfl/lines"
req = requests.get(url)
soup = BeautifulSoup(req.content, 'html.parser')
lines_table = soup.find('table', class_='tablehead')
# take the data from the html table, add it to the python list
list_of_rows = []
labels_of_frame = ['Name of Game','Betting Source', 'Team Line', 'Raw Line']
list_of_rows.append(labels_of_frame)
for row in lines_table.findAll('tr'):
# list of attributes for one row/game
list_of_cells = []
for cell in row.findAll(["th","td"]):
# individual stats/cells
text = cell.text
# trying to fix formatting for later
#if text == 'POINT SPREAD' or text == 'TOTAL' or text == 'MONEY LINE' or text == 'N/A':
#list_of_cells.append(text)
list_of_cells.append(text)
# remove the '\xa0' that was being produced in the list
#list_of_cells = [el.replace('\xa0','BETTING SOURCE') for el in list_of_cells]
# remove the 'PickCenter » ' at the end of each game
#list_of_cells = [el.replace('PickCenter » ','-------------------------------------------------') for el in list_of_cells]
# take out unneeded data
if list_of_cells[0] == 'Westgate' or list_of_cells[0] == 'Caesar\'s' or list_of_cells[0] == '<NAME>' or list_of_cells[0] == 'Wynn' or list_of_cells[0] == 'Unibet':
list_of_cells = list_of_cells[0:4]
list_of_cells = [list_of_cells[-1]] + list_of_cells[:-1]
list_of_cells[0] = ''
elif len(list_of_cells[0]) < 25:
continue
# add in name of each game, as well as labels for data
elif len(list_of_cells) == 1:
game_name = [list_of_cells[0],'','','']
list_of_rows.append(game_name)
continue
# append row/game attributes to main list
list_of_rows.append(list_of_cells)
#print(list_of_cells)
# printing if needed
#for item in list_of_rows:
#print(' '.join(item)) #print it pretty
#print(item) #print it less pretty
df_raw_lines = pd.DataFrame(list_of_rows)
df_raw_lines = df_raw_lines[1:] #take the data less the header row
df_raw_lines.columns = ['Name of Game', 'Betting Source', 'Team Line', 'Raw Line']
#print(df_raw_lines)
# assign variables to count row numbers
row_start = 1
row_end = 6
row_count_line = 1
# find the line data from the dataframe
game = df_raw_lines.loc[row_start:row_end]
# dictionary to add team and line info to
team_lines = {}
# loop to go through every game
while game.empty == False:
# set the variables
team1_avg_line = 0
team2_avg_line = 0
count = 0
#print(game)
# for each line per game
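        # Raw ESPN lines concatenate the two spreads into one string, e.g. "-3+3" or
        # "-3.5+3.5" (illustrative values); the length-based splits below separate the
        # favorite's number from the underdog's.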
for numset in game.loc[:, 'Raw Line']:
# first line is 'NaN', so only do if a string
if numset != '':
if len(numset) == 4:
count += 1
team1_line = float(numset[:2])
team2_line = float(numset[2:])
#print(str(team1_line)+' team1 line')
#print(str(team2_line)+' team2 line')
#print('')
team1_avg_line += team1_line
team2_avg_line += team2_line
elif len(numset) == 8:
count += 1
team1_line = float(numset[:4])
team2_line = float(numset[4:])
#print(str(team1_line)+' team1 line')
#print(str(team2_line)+' team2 line')
#print('')
team1_avg_line += team1_line
team2_avg_line += team2_line
elif len(numset) == 6:
count += 1
team1_line = float(numset[:3])
team2_line = float(numset[3:])
#print(str(team1_line)+' team1 line')
#print(str(team2_line)+' team2 line')
#print('')
team1_avg_line += team1_line
team2_avg_line += team2_line
elif len(numset) == 10:
count += 1
team1_line = float(numset[:5])
team2_line = float(numset[5:])
#print(str(team1_line)+' team1 line')
#print(str(team2_line)+' team2 line')
#print('')
team1_avg_line += team1_line
team2_avg_line += team2_line
elif numset == 'N/A':
pass
elif numset == 'EVEN':
print(game)
print(numset)
count += 1
else:
if df_raw_lines.loc[row_count_line, 'Team Line'] == 'EVEN':
count += 1
pass
else:
print(row_count_line)
print (df_raw_lines.loc[row_count_line, 'Team Line'])
print (numset)
print(game.to_string())
sys.exit('ERROR: UNKNOWN AVG LINE LENGTH')
row_count_line += 1
#print (game.loc[:, 'Raw Line'])
if count != 0:
team1_avg_line = team1_avg_line / count
team2_avg_line = team2_avg_line / count
elif count == 0:
team1_avg_line = 'N/A'
team2_avg_line = 'N/A'
#print(str(team1_avg_line)+' team1 avg')
#print(str(team2_avg_line)+' team2 avg')
# find each teamname
for gamename in game.loc[:, 'Name of Game']:
if gamename != '':
teams = (gamename.split(' - '))[0]
teams = teams.split(' at ')
team1 = str(teams[0])
team2 = str(teams[1])
if team1 == 'Bears':
team1 = 'Chicago'
elif team2 == 'Bears':
team2 = 'Chicago'
#print(team2)
# add teamname and their associated line to the dictionary
team_lines[team1] = team1_avg_line
team_lines[team2] = team2_avg_line
# go to next game for each loop
row_start += 6
row_end += 6
# restate game variable for looping
game = df_raw_lines.loc[row_start:row_end]
#print(team_lines)
# create dataframe and put data into csv
    df_format_lines = pd.DataFrame.from_dict(team_lines, orient='index', columns=['Avg Line'])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 22 20:22:15 2020
@author:
"""
class Comparison:
def __init__(self):
super().__init__()
    # The goal of this function is to execute the models and show the different results.
    # It is the function to call when we want to test different models
    # with different values for the parameters.
def run_comparison(self, stream, stream_n_features, window = 100,
estimators = 50, anomaly = 0.5, drift_rate = 0.3,
result_folder="Generated", max_sample=100000, n_wait=200,
metrics=['accuracy', 'f1', 'kappa', 'kappa_m',
'running_time','model_size']):
from skmultiflow.anomaly_detection import HalfSpaceTrees
from source.iforestasd_scikitmultiflow import IsolationForestStream
from skmultiflow.evaluation.evaluate_prequential import EvaluatePrequential
        # Create the result CSV file
directory_path = 'results/'+str(result_folder)
self.check_directory(path=directory_path)
result_file_path = directory_path+'/result_for_WS'+str(window)+'_NE'+str(estimators)+'.csv'
        # 2. Prepare the stream for use. This call used to feed the data window by window,
        # but it is deprecated in recent scikit-multiflow releases.
        # stream.prepare_for_use()
models = [HalfSpaceTrees(n_features=stream_n_features, window_size=window,
n_estimators=estimators, anomaly_threshold=anomaly),
#IForest ASD use all the window_size for the sample in the training phase
IsolationForestStream(window_size=window, n_estimators=estimators,
anomaly_threshold=anomaly, drift_threshold=drift_rate)]
# Setup the evaluator
evaluator = EvaluatePrequential(pretrain_size=1, max_samples=max_sample,
show_plot=True,
metrics=metrics, batch_size=1,
output_file = result_file_path,
n_wait = n_wait)
# 4. Run the evaluation
evaluator.evaluate(stream=stream, model=models, model_names=['HSTrees','iForestASD'])
print("")
print("Please find evaluation results here "+result_file_path)
return
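    # Example usage (a minimal sketch; assumes scikit-multiflow is installed and the
    # datasets/ folder referenced in get_dataset() below is present):
    #   comp = Comparison()
    #   stream = comp.get_dataset("Shuttle")
    #   comp.run_comparison(stream, stream_n_features=9, window=100,
    #                       estimators=30, anomaly=0.5, drift_rate=0.3)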
def get_dataset(self, dataset_name="Generator", classification_function=0,
noise_percentage=0.7, random_state=1):
        # Datasets (M = #instances, N = #attributes):
        #   Name         M        N    Anomaly threshold
        #   Http         567498   3    0.39%
        #   Smtp         95156    3    0.03%
        #   ForestCover  286048   10   0.96%
        #   Shuttle      49097    9    7.15%
if dataset_name=="Generator":
return self.get_data_generated(classification_function,
noise_percentage, random_state);
elif dataset_name=="HTTP":
path = "datasets/HTTP.csv"
return self.get_file_stream(path);
elif dataset_name=="ForestCover":
path = "datasets/ForestCover.csv"
return self.get_file_stream(path);
elif dataset_name=="Shuttle":
path = "datasets/Shuttle.csv"
return self.get_file_stream(path);
elif dataset_name=="SMTP":
path = "datasets/SMTP.csv"
return self.get_file_stream(path);
else:
print("The specified dataset do not exist yet."+
" Try to contact the administrator for any add. "+
" Or choose between these datasets:['Generator','HTTP','ForestCover','Shuttle','SMTP']");
return None
def get_file_stream(self, path):
from skmultiflow.data.file_stream import FileStream
return FileStream(path, n_targets=1, target_idx=-1)
def get_data_stream(self, path):
from skmultiflow.data.data_stream import DataStream
return
def get_data_generated(self,classification_function, noise_percentage, random_state):
from skmultiflow.data import SEAGenerator
return SEAGenerator(classification_function=classification_function,
noise_percentage=noise_percentage, random_state=random_state)
#To transform datasets by replace anomaly label by 1 and normal label by 0
def prepare_dataset_for_anomaly(self, full_dataset, y_column:int,
anomaly_label:str='\'Anomaly\'', file_name:str="new"):
import numpy as np
import pandas as pd
full_dataset[y_column] = np.where(full_dataset[y_column]==anomaly_label,1,0)
dataset = pd.DataFrame(full_dataset)
dataset.drop([0], inplace=True)
full_file_path = "../datasets/"+file_name+".csv"
dataset.to_csv(full_file_path, index=None, header=True)
return dataset
def check_directory(self,path):
from pathlib import Path
Path(path).mkdir(parents=True, exist_ok=True)
def merge_file(self, folder_path, output_file = 'output.csv'):
import os
import pandas as pd
result = pd.DataFrame()
print('List of file merged')
print()
no = '.ipynb_checkpoints'
for file_ in os.listdir(folder_path):
print(file_)
#list.append(file_)
if file_ != no:
print(file_)
                df = pd.read_csv(folder_path + file_, sep=',', skiprows=6, header=0, dtype='unicode', error_bad_lines=False)
#!/usr/bin/env python
#
# Inspired by g_mmpbsa code.
# #
# Copyright (c) 2016-2019,<NAME>.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the molmolpy Developers nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, division, print_function
from builtins import range
from builtins import object
import re
import numpy as np
import argparse
import sys
import os
import math
import time
from copy import deepcopy
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
import matplotlib.pylab as plt
import numpy as np
from matplotlib.colors import ListedColormap
import mdtraj as md
from molmolpy.utils.cluster_quality import *
from molmolpy.utils import converters
from molmolpy.utils import plot_tools
from molmolpy.utils import pdb_tools
from molmolpy.utils import folder_utils
from molmolpy.utils import extra_tools
from molmolpy.utils import pymol_tools
from molmolpy.utils import protein_analysis
class EnergyAnalysisObject(object):
"""
Usage Example
>>> from molmolpy.moldyn import md_analysis
>>> from molmolpy.g_mmpbsa import mmpbsa_analyzer
>>>
>>> import os
>>>
>>> # In[3]:
>>>
>>> folder_to_sim = '/media/Work/SimData/g_mmpbsa/HSL/HSL_1_backbone/Cluster1/'
>>>
>>> molmech = folder_to_sim + 'contrib_MM.dat'
>>> polar = folder_to_sim + 'contrib_pol.dat'
>>> apolar = folder_to_sim + 'contrib_apol.dat'
>>>
>>> LasR_energy_object = mmpbsa_analyzer.EnergyAnalysisObject(molmech, polar, apolar,
>>> sim_num=3)
>>>
>>> LasR_energy_object.plot_bar_energy_residues()
>>> LasR_energy_object.plot_most_contributions()
>>> LasR_energy_object.plot_sorted_contributions()
>>>
>>>
>>> centroid_file = '/media/Work/MEGA/Programming/docking_LasR/HSL_1_v8/centroid.pdb'
>>>
>>>
>>> LasR_energy_object.add_centroid_pdb_file(centroid_file)
>>> LasR_energy_object.save_mmpbsa_analysis_pickle('HSL_simulation_cluster3.pickle')
>>> #LasR_energy_object.visualize_interactions_pymol()
>>>
>>>
>>> test = 1
>>> # simulation_name = 'LasR_Ligand_simulation'
>>> #
    Analyzes g_mmpbsa binding free-energy output for a receptor-ligand complex
    and converts the frame-wise and residue-wise energy terms into pandas
    dataframes for summary statistics and plotting.

    Parameters
    ----------
    energymm_xvg, polar_xvg, apolar_xvg : str
        Paths to the frame-wise g_mmpbsa output files (molecular mechanics,
        polar and apolar solvation energies) used for the total binding energy.
    molmech, polar, apolar : str
        Paths to the residue-wise contribution files
        (contrib_MM.dat, contrib_pol.dat, contrib_apol.dat).
    bootstrap : bool, optional
        Whether residue energies are estimated with bootstrap resampling.
    bootstrap_steps : int, optional
        Number of bootstrap resampling steps.
    sim_num : int, optional
        Simulation index used in output file names.
    receptor_name, molecule_name : str, optional
        Names used to build the simulation label.
    meta_file : str, optional
        Optional g_mmpbsa metafile listing several complexes.

    Notes
    -----
    Convert .gro to .pdb so that mdtraj recognises the topology:
    ``gmx editconf -f npt.gro -o npt.pdb``
"""
# @profile
def __init__(self,
energymm_xvg,
polar_xvg,
apolar_xvg,
molmech,
polar,
apolar,
bootstrap=True,
bootstrap_steps=5000,
sim_num=1,
receptor_name='LasR',
molecule_name='HSL',
meta_file=None
):
self.receptor_name = receptor_name
self.molecule_name = molecule_name
self.sim_num = sim_num
self.simulation_name = self.receptor_name + '_' + self.molecule_name + '_num:' + str(self.sim_num)
self.meta_file = meta_file
# molmech = folder_to_sim + 'contrib_MM.dat'
# polar = folder_to_sim + 'contrib_pol.dat'
# apolar = folder_to_sim + 'contrib_apol.dat'
# Complex Energy
c = []
if meta_file is not None:
MmFile, PolFile, APolFile = ReadMetafile(meta_file)
for i in range(len(MmFile)):
cTmp = Complex(MmFile[i], PolFile[i], APolFile[i], K[i])
cTmp.CalcEnergy(args, frame_wise, i)
c.append(cTmp)
else:
cTmp = Complex(energymm_xvg, polar_xvg, apolar_xvg)
self.cTmp = cTmp
self.full_copy_original = deepcopy(cTmp)
self.full_copy_bootstrap = deepcopy(cTmp)
# cTmp.CalcEnergy(frame_wise, 0, bootstrap=bootstrap, bootstrap_steps=bootstrap_steps)
# c.append(cTmp)
# Summary in output files => "--outsum" and "--outmeta" file options
# TODO adapt to make able to use bootstrap as well, multiple analysis modes?
self.c = c
# summary_output_filename = self.simulation_name + '_binding_summary.log'
# Summary_Output_File(c, summary_output_filename, meta_file)
#
# corr_outname = self.simulation_name + '_correllation_distance.log'
# corr_plot = self.simulation_name + '_correllation_plot.png'
test = 1
# This won't work it needs K, read paper again
#FitCoef_all = PlotCorr(c, corr_outname, corr_plot, bootstrap_steps)
#PlotEnrgy(c, FitCoef_all, args, args.enplot)
# RESIDUE analysis part
self.MMEnData, self.resnameA = ReadData_Residue_Parse(molmech)
self.polEnData, self.resnameB = ReadData_Residue_Parse(polar)
self.apolEnData, self.resnameC = ReadData_Residue_Parse(apolar)
self.resname = CheckResname(self.resnameA, self.resnameB, self.resnameC)
self.sim_num = sim_num
Residues = []
data = []
columns_residue_energy = ['index', 'ResidueNum', 'Residue', 'TotalEnergy', 'TotalEnergySD']
for i in range(len(self.resname)):
CheckEnData_residue(self.MMEnData[i], self.polEnData[i], self.apolEnData[i])
r = Residue()
r.CalcEnergy(self.MMEnData[i], self.polEnData[i], self.apolEnData[i], bootstrap, bootstrap_steps)
Residues.append(r)
# print(' %8s %8.4f %8.4f' % (self.resname[i], r.TotalEn[0], r.TotalEn[1]))
data.append([i, i + 1, self.resname[i], r.TotalEn[0], r.TotalEn[1]])
self.pandas_residue_energy_data = pd.DataFrame(data)
self.pandas_residue_energy_data.columns = columns_residue_energy
test = 1
self.most_contributions = self.pandas_residue_energy_data[:-1]
self.most_contributions = self.most_contributions.sort_values(['TotalEnergy'])
test = 1
def calculate_binding_energy_full(self, idx=0,jump_data=1, bootstrap=False, bootstrap_steps=5000):
'''
Calculate full binding energy then analyze autocorrelation and partial correlation
:param idx: from frame number
:param bootstrap: for this one dont calculate bootstrap
:param bootstrap_steps:
:return:
'''
# TODO CALCULATION OF BINDING ENERGY
outfr = self.simulation_name + '_full.log'
try:
frame_wise = open(outfr, 'w')
except:
raise IOError('Could not open file {0} for writing. \n'.format(outfr))
frame_wise.write(
'#Time E_VdW_mm(Protein)\tE_Elec_mm(Protein)\tE_Pol(Protein)\tE_Apol(Protein)\tE_VdW_mm(Ligand)\tE_Elec_mm(Ligand)\tE_Pol(Ligand)\tE_Apol(Ligand)\tE_VdW_mm(Complex)\tE_Elec_mm(Complex)\tE_Pol(Complex)\tE_Apol(Complex)\tDelta_E_mm\tDelta_E_Pol\tDelta_E_Apol\tDelta_E_binding\n')
self.frame_wise_full = frame_wise
self.c_full = []
self.full_copy_original.CalcEnergy(self.frame_wise_full, idx, jump_data=jump_data, bootstrap=bootstrap, bootstrap_steps=bootstrap_steps)
self.c_full.append(self.full_copy_original)
summary_output_filename = self.simulation_name + '_binding_summary_full.log'
Summary_Output_File(self.c_full, summary_output_filename, self.meta_file)
self.autocorr_analysis(self.c_full, 'full')
def calculate_binding_energy_bootstrap(self, idx=0, bootstrap=True, bootstrap_steps=5000, bootstrap_jump=4):
'''
Calculate bootstrap binding energy then analyze autocorrelation and partial correlation
:param idx: from frame number
:param bootstrap: for this one dont calculate bootstrap
:param bootstrap_steps:
:return:
'''
# TODO CALCULATION OF BINDING ENERGY
outfr = self.simulation_name + '_bootstrap.log'
try:
frame_wise = open(outfr, 'w')
except:
raise IOError('Could not open file {0} for writing. \n'.format(outfr))
frame_wise.write(
'#Time E_VdW_mm(Protein)\tE_Elec_mm(Protein)\tE_Pol(Protein)\tE_Apol(Protein)\tE_VdW_mm(Ligand)\tE_Elec_mm(Ligand)\tE_Pol(Ligand)\tE_Apol(Ligand)\tE_VdW_mm(Complex)\tE_Elec_mm(Complex)\tE_Pol(Complex)\tE_Apol(Complex)\tDelta_E_mm\tDelta_E_Pol\tDelta_E_Apol\tDelta_E_binding\n')
self.frame_wise_bootstrap = frame_wise
self.c_bootstrap = []
self.full_copy_bootstrap.CalcEnergy(self.frame_wise_bootstrap, idx,
bootstrap=bootstrap,
bootstrap_steps=bootstrap_steps,
bootstrap_jump=bootstrap_jump)
self.c_bootstrap.append(self.full_copy_bootstrap)
summary_output_filename = self.simulation_name + '_binding_summary_bootstrap.log'
Summary_Output_File(self.c_bootstrap, summary_output_filename, self.meta_file)
self.autocorr_analysis(self.c_bootstrap, 'bootstrap')
def autocorr_analysis(self, energy_val, naming='full'):
if naming =='full':
total_en = energy_val[0].TotalEn
time = energy_val[0].time
else:
total_en = energy_val[0].TotalEn_bootstrap
time = energy_val[0].time_bootstrap
# Old version :)
# print('Mean autocorrelation ', np.mean(autocorr(total_en)))
# plt.semilogx(time, autocorr(total_en))
# plt.xlabel('Time [ps]', size=16)
# plt.ylabel('Binding Energy autocorrelation', size=16)
# plt.show()
from pandas import Series
from matplotlib import pyplot
from statsmodels.graphics.tsaplots import plot_acf
        # Series.from_array was removed in pandas 1.0; Series(total_en, index=time)
        # is the modern equivalent.
        series = Series.from_array(total_en, index=time)
from typing import *
import pandas as pd
from .api import TBARawAPI
from .client import TBACachedSession, TBAQueryArguments
from .exceptions import *
__all__ = ["query_args", "event_helper"]
class TBABaseHelper:
def __init__(self, session: "TBACachedSession"):
self._api = TBARawAPI(session)
def _get_api(self) -> "TBARawAPI":
return self._api
def _has_error(self, req, **kw):
return "Errors" in getattr(self._api, req)(**kw).keys()
class TBAEventHelper(TBABaseHelper):
def __init__(self, session: "TBACachedSession"):
super().__init__(session)
if "event_key" not in session.query_args.query_args_dict:
raise TBARequiredArgumentNotError("Cannot use TBAEventHelper without an event")
self.event_key = session.query_args.query_args_dict["event_key"]
def check_validity(self):
if self._has_error("event"):
raise TBAInvalidKeyError("Invalid Event Key")
def list_matches(self, simple: "bool" = False) -> "dict":
if simple:
return self._api.event_matches_simple()
else:
return self._api.event_matches()
def qualification_match_schedule(self,
use_df=True,
df_one_indexed=True,
transpose=False,
convert_to_int=True):
positions = [(colour, number) for colour in ["Red", "Blue"] for number in [0, 1, 2]]
qualification_matches = {match["match_number"]: match
for match in self.list_matches(simple=True) if match["comp_level"] == "qm"}
schedule_rows = []
for match_number in sorted(qualification_matches.keys()):
match_data = qualification_matches[match_number]
schedule_row = {}
for colour, number in positions:
match_key = match_data["alliances"][colour.lower()]["team_keys"][number]
if convert_to_int:
match_value = int(match_key[3:])
else:
match_value = match_key
schedule_row["{} {}".format(colour, number + 1)] = match_value
schedule_rows.append(schedule_row)
columns = ["{} {}".format(colour, number + 1) for colour, number in positions]
if use_df:
            table = pd.DataFrame(schedule_rows, columns=columns)
from flask import (
Blueprint, Flask, request, session, g,
redirect, url_for, abort, render_template, flash,
make_response, send_file
)
from flask_login import login_required, current_user
from pfedu.forms import UserForm, MoleculeForm, PasswdForm
from pfedu.models import db, Molecule, StatMech, User, Reaction, \
ReactionB
from datetime import datetime
import zipfile
import io
from sqlalchemy.exc import IntegrityError
import pandas as pd
bp = Blueprint('admin', __name__, url_prefix='/admin')
# List partition functions
@bp.route('/')
@login_required
def index():
if not current_user.admin:
return redirect(url_for('index'))
mols = Molecule.query.all()
return render_template('admin/index.html', mols=mols)
@bp.route('/passwd', methods=['GET', 'POST'])
@login_required
def passwd():
if not current_user.admin:
return redirect(url_for('index'))
form = PasswdForm()
if form.validate_on_submit():
current_user.set_password(form.passwd.data)
db.session.commit()
flash('Password changed')
return redirect(url_for('admin.index'))
return render_template('admin/passwd.html', form=form)
@bp.route('/get_data/<int:mol_id>')
@login_required
def get_data(mol_id):
if not current_user.admin:
return redirect(url_for('index'))
mol = Molecule.query.get(mol_id)
# Generate CSV
sts = StatMech.query.filter_by(mol_id=mol_id).all()
data = []
for st in sts:
#data.append([st.temp, st.q_trans, st.q_rot, st.q_vib,
# st.q_elec])
data.append([st.temp, st.q_trans, st.q_rot])
#df = pd.DataFrame(data=data,columns=['temperature', 'q_trans',
#'q_rot', 'q_vib', 'q_elec'])
df = pd.DataFrame(data=data,columns=['temperature', 'q_trans',
'q_rot'])
df = df.set_index('temperature')
df = df.sort_index()
res = make_response(df.to_csv())
timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M')
res.headers["Content-Disposition"] = \
"attachment; filename=cem484_{}_{}.csv".format(mol.name, timestamp)
res.headers["Content-Type"] = "text/csv"
return res
@bp.route('/get_data_reaction/<reaction>')
@login_required
def get_data_reaction(reaction):
if not current_user.admin:
return redirect(url_for('index'))
# Generate CSV
if reaction == 'a':
reacs = Reaction.query.all()
elif reaction == 'b':
reacs = ReactionB.query.all()
data = []
for reac in reacs:
data.append([reac.temp, reac.delta_g, reac.delta_h,
reac.delta_s, reac.k_p])
    df = pd.DataFrame(data=data, columns=['temperature', 'delta_g',
                                          'delta_h', 'delta_s', 'k_p'])
import logging
from functools import lru_cache
from time import perf_counter as pc
from typing import Tuple, Dict, Union, List
import numpy as np
import pandas as pd
from sortedcontainers import SortedDict
from pandas_ml_utils.constants import *
from pandas_ml_utils.model.features_and_labels.features_and_labels import FeaturesAndLabels
from pandas_ml_utils.model.features_and_labels.target_encoder import TargetLabelEncoder, \
MultipleTargetEncodingWrapper, IdentityEncoder
from pandas_ml_utils.model.fitting.splitting import train_test_split
from pandas_ml_utils.utils.classes import ReScaler
from pandas_ml_utils.utils.functions import log_with_time, call_callable_dynamic_args, unique_top_level_columns, \
join_kwargs, integrate_nested_arrays
_log = logging.getLogger(__name__)
class FeatureTargetLabelExtractor(object):
def __init__(self, df: pd.DataFrame, features_and_labels: FeaturesAndLabels, **kwargs):
# prepare fields
labels = features_and_labels.labels
encoder = lambda frame, **kwargs: frame
label_columns = None
joined_kwargs = join_kwargs(features_and_labels.kwargs, kwargs)
        # if the labels are given as a callable, evaluate it to get the expected structure
if callable(labels):
labels = call_callable_dynamic_args(labels, df, **joined_kwargs)
# unfold labels, currently supported types are:
# Union[List[str], TargetLabelEncoder, Dict[str, Union[List[str], TargetLabelEncoder]]]
if isinstance(labels, list):
label_columns = labels
elif isinstance(labels, TargetLabelEncoder):
encoder = labels.encode
label_columns = labels.labels_source_columns
elif isinstance(labels, Dict):
# this is our multi model case, here we add an extra dimension to the labels array
label_columns = [l for ls in labels.values()
for l in (ls if isinstance(ls, list) else ls.labels_source_columns)]
# we need a special encoder which is wrapping all encoder for each target
encoder = MultipleTargetEncodingWrapper({
t: l if isinstance(l, TargetLabelEncoder) else IdentityEncoder(l) for t, l in labels.items()
}).encode
# assign all fields
        self._features_and_labels = features_and_labels  # deprecated: copy all fields here instead
self._features = features_and_labels.features
self._labels_columns = label_columns
self._labels = labels
self._label_type = features_and_labels.label_type
self._targets = features_and_labels.targets
self._gross_loss = features_and_labels.gross_loss
self._encoder = encoder
self._joined_kwargs = joined_kwargs
        # pre-assign this variable,
        # but note that it gets overwritten by an engineered data frame later on
        self._df = df
        # this helper uses closures (it captures self.__dict__ and the joined kwargs)
def call_dynamic(func, *args):
joined_kwargs = join_kwargs(self.__dict__, self._joined_kwargs)
return call_callable_dynamic_args(func, *args, **joined_kwargs)
self._df = call_dynamic(features_and_labels.pre_processor, df)
self.__call_dynamic = call_dynamic
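        # Illustrative label specifications handled above (names are placeholders):
        #   labels=["return_1d"]                                 -> plain list of label columns
        #   labels=<any TargetLabelEncoder instance>             -> encoder supplies the source columns
        #   labels={"up": ["label_up"], "down": ["label_down"]}  -> one target per dict key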
@property
def df(self):
return self._df
@property
def min_required_samples(self):
return len(self._df) - len(self.features_df) + 1
def prediction_to_frame(self,
prediction: np.ndarray,
index: pd.Index = None,
inclusive_labels: bool = False,
inclusive_source: bool = False) -> Union[pd.DataFrame, Dict[str, pd.DataFrame]]:
# sanity check
if not isinstance(prediction, np.ndarray):
raise ValueError(f"got unexpected prediction: {type(prediction)}\n{prediction}")
# assign index
index = self._df.index if index is None else index
# eventually fix the shape of the prediction
if len(prediction.shape) == 1:
prediction = prediction.reshape(len(prediction), 1)
# prediction_columns
columns = pd.MultiIndex.from_tuples(self.label_names(PREDICTION_COLUMN_NAME))
multi_dimension_prediction = len(prediction.shape) > 1 and len(columns) < prediction.shape[1]
if multi_dimension_prediction:
if len(prediction.shape) < 3:
df = pd.DataFrame({"a":[ r.tolist() for r in prediction]}, index=index)
else:
df = pd.DataFrame({col: [row.tolist() for row in prediction[:, col]] for col in range(prediction.shape[1])},index=index)
df.columns = columns
else:
df = pd.DataFrame(prediction, index=index, columns=columns)
# add labels if requested
if inclusive_labels:
dfl = self.labels_df
dfl.columns = pd.MultiIndex.from_tuples(self.label_names(LABEL_COLUMN_NAME))
df = df.join(dfl, how='inner')
# add loss if provided
loss_df = self.gross_loss_df
df = df.join(loss_df.loc[df.index], how='inner') if loss_df is not None else df
# add target if provided
target_df = self.target_df
df = df.join(target_df.loc[df.index], how='inner') if target_df is not None else df
# also add source if requested
if inclusive_source:
df = df.join(self.source_df, how='inner')
# finally we can return our nice and shiny df
return df
def training_and_test_data(self,
test_size: float = 0.4,
youngest_size: float = None,
seed: int = 42) -> Tuple[Tuple[np.ndarray,...], Tuple[np.ndarray,...]]:
features, labels, weights = self.features_labels_weights_df
train_ix, test_ix = train_test_split(features.index, test_size, youngest_size, seed=seed)
return (
(train_ix,
features.loc[train_ix].values,
integrate_nested_arrays(labels.loc[train_ix].values),
weights.loc[train_ix].values if weights is not None else None),
(test_ix,
features.loc[test_ix].values,
integrate_nested_arrays(labels.loc[test_ix].values),
weights.loc[test_ix].values if weights is not None else None)
)
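    # Hypothetical usage sketch (added comment, names are illustrative):
    #   (train_ix, x_train, y_train, w_train), (test_ix, x_test, y_test, w_test) = \
    #       frames.training_and_test_data(test_size=0.3)
    # Note that the weight arrays are currently None (see features_labels_weights_df below).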
@property
def features_labels_weights_df(self) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
# engineer features and labels
df_features = self.features_df
df_labels = self.labels_df
index_intersect = df_features.index.intersection(df_labels.index)
# select only joining index values
df_features = df_features.loc[index_intersect]
df_labels = df_labels.loc[index_intersect]
# TODO add proper label weights
df_weights = None #pd.DataFrame(np.ones(len(df_labels)), index=df_labels.index)
# sanity check
if not len(df_features) == len(df_labels):
raise ValueError(f"unbalanced length of features and labels {len(df_features), len(df_labels)}")
return df_features, df_labels, df_weights
@property
@lru_cache(maxsize=1)
def features_df(self) -> pd.DataFrame:
start_pc = log_with_time(lambda: _log.debug(" make features ..."))
feature_lags = self._features_and_labels.feature_lags
features = self._features
lag_smoothing = self._features_and_labels.lag_smoothing
feature_rescaling = self._features_and_labels.feature_rescaling
# drop nan's and copy frame
df = self._df[features].dropna().copy()
# generate feature matrix
if feature_lags is None:
dff = df
else:
dff = | pd.DataFrame({}, index=df.index) | pandas.DataFrame |
# Functions for performing analysis in the article
# "Material Culture Studies in the Age of Big Data:
# Digital Excavation of Homemade Facemask Production
# during the COVID-19 Pandemic"
#
# Code Written By: <NAME>
#
# For import/use instructions, see README.md
import pandas as pd
import geopandas as gpd
import nltk
import itertools
import collections
import seaborn as sns
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
# download the necessary NLTK data if we don't already have it
nltk.download('stopwords', quiet=True)
nltk.download('punkt', quiet=True)
nltk.download('averaged_perceptron_tagger', quiet=True)
nltk.download('wordnet', quiet=True)
# Define general stop words + study-specific stop-words
stop = nltk.corpus.stopwords.words('english') \
+ ["face", "mask", "masks", "facemask", "facemasks"]
# Term lists
intentionality_eff = \
[("two", "layer"), ("double", "layer"), ("2", "layer"),
("three", "layer"), ("triple", "layer"), ("3", "layer"),
("multi", "layer"), ("multiple", "layer"), "multilayer", "multilayered",
"upf", "uv", "thick", "cotton",
("adjustable", "fit"), ("form", "fit"), ("snug", "fit"), ("tight", "fit"),
("nose", "wire"),
("cover", "chin"), ("cover", "nose"), ("cover", "mouth"),
("filter", "pocket"), "cotton", "kn95", "n95"]
intentionality_ineff = \
["mesh", "crochet", "yarn", "lace", "hole",
("one", "layer"), ("single", "layer"), ("1", "layer"),
"compliance", "antimask", ("anti", "mask"), "protest"]
unintentionality_ineff = ["valve", "thin", "loose"]
mesh = ["mesh"]
antimask = ["antimask", ("anti", "mask")]
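# Added note (not original): in the term lists above, bare strings are single tokens while
# 2-tuples such as ("two", "layer") presumably represent word pairs to be matched together
# in the tokenized listing titles.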
# List of states won by Biden and Trump, respectively
biden = ["Washington", "Oregon", "California", "Nevada",
"Arizona", "New Mexico", "Colorado", "Hawaii",
"Minnesota", "Wisconsin", "Illinois", "Michigan",
"Georgia", "Pennsylvania", "Virginia", "Maryland",
"New Jersey", "New York", "Massachusetts", "Connecticut",
"Rhode Island", "Delaware", "Vermont", "New Hampshire",
"Maine"]
trump = ["Alaska", "Idaho", "Utah", "Montana",
"Wyoming", "North Dakota", "South Dakota", "Nebraska",
"Kansas", "Oklahoma", "Texas", "Iowa",
"Missouri", "Arkansas", "Louisiana", "Indiana",
"Kentucky", "Tennessee", "Mississippi", "Alabama",
"West Virginia", "Ohio", "North Carolina", "South Carolina",
"Florida"]
def process_data(data_path='data/'):
'''
    Takes the cleaned Etsy data (in the subdirectory provided as input)
    and processes it for the user. All of the necessary files (a SHP file
    containing polygon boundaries of U.S. states from the U.S. Census Bureau
    as of 2020, along with a CSV of collected Etsy facemask data whose
    text columns have been pre-cleaned of extraneous characters) are
    in the data/ subdirectory of this repository, so `data/` is the default
    path.
    Returns a Pandas DataFrame (with lemmatized and tokenized
    listing titles), along with a GeoPandas GeoDataFrame containing
    U.S. state polygons from the 2020 census (shp)
'''
df = | pd.read_csv(data_path + 'clean_etsy_data.csv') | pandas.read_csv |
"""
Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from datetime import datetime
from decimal import *
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
from shapely.geometry.polygon import LineString
from sklearn.preprocessing import MinMaxScaler
import cw_utils as cw
def load_data(fname):
data = []
with open(fname, 'r') as f:
for line in f.readlines():
if "SIM_TRACE_LOG" in line:
parts = line.split("SIM_TRACE_LOG:")[1].split('\t')[0].split(",")
data.append(",".join(parts))
return data
def calc_velocity(timestamp, curr_position, prev_timestamp, prev_position):
time_diff = timestamp - prev_timestamp
curr_x, curr_y = curr_position
prev_x, prev_y = prev_position
distance_travelled = (((prev_x - curr_x) ** 2) + ((prev_y - curr_y) ** 2)) ** .5
return (distance_travelled / float(time_diff)) / 100
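# Added note (not original): convert_to_pandas() below stores x/y scaled by 100 (metres -> cm),
# so the final division by 100 converts the cm/s figure back to m/s; e.g. moving from (0, 0)
# to (30, 40) cm in 0.5 s gives 50 cm / 0.5 s / 100 = 1.0 m/s.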
def convert_to_pandas(data, episodes_per_iteration=20):
"""
stdout_ = 'SIM_TRACE_LOG:%d,%d,%.4f,%.4f,%.4f,%.2f,%.2f,%d,%.4f,%s,%s,%.4f,%d,%.2f,%s\n' % (
self.episodes, self.steps, model_location[0], model_location[1], model_heading,
self.steering_angle,
self.speed,
self.action_taken,
self.reward,
self.done,
all_wheels_on_track,
current_progress,
closest_waypoint_index,
self.track_length,
time.time())
print(stdout_)
"""
df_list = list()
prev_episode = None
prev_timestamp = None
prev_position = (0, 0)
# ignore the first two dummy values that coach throws at the start.
for d in data[2:]:
parts = d.rstrip().split(",")
episode = int(parts[0])
steps = int(parts[1])
x = 100 * float(parts[2])
y = 100 * float(parts[3])
yaw = float(parts[4])
steer = float(parts[5])
throttle = float(parts[6])
action = float(parts[7])
reward = float(parts[8])
done = 0 if 'False' in parts[9] else 1
all_wheels_on_track = parts[10]
progress = float(parts[11])
closest_waypoint = int(parts[12])
track_len = float(parts[13])
tstamp = Decimal(parts[14])
curr_position = (x, y)
if prev_episode != episode:
velocity = 0
else:
velocity = calc_velocity(tstamp, curr_position, prev_timestamp, prev_position)
prev_timestamp = tstamp
prev_position = curr_position
prev_episode = episode
iteration = int(episode / episodes_per_iteration) + 1
df_list.append((iteration, episode, steps, x, y, yaw, steer, throttle,
action, reward, done, all_wheels_on_track, progress,
closest_waypoint, track_len, tstamp, velocity))
header = ['iteration', 'episode', 'steps', 'x', 'y', 'yaw', 'steer',
'throttle', 'action', 'reward', 'done', 'on_track', 'progress',
'closest_waypoint', 'track_len', 'timestamp', 'velocity']
df = pd.DataFrame(df_list, columns=header)
return df
def normalize_rewards(df):
# Normalize the rewards to a 0-1 scale
min_max_scaler = MinMaxScaler()
scaled_vals = min_max_scaler.fit_transform(
df['reward'].values.reshape(df['reward'].values.shape[0], 1))
df['reward'] = pd.DataFrame(scaled_vals.squeeze())
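# Hypothetical usage sketch (not part of the original helpers; the log file name is an
# assumption): parse a local SIM_TRACE_LOG dump, build the per-step DataFrame and rescale
# the rewards in place.
def _example_training_log_to_df(fname='deepracer-sim-trace.log'):
    """Illustrative only: one row per SIM_TRACE_LOG line, rewards scaled to 0-1."""
    data = load_data(fname)
    df = convert_to_pandas(data, episodes_per_iteration=20)
    normalize_rewards(df)
    return df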
def episode_parser(data):
"""
Arrange data per episode
"""
action_map = {} # Action => [x,y,reward]
episode_map = {} # Episode number => [x,y,action,reward]
for d in data[:]:
parts = d.rstrip().split("SIM_TRACE_LOG:")[-1].split(",")
e = int(parts[0])
x = float(parts[2])
y = float(parts[3])
angle = float(parts[5])
ttl = float(parts[6])
action = int(parts[7])
reward = float(parts[8])
try:
episode_map[e]
except KeyError:
episode_map[e] = np.array([0, 0, 0, 0, 0, 0]) # dummy
episode_map[e] = np.vstack(
(episode_map[e], np.array([x, y, action, reward, angle, ttl])))
try:
action_map[action]
except KeyError:
action_map[action] = []
action_map[action].append([x, y, reward])
# top laps
total_rewards = {}
for x in episode_map.keys():
arr = episode_map[x]
total_rewards[x] = np.sum(arr[:, 3])
import operator
top_idx = dict(sorted(total_rewards.items(),
key=operator.itemgetter(1),
reverse=True)[:])
sorted_idx = list(top_idx.keys())
return action_map, episode_map, sorted_idx
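# Hypothetical usage sketch (file name is an assumption):
# action_map, episode_map, sorted_idx = episode_parser(load_data('deepracer-sim-trace.log'))
# sorted_idx holds the episode numbers ordered by total reward, highest first.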
def make_error_boxes(ax, xdata, ydata, xerror, yerror, facecolor='r',
edgecolor='r', alpha=0.3):
# Create list for all the error patches
errorboxes = []
# Loop over data points; create box from errors at each point
for x, y, xe, ye in zip(xdata, ydata, xerror.T, yerror.T):
rect = Rectangle((x - xe[0], y - ye[0]), xe.sum(), ye.sum())
errorboxes.append(rect)
# Create patch collection with specified colour/alpha
pc = PatchCollection(errorboxes, facecolor=facecolor, alpha=alpha,
edgecolor=edgecolor)
# Add collection to axes
ax.add_collection(pc)
return 0
def v_color(ob):
color = {
True: '#6699cc',
False: '#ffcc33'
}
return color[ob.is_simple]
def plot_coords(ax, ob):
x, y = ob.xy
ax.plot(x, y, '.', color='#999999', zorder=1)
def plot_bounds(ax, ob):
x, y = zip(*list((p.x, p.y) for p in ob.boundary))
ax.plot(x, y, '.', color='#000000', zorder=1)
def plot_line(ax, ob, color='cyan'):
x, y = ob.xy
ax.plot(x, y, color=color, alpha=0.7, linewidth=3, solid_capstyle='round',
zorder=2)
def print_border(ax, waypoints, inner_border_waypoints, outer_border_waypoints,
color='lightgrey'):
line = LineString(waypoints)
plot_coords(ax, line)
plot_line(ax, line, color)
line = LineString(inner_border_waypoints)
plot_coords(ax, line)
plot_line(ax, line, color)
line = LineString(outer_border_waypoints)
plot_coords(ax, line)
plot_line(ax, line, color)
def plot_top_laps(sorted_idx, episode_map, center_line, inner_border,
outer_border, n_laps=5):
fig = plt.figure(n_laps, figsize=(12, n_laps * 10))
for i in range(n_laps):
idx = sorted_idx[i]
episode_data = episode_map[idx]
ax = fig.add_subplot(n_laps, 1, i + 1)
line = LineString(center_line)
plot_coords(ax, line)
plot_line(ax, line)
line = LineString(inner_border)
plot_coords(ax, line)
plot_line(ax, line)
line = LineString(outer_border)
plot_coords(ax, line)
plot_line(ax, line)
for idx in range(1, len(episode_data) - 1):
x1, y1, action, reward, angle, speed = episode_data[idx]
car_x2, car_y2 = x1 - 0.02, y1
plt.plot([x1 * 100, car_x2 * 100], [y1 * 100, car_y2 * 100], 'b.')
plt.show()
plt.clf()
return fig
def plot_evaluations(evaluations, inner, outer, graphed_value='throttle'):
streams = evaluations.sort_values('timestamp', ascending=False).groupby('stream', sort=False)
for name, stream in streams:
fig, axes = plt.subplots(2, 3, figsize=(20, 10))
fig.tight_layout(pad=0.4, w_pad=0.5, h_pad=7.0)
for id, episode in stream.groupby('episode'):
plot_grid_world(episode, inner, outer, graphed_value, ax=axes[int(id / 3), id % 3])
plt.show()
plt.clf()
def plot_grid_world(episode_df, inner, outer, graphed_value='throttle', min_progress=None, ax=None):
"""
    plot a scaled version of the lap, along with the throttle applied at each position
"""
lap_time = np.ptp(episode_df['timestamp'].astype(float))
average_velocity = np.nanmean(episode_df['velocity'])
max_velocity = np.max(episode_df['velocity'])
average_throttle = np.nanmean(episode_df['throttle'])
progress = np.nanmax(episode_df['progress'])
distance = average_velocity * lap_time
if not min_progress or progress > min_progress:
distance_lap_time = 'Distance = %.2f (meters), progress = %.2f %%, lap time = %.2f (sec)' % (
distance, progress, lap_time)
throttle_velocity = 'Average throttle = %.2f (Gazebo), Average velocity = %.2f (meters/sec), Maximum velocity = %.2f (meters/sec)' % (
average_throttle, average_velocity, max_velocity)
fig = None
if ax is None:
fig = plt.figure(figsize=(16, 10))
ax = fig.add_subplot(1, 1, 1)
ax.set_facecolor('midnightblue')
line = LineString(inner)
plot_coords(ax, line)
plot_line(ax, line)
line = LineString(outer)
plot_coords(ax, line)
plot_line(ax, line)
episode_df.plot.scatter('x', 'y', ax=ax, s=3, c=graphed_value, cmap=plt.get_cmap('plasma'))
subtitle = '%s%s\n%s\n%s' % (
('Stream: %s, ' % episode_df['stream'].iloc[0]) if 'stream' in episode_df.columns else '',
datetime.fromtimestamp(episode_df['timestamp'].iloc[0]),
distance_lap_time,
throttle_velocity)
ax.set_title(subtitle)
if fig:
plt.show()
plt.clf()
def simulation_agg(panda, firstgroup='iteration', add_timestamp=False, is_eval=False):
grouped = panda.groupby([firstgroup, 'episode'])
by_steps = grouped['steps'].agg(np.max).reset_index()
by_start = grouped.first()['closest_waypoint'].reset_index() \
.rename(index=str, columns={"closest_waypoint": "start_at"})
by_progress = grouped['progress'].agg(np.max).reset_index()
by_throttle = grouped['throttle'].agg(np.mean).reset_index()
by_velocity = grouped['velocity'].agg(np.mean).reset_index().rename(columns={'velocity': 'mean_velocity'})
by_max_velocity = grouped['velocity'].agg(np.max).reset_index().rename(columns={'velocity': 'max_velocity'})
by_time = grouped['timestamp'].agg(np.ptp).reset_index() \
.rename(index=str, columns={"timestamp": "time"})
by_time['time'] = by_time['time'].astype(float)
result = by_steps \
.merge(by_start) \
.merge(by_progress, on=[firstgroup, 'episode']) \
.merge(by_time, on=[firstgroup, 'episode'])
if not is_eval:
if 'new_reward' not in panda.columns:
print('new reward not found, using reward as its values')
panda['new_reward'] = panda['reward']
by_new_reward = grouped['new_reward'].agg(np.sum).reset_index()
result = result.merge(by_new_reward, on=[firstgroup, 'episode'])
    result = result.merge(by_throttle, on=[firstgroup, 'episode']) \
        .merge(by_velocity, on=[firstgroup, 'episode']) \
        .merge(by_max_velocity, on=[firstgroup, 'episode'])
if not is_eval:
by_reward = grouped['reward'].agg(np.sum).reset_index()
result = result.merge(by_reward, on=[firstgroup, 'episode'])
result['time_if_complete'] = result['time'] * 100 / result['progress']
if not is_eval:
result['reward_if_complete'] = result['reward'] * 100 / result['progress']
result['quintile'] = pd.cut(result['episode'], 5, labels=['1st', '2nd', '3rd', '4th', '5th'])
if add_timestamp:
by_timestamp = grouped['timestamp'].agg(np.max).astype(float).reset_index()
by_timestamp['timestamp'] = | pd.to_datetime(by_timestamp['timestamp'], unit='s') | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
set of functions to drive EasyQuake
"""
print(r"""
____ __
___ ____ ________ __/ __ \__ ______ _/ /_____
/ _ \/ __ `/ ___/ / / / / / / / / / __ `/ //_/ _ \
/ __/ /_/ (__ ) /_/ / /_/ / /_/ / /_/ / ,< / __/
\___/\__,_/____/\__, /\___\_\__,_/\__,_/_/|_|\___/
/____/
Earthquake detection and location open-source software
<NAME> - Oklahoma Geological Survey
http://github.com/jakewalter/easyQuake
https://easyquake.readthedocs.io
<EMAIL>
""")
#import sys
#sys.path.append("/home/jwalter/syncpython")
from .phasepapy import fbpicker
pathgpd = '/'.join(str(fbpicker.__file__).split("/")[:-2])+'/gpd_predict'
pathEQT = '/'.join(str(fbpicker.__file__).split("/")[:-2])+'/EQTransformer'
pathhyp = '/'.join(str(fbpicker.__file__).split("/")[:-2])+'/hyp2000'
from .phasepapy import tables1D, assoc1D
from .phasepapy import tt_stations_1D
import os
st = os.stat(pathgpd+'/gpd_predict.py')
st1 = os.stat(pathEQT+'/mseed_predictor.py')
import stat
import os
from obspy import UTCDateTime
from obspy import Inventory, read_inventory
from obspy.clients.fdsn import Client
from obspy import read
import numpy as np
import glob
import obspy.taup as taup
from obspy.taup import TauPyModel
#from obspy.taup.velocity_model import VelocityModel
from obspy.taup.taup_create import build_taup_model
from obspy import geodetics
from obspy.clients.fdsn.mass_downloader import CircularDomain, RectangularDomain, Restrictions, MassDownloader
from obspy.core.event.base import WaveformStreamID
from sqlalchemy.orm import *
from sqlalchemy import create_engine
import pandas as pd
import sqlite3
from sqlite3 import Error
from obspy.geodetics import gps2dist_azimuth, kilometer2degrees
import re
from datetime import datetime
from obspy import Stream
from obspy.core.event import Catalog, Event, Magnitude, Origin, Pick, StationMagnitude, Amplitude, Arrival, OriginUncertainty, OriginQuality, ResourceIdentifier
import h5py
#from obspy.signal.invsim import simulate_seismometer as seis_sim
fmtP = "%4s%1sP%1s%1i %15s"
fmtS = "%12s%1sS%1s%1i\n"
fmt = "%6s%02i%05.2f%1s%03i%05.2f%1s%4i\n"
#min_proba = 0.993 # Minimum softmax probability for phase detection
## try 0.992 if you have the computing power
#freq_min = 3.0
#freq_max = 20.0
#filter_data = True
#decimate_data = True # If false, assumes data is already 100 Hz samprate
#n_shift = 10 # Number of samples to shift the sliding window at a time
#n_gpu = 1 # Number of GPUs to use (if any)
######################
#batch_size = 1000*3
#
#half_dur = 2.00
#only_dt = 0.01
#n_win = int(half_dur/only_dt)
#n_feat = 2*n_win
from datetime import timedelta, date
def daterange(start_date, end_date):
for n in range(int ((end_date - start_date).days)):
yield start_date + timedelta(n)
class SCNL():
""" This class is copied from PhasePaPy"""
def __init__(self,input=None):
if not isinstance(input, SCNL):
self.station=None
self.channel=None
self.network=None
self.location=None
if type(input) is str:
self.parse_scnlstr(input)
if type(input) is list:
if len(input)==4:
self.station,self.channel,self.network,self.location=input
if len(input)==3:
self.station,self.channel,self.network=input
def download_mseed(dirname=None, project_folder=None, single_date=None, minlat=None, maxlat=None, minlon=None, maxlon=None, dense=False):
starting = UTCDateTime(single_date.strftime("%Y")+'-'+single_date.strftime("%m")+'-'+single_date.strftime("%d")+'T00:00:00.0')
stopping = starting + 86430
starttime = starting
endtime = stopping
#domain = CircularDomain(-90,0,minradius=0.0, maxradius=30.0)
domain = RectangularDomain(minlatitude=minlat, maxlatitude=maxlat,minlongitude=minlon, maxlongitude=maxlon)
#domain = RectangularDomain(minlatitude=-90, maxlatitude=-60,minlongitude=-180, maxlongitude=180)
if dense:
restrictions = Restrictions(starttime=starttime, endtime=endtime,reject_channels_with_gaps=False,minimum_length=0,minimum_interstation_distance_in_m=1, channel_priorities=["HH[ZNE12]", "BH[ZNE12]","EH[ZNE12]","SH[ZNE12]","HN[ZNE12]","EN[ZNE12]"])
else:
restrictions = Restrictions(starttime=starttime, endtime=endtime,reject_channels_with_gaps=False,minimum_length=0,minimum_interstation_distance_in_m=5000, channel_priorities=["HH[ZNE12]", "BH[ZNE12]","EH[ZNE12]","SH[ZNE12]","HN[ZNE12]","EN[ZNE12]"])
mseed1 = project_folder+'/'+dirname
if not os.path.exists(mseed1):
os.makedirs(mseed1) #domain = CircularDomain(-90,0,minradius=0.0, maxradius=30.0)
#original1 = project_folder+'/*.[BH]??__'+dirname+'*'
#os.system("mv %s %s" % (original1,mseed1))
mdl = MassDownloader()
mdl.download(domain, restrictions, threads_per_client=4, mseed_storage=mseed1,stationxml_storage=mseed1)
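# Hypothetical usage sketch (not part of the original module; project folder, dates and
# bounds are assumptions): fetch one day of miniSEED plus StationXML for a rectangular region.
def _example_download_one_day():
    for single_date in daterange(date(2016, 9, 3), date(2016, 9, 4)):
        download_mseed(dirname=single_date.strftime("%Y%m%d"),
                       project_folder='/data/myproject',
                       single_date=single_date,
                       minlat=33.5, maxlat=37.5, minlon=-104.0, maxlon=-94.0)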
def download_mseed_event(dirname=None, project_folder=None, starting=None, stopping = None, minlat=None, maxlat=None, minlon=None, maxlon=None, maxrad=None):
starttime = starting
endtime = stopping
#domain = CircularDomain(lat1,lon1,minradius=0.0, maxradius=maxrad)
domain = RectangularDomain(minlatitude=minlat, maxlatitude=maxlat,minlongitude=minlon, maxlongitude=maxlon)
#domain = RectangularDomain(minlatitude=-90, maxlatitude=-60,minlongitude=-180, maxlongitude=180)
restrictions = Restrictions(starttime=starttime, endtime=endtime,chunklength_in_sec=86400,reject_channels_with_gaps=False,minimum_length=0,minimum_interstation_distance_in_m=5000, channel_priorities=["HH[ZNE12]", "BH[ZNE12]","EH[ZNE12]","SH[ZNE12]","HN[ZNE12]","EN[ZNE12]"])
mseed1 = project_folder+'/'+dirname
if not os.path.exists(mseed1):
os.makedirs(mseed1) #domain = CircularDomain(-90,0,minradius=0.0, maxradius=30.0)
#original1 = project_folder+'/*.[BH]??__'+dirname+'*'
#os.system("mv %s %s" % (original1,mseed1))
mdl = MassDownloader()
mdl.download(domain, restrictions, threads_per_client=4, mseed_storage=mseed1,stationxml_storage=mseed1)
def download_mseed_event_radial(dirname=None, project_folder=None, starting=None, stopping = None, lat1=None, lon1=None, maxrad=None):
starttime = starting
endtime = stopping
domain = CircularDomain(lat1,lon1,minradius=0.0, maxradius=maxrad)
#domain = RectangularDomain(minlatitude=minlat, maxlatitude=maxlat,minlongitude=minlon, maxlongitude=maxlon)
#domain = RectangularDomain(minlatitude=-90, maxlatitude=-60,minlongitude=-180, maxlongitude=180)
restrictions = Restrictions(starttime=starttime, endtime=endtime,chunklength_in_sec=86400,reject_channels_with_gaps=False,minimum_length=0,minimum_interstation_distance_in_m=1000, channel_priorities=["HH[ZNE12]", "BH[ZNE12]","EH[ZNE12]","SH[ZNE12]","HN[ZNE12]","EN[ZNE12]"])
mseed1 = project_folder+'/'+dirname
if not os.path.exists(mseed1):
os.makedirs(mseed1) #domain = CircularDomain(-90,0,minradius=0.0, maxradius=30.0)
#original1 = project_folder+'/*.[BH]??__'+dirname+'*'
#os.system("mv %s %s" % (original1,mseed1))
mdl = MassDownloader()
mdl.download(domain, restrictions, threads_per_client=4, mseed_storage=mseed1,stationxml_storage=mseed1)
def process_local_sac():
print('Local sac files')
def build_tt_tables(lat1=None,long1=None,maxrad=None,starting=None, stopping=None, channel_codes=['EH','BH','HH','HN'],db=None,maxdist=500.,source_depth=5., delta_distance=1, model=None):
"""
"""
# Create a connection to an sqlalchemy database
tt_engine=create_engine(db,echo=False, connect_args={'check_same_thread': False})
tt_stations_1D.BaseTT1D.metadata.create_all(tt_engine)
TTSession=sessionmaker(bind=tt_engine)
tt_session=TTSession()
fdsnclient=Client()
inv=fdsnclient.get_stations(starttime=starting,endtime=stopping,latitude=lat1,longitude=long1,maxradius=maxrad,channel='*H*',level='response')
# Get inventory
for net in inv:
network=net.code
for sta in net:
loccodes=[]
for ch in sta:
for cc in channel_codes:
if re.match(cc,ch.code):
if not ch.location_code in loccodes:
loccodes.append(ch.location_code)
for loc in loccodes:
print(sta.code,network,loc,sta.latitude,sta.longitude,sta.elevation)
station=tt_stations_1D.Station1D(sta.code,network,loc,sta.latitude,sta.longitude,sta.elevation)
tt_session.add(station)
tt_session.commit()
# Now we have to build our traveltime lookup tables
# We will use IASP91 here but obspy.taup does let you build your own model
if model is not None:
filename = model
#vmodel = VelocityModel.read_tvel_file(filename)
        # NOTE: project_folder is not defined in this function's scope; build_taup_model()
        # writes the .npz into the current working directory, so look it up there.
        model_npz = os.path.join(os.getcwd(), f"{filename[:-5]}.npz")
        if os.path.exists(model_npz):
            velmod = TauPyModel(model=model_npz)
        else:
            build_taup_model(filename, output_folder=os.getcwd())
            velmod = TauPyModel(model=model_npz)
else:
velmod=taup.TauPyModel(model='iasp91')
#delta_distance=1. # km for spacing tt calculations
distance_km=np.arange(0,maxdist+delta_distance,delta_distance)
for d_km in distance_km:
d_deg=geodetics.kilometer2degrees(d_km)
ptimes=[]
stimes=[]
p_arrivals=velmod.get_travel_times(source_depth_in_km=source_depth,
distance_in_degree=d_deg,phase_list=['P','p'])
for p in p_arrivals:
ptimes.append(p.time)
s_arrivals=velmod.get_travel_times(source_depth_in_km=source_depth,
distance_in_degree=d_deg,phase_list=['S','s'])
for s in s_arrivals:
stimes.append(s.time)
tt_entry=tt_stations_1D.TTtable1D(d_km,d_deg,np.min(ptimes),np.min(stimes),np.min(stimes)-np.min(ptimes))
tt_session.add(tt_entry)
tt_session.commit() # Probably faster to do the commit outside of loop but oh well
tt_session.close()
return inv
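# Hypothetical call sketch (region, dates and database path are assumptions):
# inv = build_tt_tables(lat1=36.0, long1=-98.0, maxrad=3.0,
#                       starting=UTCDateTime('2016-09-03'), stopping=UTCDateTime('2016-09-04'),
#                       db='sqlite:///tt_ex_1D_ok.db', maxdist=500., source_depth=5.)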
def build_tt_tables_local_directory(dirname=None,project_folder=None,channel_codes=['EH','BH','HH','HN'],db=None,maxdist=800.,source_depth=5.,delta_distance=1, model=None):
"""
"""
# Create a connection to an sqlalchemy database
tt_engine=create_engine(db,echo=False, connect_args={'check_same_thread': False})
tt_stations_1D.BaseTT1D.metadata.create_all(tt_engine)
TTSession=sessionmaker(bind=tt_engine)
tt_session=TTSession()
inv = Inventory()
dir1a = glob.glob(project_folder+'/'+dirname+'/*xml')
for file1 in dir1a:
inv1a = read_inventory(file1)
inv.networks.extend(inv1a)
for net in inv:
network=net.code
for sta in net:
loccodes=[]
for ch in sta:
for cc in channel_codes:
if re.match(cc,ch.code):
if not ch.location_code in loccodes:
loccodes.append(ch.location_code)
for loc in loccodes:
print(sta.code,network,loc,sta.latitude,sta.longitude,sta.elevation)
station=tt_stations_1D.Station1D(sta.code,network,loc,sta.latitude,sta.longitude,sta.elevation)
tt_session.add(station)
tt_session.commit()
# Now we have to build our traveltime lookup tables
# We will use IASP91 here but obspy.taup does let you build your own model
if model is not None:
filename = model
#vmodel = VelocityModel.read_tvel_file(filename)
if os.path.exists(project_folder+'/'+f"{filename[:-5]}.npz"):
velmod = TauPyModel(model=project_folder+'/'+f"{filename[:-5]}.npz")
else:
taup_model = build_taup_model(filename, output_folder=os.getcwd())
velmod = TauPyModel(model=project_folder+'/'+f"{filename[:-5]}.npz")
else:
velmod=taup.TauPyModel(model='iasp91')
#delta_distance=1. # km for spacing tt calculations
distance_km=np.arange(0,maxdist+delta_distance,delta_distance)
for d_km in distance_km:
d_deg=geodetics.kilometer2degrees(d_km)
ptimes=[]
stimes=[]
p_arrivals=velmod.get_travel_times(source_depth_in_km=source_depth,
distance_in_degree=d_deg,phase_list=['P','p'])
for p in p_arrivals:
ptimes.append(p.time)
s_arrivals=velmod.get_travel_times(source_depth_in_km=source_depth,
distance_in_degree=d_deg,phase_list=['S','s'])
for s in s_arrivals:
stimes.append(s.time)
#print(d_km,ptimes,stimes)
tt_entry=tt_stations_1D.TTtable1D(d_km,d_deg,np.min(ptimes),np.min(stimes),np.min(stimes)-np.min(ptimes))
tt_session.add(tt_entry)
tt_session.commit() # Probably faster to do the commit outside of loop but oh well
tt_session.close()
return inv
def build_tt_tables_local_directory_ant(dirname=None,project_folder=None,channel_codes=['EH','BH','HH'],db=None,maxdist=800.,source_depth=5.,delta_distance=1):
"""
"""
# Create a connection to an sqlalchemy database
tt_engine=create_engine(db,echo=False, connect_args={'check_same_thread': False})
tt_stations_1D.BaseTT1D.metadata.create_all(tt_engine)
TTSession=sessionmaker(bind=tt_engine)
tt_session=TTSession()
inv = Inventory()
dir1a = glob.glob(project_folder+'/'+dirname+'/*xml')
    # Basemap is not imported at module level; it is only needed for this polar variant
    from mpl_toolkits.basemap import Basemap
    m = Basemap(projection='spstere',boundinglat=-60,lon_0=180,resolution='i')
for file1 in dir1a:
inv1a = read_inventory(file1)
inv.networks.extend(inv1a)
for net in inv:
network=net.code
for sta in net:
loccodes=[]
for ch in sta:
for cc in channel_codes:
if re.match(cc,ch.code):
if not ch.location_code in loccodes:
loccodes.append(ch.location_code)
for loc in loccodes:
print(sta.code,network,loc,sta.latitude,sta.longitude,sta.elevation)
x,y = m(sta.longitude,sta.latitude)
station=tt_stations_1D.Station1D(sta.code,network,loc,y,x,sta.elevation)
tt_session.add(station)
tt_session.commit()
# Now we have to build our traveltime lookup tables
# We will use IASP91 here but obspy.taup does let you build your own model
velmod=taup.TauPyModel(model='iasp91')
#delta_distance=1. # km for spacing tt calculations
distance_km=np.arange(0,maxdist+delta_distance,delta_distance)
for d_km in distance_km:
d_deg=geodetics.kilometer2degrees(d_km)
ptimes=[]
stimes=[]
p_arrivals=velmod.get_travel_times(source_depth_in_km=source_depth,
distance_in_degree=d_deg,phase_list=['P','p'])
for p in p_arrivals:
ptimes.append(p.time)
s_arrivals=velmod.get_travel_times(source_depth_in_km=source_depth,
distance_in_degree=d_deg,phase_list=['S','s'])
for s in s_arrivals:
stimes.append(s.time)
tt_entry=tt_stations_1D.TTtable1D(d_km,d_deg,np.min(ptimes),np.min(stimes),np.min(stimes)-np.min(ptimes))
tt_session.add(tt_entry)
tt_session.commit() # Probably faster to do the commit outside of loop but oh well
tt_session.close()
return inv
def fb_pick(dbengine=None,picker=None,fileinput=None):
fdir = []
engine_assoc=dbengine
with open(fileinput) as f:
for line in f:
tmp = line.split()
fdir.append([tmp[0], tmp[1], tmp[2]])
nsta = len(fdir)
for i in range(nsta):
Session=sessionmaker(bind=engine_assoc)
dbsession=Session()
st = Stream()
st += read(fdir[i][0])
st += read(fdir[i][1])
st += read(fdir[i][2])
st.merge(fill_value='interpolate')
#print(st)
for tr in st:
if isinstance(tr.data, np.ma.masked_array):
tr.data = tr.data.filled()
st.detrend(type='linear')
for tr in st:
print(tr)
scnl,picks,polarity,snr,uncert=picker.picks(tr)
t_create=datetime.utcnow()
for i in range(len(picks)):
                new_pick=tables1D.Pick(scnl,picks[i].datetime,polarity[i],snr[i],uncert[i],t_create)
                dbsession.add(new_pick)
        # commit once per station so the picks are actually persisted to the database
        dbsession.commit()
def gpd_pick_add(dbsession=None,fileinput=None,inventory=None):
filepath = fileinput
with open(filepath) as fp:
line = fp.readline()
cnt = 1
while line:
try:
line = fp.readline()
#print(line)
cnt += 1
if len(line.split())>4:
sta1 = line.split()[1]
chan1 = line.split()[2]
#print(sta1,chan1)
#scnl.station = sta1
net1 = line.split()[0]
scnl = SCNL([sta1,chan1,net1])
#print(scnl.channel)
type1 = line.split()[3]
scnl.phase = type1
#print(scnl.phase)
time1 = UTCDateTime(line.split()[4]).datetime
else:
sta1 = line.split()[0]
chan1 = line.split()[1]
#print(sta1,chan1)
#scnl.station = sta1
#net1 = line.split()[0]
try:
net1 = inventory.select(station=sta1)[0].code
except:
net1 = 'OK'
pass
scnl = SCNL([sta1,chan1,net1])
#print(scnl.channel)
type1 = line.split()[2]
scnl.phase = type1
#print(scnl.phase)
time1 = UTCDateTime(line.split()[3]).datetime
t_create=datetime.utcnow()
new_pick=tables1D.Pick(scnl,time1,'',10,0.1,t_create)
#tables1D.Pick.phase=type1
dbsession.add(new_pick) # Add pick i to the database
dbsession.commit() #
except:
pass
def get_chan1(stationfile):
if len(list(filter(None, stationfile.split('/')[-1].split('.'))))==5:
comp = list(filter(None, stationfile.split('/')[-1].split('.')))[3][2]
else:
comp = list(filter(None, stationfile.split('/')[-1].split('.')))[2][2]
return comp
def get_chan3(stationfile):
if len(list(filter(None, stationfile.split('/')[-1].split('.'))))==5:
comp3 = list(filter(None, stationfile.split('/')[-1].split('.')))[3][0:3]
else:
comp3 = list(filter(None, stationfile.split('/')[-1].split('.')))[2][0:3]
return comp3
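# Added commentary (not original): the two helpers above extract the component code from
# MassDownloader-style file names; e.g. for 'OK.X34A.00.HHZ__20160903T000000Z__20160904T000000Z.mseed'
# get_chan3() returns 'HHZ' and get_chan1() returns 'Z'. Names without a location code yield
# only four dot-separated fields after filtering and fall through to the else branch.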
def detection_continuous(dirname=None, project_folder=None, project_code=None, local=True, machine=True, machine_picker=None, single_date=None, make3=True, latitude=None, longitude=None, max_radius=None, fullpath_python=None):
# starting = UTCDateTime(single_date.strftime("%Y")+'-'+single_date.strftime("%m")+'-'+single_date.strftime("%d")+'T00:00:00.0')
# stopping = starting + 86430
starting = UTCDateTime(single_date.strftime("%Y")+'-'+single_date.strftime("%m")+'-'+single_date.strftime("%d")+'T00:00:00.0')
stopping = starting + 86430
dir1 = project_folder+'/'+dirname
#print(single_date.strftime("%Y%m%d"))
#print(dir1+'/1dassociator_'+project_code+'.db')
if os.path.exists(dir1+'/1dassociator_'+project_code+'.db'):
os.remove(dir1+'/1dassociator_'+project_code+'.db')
db_assoc='sqlite:///'+dir1+'/1dassociator_'+project_code+'.db'
# if os.path.exists(dir1+'/tt_ex_1D_'+project_code+'.db'):
# os.remove(dir1+'/tt_ex_1D_'+project_code+'.db')
# db_tt='sqlite:///'+dir1+'/tt_ex_1D_'+project_code+'.db' # Traveltime database44.448,longitude=-115.136
# print(db_tt)
# if local:
# inventory = build_tt_tables_local_directory(dirname=dirname,project_folder=project_folder,channel_codes=['EH','BH','HH'],db=db_tt,maxdist=maxdist,source_depth=5.)
# else:
# inventory = build_tt_tables(lat1=latitude,long1=longitude,maxrad=max_radius,starting=starting, stopping=stopping, channel_codes=['EH','BH','HH'],db=db_tt,maxdist=maxdist,source_depth=5.)
engine_assoc=create_engine(db_assoc, echo=False, connect_args={'check_same_thread': False})
tables1D.Base.metadata.create_all(engine_assoc)
Session=sessionmaker(bind=engine_assoc)
session=Session()
filelist = glob.glob(dir1+'/*mseed') or glob.glob(dir1+'/*SAC')
stations = set()
for file1 in filelist:
station = file1.split('.')[1]
net = file1.split('.')[0].split('/')[-1]
netsta = net+'.'+station
print(file1.split('.')[1])
stations.add(netsta)
#### create infile
day_strings = []
for stationin in stations:
station3 = glob.glob(dir1+'/*'+stationin+'.*mseed') or glob.glob(dir1+'/*'+stationin+'.*SAC')
station3a = [None,None,None]
if len(station3)>3:
#print(station3)
ind1 = np.empty((len(station3),1))
ind1[:] = np.nan
for idxs, station1 in enumerate(station3):
if get_chan3(station1) == 'HHZ':
ind1[idxs] = 2
elif get_chan3(station1) == 'HHN' or get_chan3(station1) == 'HH1':
ind1[idxs] = 0
elif get_chan3(station1) == 'HHE' or get_chan3(station1) == 'HH2':
ind1[idxs] = 1
#print(idxs)
#if ind1:
# station3a[ind1] = station1
#ind2 = np.argwhere(~np.isnan(ind1))[:,0]
for idxsa, ind2a in enumerate(ind1):
if ~np.isnan(ind2a[0]):
#print(ind2a)
#print(station3a)
station3a[int(ind2a[0])] = station3[idxsa]
else:
for station1 in station3:
if get_chan1(station1) == 'Z':
ind1 = 2
elif get_chan1(station1) == 'N' or get_chan1(station1) == '1':
ind1 = 0
elif get_chan1(station1) == 'E' or get_chan1(station1) == '2':
ind1 = 1
#print(ind1)
station3a[ind1] = station1
if any(elem is None for elem in station3a):
            if make3: # if only a vertical component exists, duplicate it into dummy horizontals so the station still has 3 channels
if station3a[-1] is not None and station3a[0] is None and station3a[1] is None:
st = read(station3a[-1])
st[0].stats.channel = st[0].stats.channel[0:2]+'E'
if len(station3a[-1].split('__')) == 1:
st[0].write('.'.join(station3a[-1].split('__')[0].split('.')[0:3])+'.'+st[0].stats.channel[0:2]+'E.mseed')
else:
st[0].write('.'.join(station3a[-1].split('__')[0].split('.')[0:3])+'.'+st[0].stats.channel[0:2]+'E'+'__'+'__'.join(station3a[-1].split('__')[1:3]))
if len(station3a[-1].split('__')) == 1:
st[0].write('.'.join(station3a[-1].split('__')[0].split('.')[0:3])+'.'+st[0].stats.channel[0:2]+'N.mseed')
else:
st[0].write('.'.join(station3a[-1].split('__')[0].split('.')[0:3])+'.'+st[0].stats.channel[0:2]+'N'+'__'+'__'.join(station3a[-1].split('__')[1:3]))
station3 = glob.glob(dir1+'/*'+stationin+'.*mseed') or glob.glob(dir1+'/*'+stationin+'.*SAC')
station3a = [None,None,None]
for station1 in station3:
if get_chan1(station1) == 'Z':
ind1 = 2
elif get_chan1(station1) == 'N' or get_chan1(station1) == '1':
ind1 = 0
elif get_chan1(station1) == 'E' or get_chan1(station1) == '2':
ind1 = 1
#print(ind1)
station3a[ind1] = station1
print(station3a)
if any(elem is None for elem in station3a):
continue
#continue
day_strings.append((station3a[0]+' '+station3a[1]+' '+station3a[2]))
day_string = "\n".join(day_strings)
with open(dir1+'/dayfile.in', "w") as open_file:
open_file.write(day_string)
infile = dir1+'/dayfile.in'
outfile = dir1+'/gpd_picks.out'
#gpd_predict.py -V -P -I infile -O outflie
#os.system("gpd_predict.py -V -P -I %s -O %s")%(infile, outfile)
#gpd_predict(inputfile=infile,outputfile=outfile)
fileinassociate = outfile
if local:
inv = Inventory()
dir1a = glob.glob(project_folder+'/'+dirname+'/*xml')
for file1 in dir1a:
inv1a = read_inventory(file1)
inv.networks.extend(inv1a)
else:
fdsnclient=Client()
inv=fdsnclient.get_stations(starttime=starting,endtime=stopping,latitude=latitude,longitude=longitude,maxradius=max_radius,channel='*HZ',level='channel')
if machine == True and machine_picker is None:
machine_picker = 'GPD'
if machine == True and machine_picker == 'GPD':
fullpath1 = pathgpd+'/gpd_predict.py'
if fullpath_python:
os.system(fullpath_python+" "+fullpath1+" -V -P -I %s -O %s -F %s" % (infile, outfile, pathgpd))
else:
os.system("gpd_predict -V -P -I %s -O %s -F %s" % (infile, outfile, pathgpd))
gpd_pick_add(dbsession=session,fileinput=fileinassociate,inventory=inv)
elif machine == True and machine_picker == 'EQTransformer':
fullpath2 = pathEQT+'/mseed_predictor.py'
if fullpath_python:
os.system(fullpath_python+" "+fullpath2+" -I %s -O %s -F %s" % (infile, outfile, pathEQT))
else:
os.system("mseed_predictor -I %s -O %s -F %s" % (infile, outfile, pathEQT))
gpd_pick_add(dbsession=session,fileinput=fileinassociate,inventory=inv)
else:
picker = fbpicker.FBPicker(t_long = 5, freqmin = 1, mode = 'rms', t_ma = 20, nsigma = 7, t_up = 0.7, nr_len = 2, nr_coeff = 2, pol_len = 10, pol_coeff = 10, uncert_coeff = 3)
fb_pick(dbengine=engine_assoc,picker=picker,fileinput=infile)
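# Hypothetical driver sketch (dates, paths and parameters are assumptions, not from this file):
# for single_date in daterange(date(2016, 9, 3), date(2016, 9, 4)):
#     detection_continuous(dirname=single_date.strftime("%Y%m%d"),
#                          project_folder='/data/myproject', project_code='ok',
#                          local=True, machine=True, machine_picker='EQTransformer',
#                          single_date=single_date)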
def association_continuous(dirname=None, project_folder=None, project_code=None, maxdist = None, maxkm=None, single_date=None, local=True, nsta_declare=4, delta_distance=1, latitude=None, longitude=None, max_radius=None, model=None):
starting = UTCDateTime(single_date.strftime("%Y")+'-'+single_date.strftime("%m")+'-'+single_date.strftime("%d")+'T00:00:00.0')
stopping = starting + 86430
dir1 = project_folder+'/'+dirname
print(single_date.strftime("%Y%m%d"))
#print(dir1+'/1dassociator_'+project_code+'.db')
# if os.path.exists(dir1+'/1dassociator_'+project_code+'.db'):
# os.remove(dir1+'/1dassociator_'+project_code+'.db')
# db_assoc='sqlite:///'+dir1+'/1dassociator_'+project_code+'.db'
if os.path.exists(dir1+'/tt_ex_1D_'+project_code+'.db'):
os.remove(dir1+'/tt_ex_1D_'+project_code+'.db')
db_tt='sqlite:///'+dir1+'/tt_ex_1D_'+project_code+'.db' # Traveltime database44.448,longitude=-115.136
print(db_tt)
if local:
inventory = build_tt_tables_local_directory(dirname=dirname,project_folder=project_folder,channel_codes=['EH','BH','HH'],db=db_tt,maxdist=maxdist,source_depth=5., delta_distance=delta_distance, model=model)
else:
inventory = build_tt_tables(lat1=latitude,long1=longitude,maxrad=max_radius,starting=starting, stopping=stopping, channel_codes=['EH','BH','HH'],db=db_tt,maxdist=maxdist,source_depth=5., delta_distance=delta_distance, model=model)
inventory.write(dir1+'/dailyinventory.xml',format="STATIONXML")
if not os.path.exists(dir1+'/1dassociator_'+project_code+'.db'):
db_assoc='sqlite:///'+dir1+'/1dassociator_'+project_code+'.db'
engine_assoc=create_engine(db_assoc, echo=False, connect_args={'check_same_thread': False})
tables1D.Base.metadata.create_all(engine_assoc)
Session=sessionmaker(bind=engine_assoc)
session=Session()
gpd_pick_add(dbsession=session,fileinput=dir1+'/gpd_picks.out',inventory=inventory)
db_assoc='sqlite:///'+dir1+'/1dassociator_'+project_code+'.db'
assocXX=assoc1D.LocalAssociator(db_assoc, db_tt, max_km = maxkm, aggregation = 1, aggr_norm = 'L2', cutoff_outlier = 10, assoc_ot_uncert = 3, nsta_declare = nsta_declare, loc_uncert_thresh = 0.2)
print("aggregate")
t0=datetime.utcnow()
# Identify candidate events (Pick Aggregation)
assocXX.id_candidate_events()
t1=datetime.utcnow()
print('Took '+str(t1-t0))
print("associate")
# Associate events
assocXX.associate_candidates()
t2=datetime.utcnow()
print('Took '+str(t2-t1))
# Add singles stations to events
try:
assocXX.single_phase()
except:
pass
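# Hypothetical continuation of the driver sketch above (same assumed paths): associate the
# picks for the same day once detection has produced gpd_picks.out.
# association_continuous(dirname=single_date.strftime("%Y%m%d"),
#                        project_folder='/data/myproject', project_code='ok',
#                        maxdist=300., maxkm=300., single_date=single_date,
#                        local=True, nsta_declare=4)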
def create_connection(db_file):
""" create a database connection to the SQLite database
specified by the db_file
:param db_file: database file
:return: Connection object or None
"""
try:
conn = sqlite3.connect(db_file, check_same_thread = False)
return conn
except Error as e:
print(e)
return None
def hypo_station(project_folder=None, project_code=None, catalog_year=None, year=None):
hypo71_string_sta = ""
station_strings = []
f1 = open(project_folder+'/'+'sta','w')
#f2 = open(project_folder+'/'+'station.dat', 'w')
#for stas in temp:
if catalog_year:
files = sorted(glob.glob(project_folder+'/'+str(year)+'*/tt*'+project_code+'.db'))
else:
files = sorted(glob.glob(project_folder+'/*/tt*'+project_code+'.db')) or glob.glob(project_folder+'/tt*'+project_code+'.db')
#print(files)
stas1 = pd.DataFrame()
for dfilesta in files:
conn1 = create_connection(dfilesta)
with conn1:
cur1 = conn1.cursor()
cur1.execute("SELECT * FROM stations")
#rows = cur1.fetchall()
for row in cur1:
#print(row[0],row[1])
#(row[0])
df4 = pd.DataFrame()
df4 = pd.DataFrame({'station': row[1], 'net':row[2],'latitude':row[4],'longitude':row[5],'elevation':row[6]}, index=[0])
stas1=stas1.append(df4)
conn1.close()
stas1 = stas1.drop_duplicates()
stas1 = stas1.reset_index(drop=True)
print(stas1)
for idx1 in stas1.index:
stas = stas1.iloc[idx1]
print(stas)
# temp = stas1[stas1['station'].str.contains(sta_used)]
# stas = temp.iloc[0]
if len(stas['station'])>4:
sta = stas['station'][1:]
else:
sta = stas['station']
lon = stas['longitude']
lon_deg = int(abs(lon))
lon_min = (abs(lon) - abs(lon_deg)) * 60.
lat = stas['latitude']
lat_deg = int(abs(lat))
lat_min = (abs(lat) - abs(lat_deg)) * 60.
hem_NS = 'N'
hem_EW = 'E'
if lat < 0:
hem_NS = 'S'
if lon < 0:
hem_EW = 'W'
# hypo 71 format uses elevation in meters not kilometers
ele = stas['elevation']
hypo71_string_sta += fmt % (sta, lat_deg, lat_min, hem_NS,
lon_deg, lon_min, hem_EW, ele)
station_strings.append("%s %.6f %.6f %i" % (sta, stas['latitude'], stas['longitude'], stas['elevation']))
#print(hypo71_string_sta)
station_string = "\n".join(station_strings)
with open(project_folder+'/'+'station.dat', "w") as open_file:
open_file.write(station_string)
f1.write(str(hypo71_string_sta))
f1.close()
def select_all_associated(conn, f0):
"""
Query all rows in the associated table
:param conn: the Connection object
:return:
"""
cur1 = conn.cursor()
cur1.execute("SELECT * FROM associated")
stalistall = set()
rows = cur1.fetchall()
dfs1 = | pd.DataFrame() | pandas.DataFrame |
#
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import datetime
from datetime import timedelta
from functools import partial
from textwrap import dedent
from copy import deepcopy
import logbook
import toolz
from logbook import TestHandler, WARNING
from parameterized import parameterized
from six import iteritems, itervalues, string_types
from six.moves import range
from testfixtures import TempDirectory
import numpy as np
import pandas as pd
import pytz
from pandas.errors import PerformanceWarning
from trading_calendars import get_calendar, register_calendar
import zipline.api
from zipline.api import FixedSlippage
from zipline.assets import Equity, Future, Asset
from zipline.assets.continuous_futures import ContinuousFuture
from zipline.assets.synthetic import (
make_jagged_equity_info,
make_simple_equity_info,
)
from zipline.errors import (
AccountControlViolation,
CannotOrderDelistedAsset,
IncompatibleSlippageModel,
RegisterTradingControlPostInit,
ScheduleFunctionInvalidCalendar,
SetCancelPolicyPostInit,
SymbolNotFound,
TradingControlViolation,
UnsupportedCancelPolicy,
UnsupportedDatetimeFormat,
ZeroCapitalError
)
from zipline.finance.commission import PerShare, PerTrade
from zipline.finance.execution import LimitOrder
from zipline.finance.order import ORDER_STATUS
from zipline.finance.trading import SimulationParameters
from zipline.finance.asset_restrictions import (
Restriction,
HistoricalRestrictions,
StaticRestrictions,
RESTRICTION_STATES,
)
from zipline.finance.controls import AssetDateBounds
from zipline.testing import (
FakeDataPortal,
create_daily_df_for_asset,
create_data_portal_from_trade_history,
create_minute_df_for_asset,
make_test_handler,
make_trade_data_for_asset_info,
parameter_space,
str_to_seconds,
to_utc,
)
from zipline.testing import RecordBatchBlotter
import zipline.testing.fixtures as zf
from zipline.test_algorithms import (
access_account_in_init,
access_portfolio_in_init,
api_algo,
api_get_environment_algo,
api_symbol_algo,
handle_data_api,
handle_data_noop,
initialize_api,
initialize_noop,
noop_algo,
record_float_magic,
record_variables,
call_with_kwargs,
call_without_kwargs,
call_with_bad_kwargs_current,
call_with_bad_kwargs_history,
bad_type_history_assets,
bad_type_history_fields,
bad_type_history_bar_count,
bad_type_history_frequency,
bad_type_history_assets_kwarg_list,
bad_type_current_assets,
bad_type_current_fields,
bad_type_can_trade_assets,
bad_type_is_stale_assets,
bad_type_history_assets_kwarg,
bad_type_history_fields_kwarg,
bad_type_history_bar_count_kwarg,
bad_type_history_frequency_kwarg,
bad_type_current_assets_kwarg,
bad_type_current_fields_kwarg,
call_with_bad_kwargs_get_open_orders,
call_with_good_kwargs_get_open_orders,
call_with_no_kwargs_get_open_orders,
empty_positions,
no_handle_data,
)
from zipline.testing.predicates import assert_equal
from zipline.utils.api_support import ZiplineAPI
from zipline.utils.context_tricks import CallbackManager, nop_context
from zipline.utils.events import (
date_rules,
time_rules,
Always,
ComposedRule,
Never,
OncePerDay,
)
import zipline.utils.factory as factory
# Because test cases appear to reuse some resources.
_multiprocess_can_split_ = False
class TestRecord(zf.WithMakeAlgo, zf.ZiplineTestCase):
ASSET_FINDER_EQUITY_SIDS = (133,)
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
def test_record_incr(self):
def initialize(self):
self.incr = 0
def handle_data(self, data):
self.incr += 1
self.record(incr=self.incr)
name = 'name'
self.record(name, self.incr)
zipline.api.record(name, self.incr, 'name2', 2, name3=self.incr)
output = self.run_algorithm(
initialize=initialize,
handle_data=handle_data,
)
np.testing.assert_array_equal(output['incr'].values,
range(1, len(output) + 1))
np.testing.assert_array_equal(output['name'].values,
range(1, len(output) + 1))
np.testing.assert_array_equal(output['name2'].values,
[2] * len(output))
np.testing.assert_array_equal(output['name3'].values,
range(1, len(output) + 1))
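# Added note (not from the original suite): the test above exercises both calling styles of
# zipline.api.record() (keyword and positional name/value pairs); each recorded name becomes
# a column in the daily performance frame returned by the algorithm run.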
class TestMiscellaneousAPI(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-04', tz='UTC')
END_DATE = pd.Timestamp('2006-01-05', tz='UTC')
SIM_PARAMS_DATA_FREQUENCY = 'minute'
sids = 1, 2
# FIXME: Pass a benchmark source instead of this.
BENCHMARK_SID = None
@classmethod
def make_equity_info(cls):
return pd.concat((
make_simple_equity_info(cls.sids, '2002-02-1', '2007-01-01'),
pd.DataFrame.from_dict(
{3: {'symbol': 'PLAY',
'start_date': '2002-01-01',
'end_date': '2004-01-01',
'exchange': 'TEST'},
4: {'symbol': 'PLAY',
'start_date': '2005-01-01',
'end_date': '2006-01-01',
'exchange': 'TEST'}},
orient='index',
),
))
@classmethod
def make_futures_info(cls):
return pd.DataFrame.from_dict(
{
5: {
'symbol': 'CLG06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-01-20', tz='UTC'),
'exchange': 'TEST'
},
6: {
'root_symbol': 'CL',
'symbol': 'CLK06',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-03-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-04-20', tz='UTC'),
'exchange': 'TEST',
},
7: {
'symbol': 'CLQ06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-06-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-07-20', tz='UTC'),
'exchange': 'TEST',
},
8: {
'symbol': 'CLX06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2006-02-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-09-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-10-20', tz='UTC'),
'exchange': 'TEST',
}
},
orient='index',
)
def test_cancel_policy_outside_init(self):
code = """
from zipline.api import cancel_policy, set_cancel_policy
def initialize(algo):
pass
def handle_data(algo, data):
set_cancel_policy(cancel_policy.NeverCancel())
"""
algo = self.make_algo(script=code)
with self.assertRaises(SetCancelPolicyPostInit):
algo.run()
def test_cancel_policy_invalid_param(self):
code = """
from zipline.api import set_cancel_policy
def initialize(algo):
set_cancel_policy("foo")
def handle_data(algo, data):
pass
"""
algo = self.make_algo(script=code)
with self.assertRaises(UnsupportedCancelPolicy):
algo.run()
def test_zipline_api_resolves_dynamically(self):
# Make a dummy algo.
algo = self.make_algo(
initialize=lambda context: None,
handle_data=lambda context, data: None,
)
# Verify that api methods get resolved dynamically by patching them out
# and then calling them
for method in algo.all_api_methods():
name = method.__name__
sentinel = object()
def fake_method(*args, **kwargs):
return sentinel
setattr(algo, name, fake_method)
with ZiplineAPI(algo):
self.assertIs(sentinel, getattr(zipline.api, name)())
def test_sid_datetime(self):
algo_text = """
from zipline.api import sid, get_datetime
def initialize(context):
pass
def handle_data(context, data):
aapl_dt = data.current(sid(1), "last_traded")
assert_equal(aapl_dt, get_datetime())
"""
self.run_algorithm(
script=algo_text,
namespace={'assert_equal': self.assertEqual},
)
def test_datetime_bad_params(self):
algo_text = """
from zipline.api import get_datetime
from pytz import timezone
def initialize(context):
pass
def handle_data(context, data):
get_datetime(timezone)
"""
algo = self.make_algo(script=algo_text)
with self.assertRaises(TypeError):
algo.run()
@parameterized.expand([
(-1000, 'invalid_base'),
(0, 'invalid_base'),
])
def test_invalid_capital_base(self, cap_base, name):
"""
Test that the appropriate error is being raised and orders aren't
filled for algos with capital base <= 0
"""
algo_text = """
def initialize(context):
pass
def handle_data(context, data):
order(sid(24), 1000)
"""
sim_params = SimulationParameters(
start_session=pd.Timestamp("2006-01-04", tz='UTC'),
end_session=pd.Timestamp("2006-01-06", tz='UTC'),
capital_base=cap_base,
data_frequency="minute",
trading_calendar=self.trading_calendar
)
with self.assertRaises(ZeroCapitalError) as exc:
# make_algo will trace to TradingAlgorithm,
# where the exception will be raised
self.make_algo(script=algo_text, sim_params=sim_params)
# Make sure the correct error was raised
error = exc.exception
self.assertEqual(str(error),
'initial capital base must be greater than zero')
def test_get_environment(self):
expected_env = {
'arena': 'backtest',
'data_frequency': 'minute',
'start': pd.Timestamp('2006-01-04 14:31:00+0000', tz='utc'),
'end': pd.Timestamp('2006-01-05 21:00:00+0000', tz='utc'),
'capital_base': 100000.0,
'platform': 'zipline'
}
def initialize(algo):
self.assertEqual('zipline', algo.get_environment())
self.assertEqual(expected_env, algo.get_environment('*'))
def handle_data(algo, data):
pass
self.run_algorithm(initialize=initialize, handle_data=handle_data)
def test_get_open_orders(self):
def initialize(algo):
algo.minute = 0
def handle_data(algo, data):
if algo.minute == 0:
# Should be filled by the next minute
algo.order(algo.sid(1), 1)
# Won't be filled because the price is too low.
algo.order(
algo.sid(2), 1, style=LimitOrder(0.01, asset=algo.sid(2))
)
algo.order(
algo.sid(2), 1, style=LimitOrder(0.01, asset=algo.sid(2))
)
algo.order(
algo.sid(2), 1, style=LimitOrder(0.01, asset=algo.sid(2))
)
all_orders = algo.get_open_orders()
self.assertEqual(list(all_orders.keys()), [1, 2])
self.assertEqual(all_orders[1], algo.get_open_orders(1))
self.assertEqual(len(all_orders[1]), 1)
self.assertEqual(all_orders[2], algo.get_open_orders(2))
self.assertEqual(len(all_orders[2]), 3)
if algo.minute == 1:
# First order should have filled.
# Second order should still be open.
all_orders = algo.get_open_orders()
self.assertEqual(list(all_orders.keys()), [2])
self.assertEqual([], algo.get_open_orders(1))
orders_2 = algo.get_open_orders(2)
self.assertEqual(all_orders[2], orders_2)
self.assertEqual(len(all_orders[2]), 3)
for order_ in orders_2:
algo.cancel_order(order_)
all_orders = algo.get_open_orders()
self.assertEqual(all_orders, {})
algo.minute += 1
self.run_algorithm(initialize=initialize, handle_data=handle_data)
def test_schedule_function_custom_cal(self):
        # run a simulation on the XSHG cal, and schedule a function
        # using the CN_EQUITIES (XSHG) cal
algotext = """
from zipline.api import (
schedule_function, get_datetime, time_rules, date_rules, calendars,
)
def initialize(context):
schedule_function(
func=log_nyse_open,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_open(),
calendar=calendars.CN_EQUITIES,
)
schedule_function(
func=log_nyse_close,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_close(),
calendar=calendars.CN_EQUITIES,
)
context.nyse_opens = []
context.nyse_closes = []
def log_nyse_open(context, data):
context.nyse_opens.append(get_datetime())
def log_nyse_close(context, data):
context.nyse_closes.append(get_datetime())
"""
algo = self.make_algo(
script=algotext,
sim_params=self.make_simparams(
trading_calendar=get_calendar("XSHG"),
)
)
algo.run()
nyse = get_calendar("XSHG")
for minute in algo.nyse_opens:
# each minute should be a nyse session open
session_label = nyse.minute_to_session_label(minute)
session_open = nyse.session_open(session_label)
self.assertEqual(session_open, minute)
for minute in algo.nyse_closes:
# each minute should be a minute before a nyse session close
session_label = nyse.minute_to_session_label(minute)
session_close = nyse.session_close(session_label)
self.assertEqual(session_close - timedelta(minutes=1), minute)
# Test that passing an invalid calendar parameter raises an error.
erroring_algotext = dedent(
"""
from zipline.api import schedule_function
from trading_calendars import get_calendar
def initialize(context):
schedule_function(func=my_func, calendar=get_calendar('XNYS'))
def my_func(context, data):
pass
"""
)
algo = self.make_algo(
script=erroring_algotext,
sim_params=self.make_simparams(
trading_calendar=get_calendar("CMES"),
),
)
with self.assertRaises(ScheduleFunctionInvalidCalendar):
algo.run()
def test_schedule_function(self):
us_eastern = pytz.timezone('US/Eastern')
def incrementer(algo, data):
algo.func_called += 1
curdt = algo.get_datetime().tz_convert(pytz.utc)
self.assertEqual(
curdt,
us_eastern.localize(
datetime.datetime.combine(
curdt.date(),
datetime.time(9, 31)
),
),
)
def initialize(algo):
algo.func_called = 0
algo.days = 1
algo.date = None
algo.schedule_function(
func=incrementer,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_open(),
)
def handle_data(algo, data):
if not algo.date:
algo.date = algo.get_datetime().date()
if algo.date < algo.get_datetime().date():
algo.days += 1
algo.date = algo.get_datetime().date()
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
)
algo.run()
self.assertEqual(algo.func_called, algo.days)
def test_event_context(self):
expected_data = []
collected_data_pre = []
collected_data_post = []
function_stack = []
def pre(data):
function_stack.append(pre)
collected_data_pre.append(data)
def post(data):
function_stack.append(post)
collected_data_post.append(data)
def initialize(context):
context.add_event(Always(), f)
context.add_event(Always(), g)
def handle_data(context, data):
function_stack.append(handle_data)
expected_data.append(data)
def f(context, data):
function_stack.append(f)
def g(context, data):
function_stack.append(g)
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
create_event_context=CallbackManager(pre, post),
)
algo.run()
self.assertEqual(len(expected_data), 480)
self.assertEqual(collected_data_pre, expected_data)
self.assertEqual(collected_data_post, expected_data)
self.assertEqual(
len(function_stack),
2400,
'Incorrect number of functions called: %s != 2400' %
len(function_stack),
)
expected_functions = [pre, handle_data, f, g, post] * 60030
for n, (f, g) in enumerate(zip(function_stack, expected_functions)):
self.assertEqual(
f,
g,
'function at position %d was incorrect, expected %s but got %s'
% (n, g.__name__, f.__name__),
)
@parameterized.expand([
('daily',),
('minute'),
])
def test_schedule_function_rule_creation(self, mode):
def nop(*args, **kwargs):
return None
self.sim_params.data_frequency = mode
algo = self.make_algo(
initialize=nop,
handle_data=nop,
sim_params=self.sim_params,
)
# Schedule something for NOT Always.
# Compose two rules to ensure calendar is set properly.
algo.schedule_function(nop, time_rule=Never() & Always())
event_rule = algo.event_manager._events[1].rule
self.assertIsInstance(event_rule, OncePerDay)
self.assertEqual(event_rule.cal, algo.trading_calendar)
inner_rule = event_rule.rule
self.assertIsInstance(inner_rule, ComposedRule)
self.assertEqual(inner_rule.cal, algo.trading_calendar)
first = inner_rule.first
second = inner_rule.second
composer = inner_rule.composer
self.assertIsInstance(first, Always)
self.assertEqual(first.cal, algo.trading_calendar)
self.assertEqual(second.cal, algo.trading_calendar)
if mode == 'daily':
self.assertIsInstance(second, Always)
else:
self.assertIsInstance(second, ComposedRule)
self.assertIsInstance(second.first, Never)
self.assertEqual(second.first.cal, algo.trading_calendar)
self.assertIsInstance(second.second, Always)
self.assertEqual(second.second.cal, algo.trading_calendar)
self.assertIs(composer, ComposedRule.lazy_and)
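# Note (illustrative): the assertions above encode how scheduling is wired
# internally. Every scheduled function is wrapped in OncePerDay so it fires
# at most once per session; the date rule and time rule are combined with a
# short-circuiting AND (ComposedRule.lazy_and); and in daily mode any time
# rule collapses to Always(), since there is only one bar per session.
# Roughly:  should_trigger(dt) = date_rule(dt) and time_rule(dt), once/day.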
def test_asset_lookup(self):
algo = self.make_algo()
# this date doesn't matter
start_session = pd.Timestamp("2000-01-01", tz="UTC")
# Test before either PLAY existed
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2001-12-01', tz='UTC')
)
with self.assertRaises(SymbolNotFound):
algo.symbol('PLAY')
with self.assertRaises(SymbolNotFound):
algo.symbols('PLAY')
# Test when first PLAY exists
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2002-12-01', tz='UTC')
)
list_result = algo.symbols('PLAY')
self.assertEqual(3, list_result[0])
# Test after first PLAY ends
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2004-12-01', tz='UTC')
)
self.assertEqual(3, algo.symbol('PLAY'))
# Test after second PLAY begins
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2005-12-01', tz='UTC')
)
self.assertEqual(4, algo.symbol('PLAY'))
# Test after second PLAY ends
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2006-12-01', tz='UTC')
)
self.assertEqual(4, algo.symbol('PLAY'))
list_result = algo.symbols('PLAY')
self.assertEqual(4, list_result[0])
# Test lookup SID
self.assertIsInstance(algo.sid(3), Equity)
self.assertIsInstance(algo.sid(4), Equity)
# Supplying a non-string argument to symbol()
# should result in a TypeError.
with self.assertRaises(TypeError):
algo.symbol(1)
with self.assertRaises(TypeError):
algo.symbol((1,))
with self.assertRaises(TypeError):
algo.symbol({1})
with self.assertRaises(TypeError):
algo.symbol([1])
with self.assertRaises(TypeError):
algo.symbol({'foo': 'bar'})
def test_future_symbol(self):
""" Tests the future_symbol API function.
"""
algo = self.make_algo()
algo.datetime = pd.Timestamp('2006-12-01', tz='UTC')
# Check that we get the correct fields for the CLG06 symbol
cl = algo.future_symbol('CLG06')
self.assertEqual(cl.sid, 5)
self.assertEqual(cl.symbol, 'CLG06')
self.assertEqual(cl.root_symbol, 'CL')
self.assertEqual(cl.start_date, pd.Timestamp('2005-12-01', tz='UTC'))
self.assertEqual(cl.notice_date, pd.Timestamp('2005-12-20', tz='UTC'))
self.assertEqual(cl.expiration_date,
pd.Timestamp('2006-01-20', tz='UTC'))
with self.assertRaises(SymbolNotFound):
algo.future_symbol('')
with self.assertRaises(SymbolNotFound):
algo.future_symbol('PLAY')
with self.assertRaises(SymbolNotFound):
algo.future_symbol('FOOBAR')
# Supplying a non-string argument to future_symbol()
# should result in a TypeError.
with self.assertRaises(TypeError):
algo.future_symbol(1)
with self.assertRaises(TypeError):
algo.future_symbol((1,))
with self.assertRaises(TypeError):
algo.future_symbol({1})
with self.assertRaises(TypeError):
algo.future_symbol([1])
with self.assertRaises(TypeError):
algo.future_symbol({'foo': 'bar'})
class TestSetSymbolLookupDate(zf.WithMakeAlgo, zf.ZiplineTestCase):
# January 2006
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30 31
START_DATE = pd.Timestamp('2006-01-03', tz='UTC')
END_DATE = pd.Timestamp('2006-01-06', tz='UTC')
SIM_PARAMS_START_DATE = pd.Timestamp('2006-01-04', tz='UTC')
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
BENCHMARK_SID = 3
@classmethod
def make_equity_info(cls):
dates = pd.date_range(cls.START_DATE, cls.END_DATE)
assert len(dates) == 4, "Expected four dates."
# Two assets with the same ticker, ending on days[1] and days[3], plus
# a benchmark that spans the whole period.
cls.sids = [1, 2, 3]
cls.asset_starts = [dates[0], dates[2]]
cls.asset_ends = [dates[1], dates[3]]
return pd.DataFrame.from_records([
{'symbol': 'DUP',
'start_date': cls.asset_starts[0],
'end_date': cls.asset_ends[0],
'exchange': 'TEST',
'asset_name': 'FIRST'},
{'symbol': 'DUP',
'start_date': cls.asset_starts[1],
'end_date': cls.asset_ends[1],
'exchange': 'TEST',
'asset_name': 'SECOND'},
{'symbol': 'BENCH',
'start_date': cls.START_DATE,
'end_date': cls.END_DATE,
'exchange': 'TEST',
'asset_name': 'BENCHMARK'},
], index=cls.sids)
def test_set_symbol_lookup_date(self):
"""
Test the set_symbol_lookup_date API method.
"""
set_symbol_lookup_date = zipline.api.set_symbol_lookup_date
def initialize(context):
set_symbol_lookup_date(self.asset_ends[0])
self.assertEqual(zipline.api.symbol('DUP').sid, self.sids[0])
set_symbol_lookup_date(self.asset_ends[1])
self.assertEqual(zipline.api.symbol('DUP').sid, self.sids[1])
with self.assertRaises(UnsupportedDatetimeFormat):
set_symbol_lookup_date('foobar')
self.run_algorithm(initialize=initialize)
class TestPositions(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2020-09-01', tz='utc')
END_DATE = pd.Timestamp('2020-09-04', tz='utc')
SIM_PARAMS_CAPITAL_BASE = 1000
ASSET_FINDER_EQUITY_SIDS = (1, 133)
SIM_PARAMS_DATA_FREQUENCY = 'daily'
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
frame = pd.DataFrame(
{
'open': [90, 95, 100, 105],
'high': [90, 95, 100, 105],
'low': [90, 95, 100, 105],
'close': [90, 95, 100, 105],
'volume': 100,
},
index=cls.equity_daily_bar_days,
)
return ((sid, frame) for sid in sids)
@classmethod
def make_futures_info(cls):
return pd.DataFrame.from_dict(
{
1000: {
'symbol': 'CLF06',
'root_symbol': 'CL',
'start_date': cls.START_DATE,
'end_date': cls.END_DATE,
'auto_close_date': cls.END_DATE + cls.trading_calendar.day,
'exchange': 'CMES',
'multiplier': 100,
},
},
orient='index',
)
@classmethod
def make_future_minute_bar_data(cls):
trading_calendar = cls.trading_calendars[Future]
sids = cls.asset_finder.futures_sids
minutes = trading_calendar.minutes_for_sessions_in_range(
cls.future_minute_bar_days[0],
cls.future_minute_bar_days[-1],
)
frame = pd.DataFrame(
{
'open': 2.0,
'high': 2.0,
'low': 2.0,
'close': 2.0,
'volume': 100,
},
index=minutes,
)
return ((sid, frame) for sid in sids)
def test_portfolio_exited_position(self):
# This test ensures that 'phantom' positions do not appear in
# context.portfolio.positions in the case that a position has been
# entered and fully exited.
def initialize(context, sids):
context.ordered = False
context.exited = False
context.sids = sids
def handle_data(context, data):
if not context.ordered:
for s in context.sids:
context.order(context.sid(s), 1)
context.ordered = True
if not context.exited:
amounts = [pos.amount for pos
in itervalues(context.portfolio.positions)]
if (
len(amounts) > 0 and
all([(amount == 1) for amount in amounts])
):
for stock in context.portfolio.positions:
context.order(context.sid(stock), -1)
context.exited = True
# Should be 0 when all positions are exited.
context.record(num_positions=len(context.portfolio.positions))
result = self.run_algorithm(
initialize=initialize,
handle_data=handle_data,
sids=self.ASSET_FINDER_EQUITY_SIDS,
)
expected_position_count = [
0, # Before entering the first position
2, # After entering, exiting on this date
0, # After exiting
0,
]
for i, expected in enumerate(expected_position_count):
self.assertEqual(result.iloc[i,:]['num_positions'], expected)
def test_noop_orders(self):
asset = self.asset_finder.retrieve_asset(1)
# Algorithm that tries to buy with extremely low stops/limits and tries
# to sell with extremely high versions of same. Should not end up with
# any positions for reasonable data.
def handle_data(algo, data):
########
# Buys #
########
# Buy with low limit, shouldn't trigger.
algo.order(asset, 100, limit_price=1)
# Buy with high stop, shouldn't trigger.
algo.order(asset, 100, stop_price=10000000)
# Buy with high limit (should trigger) but also high stop (should
# prevent trigger).
algo.order(asset, 100, limit_price=10000000, stop_price=10000000)
# Buy with low stop (should trigger), but also low limit (should
# prevent trigger).
algo.order(asset, 100, limit_price=1, stop_price=1)
#########
# Sells #
#########
# Sell with high limit, shouldn't trigger.
algo.order(asset, -100, limit_price=1000000)
# Sell with low stop, shouldn't trigger.
algo.order(asset, -100, stop_price=1)
# Sell with high stop (should trigger), but also high limit (should
# prevent trigger).
algo.order(asset, -100, limit_price=1000000, stop_price=1000000)
# Sell with low limit (should trigger), but also low stop (should
# prevent trigger).
algo.order(asset, -100, limit_price=1, stop_price=1)
###################
# Rounding Checks #
###################
algo.order(asset, 100, limit_price=.00000001)
algo.order(asset, -100, stop_price=.00000001)
daily_stats = self.run_algorithm(handle_data=handle_data)
# Verify that positions are empty for all dates.
empty_positions = daily_stats.positions.map(lambda x: len(x) == 0)
self.assertTrue(empty_positions.all())
def test_position_weights(self):
sids = (1, 133, 1000)
equity_1, equity_133, future_1000 = \
self.asset_finder.retrieve_all(sids)
def initialize(algo, sids_and_amounts, *args, **kwargs):
algo.ordered = False
algo.sids_and_amounts = sids_and_amounts
algo.set_commission(
us_equities=PerTrade(0), us_futures=PerTrade(0),
)
algo.set_slippage(
us_equities=FixedSlippage(0),
us_futures=FixedSlippage(0),
)
def handle_data(algo, data):
if not algo.ordered:
for s, amount in algo.sids_and_amounts:
algo.order(algo.sid(s), amount)
algo.ordered = True
algo.record(
position_weights=algo.portfolio.current_portfolio_weights,
)
daily_stats = self.run_algorithm(
sids_and_amounts=zip(sids, [2, -1, 1]),
initialize=initialize,
handle_data=handle_data,
)
expected_position_weights = [
# No positions held on the first day.
pd.Series({}),
# Each equity's position value is its price times the number of
# shares held. In this example, we hold a long position in 2 shares
# of equity_1 so its weight is (95.0 * 2) = 190.0 divided by the
# total portfolio value. The total portfolio value is the sum of
# cash ($905.00) plus the value of all equity positions.
#
# For a futures contract, its weight is the unit price times number
# of shares held times the multiplier. For future_1000, this is
# (2.0 * 1 * 100) = 200.0 divided by total portfolio value.
pd.Series({
equity_1: 190.0 / (190.0 - 95.0 + 905.0),
equity_133: -95.0 / (190.0 - 95.0 + 905.0),
future_1000: 200.0 / (190.0 - 95.0 + 905.0),
}),
pd.Series({
equity_1: 200.0 / (200.0 - 100.0 + 905.0),
equity_133: -100.0 / (200.0 - 100.0 + 905.0),
future_1000: 200.0 / (200.0 - 100.0 + 905.0),
}),
pd.Series({
equity_1: 210.0 / (210.0 - 105.0 + 905.0),
equity_133: -105.0 / (210.0 - 105.0 + 905.0),
future_1000: 200.0 / (210.0 - 105.0 + 905.0),
}),
]
for i, expected in enumerate(expected_position_weights):
assert_equal(daily_stats.iloc[i]['position_weights'], expected)
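# Worked example for the second day above (fills at price 95, commissions
# and slippage zeroed out, capital base 1000):
#   cash            = 1000 - 2 * 95 (long) + 1 * 95 (short proceeds) = 905
#   portfolio value = 905 + 190 (long) - 95 (short)                  = 1000
#   equity weights  = 190 / 1000 = 0.19 and -95 / 1000 = -0.095
#   future notional = price * amount * multiplier = 2.0 * 1 * 100 = 200
#                     -> weight 200 / 1000 = 0.2
# which matches the first non-empty pd.Series in expected_position_weights.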
class TestBeforeTradingStart(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2016-01-06', tz='utc')
END_DATE = pd.Timestamp('2016-01-07', tz='utc')
SIM_PARAMS_CAPITAL_BASE = 10000
SIM_PARAMS_DATA_FREQUENCY = 'minute'
EQUITY_DAILY_BAR_LOOKBACK_DAYS = EQUITY_MINUTE_BAR_LOOKBACK_DAYS = 1
DATA_PORTAL_FIRST_TRADING_DAY = pd.Timestamp("2016-01-05", tz='UTC')
EQUITY_MINUTE_BAR_START_DATE = pd.Timestamp("2016-01-05", tz='UTC')
FUTURE_MINUTE_BAR_START_DATE = pd.Timestamp("2016-01-05", tz='UTC')
data_start = ASSET_FINDER_EQUITY_START_DATE = pd.Timestamp(
'2016-01-05',
tz='utc',
)
SPLIT_ASSET_SID = 3
ASSET_FINDER_EQUITY_SIDS = 1, 2, SPLIT_ASSET_SID
@classmethod
def make_equity_minute_bar_data(cls):
asset_minutes = \
cls.trading_calendar.minutes_in_range(
cls.data_start,
cls.END_DATE,
)
minutes_count = len(asset_minutes)
minutes_arr = np.arange(minutes_count) + 1
split_data = pd.DataFrame(
{
'open': minutes_arr + 1,
'high': minutes_arr + 2,
'low': minutes_arr - 1,
'close': minutes_arr,
'volume': 100 * minutes_arr,
},
index=asset_minutes,
)
split_data.iloc[780:] = split_data.iloc[780:] / 2.0
for sid in (1, 8554):
yield sid, create_minute_df_for_asset(
cls.trading_calendar,
cls.data_start,
cls.END_DATE,
)
yield 2, create_minute_df_for_asset(
cls.trading_calendar,
cls.data_start,
cls.END_DATE,
50,
)
yield cls.SPLIT_ASSET_SID, split_data
@classmethod
def make_splits_data(cls):
return pd.DataFrame.from_records([
{
'effective_date': str_to_seconds('2016-01-07'),
'ratio': 0.5,
'sid': cls.SPLIT_ASSET_SID,
}
])
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
for sid in sids:
yield sid, create_daily_df_for_asset(
cls.trading_calendar,
cls.data_start,
cls.END_DATE,
)
def test_data_in_bts_minute(self):
algo_code = dedent("""
from zipline.api import record, sid
def initialize(context):
context.history_values = []
def before_trading_start(context, data):
record(the_price1=data.current(sid(1), "price"))
record(the_high1=data.current(sid(1), "high"))
record(the_price2=data.current(sid(2), "price"))
record(the_high2=data.current(sid(2), "high"))
context.history_values.append(data.history(
[sid(1), sid(2)],
["price", "high"],
60,
"1m"
))
def handle_data(context, data):
pass
""")
algo = self.make_algo(script=algo_code)
results = algo.run()
# fetching data at midnight gets us the previous market minute's data
self.assertEqual(390, results.iloc[0].the_price1)
self.assertEqual(392, results.iloc[0].the_high1)
# make sure that price is ffilled, but not other fields
self.assertEqual(350, results.iloc[0].the_price2)
self.assertTrue(np.isnan(results.iloc[0].the_high2))
# 60-minute history
# asset1 day1 price should be 331-390
np.testing.assert_array_equal(
range(331, 391), algo.history_values[0]["price"][1]
)
# asset1 day1 high should be 333-392
np.testing.assert_array_equal(
range(333, 393), algo.history_values[0]["high"][1]
)
# asset2 day1 price should be 19 300s, then 40 350s
np.testing.assert_array_equal(
[300] * 19, algo.history_values[0]["price"][2][0:19]
)
np.testing.assert_array_equal(
[350] * 40, algo.history_values[0]["price"][2][20:]
)
# asset2 day1 high should be all NaNs except for the 19th item
# = 2016-01-05 20:20:00+00:00
np.testing.assert_array_equal(
np.full(19, np.nan), algo.history_values[0]["high"][2][0:19]
)
self.assertEqual(352, algo.history_values[0]["high"][2][19])
np.testing.assert_array_equal(
np.full(40, np.nan), algo.history_values[0]["high"][2][20:]
)
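# Worked numbers for the assertions above (a NYSE session has 390 minute
# bars): sid(1)'s price equals its minute ordinal since the 2016-01-05 open,
# so the last bar of 01-05 has price 390 and high 392. before_trading_start
# on 01-06 runs before the open, so data.current returns those 01-05 values
# and the 60-bar minute history covers prices 331-390. sid(2) only trades
# every 50 minutes, so its last print on 01-05 is bar 350: 'price' forward-
# fills to 350 while 'high' is NaN on the non-trade minutes.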
def test_data_in_bts_daily(self):
algo_code = dedent("""
from zipline.api import record, sid
def initialize(context):
context.history_values = []
def before_trading_start(context, data):
record(the_price1=data.current(sid(1), "price"))
record(the_high1=data.current(sid(1), "high"))
record(the_price2=data.current(sid(2), "price"))
record(the_high2=data.current(sid(2), "high"))
context.history_values.append(data.history(
[sid(1), sid(2)],
["price", "high"],
1,
"1d",
))
def handle_data(context, data):
pass
""")
algo = self.make_algo(script=algo_code)
results = algo.run()
self.assertEqual(392, results.the_high1[0])
self.assertEqual(390, results.the_price1[0])
# nan because asset2 only trades every 50 minutes
self.assertTrue(np.isnan(results.the_high2[0]))
self.assertEqual(350, results.the_price2[0])
self.assertEqual(392, algo.history_values[0]["high"][1][0])
self.assertEqual(390, algo.history_values[0]["price"][1][0])
self.assertEqual(352, algo.history_values[0]["high"][2][0])
self.assertEqual(350, algo.history_values[0]["price"][2][0])
def test_portfolio_bts(self):
algo_code = dedent("""
from zipline.api import order, sid, record
def initialize(context):
context.ordered = False
context.hd_portfolio = context.portfolio
def before_trading_start(context, data):
bts_portfolio = context.portfolio
# Assert that the portfolio in BTS is the same as the last
# portfolio in handle_data
assert (context.hd_portfolio == bts_portfolio)
record(pos_value=bts_portfolio.positions_value)
def handle_data(context, data):
if not context.ordered:
order(sid(1), 1)
context.ordered = True
context.hd_portfolio = context.portfolio
""")
algo = self.make_algo(script=algo_code)
results = algo.run()
# Asset starts with price 1 on 1/05 and increases by 1 every minute.
# Simulation starts on 1/06, where the price in bts is 390, and
# positions_value is 0. On 1/07, price is 780, and after buying one
# share on the first bar of 1/06, positions_value is 780
self.assertEqual(results.pos_value.iloc[0], 0)
self.assertEqual(results.pos_value.iloc[1], 780)
def test_account_bts(self):
algo_code = dedent("""
from zipline.api import order, sid, record, set_slippage, slippage
def initialize(context):
context.ordered = False
context.hd_account = context.account
set_slippage(slippage.VolumeShareSlippage())
def before_trading_start(context, data):
bts_account = context.account
# Assert that the account in BTS is the same as the last account
# in handle_data
assert (context.hd_account == bts_account)
record(port_value=context.account.equity_with_loan)
def handle_data(context, data):
if not context.ordered:
order(sid(1), 1)
context.ordered = True
context.hd_account = context.account
""")
algo = self.make_algo(script=algo_code)
results = algo.run()
# Starting portfolio value is 10000. Order for the asset fills on the
# second bar of 1/06, where the price is 392, and costs the default
# commission of 0. On 1/07, the price is 780, and the increase in
# portfolio value is 780-392-0
self.assertEqual(results.port_value.iloc[0], 10000)
self.assertAlmostEqual(results.port_value.iloc[1],
10000 + 780 - 392 - 0,
places=2)
def test_portfolio_bts_with_overnight_split(self):
algo_code = dedent("""
from zipline.api import order, sid, record
def initialize(context):
context.ordered = False
context.hd_portfolio = context.portfolio
def before_trading_start(context, data):
bts_portfolio = context.portfolio
# Assert that the portfolio in BTS is the same as the last
# portfolio in handle_data, except for the positions
for k in bts_portfolio.__dict__:
if k != 'positions':
assert (context.hd_portfolio.__dict__[k]
== bts_portfolio.__dict__[k])
record(pos_value=bts_portfolio.positions_value)
record(pos_amount=bts_portfolio.positions[sid(3)].amount)
record(
last_sale_price=bts_portfolio.positions[sid(3)].last_sale_price
)
def handle_data(context, data):
if not context.ordered:
order(sid(3), 1)
context.ordered = True
context.hd_portfolio = context.portfolio
""")
results = self.run_algorithm(script=algo_code)
# On 1/07, positions value should be 780, same as without split
self.assertEqual(results.pos_value.iloc[0], 0)
self.assertEqual(results.pos_value.iloc[1], 780)
# On 1/07, after applying the split, 1 share becomes 2
self.assertEqual(results.pos_amount.iloc[0], 0)
self.assertEqual(results.pos_amount.iloc[1], 2)
# On 1/07, after applying the split, last sale price is halved
self.assertEqual(results.last_sale_price.iloc[0], 0)
self.assertEqual(results.last_sale_price.iloc[1], 390)
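# Worked split arithmetic for the overnight 0.5 split above: the position's
# share count is divided by the ratio (1 / 0.5 = 2 shares) and its
# last_sale_price is multiplied by it (780 * 0.5 = 390), so the recorded
# market value on 01-07 is unchanged at 2 * 390 = 780.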
def test_account_bts_with_overnight_split(self):
algo_code = dedent("""
from zipline.api import order, sid, record, set_slippage, slippage
def initialize(context):
context.ordered = False
context.hd_account = context.account
set_slippage(slippage.VolumeShareSlippage())
def before_trading_start(context, data):
bts_account = context.account
# Assert that the account in BTS is the same as the last account
# in handle_data
assert (context.hd_account == bts_account)
record(port_value=bts_account.equity_with_loan)
def handle_data(context, data):
if not context.ordered:
order(sid(1), 1)
context.ordered = True
context.hd_account = context.account
""")
results = self.run_algorithm(script=algo_code)
# On 1/07, portfolio value is the same as without split
self.assertEqual(results.port_value.iloc[0], 10000)
self.assertAlmostEqual(results.port_value.iloc[1],
10000 + 780 - 392 - 0, places=2)
class TestAlgoScript(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-04', tz='utc')
END_DATE = pd.Timestamp('2006-12-31', tz='utc')
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
EQUITY_DAILY_BAR_LOOKBACK_DAYS = 5 # max history window length
STRING_TYPE_NAMES = [s.__name__ for s in string_types]
STRING_TYPE_NAMES_STRING = ', '.join(STRING_TYPE_NAMES)
ASSET_TYPE_NAME = Asset.__name__
CONTINUOUS_FUTURE_NAME = ContinuousFuture.__name__
ASSET_OR_STRING_TYPE_NAMES = ', '.join([ASSET_TYPE_NAME] +
STRING_TYPE_NAMES)
ASSET_OR_STRING_OR_CF_TYPE_NAMES = ', '.join([ASSET_TYPE_NAME,
CONTINUOUS_FUTURE_NAME] +
STRING_TYPE_NAMES)
ARG_TYPE_TEST_CASES = (
('history__assets', (bad_type_history_assets,
ASSET_OR_STRING_OR_CF_TYPE_NAMES,
True)),
('history__fields', (bad_type_history_fields,
STRING_TYPE_NAMES_STRING,
True)),
('history__bar_count', (bad_type_history_bar_count, 'int', False)),
('history__frequency', (bad_type_history_frequency,
STRING_TYPE_NAMES_STRING,
False)),
('current__assets', (bad_type_current_assets,
ASSET_OR_STRING_OR_CF_TYPE_NAMES,
True)),
('current__fields', (bad_type_current_fields,
STRING_TYPE_NAMES_STRING,
True)),
('is_stale__assets', (bad_type_is_stale_assets, 'Asset', True)),
('can_trade__assets', (bad_type_can_trade_assets, 'Asset', True)),
('history_kwarg__assets',
(bad_type_history_assets_kwarg,
ASSET_OR_STRING_OR_CF_TYPE_NAMES,
True)),
('history_kwarg_bad_list__assets',
(bad_type_history_assets_kwarg_list,
ASSET_OR_STRING_OR_CF_TYPE_NAMES,
True)),
('history_kwarg__fields',
(bad_type_history_fields_kwarg, STRING_TYPE_NAMES_STRING, True)),
('history_kwarg__bar_count',
(bad_type_history_bar_count_kwarg, 'int', False)),
('history_kwarg__frequency',
(bad_type_history_frequency_kwarg, STRING_TYPE_NAMES_STRING, False)),
('current_kwarg__assets',
(bad_type_current_assets_kwarg,
ASSET_OR_STRING_OR_CF_TYPE_NAMES,
True)),
('current_kwarg__fields',
(bad_type_current_fields_kwarg, STRING_TYPE_NAMES_STRING, True)),
)
sids = 0, 1, 3, 133
# FIXME: Pass a benchmark explicitly here.
BENCHMARK_SID = None
@classmethod
def make_equity_info(cls):
register_calendar("TEST", get_calendar("NYSE"), force=True)
data = make_simple_equity_info(
cls.sids,
cls.START_DATE,
cls.END_DATE,
)
data.loc[3, 'symbol'] = 'TEST'
return data
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
cal = cls.trading_calendars[Equity]
sessions = cal.sessions_in_range(cls.START_DATE, cls.END_DATE)
frame = pd.DataFrame({
'close': 10., 'high': 10.5, 'low': 9.5, 'open': 10., 'volume': 100,
}, index=sessions)
for sid in sids:
yield sid, frame
def test_noop(self):
self.run_algorithm(
initialize=initialize_noop,
handle_data=handle_data_noop,
)
def test_noop_string(self):
self.run_algorithm(script=noop_algo)
def test_no_handle_data(self):
self.run_algorithm(script=no_handle_data)
def test_api_calls(self):
self.run_algorithm(
initialize=initialize_api,
handle_data=handle_data_api,
)
def test_api_calls_string(self):
self.run_algorithm(script=api_algo)
def test_api_get_environment(self):
platform = 'zipline'
algo = self.make_algo(
script=api_get_environment_algo,
platform=platform,
)
algo.run()
self.assertEqual(algo.environment, platform)
def test_api_symbol(self):
self.run_algorithm(script=api_symbol_algo)
def test_fixed_slippage(self):
# verify order -> transaction -> portfolio position.
# --------------
test_algo = self.make_algo(
script="""
from zipline.api import (slippage,
commission,
set_slippage,
set_commission,
order,
record,
sid)
def initialize(context):
model = slippage.FixedSlippage(spread=0.10)
set_slippage(model)
set_commission(commission.PerTrade(100.00))
context.count = 1
context.incr = 0
def handle_data(context, data):
if context.incr < context.count:
order(sid(0), -1000)
record(price=data.current(sid(0), "price"))
context.incr += 1""",
)
results = test_algo.run()
# flatten the list of txns
all_txns = [val for sublist in results["transactions"].tolist()
for val in sublist]
self.assertEqual(len(all_txns), 1)
txn = all_txns[0]
expected_spread = 0.05
expected_price = test_algo.recorded_vars["price"] - expected_spread
self.assertEqual(expected_price, txn['price'])
# make sure that the $100 commission was applied to our cash
# the txn was for -1000 shares at 9.95, means -9.95k. our capital_used
# for that day was therefore 9.95k, but after the $100 commission,
# it should be 9.85k.
self.assertEqual(9850, results.capital_used[1])
self.assertEqual(100, results["orders"].iloc[1][0]["commission"])
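# Worked numbers for test_fixed_slippage: FixedSlippage(spread=0.10) fills a
# sell half a spread below the bar price, i.e. at 10.00 - 0.05 = 9.95, which
# is what the expected_price assertion checks. Selling 1000 shares at 9.95
# raises 9950 of cash, and the flat $100 PerTrade commission brings the
# day's capital_used down to 9850, matching the two assertions above.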
@parameterized.expand(
[
('no_minimum_commission', 0,),
('default_minimum_commission', 0,),
('alternate_minimum_commission', 2,),
]
)
def test_volshare_slippage(self, name, minimum_commission):
tempdir = TempDirectory()
try:
if name == "default_minimum_commission":
commission_line = "set_commission(commission.PerShare(0.02))"
else:
commission_line = \
"set_commission(commission.PerShare(0.02, " \
"min_trade_cost={0}))".format(minimum_commission)
# verify order -> transaction -> portfolio position.
# --------------
# XXX: This is the last remaining consumer of
# create_daily_trade_source.
trades = factory.create_daily_trade_source(
[0], self.sim_params, self.asset_finder, self.trading_calendar
)
data_portal = create_data_portal_from_trade_history(
self.asset_finder, self.trading_calendar, tempdir,
self.sim_params, {0: trades}
)
test_algo = self.make_algo(
data_portal=data_portal,
script="""
from zipline.api import *
def initialize(context):
model = slippage.VolumeShareSlippage(
volume_limit=.3,
price_impact=0.05
)
set_slippage(model)
{0}
context.count = 2
context.incr = 0
def handle_data(context, data):
if context.incr < context.count:
# order small lots to be sure the
# order will fill in a single transaction
order(sid(0), 5000)
record(price=data.current(sid(0), "price"))
record(volume=data.current(sid(0), "volume"))
record(incr=context.incr)
context.incr += 1
""".format(commission_line),
)
results = test_algo.run()
all_txns = [
val for sublist in results["transactions"].tolist()
for val in sublist]
self.assertEqual(len(all_txns), 67)
# all_orders are all the incremental versions of the
# orders as each new fill comes in.
all_orders = list(toolz.concat(results['orders']))
if minimum_commission == 0:
# for each incremental version of each order, the commission
# should be its filled amount * 0.02
for order_ in all_orders:
self.assertAlmostEqual(
order_["filled"] * 0.02,
order_["commission"]
)
else:
# the commission should be at least the min_trade_cost
for order_ in all_orders:
if order_["filled"] > 0:
self.assertAlmostEqual(
max(order_["filled"] * 0.02, minimum_commission),
order_["commission"]
)
else:
self.assertEqual(0, order_["commission"])
finally:
tempdir.cleanup()
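# Note (hedged sketch of the mechanics exercised above): zipline's
# VolumeShareSlippage caps each bar's fill at volume_limit (here 30%) of
# that bar's volume and moves the fill price against the order roughly in
# proportion to price_impact * (volume_share ** 2). A 5000-share order
# therefore fills as many small partial transactions over subsequent bars,
# which is why the test sees 67 transactions and checks the commission on
# every incremental order record.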
def test_incorrectly_set_futures_slippage_model(self):
code = dedent(
"""
from zipline.api import set_slippage, slippage
class MySlippage(slippage.FutureSlippageModel):
def process_order(self, data, order):
return data.current(order.asset, 'price'), order.amount
def initialize(context):
set_slippage(MySlippage())
"""
)
test_algo = self.make_algo(script=code)
with self.assertRaises(IncompatibleSlippageModel):
# Passing a futures slippage model as the first argument, which is
# for setting equity models, should fail.
test_algo.run()
def test_algo_record_vars(self):
test_algo = self.make_algo(script=record_variables)
results = test_algo.run()
for i in range(1, 252):
self.assertEqual(results.iloc[i-1]["incr"], i)
def test_algo_record_nan(self):
test_algo = self.make_algo(script=record_float_magic % 'nan')
results = test_algo.run()
for i in range(1, 252):
self.assertTrue(np.isnan(results.iloc[i-1]["data"]))
def test_batch_market_order_matches_multiple_manual_orders(self):
share_counts = pd.Series([50, 100])
multi_blotter = RecordBatchBlotter()
multi_test_algo = self.make_algo(
script=dedent("""\
from collections import OrderedDict
from six import iteritems
from zipline.api import sid, order
def initialize(context):
context.assets = [sid(0), sid(3)]
context.placed = False
def handle_data(context, data):
if not context.placed:
it = zip(context.assets, {share_counts})
for asset, shares in it:
order(asset, shares)
context.placed = True
""").format(share_counts=list(share_counts)),
blotter=multi_blotter,
)
multi_stats = multi_test_algo.run()
self.assertFalse(multi_blotter.order_batch_called)
batch_blotter = RecordBatchBlotter()
batch_test_algo = self.make_algo(
script=dedent("""\
import pandas as pd
from zipline.api import sid, batch_market_order
def initialize(context):
context.assets = [sid(0), sid(3)]
context.placed = False
def handle_data(context, data):
if not context.placed:
orders = batch_market_order(pd.Series(
index=context.assets, data={share_counts}
))
assert len(orders) == 2, \
"len(orders) was %s but expected 2" % len(orders)
for o in orders:
assert o is not None, "An order is None"
context.placed = True
""").format(share_counts=list(share_counts)),
blotter=batch_blotter,
)
batch_stats = batch_test_algo.run()
self.assertTrue(batch_blotter.order_batch_called)
for stats in (multi_stats, batch_stats):
stats.orders = stats.orders.apply(
lambda orders: [toolz.dissoc(o, 'id') for o in orders]
)
stats.transactions = stats.transactions.apply(
lambda txns: [toolz.dissoc(txn, 'order_id') for txn in txns]
)
assert_equal(multi_stats, batch_stats)
def test_batch_market_order_filters_null_orders(self):
share_counts = [50, 0]
batch_blotter = RecordBatchBlotter()
batch_test_algo = self.make_algo(
script=dedent("""\
import pandas as pd
from zipline.api import sid, batch_market_order
def initialize(context):
context.assets = [sid(0), sid(3)]
context.placed = False
def handle_data(context, data):
if not context.placed:
orders = batch_market_order(pd.Series(
index=context.assets, data={share_counts}
))
assert len(orders) == 1, \
"len(orders) was %s but expected 1" % len(orders)
for o in orders:
assert o is not None, "An order is None"
context.placed = True
""").format(share_counts=share_counts),
blotter=batch_blotter,
)
batch_test_algo.run()
self.assertTrue(batch_blotter.order_batch_called)
def test_order_dead_asset(self):
# after asset 0 is dead
params = SimulationParameters(
start_session=pd.Timestamp("2007-01-03", tz='UTC'),
end_session=pd.Timestamp("2007-01-05", tz='UTC'),
trading_calendar=self.trading_calendar,
)
# order method shouldn't blow up
self.run_algorithm(
script="""
from zipline.api import order, sid
def initialize(context):
pass
def handle_data(context, data):
order(sid(0), 10)
""",
)
# order_value and order_percent should blow up
for order_str in ["order_value", "order_percent"]:
test_algo = self.make_algo(
script="""
from zipline.api import order_percent, order_value, sid
def initialize(context):
pass
def handle_data(context, data):
{0}(sid(0), 10)
""".format(order_str),
sim_params=params,
)
with self.assertRaises(CannotOrderDelistedAsset):
test_algo.run()
def test_portfolio_in_init(self):
"""
Test that accessing portfolio in init doesn't break.
"""
self.run_algorithm(script=access_portfolio_in_init)
def test_account_in_init(self):
"""
Test that accessing account in init doesn't break.
"""
self.run_algorithm(script=access_account_in_init)
def test_without_kwargs(self):
"""
Test that api methods on the data object can be called with positional
arguments.
"""
params = SimulationParameters(
start_session=pd.Timestamp("2006-01-10", tz='UTC'),
end_session=pd.Timestamp("2006-01-11", tz='UTC'),
trading_calendar=self.trading_calendar,
)
self.run_algorithm(sim_params=params, script=call_without_kwargs)
def test_good_kwargs(self):
"""
Test that api methods on the data object can be called with keyword
arguments.
"""
params = SimulationParameters(
start_session=pd.Timestamp("2006-01-10", tz='UTC'),
end_session=pd.Timestamp("2006-01-11", tz='UTC'),
trading_calendar=self.trading_calendar,
)
self.run_algorithm(script=call_with_kwargs, sim_params=params)
@parameterized.expand([('history', call_with_bad_kwargs_history),
('current', call_with_bad_kwargs_current)])
def test_bad_kwargs(self, name, algo_text):
"""
Test that api methods on the data object called with bad kwargs return
a meaningful TypeError that we create, rather than an unhelpful cython
error
"""
algo = self.make_algo(script=algo_text)
with self.assertRaises(TypeError) as cm:
algo.run()
self.assertEqual("%s() got an unexpected keyword argument 'blahblah'"
% name, cm.exception.args[0])
@parameterized.expand(ARG_TYPE_TEST_CASES)
def test_arg_types(self, name, inputs):
keyword = name.split('__')[1]
algo = self.make_algo(script=inputs[0])
with self.assertRaises(TypeError) as cm:
algo.run()
expected = "Expected %s argument to be of type %s%s" % (
keyword,
'or iterable of type ' if inputs[2] else '',
inputs[1]
)
self.assertEqual(expected, cm.exception.args[0])
def test_empty_asset_list_to_history(self):
params = SimulationParameters(
start_session=pd.Timestamp("2006-01-10", tz='UTC'),
end_session=pd.Timestamp("2006-01-11", tz='UTC'),
trading_calendar=self.trading_calendar,
)
self.run_algorithm(
script=dedent("""
def initialize(context):
pass
def handle_data(context, data):
data.history([], "price", 5, '1d')
"""),
sim_params=params,
)
@parameterized.expand(
[('bad_kwargs', call_with_bad_kwargs_get_open_orders),
('good_kwargs', call_with_good_kwargs_get_open_orders),
('no_kwargs', call_with_no_kwargs_get_open_orders)]
)
def test_get_open_orders_kwargs(self, name, script):
algo = self.make_algo(script=script)
if name == 'bad_kwargs':
with self.assertRaises(TypeError) as cm:
algo.run()
self.assertEqual('Keyword argument `sid` is no longer '
'supported for get_open_orders. Use `asset` '
'instead.', cm.exception.args[0])
else:
algo.run()
def test_empty_positions(self):
"""
Test that when we try context.portfolio.positions[stock] on a stock
for which we have no positions, we return a Position with values 0
(but more importantly, we don't crash) and don't save this Position
to the user-facing dictionary PositionTracker._positions_store
"""
results = self.run_algorithm(script=empty_positions)
num_positions = results.num_positions
amounts = results.amounts
self.assertTrue(all(num_positions == 0))
self.assertTrue(all(amounts == 0))
def test_schedule_function_time_rule_positionally_misplaced(self):
"""
Test that when a user specifies a time rule for the date_rule argument,
but no rule in the time_rule argument
(e.g. schedule_function(func, <time_rule>)), we assume that means
assign a time rule but no date rule
"""
sim_params = factory.create_simulation_parameters(
start=pd.Timestamp('2006-01-12', tz='UTC'),
end=pd.Timestamp('2006-01-13', tz='UTC'),
data_frequency='minute'
)
algocode = dedent("""
from zipline.api import time_rules, schedule_function
def do_at_open(context, data):
context.done_at_open.append(context.get_datetime())
def do_at_close(context, data):
context.done_at_close.append(context.get_datetime())
def initialize(context):
context.done_at_open = []
context.done_at_close = []
schedule_function(do_at_open, time_rules.market_open())
schedule_function(do_at_close, time_rules.market_close())
def handle_data(algo, data):
pass
""")
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("ignore", PerformanceWarning)
algo = self.make_algo(script=algocode, sim_params=sim_params)
algo.run()
self.assertEqual(len(w), 2)
for i, warning in enumerate(w):
self.assertIsInstance(warning.message, UserWarning)
self.assertEqual(
warning.message.args[0],
'Got a time rule for the second positional argument '
'date_rule. You should use keyword argument '
'time_rule= when calling schedule_function without '
'specifying a date_rule'
)
# The warnings come from line 13 and 14 in the algocode
self.assertEqual(warning.lineno, 13 + i)
self.assertEqual(
algo.done_at_open,
[pd.Timestamp('2006-01-12 14:31:00', tz='UTC'),
pd.Timestamp('2006-01-13 14:31:00', tz='UTC')]
)
self.assertEqual(
algo.done_at_close,
[pd.Timestamp('2006-01-12 20:59:00', tz='UTC'),
pd.Timestamp('2006-01-13 20:59:00', tz='UTC')]
)
class TestCapitalChanges(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-03', tz='UTC')
END_DATE = pd.Timestamp('2006-01-09', tz='UTC')
# XXX: This suite only has daily data for sid 0 and only has minutely data
# for sid 1.
sids = ASSET_FINDER_EQUITY_SIDS = (0, 1)
DAILY_SID = 0
MINUTELY_SID = 1
# FIXME: Pass a benchmark source explicitly here.
BENCHMARK_SID = None
@classmethod
def make_equity_minute_bar_data(cls):
minutes = cls.trading_calendar.minutes_in_range(
cls.START_DATE,
cls.END_DATE,
)
closes = np.arange(100, 100 + len(minutes), 1)
opens = closes
highs = closes + 5
lows = closes - 5
frame = pd.DataFrame(
index=minutes,
data={
'open': opens,
'high': highs,
'low': lows,
'close': closes,
'volume': 10000,
},
)
yield cls.MINUTELY_SID, frame
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
days = cls.trading_calendar.sessions_in_range(
cls.START_DATE,
cls.END_DATE,
)
closes = np.arange(10.0, 10.0 + len(days), 1.0)
opens = closes
highs = closes + 0.5
lows = closes - 0.5
frame = pd.DataFrame(
index=days,
data={
'open': opens,
'high': highs,
'low': lows,
'close': closes,
'volume': 10000,
},
)
yield cls.DAILY_SID, frame
@parameterized.expand([
('target', 151000.0), ('delta', 50000.0)
])
def test_capital_changes_daily_mode(self, change_type, value):
capital_changes = {
pd.Timestamp('2006-01-06', tz='UTC'):
{'type': change_type, 'value': value}
}
algocode = """
from zipline.api import set_slippage, set_commission, slippage, commission, \
schedule_function, time_rules, order, sid
def initialize(context):
set_slippage(slippage.FixedSlippage(spread=0))
set_commission(commission.PerShare(0, 0))
schedule_function(order_stuff, time_rule=time_rules.market_open())
def order_stuff(context, data):
order(sid(0), 1000)
"""
algo = self.make_algo(
script=algocode,
capital_changes=capital_changes,
sim_params=SimulationParameters(
start_session=self.START_DATE,
end_session=self.END_DATE,
trading_calendar=self.nyse_calendar,
)
)
# We call get_generator rather than `run()` here because we care about
# the raw capital change packets.
gen = algo.get_generator()
results = list(gen)
cumulative_perf = \
[r['cumulative_perf'] for r in results if 'cumulative_perf' in r]
daily_perf = [r['daily_perf'] for r in results if 'daily_perf' in r]
capital_change_packets = \
[r['capital_change'] for r in results if 'capital_change' in r]
self.assertEqual(len(capital_change_packets), 1)
self.assertEqual(
capital_change_packets[0],
{'date': pd.Timestamp('2006-01-06', tz='UTC'),
'type': 'cash',
'target': 151000.0 if change_type == 'target' else None,
'delta': 50000.0})
# 1/03: price = 10, place orders
# 1/04: orders execute at price = 11, place orders
# 1/05: orders execute at price = 12, place orders
# 1/06: +50000 capital change,
# orders execute at price = 13, place orders
# 1/09: orders execute at price = 14, place orders
expected_daily = {}
expected_capital_changes = np.array([
0.0, 0.0, 0.0, 50000.0, 0.0
])
# Day 1, no transaction. Day 2, our first order fills at that day's
# close, so there is no gain yet. Day 3, we start getting returns
expected_daily['returns'] = np.array([
0.0,
0.0,
# 1000 shares * gain of 1
(100000.0 + 1000.0) / 100000.0 - 1.0,
# 2000 shares * gain of 1, capital change of +50000
(151000.0 + 2000.0) / 151000.0 - 1.0,
# 3000 shares * gain of 1
(153000.0 + 3000.0) / 153000.0 - 1.0,
])
expected_daily['pnl'] = np.array([
0.0,
0.0,
1000.00, # 1000 shares * gain of 1
2000.00, # 2000 shares * gain of 1
3000.00, # 3000 shares * gain of 1
])
expected_daily['capital_used'] = np.array([
0.0,
-11000.0, # 1000 shares at price = 11
-12000.0, # 1000 shares at price = 12
-13000.0, # 1000 shares at price = 13
-14000.0, # 1000 shares at price = 14
])
expected_daily['ending_cash'] = \
np.array([100000.0] * 5) + \
np.cumsum(expected_capital_changes) + \
np.cumsum(expected_daily['capital_used'])
expected_daily['starting_cash'] = \
expected_daily['ending_cash'] - \
expected_daily['capital_used']
expected_daily['starting_value'] = np.array([
0.0,
0.0,
11000.0, # 1000 shares at price = 11
24000.0, # 2000 shares at price = 12
39000.0, # 3000 shares at price = 13
])
expected_daily['ending_value'] = \
expected_daily['starting_value'] + \
expected_daily['pnl'] - \
expected_daily['capital_used']
expected_daily['portfolio_value'] = \
expected_daily['ending_value'] + \
expected_daily['ending_cash']
stats = [
'returns', 'pnl', 'capital_used', 'starting_cash', 'ending_cash',
'starting_value', 'ending_value', 'portfolio_value'
]
expected_cumulative = {
'returns': np.cumprod(expected_daily['returns'] + 1) - 1,
'pnl': np.cumsum(expected_daily['pnl']),
'capital_used': np.cumsum(expected_daily['capital_used']),
'starting_cash':
np.repeat(expected_daily['starting_cash'][0:1], 5),
'ending_cash': expected_daily['ending_cash'],
'starting_value':
np.repeat(expected_daily['starting_value'][0:1], 5),
'ending_value': expected_daily['ending_value'],
'portfolio_value': expected_daily['portfolio_value'],
}
for stat in stats:
np.testing.assert_array_almost_equal(
np.array([perf[stat] for perf in daily_perf]),
expected_daily[stat],
err_msg='daily ' + stat,
)
np.testing.assert_array_almost_equal(
np.array([perf[stat] for perf in cumulative_perf]),
expected_cumulative[stat],
err_msg='cumulative ' + stat,
)
self.assertEqual(
algo.capital_change_deltas,
{pd.Timestamp('2006-01-06', tz='UTC'): 50000.0}
)
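# Why the 'target' and 'delta' parametrizations above are equivalent: at the
# close before the change on 01-06 the portfolio is worth
#   100000 (base) - 11000 - 12000 (cash spent) + 2000 shares * 12 = 101000,
# so a 'target' of 151000 is exactly a 'delta' of +50000, and both cases
# produce the same expected daily and cumulative performance.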
@parameterized.expand([
('interday_target', [('2006-01-05', 2388.0)]),
('interday_delta', [('2006-01-05', 1000.0)]),
('intraday_target', [('2006-01-05 17:00', 2184.0),
('2006-01-05 18:00', 2804.0)]),
('intraday_delta', [('2006-01-05 17:00', 500.0),
('2006-01-05 18:00', 500.0)]),
])
def test_capital_changes_minute_mode_daily_emission(self, change, values):
change_loc, change_type = change.split('_')
sim_params = SimulationParameters(
start_session=pd.Timestamp('2006-01-04', tz='UTC'),
end_session=pd.Timestamp('2006-01-05', tz='UTC'),
data_frequency='minute',
capital_base=1000.0,
trading_calendar=self.nyse_calendar,
)
capital_changes = {
pd.Timestamp(datestr, tz='UTC'): {
'type': change_type,
'value': value
}
for datestr, value in values
}
algocode = """
from zipline.api import set_slippage, set_commission, slippage, commission, \
schedule_function, time_rules, order, sid
def initialize(context):
set_slippage(slippage.FixedSlippage(spread=0))
set_commission(commission.PerShare(0, 0))
schedule_function(order_stuff, time_rule=time_rules.market_open())
def order_stuff(context, data):
order(sid(1), 1)
"""
algo = self.make_algo(
script=algocode,
sim_params=sim_params,
capital_changes=capital_changes
)
gen = algo.get_generator()
results = list(gen)
cumulative_perf = \
[r['cumulative_perf'] for r in results if 'cumulative_perf' in r]
daily_perf = [r['daily_perf'] for r in results if 'daily_perf' in r]
capital_change_packets = \
[r['capital_change'] for r in results if 'capital_change' in r]
self.assertEqual(len(capital_change_packets), len(capital_changes))
expected = [
{'date': pd.Timestamp(val[0], tz='UTC'),
from sales_analysis.data_pipeline import BASEPATH
from sales_analysis.data_pipeline._pipeline import SalesPipeline
import pytest
import os
import pandas as pd
# --------------------------------------------------------------------------
# Fixtures
@pytest.fixture
def pipeline():
FILEPATH = os.path.join(BASEPATH, "data")
DATA_FILES = [f for f in os.listdir(FILEPATH) if f.endswith('.csv')]
DATA = {f : pd.read_csv(os.path.join(FILEPATH, f)) for f in DATA_FILES}
return SalesPipeline(**DATA)
# --------------------------------------------------------------------------
# Data
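# Expected daily aggregates, keyed by metric name and then by day. These are
# presumably the values SalesPipeline derives from the CSV fixtures above;
# the tests are expected to compare pipeline output against them.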
data = {'customers': {pd.Timestamp('2019-08-01 00:00:00'): 9,
pd.Timestamp('2019-08-02 00:00:00'): 10,
pd.Timestamp('2019-08-03 00:00:00'): 10,
pd.Timestamp('2019-08-04 00:00:00'): 10,
pd.Timestamp('2019-08-05 00:00:00'): 9,
pd.Timestamp('2019-08-06 00:00:00'): 9,
pd.Timestamp('2019-08-07 00:00:00'): 10,
pd.Timestamp('2019-08-08 00:00:00'): 8,
pd.Timestamp('2019-08-09 00:00:00'): 5,
pd.Timestamp('2019-08-10 00:00:00'): 5,
pd.Timestamp('2019-08-11 00:00:00'): 10,
pd.Timestamp('2019-08-12 00:00:00'): 10,
pd.Timestamp('2019-08-13 00:00:00'): 6,
pd.Timestamp('2019-08-14 00:00:00'): 7,
pd.Timestamp('2019-08-15 00:00:00'): 10,
pd.Timestamp('2019-08-16 00:00:00'): 8,
pd.Timestamp('2019-08-17 00:00:00'): 7,
pd.Timestamp('2019-08-18 00:00:00'): 9,
pd.Timestamp('2019-08-19 00:00:00'): 5,
pd.Timestamp('2019-08-20 00:00:00'): 5},
'total_discount_amount': {pd.Timestamp('2019-08-01 00:00:00'): 15152814.736907512,
pd.Timestamp('2019-08-02 00:00:00'): 20061245.64408109,
pd.Timestamp('2019-08-03 00:00:00'): 26441693.751396574,
pd.Timestamp('2019-08-04 00:00:00'): 25783015.567048658,
pd.Timestamp('2019-08-05 00:00:00'): 16649773.993076814,
pd.Timestamp('2019-08-06 00:00:00'): 24744027.428384878,
pd.Timestamp('2019-08-07 00:00:00'): 21641181.771564845,
pd.Timestamp('2019-08-08 00:00:00'): 27012160.85245146,
pd.Timestamp('2019-08-09 00:00:00'): 13806814.237002019,
pd.Timestamp('2019-08-10 00:00:00'): 9722459.599448118,
pd.Timestamp('2019-08-11 00:00:00'): 20450260.26194652,
pd.Timestamp('2019-08-12 00:00:00'): 22125711.151501,
pd.Timestamp('2019-08-13 00:00:00'): 11444206.200090334,
pd.Timestamp('2019-08-14 00:00:00'): 17677326.65707852,
pd.Timestamp('2019-08-15 00:00:00'): 26968819.12338184,
pd.Timestamp('2019-08-16 00:00:00'): 22592246.991756547,
pd.Timestamp('2019-08-17 00:00:00'): 15997597.519811645,
pd.Timestamp('2019-08-18 00:00:00'): 17731498.506244037,
pd.Timestamp('2019-08-19 00:00:00'): 22127822.876592986,
pd.Timestamp('2019-08-20 00:00:00'): 5550506.789972418},
'items': {pd.Timestamp('2019-08-01 00:00:00'): 2895,
pd.Timestamp('2019-08-02 00:00:00'): 3082,
pd.Timestamp('2019-08-03 00:00:00'): 3559,
pd.Timestamp('2019-08-04 00:00:00'): 3582,
pd.Timestamp('2019-08-05 00:00:00'): 2768,
pd.Timestamp('2019-08-06 00:00:00'): 3431,
pd.Timestamp('2019-08-07 00:00:00'): 2767,
pd.Timestamp('2019-08-08 00:00:00'): 2643,
pd.Timestamp('2019-08-09 00:00:00'): 1506,
pd.Timestamp('2019-08-10 00:00:00'): 1443,
pd.Timestamp('2019-08-11 00:00:00'): 2466,
pd.Timestamp('2019-08-12 00:00:00'): 3482,
pd.Timestamp('2019-08-13 00:00:00'): 1940,
pd.Timestamp('2019-08-14 00:00:00'): 1921,
pd.Timestamp('2019-08-15 00:00:00'): 3479,
pd.Timestamp('2019-08-16 00:00:00'): 3053,
pd.Timestamp('2019-08-17 00:00:00'): 2519,
pd.Timestamp('2019-08-18 00:00:00'): 2865,
pd.Timestamp('2019-08-19 00:00:00'): 1735,
pd.Timestamp('2019-08-20 00:00:00'): 1250},
'order_total_avg': {pd.Timestamp('2019-08-01 00:00:00'): 1182286.0960463749,
pd.Timestamp('2019-08-02 00:00:00'): 1341449.559055637,
pd.Timestamp('2019-08-03 00:00:00'): 1270616.0372525519,
pd.Timestamp('2019-08-04 00:00:00'): 1069011.1516039693,
pd.Timestamp('2019-08-05 00:00:00'): 1355304.7342628485,
pd.Timestamp('2019-08-06 00:00:00'): 1283968.435650978,
pd.Timestamp('2019-08-07 00:00:00'): 1319110.4787216866,
pd.Timestamp('2019-08-08 00:00:00'): 1027231.5196824896,
pd.Timestamp('2019-08-09 00:00:00'): 1201471.0717715647,
pd.Timestamp('2019-08-10 00:00:00'): 1314611.2300065856,
pd.Timestamp('2019-08-11 00:00:00'): 1186152.4565363638,
pd.Timestamp('2019-08-12 00:00:00'): 1155226.4552911327,
pd.Timestamp('2019-08-13 00:00:00'): 1346981.8930212667,
pd.Timestamp('2019-08-14 00:00:00'): 1019646.0386455443,
pd.Timestamp('2019-08-15 00:00:00'): 1286793.278547962,
pd.Timestamp('2019-08-16 00:00:00'): 1254721.8660029566,
pd.Timestamp('2019-08-17 00:00:00'): 1419237.673786449,
pd.Timestamp('2019-08-18 00:00:00'): 1173087.9508403398,
pd.Timestamp('2019-08-19 00:00:00'): 1162434.8033358732,
pd.Timestamp('2019-08-20 00:00:00')
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
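# Illustrative summary of the behaviour pinned down above (values taken from
# the test data): min/max on a DatetimeIndex skip NaT entries, and an empty
# or all-NaT index returns NaT, e.g.
#   >>> pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03']).min()
#   Timestamp('2011-01-01 00:00:00')
#   >>> pd.DatetimeIndex([pd.NaT]).max()
#   NaT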
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
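# Illustrative note on expected_rng above: rounding to 'H' maps the exact
# half-hours to the even hour (00:30 -> 00:00, 01:30 -> 02:00), which is
# consistent with numpy-style round-half-to-even, while non-fixed
# frequencies such as 'M' are rejected with "<MonthEnd> is a non-fixed
# frequency".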
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
            idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                                    '2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
            exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                                    '2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
            exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                                    '2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
            self.assertEqual(idx.min(), Timedelta('1 days'))
            self.assertEqual(idx.max(), Timedelta('3 days'))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
        # don't allow division by NaT (maybe we could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = | PeriodIndex([], freq='D') | pandas.PeriodIndex |
import re
import numpy as np
import pandas as pd
from os.path import join
def read_lastfm(raw_dir, debug=None):
"""
    Read the lastfm dataset from .dat files
    :param raw_dir: the path to the raw files (artists.dat, tags.dat, user_artists.dat, ...)
    :param debug: the portion of ratings used, float
    :return: artists, tags, user_artists, user_taggedartists, user_friends, pandas.DataFrame
"""
artists = []
with open(join(raw_dir, 'artists.dat'), encoding='latin1') as f:
i = True
for line in f:
if i:
i = False
continue
try:
aid, name, url, pict_url = line.strip().split('\t')
except:
continue
artists.append({
'aid': int(aid),
'name': name,
'url': url,
'pict_url': pict_url
})
artists = pd.DataFrame(artists)
tags = []
with open(join(raw_dir, 'tags.dat'), encoding='latin1') as f:
i = True
for line in f:
if i:
i = False
continue
tid, tag = line.strip().split('\t')
tags.append({
'tid': int(tid),
'tag': tag
})
tags = pd.DataFrame(tags)
user_artists = []
with open(join(raw_dir, 'user_artists.dat'), encoding='latin1') as f:
i = True
for line in f:
if i:
i = False
continue
uid, aid, listen_count = line.strip().split('\t')
user_artists.append({
'uid': int(uid),
'aid': int(aid),
'listen_count': int(listen_count),
})
user_artists = | pd.DataFrame(user_artists) | pandas.DataFrame |
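# --- Hedged usage sketch (added for illustration; not part of the original source) ---
# A minimal example of how the read_lastfm helper above might be called. The
# directory name is an assumption, and the unpacking follows the return values
# promised in the docstring rather than code shown here (the function body is
# truncated above), so treat this purely as a sketch.
if __name__ == "__main__":
    frames = read_lastfm("./data/lastfm")
    for frame in frames:
        print(frame.head())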
import numpy as np
from pandas import Timedelta, Series
from pandas import to_timedelta
from pandas.tseries.frequencies import to_offset
from scipy import signal
def _noise_limits(y):
"""
Return upper and lower limits of a noise band. Values in this band can be considered as noise.
:param y: The signal.
:return: Tuple (lower, upper).
"""
y_sorted = np.sort(y)
s = np.vstack([np.arange(len(y_sorted)), y_sorted - y_sorted[0]]).T
n = np.array([-s[0, 1] + s[-1, 1], -len(y) + 1])
d = np.dot(s, n)
i_max = np.argmax(d)
i_min = np.argmin(d)
y_upper = y_sorted[i_max]
y_lower = y_sorted[i_min]
return y_lower, y_upper
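# --- Hedged example (added for illustration; not part of the original source) ---
# A small demonstration of _noise_limits on synthetic data: for a signal that
# is mostly low-amplitude noise with a few large spikes, the returned band
# should bracket the bulk of the samples. The synthetic signal below is an
# assumption made purely for this sketch.
if __name__ == "__main__":
    _rng = np.random.default_rng(0)
    _demo = _rng.normal(0.0, 0.1, size=500)   # low-amplitude noise floor
    _demo[::50] += 5.0                        # sparse large excursions
    _lower, _upper = _noise_limits(_demo)
    print("noise band:", _lower, _upper)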
PERIODS = ["30d", "14d", "7d", "3d", "2d", "1d", "12h", "8h", "6h", "4h", "3h", "2h", "1h", "30min", "15min"]
def periodicity(data, periods: list, dt_min=None):
"""
Return a pandas.Series with a periodicity score for a predefined set of potential periods (seasons) in the data.
:param data: A pandas.Series with a DateTimeIndex index.
:param periods: A list of time periods in string format (e.g.: ["2d", "12h", "30min"]).
:param dt_min: The time interval between values of ``data`` in minutes. If None, ``data`` must have a
DateTimeIndex with a set frequency (e.g., via ``data = data.asfreq("1min")``) so the time interval
can be inferred (default: None = infer time interval from ``data``).
    :return: A pandas.Series with the periods as index and the scores as values.
"""
t1 = data.index.min().ceil("1d")
t2 = data.index.max().floor("1d")
interval = (t2 - t1)
# time interval in minutes
if dt_min is None:
dt_min = to_timedelta(to_offset(data.index.freq)).total_seconds() / 60
result = []
for p in periods:
period = | Timedelta(p) | pandas.Timedelta |
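# --- Hedged usage sketch (added for illustration; not part of the original source) ---
# How the periodicity helper above might be called, assuming it returns the
# score Series described in its docstring (its body is truncated above). The
# hourly sine wave is a made-up input chosen so that a daily season should
# dominate the scores.
if __name__ == "__main__":
    import pandas as pd
    _index = pd.date_range("2021-01-01", periods=14 * 24, freq="H")
    _data = Series(np.sin(2 * np.pi * _index.hour / 24.0), index=_index)
    _scores = periodicity(_data, ["1d", "12h", "6h"])
    print(_scores)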
import pandas as pd
from predict_functions import build_rmsa_map, calculate_tournament_table, sort_table, predict_match
from utils.constants import Maps, Teams, calc_map_type
from utils.utils import calc_match_date, calc_season
# Pandas options for better printing
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', 1000)
pd.set_option('display.width', 1000)
match_data = | pd.read_csv('map_data/match_map_stats.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import operator
import warnings
from functools import wraps, partial
from numbers import Number, Integral
from operator import getitem
from pprint import pformat
import numpy as np
import pandas as pd
from pandas.util import cache_readonly, hash_pandas_object
from pandas.api.types import is_bool_dtype, is_timedelta64_dtype, \
is_numeric_dtype, is_datetime64_any_dtype
from toolz import merge, first, unique, partition_all, remove
try:
from chest import Chest as Cache
except ImportError:
Cache = dict
from .. import array as da
from .. import core
from ..utils import partial_by_order, Dispatch, IndexCallable
from .. import threaded
from ..compatibility import (apply, operator_div, bind_method, string_types,
isidentifier,
Iterator, Sequence)
from ..context import globalmethod
from ..utils import (random_state_data, pseudorandom, derived_from, funcname,
memory_repr, put_lines, M, key_split, OperatorMethodMixin,
is_arraylike, typename, skip_doctest)
from ..array.core import Array, normalize_arg
from ..array.utils import empty_like_safe
from ..blockwise import blockwise, Blockwise
from ..base import DaskMethodsMixin, tokenize, dont_optimize, is_dask_collection
from ..delayed import delayed, Delayed, unpack_collections
from ..highlevelgraph import HighLevelGraph
from . import methods
from .accessor import DatetimeAccessor, StringAccessor
from .categorical import CategoricalAccessor, categorize
from .optimize import optimize
from .utils import (meta_nonempty, make_meta, insert_meta_param_description,
raise_on_meta_error, clear_known_categories,
is_categorical_dtype, has_known_categories, PANDAS_VERSION,
index_summary, is_dataframe_like, is_series_like,
is_index_like, valid_divisions)
no_default = '__no_default__'
pd.set_option('compute.use_numexpr', False)
def _concat(args):
if not args:
return args
if isinstance(first(core.flatten(args)), np.ndarray):
return da.core.concatenate3(args)
if not has_parallel_type(args[0]):
try:
return pd.Series(args)
except Exception:
return args
# We filter out empty partitions here because pandas frequently has
# inconsistent dtypes in results between empty and non-empty frames.
# Ideally this would be handled locally for each operation, but in practice
# this seems easier. TODO: don't do this.
args2 = [i for i in args if len(i)]
return args[0] if not args2 else methods.concat(args2, uniform=True)
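# --- Hedged illustration (added for this write-up; not part of the original source) ---
# A pandas-only sketch of the dtype inconsistency the comment above refers to:
# an empty frame built independently of the data usually does not carry the
# "real" column dtype, so concatenating empty and non-empty pieces can silently
# change the result dtype. Exact dtypes vary by pandas version, which is why
# this is illustrative only.
def _example_empty_partition_dtype_drift():
    full = pd.DataFrame({"a": [1, 2, 3]})    # int64 column
    empty = pd.DataFrame({"a": []})          # float64/object column, no rows
    with_empty = pd.concat([empty, full])    # "a" is no longer int64
    without_empty = pd.concat([full])        # "a" stays int64
    return with_empty.dtypes["a"], without_empty.dtypes["a"]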
def finalize(results):
return _concat(results)
class Scalar(DaskMethodsMixin, OperatorMethodMixin):
""" A Dask object to represent a pandas scalar"""
def __init__(self, dsk, name, meta, divisions=None):
# divisions is ignored, only present to be compatible with other
# objects.
if not isinstance(dsk, HighLevelGraph):
dsk = HighLevelGraph.from_collections(name, dsk, dependencies=[])
self.dask = dsk
self._name = name
meta = make_meta(meta)
if is_dataframe_like(meta) or is_series_like(meta) or is_index_like(meta):
raise TypeError("Expected meta to specify scalar, got "
"{0}".format(typename(type(meta))))
self._meta = meta
def __dask_graph__(self):
return self.dask
def __dask_keys__(self):
return [self.key]
def __dask_tokenize__(self):
return self._name
def __dask_layers__(self):
return (self.key,)
__dask_optimize__ = globalmethod(optimize, key='dataframe_optimize',
falsey=dont_optimize)
__dask_scheduler__ = staticmethod(threaded.get)
def __dask_postcompute__(self):
return first, ()
def __dask_postpersist__(self):
return Scalar, (self._name, self._meta, self.divisions)
@property
def _meta_nonempty(self):
return self._meta
@property
def dtype(self):
return self._meta.dtype
def __dir__(self):
o = set(dir(type(self)))
o.update(self.__dict__)
if not hasattr(self._meta, 'dtype'):
o.remove('dtype') # dtype only in `dir` if available
return list(o)
@property
def divisions(self):
"""Dummy divisions to be compat with Series and DataFrame"""
return [None, None]
def __repr__(self):
name = self._name if len(self._name) < 10 else self._name[:7] + '...'
if hasattr(self._meta, 'dtype'):
extra = ', dtype=%s' % self._meta.dtype
else:
extra = ', type=%s' % type(self._meta).__name__
return "dd.Scalar<%s%s>" % (name, extra)
def __array__(self):
# array interface is required to support pandas instance + Scalar
# Otherwise, above op results in pd.Series of Scalar (object dtype)
return np.asarray(self.compute())
@property
def _args(self):
return (self.dask, self._name, self._meta)
def __getstate__(self):
return self._args
def __setstate__(self, state):
self.dask, self._name, self._meta = state
@property
def key(self):
return (self._name, 0)
@classmethod
def _get_unary_operator(cls, op):
def f(self):
name = funcname(op) + '-' + tokenize(self)
dsk = {(name, 0): (op, (self._name, 0))}
meta = op(self._meta_nonempty)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return Scalar(graph, name, meta)
return f
@classmethod
def _get_binary_operator(cls, op, inv=False):
return lambda self, other: _scalar_binary(op, self, other, inv=inv)
def to_delayed(self, optimize_graph=True):
"""Convert into a ``dask.delayed`` object.
Parameters
----------
optimize_graph : bool, optional
If True [default], the graph is optimized before converting into
``dask.delayed`` objects.
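Examples
--------
A minimal sketch, assuming ``ddf`` is an existing dask DataFrame (illustrative name):
>>> total = ddf.x.sum()            # doctest: +SKIP
>>> total.to_delayed().compute()   # doctest: +SKIP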
"""
dsk = self.__dask_graph__()
if optimize_graph:
dsk = self.__dask_optimize__(dsk, self.__dask_keys__())
name = 'delayed-' + self._name
dsk = HighLevelGraph.from_collections(name, dsk, dependencies=())
return Delayed(self.key, dsk)
def _scalar_binary(op, self, other, inv=False):
name = '{0}-{1}'.format(funcname(op), tokenize(self, other))
dependencies = [self]
dsk = {}
return_type = get_parallel_type(other)
if isinstance(other, Scalar):
dependencies.append(other)
other_key = (other._name, 0)
elif is_dask_collection(other):
return NotImplemented
else:
other_key = other
if inv:
dsk.update({(name, 0): (op, other_key, (self._name, 0))})
else:
dsk.update({(name, 0): (op, (self._name, 0), other_key)})
other_meta = make_meta(other)
other_meta_nonempty = meta_nonempty(other_meta)
if inv:
meta = op(other_meta_nonempty, self._meta_nonempty)
else:
meta = op(self._meta_nonempty, other_meta_nonempty)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)
if return_type is not Scalar:
return return_type(graph, name, meta,
[other.index.min(), other.index.max()])
else:
return Scalar(graph, name, meta)
class _Frame(DaskMethodsMixin, OperatorMethodMixin):
""" Superclass for DataFrame and Series
Parameters
----------
dsk: dict
The dask graph to compute this DataFrame
name: str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame / Series
meta: pandas.DataFrame, pandas.Series, or pandas.Index
An empty pandas object with names, dtypes, and indices matching the
expected output.
divisions: tuple of index values
Values along which we partition our blocks on the index
"""
def __init__(self, dsk, name, meta, divisions):
if not isinstance(dsk, HighLevelGraph):
dsk = HighLevelGraph.from_collections(name, dsk, dependencies=[])
self.dask = dsk
self._name = name
meta = make_meta(meta)
if not self._is_partition_type(meta):
raise TypeError("Expected meta to specify type {0}, got type "
"{1}".format(type(self).__name__,
typename(type(meta))))
self._meta = meta
self.divisions = tuple(divisions)
def __dask_graph__(self):
return self.dask
def __dask_keys__(self):
return [(self._name, i) for i in range(self.npartitions)]
def __dask_layers__(self):
return (self._name,)
def __dask_tokenize__(self):
return self._name
__dask_optimize__ = globalmethod(optimize, key='dataframe_optimize',
falsey=dont_optimize)
__dask_scheduler__ = staticmethod(threaded.get)
def __dask_postcompute__(self):
return finalize, ()
def __dask_postpersist__(self):
return type(self), (self._name, self._meta, self.divisions)
@property
def _constructor(self):
return new_dd_object
@property
def npartitions(self):
"""Return number of partitions"""
return len(self.divisions) - 1
@property
def size(self):
"""Size of the Series or DataFrame as a Delayed object.
Examples
--------
>>> series.size # doctest: +SKIP
dd.Scalar<size-ag..., dtype=int64>
"""
return self.reduction(methods.size, np.sum, token='size', meta=int,
split_every=False)
@property
def _meta_nonempty(self):
""" A non-empty version of `_meta` with fake data."""
return meta_nonempty(self._meta)
@property
def _args(self):
return (self.dask, self._name, self._meta, self.divisions)
def __getstate__(self):
return self._args
def __setstate__(self, state):
self.dask, self._name, self._meta, self.divisions = state
def copy(self):
""" Make a copy of the dataframe
This is strictly a shallow copy of the underlying computational graph.
It does not affect the underlying data
"""
return new_dd_object(self.dask, self._name,
self._meta, self.divisions)
def __array__(self, dtype=None, **kwargs):
self._computed = self.compute()
x = np.array(self._computed)
return x
def __array_wrap__(self, array, context=None):
raise NotImplementedError
def __array_ufunc__(self, numpy_ufunc, method, *inputs, **kwargs):
out = kwargs.get('out', ())
for x in inputs + out:
# ufuncs work with 0-dimensional NumPy ndarrays
# so we don't want to raise NotImplemented
if isinstance(x, np.ndarray) and x.shape == ():
continue
elif not isinstance(x, (Number, Scalar, _Frame, Array,
pd.DataFrame, pd.Series, pd.Index)):
return NotImplemented
if method == '__call__':
if numpy_ufunc.signature is not None:
return NotImplemented
if numpy_ufunc.nout > 1:
# ufuncs with multiple output values
# are not yet supported for frames
return NotImplemented
else:
return elemwise(numpy_ufunc, *inputs, **kwargs)
else:
# ufunc methods are not yet supported for frames
return NotImplemented
@property
def _elemwise(self):
return elemwise
def _repr_data(self):
raise NotImplementedError
@property
def _repr_divisions(self):
name = "npartitions={0}".format(self.npartitions)
if self.known_divisions:
divisions = pd.Index(self.divisions, name=name)
else:
# avoid being converted to NaN
divisions = pd.Index([''] * (self.npartitions + 1), name=name)
return divisions
def __repr__(self):
data = self._repr_data().to_string(max_rows=5, show_dimensions=False)
return """Dask {klass} Structure:
{data}
Dask Name: {name}, {task} tasks""".format(klass=self.__class__.__name__,
data=data, name=key_split(self._name),
task=len(self.dask))
@property
def index(self):
"""Return dask Index instance"""
return self.map_partitions(getattr, 'index', token=self._name + '-index',
meta=self._meta.index)
@index.setter
def index(self, value):
self.divisions = value.divisions
result = map_partitions(methods.assign_index, self, value)
self.dask = result.dask
self._name = result._name
self._meta = result._meta
def reset_index(self, drop=False):
"""Reset the index to the default index.
Note that unlike in ``pandas``, the reset ``dask.dataframe`` index will
not be monotonically increasing from 0. Instead, it will restart at 0
for each partition (e.g. ``index1 = [0, ..., 10], index2 = [0, ...]``).
This is due to the inability to statically know the full length of the
index.
For DataFrame with multi-level index, returns a new DataFrame with
labeling information in the columns under the index names, defaulting
to 'level_0', 'level_1', etc. if any are None. For a standard index,
the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
drop : boolean, default False
Do not try to insert index into dataframe columns.
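Examples
--------
A small sketch showing the per-partition restart of the index; all names
below are illustrative:
>>> import pandas as pd
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': range(4)}, index=list('abcd'))
>>> ddf = dd.from_pandas(df, npartitions=2)
>>> ddf.reset_index(drop=True).compute().index.tolist()  # doctest: +SKIP
[0, 1, 0, 1]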
"""
return self.map_partitions(M.reset_index, drop=drop).clear_divisions()
@property
def known_divisions(self):
"""Whether divisions are already known"""
return len(self.divisions) > 0 and self.divisions[0] is not None
def clear_divisions(self):
""" Forget division information """
divisions = (None,) * (self.npartitions + 1)
return type(self)(self.dask, self._name, self._meta, divisions)
def get_partition(self, n):
"""Get a dask DataFrame/Series representing the `nth` partition."""
if 0 <= n < self.npartitions:
name = 'get-partition-%s-%s' % (str(n), self._name)
divisions = self.divisions[n:n + 2]
layer = {(name, 0): (self._name, n)}
graph = HighLevelGraph.from_collections(name, layer, dependencies=[self])
return new_dd_object(graph, name, self._meta, divisions)
else:
msg = "n must be 0 <= n < {0}".format(self.npartitions)
raise ValueError(msg)
@derived_from(pd.DataFrame)
def drop_duplicates(self, split_every=None, split_out=1, **kwargs):
# Let pandas error on bad inputs
self._meta_nonempty.drop_duplicates(**kwargs)
if 'subset' in kwargs and kwargs['subset'] is not None:
split_out_setup = split_out_on_cols
split_out_setup_kwargs = {'cols': kwargs['subset']}
else:
split_out_setup = split_out_setup_kwargs = None
if kwargs.get('keep', True) is False:
raise NotImplementedError("drop_duplicates with keep=False")
chunk = M.drop_duplicates
return aca(self, chunk=chunk, aggregate=chunk, meta=self._meta,
token='drop-duplicates', split_every=split_every,
split_out=split_out, split_out_setup=split_out_setup,
split_out_setup_kwargs=split_out_setup_kwargs, **kwargs)
def __len__(self):
return self.reduction(len, np.sum, token='len', meta=int,
split_every=False).compute()
def __bool__(self):
raise ValueError("The truth value of a {0} is ambiguous. "
"Use a.any() or a.all()."
.format(self.__class__.__name__))
__nonzero__ = __bool__ # python 2
def _scalarfunc(self, cast_type):
def wrapper():
raise TypeError("cannot convert the series to "
"{0}".format(str(cast_type)))
return wrapper
def __float__(self):
return self._scalarfunc(float)
def __int__(self):
return self._scalarfunc(int)
__long__ = __int__ # python 2
def __complex__(self):
return self._scalarfunc(complex)
@insert_meta_param_description(pad=12)
def map_partitions(self, func, *args, **kwargs):
""" Apply Python function on each DataFrame partition.
Note that the index and divisions are assumed to remain unchanged.
Parameters
----------
func : function
Function applied to each partition.
args, kwargs :
Arguments and keywords to pass to the function. The partition will
be the first argument, and these will be passed *after*. Arguments
and keywords may contain ``Scalar``, ``Delayed`` or regular
python objects. DataFrame-like args (both dask and pandas) will be
repartitioned to align (if necessary) before applying the function.
$META
Examples
--------
Given a DataFrame, Series, or Index, such as:
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
... 'y': [1., 2., 3., 4., 5.]})
>>> ddf = dd.from_pandas(df, npartitions=2)
One can use ``map_partitions`` to apply a function on each partition.
Extra arguments and keywords can optionally be provided, and will be
passed to the function after the partition.
Here we apply a function with arguments and keywords to a DataFrame,
resulting in a Series:
>>> def myadd(df, a, b=1):
... return df.x + df.y + a + b
>>> res = ddf.map_partitions(myadd, 1, b=2)
>>> res.dtype
dtype('float64')
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in many cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manually specify the output metadata with the ``meta`` keyword. This
can be specified in many forms, for more information see
``dask.dataframe.utils.make_meta``.
Here we specify the output is a Series with no name, and dtype
``float64``:
>>> res = ddf.map_partitions(myadd, 1, b=2, meta=(None, 'f8'))
Here we map a function that takes in a DataFrame, and returns a
DataFrame with a new column:
>>> res = ddf.map_partitions(lambda df: df.assign(z=df.x * df.y))
>>> res.dtypes
x int64
y float64
z float64
dtype: object
As before, the output metadata can also be specified manually. This
time we pass in a ``dict``, as the output is a DataFrame:
>>> res = ddf.map_partitions(lambda df: df.assign(z=df.x * df.y),
... meta={'x': 'i8', 'y': 'f8', 'z': 'f8'})
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ddf.map_partitions(lambda df: df.head(), meta=df)
Also note that the index and divisions are assumed to remain unchanged.
If the function you're mapping changes the index/divisions, you'll need
to clear them afterwards:
>>> ddf.map_partitions(func).clear_divisions() # doctest: +SKIP
"""
return map_partitions(func, self, *args, **kwargs)
@insert_meta_param_description(pad=12)
def map_overlap(self, func, before, after, *args, **kwargs):
"""Apply a function to each partition, sharing rows with adjacent partitions.
This can be useful for implementing windowing functions such as
``df.rolling(...).mean()`` or ``df.diff()``.
Parameters
----------
func : function
Function applied to each partition.
before : int
The number of rows to prepend to partition ``i`` from the end of
partition ``i - 1``.
after : int
The number of rows to append to partition ``i`` from the beginning
of partition ``i + 1``.
args, kwargs :
Arguments and keywords to pass to the function. The partition will
be the first argument, and these will be passed *after*.
$META
Notes
-----
Given positive integers ``before`` and ``after``, and a function
``func``, ``map_overlap`` does the following:
1. Prepend ``before`` rows to each partition ``i`` from the end of
partition ``i - 1``. The first partition has no rows prepended.
2. Append ``after`` rows to each partition ``i`` from the beginning of
partition ``i + 1``. The last partition has no rows appended.
3. Apply ``func`` to each partition, passing in any extra ``args`` and
``kwargs`` if provided.
4. Trim ``before`` rows from the beginning of all but the first
partition.
5. Trim ``after`` rows from the end of all but the last partition.
Note that the index and divisions are assumed to remain unchanged.
Examples
--------
Given a DataFrame, Series, or Index, such as:
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': [1, 2, 4, 7, 11],
... 'y': [1., 2., 3., 4., 5.]})
>>> ddf = dd.from_pandas(df, npartitions=2)
A rolling sum with a trailing moving window of size 2 can be computed by
overlapping 2 rows before each partition, and then mapping calls to
``df.rolling(2).sum()``:
>>> ddf.compute()
x y
0 1 1.0
1 2 2.0
2 4 3.0
3 7 4.0
4 11 5.0
>>> ddf.map_overlap(lambda df: df.rolling(2).sum(), 2, 0).compute()
x y
0 NaN NaN
1 3.0 3.0
2 6.0 5.0
3 11.0 7.0
4 18.0 9.0
The pandas ``diff`` method computes a discrete difference shifted by a
number of periods (can be positive or negative). This can be
implemented by mapping calls to ``df.diff`` to each partition after
prepending/appending that many rows, depending on sign:
>>> def diff(df, periods=1):
... before, after = (periods, 0) if periods > 0 else (0, -periods)
... return df.map_overlap(lambda df, periods=1: df.diff(periods),
... before, after, periods=periods)
>>> diff(ddf, 1).compute()
x y
0 NaN NaN
1 1.0 1.0
2 2.0 1.0
3 3.0 1.0
4 4.0 1.0
If you have a ``DatetimeIndex``, you can use a ``pd.Timedelta`` for time-
based windows.
>>> ts = pd.Series(range(10), index=pd.date_range('2017', periods=10))
>>> dts = dd.from_pandas(ts, npartitions=2)
>>> dts.map_overlap(lambda df: df.rolling('2D').sum(),
... pd.Timedelta('2D'), 0).compute()
2017-01-01 0.0
2017-01-02 1.0
2017-01-03 3.0
2017-01-04 5.0
2017-01-05 7.0
2017-01-06 9.0
2017-01-07 11.0
2017-01-08 13.0
2017-01-09 15.0
2017-01-10 17.0
dtype: float64
"""
from .rolling import map_overlap
return map_overlap(func, self, before, after, *args, **kwargs)
@insert_meta_param_description(pad=12)
def reduction(self, chunk, aggregate=None, combine=None, meta=no_default,
token=None, split_every=None, chunk_kwargs=None,
aggregate_kwargs=None, combine_kwargs=None, **kwargs):
"""Generic row-wise reductions.
Parameters
----------
chunk : callable
Function to operate on each partition. Should return a
``pandas.DataFrame``, ``pandas.Series``, or a scalar.
aggregate : callable, optional
Function to operate on the concatenated result of ``chunk``. If not
specified, defaults to ``chunk``. Used to do the final aggregation
in a tree reduction.
The input to ``aggregate`` depends on the output of ``chunk``.
If the output of ``chunk`` is a:
- scalar: Input is a Series, with one row per partition.
- Series: Input is a DataFrame, with one row per partition. Columns
are the rows in the output series.
- DataFrame: Input is a DataFrame, with one row per partition.
Columns are the columns in the output dataframes.
Should return a ``pandas.DataFrame``, ``pandas.Series``, or a
scalar.
combine : callable, optional
Function to operate on intermediate concatenated results of
``chunk`` in a tree-reduction. If not provided, defaults to
``aggregate``. The input/output requirements should match that of
``aggregate`` described above.
$META
token : str, optional
The name to use for the output keys.
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used,
and all intermediates will be concatenated and passed to
``aggregate``. Default is 8.
chunk_kwargs : dict, optional
Keyword arguments to pass on to ``chunk`` only.
aggregate_kwargs : dict, optional
Keyword arguments to pass on to ``aggregate`` only.
combine_kwargs : dict, optional
Keyword arguments to pass on to ``combine`` only.
kwargs :
All remaining keywords will be passed to ``chunk``, ``combine``,
and ``aggregate``.
Examples
--------
>>> import pandas as pd
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': range(50), 'y': range(50, 100)})
>>> ddf = dd.from_pandas(df, npartitions=4)
Count the number of rows in a DataFrame. To do this, count the number
of rows in each partition, then sum the results:
>>> res = ddf.reduction(lambda x: x.count(),
... aggregate=lambda x: x.sum())
>>> res.compute()
x 50
y 50
dtype: int64
Count the number of rows in a Series with elements greater than or
equal to a value (provided via a keyword).
>>> def count_greater(x, value=0):
... return (x >= value).sum()
>>> res = ddf.x.reduction(count_greater, aggregate=lambda x: x.sum(),
... chunk_kwargs={'value': 25})
>>> res.compute()
25
Aggregate both the sum and count of a Series at the same time:
>>> def sum_and_count(x):
... return pd.Series({'count': x.count(), 'sum': x.sum()},
... index=['count', 'sum'])
>>> res = ddf.x.reduction(sum_and_count, aggregate=lambda x: x.sum())
>>> res.compute()
count 50
sum 1225
dtype: int64
Doing the same, but for a DataFrame. Here ``chunk`` returns a
DataFrame, meaning the input to ``aggregate`` is a DataFrame with an
index with non-unique entries for both 'x' and 'y'. We groupby the
index, and sum each group to get the final result.
>>> def sum_and_count(x):
... return pd.DataFrame({'count': x.count(), 'sum': x.sum()},
... columns=['count', 'sum'])
>>> res = ddf.reduction(sum_and_count,
... aggregate=lambda x: x.groupby(level=0).sum())
>>> res.compute()
count sum
x 50 1225
y 50 3725
"""
if aggregate is None:
aggregate = chunk
if combine is None:
if combine_kwargs:
raise ValueError("`combine_kwargs` provided with no `combine`")
combine = aggregate
combine_kwargs = aggregate_kwargs
chunk_kwargs = chunk_kwargs.copy() if chunk_kwargs else {}
chunk_kwargs['aca_chunk'] = chunk
combine_kwargs = combine_kwargs.copy() if combine_kwargs else {}
combine_kwargs['aca_combine'] = combine
aggregate_kwargs = aggregate_kwargs.copy() if aggregate_kwargs else {}
aggregate_kwargs['aca_aggregate'] = aggregate
return aca(self, chunk=_reduction_chunk, aggregate=_reduction_aggregate,
combine=_reduction_combine, meta=meta, token=token,
split_every=split_every, chunk_kwargs=chunk_kwargs,
aggregate_kwargs=aggregate_kwargs,
combine_kwargs=combine_kwargs, **kwargs)
@derived_from(pd.DataFrame)
def pipe(self, func, *args, **kwargs):
# Taken from pandas:
# https://github.com/pydata/pandas/blob/master/pandas/core/generic.py#L2698-L2707
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError('%s is both the pipe target and a keyword '
'argument' % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def random_split(self, frac, random_state=None):
""" Pseudorandomly split dataframe into different pieces row-wise
Parameters
----------
frac : list
List of floats that should sum to one.
random_state: int or np.random.RandomState
If int create a new RandomState with this as the seed
Otherwise draw from the passed RandomState
Examples
--------
50/50 split
>>> a, b = df.random_split([0.5, 0.5]) # doctest: +SKIP
80/10/10 split, consistent random_state
>>> a, b, c = df.random_split([0.8, 0.1, 0.1], random_state=123) # doctest: +SKIP
See Also
--------
dask.DataFrame.sample
"""
if not np.allclose(sum(frac), 1):
raise ValueError("frac should sum to 1")
state_data = random_state_data(self.npartitions, random_state)
token = tokenize(self, frac, random_state)
name = 'split-' + token
layer = {(name, i): (pd_split, (self._name, i), frac, state)
for i, state in enumerate(state_data)}
out = []
for i in range(len(frac)):
name2 = 'split-%d-%s' % (i, token)
dsk2 = {(name2, j): (getitem, (name, j), i)
for j in range(self.npartitions)}
graph = HighLevelGraph.from_collections(name2, merge(dsk2, layer), dependencies=[self])
out_df = type(self)(graph, name2, self._meta, self.divisions)
out.append(out_df)
return out
def head(self, n=5, npartitions=1, compute=True):
""" First n rows of the dataset
Parameters
----------
n : int, optional
The number of rows to return. Default is 5.
npartitions : int, optional
Elements are only taken from the first ``npartitions``, with a
default of 1. If there are fewer than ``n`` rows in the first
``npartitions`` a warning will be raised and any found rows
returned. Pass -1 to use all partitions.
compute : bool, optional
Whether to compute the result, default is True.
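Examples
--------
A minimal sketch, assuming ``ddf`` is an existing dask DataFrame:
>>> ddf.head(3)                  # doctest: +SKIP
>>> ddf.head(10, npartitions=2)  # doctest: +SKIP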
"""
return self._head(n=n, npartitions=npartitions, compute=compute, safe=True)
def _head(self, n, npartitions, compute, safe):
if npartitions <= -1:
npartitions = self.npartitions
if npartitions > self.npartitions:
msg = "only {} partitions, head received {}"
raise ValueError(msg.format(self.npartitions, npartitions))
name = 'head-%d-%d-%s' % (npartitions, n, self._name)
if safe:
head = safe_head
else:
head = M.head
if npartitions > 1:
name_p = 'head-partial-%d-%s' % (n, self._name)
dsk = {}
for i in range(npartitions):
dsk[(name_p, i)] = (M.head, (self._name, i), n)
concat = (_concat, [(name_p, i) for i in range(npartitions)])
dsk[(name, 0)] = (head, concat, n)
else:
dsk = {(name, 0): (head, (self._name, 0), n)}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
result = new_dd_object(graph, name, self._meta,
[self.divisions[0], self.divisions[npartitions]])
if compute:
result = result.compute()
return result
def tail(self, n=5, compute=True):
""" Last n rows of the dataset
Caveat: this only checks the last n rows of the last partition.
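Examples
--------
A minimal sketch, assuming ``ddf`` is an existing dask DataFrame:
>>> ddf.tail(3)  # doctest: +SKIP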
"""
name = 'tail-%d-%s' % (n, self._name)
dsk = {(name, 0): (M.tail, (self._name, self.npartitions - 1), n)}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
result = new_dd_object(graph, name, self._meta, self.divisions[-2:])
if compute:
result = result.compute()
return result
@property
def loc(self):
""" Purely label-location based indexer for selection by label.
>>> df.loc["b"] # doctest: +SKIP
>>> df.loc["b":"d"] # doctest: +SKIP
"""
from .indexing import _LocIndexer
return _LocIndexer(self)
def _partitions(self, index):
if not isinstance(index, tuple):
index = (index,)
from ..array.slicing import normalize_index
index = normalize_index(index, (self.npartitions,))
index = tuple(slice(k, k + 1) if isinstance(k, Number) else k
for k in index)
name = 'blocks-' + tokenize(self, index)
new_keys = np.array(self.__dask_keys__(), dtype=object)[index].tolist()
divisions = [self.divisions[i] for _, i in new_keys] + [self.divisions[new_keys[-1][1] + 1]]
dsk = {(name, i): tuple(key) for i, key in enumerate(new_keys)}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return new_dd_object(graph, name, self._meta, divisions)
@property
def partitions(self):
""" Slice dataframe by partitions
This allows partitionwise slicing of a Dask Dataframe. You can perform normal
Numpy-style slicing but now rather than slice elements of the array you
slice along partitions so, for example, ``df.partitions[:5]`` produces a new
Dask Dataframe of the first five partitions.
Examples
--------
>>> df.partitions[0] # doctest: +SKIP
>>> df.partitions[:3] # doctest: +SKIP
>>> df.partitions[::10] # doctest: +SKIP
Returns
-------
A Dask DataFrame
"""
return IndexCallable(self._partitions)
# Note: iloc is implemented only on DataFrame
def repartition(self, divisions=None, npartitions=None, freq=None, force=False):
""" Repartition dataframe along new divisions
Parameters
----------
divisions : list, optional
List of partitions to be used. If specified npartitions will be
ignored.
npartitions : int, optional
Number of partitions of output. Only used if divisions isn't
specified.
freq : str, pd.Timedelta
A period on which to partition timeseries data like ``'7D'`` or
``'12h'`` or ``pd.Timedelta(hours=12)``. Assumes a datetime index.
force : bool, default False
Allows the expansion of the existing divisions.
If False then the new divisions lower and upper bounds must be
the same as the old divisions.
Examples
--------
>>> df = df.repartition(npartitions=10) # doctest: +SKIP
>>> df = df.repartition(divisions=[0, 5, 10, 20]) # doctest: +SKIP
>>> df = df.repartition(freq='7d') # doctest: +SKIP
"""
if npartitions is not None and divisions is not None:
warnings.warn("When providing both npartitions and divisions to "
"repartition only npartitions is used.")
if npartitions is not None:
return repartition_npartitions(self, npartitions)
elif divisions is not None:
return repartition(self, divisions, force=force)
elif freq is not None:
return repartition_freq(self, freq=freq)
else:
raise ValueError(
"Provide either divisions= or npartitions= to repartition")
@derived_from(pd.DataFrame)
def fillna(self, value=None, method=None, limit=None, axis=None):
axis = self._validate_axis(axis)
if method is None and limit is not None:
raise NotImplementedError("fillna with set limit and method=None")
if isinstance(value, _Frame):
test_value = value._meta_nonempty.values[0]
else:
test_value = value
meta = self._meta_nonempty.fillna(value=test_value, method=method,
limit=limit, axis=axis)
if axis == 1 or method is None:
# Control whether or not dask's partition alignment happens.
# We don't want it for a pandas Series.
# We do want it for a dask Series
if is_series_like(value) and not is_dask_collection(value):
args = ()
kwargs = {'value': value}
else:
args = (value,)
kwargs = {}
return self.map_partitions(M.fillna, *args, method=method,
limit=limit, axis=axis, meta=meta,
**kwargs)
if method in ('pad', 'ffill'):
method = 'ffill'
skip_check = 0
before, after = 1 if limit is None else limit, 0
else:
method = 'bfill'
skip_check = self.npartitions - 1
before, after = 0, 1 if limit is None else limit
if limit is None:
name = 'fillna-chunk-' + tokenize(self, method)
dsk = {(name, i): (methods.fillna_check, (self._name, i),
method, i != skip_check)
for i in range(self.npartitions)}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
parts = new_dd_object(graph, name, meta, self.divisions)
else:
parts = self
return parts.map_overlap(M.fillna, before, after, method=method,
limit=limit, meta=meta)
@derived_from(pd.DataFrame)
def ffill(self, axis=None, limit=None):
return self.fillna(method='ffill', limit=limit, axis=axis)
@derived_from(pd.DataFrame)
def bfill(self, axis=None, limit=None):
return self.fillna(method='bfill', limit=limit, axis=axis)
def sample(self, n=None, frac=None, replace=False, random_state=None):
""" Random sample of items
Parameters
----------
n : int, optional
The number of items to return. Not supported by dask; use the
``frac`` parameter instead.
frac : float, optional
Fraction of axis items to return.
replace : boolean, optional
Sample with or without replacement. Default = False.
random_state : int or ``np.random.RandomState``
If int we create a new RandomState with this as the seed
Otherwise we draw from the passed RandomState
See Also
--------
DataFrame.random_split
pandas.DataFrame.sample
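Examples
--------
A minimal sketch, assuming ``ddf`` is an existing dask DataFrame:
>>> ddf.sample(frac=0.1, random_state=1234)             # doctest: +SKIP
>>> ddf.sample(frac=0.5, replace=True, random_state=1)  # doctest: +SKIP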
"""
if n is not None:
msg = ("sample does not support the number of sampled items "
"parameter, 'n'. Please use the 'frac' parameter instead.")
if isinstance(n, Number) and 0 <= n <= 1:
warnings.warn(msg)
frac = n
else:
raise ValueError(msg)
if frac is None:
raise ValueError("frac must not be None")
if random_state is None:
random_state = np.random.RandomState()
name = 'sample-' + tokenize(self, frac, replace, random_state)
state_data = random_state_data(self.npartitions, random_state)
dsk = {(name, i): (methods.sample, (self._name, i), state, frac, replace)
for i, state in enumerate(state_data)}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return new_dd_object(graph, name, self._meta, self.divisions)
@derived_from(pd.DataFrame)
def replace(self, to_replace=None, value=None, regex=False):
return self.map_partitions(M.replace, to_replace=to_replace,
value=value, regex=regex)
def to_dask_array(self, lengths=None):
"""Convert a dask DataFrame to a dask array.
Parameters
----------
lengths : bool or Sequence of ints, optional
How to determine the chunks sizes for the output array.
By default, the output array will have unknown chunk lengths
along the first axis, which can cause some later operations
to fail.
* True : immediately compute the length of each partition
* Sequence : a sequence of integers to use for the chunk sizes
on the first axis. These values are *not* validated for
correctness, beyond ensuring that the number of items
matches the number of partitions.
Returns
-------
A dask Array constructed from the partitions of this object.
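Examples
--------
A minimal sketch, assuming ``ddf`` is an existing dask DataFrame:
>>> arr = ddf.to_dask_array(lengths=True)  # doctest: +SKIP
>>> arr.chunks                             # doctest: +SKIP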
"""
if lengths is True:
lengths = tuple(self.map_partitions(len).compute())
arr = self.values
chunks = self._validate_chunks(arr, lengths)
arr._chunks = chunks
return arr
def to_hdf(self, path_or_buf, key, mode='a', append=False, **kwargs):
""" See dd.to_hdf docstring for more information """
from .io import to_hdf
return to_hdf(self, path_or_buf, key, mode, append, **kwargs)
def to_csv(self, filename, **kwargs):
""" See dd.to_csv docstring for more information """
from .io import to_csv
return to_csv(self, filename, **kwargs)
def to_json(self, filename, *args, **kwargs):
""" See dd.to_json docstring for more information """
from .io import to_json
return to_json(self, filename, *args, **kwargs)
def to_delayed(self, optimize_graph=True):
"""Convert into a list of ``dask.delayed`` objects, one per partition.
Parameters
----------
optimize_graph : bool, optional
If True [default], the graph is optimized before converting into
``dask.delayed`` objects.
Examples
--------
>>> partitions = df.to_delayed() # doctest: +SKIP
See Also
--------
dask.dataframe.from_delayed
"""
keys = self.__dask_keys__()
graph = self.__dask_graph__()
if optimize_graph:
graph = self.__dask_optimize__(graph, self.__dask_keys__())
name = 'delayed-' + self._name
graph = HighLevelGraph.from_collections(name, graph, dependencies=())
return [Delayed(k, graph) for k in keys]
@classmethod
def _get_unary_operator(cls, op):
return lambda self: elemwise(op, self)
@classmethod
def _get_binary_operator(cls, op, inv=False):
if inv:
return lambda self, other: elemwise(op, other, self)
else:
return lambda self, other: elemwise(op, self, other)
def rolling(self, window, min_periods=None, freq=None, center=False,
win_type=None, axis=0):
"""Provides rolling transformations.
Parameters
----------
window : int, str, offset
Size of the moving window. This is the number of observations used
for calculating the statistic. When not using a ``DatetimeIndex``,
the window size must not be so large as to span more than one
adjacent partition. If using an offset or offset alias like '5D',
the data must have a ``DatetimeIndex``
.. versionchanged:: 0.15.0
Now accepts offsets and string offset aliases
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
center : boolean, default False
Set the labels at the center of the window.
win_type : string, default None
Provide a window type. The recognized window types are identical
to pandas.
axis : int, default 0
Returns
-------
a Rolling object on which to call a method to compute a statistic
Notes
-----
The `freq` argument is not supported.
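Examples
--------
A minimal sketch, assuming ``ddf`` has a numeric column ``x`` and, for the
offset form, a ``DatetimeIndex``:
>>> ddf.x.rolling(3).mean()                   # doctest: +SKIP
>>> ddf.x.rolling('5D', min_periods=2).sum()  # doctest: +SKIP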
"""
from dask.dataframe.rolling import Rolling
if isinstance(window, Integral):
if window < 0:
raise ValueError('window must be >= 0')
if min_periods is not None:
if not isinstance(min_periods, Integral):
raise ValueError('min_periods must be an integer')
if min_periods < 0:
raise ValueError('min_periods must be >= 0')
return Rolling(self, window=window, min_periods=min_periods,
freq=freq, center=center, win_type=win_type, axis=axis)
@derived_from(pd.DataFrame)
def diff(self, periods=1, axis=0):
"""
.. note::
Pandas currently uses an ``object``-dtype column to represent
boolean data with missing values. This can cause issues for
boolean-specific operations, like ``|``. To enable boolean-
specific operations, at the cost of metadata that doesn't match
pandas, use ``.astype(bool)`` after the ``diff``.
"""
axis = self._validate_axis(axis)
if not isinstance(periods, Integral):
raise TypeError("periods must be an integer")
if axis == 1:
return self.map_partitions(M.diff, token='diff', periods=periods,
axis=1)
before, after = (periods, 0) if periods > 0 else (0, -periods)
return self.map_overlap(M.diff, before, after, token='diff',
periods=periods)
@derived_from(pd.DataFrame)
def shift(self, periods=1, freq=None, axis=0):
axis = self._validate_axis(axis)
if not isinstance(periods, Integral):
raise TypeError("periods must be an integer")
if axis == 1:
return self.map_partitions(M.shift, token='shift', periods=periods,
freq=freq, axis=1)
if freq is None:
before, after = (periods, 0) if periods > 0 else (0, -periods)
return self.map_overlap(M.shift, before, after, token='shift',
periods=periods)
# Let pandas error on invalid arguments
meta = self._meta_nonempty.shift(periods, freq=freq)
out = self.map_partitions(M.shift, token='shift', periods=periods,
freq=freq, meta=meta,
transform_divisions=False)
return maybe_shift_divisions(out, periods, freq=freq)
def _reduction_agg(self, name, axis=None, skipna=True,
split_every=False, out=None):
axis = self._validate_axis(axis)
meta = getattr(self._meta_nonempty, name)(axis=axis, skipna=skipna)
token = self._token_prefix + name
method = getattr(M, name)
if axis == 1:
result = self.map_partitions(method, meta=meta,
token=token, skipna=skipna, axis=axis)
return handle_out(out, result)
else:
result = self.reduction(method, meta=meta, token=token,
skipna=skipna, axis=axis,
split_every=split_every)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return handle_out(out, result)
@derived_from(pd.DataFrame)
def abs(self):
_raise_if_object_series(self, "abs")
meta = self._meta_nonempty.abs()
return self.map_partitions(M.abs, meta=meta)
@derived_from(pd.DataFrame)
def all(self, axis=None, skipna=True, split_every=False, out=None):
return self._reduction_agg('all', axis=axis, skipna=skipna,
split_every=split_every, out=out)
@derived_from(pd.DataFrame)
def any(self, axis=None, skipna=True, split_every=False, out=None):
return self._reduction_agg('any', axis=axis, skipna=skipna,
split_every=split_every, out=out)
@derived_from(pd.DataFrame)
def sum(self, axis=None, skipna=True, split_every=False, dtype=None,
out=None, min_count=None):
result = self._reduction_agg('sum', axis=axis, skipna=skipna,
split_every=split_every, out=out)
if min_count:
return result.where(self.notnull().sum(axis=axis) >= min_count,
other=np.NaN)
else:
return result
@derived_from(pd.DataFrame)
def prod(self, axis=None, skipna=True, split_every=False, dtype=None,
out=None, min_count=None):
result = self._reduction_agg('prod', axis=axis, skipna=skipna,
split_every=split_every, out=out)
if min_count:
return result.where(self.notnull().sum(axis=axis) >= min_count,
other=np.NaN)
else:
return result
@derived_from(pd.DataFrame)
def max(self, axis=None, skipna=True, split_every=False, out=None):
return self._reduction_agg('max', axis=axis, skipna=skipna,
split_every=split_every, out=out)
@derived_from(pd.DataFrame)
def min(self, axis=None, skipna=True, split_every=False, out=None):
return self._reduction_agg('min', axis=axis, skipna=skipna,
split_every=split_every, out=out)
@derived_from(pd.DataFrame)
def idxmax(self, axis=None, skipna=True, split_every=False):
fn = 'idxmax'
axis = self._validate_axis(axis)
meta = self._meta_nonempty.idxmax(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.idxmax, self, meta=meta,
token=self._token_prefix + fn,
skipna=skipna, axis=axis)
else:
scalar = not is_series_like(meta)
result = aca([self], chunk=idxmaxmin_chunk, aggregate=idxmaxmin_agg,
combine=idxmaxmin_combine, meta=meta,
aggregate_kwargs={'scalar': scalar},
token=self._token_prefix + fn, split_every=split_every,
skipna=skipna, fn=fn)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return result
@derived_from(pd.DataFrame)
def idxmin(self, axis=None, skipna=True, split_every=False):
fn = 'idxmin'
axis = self._validate_axis(axis)
meta = self._meta_nonempty.idxmin(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.idxmin, self, meta=meta,
token=self._token_prefix + fn,
skipna=skipna, axis=axis)
else:
scalar = not is_series_like(meta)
result = aca([self], chunk=idxmaxmin_chunk, aggregate=idxmaxmin_agg,
combine=idxmaxmin_combine, meta=meta,
aggregate_kwargs={'scalar': scalar},
token=self._token_prefix + fn, split_every=split_every,
skipna=skipna, fn=fn)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return result
@derived_from(pd.DataFrame)
def count(self, axis=None, split_every=False):
axis = self._validate_axis(axis)
token = self._token_prefix + 'count'
if axis == 1:
meta = self._meta_nonempty.count(axis=axis)
return self.map_partitions(M.count, meta=meta, token=token,
axis=axis)
else:
meta = self._meta_nonempty.count()
result = self.reduction(M.count, aggregate=M.sum, meta=meta,
token=token, split_every=split_every)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return result
@derived_from(pd.DataFrame)
def mean(self, axis=None, skipna=True, split_every=False, dtype=None, out=None):
axis = self._validate_axis(axis)
_raise_if_object_series(self, "mean")
meta = self._meta_nonempty.mean(axis=axis, skipna=skipna)
if axis == 1:
result = map_partitions(M.mean, self, meta=meta,
token=self._token_prefix + 'mean',
axis=axis, skipna=skipna)
return handle_out(out, result)
else:
num = self._get_numeric_data()
s = num.sum(skipna=skipna, split_every=split_every)
n = num.count(split_every=split_every)
name = self._token_prefix + 'mean-%s' % tokenize(self, axis, skipna)
result = map_partitions(methods.mean_aggregate, s, n,
token=name, meta=meta)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return handle_out(out, result)
@derived_from(pd.DataFrame)
def var(self, axis=None, skipna=True, ddof=1, split_every=False, dtype=None, out=None):
axis = self._validate_axis(axis)
_raise_if_object_series(self, "var")
meta = self._meta_nonempty.var(axis=axis, skipna=skipna)
if axis == 1:
result = map_partitions(M.var, self, meta=meta,
token=self._token_prefix + 'var',
axis=axis, skipna=skipna, ddof=ddof)
return handle_out(out, result)
else:
if self.ndim == 1:
result = self._var_1d(self, skipna, ddof, split_every)
return handle_out(out, result)
count_timedeltas = len(self._meta_nonempty.select_dtypes(include=[np.timedelta64]).columns)
if count_timedeltas == len(self._meta.columns):
result = self._var_timedeltas(skipna, ddof, split_every)
elif count_timedeltas > 0:
result = self._var_mixed(skipna, ddof, split_every)
else:
result = self._var_numeric(skipna, ddof, split_every)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return handle_out(out, result)
def _var_numeric(self, skipna=True, ddof=1, split_every=False):
num = self.select_dtypes(include=['number', 'bool'], exclude=[np.timedelta64])
values_dtype = num.values.dtype
array_values = num.values
if not np.issubdtype(values_dtype, np.number):
array_values = num.values.astype('f8')
var = da.nanvar if skipna or skipna is None else da.var
array_var = var(array_values, axis=0, ddof=ddof, split_every=split_every)
name = self._token_prefix + 'var-numeric' + tokenize(num, split_every)
cols = num._meta.columns if is_dataframe_like(num) else None
var_shape = num._meta_nonempty.values.var(axis=0).shape
array_var_name = (array_var._name,) + (0,) * len(var_shape)
layer = {(name, 0): (methods.wrap_var_reduction, array_var_name, cols)}
graph = HighLevelGraph.from_collections(name, layer, dependencies=[array_var])
return new_dd_object(graph, name, num._meta_nonempty.var(), divisions=[None, None])
def _var_timedeltas(self, skipna=True, ddof=1, split_every=False):
timedeltas = self.select_dtypes(include=[np.timedelta64])
var_timedeltas = [self._var_1d(timedeltas[col_idx], skipna, ddof, split_every)
for col_idx in timedeltas._meta.columns]
var_timedelta_names = [(v._name, 0) for v in var_timedeltas]
name = self._token_prefix + 'var-timedeltas-' + tokenize(timedeltas, split_every)
layer = {(name, 0): (methods.wrap_var_reduction, var_timedelta_names, timedeltas._meta.columns)}
graph = HighLevelGraph.from_collections(name, layer, dependencies=var_timedeltas)
return new_dd_object(graph, name, timedeltas._meta_nonempty.var(), divisions=[None, None])
def _var_mixed(self, skipna=True, ddof=1, split_every=False):
data = self.select_dtypes(include=['number', 'bool', np.timedelta64])
timedelta_vars = self._var_timedeltas(skipna, ddof, split_every)
numeric_vars = self._var_numeric(skipna, ddof, split_every)
name = self._token_prefix + 'var-mixed-' + tokenize(data, split_every)
layer = {(name, 0): (methods.var_mixed_concat,
(numeric_vars._name, 0),
(timedelta_vars._name, 0),
data._meta.columns)}
graph = HighLevelGraph.from_collections(name, layer, dependencies=[numeric_vars, timedelta_vars])
return new_dd_object(graph, name, self._meta_nonempty.var(), divisions=[None, None])
def _var_1d(self, column, skipna=True, ddof=1, split_every=False):
is_timedelta = is_timedelta64_dtype(column._meta)
if is_timedelta:
if not skipna:
is_nan = column.isna()
column = column.astype('i8')
column = column.mask(is_nan)
else:
column = column.dropna().astype('i8')
if PANDAS_VERSION >= '0.24.0':
if pd.Int64Dtype.is_dtype(column._meta_nonempty):
column = column.astype('f8')
if not np.issubdtype(column.dtype, np.number):
column = column.astype('f8')
name = self._token_prefix + 'var-1d-' + tokenize(column, split_every)
var = da.nanvar if skipna or skipna is None else da.var
array_var = var(column.values, axis=0, ddof=ddof, split_every=split_every)
layer = {(name, 0): (methods.wrap_var_reduction, (array_var._name,), None)}
graph = HighLevelGraph.from_collections(name, layer, dependencies=[array_var])
return new_dd_object(graph, name, column._meta_nonempty.var(), divisions=[None, None])
@derived_from(pd.DataFrame)
def std(self, axis=None, skipna=True, ddof=1, split_every=False, dtype=None, out=None):
axis = self._validate_axis(axis)
_raise_if_object_series(self, "std")
meta = self._meta_nonempty.std(axis=axis, skipna=skipna)
if axis == 1:
result = map_partitions(M.std, self, meta=meta,
token=self._token_prefix + 'std',
axis=axis, skipna=skipna, ddof=ddof)
return handle_out(out, result)
else:
v = self.var(skipna=skipna, ddof=ddof, split_every=split_every)
name = self._token_prefix + 'std'
result = map_partitions(np.sqrt, v, meta=meta, token=name)
return handle_out(out, result)
@derived_from(pd.DataFrame)
def sem(self, axis=None, skipna=None, ddof=1, split_every=False):
axis = self._validate_axis(axis)
_raise_if_object_series(self, "sem")
meta = self._meta_nonempty.sem(axis=axis, skipna=skipna, ddof=ddof)
if axis == 1:
return map_partitions(M.sem, self, meta=meta,
token=self._token_prefix + 'sem',
axis=axis, skipna=skipna, ddof=ddof)
else:
num = self._get_numeric_data()
v = num.var(skipna=skipna, ddof=ddof, split_every=split_every)
n = num.count(split_every=split_every)
name = self._token_prefix + 'sem'
result = map_partitions(np.sqrt, v / n, meta=meta, token=name)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return result
def quantile(self, q=0.5, axis=0, method='default'):
""" Approximate row-wise and precise column-wise quantiles of DataFrame
Parameters
----------
q : list/array of floats, default 0.5 (50%)
Iterable of numbers ranging from 0 to 1 for the desired quantiles
axis : {0, 1, 'index', 'columns'} (default 0)
0 or 'index' for row-wise, 1 or 'columns' for column-wise
method : {'default', 'tdigest', 'dask'}, optional
What method to use. By default will use dask's internal custom
algorithm (``'dask'``). If set to ``'tdigest'`` will use tdigest
for floats and ints and fallback to the ``'dask'`` otherwise.
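Examples
--------
A minimal sketch, assuming ``ddf`` is an existing dask DataFrame:
>>> ddf.quantile(0.5).compute()           # doctest: +SKIP
>>> ddf.quantile([0.25, 0.75]).compute()  # doctest: +SKIP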
"""
axis = self._validate_axis(axis)
keyname = 'quantiles-concat--' + tokenize(self, q, axis)
if axis == 1:
if isinstance(q, list):
# Not supported, the result will have current index as columns
raise ValueError("'q' must be scalar when axis=1 is specified")
return map_partitions(M.quantile, self, q, axis,
token=keyname, meta=(q, 'f8'))
else:
_raise_if_object_series(self, "quantile")
meta = self._meta.quantile(q, axis=axis)
num = self._get_numeric_data()
quantiles = tuple(quantile(self[c], q, method) for c in num.columns)
qnames = [(_q._name, 0) for _q in quantiles]
if isinstance(quantiles[0], Scalar):
layer = {(keyname, 0): (pd.Series, qnames, num.columns, None, meta.name)}
graph = HighLevelGraph.from_collections(keyname, layer, dependencies=quantiles)
divisions = (min(num.columns), max(num.columns))
return Series(graph, keyname, meta, divisions)
else:
layer = {(keyname, 0): (methods.concat, qnames, 1)}
graph = HighLevelGraph.from_collections(keyname, layer, dependencies=quantiles)
return DataFrame(graph, keyname, meta, quantiles[0].divisions)
@derived_from(pd.DataFrame)
def describe(self,
split_every=False,
percentiles=None,
percentiles_method='default',
include=None,
exclude=None):
if self._meta.ndim == 1:
return self._describe_1d(self, split_every, percentiles, percentiles_method)
elif (include is None) and (exclude is None):
data = self._meta.select_dtypes(include=[np.number, np.timedelta64])
# when some numeric/timedelta columns are found, keep them by default
if len(data.columns) == 0:
chosen_columns = self._meta.columns
else:
# check if there are timedelta or boolean columns
bools_and_timedeltas = self._meta.select_dtypes(include=[np.timedelta64, 'bool'])
if len(bools_and_timedeltas.columns) == 0:
return self._describe_numeric(self, split_every, percentiles, percentiles_method)
else:
chosen_columns = data.columns
elif include == 'all':
if exclude is not None:
msg = "exclude must be None when include is 'all'"
raise ValueError(msg)
chosen_columns = self._meta.columns
else:
chosen_columns = self._meta.select_dtypes(include=include, exclude=exclude)
stats = [self._describe_1d(self[col_idx], split_every,
percentiles, percentiles_method) for col_idx in chosen_columns]
stats_names = [(s._name, 0) for s in stats]
name = 'describe--' + tokenize(self, split_every)
layer = {(name, 0): (methods.describe_aggregate, stats_names)}
graph = HighLevelGraph.from_collections(name, layer, dependencies=stats)
meta = self._meta_nonempty.describe(include=include, exclude=exclude)
return new_dd_object(graph, name, meta, divisions=[None, None])
def _describe_1d(self, data, split_every=False,
percentiles=None, percentiles_method='default'):
if is_bool_dtype(data._meta):
return self._describe_nonnumeric_1d(data, split_every=split_every)
elif is_numeric_dtype(data._meta):
return self._describe_numeric(
data,
split_every=split_every,
percentiles=percentiles,
percentiles_method=percentiles_method)
elif is_timedelta64_dtype(data._meta):
return self._describe_numeric(
data.dropna().astype('i8'),
split_every=split_every,
percentiles=percentiles,
percentiles_method=percentiles_method,
is_timedelta_column=True)
else:
return self._describe_nonnumeric_1d(data, split_every=split_every)
def _describe_numeric(self, data, split_every=False, percentiles=None,
percentiles_method='default', is_timedelta_column=False):
num = data._get_numeric_data()
if data.ndim == 2 and len(num.columns) == 0:
raise ValueError("DataFrame contains only non-numeric data.")
elif data.ndim == 1 and data.dtype == 'object':
raise ValueError("Cannot compute ``describe`` on object dtype.")
if percentiles is None:
percentiles = [0.25, 0.5, 0.75]
else:
# always include the 50th percentile to calculate the median
# unique removes duplicates and sorts quantiles
percentiles = np.array(percentiles)
percentiles = np.append(percentiles, 0.5)
percentiles = np.unique(percentiles)
percentiles = list(percentiles)
stats = [num.count(split_every=split_every),
num.mean(split_every=split_every),
num.std(split_every=split_every),
num.min(split_every=split_every),
num.quantile(percentiles, method=percentiles_method),
num.max(split_every=split_every)]
stats_names = [(s._name, 0) for s in stats]
colname = data._meta.name if isinstance(data._meta, pd.Series) else None
name = 'describe-numeric--' + tokenize(num, split_every)
layer = {(name, 0): (methods.describe_numeric_aggregate, stats_names, colname, is_timedelta_column)}
graph = HighLevelGraph.from_collections(name, layer, dependencies=stats)
meta = num._meta_nonempty.describe()
return new_dd_object(graph, name, meta, divisions=[None, None])
def _describe_nonnumeric_1d(self, data, split_every=False):
vcounts = data.value_counts(split_every)
count_nonzero = vcounts[vcounts != 0]
count_unique = count_nonzero.size
stats = [
# nunique
count_unique,
# count
data.count(split_every=split_every),
# most common value
vcounts._head(1, npartitions=1, compute=False, safe=False)
]
if is_datetime64_any_dtype(data._meta):
min_ts = data.dropna().astype('i8').min(split_every=split_every)
max_ts = data.dropna().astype('i8').max(split_every=split_every)
stats += [min_ts, max_ts]
stats_names = [(s._name, 0) for s in stats]
colname = data._meta.name
name = 'describe-nonnumeric-1d--' + tokenize(data, split_every)
layer = {(name, 0): (methods.describe_nonnumeric_aggregate, stats_names, colname)}
graph = HighLevelGraph.from_collections(name, layer, dependencies=stats)
meta = data._meta_nonempty.describe()
return new_dd_object(graph, name, meta, divisions=[None, None])
def _cum_agg(self, op_name, chunk, aggregate, axis, skipna=True,
chunk_kwargs=None, out=None):
""" Wrapper for cumulative operation """
axis = self._validate_axis(axis)
if axis == 1:
name = '{0}{1}(axis=1)'.format(self._token_prefix, op_name)
result = self.map_partitions(chunk, token=name, **chunk_kwargs)
return handle_out(out, result)
else:
# cumulate each partition
name1 = '{0}{1}-map'.format(self._token_prefix, op_name)
cumpart = map_partitions(chunk, self, token=name1, meta=self,
**chunk_kwargs)
name2 = '{0}{1}-take-last'.format(self._token_prefix, op_name)
cumlast = map_partitions(_take_last, cumpart, skipna,
meta=pd.Series([]), token=name2)
suffix = tokenize(self)
name = '{0}{1}-{2}'.format(self._token_prefix, op_name, suffix)
cname = '{0}{1}-cum-last-{2}'.format(self._token_prefix, op_name,
suffix)
# aggregate each cumulated partition with the last element of the previous one
layer = {}
layer[(name, 0)] = (cumpart._name, 0)
for i in range(1, self.npartitions):
# store each cumulative step in the graph to reduce computation
if i == 1:
layer[(cname, i)] = (cumlast._name, i - 1)
else:
# aggregate with the previous cumulative result
layer[(cname, i)] = (aggregate, (cname, i - 1), (cumlast._name, i - 1))
layer[(name, i)] = (aggregate, (cumpart._name, i), (cname, i))
graph = HighLevelGraph.from_collections(cname, layer, dependencies=[cumpart, cumlast])
result = new_dd_object(graph, name, chunk(self._meta), self.divisions)
return handle_out(out, result)
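# Illustrative sketch of the scheme implemented by _cum_agg above: each
# partition is cumulated locally, then the last value of every partition is
# threaded through ``aggregate`` so partition i also sees the running result
# of partitions 0..i-1 (names below are made up):
#
# >>> ddf = dd.from_pandas(pd.DataFrame({'x': [1, 2, 3, 4]}), npartitions=2)
# >>> ddf.x.cumsum().compute().tolist()
# [1, 3, 6, 10]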
@derived_from(pd.DataFrame)
def cumsum(self, axis=None, skipna=True, dtype=None, out=None):
return self._cum_agg('cumsum',
chunk=M.cumsum,
aggregate=operator.add,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna),
out=out)
@derived_from(pd.DataFrame)
def cumprod(self, axis=None, skipna=True, dtype=None, out=None):
return self._cum_agg('cumprod',
chunk=M.cumprod,
aggregate=operator.mul,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna),
out=out)
@derived_from(pd.DataFrame)
def cummax(self, axis=None, skipna=True, out=None):
return self._cum_agg('cummax',
chunk=M.cummax,
aggregate=methods.cummax_aggregate,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna),
out=out)
@derived_from(pd.DataFrame)
def cummin(self, axis=None, skipna=True, out=None):
return self._cum_agg('cummin',
chunk=M.cummin,
aggregate=methods.cummin_aggregate,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna),
out=out)
@derived_from(pd.DataFrame)
def where(self, cond, other=np.nan):
# cond and other may be dask instances;
# passing them to map_partitions as keywords would skip partition alignment
return map_partitions(M.where, self, cond, other)
@derived_from(pd.DataFrame)
def mask(self, cond, other=np.nan):
return map_partitions(M.mask, self, cond, other)
@derived_from(pd.DataFrame)
def notnull(self):
return self.map_partitions(M.notnull)
@derived_from(pd.DataFrame)
def isnull(self):
return self.map_partitions(M.isnull)
@derived_from(pd.DataFrame)
def isna(self):
if hasattr(pd, 'isna'):
return self.map_partitions(M.isna)
else:
raise NotImplementedError("Need more recent version of Pandas "
"to support isna. "
"Please use isnull instead.")
@derived_from(pd.DataFrame)
def isin(self, values):
if is_dataframe_like(self._meta):
# DataFrame.isin does weird alignment stuff
bad_types = (_Frame, pd.Series, pd.DataFrame)
else:
bad_types = (_Frame,)
if isinstance(values, bad_types):
raise NotImplementedError(
"Passing a %r to `isin`" % typename(type(values))
)
meta = self._meta_nonempty.isin(values)
# We wrap values in a delayed for two reasons:
# - avoid serializing data in every task
# - avoid cost of traversal of large list in optimizations
return self.map_partitions(M.isin, delayed(values), meta=meta)
@derived_from(pd.DataFrame)
def astype(self, dtype):
# XXX: Pandas will segfault for empty dataframes when setting
# categorical dtypes. This operation isn't allowed currently anyway. We
# get the metadata with a non-empty frame to throw the error instead of
# segfaulting.
if is_dataframe_like(self._meta) and is_categorical_dtype(dtype):
meta = self._meta_nonempty.astype(dtype)
else:
meta = self._meta.astype(dtype)
if hasattr(dtype, 'items'):
set_unknown = [
k for k, v in dtype.items()
if is_categorical_dtype(v) and getattr(v, 'categories', None) is None
]
meta = clear_known_categories(meta, cols=set_unknown)
elif (is_categorical_dtype(dtype) and
getattr(dtype, 'categories', None) is None):
meta = clear_known_categories(meta)
return self.map_partitions(M.astype, dtype=dtype, meta=meta)
@derived_from(pd.Series)
def append(self, other, interleave_partitions=False):
# because DataFrame.append will override this method,
# wrap it with the pd.Series.append docstring
from .multi import concat
if isinstance(other, (list, dict)):
msg = "append doesn't support list or dict input"
raise NotImplementedError(msg)
return concat([self, other], join='outer',
interleave_partitions=interleave_partitions)
@derived_from(pd.DataFrame)
def align(self, other, join='outer', axis=None, fill_value=None):
meta1, meta2 = _emulate(M.align, self, other, join, axis=axis,
fill_value=fill_value)
aligned = self.map_partitions(M.align, other, join=join, axis=axis,
fill_value=fill_value)
token = tokenize(self, other, join, axis, fill_value)
name1 = 'align1-' + token
dsk1 = {(name1, i): (getitem, key, 0)
for i, key in enumerate(aligned.__dask_keys__())}
dsk1.update(aligned.dask)
result1 = new_dd_object(dsk1, name1, meta1, aligned.divisions)
name2 = 'align2-' + token
dsk2 = {(name2, i): (getitem, key, 1)
for i, key in enumerate(aligned.__dask_keys__())}
dsk2.update(aligned.dask)
result2 = new_dd_object(dsk2, name2, meta2, aligned.divisions)
return result1, result2
@derived_from(pd.DataFrame)
def combine(self, other, func, fill_value=None, overwrite=True):
return self.map_partitions(M.combine, other, func,
fill_value=fill_value, overwrite=overwrite)
@derived_from(pd.DataFrame)
def combine_first(self, other):
return self.map_partitions(M.combine_first, other)
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like DataFrame.add to this class """
raise NotImplementedError
@derived_from(pd.DataFrame)
def resample(self, rule, closed=None, label=None):
from .tseries.resample import Resampler
return Resampler(self, rule, closed=closed, label=label)
@derived_from(pd.DataFrame)
def first(self, offset):
# Let pandas error on bad args
self._meta_nonempty.first(offset)
if not self.known_divisions:
raise ValueError("`first` is not implemented for unknown divisions")
offset = pd.tseries.frequencies.to_offset(offset)
date = self.divisions[0] + offset
end = self.loc._get_partitions(date)
include_right = offset.isAnchored() or not hasattr(offset, '_inc')
if end == self.npartitions - 1:
divs = self.divisions
else:
divs = self.divisions[:end + 1] + (date,)
name = 'first-' + tokenize(self, offset)
dsk = {(name, i): (self._name, i) for i in range(end)}
dsk[(name, end)] = (methods.boundary_slice, (self._name, end),
None, date, include_right, True, 'loc')
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return new_dd_object(graph, name, self, divs)
@derived_from(pd.DataFrame)
def last(self, offset):
        # Let pandas error on bad args
        self._meta_nonempty.last(offset)
if not self.known_divisions:
raise ValueError("`last` is not implemented for unknown divisions")
offset = pd.tseries.frequencies.to_offset(offset)
date = self.divisions[-1] - offset
start = self.loc._get_partitions(date)
if start == 0:
divs = self.divisions
else:
divs = (date,) + self.divisions[start + 1:]
name = 'last-' + tokenize(self, offset)
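        # Boundary-slice the partition containing ``date`` so it starts at
        # ``date`` and pass the remaining partitions through unchanged.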
        dsk = {(name, i + 1): (self._name, j + 1)
               for i, j in enumerate(range(start, self.npartitions - 1))}
dsk[(name, 0)] = (methods.boundary_slice, (self._name, start),
date, None, True, False, 'loc')
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return new_dd_object(graph, name, self, divs)
def nunique_approx(self, split_every=None):
"""Approximate number of unique rows.
This method uses the HyperLogLog algorithm for cardinality
estimation to compute the approximate number of unique rows.
The approximate error is 0.406%.
Parameters
----------
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used.
Default is 8.
Returns
-------
a float representing the approximate number of elements
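        Examples
        --------
        Illustrative sketch only (``ddf`` is assumed to be an existing dask
        DataFrame; the result is approximate by design):
        >>> ddf.nunique_approx().compute()  # doctest: +SKIP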
"""
from . import hyperloglog # here to avoid circular import issues
return aca([self], chunk=hyperloglog.compute_hll_array,
combine=hyperloglog.reduce_state,
aggregate=hyperloglog.estimate_count,
split_every=split_every, b=16, meta=float)
@property
def values(self):
""" Return a dask.array of the values of this dataframe
Warning: This creates a dask.array without precise shape information.
Operations that depend on shape information, like slicing or reshaping,
will not work.
"""
return self.map_partitions(methods.values)
def _validate_chunks(self, arr, lengths):
from dask.array.core import normalize_chunks
if isinstance(lengths, Sequence):
lengths = tuple(lengths)
if len(lengths) != self.npartitions:
raise ValueError(
"The number of items in 'lengths' does not match "
"the number of partitions. "
"{} != {}".format(len(lengths), self.npartitions)
)
if self.ndim == 1:
chunks = normalize_chunks((lengths,))
else:
chunks = normalize_chunks((lengths, (len(self.columns),)))
return chunks
elif lengths is not None:
raise ValueError("Unexpected value for 'lengths': '{}'".format(lengths))
return arr._chunks
def _is_index_level_reference(self, key):
"""
Test whether a key is an index level reference
To be considered an index level reference, `key` must match the index name
and must NOT match the name of any column (if a dataframe).
"""
return (self.index.name is not None and
not is_dask_collection(key) and
(np.isscalar(key) or isinstance(key, tuple)) and
key == self.index.name and
key not in getattr(self, 'columns', ()))
def _contains_index_name(self, columns_or_index):
"""
Test whether the input contains a reference to the index of the DataFrame/Series
"""
if isinstance(columns_or_index, list):
return any(self._is_index_level_reference(n) for n in columns_or_index)
else:
return self._is_index_level_reference(columns_or_index)
def _raise_if_object_series(x, funcname):
"""
Utility function to raise an error if an object column does not support
a certain operation like `mean`.
"""
if isinstance(x, Series) and hasattr(x, "dtype") and x.dtype == object:
raise ValueError("`%s` not supported with object series" % funcname)
class Series(_Frame):
""" Parallel Pandas Series
Do not use this class directly. Instead use functions like
``dd.read_csv``, ``dd.read_parquet``, or ``dd.from_pandas``.
Parameters
----------
dsk: dict
The dask graph to compute this Series
_name: str
The key prefix that specifies which keys in the dask comprise this
particular Series
meta: pandas.Series
An empty ``pandas.Series`` with names, dtypes, and index matching the
expected output.
divisions: tuple of index values
Values along which we partition our blocks on the index
See Also
--------
dask.dataframe.DataFrame
"""
_partition_type = pd.Series
_is_partition_type = staticmethod(is_series_like)
_token_prefix = 'series-'
_accessors = set()
def __array_wrap__(self, array, context=None):
if isinstance(context, tuple) and len(context) > 0:
if isinstance(context[1][0], np.ndarray) and context[1][0].shape == ():
index = None
else:
index = context[1][0].index
return pd.Series(array, index=index, name=self.name)
@property
def name(self):
return self._meta.name
@name.setter
def name(self, name):
self._meta.name = name
renamed = _rename_dask(self, name)
# update myself
self.dask = renamed.dask
self._name = renamed._name
@property
def ndim(self):
""" Return dimensionality """
return 1
@property
def shape(self):
"""
Return a tuple representing the dimensionality of a Series.
The single element of the tuple is a Delayed result.
Examples
--------
>>> series.shape # doctest: +SKIP
# (dd.Scalar<size-ag..., dtype=int64>,)
"""
return (self.size,)
@property
def dtype(self):
""" Return data type """
return self._meta.dtype
@cache_readonly
def dt(self):
""" Namespace of datetime methods """
return DatetimeAccessor(self)
@cache_readonly
def cat(self):
return CategoricalAccessor(self)
@cache_readonly
def str(self):
""" Namespace for string methods """
return StringAccessor(self)
def __dir__(self):
o = set(dir(type(self)))
o.update(self.__dict__)
# Remove the `cat` and `str` accessors if not available. We can't
# decide this statically for the `dt` accessor, as it works on
# datetime-like things as well.
for accessor in ['cat', 'str']:
if not hasattr(self._meta, accessor):
o.remove(accessor)
return list(o)
@property
def nbytes(self):
""" Number of bytes """
return self.reduction(methods.nbytes, np.sum, token='nbytes',
meta=int, split_every=False)
def _repr_data(self):
return _repr_data_series(self._meta, self._repr_divisions)
def __repr__(self):
""" have to overwrite footer """
if self.name is not None:
footer = "Name: {name}, dtype: {dtype}".format(name=self.name,
dtype=self.dtype)
else:
footer = "dtype: {dtype}".format(dtype=self.dtype)
return """Dask {klass} Structure:
{data}
{footer}
Dask Name: {name}, {task} tasks""".format(klass=self.__class__.__name__,
data=self.to_string(),
footer=footer,
name=key_split(self._name),
task=len(self.dask))
def rename(self, index=None, inplace=False, sorted_index=False):
"""Alter Series index labels or name
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
Alternatively, change ``Series.name`` with a scalar value.
Parameters
----------
index : scalar, hashable sequence, dict-like or callable, optional
If dict-like or callable, the transformation is applied to the
index. Scalar or hashable sequence-like will alter the
``Series.name`` attribute.
inplace : boolean, default False
Whether to return a new Series or modify this one inplace.
sorted_index : bool, default False
If true, the output ``Series`` will have known divisions inferred
from the input series and the transformation. Ignored for
non-callable/dict-like ``index`` or when the input series has
unknown divisions. Note that this may only be set to ``True`` if
            you know that the transformed index is monotonically increasing. Dask
will check that transformed divisions are monotonic, but cannot
check all the values between divisions, so incorrectly setting this
can result in bugs.
Returns
-------
renamed : Series
See Also
--------
pandas.Series.rename
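        Examples
        --------
        Illustrative sketch only (``ds`` is assumed to be a dask Series with a
        numeric, monotonically increasing index and known divisions):
        >>> ds2 = ds.rename('new_name')  # doctest: +SKIP
        >>> ds3 = ds.rename(lambda x: x + 1, sorted_index=True)  # doctest: +SKIP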
"""
from pandas.api.types import is_scalar, is_list_like, is_dict_like
if is_scalar(index) or (is_list_like(index) and not is_dict_like(index)):
res = self if inplace else self.copy()
res.name = index
else:
res = self.map_partitions(M.rename, index)
if self.known_divisions:
if sorted_index and (callable(index) or is_dict_like(index)):
old = pd.Series(range(self.npartitions + 1),
index=self.divisions)
new = old.rename(index).index
if not new.is_monotonic_increasing:
msg = ("sorted_index=True, but the transformed index "
"isn't monotonic_increasing")
raise ValueError(msg)
res.divisions = tuple(new.tolist())
else:
res = res.clear_divisions()
if inplace:
self.dask = res.dask
self._name = res._name
self.divisions = res.divisions
self._meta = res._meta
res = self
return res
@derived_from(pd.Series)
def round(self, decimals=0):
return elemwise(M.round, self, decimals)
@derived_from(pd.DataFrame)
def to_timestamp(self, freq=None, how='start', axis=0):
df = elemwise(M.to_timestamp, self, freq, how, axis)
df.divisions = tuple(pd.Index(self.divisions).to_timestamp())
return df
def quantile(self, q=0.5, method='default'):
""" Approximate quantiles of Series
Parameters
----------
q : list/array of floats, default 0.5 (50%)
Iterable of numbers ranging from 0 to 1 for the desired quantiles
method : {'default', 'tdigest', 'dask'}, optional
What method to use. By default will use dask's internal custom
            algorithm (``'dask'``). If set to ``'tdigest'``, it will use tdigest
            for floats and ints and fall back to ``'dask'`` otherwise.
"""
return quantile(self, q, method=method)
def _repartition_quantiles(self, npartitions, upsample=1.0):
""" Approximate quantiles of Series used for repartitioning
"""
from .partitionquantiles import partition_quantiles
return partition_quantiles(self, npartitions, upsample=upsample)
def __getitem__(self, key):
if isinstance(key, Series) and self.divisions == key.divisions:
name = 'index-%s' % tokenize(self, key)
dsk = partitionwise_graph(operator.getitem, name, self, key)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self, key])
return Series(graph, name, self._meta, self.divisions)
raise NotImplementedError(
"Series getitem in only supported for other series objects "
"with matching partition structure"
)
@derived_from(pd.DataFrame)
def _get_numeric_data(self, how='any', subset=None):
return self
@derived_from(pd.Series)
def iteritems(self):
for i in range(self.npartitions):
s = self.get_partition(i).compute()
for item in s.iteritems():
yield item
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 'index', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0}.get(axis, axis)
@derived_from(pd.Series)
def groupby(self, by=None, **kwargs):
from dask.dataframe.groupby import SeriesGroupBy
return SeriesGroupBy(self, by=by, **kwargs)
@derived_from(pd.Series)
def count(self, split_every=False):
return super(Series, self).count(split_every=split_every)
def unique(self, split_every=None, split_out=1):
"""
Return Series of unique values in the object. Includes NA values.
Returns
-------
uniques : Series
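        Examples
        --------
        Illustrative sketch only (``ds`` is assumed to be a dask Series):
        >>> ds.unique().compute()  # doctest: +SKIP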
"""
return aca(self, chunk=methods.unique, aggregate=methods.unique,
meta=self._meta, token='unique', split_every=split_every,
series_name=self.name, split_out=split_out)
@derived_from(pd.Series)
def nunique(self, split_every=None):
return self.drop_duplicates(split_every=split_every).count()
@derived_from(pd.Series)
def value_counts(self, split_every=None, split_out=1):
return aca(self, chunk=M.value_counts,
aggregate=methods.value_counts_aggregate,
combine=methods.value_counts_combine,
meta=self._meta.value_counts(), token='value-counts',
split_every=split_every, split_out=split_out,
split_out_setup=split_out_on_index)
@derived_from(pd.Series)
def nlargest(self, n=5, split_every=None):
return aca(self, chunk=M.nlargest, aggregate=M.nlargest,
meta=self._meta, token='series-nlargest',
split_every=split_every, n=n)
@derived_from(pd.Series)
def nsmallest(self, n=5, split_every=None):
return aca(self, chunk=M.nsmallest, aggregate=M.nsmallest,
meta=self._meta, token='series-nsmallest',
split_every=split_every, n=n)
@derived_from(pd.Series)
def isin(self, values):
# Added just to get the different docstring for Series
return super(Series, self).isin(values)
@insert_meta_param_description(pad=12)
@derived_from(pd.Series)
def map(self, arg, na_action=None, meta=no_default):
if is_series_like(arg) and is_dask_collection(arg):
return series_map(self, arg)
if not (isinstance(arg, dict) or
callable(arg) or
is_series_like(arg) and not is_dask_collection(arg)):
raise TypeError("arg must be pandas.Series, dict or callable."
" Got {0}".format(type(arg)))
name = 'map-' + tokenize(self, arg, na_action)
dsk = {(name, i): (M.map, k, arg, na_action) for i, k in
enumerate(self.__dask_keys__())}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
if meta is no_default:
meta = _emulate(M.map, self, arg, na_action=na_action, udf=True)
else:
meta = make_meta(meta, index=getattr(make_meta(self), 'index', None))
return Series(graph, name, meta, self.divisions)
@derived_from(pd.Series)
def dropna(self):
return self.map_partitions(M.dropna)
@derived_from(pd.Series)
def between(self, left, right, inclusive=True):
return self.map_partitions(M.between, left=left,
right=right, inclusive=inclusive)
@derived_from(pd.Series)
def clip(self, lower=None, upper=None, out=None):
if out is not None:
raise ValueError("'out' must be None")
# np.clip may pass out
return self.map_partitions(M.clip, lower=lower, upper=upper)
@derived_from(pd.Series)
def clip_lower(self, threshold):
return self.map_partitions(M.clip_lower, threshold=threshold)
@derived_from(pd.Series)
def clip_upper(self, threshold):
return self.map_partitions(M.clip_upper, threshold=threshold)
@derived_from(pd.Series)
def align(self, other, join='outer', axis=None, fill_value=None):
return super(Series, self).align(other, join=join, axis=axis,
fill_value=fill_value)
@derived_from(pd.Series)
def combine(self, other, func, fill_value=None):
return self.map_partitions(M.combine, other, func,
fill_value=fill_value)
@derived_from(pd.Series)
def squeeze(self):
return self
@derived_from(pd.Series)
def combine_first(self, other):
return self.map_partitions(M.combine_first, other)
def to_bag(self, index=False):
""" Create a Dask Bag from a Series """
from .io import to_bag
return to_bag(self, index)
@derived_from(pd.Series)
def to_frame(self, name=None):
return self.map_partitions(M.to_frame, name,
meta=self._meta.to_frame(name))
@derived_from(pd.Series)
def to_string(self, max_rows=5):
# option_context doesn't affect
return self._repr_data().to_string(max_rows=max_rows)
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like Series.add to this class """
def meth(self, other, level=None, fill_value=None, axis=0):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
meta = _emulate(op, self, other, axis=axis, fill_value=fill_value)
return map_partitions(op, self, other, meta=meta,
axis=axis, fill_value=fill_value)
meth.__doc__ = skip_doctest(op.__doc__)
bind_method(cls, name, meth)
@classmethod
def _bind_comparison_method(cls, name, comparison):
""" bind comparison method like Series.eq to this class """
def meth(self, other, level=None, fill_value=None, axis=0):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
if fill_value is None:
return elemwise(comparison, self, other, axis=axis)
else:
op = partial(comparison, fill_value=fill_value)
return elemwise(op, self, other, axis=axis)
meth.__doc__ = skip_doctest(comparison.__doc__)
bind_method(cls, name, meth)
@insert_meta_param_description(pad=12)
def apply(self, func, convert_dtype=True, meta=no_default, args=(), **kwds):
""" Parallel version of pandas.Series.apply
Parameters
----------
func : function
Function to apply
convert_dtype : boolean, default True
Try to find better dtype for elementwise function results.
If False, leave as dtype=object.
$META
args : tuple
Positional arguments to pass to function in addition to the value.
Additional keyword arguments will be passed as keywords to the function.
Returns
-------
applied : Series or DataFrame if func returns a Series.
Examples
--------
>>> import dask.dataframe as dd
>>> s = pd.Series(range(5), name='x')
>>> ds = dd.from_pandas(s, npartitions=2)
Apply a function elementwise across the Series, passing in extra
arguments in ``args`` and ``kwargs``:
>>> def myadd(x, a, b=1):
... return x + a + b
>>> res = ds.apply(myadd, args=(2,), b=1.5) # doctest: +SKIP
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in many cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manually specify the output metadata with the ``meta`` keyword. This
can be specified in many forms, for more information see
``dask.dataframe.utils.make_meta``.
Here we specify the output is a Series with name ``'x'``, and dtype
``float64``:
>>> res = ds.apply(myadd, args=(2,), b=1.5, meta=('x', 'f8'))
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ds.apply(lambda x: x + 1, meta=ds)
See Also
--------
dask.Series.map_partitions
"""
if meta is no_default:
meta = _emulate(M.apply, self._meta_nonempty, func,
convert_dtype=convert_dtype,
args=args, udf=True, **kwds)
warnings.warn(meta_warning(meta))
return map_partitions(M.apply, self, func,
convert_dtype, args, meta=meta, **kwds)
@derived_from(pd.Series)
def cov(self, other, min_periods=None, split_every=False):
from .multi import concat
if not isinstance(other, Series):
raise TypeError("other must be a dask.dataframe.Series")
df = concat([self, other], axis=1)
return cov_corr(df, min_periods, scalar=True, split_every=split_every)
@derived_from(pd.Series)
def corr(self, other, method='pearson', min_periods=None,
split_every=False):
from .multi import concat
if not isinstance(other, Series):
raise TypeError("other must be a dask.dataframe.Series")
if method != 'pearson':
raise NotImplementedError("Only Pearson correlation has been "
"implemented")
df = concat([self, other], axis=1)
return cov_corr(df, min_periods, corr=True, scalar=True,
split_every=split_every)
@derived_from(pd.Series)
def autocorr(self, lag=1, split_every=False):
if not isinstance(lag, Integral):
raise TypeError("lag must be an integer")
return self.corr(self if lag == 0 else self.shift(lag),
split_every=split_every)
@derived_from(pd.Series)
def memory_usage(self, index=True, deep=False):
result = self.map_partitions(M.memory_usage, index=index, deep=deep)
return delayed(sum)(result.to_delayed())
def __divmod__(self, other):
res1 = self // other
res2 = self % other
return res1, res2
def __rdivmod__(self, other):
res1 = other // self
res2 = other % self
return res1, res2
class Index(Series):
_partition_type = pd.Index
_is_partition_type = staticmethod(is_index_like)
_token_prefix = 'index-'
_accessors = set()
_dt_attributes = {'nanosecond', 'microsecond', 'millisecond', 'dayofyear',
'minute', 'hour', 'day', 'dayofweek', 'second', 'week',
'weekday', 'weekofyear', 'month', 'quarter', 'year'}
_cat_attributes = {'known', 'as_known', 'as_unknown', 'add_categories',
'categories', 'remove_categories', 'reorder_categories',
'as_ordered', 'codes', 'remove_unused_categories',
'set_categories', 'as_unordered', 'ordered',
'rename_categories'}
def __getattr__(self, key):
if is_categorical_dtype(self.dtype) and key in self._cat_attributes:
return getattr(self.cat, key)
elif key in self._dt_attributes:
return getattr(self.dt, key)
raise AttributeError("'Index' object has no attribute %r" % key)
def __dir__(self):
out = super(Index, self).__dir__()
out.extend(self._dt_attributes)
if is_categorical_dtype(self.dtype):
out.extend(self._cat_attributes)
return out
@property
def index(self):
msg = "'{0}' object has no attribute 'index'"
raise AttributeError(msg.format(self.__class__.__name__))
def __array_wrap__(self, array, context=None):
return pd.Index(array, name=self.name)
def head(self, n=5, compute=True):
""" First n items of the Index.
Caveat, this only checks the first partition.
"""
name = 'head-%d-%s' % (n, self._name)
dsk = {(name, 0): (operator.getitem, (self._name, 0), slice(0, n))}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
result = new_dd_object(graph, name, self._meta, self.divisions[:2])
if compute:
result = result.compute()
return result
@derived_from(pd.Index)
def max(self, split_every=False):
return self.reduction(M.max, meta=self._meta_nonempty.max(),
token=self._token_prefix + 'max',
split_every=split_every)
@derived_from(pd.Index)
def min(self, split_every=False):
return self.reduction(M.min, meta=self._meta_nonempty.min(),
token=self._token_prefix + 'min',
split_every=split_every)
def count(self, split_every=False):
return self.reduction(methods.index_count, np.sum,
token='index-count', meta=int,
split_every=split_every)
@derived_from(pd.Index)
def shift(self, periods=1, freq=None):
if isinstance(self._meta, pd.PeriodIndex):
if freq is not None:
raise ValueError("PeriodIndex doesn't accept `freq` argument")
meta = self._meta_nonempty.shift(periods)
out = self.map_partitions(M.shift, periods, meta=meta,
token='shift',
transform_divisions=False)
else:
# Pandas will raise for other index types that don't implement shift
meta = self._meta_nonempty.shift(periods, freq=freq)
out = self.map_partitions(M.shift, periods, token='shift',
meta=meta, freq=freq,
transform_divisions=False)
if freq is None:
freq = meta.freq
return maybe_shift_divisions(out, periods, freq=freq)
@derived_from(pd.Index)
def to_series(self):
return self.map_partitions(M.to_series,
meta=self._meta.to_series())
@derived_from(pd.Index, ua_args=['index'])
def to_frame(self, index=True, name=None):
if not index:
raise NotImplementedError()
if PANDAS_VERSION >= '0.24.0':
return self.map_partitions(M.to_frame, index, name,
meta=self._meta.to_frame(index, name))
else:
if name is not None:
raise ValueError("The 'name' keyword was added in pandas 0.24.0. "
"Your version of pandas is '{}'.".format(PANDAS_VERSION))
else:
return self.map_partitions(M.to_frame,
meta=self._meta.to_frame())
class DataFrame(_Frame):
"""
Parallel Pandas DataFrame
Do not use this class directly. Instead use functions like
``dd.read_csv``, ``dd.read_parquet``, or ``dd.from_pandas``.
Parameters
----------
dsk: dict
The dask graph to compute this DataFrame
name: str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame
meta: pandas.DataFrame
An empty ``pandas.DataFrame`` with names, dtypes, and index matching
the expected output.
divisions: tuple of index values
Values along which we partition our blocks on the index
"""
_partition_type = pd.DataFrame
_is_partition_type = staticmethod(is_dataframe_like)
_token_prefix = 'dataframe-'
_accessors = set()
def __array_wrap__(self, array, context=None):
if isinstance(context, tuple) and len(context) > 0:
if isinstance(context[1][0], np.ndarray) and context[1][0].shape == ():
index = None
else:
index = context[1][0].index
return pd.DataFrame(array, index=index, columns=self.columns)
@property
def columns(self):
return self._meta.columns
@columns.setter
def columns(self, columns):
renamed = _rename_dask(self, columns)
self._meta = renamed._meta
self._name = renamed._name
self.dask = renamed.dask
@property
def iloc(self):
"""Purely integer-location based indexing for selection by position.
Only indexing the column positions is supported. Trying to select
row positions will raise a ValueError.
See :ref:`dataframe.indexing` for more.
Examples
--------
>>> df.iloc[:, [2, 0, 1]] # doctest: +SKIP
"""
from .indexing import _iLocIndexer
return _iLocIndexer(self)
def __getitem__(self, key):
name = 'getitem-%s' % tokenize(self, key)
if np.isscalar(key) or isinstance(key, (tuple, string_types)):
if isinstance(self._meta.index, (pd.DatetimeIndex, pd.PeriodIndex)):
if key not in self._meta.columns:
return self.loc[key]
# error is raised from pandas
meta = self._meta[_extract_meta(key)]
dsk = partitionwise_graph(operator.getitem, name, self, key)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return new_dd_object(graph, name, meta, self.divisions)
elif isinstance(key, slice):
from pandas.api.types import is_float_dtype
is_integer_slice = any(isinstance(i, Integral)
for i in (key.start, key.step, key.stop))
# Slicing with integer labels is always iloc based except for a
# float indexer for some reason
            if is_integer_slice and not is_float_dtype(self.index.dtype):
                return self.iloc[key]
else:
return self.loc[key]
if (isinstance(key, (np.ndarray, list)) or (
not is_dask_collection(key) and (is_series_like(key) or is_index_like(key)))):
# error is raised from pandas
meta = self._meta[_extract_meta(key)]
dsk = partitionwise_graph(operator.getitem, name, self, key)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return new_dd_object(graph, name, meta, self.divisions)
if isinstance(key, Series):
# do not perform dummy calculation, as columns will not be changed.
#
if self.divisions != key.divisions:
from .multi import _maybe_align_partitions
self, key = _maybe_align_partitions([self, key])
dsk = partitionwise_graph(operator.getitem, name, self, key)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self, key])
return new_dd_object(graph, name, self, self.divisions)
raise NotImplementedError(key)
def __setitem__(self, key, value):
if isinstance(key, (tuple, list)) and isinstance(value, DataFrame):
df = self.assign(**{k: value[c]
for k, c in zip(key, value.columns)})
elif isinstance(key, pd.Index) and not isinstance(value, DataFrame):
key = list(key)
df = self.assign(**{k: value for k in key})
else:
df = self.assign(**{key: value})
self.dask = df.dask
self._name = df._name
self._meta = df._meta
self.divisions = df.divisions
def __delitem__(self, key):
result = self.drop([key], axis=1)
self.dask = result.dask
self._name = result._name
self._meta = result._meta
def __setattr__(self, key, value):
try:
columns = object.__getattribute__(self, '_meta').columns
except AttributeError:
columns = ()
if key in columns:
self[key] = value
else:
object.__setattr__(self, key, value)
def __getattr__(self, key):
if key in self.columns:
return self[key]
else:
raise AttributeError("'DataFrame' object has no attribute %r" % key)
def __dir__(self):
o = set(dir(type(self)))
o.update(self.__dict__)
o.update(c for c in self.columns if
(isinstance(c, string_types) and
isidentifier(c)))
return list(o)
def _ipython_key_completions_(self):
return self.columns.tolist()
@property
def ndim(self):
""" Return dimensionality """
return 2
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
The number of rows is a Delayed result. The number of columns
is a concrete integer.
Examples
--------
        >>> df.shape  # doctest: +SKIP
(Delayed('int-07f06075-5ecc-4d77-817e-63c69a9188a8'), 2)
"""
col_size = len(self.columns)
row_size = delayed(int)(self.size / col_size)
return (row_size, col_size)
@property
def dtypes(self):
""" Return data types """
return self._meta.dtypes
@derived_from(pd.DataFrame)
def get_dtype_counts(self):
return self._meta.get_dtype_counts()
@derived_from(pd.DataFrame)
def get_ftype_counts(self):
return self._meta.get_ftype_counts()
@derived_from(pd.DataFrame)
def select_dtypes(self, include=None, exclude=None):
cs = self._meta.select_dtypes(include=include, exclude=exclude).columns
return self[list(cs)]
def set_index(self, other, drop=True, sorted=False, npartitions=None,
divisions=None, inplace=False, **kwargs):
"""Set the DataFrame index (row labels) using an existing column
This realigns the dataset to be sorted by a new column. This can have a
significant impact on performance, because joins, groupbys, lookups, etc.
are all much faster on that column. However, this performance increase
comes with a cost, sorting a parallel dataset requires expensive shuffles.
Often we ``set_index`` once directly after data ingest and filtering and
then perform many cheap computations off of the sorted dataset.
This function operates exactly like ``pandas.set_index`` except with
different performance costs (it is much more expensive). Under normal
operation this function does an initial pass over the index column to
        compute approximate quantiles to serve as future divisions. It then passes
over the data a second time, splitting up each input partition into several
pieces and sharing those pieces to all of the output partitions now in
sorted order.
In some cases we can alleviate those costs, for example if your dataset is
sorted already then we can avoid making many small pieces or if you know
good values to split the new index column then we can avoid the initial
pass over the data. For example if your new index is a datetime index and
your data is already sorted by day then this entire operation can be done
for free. You can control these options with the following parameters.
Parameters
----------
df: Dask DataFrame
index: string or Dask Series
npartitions: int, None, or 'auto'
The ideal number of output partitions. If None use the same as
the input. If 'auto' then decide by memory use.
shuffle: string, optional
Either ``'disk'`` for single-node operation or ``'tasks'`` for
distributed operation. Will be inferred by your current scheduler.
sorted: bool, optional
If the index column is already sorted in increasing order.
Defaults to False
divisions: list, optional
Known values on which to separate index values of the partitions.
See https://docs.dask.org/en/latest/dataframe-design.html#partitions
Defaults to computing this with a single pass over the data. Note
that if ``sorted=True``, specified divisions are assumed to match
the existing partitions in the data. If this is untrue, you should
leave divisions empty and call ``repartition`` after ``set_index``.
inplace : bool, optional
Modifying the DataFrame in place is not supported by Dask.
Defaults to False.
compute: bool
Whether or not to trigger an immediate computation. Defaults to False.
Examples
--------
>>> df2 = df.set_index('x') # doctest: +SKIP
>>> df2 = df.set_index(d.x) # doctest: +SKIP
>>> df2 = df.set_index(d.timestamp, sorted=True) # doctest: +SKIP
A common case is when we have a datetime column that we know to be
sorted and is cleanly divided by day. We can set this index for free
by specifying both that the column is pre-sorted and the particular
        divisions along which it is separated
>>> import pandas as pd
>>> divisions = pd.date_range('2000', '2010', freq='1D')
>>> df2 = df.set_index('timestamp', sorted=True, divisions=divisions) # doctest: +SKIP
"""
if inplace:
raise NotImplementedError("The inplace= keyword is not supported")
pre_sorted = sorted
del sorted
if divisions is not None:
check_divisions(divisions)
if pre_sorted:
from .shuffle import set_sorted_index
return set_sorted_index(self, other, drop=drop, divisions=divisions,
**kwargs)
else:
from .shuffle import set_index
return set_index(self, other, drop=drop, npartitions=npartitions,
divisions=divisions, **kwargs)
@derived_from(pd.DataFrame)
def nlargest(self, n=5, columns=None, split_every=None):
        token = 'dataframe-nlargest'
return aca(self, chunk=M.nlargest, aggregate=M.nlargest,
meta=self._meta, token=token, split_every=split_every,
n=n, columns=columns)
@derived_from(pd.DataFrame)
def nsmallest(self, n=5, columns=None, split_every=None):
        token = 'dataframe-nsmallest'
return aca(self, chunk=M.nsmallest, aggregate=M.nsmallest,
meta=self._meta, token=token, split_every=split_every,
n=n, columns=columns)
@derived_from(pd.DataFrame)
def groupby(self, by=None, **kwargs):
from dask.dataframe.groupby import DataFrameGroupBy
return DataFrameGroupBy(self, by=by, **kwargs)
@wraps(categorize)
def categorize(self, columns=None, index=None, split_every=None, **kwargs):
return categorize(self, columns=columns, index=index,
split_every=split_every, **kwargs)
@derived_from(pd.DataFrame)
def assign(self, **kwargs):
for k, v in kwargs.items():
if not (isinstance(v, Scalar) or is_series_like(v) or
callable(v) or pd.api.types.is_scalar(v) or
is_index_like(v)):
raise TypeError("Column assignment doesn't support type "
"{0}".format(typename(type(v))))
if callable(v):
kwargs[k] = v(self)
pairs = list(sum(kwargs.items(), ()))
# Figure out columns of the output
df2 = self._meta_nonempty.assign(**_extract_meta(kwargs, nonempty=True))
return elemwise(methods.assign, self, *pairs, meta=df2)
@derived_from(pd.DataFrame, ua_args=['index'])
def rename(self, index=None, columns=None):
if index is not None:
raise ValueError("Cannot rename index.")
# *args here is index, columns but columns arg is already used
return self.map_partitions(M.rename, None, columns=columns)
def query(self, expr, **kwargs):
""" Filter dataframe with complex expression
Blocked version of pd.DataFrame.query
This is like the sequential version except that this will also happen
in many threads. This may conflict with ``numexpr`` which will use
multiple threads itself. We recommend that you set numexpr to use a
single thread
import numexpr
numexpr.set_nthreads(1)
See also
--------
pandas.DataFrame.query
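        Examples
        --------
        Illustrative sketch only (``ddf`` is assumed to be a dask DataFrame
        with numeric columns ``x`` and ``y``):
        >>> filtered = ddf.query('x > 0 and y < 5')  # doctest: +SKIP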
"""
return self.map_partitions(M.query, expr, **kwargs)
@derived_from(pd.DataFrame)
def eval(self, expr, inplace=None, **kwargs):
if inplace is None:
inplace = False
if '=' in expr and inplace in (True, None):
raise NotImplementedError("Inplace eval not supported."
" Please use inplace=False")
meta = self._meta.eval(expr, inplace=inplace, **kwargs)
return self.map_partitions(M.eval, expr, meta=meta, inplace=inplace, **kwargs)
@derived_from(pd.DataFrame)
def dropna(self, how='any', subset=None, thresh=None):
return self.map_partitions(M.dropna, how=how, subset=subset, thresh=thresh)
@derived_from(pd.DataFrame)
def clip(self, lower=None, upper=None, out=None):
if out is not None:
raise ValueError("'out' must be None")
return self.map_partitions(M.clip, lower=lower, upper=upper)
@derived_from(pd.DataFrame)
def clip_lower(self, threshold):
return self.map_partitions(M.clip_lower, threshold=threshold)
@derived_from(pd.DataFrame)
def clip_upper(self, threshold):
return self.map_partitions(M.clip_upper, threshold=threshold)
@derived_from(pd.DataFrame)
def squeeze(self, axis=None):
if axis in [None, 1]:
if len(self.columns) == 1:
return self[self.columns[0]]
else:
return self
elif axis == 0:
raise NotImplementedError("{0} does not support "
"squeeze along axis 0".format(type(self)))
elif axis not in [0, 1, None]:
raise ValueError('No axis {0} for object type {1}'.format(
axis, type(self)))
@derived_from(pd.DataFrame)
def to_timestamp(self, freq=None, how='start', axis=0):
df = elemwise(M.to_timestamp, self, freq, how, axis)
df.divisions = tuple(pd.Index(self.divisions).to_timestamp())
return df
def to_bag(self, index=False):
"""Convert to a dask Bag of tuples of each row.
Parameters
----------
index : bool, optional
If True, the index is included as the first element of each tuple.
Default is False.
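        Examples
        --------
        Illustrative sketch only (``ddf`` is assumed to be a dask DataFrame):
        >>> bag = ddf.to_bag(index=True)  # doctest: +SKIP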
"""
from .io import to_bag
return to_bag(self, index)
def to_parquet(self, path, *args, **kwargs):
""" See dd.to_parquet docstring for more information """
from .io import to_parquet
return to_parquet(self, path, *args, **kwargs)
@derived_from(pd.DataFrame)
def to_string(self, max_rows=5):
# option_context doesn't affect
return self._repr_data().to_string(max_rows=max_rows,
show_dimensions=False)
def _get_numeric_data(self, how='any', subset=None):
# calculate columns to avoid unnecessary calculation
numerics = self._meta._get_numeric_data()
if len(numerics.columns) < len(self.columns):
name = self._token_prefix + '-get_numeric_data'
return self.map_partitions(M._get_numeric_data,
meta=numerics, token=name)
else:
# use myself if all numerics
return self
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 1, 'index', 'columns', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis)
@derived_from(pd.DataFrame)
def drop(self, labels, axis=0, errors='raise'):
axis = self._validate_axis(axis)
if axis == 1:
return self.map_partitions(M.drop, labels, axis=axis, errors=errors)
raise NotImplementedError("Drop currently only works for axis=1")
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, suffixes=('_x', '_y'),
indicator=False, npartitions=None, shuffle=None):
"""Merge the DataFrame with another DataFrame
This will merge the two datasets, either on the indices, a certain column
in each dataset or the index in one dataset and the column in another.
Parameters
----------
right: dask.dataframe.DataFrame
how : {'left', 'right', 'outer', 'inner'}, default: 'inner'
How to handle the operation of the two objects:
- left: use calling frame's index (or column if on is specified)
- right: use other frame's index
- outer: form union of calling frame's index (or column if on is
specified) with other frame's index, and sort it
lexicographically
- inner: form intersection of calling frame's index (or column if
on is specified) with other frame's index, preserving the order
of the calling's one
on : label or list
Column or index level names to join on. These must be found in both
DataFrames. If on is None and not merging on indexes then this
defaults to the intersection of the columns in both DataFrames.
left_on : label or list, or array-like
            Column to join on in the left DataFrame. Unlike in pandas,
            arrays and lists are only supported if their length is 1.
right_on : label or list, or array-like
            Column to join on in the right DataFrame. Unlike in pandas,
            arrays and lists are only supported if their length is 1.
left_index : boolean, default False
Use the index from the left DataFrame as the join key.
right_index : boolean, default False
Use the index from the right DataFrame as the join key.
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and
right side, respectively
indicator : boolean or string, default False
If True, adds a column to output DataFrame called "_merge" with
information on the source of each row. If string, column with
information on source of each row will be added to output DataFrame,
            and the column will be named after that string. The information column is
Categorical-type and takes on a value of "left_only" for observations
whose merge key only appears in `left` DataFrame, "right_only" for
observations whose merge key only appears in `right` DataFrame,
and "both" if the observation’s merge key is found in both.
npartitions: int, None, or 'auto'
The ideal number of output partitions. This is only utilised when
performing a hash_join (merging on columns only). If `None`
npartitions = max(lhs.npartitions, rhs.npartitions)
shuffle: {'disk', 'tasks'}, optional
Either ``'disk'`` for single-node operation or ``'tasks'`` for
distributed operation. Will be inferred by your current scheduler.
Notes
-----
There are three ways to join dataframes:
1. Joining on indices. In this case the divisions are
aligned using the function ``dask.dataframe.multi.align_partitions``.
Afterwards, each partition is merged with the pandas merge function.
2. Joining one on index and one on column. In this case the divisions of
dataframe merged by index (:math:`d_i`) are used to divide the column
merged dataframe (:math:`d_c`) one using
``dask.dataframe.multi.rearrange_by_divisions``. In this case the
merged dataframe (:math:`d_m`) has the exact same divisions
as (:math:`d_i`). This can lead to issues if you merge multiple rows from
(:math:`d_c`) to one row in (:math:`d_i`).
3. Joining both on columns. In this case a hash join is performed using
``dask.dataframe.multi.hash_join``.
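        Examples
        --------
        Illustrative sketch only (``left`` and ``right`` are assumed to be dask
        DataFrames that share a ``'key'`` column):
        >>> joined = left.merge(right, on='key', how='inner')  # doctest: +SKIP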
"""
if not is_dataframe_like(right):
raise ValueError('right must be DataFrame')
from .multi import merge
return merge(self, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, suffixes=suffixes,
npartitions=npartitions, indicator=indicator,
shuffle=shuffle)
@derived_from(pd.DataFrame)
def join(self, other, on=None, how='left',
lsuffix='', rsuffix='', npartitions=None, shuffle=None):
if not is_dataframe_like(other):
raise ValueError('other must be DataFrame')
from .multi import merge
return merge(self, other, how=how,
left_index=on is None, right_index=True,
left_on=on, suffixes=[lsuffix, rsuffix],
npartitions=npartitions, shuffle=shuffle)
@derived_from(pd.DataFrame)
def append(self, other, interleave_partitions=False):
if isinstance(other, Series):
            msg = ('Unable to append a dd.Series to a dd.DataFrame. '
                   'Use a pd.Series to append it as a row.')
raise ValueError(msg)
elif is_series_like(other):
other = other.to_frame().T
return super(DataFrame, self).append(
other, interleave_partitions=interleave_partitions)
@derived_from(pd.DataFrame)
def iterrows(self):
for i in range(self.npartitions):
df = self.get_partition(i).compute()
for row in df.iterrows():
yield row
@derived_from(pd.DataFrame)
def itertuples(self, index=True, name='Pandas'):
for i in range(self.npartitions):
df = self.get_partition(i).compute()
for row in df.itertuples(index=index, name=name):
yield row
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like DataFrame.add to this class """
# name must be explicitly passed for div method whose name is truediv
def meth(self, other, axis='columns', level=None, fill_value=None):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
if axis in (1, 'columns'):
# When axis=1 and other is a series, `other` is transposed
# and the operator is applied broadcast across rows. This
# isn't supported with dd.Series.
if isinstance(other, Series):
msg = 'Unable to {0} dd.Series with axis=1'.format(name)
raise ValueError(msg)
elif is_series_like(other):
# Special case for pd.Series to avoid unwanted partitioning
# of other. We pass it in as a kwarg to prevent this.
meta = _emulate(op, self, other=other, axis=axis,
fill_value=fill_value)
return map_partitions(op, self, other=other, meta=meta,
axis=axis, fill_value=fill_value)
meta = _emulate(op, self, other, axis=axis, fill_value=fill_value)
return map_partitions(op, self, other, meta=meta,
axis=axis, fill_value=fill_value)
meth.__doc__ = skip_doctest(op.__doc__)
bind_method(cls, name, meth)
@classmethod
def _bind_comparison_method(cls, name, comparison):
""" bind comparison method like DataFrame.eq to this class """
def meth(self, other, axis='columns', level=None):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
return elemwise(comparison, self, other, axis=axis)
meth.__doc__ = skip_doctest(comparison.__doc__)
bind_method(cls, name, meth)
@insert_meta_param_description(pad=12)
def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None,
args=(), meta=no_default, **kwds):
""" Parallel version of pandas.DataFrame.apply
This mimics the pandas version except for the following:
1. Only ``axis=1`` is supported (and must be specified explicitly).
2. The user should provide output metadata via the `meta` keyword.
Parameters
----------
func : function
Function to apply to each column/row
axis : {0 or 'index', 1 or 'columns'}, default 0
- 0 or 'index': apply function to each column (NOT SUPPORTED)
- 1 or 'columns': apply function to each row
$META
args : tuple
Positional arguments to pass to function in addition to the array/series
Additional keyword arguments will be passed as keywords to the function
Returns
-------
applied : Series or DataFrame
Examples
--------
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
... 'y': [1., 2., 3., 4., 5.]})
>>> ddf = dd.from_pandas(df, npartitions=2)
        Apply a function row-wise, passing in extra arguments in ``args`` and
        ``kwargs``:
>>> def myadd(row, a, b=1):
... return row.sum() + a + b
>>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5) # doctest: +SKIP
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in many cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manually specify the output metadata with the ``meta`` keyword. This
can be specified in many forms, for more information see
``dask.dataframe.utils.make_meta``.
Here we specify the output is a Series with name ``'x'``, and dtype
``float64``:
>>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5, meta=('x', 'f8'))
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ddf.apply(lambda row: row + 1, axis=1, meta=ddf)
See Also
--------
dask.DataFrame.map_partitions
"""
axis = self._validate_axis(axis)
pandas_kwargs = {
'axis': axis,
'broadcast': broadcast,
'raw': raw,
'reduce': None,
}
if PANDAS_VERSION >= '0.23.0':
kwds.setdefault('result_type', None)
kwds.update(pandas_kwargs)
if axis == 0:
msg = ("dd.DataFrame.apply only supports axis=1\n"
" Try: df.apply(func, axis=1)")
raise NotImplementedError(msg)
if meta is no_default:
meta = _emulate(M.apply, self._meta_nonempty, func,
args=args, udf=True, **kwds)
warnings.warn(meta_warning(meta))
return map_partitions(M.apply, self, func, args=args, meta=meta, **kwds)
@derived_from(pd.DataFrame)
def applymap(self, func, meta='__no_default__'):
return elemwise(M.applymap, self, func, meta=meta)
@derived_from(pd.DataFrame)
def round(self, decimals=0):
return elemwise(M.round, self, decimals)
@derived_from(pd.DataFrame)
def cov(self, min_periods=None, split_every=False):
return cov_corr(self, min_periods, split_every=split_every)
@derived_from(pd.DataFrame)
def corr(self, method='pearson', min_periods=None, split_every=False):
if method != 'pearson':
raise NotImplementedError("Only Pearson correlation has been "
"implemented")
return cov_corr(self, min_periods, True, split_every=split_every)
def info(self, buf=None, verbose=False, memory_usage=False):
"""
Concise summary of a Dask DataFrame.
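        Examples
        --------
        Illustrative sketch only (``ddf`` is assumed to be a dask DataFrame;
        the summary is written to ``buf``, stdout by default):
        >>> ddf.info(verbose=True, memory_usage=True)  # doctest: +SKIP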
"""
if buf is None:
import sys
buf = sys.stdout
lines = [str(type(self))]
if len(self.columns) == 0:
lines.append('Index: 0 entries')
lines.append('Empty %s' % type(self).__name__)
put_lines(buf, lines)
return
# Group and execute the required computations
computations = {}
if verbose:
computations.update({'index': self.index, 'count': self.count()})
if memory_usage:
computations.update({'memory_usage': self.map_partitions(M.memory_usage, index=True)})
computations = dict(zip(computations.keys(), da.compute(*computations.values())))
if verbose:
index = computations['index']
counts = computations['count']
lines.append(index_summary(index))
lines.append('Data columns (total {} columns):'.format(len(self.columns)))
from pandas.io.formats.printing import pprint_thing
space = max([len(pprint_thing(k)) for k in self.columns]) + 3
column_template = '{!s:<%d} {} non-null {}' % space
column_info = [column_template.format(pprint_thing(x[0]), x[1], x[2])
for x in zip(self.columns, counts, self.dtypes)]
else:
column_info = [index_summary(self.columns, name='Columns')]
lines.extend(column_info)
dtype_counts = ['%s(%d)' % k for k in sorted(self.dtypes.value_counts().iteritems(), key=str)]
lines.append('dtypes: {}'.format(', '.join(dtype_counts)))
if memory_usage:
memory_int = computations['memory_usage'].sum()
lines.append('memory usage: {}\n'.format(memory_repr(memory_int)))
put_lines(buf, lines)
@derived_from(pd.DataFrame)
def memory_usage(self, index=True, deep=False):
result = self.map_partitions(M.memory_usage, index=index, deep=deep)
result = result.groupby(result.index).sum()
return result
def pivot_table(self, index=None, columns=None,
values=None, aggfunc='mean'):
"""
Create a spreadsheet-style pivot table as a DataFrame. Target ``columns``
must have category dtype to infer result's ``columns``.
``index``, ``columns``, ``values`` and ``aggfunc`` must be all scalar.
Parameters
----------
values : scalar
column to aggregate
index : scalar
column to be index
columns : scalar
column to be columns
aggfunc : {'mean', 'sum', 'count'}, default 'mean'
Returns
-------
table : DataFrame
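        Examples
        --------
        Illustrative sketch only (``ddf`` is assumed to have a categorical
        ``'group'`` column with known categories and a numeric ``'value'``
        column):
        >>> table = ddf.pivot_table(index='id', columns='group',
        ...                         values='value', aggfunc='sum')  # doctest: +SKIP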
"""
from .reshape import pivot_table
return pivot_table(self, index=index, columns=columns, values=values,
aggfunc=aggfunc)
def to_records(self, index=False, lengths=None):
from .io import to_records
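        # ``lengths=True`` computes the partition lengths up front so the
        # resulting dask array is built with known chunk sizes.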
if lengths is True:
lengths = tuple(self.map_partitions(len).compute())
records = to_records(self)
chunks = self._validate_chunks(records, lengths)
records._chunks = (chunks[0],)
return records
@derived_from(pd.DataFrame)
def to_html(self, max_rows=5):
# pd.Series doesn't have html repr
data = self._repr_data().to_html(max_rows=max_rows,
show_dimensions=False)
return self._HTML_FMT.format(data=data, name=key_split(self._name),
task=len(self.dask))
def _repr_data(self):
meta = self._meta
index = self._repr_divisions
series_list = [_repr_data_series(s, index=index) for _, s in meta.iteritems()]
return pd.concat(series_list, axis=1)
_HTML_FMT = """<div><strong>Dask DataFrame Structure:</strong></div>
{data}
<div>Dask Name: {name}, {task} tasks</div>"""
def _repr_html_(self):
data = self._repr_data().to_html(
max_rows=5,
show_dimensions=False,
notebook=True
)
return self._HTML_FMT.format(data=data, name=key_split(self._name),
task=len(self.dask))
def _select_columns_or_index(self, columns_or_index):
"""
Parameters
----------
columns_or_index
Column or index name, or a list of these
Returns
-------
dd.DataFrame
Dask DataFrame with columns corresponding to each column or
index level in columns_or_index. If included, the column
corresponding to the index level is named _index
"""
# Ensure columns_or_index is a list
columns_or_index = (columns_or_index
if isinstance(columns_or_index, list)
else [columns_or_index])
column_names = [n for n in columns_or_index if self._is_column_label_reference(n)]
selected_df = self[column_names]
if self._contains_index_name(columns_or_index):
# Index name was included
selected_df = selected_df.assign(_index=self.index)
return selected_df
def _is_column_label_reference(self, key):
"""
Test whether a key is a column label reference
To be considered a column label reference, `key` must match the name of at
least one column.
"""
return (not is_dask_collection(key) and
(np.isscalar(key) or isinstance(key, tuple)) and
key in self.columns)
# bind operators
for op in [operator.abs, operator.add, operator.and_, operator_div,
operator.eq, operator.gt, operator.ge, operator.inv,
operator.lt, operator.le, operator.mod, operator.mul,
operator.ne, operator.neg, operator.or_, operator.pow,
operator.sub, operator.truediv, operator.floordiv, operator.xor]:
_Frame._bind_operator(op)
Scalar._bind_operator(op)
for name in ['add', 'sub', 'mul', 'div',
'truediv', 'floordiv', 'mod', 'pow',
'radd', 'rsub', 'rmul', 'rdiv',
'rtruediv', 'rfloordiv', 'rmod', 'rpow']:
meth = getattr(pd.DataFrame, name)
DataFrame._bind_operator_method(name, meth)
meth = getattr(pd.Series, name)
Series._bind_operator_method(name, meth)
for name in ['lt', 'gt', 'le', 'ge', 'ne', 'eq']:
meth = getattr(pd.DataFrame, name)
DataFrame._bind_comparison_method(name, meth)
meth = getattr(pd.Series, name)
Series._bind_comparison_method(name, meth)
def is_broadcastable(dfs, s):
"""
This Series is broadcastable against another dataframe in the sequence
"""
return (isinstance(s, Series) and
s.npartitions == 1 and
s.known_divisions and
any(s.divisions == (min(df.columns), max(df.columns))
for df in dfs if isinstance(df, DataFrame)))
def elemwise(op, *args, **kwargs):
""" Elementwise operation for Dask dataframes
Parameters
----------
op: callable
Function to apply across input dataframes
*args: DataFrames, Series, Scalars, Arrays,
The arguments of the operation
    **kwargs: scalars
meta: pd.DataFrame, pd.Series (optional)
Valid metadata for the operation. Will evaluate on a small piece of
data if not provided.
transform_divisions: boolean
If the input is a ``dask.dataframe.Index`` we normally will also apply
the function onto the divisions and apply those transformed divisions
to the output. You can pass ``transform_divisions=False`` to override
this behavior
Examples
--------
>>> elemwise(operator.add, df.x, df.y) # doctest: +SKIP
"""
meta = kwargs.pop('meta', no_default)
out = kwargs.pop('out', None)
transform_divisions = kwargs.pop('transform_divisions', True)
_name = funcname(op) + '-' + tokenize(op, *args, **kwargs)
args = _maybe_from_pandas(args)
from .multi import _maybe_align_partitions
args = _maybe_align_partitions(args)
dasks = [arg for arg in args if isinstance(arg, (_Frame, Scalar, Array))]
dfs = [df for df in dasks if isinstance(df, _Frame)]
# Clean up dask arrays if present
for i, a in enumerate(dasks):
if not isinstance(a, Array):
continue
# Ensure that they have similar-ish chunk structure
if not all(not a.chunks or len(a.chunks[0]) == df.npartitions for df in dfs):
msg = ("When combining dask arrays with dataframes they must "
"match chunking exactly. Operation: %s" % funcname(op))
raise ValueError(msg)
# Rechunk to have a single chunk along all other axes
if a.ndim > 1:
a = a.rechunk({i + 1: d for i, d in enumerate(a.shape[1:])})
dasks[i] = a
divisions = dfs[0].divisions
if transform_divisions and isinstance(dfs[0], Index) and len(dfs) == 1:
try:
divisions = op(
*[pd.Index(arg.divisions) if arg is dfs[0] else arg for arg in args],
**kwargs
)
if isinstance(divisions, pd.Index):
divisions = divisions.tolist()
except Exception:
pass
else:
if not valid_divisions(divisions):
divisions = [None] * (dfs[0].npartitions + 1)
_is_broadcastable = partial(is_broadcastable, dfs)
dfs = list(remove(_is_broadcastable, dfs))
other = [(i, arg) for i, arg in enumerate(args)
if not isinstance(arg, (_Frame, Scalar, Array))]
# adjust the key length of Scalar
dsk = partitionwise_graph(op, _name, *args, **kwargs)
graph = HighLevelGraph.from_collections(_name, dsk, dependencies=dasks)
if meta is no_default:
if len(dfs) >= 2 and not all(hasattr(d, 'npartitions') for d in dasks):
# should not occur in current funcs
msg = 'elemwise with 2 or more DataFrames and Scalar is not supported'
raise NotImplementedError(msg)
# For broadcastable series, use no rows.
parts = [d._meta if _is_broadcastable(d)
else empty_like_safe(d, (), dtype=d.dtype) if isinstance(d, Array)
else d._meta_nonempty for d in dasks]
with raise_on_meta_error(funcname(op)):
meta = partial_by_order(*parts, function=op, other=other)
result = new_dd_object(graph, _name, meta, divisions)
return handle_out(out, result)
def handle_out(out, result):
""" Handle out parameters
If out is a dask.DataFrame, dask.Series or dask.Scalar then
this overwrites the contents of it with the result
"""
if isinstance(out, tuple):
if len(out) == 1:
out = out[0]
elif len(out) > 1:
raise NotImplementedError("The out parameter is not fully supported")
else:
out = None
if out is not None and type(out) != type(result):
raise TypeError(
"Mismatched types between result and out parameter. "
"out=%s, result=%s" % (str(type(out)), str(type(result))))
if isinstance(out, DataFrame):
if len(out.columns) != len(result.columns):
raise ValueError(
"Mismatched columns count between result and out parameter. "
"out=%s, result=%s" % (str(len(out.columns)), str(len(result.columns))))
if isinstance(out, (Series, DataFrame, Scalar)):
out._meta = result._meta
out._name = result._name
out.dask = result.dask
if not isinstance(out, Scalar):
out.divisions = result.divisions
elif out is not None:
msg = (
"The out parameter is not fully supported."
" Received type %s, expected %s " % (
typename(type(out)), typename(type(result)))
)
raise NotImplementedError(msg)
else:
return result
def _maybe_from_pandas(dfs):
from .io import from_pandas
dfs = [from_pandas(df, 1)
if (is_series_like(df) or is_dataframe_like(df)) and not is_dask_collection(df)
else df for df in dfs]
return dfs
def hash_shard(df, nparts, split_out_setup=None, split_out_setup_kwargs=None):
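    # Hash each row (optionally a projection built by ``split_out_setup``) and
    # bucket rows by ``hash % nparts`` so that equal keys land in the same shard.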
if split_out_setup:
h = split_out_setup(df, **(split_out_setup_kwargs or {}))
else:
h = df
h = hash_pandas_object(h, index=False)
if is_series_like(h):
h = h._values
h %= nparts
return {i: df.iloc[h == i] for i in range(nparts)}
def split_evenly(df, k):
""" Split dataframe into k roughly equal parts """
divisions = np.linspace(0, len(df), k + 1).astype(int)
return {i: df.iloc[divisions[i]: divisions[i + 1]] for i in range(k)}
def split_out_on_index(df):
h = df.index
if isinstance(h, pd.MultiIndex):
h = pd.DataFrame([], index=h).reset_index()
return h
def split_out_on_cols(df, cols=None):
return df[cols]
@insert_meta_param_description
def apply_concat_apply(args, chunk=None, aggregate=None, combine=None,
meta=no_default, token=None, chunk_kwargs=None,
aggregate_kwargs=None, combine_kwargs=None,
split_every=None, split_out=None, split_out_setup=None,
split_out_setup_kwargs=None, **kwargs):
"""Apply a function to blocks, then concat, then apply again
Parameters
----------
args :
Positional arguments for the `chunk` function. All `dask.dataframe`
objects should be partitioned and indexed equivalently.
chunk : function [block-per-arg] -> block
Function to operate on each block of data
aggregate : function concatenated-block -> block
Function to operate on the concatenated result of chunk
combine : function concatenated-block -> block, optional
Function to operate on intermediate concatenated results of chunk
in a tree-reduction. If not provided, defaults to aggregate.
$META
token : str, optional
The name to use for the output keys.
chunk_kwargs : dict, optional
Keywords for the chunk function only.
aggregate_kwargs : dict, optional
Keywords for the aggregate function only.
combine_kwargs : dict, optional
Keywords for the combine function only.
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used,
and all intermediates will be concatenated and passed to ``aggregate``.
Default is 8.
split_out : int, optional
Number of output partitions. Split occurs after first chunk reduction.
split_out_setup : callable, optional
If provided, this function is called on each chunk before performing
the hash-split. It should return a pandas object, where each row
(excluding the index) is hashed. If not provided, the chunk is hashed
as is.
split_out_setup_kwargs : dict, optional
Keywords for the `split_out_setup` function only.
kwargs :
All remaining keywords will be passed to ``chunk``, ``aggregate``, and
``combine``.
Examples
--------
>>> def chunk(a_block, b_block):
... pass
>>> def agg(df):
... pass
>>> apply_concat_apply([a, b], chunk=chunk, aggregate=agg) # doctest: +SKIP
"""
if chunk_kwargs is None:
chunk_kwargs = dict()
if aggregate_kwargs is None:
aggregate_kwargs = dict()
chunk_kwargs.update(kwargs)
aggregate_kwargs.update(kwargs)
if combine is None:
if combine_kwargs:
raise ValueError("`combine_kwargs` provided with no `combine`")
combine = aggregate
combine_kwargs = aggregate_kwargs
else:
if combine_kwargs is None:
combine_kwargs = dict()
combine_kwargs.update(kwargs)
if not isinstance(args, (tuple, list)):
args = [args]
dfs = [arg for arg in args if isinstance(arg, _Frame)]
npartitions = set(arg.npartitions for arg in dfs)
if len(npartitions) > 1:
raise ValueError("All arguments must have same number of partitions")
npartitions = npartitions.pop()
if split_every is None:
split_every = 8
elif split_every is False:
split_every = npartitions
elif split_every < 2 or not isinstance(split_every, Integral):
raise ValueError("split_every must be an integer >= 2")
token_key = tokenize(token or (chunk, aggregate), meta, args,
chunk_kwargs, aggregate_kwargs, combine_kwargs,
split_every, split_out, split_out_setup,
split_out_setup_kwargs)
# Chunk
a = '{0}-chunk-{1}'.format(token or funcname(chunk), token_key)
if len(args) == 1 and isinstance(args[0], _Frame) and not chunk_kwargs:
dsk = {(a, 0, i, 0): (chunk, key)
for i, key in enumerate(args[0].__dask_keys__())}
else:
dsk = {(a, 0, i, 0): (apply, chunk,
[(x._name, i) if isinstance(x, _Frame)
else x for x in args], chunk_kwargs)
for i in range(npartitions)}
# Split
if split_out and split_out > 1:
split_prefix = 'split-%s' % token_key
shard_prefix = 'shard-%s' % token_key
for i in range(npartitions):
dsk[(split_prefix, i)] = (hash_shard, (a, 0, i, 0), split_out,
split_out_setup, split_out_setup_kwargs)
for j in range(split_out):
dsk[(shard_prefix, 0, i, j)] = (getitem, (split_prefix, i), j)
a = shard_prefix
else:
split_out = 1
# Combine
b = '{0}-combine-{1}'.format(token or funcname(combine), token_key)
k = npartitions
depth = 0
while k > split_every:
for part_i, inds in enumerate(partition_all(split_every, range(k))):
for j in range(split_out):
conc = (_concat, [(a, depth, i, j) for i in inds])
if combine_kwargs:
dsk[(b, depth + 1, part_i, j)] = (apply, combine, [conc], combine_kwargs)
else:
dsk[(b, depth + 1, part_i, j)] = (combine, conc)
k = part_i + 1
a = b
depth += 1
# Aggregate
for j in range(split_out):
b = '{0}-agg-{1}'.format(token or funcname(aggregate), token_key)
conc = (_concat, [(a, depth, i, j) for i in range(k)])
if aggregate_kwargs:
dsk[(b, j)] = (apply, aggregate, [conc], aggregate_kwargs)
else:
dsk[(b, j)] = (aggregate, conc)
if meta is no_default:
meta_chunk = _emulate(chunk, *args, udf=True, **chunk_kwargs)
meta = _emulate(aggregate, _concat([meta_chunk]), udf=True,
**aggregate_kwargs)
meta = make_meta(meta, index=(getattr(make_meta(dfs[0]), 'index', None)
if dfs else None))
graph = HighLevelGraph.from_collections(b, dsk, dependencies=dfs)
divisions = [None] * (split_out + 1)
return new_dd_object(graph, b, meta, divisions)
aca = apply_concat_apply
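# Illustrative sketch (not part of the original module): a per-column sum can be
# phrased as an apply-concat-apply: chunk each partition with DataFrame.sum and
# aggregate the concatenated partial sums with another sum. `ddf` is a
# hypothetical dask DataFrame.
#
# >>> total = aca([ddf], chunk=lambda df: df.sum(),
# ...             aggregate=lambda df: df.sum(),
# ...             token='example-sum')                       # doctest: +SKIP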
def _extract_meta(x, nonempty=False):
"""
Extract internal cache data (``_meta``) from dd.DataFrame / dd.Series
"""
if isinstance(x, (Scalar, _Frame)):
return x._meta_nonempty if nonempty else x._meta
elif isinstance(x, list):
return [_extract_meta(_x, nonempty) for _x in x]
elif isinstance(x, tuple):
return tuple([_extract_meta(_x, nonempty) for _x in x])
elif isinstance(x, dict):
res = {}
for k in x:
res[k] = _extract_meta(x[k], nonempty)
return res
elif isinstance(x, Delayed):
raise ValueError("Cannot infer dataframe metadata with a `dask.delayed` argument")
else:
return x
def _emulate(func, *args, **kwargs):
"""
Apply a function using args / kwargs. If arguments contain dd.DataFrame /
dd.Series, using internal cache (``_meta``) for calculation
"""
with raise_on_meta_error(funcname(func), udf=kwargs.pop('udf', False)):
return func(*_extract_meta(args, True), **_extract_meta(kwargs, True))
@insert_meta_param_description
def map_partitions(func, *args, **kwargs):
""" Apply Python function on each DataFrame partition.
Parameters
----------
func : function
Function applied to each partition.
args, kwargs :
Arguments and keywords to pass to the function. At least one of the
args should be a Dask.dataframe. Arguments and keywords may contain
``Scalar``, ``Delayed`` or regular python objects. DataFrame-like args
(both dask and pandas) will be repartitioned to align (if necessary)
before applying the function.
$META
"""
meta = kwargs.pop('meta', no_default)
name = kwargs.pop('token', None)
transform_divisions = kwargs.pop('transform_divisions', True)
assert callable(func)
if name is not None:
token = tokenize(meta, *args, **kwargs)
else:
name = funcname(func)
token = tokenize(func, meta, *args, **kwargs)
name = '{0}-{1}'.format(name, token)
from .multi import _maybe_align_partitions
args = _maybe_from_pandas(args)
args = _maybe_align_partitions(args)
dfs = [df for df in args if isinstance(df, _Frame)]
meta_index = getattr(make_meta(dfs[0]), 'index', None) if dfs else None
if meta is no_default:
# Use non-normalized kwargs here, as we want the real values (not
# delayed values)
meta = _emulate(func, *args, udf=True, **kwargs)
else:
meta = make_meta(meta, index=meta_index)
if all(isinstance(arg, Scalar) for arg in args):
layer = {(name, 0):
(apply, func, (tuple, [(arg._name, 0) for arg in args]), kwargs)}
graph = HighLevelGraph.from_collections(name, layer, dependencies=args)
return Scalar(graph, name, meta)
elif not (has_parallel_type(meta) or is_arraylike(meta)):
# If `meta` is not a pandas object, the concatenated results will be a
# different type
meta = make_meta(_concat([meta]), index=meta_index)
# Ensure meta is empty series
meta = make_meta(meta)
args2 = []
dependencies = []
for arg in args:
if isinstance(arg, _Frame):
args2.append(arg)
dependencies.append(arg)
continue
arg = normalize_arg(arg)
arg2, collections = unpack_collections(arg)
if collections:
args2.append(arg2)
dependencies.extend(collections)
else:
args2.append(arg)
kwargs3 = {}
for k, v in kwargs.items():
v = normalize_arg(v)
v, collections = unpack_collections(v)
dependencies.extend(collections)
kwargs3[k] = v
dsk = partitionwise_graph(
apply_and_enforce,
name,
*args2,
dependencies=dependencies,
_func=func,
_meta=meta,
**kwargs3
)
divisions = dfs[0].divisions
if transform_divisions and isinstance(dfs[0], Index) and len(dfs) == 1:
try:
divisions = func(
*[pd.Index(a.divisions) if a is dfs[0] else a for a in args],
**kwargs
)
if isinstance(divisions, pd.Index):
divisions = divisions.tolist()
except Exception:
pass
else:
if not valid_divisions(divisions):
divisions = [None] * (dfs[0].npartitions + 1)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)
return new_dd_object(graph, name, meta, divisions)
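# Illustrative sketch (not part of the original module): map_partitions applies
# a plain pandas function to every partition, here adding a derived column.
# `ddf` is a hypothetical dask DataFrame with numeric columns 'x' and 'y'.
#
# >>> def add_ratio(pdf):
# ...     return pdf.assign(ratio=pdf.x / pdf.y)
# >>> ddf2 = map_partitions(add_ratio, ddf)                  # doctest: +SKIP
# >>> ddf2.ratio.mean().compute()                            # doctest: +SKIP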
def apply_and_enforce(*args, **kwargs):
"""Apply a function, and enforce the output to match meta
Ensures the output has the same columns, even if empty."""
func = kwargs.pop('_func')
meta = kwargs.pop('_meta')
df = func(*args, **kwargs)
if is_dataframe_like(df) or is_series_like(df) or is_index_like(df):
if not len(df):
return meta
if is_dataframe_like(df):
# Need nan_to_num otherwise nan comparison gives False
if not np.array_equal(np.nan_to_num(meta.columns),
np.nan_to_num(df.columns)):
raise ValueError("The columns in the computed data do not match"
" the columns in the provided metadata")
else:
c = meta.columns
else:
c = meta.name
return _rename(c, df)
return df
def _rename(columns, df):
"""
Rename columns of pd.DataFrame or name of pd.Series.
Not for dd.DataFrame or dd.Series.
Parameters
----------
columns : tuple, string, pd.DataFrame or pd.Series
Column names, Series name or pandas instance which has the
target column names / name.
df : pd.DataFrame or pd.Series
target DataFrame / Series to be renamed
"""
assert not isinstance(df, _Frame)
if columns is no_default:
return df
if isinstance(columns, Iterator):
columns = list(columns)
if is_dataframe_like(df):
if is_dataframe_like(columns):
columns = columns.columns
if not isinstance(columns, pd.Index):
columns = pd.Index(columns)
if (len(columns) == len(df.columns) and
type(columns) is type(df.columns) and
columns.equals(df.columns)):
# if target is identical, rename is not necessary
return df
# deep=False doesn't copy any data/indices, so this is cheap
df = df.copy(deep=False)
df.columns = columns
return df
elif is_series_like(df) or is_index_like(df):
if is_series_like(columns) or is_index_like(columns):
columns = columns.name
if df.name == columns:
return df
return df.rename(columns)
# map_partition may pass other types
return df
def _rename_dask(df, names):
"""
Destructively rename columns of dd.DataFrame or name of dd.Series.
Not for pd.DataFrame or pd.Series.
Internally used to overwrite dd.DataFrame.columns and dd.Series.name
We can't use map_partition because it applies function then rename
Parameters
----------
df : dd.DataFrame or dd.Series
target DataFrame / Series to be renamed
names : tuple, string
Column names/Series name
"""
assert isinstance(df, _Frame)
metadata = _rename(names, df._meta)
name = 'rename-{0}'.format(tokenize(df, metadata))
dsk = partitionwise_graph(_rename, name, metadata, df)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[df])
return new_dd_object(graph, name, metadata, df.divisions)
def quantile(df, q, method='default'):
"""Approximate quantiles of Series.
Parameters
----------
q : list/array of floats
Iterable of numbers ranging from 0 to 1 for the desired quantiles
method : {'default', 'tdigest', 'dask'}, optional
What method to use. By default will use dask's internal custom
algorithm (``'dask'``). If set to ``'tdigest'`` will use tdigest for
floats and ints and fallback to the ``'dask'`` otherwise.
"""
# current implementation needs q to be sorted so
# sort if array-like, otherwise leave it alone
q_ndarray = np.array(q)
if q_ndarray.ndim > 0:
q_ndarray.sort(kind='mergesort')
q = q_ndarray
assert isinstance(df, Series)
allowed_methods = ['default', 'dask', 'tdigest']
if method not in allowed_methods:
raise ValueError("method can only be 'default', 'dask' or 'tdigest'")
if method == 'default':
internal_method = 'dask'
else:
internal_method = method
# currently, only Series has quantile method
if isinstance(df, Index):
meta = pd.Series(df._meta_nonempty).quantile(q)
else:
meta = df._meta_nonempty.quantile(q)
if is_series_like(meta):
# Index.quantile(list-like) must be pd.Series, not pd.Index
df_name = df.name
finalize_tsk = lambda tsk: (pd.Series, tsk, q, None, df_name)
return_type = Series
else:
finalize_tsk = lambda tsk: (getitem, tsk, 0)
return_type = Scalar
q = [q]
# pandas uses quantile in [0, 1]
# numpy / everyone else uses [0, 100]
qs = np.asarray(q) * 100
token = tokenize(df, qs)
if len(qs) == 0:
name = 'quantiles-' + token
empty_index = pd.Index([], dtype=float)
return Series({(name, 0): pd.Series([], name=df.name, index=empty_index)},
name, df._meta, [None, None])
else:
new_divisions = [np.min(q), np.max(q)]
df = df.dropna()
if (internal_method == 'tdigest' and
(np.issubdtype(df.dtype, np.floating) or np.issubdtype(df.dtype, np.integer))):
from dask.utils import import_required
import_required('crick',
'crick is a required dependency for using the t-digest '
'method.')
from dask.array.percentile import _tdigest_chunk, _percentiles_from_tdigest
name = 'quantiles_tdigest-1-' + token
val_dsk = {(name, i): (_tdigest_chunk, (getattr, key, 'values'))
for i, key in enumerate(df.__dask_keys__())}
name2 = 'quantiles_tdigest-2-' + token
merge_dsk = {(name2, 0): finalize_tsk((_percentiles_from_tdigest, qs,
sorted(val_dsk)))}
else:
from dask.array.percentile import _percentile, merge_percentiles
name = 'quantiles-1-' + token
val_dsk = {(name, i): (_percentile, (getattr, key, 'values'), qs)
for i, key in enumerate(df.__dask_keys__())}
name2 = 'quantiles-2-' + token
merge_dsk = {(name2, 0): finalize_tsk((merge_percentiles, qs,
[qs] * df.npartitions,
sorted(val_dsk)))}
dsk = merge(val_dsk, merge_dsk)
graph = HighLevelGraph.from_collections(name2, dsk, dependencies=[df])
return return_type(graph, name2, meta, new_divisions)
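# Illustrative sketch (not part of the original module): approximate quantiles
# of a dask Series, with q given as fractions as the code above expects.
# `s` is a hypothetical numeric dask Series.
#
# >>> quantile(s, [0.25, 0.5, 0.75]).compute()               # doctest: +SKIP
# >>> quantile(s, [0.5], method='tdigest').compute()         # doctest: +SKIP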
def cov_corr(df, min_periods=None, corr=False, scalar=False, split_every=False):
"""DataFrame covariance and pearson correlation.
Computes pairwise covariance or correlation of columns, excluding NA/null
values.
Parameters
----------
df : DataFrame
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
corr : bool, optional
If True, compute the Pearson correlation. If False [default], compute
the covariance.
scalar : bool, optional
If True, compute covariance between two variables as a scalar. Only
valid if `df` has 2 columns. If False [default], compute the entire
covariance/correlation matrix.
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used.
Default is False.
"""
if min_periods is None:
min_periods = 2
elif min_periods < 2:
raise ValueError("min_periods must be >= 2")
if split_every is False:
split_every = df.npartitions
elif split_every < 2 or not isinstance(split_every, Integral):
raise ValueError("split_every must be an integer >= 2")
df = df._get_numeric_data()
if scalar and len(df.columns) != 2:
raise ValueError("scalar only valid for 2 column dataframe")
token = tokenize(df, min_periods, scalar, split_every)
funcname = 'corr' if corr else 'cov'
a = '{0}-chunk-{1}'.format(funcname, df._name)
dsk = {(a, i): (cov_corr_chunk, f, corr)
for (i, f) in enumerate(df.__dask_keys__())}
prefix = '{0}-combine-{1}-'.format(funcname, df._name)
k = df.npartitions
b = a
depth = 0
while k > split_every:
b = prefix + str(depth)
for part_i, inds in enumerate(partition_all(split_every, range(k))):
dsk[(b, part_i)] = (cov_corr_combine, [(a, i) for i in inds], corr)
k = part_i + 1
a = b
depth += 1
name = '{0}-{1}'.format(funcname, token)
dsk[(name, 0)] = (cov_corr_agg, [(a, i) for i in range(k)],
df.columns, min_periods, corr, scalar)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[df])
if scalar:
return Scalar(graph, name, 'f8')
meta = make_meta([(c, 'f8') for c in df.columns], index=df.columns)
return DataFrame(graph, name, meta, (df.columns[0], df.columns[-1]))
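# Illustrative sketch (not part of the original module): cov_corr returns the
# full pairwise matrix by default, or a single scalar for a two-column frame.
# `ddf` is a hypothetical numeric dask DataFrame with columns 'a' and 'b'.
#
# >>> cov_corr(ddf).compute()                                       # doctest: +SKIP
# >>> cov_corr(ddf[['a', 'b']], corr=True, scalar=True).compute()   # doctest: +SKIP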
def cov_corr_chunk(df, corr=False):
"""Chunk part of a covariance or correlation computation
"""
shape = (df.shape[1], df.shape[1])
sums = np.zeros(shape)
counts = np.zeros(shape)
df = df.astype('float64', copy=False)
for idx, col in enumerate(df):
mask = df.iloc[:, idx].notnull()
sums[idx] = df[mask].sum().values
counts[idx] = df[mask].count().values
cov = df.cov().values
dtype = [('sum', sums.dtype), ('count', counts.dtype), ('cov', cov.dtype)]
if corr:
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
mu = (sums / counts).T
m = np.zeros(shape)
mask = df.isnull().values
for idx, x in enumerate(df):
mu_discrepancy = np.subtract.outer(df.iloc[:, idx], mu[idx]) ** 2
mu_discrepancy[mask] = np.nan
m[idx] = np.nansum(mu_discrepancy, axis=0)
m = m.T
dtype.append(('m', m.dtype))
out = np.empty(counts.shape, dtype=dtype)
out['sum'] = sums
out['count'] = counts
out['cov'] = cov * (counts - 1)
if corr:
out['m'] = m
return out
def cov_corr_combine(data, corr=False):
data = np.concatenate(data).reshape((len(data),) + data[0].shape)
sums = np.nan_to_num(data['sum'])
counts = data['count']
cum_sums = np.cumsum(sums, 0)
cum_counts = np.cumsum(counts, 0)
s1 = cum_sums[:-1]
s2 = sums[1:]
n1 = cum_counts[:-1]
n2 = counts[1:]
with np.errstate(invalid='ignore'):
d = (s2 / n2) - (s1 / n1)
C = (np.nansum((n1 * n2) / (n1 + n2) * (d * d.transpose((0, 2, 1))), 0) +
np.nansum(data['cov'], 0))
out = np.empty(C.shape, dtype=data.dtype)
out['sum'] = cum_sums[-1]
out['count'] = cum_counts[-1]
out['cov'] = C
if corr:
nobs = np.where(cum_counts[-1], cum_counts[-1], np.nan)
mu = cum_sums[-1] / nobs
counts_na = np.where(counts, counts, np.nan)
m = np.nansum(data['m'] + counts * (sums / counts_na - mu) ** 2,
axis=0)
out['m'] = m
return out
def cov_corr_agg(data, cols, min_periods=2, corr=False, scalar=False):
out = cov_corr_combine(data, corr)
counts = out['count']
C = out['cov']
C[counts < min_periods] = np.nan
if corr:
m2 = out['m']
den = np.sqrt(m2 * m2.T)
else:
den = np.where(counts, counts, np.nan) - 1
with np.errstate(invalid='ignore', divide='ignore'):
mat = C / den
if scalar:
return mat[0, 1]
return pd.DataFrame(mat, columns=cols, index=cols)
def pd_split(df, p, random_state=None):
""" Split DataFrame into multiple pieces pseudorandomly
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [2, 3, 4, 5, 6, 7]})
>>> a, b = pd_split(df, [0.5, 0.5], random_state=123) # roughly 50/50 split
>>> a
a b
1 2 3
2 3 4
5 6 7
>>> b
a b
0 1 2
3 4 5
4 5 6
"""
p = list(p)
index = pseudorandom(len(df), p, random_state)
return [df.iloc[index == i] for i in range(len(p))]
def _take_last(a, skipna=True):
"""
take last row (Series) of DataFrame / last value of Series
considering NaN.
Parameters
----------
a : pd.DataFrame or pd.Series
skipna : bool, default True
Whether to exclude NaN
"""
def _last_valid(s):
for i in range(1, min(10, len(s) + 1)):
val = s.iloc[-i]
if not pd.isnull(val):
return val
else:
nonnull = s[s.notna()]
if not nonnull.empty:
return nonnull.iloc[-1]
return None
if skipna is False:
return a.iloc[-1]
else:
# take last valid value excluding NaN, NaN location may be different
# in each column
if is_dataframe_like(a):
# create Series from appropriate backend dataframe library
series_typ = type(a.loc[0:1, a.columns[0]])
if a.empty:
return series_typ([])
return series_typ({col: _last_valid(a[col]) for col in a.columns},
index=a.columns)
else:
return _last_valid(a)
def check_divisions(divisions):
if not isinstance(divisions, (list, tuple)):
raise ValueError('New division must be list or tuple')
divisions = list(divisions)
if divisions != sorted(divisions):
raise ValueError('New division must be sorted')
if len(divisions[:-1]) != len(list(unique(divisions[:-1]))):
msg = 'New division must be unique, except for the last element'
raise ValueError(msg)
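# Illustrative sketch (not part of the original module): divisions must be
# sorted and unique except possibly for the final element.
#
# >>> check_divisions([0, 5, 10, 10])    # valid              # doctest: +SKIP
# >>> check_divisions([0, 10, 5])        # raises ValueError  # doctest: +SKIP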
def repartition_divisions(a, b, name, out1, out2, force=False):
""" dask graph to repartition dataframe by new divisions
Parameters
----------
a : tuple
old divisions
b : tuple, list
new divisions
name : str
name of old dataframe
out1 : str
name of temporary splits
out2 : str
name of new dataframe
force : bool, default False
Allows the expansion of the existing divisions.
If False then the new divisions lower and upper bounds must be
the same as the old divisions.
Examples
--------
>>> repartition_divisions([1, 3, 7], [1, 4, 6, 7], 'a', 'b', 'c') # doctest: +SKIP
{('b', 0): (<function boundary_slice at ...>, ('a', 0), 1, 3, False),
('b', 1): (<function boundary_slice at ...>, ('a', 1), 3, 4, False),
('b', 2): (<function boundary_slice at ...>, ('a', 1), 4, 6, False),
('b', 3): (<function boundary_slice at ...>, ('a', 1), 6, 7, False)
('c', 0): (<function concat at ...>,
(<type 'list'>, [('b', 0), ('b', 1)])),
('c', 1): ('b', 2),
('c', 2): ('b', 3)}
"""
check_divisions(b)
if len(b) < 2:
# minimum division is 2 elements, like [0, 0]
raise ValueError('New division must have at least 2 elements')
if force:
if a[0] < b[0]:
msg = ('left side of the new division must be equal or smaller '
'than old division')
raise ValueError(msg)
if a[-1] > b[-1]:
msg = ('right side of the new division must be equal or larger '
'than old division')
raise ValueError(msg)
else:
if a[0] != b[0]:
msg = 'left side of old and new divisions are different'
raise ValueError(msg)
if a[-1] != b[-1]:
msg = 'right side of old and new divisions are different'
raise ValueError(msg)
def _is_single_last_div(x):
"""Whether last division only contains single label"""
return len(x) >= 2 and x[-1] == x[-2]
c = [a[0]]
d = dict()
low = a[0]
i, j = 1, 1 # indices for old/new divisions
k = 0 # index for temp divisions
last_elem = _is_single_last_div(a)
# process through old division
# left part of new division can be processed in this loop
while (i < len(a) and j < len(b)):
if a[i] < b[j]:
# tuple is something like:
# (methods.boundary_slice, ('from_pandas-#', 0), 3, 4, False))
d[(out1, k)] = (methods.boundary_slice, (name, i - 1), low, a[i], False)
low = a[i]
i += 1
elif a[i] > b[j]:
d[(out1, k)] = (methods.boundary_slice, (name, i - 1), low, b[j], False)
low = b[j]
j += 1
else:
d[(out1, k)] = (methods.boundary_slice, (name, i - 1), low, b[j], False)
low = b[j]
if len(a) == i + 1 or a[i] < a[i + 1]:
j += 1
i += 1
c.append(low)
k += 1
# right part of new division can remain
if a[-1] < b[-1] or b[-1] == b[-2]:
for _j in range(j, len(b)):
# always use right-most of old division
# because it may contain last element
m = len(a) - 2
d[(out1, k)] = (methods.boundary_slice, (name, m), low, b[_j], False)
low = b[_j]
c.append(low)
k += 1
else:
# even if new division is processed through,
# right-most element of old division can remain
if last_elem and i < len(a):
d[(out1, k)] = (methods.boundary_slice, (name, i - 1), a[i], a[i], False)
k += 1
c.append(a[-1])
# replace last element of tuple with True
d[(out1, k - 1)] = d[(out1, k - 1)][:-1] + (True,)
i, j = 0, 1
last_elem = _is_single_last_div(c)
while j < len(b):
tmp = []
while c[i] < b[j]:
tmp.append((out1, i))
i += 1
while last_elem and c[i] == b[-1] and (b[-1] != b[-2] or j == len(b) - 1) and i < k:
# append if last split is not included
tmp.append((out1, i))
i += 1
if len(tmp) == 0:
# dummy slice to return empty DataFrame or Series,
# which retain original data attributes (columns / name)
d[(out2, j - 1)] = (methods.boundary_slice, (name, 0), a[0], a[0], False)
elif len(tmp) == 1:
d[(out2, j - 1)] = tmp[0]
else:
if not tmp:
raise ValueError('check for duplicate partitions\nold:\n%s\n\n'
'new:\n%s\n\ncombined:\n%s'
% (pformat(a), pformat(b), pformat(c)))
d[(out2, j - 1)] = (methods.concat, tmp)
j += 1
return d
def repartition_freq(df, freq=None):
""" Repartition a timeseries dataframe by a new frequency """
if not isinstance(df.divisions[0], pd.Timestamp):
raise TypeError("Can only repartition on frequency for timeseries")
try:
start = df.divisions[0].ceil(freq)
except ValueError:
start = df.divisions[0]
divisions = pd.date_range(start=start,
end=df.divisions[-1],
freq=freq).tolist()
if not len(divisions):
divisions = [df.divisions[0], df.divisions[-1]]
else:
if divisions[-1] != df.divisions[-1]:
divisions.append(df.divisions[-1])
if divisions[0] != df.divisions[0]:
divisions = [df.divisions[0]] + divisions
return df.repartition(divisions=divisions)
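# Illustrative sketch (not part of the original module): a timeseries-indexed
# dask DataFrame with known divisions can be repartitioned onto calendar
# boundaries. `ts` is a hypothetical dask DataFrame with a DatetimeIndex.
#
# >>> weekly = repartition_freq(ts, freq='7D')               # doctest: +SKIP
# >>> weekly.npartitions                                     # doctest: +SKIP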
def repartition_npartitions(df, npartitions):
""" Repartition dataframe to a smaller number of partitions """
new_name = 'repartition-%d-%s' % (npartitions, tokenize(df))
if df.npartitions == npartitions:
return df
elif df.npartitions > npartitions:
npartitions_ratio = df.npartitions / npartitions
new_partitions_boundaries = [int(new_partition_index * npartitions_ratio)
for new_partition_index in range(npartitions + 1)]
dsk = {}
for new_partition_index in range(npartitions):
value = (methods.concat,
[(df._name, old_partition_index) for old_partition_index in
range(new_partitions_boundaries[new_partition_index],
new_partitions_boundaries[new_partition_index + 1])])
dsk[new_name, new_partition_index] = value
divisions = [df.divisions[new_partition_index]
for new_partition_index in new_partitions_boundaries]
graph = HighLevelGraph.from_collections(new_name, dsk, dependencies=[df])
return new_dd_object(graph, new_name, df._meta, divisions)
else:
original_divisions = divisions = pd.Series(df.divisions)
if (df.known_divisions and (np.issubdtype(divisions.dtype, np.datetime64) or
np.issubdtype(divisions.dtype, np.number))):
if np.issubdtype(divisions.dtype, np.datetime64):
divisions = divisions.values.astype('float64')
if is_series_like(divisions):
divisions = divisions.values
n = len(divisions)
divisions = np.interp(x=np.linspace(0, n, npartitions + 1),
xp=np.linspace(0, n, n),
fp=divisions)
if np.issubdtype(original_divisions.dtype, np.datetime64):
divisions = pd.Series(divisions).astype(original_divisions.dtype).tolist()
elif np.issubdtype(original_divisions.dtype, np.integer):
divisions = divisions.astype(original_divisions.dtype)
if isinstance(divisions, np.ndarray):
divisions = divisions.tolist()
divisions = list(divisions)
divisions[0] = df.divisions[0]
divisions[-1] = df.divisions[-1]
return df.repartition(divisions=divisions)
else:
ratio = npartitions / df.npartitions
split_name = 'split-%s' % tokenize(df, npartitions)
dsk = {}
last = 0
j = 0
for i in range(df.npartitions):
new = last + ratio
if i == df.npartitions - 1:
k = npartitions - j
else:
k = int(new - last)
dsk[(split_name, i)] = (split_evenly, (df._name, i), k)
for jj in range(k):
dsk[(new_name, j)] = (getitem, (split_name, i), jj)
j += 1
last = new
divisions = [None] * (npartitions + 1)
graph = HighLevelGraph.from_collections(new_name, dsk, dependencies=[df])
return new_dd_object(graph, new_name, df._meta, divisions)
def repartition(df, divisions=None, force=False):
""" Repartition dataframe along new divisions
Dask.DataFrame objects are partitioned along their index. Often when
multiple dataframes interact we need to align these partitionings. The
``repartition`` function constructs a new DataFrame object holding the same
data but partitioned on different values. It does this by performing a
sequence of ``loc`` and ``concat`` calls to split and merge the previous
generation of partitions.
Parameters
----------
divisions : list
List of partitions to be used
force : bool, default False
Allows the expansion of the existing divisions.
If False then the new divisions lower and upper bounds must be
the same as the old divisions.
Examples
--------
>>> df = df.repartition([0, 5, 10, 20]) # doctest: +SKIP
Also works on Pandas objects
>>> ddf = dd.repartition(df, [0, 5, 10, 20]) # doctest: +SKIP
"""
token = tokenize(df, divisions)
if isinstance(df, _Frame):
tmp = 'repartition-split-' + token
out = 'repartition-merge-' + token
dsk = repartition_divisions(df.divisions, divisions,
df._name, tmp, out, force=force)
graph = HighLevelGraph.from_collections(out, dsk, dependencies=[df])
return new_dd_object(graph, out, df._meta, divisions)
elif is_dataframe_like(df) or is_series_like(df):
name = 'repartition-dataframe-' + token
from .utils import shard_df_on_index
dfs = shard_df_on_index(df, divisions[1:-1])
dsk = dict(((name, i), df) for i, df in enumerate(dfs))
return new_dd_object(dsk, name, df, divisions)
raise ValueError('Data must be DataFrame or Series')
def _reduction_chunk(x, aca_chunk=None, **kwargs):
o = aca_chunk(x, **kwargs)
# Return a dataframe so that the concatenated version is also a dataframe
return o.to_frame().T if is_series_like(o) else o
def _reduction_combine(x, aca_combine=None, **kwargs):
if isinstance(x, list):
x = pd.Series(x)
o = aca_combine(x, **kwargs)
# Return a dataframe so that the concatenated version is also a dataframe
return o.to_frame().T if is_series_like(o) else o
def _reduction_aggregate(x, aca_aggregate=None, **kwargs):
if isinstance(x, list):
x = pd.Series(x)
return aca_aggregate(x, **kwargs)
def idxmaxmin_chunk(x, fn=None, skipna=True):
minmax = 'max' if fn == 'idxmax' else 'min'
if len(x) > 0:
idx = getattr(x, fn)(skipna=skipna)
value = getattr(x, minmax)(skipna=skipna)
else:
idx = value = pd.Series([], dtype='i8')
if is_series_like(idx):
return pd.DataFrame({'idx': idx, 'value': value})
return pd.DataFrame({'idx': [idx], 'value': [value]})
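# Illustrative sketch (not part of the original module): the chunk step records,
# per column, both the index of the extreme value and the value itself, so the
# aggregate step can pick the overall winner across partitions.
#
# >>> part = pd.DataFrame({'a': [3, 1, 2]}, index=[10, 11, 12])
# >>> idxmaxmin_chunk(part, fn='idxmax')                     # doctest: +SKIP
#    idx  value
# a   10      3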
from os.path import exists, join
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from tqdm import trange
from math import ceil
from traitlets import Dict, List
from ctapipe.core import Tool
from targetpipe.fitting.spe_sipm import sipm_spe_fit
from targetpipe.fitting.chec import CHECSSPEFitter, CHECSSPEMultiFitter
from targetpipe.plots.official import ThesisPlotter
from IPython import embed
def get_params(lambda_=1):
params = dict(
norm=1000,
eped=-0.6,
eped_sigma=0.4,
spe=1.4,
spe_sigma=0.2,
lambda_=lambda_,
opct=0.6,
pap=0.3,
dap=0.4
)
return params.copy()
def get_params_multi(params1, params2, params3):
params_multi = dict(
norm1=params1['norm'],
norm2=params2['norm'],
norm3=params3['norm'],
eped=params1['eped'],
eped_sigma=params1['eped_sigma'],
spe=params1['spe'],
spe_sigma=params1['spe_sigma'],
lambda_1=params1['lambda_'],
lambda_2=params2['lambda_'],
lambda_3=params3['lambda_'],
opct=params1['opct'],
pap=params1['pap'],
dap=params1['dap']
)
return params_multi.copy()
def get_initial(lambda_=1):
params = dict(
norm=None,
eped=-0.5,
eped_sigma=0.5,
spe=1,
spe_sigma=0.1,
lambda_=lambda_,
opct=0.5,
pap=0.5,
dap=0.5
)
return params.copy()
def get_initial_multi(initial1, initial2, initial3):
params_multi = dict(
norm1=initial1['norm'],
norm2=initial2['norm'],
norm3=initial3['norm'],
eped=initial1['eped'],
eped_sigma=initial1['eped_sigma'],
spe=initial1['spe'],
spe_sigma=initial1['spe_sigma'],
lambda_1=initial1['lambda_'],
lambda_2=initial2['lambda_'],
lambda_3=initial3['lambda_'],
opct=initial1['opct'],
pap=initial1['pap'],
dap=initial1['dap']
)
return params_multi.copy()
def sample_distribution(x, params, n=30000):
y = sipm_spe_fit(x, **params)
samples = np.random.choice(x, n, p=y / y.sum())
return samples, y
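# Illustrative sketch (not part of this script's original code): draw synthetic
# charge samples from an SPE spectrum with lambda_ = 1.2 and inspect their
# spread. The variable names are placeholders.
#
# x_demo = np.linspace(-3, 10, 1000)
# samples_demo, y_demo = sample_distribution(x_demo, get_params(1.2))
# print(samples_demo.mean(), samples_demo.std())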
class FitPlotter(ThesisPlotter):
def __init__(self, config, tool, **kwargs):
super().__init__(config, tool, **kwargs)
self.figures = dict()
def plot(self):
# Create the fitters
range_ = [-3, 10]
nbins = 100
fitter1 = CHECSSPEFitter(self.config, self.parent)
fitter1.range = range_
fitter1.nbins = nbins
fitter1.initial = get_initial(1)
fitter2 = CHECSSPEFitter(self.config, self.parent)
fitter2.range = range_
fitter2.nbins = nbins
fitter2.initial = get_initial(2)
fitter3 = CHECSSPEFitter(self.config, self.parent)
fitter3.range = range_
fitter3.nbins = nbins
fitter3.initial = get_initial(3)
fitter_multi = CHECSSPEMultiFitter(self.config, self.parent)
fitter_multi.range = range_
fitter_multi.nbins = nbins
fitter_multi.initial = get_initial_multi(fitter1.initial, fitter2.initial, fitter3.initial)
# Generate the functions
found_good = False
found_bad = False
i = 0
while not found_good or not found_bad:
self.log.info("FitPlotter: Attempt {}".format(i))
i += 1
params1 = get_params(1.2)
params2 = get_params(1.7)
params3 = get_params(3.1)
x = np.linspace(-3, 10, 1000)
samples1, y1 = sample_distribution(x, params1)
samples2, y2 = sample_distribution(x, params2)
samples3, y3 = sample_distribution(x, params3)
params_multi = get_params_multi(params1, params2, params3)
fitter1.apply(samples1)
p1 = fitter1.p_value
fitter2.apply(samples2)
p2 = fitter2.p_value
fitter3.apply(samples3)
p3 = fitter3.p_value
fitter_multi.apply_multi(samples1, samples2, samples3)
pm = fitter_multi.p_value
print(pm, p1, p2, p3)
if (pm > p1) & (pm > p2) & (pm > p3) & (p1 < 0.0001):
if found_good:
continue
self.log.info("FitPlotter: Found good")
found_good = True
desc = "good"
elif (pm < 0.001) & (p3 > 0.001):
if found_bad:
continue
self.log.info("FitPlotter: Found bad")
found_bad = True
desc = "bad"
else:
continue
fig_individual = plt.figure(figsize=(13, 6))
fig_individual.suptitle("Individual Fit")
ax1 = plt.subplot2grid((3, 2), (0, 0))
ax1_t = plt.subplot2grid((3, 2), (0, 1))
ax2 = plt.subplot2grid((3, 2), (1, 0))
ax2_t = plt.subplot2grid((3, 2), (1, 1))
ax3 = plt.subplot2grid((3, 2), (2, 0))
ax3_t = plt.subplot2grid((3, 2), (2, 1))
self.individual_plot(x, y1, params1, samples1, fitter1, ax1, ax1_t, True)
self.individual_plot(x, y2, params2, samples2, fitter2, ax2, ax2_t)
self.individual_plot(x, y3, params3, samples3, fitter3, ax3, ax3_t)
name = "fit_" + desc + "_individual"
self.figures[name] = fig_individual
fig_multi = plt.figure(figsize=(13, 6))
fig_multi.suptitle("Multi Fit")
ax1 = plt.subplot2grid((3, 2), (0, 0))
ax2 = plt.subplot2grid((3, 2), (1, 0))
ax3 = plt.subplot2grid((3, 2), (2, 0))
ax_mt = plt.subplot2grid((3, 2), (0, 1), rowspan=3)
self.multi_plot(x, [y1, y2, y3], params_multi, [samples1, samples2, samples3], fitter_multi, [ax1, ax2, ax3], ax_mt)
name = "fit_" + desc + "_multi"
self.figures[name] = fig_multi
def save(self, output_path=None):
for name, fig in self.figures.items():
self.fig = fig
self.figure_name = name
super().save(output_path)
@staticmethod
def individual_plot(x, y, params, samples, fitter, ax_p, ax_t, legend=False):
hist = fitter.hist
edges = fitter.edges
between = fitter.between
coeff = fitter.coeff.copy()
coeffl = fitter.coeff_list.copy()
initial = fitter.initial.copy()
fit = fitter.fit_function(x, **coeff)
rc2 = fitter.reduced_chi2
pval = fitter.p_value
ax_p.plot(x, y, label="Base")
ax_p.hist(between, bins=edges, weights=hist, histtype='step', label="Hist")
ax_p.plot(x, fit, label="Fit")
td = [['%.3f' % params[i], initial[i], '%.3f' % coeff[i]] for i in coeffl]
td.append(["", "", '%.3g' % rc2])
td.append(["", "", '%.3g' % pval])
tr = coeffl
tr.append("Reduced Chi^2")
tr.append("P-Value")
tc = ['Base', 'Initial', 'Fit']
ax_t.axis('off')
table = ax_t.table(cellText=td, rowLabels=tr, colLabels=tc, loc='center')
table.set_fontsize(6)
table.scale(0.7, 0.7)
if legend:
ax_p.legend(loc=1, frameon=True, fancybox=True, framealpha=0.7)
@staticmethod
def multi_plot(x, y_list, params, samples_list, fitter, ax_list, ax_t):
y1, y2, y3 = y_list
samples1, samples2, samples3 = samples_list
ax1, ax2, ax3 = ax_list
hist1, hist2, hist3 = fitter.hist
edges = fitter.edges
between = fitter.between
coeff = fitter.coeff.copy()
coeffl = fitter.coeff_list.copy()
initial = fitter.initial.copy()
fit1, fit2, fit3 = fitter.fit_function(x, **coeff)
rc2 = fitter.reduced_chi2
pval = fitter.p_value
ax1.plot(x, y1, label="Base")
ax1.hist(between, bins=edges, weights=hist1, histtype='step', label="Hist")
ax1.plot(x, fit1, label="Fit")
ax1.legend(loc=1, frameon=True, fancybox=True, framealpha=0.7)
ax2.plot(x, y2, label="Base")
ax2.hist(between, bins=edges, weights=hist2, histtype='step', label="Hist")
ax2.plot(x, fit2, label="Fit")
ax3.plot(x, y3, label="Base")
ax3.hist(between, bins=edges, weights=hist3, histtype='step', label="Hist")
ax3.plot(x, fit3, label="Fit")
ax_t.axis('off')
td = [['%.3f' % params[i], initial[i], '%.3f' % coeff[i]] for i in coeffl]
td.append(["", "", '%.3g' % rc2])
td.append(["", "", '%.3g' % pval])
tr = coeffl
tr.append("Reduced Chi^2")
tr.append("P-Value")
tc = ['Base', 'Initial', 'Fit']
table = ax_t.table(cellText=td, rowLabels=tr, colLabels=tc, loc='center')
table.set_fontsize(6)
class NoInitialPlotter(ThesisPlotter):
def __init__(self, config, tool, **kwargs):
super().__init__(config, tool, **kwargs)
self.figures = dict()
self.dataset_path = self.output_path + "_data.h5"
self.initial1 = 1
self.initial2 = 1
self.initial3 = 1
self.figures = {}
def plot(self):
df = self.load_dataset()
df = df[df > 0.01].groupby('x').count().reset_index()
x = df['x']
y1 = df['p1']
y2 = df['p2']
y3 = df['p3']
ym = df['pm']
x = ['%.3f\n%.3f\n%.3f\n' % (i[0], i[1], i[2]) for i in x]
self.fig, self.ax = self.create_figure()
self.add_points(x, y1, "Individual1")
self.add_points(x, y2, "Individual2")
self.add_points(x, y3, "Individual3")
self.add_points(x, ym, "Multi")
self.ax.set_xlabel("lambda")
self.ax.set_ylabel("Number of signficant p-values")
self.ax.legend(loc=1, frameon=True, fancybox=True, framealpha=0.7)
self.figures[self.figure_name + "_p"] = self.fig
def add_points(self, x, y, label, p='-'):
x_i = np.arange(len(x))
self.ax.plot(x_i, y, p, label=label)
self.ax.set_xticks(x_i)
self.ax.set_xticklabels(x)
def add_points_err(self, x, y, y_err, label):
x_i = np.arange(len(x))
(_, caps, _) = self.ax.errorbar(x_i, y, xerr=None, yerr=y_err, fmt='o',
mew=0.5, label=label,
markersize=3, capsize=3)
for cap in caps:
cap.set_markeredgewidth(1)
self.ax.set_xticks(x_i)
self.ax.set_xticklabels(x)
def save(self, output_path=None):
for name, fig in self.figures.items():
self.figure_name = name
self.fig = fig
super().save(output_path)
def load_dataset(self):
if exists(self.dataset_path):
store = pd.HDFStore(self.dataset_path)
df = store['df']
else:
df = self.create_dataset()
store = pd.HDFStore(self.dataset_path)
store['df'] = df
return df
def create_dataset(self):
df_list = []
# Create the fitters
range_ = [-3, 10]
nbins = 100
fitter1 = CHECSSPEFitter(self.config, self.parent)
fitter1.range = range_
fitter1.nbins = nbins
fitter1.initial = get_initial(1)
fitter2 = CHECSSPEFitter(self.config, self.parent)
fitter2.range = range_
fitter2.nbins = nbins
fitter2.initial = get_initial(1)
fitter3 = CHECSSPEFitter(self.config, self.parent)
fitter3.range = range_
fitter3.nbins = nbins
fitter3.initial = get_initial(1)
fitter_multi = CHECSSPEMultiFitter(self.config, self.parent)
fitter_multi.range = range_
fitter_multi.nbins = nbins
fitter_multi.initial = get_initial_multi(fitter1.initial, fitter2.initial, fitter3.initial)
lambda_1 = np.linspace(0.3, 1.5, 10)
lambda_2 = np.linspace(0.5, 3, 10)
lambda_3 = np.linspace(0.7, 4.5, 10)
for i in trange(10):
params1 = get_params(lambda_1[i])
params2 = get_params(lambda_2[i])
params3 = get_params(lambda_3[i])
params_multi = get_params_multi(params1, params2, params3)
x = np.linspace(-3, 10, 1000)
for j in trange(100):
samples1, y1 = sample_distribution(x, params1)
samples2, y2 = sample_distribution(x, params2)
samples3, y3 = sample_distribution(x, params3)
fitter1.apply(samples1)
p1 = fitter1.p_value
fitter2.apply(samples2)
p2 = fitter2.p_value
fitter3.apply(samples3)
p3 = fitter3.p_value
fitter_multi.apply_multi(samples1, samples2, samples3)
pm = fitter_multi.p_value
df_list.append(dict(x=(lambda_1[i], lambda_2[i], lambda_3[i]),
p1=p1, p2=p2, p3=p3, pm=pm))
df = pd.DataFrame(df_list)
return df
class WithInitialPlotter(NoInitialPlotter):
def create_dataset(self):
df_list = []
# Create the fitters
range_ = [-3, 10]
nbins = 100
fitter1 = CHECSSPEFitter(self.config, self.parent)
fitter1.range = range_
fitter1.nbins = nbins
fitter2 = CHECSSPEFitter(self.config, self.parent)
fitter2.range = range_
fitter2.nbins = nbins
fitter3 = CHECSSPEFitter(self.config, self.parent)
fitter3.range = range_
fitter3.nbins = nbins
fitter_multi = CHECSSPEMultiFitter(self.config, self.parent)
fitter_multi.range = range_
fitter_multi.nbins = nbins
lambda_1 = np.linspace(0.3, 1.5, 10)
lambda_2 = np.linspace(0.5, 3, 10)
lambda_3 = np.linspace(0.7, 4.5, 10)
for i in trange(10):
params1 = get_params(lambda_1[i])
params2 = get_params(lambda_2[i])
params3 = get_params(lambda_3[i])
fitter1.initial = get_initial(round(lambda_1[i]))
fitter2.initial = get_initial(round(lambda_2[i]))
fitter3.initial = get_initial(round(lambda_3[i]))
fitter_multi.initial = get_initial_multi(fitter1.initial,
fitter2.initial,
fitter3.initial)
params_multi = get_params_multi(params1, params2, params3)
x = np.linspace(-3, 10, 1000)
for j in trange(100):
samples1, y1 = sample_distribution(x, params1)
samples2, y2 = sample_distribution(x, params2)
samples3, y3 = sample_distribution(x, params3)
fitter1.apply(samples1)
p1 = fitter1.p_value
fitter2.apply(samples2)
p2 = fitter2.p_value
fitter3.apply(samples3)
p3 = fitter3.p_value
fitter_multi.apply_multi(samples1, samples2, samples3)
pm = fitter_multi.p_value
df_list.append(dict(x=(lambda_1[i], lambda_2[i], lambda_3[i]),
p1=p1, p2=p2, p3=p3, pm=pm))
df = pd.DataFrame(df_list)
return df
class CeilInitialPlotter(NoInitialPlotter):
def plot(self):
super().plot()
df = self.load_dataset()
u_i, u = pd.factorize(df['x'])
df['x_i'] = u_i
def rmse(true, fit):
fit = fit[~np.isnan(fit)]
n = fit.count()
return np.sqrt(np.sum(np.power(true - fit, 2)) / n)
def rmse_df(row):
lambda_1 = rmse(row['x'].iloc[0][0], row['rlambda_1'])
lambda_2 = rmse(row['x'].iloc[0][1], row['rlambda_2'])
lambda_3 = rmse(row['x'].iloc[0][2], row['rlambda_3'])
lambda_m1 = rmse(row['x'].iloc[0][0], row['rlambda_m1'])
lambda_m2 = rmse(row['x'].iloc[0][1], row['rlambda_m2'])
lambda_m3 = rmse(row['x'].iloc[0][2], row['rlambda_m3'])
opct_1 = rmse(row['opct'].iloc[0], row['ropct_1'])
opct_2 = rmse(row['opct'].iloc[0], row['ropct_2'])
opct_3 = rmse(row['opct'].iloc[0], row['ropct_3'])
opct_m = rmse(row['opct'].iloc[0], row['ropct_m'])
pap_1 = rmse(row['pap'].iloc[0], row['rpap_1'])
pap_2 = rmse(row['pap'].iloc[0], row['rpap_2'])
pap_3 = rmse(row['pap'].iloc[0], row['rpap_3'])
pap_m = rmse(row['pap'].iloc[0], row['rpap_m'])
return dict(
lambda_1=lambda_1,
lambda_2=lambda_2,
lambda_3=lambda_3,
lambda_m1=lambda_m1,
lambda_m2=lambda_m2,
lambda_m3=lambda_m3,
opct_1=opct_1,
opct_2=opct_2,
opct_3=opct_3,
opct_m=opct_m,
pap_1=pap_1,
pap_2=pap_2,
pap_3=pap_3,
pap_m=pap_m
)
data = df.groupby('x').apply(rmse_df)
df_list = []
for index, d in zip(data.index, data):
d['x'] = index
df_list.append(d)
df_rmse = pd.DataFrame(df_list)
x = df_rmse['x']
x = ['%.3f\n%.3f\n%.3f\n' % (i[0], i[1], i[2]) for i in x]
self.fig, self.ax = self.create_figure()
self.add_points(x, df_rmse['lambda_1'], "Individual1")
self.add_points(x, df_rmse['lambda_2'], "Individual2")
self.add_points(x, df_rmse['lambda_3'], "Individual3")
self.add_points(x, df_rmse['lambda_m1'], "Multi1")
self.add_points(x, df_rmse['lambda_m2'], "Multi2")
self.add_points(x, df_rmse['lambda_m3'], "Multi3")
print("Lambda Multi1:", df_rmse['lambda_m1'][3])
print("Lambda Multi2:", df_rmse['lambda_m2'][3])
print("Lambda Multi3:", df_rmse['lambda_m3'][3])
self.ax.set_xlabel("lambda")
self.ax.set_ylabel("Root-Mean-Square Error")
self.ax.set_title("Lambda")
self.ax.legend(loc=1, frameon=True, fancybox=True, framealpha=0.7)
self.figures[self.figure_name + "_lambda"] = self.fig
self.fig, self.ax = self.create_figure()
self.add_points(x, df_rmse['opct_1'], "Individual1")
self.add_points(x, df_rmse['opct_2'], "Individual2")
self.add_points(x, df_rmse['opct_3'], "Individual3")
self.add_points(x, df_rmse['opct_m'], "Multi")
print("opct Multi:", df_rmse['opct_m'][3])
self.ax.set_xlabel("lambda")
self.ax.set_ylabel("Root-Mean-Square Error")
self.ax.set_title("Optical Crosstalk Probability")
self.ax.legend(loc=1, frameon=True, fancybox=True, framealpha=0.7)
self.figures[self.figure_name + "_opct"] = self.fig
self.fig, self.ax = self.create_figure()
self.add_points(x, df_rmse['pap_1'], "Individual1")
self.add_points(x, df_rmse['pap_2'], "Individual2")
self.add_points(x, df_rmse['pap_3'], "Individual3")
self.add_points(x, df_rmse['pap_m'], "Multi")
print("pap Multi:", df_rmse['pap_m'][3])
self.ax.set_xlabel("lambda")
self.ax.set_ylabel("Root-Mean-Square Error")
self.ax.set_title("After-pulse Probability")
self.ax.legend(loc=1, frameon=True, fancybox=True, framealpha=0.7)
self.figures[self.figure_name + "_pap"] = self.fig
self.fig, self.ax = self.create_figure()
x0 = df['x'].values
x = [i[0] for i in x0]
y = df['rlambda_1'].values
self.ax.plot(x, y, 'x', mew=0.5, label="Individual1")
x = [i[1] for i in x0]
y = df['rlambda_2'].values
self.ax.plot(x, y, 'x', mew=0.5, label="Individual2")
x = [i[2] for i in x0]
y = df['rlambda_3'].values
self.ax.plot(x, y, 'x', mew=0.5, label="Individual3")
x = [i[0] for i in x0]
y = df['rlambda_m1'].values
self.ax.plot(x, y, 'x', mew=0.5, label="Multi1")
x = [i[1] for i in x0]
y = df['rlambda_m2'].values
self.ax.plot(x, y, 'x', mew=0.5, label="Multi2")
x = [i[2] for i in x0]
y = df['rlambda_m3'].values
self.ax.plot(x, y, 'x', mew=0.5, label="Multi3")
x = np.linspace(0, 5, 100)
y = np.linspace(0, 5, 100)
self.ax.plot(x, y, ls='--', c='black', lw=0.5)
self.ax.set_xlabel("Input Value")
self.ax.set_ylabel("Reconstructed Value")
self.ax.set_title("Lambda")
self.ax.legend(loc=1, frameon=True, fancybox=True, framealpha=0.7)
self.figures[self.figure_name + "_lambda_rich"] = self.fig
self.fig, self.ax = self.create_figure()
x = df['x_i'].values
u_s = ['%.3f\n%.3f\n%.3f\n' % (i[0], i[1], i[2]) for i in u]
y = df['rlambda_1'].values
self.ax.plot(x-0.2, y, 'x', mew=0.5, label="Individual1")
y = df['rlambda_2'].values
self.ax.plot(x-0.15, y, 'x', mew=0.5, label="Individual2")
y = df['rlambda_3'].values
self.ax.plot(x-0.05, y, 'x', mew=0.5, label="Individual3")
y = df['rlambda_m1'].values
self.ax.plot(x+0.05, y, 'x', mew=0.5, label="Multi1")
y = df['rlambda_m2'].values
self.ax.plot(x+0.15, y, 'x', mew=0.5, label="Multi2")
y = df['rlambda_m3'].values
self.ax.plot(x+0.2, y, 'x', mew=0.5, label="Multi3")
self.ax.set_xticks(np.arange(u.size))
self.ax.set_xticklabels(u_s)
r1 = [i[0] for i in u]
r2 = [i[1] for i in u]
r3 = [i[2] for i in u]
x = np.arange(u.size)
self.ax.plot(x, r1, c='b', lw=1)
self.ax.plot(x, r2, c='g', lw=1)
self.ax.plot(x, r3, c='r', lw=1)
self.ax.plot(x, r1, ls=':', c='purple', lw=1)
self.ax.plot(x, r2, ls=':', c='yellow', lw=1)
self.ax.plot(x, r3, ls=':', c='cyan', lw=1)
self.ax.set_xlabel("lambda")
self.ax.set_ylabel("Reconstructed Value")
self.ax.set_title("Lambda")
self.ax.legend(loc=1, frameon=True, fancybox=True, framealpha=0.7)
self.figures[self.figure_name + "_lambda_io"] = self.fig
self.fig, self.ax = self.create_figure()
x = df['x_i'].values
u_s = ['%.3f\n%.3f\n%.3f\n' % (i[0], i[1], i[2]) for i in u]
y = df['ropct_1'].values
self.ax.plot(x-0.2, y, 'x', mew=0.5, label="Individual1")
y = df['ropct_2'].values
self.ax.plot(x-0.1, y, 'x', mew=0.5, label="Individual2")
y = df['ropct_3'].values
self.ax.plot(x+0.1, y, 'x', mew=0.5, label="Individual3")
y = df['ropct_m'].values
self.ax.plot(x+0.2, y, 'x', mew=0.5, label="Multi")
self.ax.set_xticks(np.arange(u.size))
self.ax.set_xticklabels(u_s)
self.ax.axhline(df['opct'].iloc[0], c='black', lw=0.5)
self.ax.set_xlabel("lambda")
self.ax.set_ylabel("Reconstructed Value")
self.ax.set_title("Optical Crosstalk Probability")
self.ax.legend(loc=1, frameon=True, fancybox=True, framealpha=0.7)
self.figures[self.figure_name + "_opct_io"] = self.fig
self.fig, self.ax = self.create_figure()
x = df['x_i'].values
u_s = ['%.3f\n%.3f\n%.3f\n' % (i[0], i[1], i[2]) for i in u]
y = df['rpap_1'].values
self.ax.plot(x-0.2, y, 'x', mew=0.5, label="Individual1")
y = df['rpap_2'].values
self.ax.plot(x-0.1, y, 'x', mew=0.5, label="Individual2")
y = df['rpap_3'].values
self.ax.plot(x+0.1, y, 'x', mew=0.5, label="Individual3")
y = df['rpap_m'].values
self.ax.plot(x+0.2, y, 'x', mew=0.5, label="Multi")
self.ax.set_xticks(np.arange(u.size))
self.ax.set_xticklabels(u_s)
self.ax.axhline(df['pap'].iloc[0], c='black', lw=0.5)
self.ax.set_xlabel("lambda")
self.ax.set_ylabel("Reconstructed Value")
self.ax.set_title("After-pulse Probability")
self.ax.legend(loc=1, frameon=True, fancybox=True, framealpha=0.7)
self.figures[self.figure_name + "_pap_io"] = self.fig
def create_dataset(self):
df_list = []
# Create the fitters
range_ = [-3, 10]
nbins = 100
fitter1 = CHECSSPEFitter(self.config, self.parent)
fitter1.range = range_
fitter1.nbins = nbins
fitter2 = CHECSSPEFitter(self.config, self.parent)
fitter2.range = range_
fitter2.nbins = nbins
fitter3 = CHECSSPEFitter(self.config, self.parent)
fitter3.range = range_
fitter3.nbins = nbins
fitter_multi = CHECSSPEMultiFitter(self.config, self.parent)
fitter_multi.range = range_
fitter_multi.nbins = nbins
lambda_1 = np.linspace(0.3, 1.5, 10)
lambda_2 = np.linspace(0.5, 3, 10)
lambda_3 = np.linspace(0.7, 4.5, 10)
for i in trange(10):
params1 = get_params(lambda_1[i])
params2 = get_params(lambda_2[i])
params3 = get_params(lambda_3[i])
fitter1.initial = get_initial(ceil(lambda_1[i]))
fitter2.initial = get_initial(ceil(lambda_2[i]))
fitter3.initial = get_initial(ceil(lambda_3[i]))
fitter_multi.initial = get_initial_multi(fitter1.initial,
fitter2.initial,
fitter3.initial)
params_multi = get_params_multi(params1, params2, params3)
x = np.linspace(-3, 10, 1000)
for j in trange(100):
samples1, y1 = sample_distribution(x, params1)
samples2, y2 = sample_distribution(x, params2)
samples3, y3 = sample_distribution(x, params3)
fitter1.apply(samples1)
p1 = fitter1.p_value
rlambda_1 = fitter1.coeff['lambda_']
ropct_1 = fitter1.coeff['opct']
rpap_1 = fitter1.coeff['pap']
fitter2.apply(samples2)
p2 = fitter2.p_value
rlambda_2 = fitter2.coeff['lambda_']
ropct_2 = fitter2.coeff['opct']
rpap_2 = fitter2.coeff['pap']
fitter3.apply(samples3)
p3 = fitter3.p_value
rlambda_3 = fitter3.coeff['lambda_']
ropct_3 = fitter3.coeff['opct']
rpap_3 = fitter3.coeff['pap']
fitter_multi.apply_multi(samples1, samples2, samples3)
pm = fitter_multi.p_value
rlambda_m1 = fitter_multi.coeff['lambda_1']
rlambda_m2 = fitter_multi.coeff['lambda_2']
rlambda_m3 = fitter_multi.coeff['lambda_3']
ropct_m = fitter_multi.coeff['opct']
rpap_m = fitter_multi.coeff['pap']
df_list.append(dict(
x=(lambda_1[i], lambda_2[i], lambda_3[i]),
p1=p1, p2=p2, p3=p3, pm=pm,
rlambda_1=rlambda_1, rlambda_2=rlambda_2, rlambda_3=rlambda_3,
rlambda_m1=rlambda_m1, rlambda_m2=rlambda_m2, rlambda_m3=rlambda_m3,
pap=params1['pap'], opct=params1['opct'],
rpap_1=rpap_1, rpap_2=rpap_2, rpap_3=rpap_3, rpap_m=rpap_m,
ropct_1=ropct_1, ropct_2=ropct_2, ropct_3=ropct_3, ropct_m=ropct_m
))
df = pd.DataFrame(df_list)
return df
# To access Earth Engine Python API.
import ee
ee.Authenticate()
ee.Initialize()
# For data manipulation and analysis.
import math
import pandas as pd
import numpy as np
np.set_printoptions(precision=4, suppress=True)
from datetime import datetime
import scipy.signal
# For plotting
import matplotlib.pyplot as plt
from matplotlib import gridspec
import matplotlib.dates as mdates
import datetime as dt
def MakeGrid(geometry, scale):
"""Takes a polygon and creates a grid of polygons inside the shape.
Adapted from: https://developers.google.com/earth-engine/tutorials/community/drawing-tools
Keyword arguments:
geometry -- Earth Engine geometry object
scale -- desired spacing of grid
"""
# pixelLonLat returns an image with each pixel labeled with longitude and
# latitude values.
lonLat = ee.Image.pixelLonLat()
# Select the longitude and latitude bands, multiply by a large number then
# truncate them to integers.
lonGrid = lonLat.select('longitude').multiply(10000000).toInt()
latGrid = lonLat.select('latitude').multiply(10000000).toInt()
# To produce the grid, multiply the latitude and longitude images and then use
# reduceToVectors at the requested scale to group the pixels into grid polygons.
return lonGrid.multiply(latGrid).reduceToVectors(geometry = geometry, scale = scale, geometryType = 'polygon',)
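# Illustrative sketch (not part of the original module): build a 10 km grid over
# a hypothetical bounding box and count the resulting cells. The coordinates are
# placeholders.
#
# aoi_demo = ee.Geometry.Rectangle([-122.6, 37.0, -121.8, 37.6])
# grid_demo = MakeGrid(aoi_demo, 10000)
# print(grid_demo.size().getInfo())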
def Add_ID_to_Features(dataset):
"""Gives a unique ID number to each feature in a feature collection, as a new property.
Adapted from: https://gis.stackexchange.com/questions/374137/add-an-incremential-number-to-each-feature-in-a-featurecollection-in-gee
Keyword argument:
dataset -- Earth Engine feature collection
"""
indexes = ee.List(dataset.aggregate_array('system:index'))
feat_ids = ee.List.sequence(1, indexes.size())
idByIndex = ee.Dictionary.fromLists(indexes, feat_ids)
def function(feature):
"""Adds the ID number to the feature."""
return feature.set('ID', idByIndex.get(feature.get('system:index')))
# Map the function over the dataset.
return dataset.map(function)
def WindowDate_to_CSV(windowTimes, name_str, dayInterval):
"""Convert NumPy array of epoch times into human-readable format, in Pandas dataframe and CSV.
Keyword argument:
windowTimes -- NumPy array of dates in Unix epoch format (float)
dayInterval -- Interval between windows in days (int)
"""
windowDateTime = []
dayInterval = str(dayInterval)
for time in range(len(windowTimes)):
# Convert ms to s by dividing by 1000
windowDateTime.append( datetime.fromtimestamp(windowTimes[time] / 1000) )
windowDateTime_df = pd.DataFrame(windowDateTime)
windowDateTime_df.columns = ["Time"]
windowDateTime_df.to_csv("windows_{}_{}day_interval.csv".format(name_str, dayInterval))
def ZonalStats(valueCollection, zoneCollection, statistic, scale = 10, tileScale = 16):
"""Computes zonal statistics across an image collection as a table.
An output value is computed:
1) For every zone in the input zone collection.
2) For every image in the value collection.
Keyword arguments:
valueCollection -- image collection whose values are used to find the output statistic
zoneCollection -- feature collection of polygons that define zones to reduce the statistic to
statistic -- Earth Engine reducer that calculates a given statistic for each zone
scale -- pixel resolution of the data input into the function (default 10 m)
tileScale -- sets operation tile size to avoid exceeding memory limit (default 16)
"""
def ZS_Map(image):
"""Define zonal statistics operation, then apply to entire image collection
Adapted from: https://gis.stackexchange.com/questions/333392/gee-reduceregions-for-an-image-collection
"""
return image.reduceRegions(collection = zoneCollection,
reducer = statistic,
scale = scale,
tileScale = tileScale)
reduced = valueCollection.map(ZS_Map)
# The above gives a collection of collections. To convert to a "table", simply flatten it.
return reduced.flatten()
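# Illustrative sketch (not part of the original module): reduce a Sentinel-1
# collection to per-polygon means. `grid_demo` is a hypothetical feature
# collection of zones (e.g. from MakeGrid) and the date range is arbitrary.
#
# s1_demo = (ee.ImageCollection('COPERNICUS/S1_GRD')
#            .filterDate('2020-01-01', '2020-02-01')
#            .filterBounds(grid_demo.geometry())
#            .select('VV'))
# table_demo = ZonalStats(s1_demo, grid_demo, ee.Reducer.mean())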
def MovingWindowDetectChange(pre_window_Start, window_Start, window_End, post_window_End, sizeWindows, numWindows, polygons, slope_threshold = 0.5, curv_threshold = -0.005):
"""Compute SAR intensity change detection algorithm over a moving window and aggregate with zonal statistics.
The change detection uses the ratio of a stack of SAR images before the window and a stack of SAR images after the window.
SAR intensity ratio change detection code written by <NAME> and <NAME>.
https://doi.org/10.5194/nhess-2021-283
Keyword arguments:
pre_window_Start -- date to begin the pre-window stack
window_Start -- date that the window begins
window_End -- date that the window ends
post_window_End -- date to end the post-window stack
sizeWindows -- duration of the window in days; also determines moving window step size
numWindows -- number of times to move the window forward
polygons -- feature collection of polygons that define zones to detect landslides within
slope_threshold -- upper threshold for slope mask (default 0.5 degrees)
curv_threshold -- lower threshold for curvature mask (default -0.005 m/m^2)
"""
import datetime as dt
# Get change detection function, based on Handwerger et al. (2021)
from ChangeDetect import I_Ratio
# Define an area to filter image collections by.
aoi = polygons.geometry().bounds()
# Create an array to hold running list of window dates.
windowEpochTime = np.empty(numWindows, dtype = float)
ChangeCollection = []
i = 1
print("Processing . . .\n")
# Run the change detection function a number of times while changing the window each iteration.
for window in range(numWindows):
ChangeImg = I_Ratio(aoi, slope_threshold, curv_threshold,
pre_window_Start, window_Start,
window_End, post_window_End)
# Get the approximate date of the window by finding the mean of the beginning and end dates.
windowDateAvg = (window_Start.getInfo()['value'] + window_End.getInfo()['value']) / 2
# Divide by 1000 to convert from ms to s.
windowDateStr = dt.datetime.fromtimestamp(windowDateAvg / 1000).strftime('%Y-%m-%d')
print('\tWindow: ', i, ' of ', numWindows, '(',windowDateStr , ')')
i = i + 1
if ChangeImg is None:
"""Results from no data in one or more stacks.
Pass null output and advance to the next window."""
# Prepare for the next computation.
# Move all dates forward a set number of days equal to the window size.
pre_window_Start = pre_window_Start.advance(sizeWindows, 'day')
window_Start = window_Start.advance(sizeWindows, 'day')
window_End = window_End.advance(sizeWindows, 'day')
post_window_End = post_window_End.advance(sizeWindows, 'day')
windowEpochTime[window] = None
else:
# Add the date of this window to the list.
windowEpochTime[window] = windowDateAvg
pre_window_Start = pre_window_Start.advance(sizeWindows, 'day')
window_Start = window_Start.advance(sizeWindows, 'day')
window_End = window_End.advance(sizeWindows, 'day')
post_window_End = post_window_End.advance(sizeWindows, 'day')
# Build an image collection out of the intensity change images.
# Saving all images to a list and converting all at once tends to run out
# of memory, so add images to collection one at a time.
if not ChangeCollection:
# Initialize the collection during the first iteration of the loop.
ChangeCollection = ee.ImageCollection(ChangeImg)
else:
            # The EE API has no call to append a single image to an ImageCollection, so the
            # new image is wrapped in a one-image collection and merged into the running collection.
ChangeCollection = ChangeCollection.merge(ee.ImageCollection(ChangeImg))
# Find zonal statistics across the entire image collection.
zonalChange_sum = ZonalStats(ChangeCollection, polygons, ee.Reducer.sum())
zonalChange_count = ZonalStats(ChangeCollection, polygons, ee.Reducer.count())
print('\n*** Complete! ***')
return zonalChange_sum, zonalChange_count, windowEpochTime
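# A minimal illustrative sketch (not called anywhere in this workflow) of the date bookkeeping
# that MovingWindowDetectChange performs: every boundary advances by the window size each
# iteration, so consecutive windows are adjacent and non-overlapping. Plain datetime objects
# stand in for ee.Date here; the function name and arguments are illustrative only.
def _example_window_schedule(pre_start, size_pre_stack, size_window, num_windows):
    """Return (window_start, window_end) pairs for each iteration of the moving window."""
    from datetime import timedelta
    schedule = []
    window_start = pre_start + timedelta(days=size_pre_stack)
    for _ in range(num_windows):
        window_end = window_start + timedelta(days=size_window)
        schedule.append((window_start, window_end))
        # Advance by one window size, as the EE version does for all four dates.
        window_start = window_start + timedelta(days=size_window)
    return schedule
# e.g. _example_window_schedule(datetime.date(2016, 6, 3), 365, 7, 3) yields three adjacent
# 7-day windows starting on 2017-06-03.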
def Get_BeforeAfter_Imagery(out_dir, ID, event_time, region, sizeWindows):
"""Obtain cloud-masked Sentinel-2 imagery before/after a given date.
    Keyword arguments:
    out_dir -- name of the Google Drive folder to export the GeoTIFFs to
    ID -- unique integer identifier of the polygon representing the landslide detection area
    event_time -- mean date of the detection window (in a format accepted by ee.Date)
    region -- feature collection of detection-zone polygons; filtered by ID for the export extent
sizeWindows -- duration of the detection window in days
"""
def MaskS2clouds(image):
"""Filter and mask clouds for Sentinel-2 optical data
From: https://developers.google.com/earth-engine/datasets/catalog/COPERNICUS_S2
"""
qa = image.select('QA60')
        # Bits 10 and 11 are clouds and cirrus, respectively.
cloudBitMask = 1 << 10
cirrusBitMask = 1 << 11
# Both flags should be set to zero, indicating clear conditions.
mask = (qa.bitwiseAnd(cloudBitMask).eq(0)
.And(qa.bitwiseAnd(cirrusBitMask).eq(0))
)
return image.updateMask(mask).divide(10000)
str_pre = "{}_{}_pre".format(ID, str(event_time)[ 0 : 10 ])
str_post = "{}_{}_post".format(ID, str(event_time)[ 0 : 10 ])
event_time_ee = ee.Date(event_time)
# Pre-window time period for pre-event S2 image collection.
preWindow_T1 = event_time_ee.advance(-sizeWindows - 30, 'day')
preWindow_T2 = event_time_ee.advance(-sizeWindows, 'day')
# Post-window time period for post-event S2 image collection.
postWindow_T1 = event_time_ee.advance(sizeWindows, 'day')
postWindow_T2 = event_time_ee.advance(sizeWindows + 30, 'day')
optical_pre = (ee.ImageCollection('COPERNICUS/S2')
.filterDate(preWindow_T1, preWindow_T2)
.filter(ee.Filter.lt('CLOUDY_PIXEL_PERCENTAGE', 10))
.filterBounds(region)
.map(MaskS2clouds)
.median()
)
optical_post = (ee.ImageCollection('COPERNICUS/S2')
.filterDate(postWindow_T1, postWindow_T2)
.filter(ee.Filter.lt('CLOUDY_PIXEL_PERCENTAGE', 10))
.filterBounds(region)
.map(MaskS2clouds)
.median()
)
exportRegion = ee.Geometry.Polygon(region.filter(ee.Filter.eq('ID', ID)).first().getInfo()['geometry']['coordinates'])
exportImgPre = ee.batch.Export.image.toDrive(image = optical_pre,
folder = out_dir,
description = str_pre,
region = exportRegion,
scale = 10,
maxPixels = 1e9,
fileFormat = 'GeoTIFF')
exportImgPost = ee.batch.Export.image.toDrive(image = optical_post,
folder = out_dir,
description = str_post,
region = exportRegion,
scale = 10,
maxPixels = 1e9,
fileFormat = 'GeoTIFF')
exportImgPre.start()
exportImgPost.start()
def Get_BeforeAfter_NDVI(ID, event_time, region, sizeWindows):
"""Obtain cloud-masked median NDVI before/after a given date.
Keyword arguments:
ID -- unique integer identifier of polygon representing landslide detection area
    event_time -- mean date of the detection window (in a format accepted by ee.Date)
    region -- feature collection of detection-zone polygons; filtered by ID for the statistics extent
sizeWindows -- duration of the detection window in days
"""
def AddNDVI(image):
"""Adds an NDVI band to a Sentinel-2 image. NDVI is calculated as
        the normalized difference between the near-infrared and red bands,
        which correspond to bands B8 and B4 in Sentinel-2 imagery.
From: https://developers.google.com/earth-engine/tutorials/tutorial_api_06
"""
ndvi = image.normalizedDifference(['B8', 'B4']).rename('NDVI')
return image.addBands(ndvi)
def MaskS2clouds(image):
"""Filter and mask clouds for Sentinel-2 optical data
From: https://developers.google.com/earth-engine/datasets/catalog/COPERNICUS_S2
"""
qa = image.select('QA60')
        # Bits 10 and 11 are clouds and cirrus, respectively.
cloudBitMask = 1 << 10
cirrusBitMask = 1 << 11
# Both flags should be set to zero, indicating clear conditions.
mask = (qa.bitwiseAnd(cloudBitMask).eq(0)
.And(qa.bitwiseAnd(cirrusBitMask).eq(0))
)
return image.updateMask(mask).divide(10000)
event_time_ee = ee.Date(event_time)
# Define pre-event time period, for pre-event NDVI image.
preWindow_T1 = event_time_ee.advance(-sizeWindows - 30, 'day')
preWindow_T2 = event_time_ee.advance(-sizeWindows, 'day')
# Define post-event time period, for post-event NDVI image
postWindow_T1 = event_time_ee.advance(sizeWindows, 'day')
postWindow_T2 = event_time_ee.advance(sizeWindows + 30, 'day')
    # Get Sentinel-2 imagery (COPERNICUS/S2) before and after the window.
s2_sr_before = ee.ImageCollection('COPERNICUS/S2').filterDate(preWindow_T1, preWindow_T2).filterBounds(region)
s2_sr_after = ee.ImageCollection('COPERNICUS/S2').filterDate(postWindow_T1, postWindow_T2).filterBounds(region)
# Apply the cloud masking function to the before/after image collections.
s2_sr_before = s2_sr_before.map(MaskS2clouds)
s2_sr_after = s2_sr_after.map(MaskS2clouds)
# Apply the NDVI function to the before/after image collections.
s2_ndvi_before = s2_sr_before.map(AddNDVI)
s2_ndvi_after = s2_sr_after.map(AddNDVI)
# Find median of the images in the pre-event image collection to get a pre-event image.
pre_event_NDVI_img = s2_ndvi_before.select('NDVI').median()
# Find median of the images in the post-event image collection, to get a post-event image.
post_event_NDVI_img = s2_ndvi_after.select('NDVI').median()
# Get the average NDVI over the area
pre_NDVI = pre_event_NDVI_img.reduceRegion(
geometry = region.filter(ee.Filter.eq('ID', ID)),
reducer = ee.Reducer.mean(),
scale = 10,
bestEffort = True,
maxPixels = 1e9
)
post_NDVI = post_event_NDVI_img.reduceRegion(
geometry = region.filter(ee.Filter.eq('ID', ID)),
reducer = ee.Reducer.mean(),
scale = 10,
bestEffort = True,
maxPixels = 1e9
)
return pre_NDVI, post_NDVI
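# For reference, normalizedDifference(['B8', 'B4']) above is the standard NDVI formula
# NDVI = (NIR - Red) / (NIR + Red). A small numpy sketch with made-up reflectance values,
# included only to make the computation explicit:
def _example_ndvi(nir, red):
    """Compute NDVI from near-infrared and red reflectance values."""
    nir = np.asarray(nir, dtype=float)
    red = np.asarray(red, dtype=float)
    return (nir - red) / (nir + red)
# e.g. _example_ndvi([0.4, 0.5], [0.1, 0.3]) -> array([0.6, 0.25])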
# Name of output Google Drive folder for CSVs and GeoTiffs.
out_dir = 'EE_SAR_MovingWindow_ChangeDetection_OR_Eugene_Subset'
# Descriptive prefix for output files.
file_name = 'OR_LS_Eugene'
# Area of interest to make a grid over.
ee_import = ee.FeatureCollection("users/bvoelker/OR_Landslides/OR_LS_Eugene_Subset")
aoi = ee_import.geometry()
# Changes will be detected within the cells of a regularly spaced grid of polygons.
grid_size = 500 # 500m scale
grid = MakeGrid(aoi, grid_size)
grid = Add_ID_to_Features(grid)
# Define duration of window.
sizeWindows = 7 # days
# Number of windows controls number of loops in change detection function.
# sizeWindows * numWindows = total detection period
numWindows = 208 + 20 # approx. 4 years plus 5 months
# Define how many days are in the pre window stack.
# Recommended to be a multiple of one year so that whole cycles of seasonal vegetation change are captured.
sizePreStack = 365 # days
# Define how many days are in the post window stack.
# A shorter post-event stack resolves changes sooner, at the cost of more noise in the SAR stack.
sizePostStack = 60 # days
# These parameters determine only the dates of the first window. All dates will be iteratively
# moved forward within the 'MovingWindowDetectChange' function.
# The date to start initial pre-window stack.
"""GEE S1_GRD data beings on 2014-10-03T00:00:00Z.
First ASF S1 data over Oregon is June 3 2015.
However, full coverage of the state only begins in 2016."""
pre_window_Start = ee.Date('2016-06-03T00:00')  # format: 'yyyy-MM-ddTHH:MM'
# Date of the initial window start | End of pre-window stack.
window_Start = pre_window_Start.advance(sizePreStack, 'day')
# Date of the initial window end | Start of post-window stack.
window_End = window_Start.advance(sizeWindows, 'day')
# The date to end initial post-window stack.
post_window_End = window_End.advance(sizePostStack, 'day')
# Apply parameters to moving window SAR intensity change detection function.
sumChangePerZone, countPerZone, windowTimes = MovingWindowDetectChange(pre_window_Start, window_Start,
window_End, post_window_End,
sizeWindows, numWindows,
grid)
# Remove nans from window date array, in cases where there were empty stacks.
windowTimes = windowTimes[~np.isnan(windowTimes)]
# To further manipulate the data outside of EE, export to a CSV.
exportToCSV_sum = ee.batch.Export.table.toDrive(collection = sumChangePerZone,
folder = out_dir,
description = "{}_99th_Ptile_sum".format(file_name),
fileFormat = 'CSV')
exportToCSV_count = ee.batch.Export.table.toDrive(collection = countPerZone,
folder = out_dir,
description = "{}_pixel_count".format(file_name),
fileFormat = 'CSV')
exportToCSV_sum.start()
exportToCSV_count.start()
WindowDate_to_CSV(windowTimes, file_name, sizeWindows)
"""
Wait for export to finish in GEE before continuing!
"""
#########################################################################################################################
"""
When finished, save CSVs to same directory as notebook.
"""
detection_date_list = []
detection_ID_list = []
detection_index_list = []
# Number of landslide polygons or grid cells.
numPolys = grid.size().getInfo()
# To avoid repeated detections of the same event, determine how many windows must pass
# before another detection can be recorded for the same grid cell.
reset_interval = math.ceil(math.ceil(sizePreStack / 2) / sizeWindows)
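# Worked example with the values defined above (sizePreStack = 365, sizeWindows = 7):
# reset_interval = ceil(ceil(365 / 2) / 7) = ceil(183 / 7) = 27 windows, i.e. roughly 189 days
# must pass before the same grid cell can register a new detection.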
# Load in the CSVs:
# The needed columns are the ID numbers and zonal statistics result (either sum or count).
data_sum_raw = pd.read_csv("{}_99th_Ptile_sum.csv".format(file_name))
data_count_raw = pd.read_csv("{}_pixel_count.csv".format(file_name))
# Read in window dates
windows = pd.read_csv("windows_{}_{}day_interval.csv".format(file_name, sizeWindows))
windows['Time'] = | pd.to_datetime(windows['Time']) | pandas.to_datetime |
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
        Whether the method "opname" has the kwarg "skipna"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
    check_dates : bool, default False
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
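# Hedged usage sketch (underscore-prefixed so pytest does not collect it): how
# assert_stat_op_calc is meant to be driven, here checking DataFrame.mean against np.mean on a
# small frame containing a NaN. The frame and values are illustrative only.
def _example_assert_stat_op_calc():
    frame = DataFrame({'a': [1.0, 2.0, np.nan], 'b': [4.0, 5.0, 6.0]})
    assert_stat_op_calc('mean', np.mean, frame)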
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
        Whether the method "opname" has the kwarg "skipna"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
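# Hedged usage sketch (underscore-prefixed so pytest does not collect it): exercising
# assert_bool_op_calc with 'any' against np.any on a small all-boolean frame. The frame is
# illustrative only.
def _example_assert_bool_op_calc():
    frame = DataFrame({'a': [True, False, False], 'b': [False, False, True]})
    assert_bool_op_calc('any', np.any, frame)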
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
    bool_frame_with_na : DataFrame
        DataFrame of booleans with some NA values
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = np.nan
frame['B'][5:10] = np.nan
result = float_frame.cov(min_periods=len(float_frame) - 8)
expected = float_frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
# regular
float_frame['A'][:5] = np.nan
float_frame['B'][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4),
index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH 21925
df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------------
# Describe
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({'s1': s1, 's2': s2})
expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
2, 1.581139, 0, 1, 2, 3, 4],
's2': [5, 5, s2.value_counts().index[0], 1,
start.tz_localize(tz),
end.tz_localize(tz), np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]},
index=['count', 'unique', 'top', 'freq', 'first',
'last', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max']
)
result = df.describe(include='all')
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Reductions
def test_stat_op_api(self, float_frame, float_string_frame):
assert_stat_op_api('count', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('sum', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('nunique', float_frame, float_string_frame)
assert_stat_op_api('mean', float_frame, float_string_frame)
assert_stat_op_api('product', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
assert_stat_op_api('min', float_frame, float_string_frame)
assert_stat_op_api('max', float_frame, float_string_frame)
assert_stat_op_api('mad', float_frame, float_string_frame)
assert_stat_op_api('var', float_frame, float_string_frame)
assert_stat_op_api('std', float_frame, float_string_frame)
assert_stat_op_api('sem', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_api('skew', float_frame, float_string_frame)
assert_stat_op_api('kurt', float_frame, float_string_frame)
except ImportError:
pass
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
def count(s):
return notna(s).sum()
def nunique(s):
return len(algorithms.unique1d(s.dropna()))
def mad(x):
return np.abs(x - x.mean()).mean()
def var(x):
return np.var(x, ddof=1)
def std(x):
return np.std(x, ddof=1)
def sem(x):
return np.std(x, ddof=1) / np.sqrt(len(x))
def skewness(x):
from scipy.stats import skew # noqa:F811
if len(x) < 3:
return np.nan
return skew(x, bias=False)
def kurt(x):
from scipy.stats import kurtosis # noqa:F811
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
assert_stat_op_calc('nunique', nunique, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
# mixed types (with upcasting happening)
assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'),
check_dtype=False, check_less_precise=True)
assert_stat_op_calc('sum', np.sum, float_frame_with_na,
skipna_alternative=np.nansum)
assert_stat_op_calc('mean', np.mean, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('product', np.prod, float_frame_with_na)
assert_stat_op_calc('mad', mad, float_frame_with_na)
assert_stat_op_calc('var', var, float_frame_with_na)
assert_stat_op_calc('std', std, float_frame_with_na)
assert_stat_op_calc('sem', sem, float_frame_with_na)
assert_stat_op_calc('count', count, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
try:
            from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_calc('skew', skewness, float_frame_with_na)
assert_stat_op_calc('kurt', kurt, float_frame_with_na)
except ImportError:
pass
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc('median', wrapper, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False,
check_dates=True)
@pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var',
'std', 'skew', 'min', 'max'])
def test_stat_operators_attempt_obj_array(self, method):
# GH#676
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'], dtype='O')
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
assert df.values.dtype == np.object_
result = getattr(df, method)(1)
expected = getattr(df.astype('f8'), method)(1)
if method in ['sum', 'prod']:
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('op', ['mean', 'std', 'var',
'skew', 'kurt', 'sem'])
def test_mixed_ops(self, op):
# GH#16116
df = DataFrame({'int': [1, 2, 3, 4],
'float': [1., 2., 3., 4.],
'str': ['a', 'b', 'c', 'd']})
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context('use_bottleneck', False):
result = getattr(df, op)()
assert len(result) == 2
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_nunique(self):
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
Series({'A': 1, 'B': 3, 'C': 3}))
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_mixed_datetime_numeric(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
df = pd.DataFrame({"A": [1, 1],
"B": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series([1.0], index=['A'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
    def test_mean_excludes_datetimes(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
# Our long-term desired behavior is unclear, but the behavior in
# 0.24.0rc1 was buggy.
df = pd.DataFrame({"A": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series()
tm.assert_series_equal(result, expected)
def test_var_std(self, datetime_frame):
result = datetime_frame.std(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = datetime_frame.var(ddof=4)
expected = datetime_frame.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize(
"meth", ['sem', 'var', 'std'])
def test_numeric_only_flag(self, meth):
# GH 9201
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.loc[0, 'foo'] = '100'
df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a non-number str
df2.loc[0, 'foo'] = 'a'
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"
with pytest.raises(TypeError, match=msg):
getattr(df1, meth)(axis=1, numeric_only=False)
msg = "could not convert string to float: 'a'"
with pytest.raises(TypeError, match=msg):
getattr(df2, meth)(axis=1, numeric_only=False)
def test_sem(self, datetime_frame):
result = datetime_frame.sem(ddof=4)
expected = datetime_frame.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
@td.skip_if_no_scipy
def test_kurt(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
tm.assert_series_equal(kurt, kurt2, check_names=False)
assert kurt.name is None
assert kurt2.name == 'bar'
@pytest.mark.parametrize("dropna, expected", [
(True, {'A': [12],
'B': [10.0],
'C': [1.0],
'D': ['a'],
'E': Categorical(['a'], categories=['a']),
'F': to_datetime(['2000-1-2']),
'G': to_timedelta(['1 days'])}),
(False, {'A': [12],
'B': [10.0],
'C': [np.nan],
'D': np.array([np.nan], dtype=object),
'E': Categorical([np.nan], categories=['a']),
'F': [pd.NaT],
'G': to_timedelta([pd.NaT])}),
(True, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical(['a', np.nan, np.nan, np.nan],
categories=['a']),
'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),
'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),
'N': [0, 1, 2, 3]}),
(False, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical([np.nan, 'a', np.nan, np.nan],
categories=['a']),
'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),
'N': [0, 1, 2, 3]})
])
def test_mode_dropna(self, dropna, expected):
df = DataFrame({"A": [12, 12, 19, 11],
"B": [10, 10, np.nan, 3],
"C": [1, np.nan, np.nan, np.nan],
"D": [np.nan, np.nan, 'a', np.nan],
"E": Categorical([np.nan, np.nan, 'a', np.nan]),
"F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
"G": to_timedelta(['1 days', 'nan', 'nan', 'nan']),
"H": [8, 8, 9, 9],
"I": [9, 9, 8, 8],
"J": [1, 1, np.nan, np.nan],
"K": Categorical(['a', np.nan, 'a', np.nan]),
"L": to_datetime(['2000-1-2', '2000-1-2',
'NaT', 'NaT']),
"M": to_timedelta(['1 days', 'nan',
'1 days', 'nan']),
"N": np.arange(4, dtype='int64')})
result = df[sorted(list(expected.keys()))].mode(dropna=dropna)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']})
expected = DataFrame({'A': ['a', np.nan]})
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
result = df.mode(dropna=False)
result = result.sort_values(by='A').reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_operators_timedelta64(self):
df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
B=date_range('2012-1-2', periods=3, freq='D'),
C=Timestamp('20120101') -
timedelta(minutes=5, seconds=5)))
diffs = DataFrame(dict(A=df['A'] - df['C'],
B=df['A'] - df['B']))
# min
result = diffs.min()
assert result[0] == diffs.loc[0, 'A']
assert result[1] == diffs.loc[0, 'B']
result = diffs.min(axis=1)
assert (result == diffs.loc[0, 'B']).all()
# max
result = diffs.max()
assert result[0] == diffs.loc[2, 'A']
assert result[1] == diffs.loc[2, 'B']
result = diffs.max(axis=1)
assert (result == diffs['A']).all()
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame(dict(A=df['A'] - df['C'],
B=df['B'] - df['A']))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed['C'] = 'foo'
mixed['D'] = 1
mixed['E'] = 1.
mixed['F'] = Timestamp('20130101')
# results in an object array
result = mixed.min()
expected = Series([pd.Timedelta(timedelta(seconds=5 * 60 + 5)),
pd.Timedelta(timedelta(days=-1)),
'foo', 1, 1.0,
Timestamp('20130101')],
index=mixed.columns)
tm.assert_series_equal(result, expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
# works when only those columns are selected
result = mixed[['A', 'B']].min(1)
expected = Series([timedelta(days=-1)] * 3)
tm.assert_series_equal(result, expected)
result = mixed[['A', 'B']].min()
expected = Series([timedelta(seconds=5 * 60 + 5),
timedelta(days=-1)], index=['A', 'B'])
tm.assert_series_equal(result, expected)
# GH 3106
df = DataFrame({'time': date_range('20130102', periods=5),
'time2': date_range('20130105', periods=5)})
df['off1'] = df['time2'] - df['time']
assert df['off1'].dtype == 'timedelta64[ns]'
df['off2'] = df['time'] - df['time2']
df._consolidate_inplace()
assert df['off1'].dtype == 'timedelta64[ns]'
assert df['off2'].dtype == 'timedelta64[ns]'
def test_sum_corner(self):
empty_frame = DataFrame()
axis0 = empty_frame.sum(0)
axis1 = empty_frame.sum(1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
assert len(axis1) == 0
@pytest.mark.parametrize('method, unit', [
('sum', 0),
('prod', 1),
])
def test_sum_prod_nanops(self, method, unit):
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [unit, unit],
"b": [unit, np.nan],
"c": [np.nan, np.nan]})
# The default
        result = getattr(df, method)()
        expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
        tm.assert_series_equal(result, expected)
# min_count=1
result = getattr(df, method)(min_count=1)
expected = pd.Series([unit, unit, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(df, method)(min_count=0)
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)
result = getattr(df.iloc[1:], method)(min_count=1)
expected = pd.Series([unit, np.nan, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count > 1
df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
result = getattr(df, method)(min_count=5)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
result = getattr(df, method)(min_count=6)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
def test_sum_nanops_timedelta(self):
# prod isn't defined on timedeltas
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [0, 0],
"b": [0, np.nan],
"c": [np.nan, np.nan]})
df2 = df.apply(pd.to_timedelta)
# 0 by default
result = df2.sum()
expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = df2.sum(min_count=0)
tm.assert_series_equal(result, expected)
# min_count=1
result = df2.sum(min_count=1)
expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
def test_sum_object(self, float_frame):
values = float_frame.values.astype(int)
frame = DataFrame(values, index=float_frame.index,
columns=float_frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
def test_sum_bool(self, float_frame):
# ensure this works, bug report
bools = np.isnan(float_frame)
bools.sum(1)
bools.sum(0)
def test_mean_corner(self, float_frame, float_string_frame):
# unit test when have object data
the_mean = float_string_frame.mean(axis=0)
the_sum = float_string_frame.sum(axis=0, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
assert len(the_mean.index) < len(float_string_frame.columns)
# xs sum mixed type, just want to know it works...
the_mean = float_string_frame.mean(axis=1)
the_sum = float_string_frame.sum(axis=1, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
# take mean of boolean column
float_frame['bool'] = float_frame['A'] > 0
means = float_frame.mean(0)
assert means['bool'] == float_frame['bool'].values.mean()
def test_stats_mixed_type(self, float_string_frame):
# don't blow up
float_string_frame.std(1)
float_string_frame.var(1)
float_string_frame.mean(1)
float_string_frame.skew(1)
def test_sum_bools(self):
df = DataFrame(index=lrange(1), columns=lrange(10))
bools = isna(df)
assert bools.sum(axis=1)[0] == 10
# ---------------------------------------------------------------------
# Cumulative Reductions - cumsum, cummax, ...
def test_cumsum_corner(self):
dm = DataFrame(np.arange(20).reshape(4, 5),
index=lrange(4), columns=lrange(5))
# ?(wesm)
result = dm.cumsum() # noqa
def test_cumsum(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cumsum = datetime_frame.cumsum()
expected = datetime_frame.apply(Series.cumsum)
tm.assert_frame_equal(cumsum, expected)
# axis = 1
cumsum = datetime_frame.cumsum(axis=1)
expected = datetime_frame.apply(Series.cumsum, axis=1)
tm.assert_frame_equal(cumsum, expected)
# works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cumsum() # noqa
# fix issue
cumsum_xs = datetime_frame.cumsum(axis=1)
assert np.shape(cumsum_xs) == np.shape(datetime_frame)
def test_cumprod(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cumprod = datetime_frame.cumprod()
expected = datetime_frame.apply(Series.cumprod)
tm.assert_frame_equal(cumprod, expected)
# axis = 1
cumprod = datetime_frame.cumprod(axis=1)
expected = datetime_frame.apply(Series.cumprod, axis=1)
tm.assert_frame_equal(cumprod, expected)
# fix issue
cumprod_xs = datetime_frame.cumprod(axis=1)
assert np.shape(cumprod_xs) == np.shape(datetime_frame)
# ints
df = datetime_frame.fillna(0).astype(int)
df.cumprod(0)
df.cumprod(1)
# ints32
df = datetime_frame.fillna(0).astype(np.int32)
df.cumprod(0)
df.cumprod(1)
def test_cummin(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cummin = datetime_frame.cummin()
expected = datetime_frame.apply(Series.cummin)
tm.assert_frame_equal(cummin, expected)
# axis = 1
cummin = datetime_frame.cummin(axis=1)
expected = datetime_frame.apply(Series.cummin, axis=1)
tm.assert_frame_equal(cummin, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummin() # noqa
# fix issue
cummin_xs = datetime_frame.cummin(axis=1)
assert np.shape(cummin_xs) == np.shape(datetime_frame)
def test_cummax(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cummax = datetime_frame.cummax()
expected = datetime_frame.apply(Series.cummax)
tm.assert_frame_equal(cummax, expected)
# axis = 1
cummax = datetime_frame.cummax(axis=1)
expected = datetime_frame.apply(Series.cummax, axis=1)
tm.assert_frame_equal(cummax, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummax() # noqa
# fix issue
cummax_xs = datetime_frame.cummax(axis=1)
assert np.shape(cummax_xs) == np.shape(datetime_frame)
# ---------------------------------------------------------------------
# Miscellanea
def test_count(self):
# corner case
frame = DataFrame()
ct1 = frame.count(1)
assert isinstance(ct1, Series)
ct2 = frame.count(0)
assert isinstance(ct2, Series)
# GH#423
df = DataFrame(index=lrange(10))
result = df.count(1)
expected = Series(0, index=df.index)
tm.assert_series_equal(result, expected)
df = DataFrame(columns=lrange(10))
result = df.count(0)
expected = Series(0, index=df.columns)
tm.assert_series_equal(result, expected)
df = DataFrame()
result = df.count()
expected = Series(0, index=[])
tm.assert_series_equal(result, expected)
def test_count_objects(self, float_string_frame):
dm = DataFrame(float_string_frame._series)
df = DataFrame(float_string_frame._series)
tm.assert_series_equal(dm.count(), df.count())
tm.assert_series_equal(dm.count(1), df.count(1))
def test_pct_change(self):
# GH#11150
pnl = DataFrame([np.arange(0, 40, 10),
np.arange(0, 40, 10),
np.arange(0, 40, 10)]).astype(np.float64)
pnl.iat[1, 0] = np.nan
pnl.iat[1, 1] = np.nan
pnl.iat[2, 3] = 60
for axis in range(2):
expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(
axis=axis) - 1
result = pnl.pct_change(axis=axis, fill_method='pad')
tm.assert_frame_equal(result, expected)
# ----------------------------------------------------------------------
# Index of max / min
def test_idxmin(self, float_frame, int_frame):
frame = float_frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, int_frame]:
result = df.idxmin(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmin, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
msg = ("No axis named 2 for object type"
" <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
frame.idxmin(axis=2)
def test_idxmax(self, float_frame, int_frame):
frame = float_frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, int_frame]:
result = df.idxmax(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmax, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
msg = ("No axis named 2 for object type"
" <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
frame.idxmax(axis=2)
# ----------------------------------------------------------------------
# Logical reductions
@pytest.mark.parametrize('opname', ['any', 'all'])
def test_any_all(self, opname, bool_frame_with_na, float_string_frame):
assert_bool_op_calc(opname, getattr(np, opname), bool_frame_with_na,
has_skipna=True)
assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=True)
def test_any_all_extra(self):
df = DataFrame({
'A': [True, False, False],
'B': [True, True, False],
'C': [True, True, True],
}, index=['a', 'b', 'c'])
result = df[['A', 'B']].any(1)
expected = Series([True, True, False], index=['a', 'b', 'c'])
tm.assert_series_equal(result, expected)
result = df[['A', 'B']].any(1, bool_only=True)
tm.assert_series_equal(result, expected)
result = df.all(1)
expected = Series([True, False, False], index=['a', 'b', 'c'])
tm.assert_series_equal(result, expected)
result = df.all(1, bool_only=True)
tm.assert_series_equal(result, expected)
# Axis is None
result = df.all(axis=None).item()
assert result is False
result = df.any(axis=None).item()
assert result is True
result = df[['C']].all(axis=None).item()
assert result is True
def test_any_datetime(self):
# GH 23070
float_data = [1, np.nan, 3, np.nan]
datetime_data = [pd.Timestamp('1960-02-15'),
pd.Timestamp('1960-02-16'),
pd.NaT,
pd.NaT]
df = DataFrame({
"A": float_data,
"B": datetime_data
})
result = df.any(1)
expected = Series([True, True, True, False])
tm.assert_series_equal(result, expected)
def test_any_all_bool_only(self):
# GH 25101
df = DataFrame({"col1": [1, 2, 3],
"col2": [4, 5, 6],
"col3": [None, None, None]})
result = df.all(bool_only=True)
expected = Series(dtype=np.bool)
tm.assert_series_equal(result, expected)
df = DataFrame({"col1": [1, 2, 3],
"col2": [4, 5, 6],
"col3": [None, None, None],
"col4": [False, False, True]})
result = df.all(bool_only=True)
expected = Series({"col4": False})
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('func, data, expected', [
(np.any, {}, False),
(np.all, {}, True),
(np.any, {'A': []}, False),
(np.all, {'A': []}, True),
(np.any, {'A': [False, False]}, False),
(np.all, {'A': [False, False]}, False),
(np.any, {'A': [True, False]}, True),
(np.all, {'A': [True, False]}, False),
(np.any, {'A': [True, True]}, True),
(np.all, {'A': [True, True]}, True),
(np.any, {'A': [False], 'B': [False]}, False),
(np.all, {'A': [False], 'B': [False]}, False),
(np.any, {'A': [False, False], 'B': [False, True]}, True),
(np.all, {'A': [False, False], 'B': [False, True]}, False),
# other types
(np.all, {'A': pd.Series([0.0, 1.0], dtype='float')}, False),
(np.any, {'A': pd.Series([0.0, 1.0], dtype='float')}, True),
(np.all, {'A': pd.Series([0, 1], dtype=int)}, False),
(np.any, {'A': pd.Series([0, 1], dtype=int)}, True),
pytest.param(np.all, {'A': pd.Series([0, 1], dtype='M8[ns]')}, False,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([0, 1], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([0, 1], dtype='m8[ns]')}, False,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([0, 1], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
(np.all, {'A': pd.Series([0, 1], dtype='category')}, False),
(np.any, {'A': pd.Series([0, 1], dtype='category')}, True),
(np.all, {'A': pd.Series([1, 2], dtype='category')}, True),
(np.any, {'A': pd.Series([1, 2], dtype='category')}, True),
# # Mix
# GH 21484
# (np.all, {'A': pd.Series([10, 20], dtype='M8[ns]'),
# 'B': pd.Series([10, 20], dtype='m8[ns]')}, True),
])
def test_any_all_np_func(self, func, data, expected):
# GH 19976
data = DataFrame(data)
result = func(data)
assert isinstance(result, np.bool_)
assert result.item() is expected
# method version
result = getattr(DataFrame(data), func.__name__)(axis=None)
assert isinstance(result, np.bool_)
assert result.item() is expected
def test_any_all_object(self):
# GH 19976
result = np.all(DataFrame(columns=['a', 'b'])).item()
assert result is True
result = np.any(DataFrame(columns=['a', 'b'])).item()
assert result is False
@pytest.mark.parametrize('method', ['any', 'all'])
def test_any_all_level_axis_none_raises(self, method):
df = DataFrame(
{"A": 1},
index=MultiIndex.from_product([['A', 'B'], ['a', 'b']],
names=['out', 'in'])
)
xpr = "Must specify 'axis' when aggregating by level."
with pytest.raises(ValueError, match=xpr):
getattr(df, method)(axis=None, level='out')
# ----------------------------------------------------------------------
# Isin
def test_isin(self):
# GH 4211
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
other = ['a', 'b', 'c']
result = df.isin(other)
expected = DataFrame([df.loc[s].isin(other) for s in df.index])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_isin_empty(self, empty):
# GH 16991
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
expected = DataFrame(False, df.index, df.columns)
result = df.isin(empty)
tm.assert_frame_equal(result, expected)
def test_isin_dict(self):
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
d = {'A': ['a']}
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
# non unique columns
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
df.columns = ['A', 'A']
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
def test_isin_with_string_scalar(self):
# GH 4763
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
with pytest.raises(TypeError):
df.isin('a')
with pytest.raises(TypeError):
df.isin('aaa')
def test_isin_df(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]})
expected = DataFrame(False, df1.index, df1.columns)
result = df1.isin(df2)
expected['A'].loc[[1, 3]] = True
expected['B'].loc[[0, 2]] = True
tm.assert_frame_equal(result, expected)
# partial overlapping columns
df2.columns = ['A', 'C']
result = df1.isin(df2)
expected['B'] = False
tm.assert_frame_equal(result, expected)
def test_isin_tuples(self):
# GH 16394
df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
df['C'] = list(zip(df['A'], df['B']))
result = df['C'].isin([(1, 'a')])
tm.assert_series_equal(result,
Series([True, False, False], name="C"))
def test_isin_df_dupe_values(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
# just cols duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['B', 'B'])
with pytest.raises(ValueError):
df1.isin(df2)
# just index duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['A', 'B'], index=[0, 0, 1, 1])
with pytest.raises(ValueError):
df1.isin(df2)
# cols and index:
df2.columns = ['B', 'B']
with pytest.raises(ValueError):
df1.isin(df2)
def test_isin_dupe_self(self):
other = DataFrame({'A': [1, 0, 1, 0], 'B': [1, 1, 0, 0]})
df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=['A', 'A'])
result = df.isin(other)
expected = DataFrame(False, index=df.index, columns=df.columns)
expected.loc[0] = True
expected.iloc[1, 1] = True
tm.assert_frame_equal(result, expected)
def test_isin_against_series(self):
df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]},
index=['a', 'b', 'c', 'd'])
s = pd.Series([1, 3, 11, 4], index=['a', 'b', 'c', 'd'])
expected = DataFrame(False, index=df.index, columns=df.columns)
expected['A'].loc['a'] = True
expected.loc['d'] = True
result = df.isin(s)
tm.assert_frame_equal(result, expected)
def test_isin_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a', 'foo'), (0, 'a', 'bar'),
(0, 'b', 'bar'), (0, 'b', 'baz'),
(2, 'a', 'foo'), (2, 'a', 'bar'),
(2, 'c', 'bar'), (2, 'c', 'baz'),
(1, 'b', 'foo'), (1, 'b', 'bar'),
(1, 'c', 'bar'), (1, 'c', 'baz')])
df1 = DataFrame({'A': np.ones(12),
'B': np.zeros(12)}, index=idx)
df2 = DataFrame({'A': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
'B': [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1]})
# against regular index
expected = DataFrame(False, index=df1.index, columns=df1.columns)
result = df1.isin(df2)
tm.assert_frame_equal(result, expected)
df2.index = idx
expected = df2.values.astype(np.bool)
expected[:, 1] = ~expected[:, 1]
expected = DataFrame(expected, columns=['A', 'B'], index=idx)
result = df1.isin(df2)
tm.assert_frame_equal(result, expected)
def test_isin_empty_datetimelike(self):
# GH 15473
df1_ts = DataFrame({'date':
pd.to_datetime(['2014-01-01', '2014-01-02'])})
df1_td = DataFrame({'date':
[pd.Timedelta(1, 's'), pd.Timedelta(2, 's')]})
df2 = DataFrame({'date': []})
df3 = DataFrame()
expected = DataFrame({'date': [False, False]})
result = df1_ts.isin(df2)
tm.assert_frame_equal(result, expected)
result = df1_ts.isin(df3)
tm.assert_frame_equal(result, expected)
result = df1_td.isin(df2)
tm.assert_frame_equal(result, expected)
result = df1_td.isin(df3)
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Rounding
def test_round(self):
# GH 2665
# Test that rounding an empty DataFrame does nothing
df = DataFrame()
tm.assert_frame_equal(df, df.round())
# Here's the test frame we'll be working with
df = DataFrame({'col1': [1.123, 2.123, 3.123],
'col2': [1.234, 2.234, 3.234]})
# Default round to integer (i.e. decimals=0)
expected_rounded = DataFrame(
{'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
tm.assert_frame_equal(df.round(), expected_rounded)
# Round with an integer
decimals = 2
expected_rounded = DataFrame({'col1': [1.12, 2.12, 3.12],
'col2': [1.23, 2.23, 3.23]})
tm.assert_frame_equal(df.round(decimals), expected_rounded)
# This should also work with np.round (since np.round dispatches to
# df.round)
tm.assert_frame_equal(np.round(df, decimals), expected_rounded)
# Round with a list
round_list = [1, 2]
with pytest.raises(TypeError):
df.round(round_list)
# Round with a dictionary
expected_rounded = DataFrame(
{'col1': [1.1, 2.1, 3.1], 'col2': [1.23, 2.23, 3.23]})
round_dict = {'col1': 1, 'col2': 2}
tm.assert_frame_equal(df.round(round_dict), expected_rounded)
# Incomplete dict
expected_partially_rounded = DataFrame(
{'col1': [1.123, 2.123, 3.123], 'col2': [1.2, 2.2, 3.2]})
partial_round_dict = {'col2': 1}
tm.assert_frame_equal(df.round(partial_round_dict),
expected_partially_rounded)
# Dict with unknown elements
wrong_round_dict = {'col3': 2, 'col2': 1}
tm.assert_frame_equal(df.round(wrong_round_dict),
expected_partially_rounded)
# float input to `decimals`
non_int_round_dict = {'col1': 1, 'col2': 0.5}
with pytest.raises(TypeError):
df.round(non_int_round_dict)
# String input
non_int_round_dict = {'col1': 1, 'col2': 'foo'}
with pytest.raises(TypeError):
df.round(non_int_round_dict)
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
# List input
non_int_round_dict = {'col1': 1, 'col2': [1, 2]}
with pytest.raises(TypeError):
df.round(non_int_round_dict)
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
# Non integer Series inputs
        non_int_round_Series = Series(non_int_round_dict)
        with pytest.raises(TypeError):
            df.round(non_int_round_Series)
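# --- Editor's illustrative sketch (not part of the original test module) ---
# A minimal, hypothetical demo of the behaviour the tests above exercise:
# per-column rounding via a dict passed to DataFrame.round, and np.round
# dispatching to the DataFrame method.
if __name__ == '__main__':
    import numpy as _np
    import pandas as _pd

    _df = _pd.DataFrame({'col1': [1.123, 2.123], 'col2': [1.234, 2.234]})
    print(_df.round({'col1': 1, 'col2': 2}))  # per-column decimals
    print(_np.round(_df, 1))                  # dispatches to _df.round(1)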
import pandas as pd
import pandas.testing as pd_testing
import pyfakefs.fake_filesystem_unittest
import unittest
from .main import (
bh_correction,
calculate_enrichment,
count_domains_by_bait,
filter_saint,
fishers_test,
get_background,
map_file_ids,
parse_domains,
read_domains,
read_gene_map,
read_saint,
)
class ReadDomains(pyfakefs.fake_filesystem_unittest.TestCase):
def setUp(self):
self.setUpPyfakefs()
def test(self):
file_contents = (
'{\n'
'\t"1": [\n'
'\t\t{"name": "dA", "start": 10, "end": 21},\n'
'\t\t{"name": "dB", "start": 30, "end": 45}\n'
'\t],\n'
'\t"2": [\n'
'\t\t{"name": "dC", "start": 15, "end": 35}\n'
'\t],\n'
'\t"3": [\n'
'\t\t{"name": "dA", "start": 20, "end": 31},\n'
'\t\t{"name": "dD", "start": 35, "end": 65}\n'
'\t],\n'
'\t"5": [\n'
'\t\t{"name": "dD", "start": 10, "end": 40},\n'
'\t\t{"name": "dE", "start": 50, "end": 55}\n'
'\t],\n'
'\t"6": [\n'
'\t\t{"name": "dF", "start": 30, "end": 45},\n'
'\t\t{"name": "dA", "start": 60, "end": 71}\n'
'\t]\n'
'}\n'
)
filepath = '/test/domains.json'
self.fs.create_file(filepath, contents=file_contents)
expected = {
'1': [
{ 'name': 'dA', 'start': 10, 'end': 21 },
{ 'name': 'dB', 'start': 30, 'end': 45 },
],
'2': [
{ 'name': 'dC', 'start': 15, 'end': 35 },
],
'3': [
{ 'name': 'dA', 'start': 20, 'end': 31 },
{ 'name': 'dD', 'start': 35, 'end': 65 },
],
'5': [
{ 'name': 'dD', 'start': 10, 'end': 40 },
{ 'name': 'dE', 'start': 50, 'end': 55 },
],
'6': [
{ 'name': 'dF', 'start': 30, 'end': 45 },
{ 'name': 'dA', 'start': 60, 'end': 71 },
],
}
self.assertEqual(read_domains(filepath), expected)
class ReadGeneMap(pyfakefs.fake_filesystem_unittest.TestCase):
def setUp(self):
self.setUpPyfakefs()
def get_test_options(self, arg_idtype):
file_contents = (
'{\n'
'"1": {\n'
'"entrez": "11",\n'
'"refseqp": ["NP_11111"]\n'
'},\n'
'"2": {\n'
'"entrez": "22",\n'
'"refseqp": ["NP_22222", "NP_02222"]\n'
'},\n'
'"3": {\n'
'"entrez": "33",\n'
'"refseqp": ["NP_33333"]\n'
'},\n'
'"5": {\n'
'"entrez": "55",\n'
'"refseqp": ["NP_55555"]\n'
'},\n'
'"6": {\n'
'"entrez": "66",\n'
'"refseqp": ["NP_66666"]\n'
'}\n'
'}\n'
)
filepath = '/test/genemap.json'
self.fs.create_file(filepath, contents=file_contents)
class Options:
genemap = filepath
idtype = arg_idtype
return Options()
def test_list_ids(self):
options = self.get_test_options('refseqp')
expected = {
'NP_11111': '1',
'NP_22222': '2',
'NP_02222': '2',
'NP_33333': '3',
'NP_55555': '5',
'NP_66666': '6',
}
self.assertEqual(read_gene_map(options), expected)
def test_string_id(self):
options = self.get_test_options('entrez')
expected = {
'11': '1',
'22': '2',
'33': '3',
'55': '5',
'66': '6',
}
self.assertEqual(read_gene_map(options), expected)
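# --- Editor's illustrative sketch (not part of the module under test) ---
# The two tests above pin down the contract of read_gene_map(): every id of the
# chosen idtype maps back to its top-level gene key. A minimal stand-in honouring
# that contract (shown only for clarity, not the real implementation) could be:
def _read_gene_map_sketch(options):
    import json
    with open(options.genemap) as handle:
        genemap = json.load(handle)
    mapping = {}
    for gene, ids in genemap.items():
        value = ids[options.idtype]
        ids_for_type = value if isinstance(value, list) else [value]
        for identifier in ids_for_type:
            mapping[identifier] = gene
    return mapping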
class ReadSaint(pyfakefs.fake_filesystem_unittest.TestCase):
def assertDataframeEqual(self, a, b, msg):
try:
pd_testing.assert_frame_equal(a, b)
except AssertionError as e:
raise self.failureException(msg) from e
def setUp(self):
self.addTypeEqualityFunc(pd.DataFrame, self.assertDataframeEqual)
self.setUpPyfakefs()
def test(self):
file_contents = (
'Bait\tPrey\tPreyGene\tSpec\tAvgSpec\tBFDR\n'
'AAA\tNP_11111\tprey1\t\t10\t0.01\n'
'AAA\tNP_22222\tprey2\t\t20\t0\n'
'AAA\tNP_33333\tprey3\t\t30\t0.02\n'
'AAA\tNP_44444\tprey4\t\t15\t0.01\n'
'AAA\tNP_55555\tprey5\t\t25\t0.01\n'
'AAA\tNP_66666\tprey6\t\t40\t0.01\n'
'BBB\tNP_11111\tprey1\t\t10\t0.05\n'
'BBB\tNP_22222\tprey2\t\t20\t0.01\n'
'BBB\tNP_77777\tprey7\t\t30\t0.01\n'
)
filepath = '/test/saint.txt'
self.fs.create_file(filepath, contents=file_contents)
expected = pd.DataFrame([
{ 'Bait': 'AAA', 'Prey': 'NP_11111', 'PreyGene': 'prey1', 'AvgSpec': 10, 'BFDR': 0.01 },
{ 'Bait': 'AAA', 'Prey': 'NP_22222', 'PreyGene': 'prey2', 'AvgSpec': 20, 'BFDR': 0 },
{ 'Bait': 'AAA', 'Prey': 'NP_33333', 'PreyGene': 'prey3', 'AvgSpec': 30, 'BFDR': 0.02 },
{ 'Bait': 'AAA', 'Prey': 'NP_44444', 'PreyGene': 'prey4', 'AvgSpec': 15, 'BFDR': 0.01 },
{ 'Bait': 'AAA', 'Prey': 'NP_55555', 'PreyGene': 'prey5', 'AvgSpec': 25, 'BFDR': 0.01 },
{ 'Bait': 'AAA', 'Prey': 'NP_66666', 'PreyGene': 'prey6', 'AvgSpec': 40, 'BFDR': 0.01 },
{ 'Bait': 'BBB', 'Prey': 'NP_11111', 'PreyGene': 'prey1', 'AvgSpec': 10, 'BFDR': 0.05 },
{ 'Bait': 'BBB', 'Prey': 'NP_22222', 'PreyGene': 'prey2', 'AvgSpec': 20, 'BFDR': 0.01 },
{ 'Bait': 'BBB', 'Prey': 'NP_77777', 'PreyGene': 'prey7', 'AvgSpec': 30, 'BFDR': 0.01 },
])
self.assertEqual(read_saint(filepath), expected)
class MapFileIds(pyfakefs.fake_filesystem_unittest.TestCase):
def assertDataframeEqual(self, a, b, msg):
try:
pd_testing.assert_frame_equal(a, b)
except AssertionError as e:
raise self.failureException(msg) from e
def setUp(self):
self.addTypeEqualityFunc(pd.DataFrame, self.assertDataframeEqual)
def test(self):
df = pd.DataFrame([
{ 'Bait': 'AAA', 'Prey': 'NP_11111.1', 'PreyGene': 'prey1', 'AvgSpec': 10, 'BFDR': 0.01 },
{ 'Bait': 'AAA', 'Prey': 'NP_22222', 'PreyGene': 'prey2', 'AvgSpec': 20, 'BFDR': 0 },
{ 'Bait': 'AAA', 'Prey': 'NP_33333.3', 'PreyGene': 'prey3', 'AvgSpec': 30, 'BFDR': 0.02 },
{ 'Bait': 'AAA', 'Prey': 'NP_44444', 'PreyGene': 'prey4', 'AvgSpec': 15, 'BFDR': 0.01 },
{ 'Bait': 'AAA', 'Prey': 'NP_55555', 'PreyGene': 'prey5', 'AvgSpec': 25, 'BFDR': 0.01 },
{ 'Bait': 'AAA', 'Prey': 'NP_66666', 'PreyGene': 'prey6', 'AvgSpec': 40, 'BFDR': 0.01 },
{ 'Bait': 'BBB', 'Prey': 'NP_11111', 'PreyGene': 'prey1', 'AvgSpec': 10, 'BFDR': 0.05 },
{ 'Bait': 'BBB', 'Prey': 'NP_22222', 'PreyGene': 'prey2', 'AvgSpec': 20, 'BFDR': 0.01 },
{ 'Bait': 'BBB', 'Prey': 'NP_77777', 'PreyGene': 'prey7', 'AvgSpec': 30, 'BFDR': 0.01 },
])
genemap = {
'NP_11111': '1',
'NP_22222': '2',
'NP_02222': '2',
'NP_33333': '3',
'NP_55555': '5',
'NP_66666': '6',
}
expected = pd.DataFrame([
{ 'Bait': 'AAA', 'Prey': '1', 'PreyGene': 'prey1', 'AvgSpec': 10, 'BFDR': 0.01 },
{ 'Bait': 'AAA', 'Prey': '2', 'PreyGene': 'prey2', 'AvgSpec': 20, 'BFDR': 0 },
{ 'Bait': 'AAA', 'Prey': '3', 'PreyGene': 'prey3', 'AvgSpec': 30, 'BFDR': 0.02 },
{ 'Bait': 'AAA', 'Prey': 'NP_44444', 'PreyGene': 'prey4', 'AvgSpec': 15, 'BFDR': 0.01 },
{ 'Bait': 'AAA', 'Prey': '5', 'PreyGene': 'prey5', 'AvgSpec': 25, 'BFDR': 0.01 },
{ 'Bait': 'AAA', 'Prey': '6', 'PreyGene': 'prey6', 'AvgSpec': 40, 'BFDR': 0.01 },
{ 'Bait': 'BBB', 'Prey': '1', 'PreyGene': 'prey1', 'AvgSpec': 10, 'BFDR': 0.05 },
{ 'Bait': 'BBB', 'Prey': '2', 'PreyGene': 'prey2', 'AvgSpec': 20, 'BFDR': 0.01 },
{ 'Bait': 'BBB', 'Prey': 'NP_77777', 'PreyGene': 'prey7', 'AvgSpec': 30, 'BFDR': 0.01 },
])
self.assertEqual(map_file_ids(df, genemap), expected)
class FilterSaint(pyfakefs.fake_filesystem_unittest.TestCase):
def assertDataframeEqual(self, a, b, msg):
try:
            pd_testing.assert_frame_equal(a, b)
        except AssertionError as e:
            raise self.failureException(msg) from e
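# --- Editor's illustrative sketch (not part of the original test module) ---
# fishers_test() and bh_correction() are imported above but not shown here; a
# minimal stand-in for a 2x2 enrichment test plus Benjamini-Hochberg adjustment,
# assuming scipy is available, could look like this:
if __name__ == '__main__':
    import numpy as _np
    from scipy.stats import fisher_exact

    # [[domain & bait preys, domain & background], [no domain & bait, no domain & background]]
    _odds, _p = fisher_exact([[4, 10], [6, 80]], alternative='greater')

    def _bh_sketch(pvalues):
        # Benjamini-Hochberg adjusted p-values: p_(i) * n / i, made monotone, capped at 1
        p = _np.asarray(pvalues, dtype=float)
        order = _np.argsort(p)
        ranked = p[order] * len(p) / (_np.arange(len(p)) + 1)
        adjusted = _np.minimum.accumulate(ranked[::-1])[::-1].clip(max=1.0)
        out = _np.empty_like(adjusted)
        out[order] = adjusted
        return out

    print(_p, _bh_sketch([0.01, 0.04, 0.03, 0.20]))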
__version__ = '0.1.3'
__maintainer__ = '<NAME> 31.12.2019'
__contributors__ = '<NAME>, <NAME>'
__email__ = '<EMAIL>'
__birthdate__ = '31.12.2019'
__status__ = 'dev' # options are: dev, test, prod
#----- imports & packages ------
if __package__ is None or __package__ == '':
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.dirname(__file__))))
import pprint
import pandas as pd
import numpy as np
import warnings
from pathlib import Path
from zipfile import ZipFile
class DataParser:
def __init__(self, configDict: dict, datasetID: str, loadEncrypted=False):
"""
        Basic class for parsing a mobility survey trip data set. Currently, both German travel surveys, MiD 2008 and
        MiD 2017, are pre-configured and one of the two can be given (default: MiD 2017).
The data set can be provided from an encrypted file on a server in which case the link to the ZIP-file as well
as a link to the file within the ZIP-file have to be supplied in the globalConfig and a password has to be
supplied in the parseConfig.
Columns relevant for the EV simulation are selected from the entirety of the data and renamed to VencoPy
internal variable names given in the dictionary parseConfig['dataVariables'] for the respective survey data set.
Manually configured exclude, include, greaterThan and smallerThan filters are applied as they are specified in
parseConfig. For some columns, raw data is transferred to human readable strings and respective columns are
added. Pandas timestamp columns are synthesized from the given trip start and trip end time information.
:param configDict: A dictionary containing multiple yaml config files
:param datasetID: Currently, MiD08 and MiD17 are implemented as travel survey data sets
:param loadEncrypted: If True, load an encrypted ZIP file as specified in parseConfig
"""
self.parseConfig = configDict['parseConfig']
self.globalConfig = configDict['globalConfig']
self.localPathConfig = configDict['localPathConfig']
self.datasetID = self.checkDatasetID(datasetID, self.parseConfig)
self.rawDataPath = Path(self.localPathConfig['pathAbsolute'][self.datasetID]) / self.globalConfig['files'][self.datasetID]['tripsDataRaw']
self.subDict = {}
self.rawData = None
self.data = None
self.__filterDict = {}
self.columns = self.compileVariableList()
self.filterDictNameList = ['include', 'exclude', 'greaterThan', 'smallerThan']
self.updateFilterDict()
print('Parsing properties set up')
if loadEncrypted:
print(f"Starting to retrieve encrypted data file from "
f"{self.globalConfig['pathAbsolute']['encryptedZipfile']}")
self.loadEncryptedData(pathToZip=Path(self.globalConfig['pathAbsolute']['encryptedZipfile']) /
self.globalConfig['files'][self.datasetID]['encryptedZipFileB2'],
pathInZip=self.globalConfig['files'][self.datasetID]['tripDataZipFileRaw'])
else:
print(f"Starting to retrieve local data file from {self.rawDataPath}")
self.loadData()
def updateFilterDict(self) -> None:
"""
Internal function to parse the filter dictionary of a specified data set from parseConfig.yaml
:return: None
"""
self.__filterDict[self.datasetID] = self.parseConfig['filterDicts'][self.datasetID]
self.__filterDict[self.datasetID] = {iKey: iVal for iKey, iVal in self.__filterDict[self.datasetID].items() if self.__filterDict[self.datasetID][iKey] is not
None}
def checkDatasetID(self, datasetID: str, parseConfig: dict) -> str:
"""
General check if data set ID is defined in parseConfig.yaml
:param datasetID: list of strings declaring the datasetIDs to be read in
:param parseConfig: A yaml config file holding a dictionary with the keys 'pathRelative' and 'pathAbsolute'
        :return: Returns the validated datasetID string of a mobility data set
"""
availableDatasetIDs = parseConfig['dataVariables']['datasetID']
assert datasetID in availableDatasetIDs, \
f'Defined datasetID {datasetID} not specified under dataVariables in parseConfig. Specified datasetIDs ' \
f'are {availableDatasetIDs}'
return datasetID
def compileVariableList(self) -> list:
"""
        Clean up the replacement dictionary of raw data file variable (column) names. This has to be done because some
        variables that may be relevant for the analysis later on are only contained in one raw data set but not in
        another one. E.g. whether a trip was an intermodal trip was only assessed in the MiD 2017, while it wasn't in
        the MiD 2008. This has to be mirrored by the filter dict for the respective data set.
:return: List of variables
"""
listIndex = self.parseConfig['dataVariables']['datasetID'].index(self.datasetID)
variables = [val[listIndex] if not val[listIndex] == 'NA' else 'NA' for key, val in
self.parseConfig['dataVariables'].items()]
variables.remove(self.datasetID)
self.removeNA(variables)
return variables
def removeNA(self, variables: list):
"""
Removes all strings that can be capitalized to 'NA' from the list of variables
:param variables: List of variables of the mobility dataset
:return: Returns a list with non NA values
"""
vars = [iVar.upper() for iVar in variables]
counter = 0
for idx, iVar in enumerate(vars):
if iVar == 'NA':
del variables[idx - counter]
counter += 1
def loadData(self):
"""
        Loads data specified in self.rawDataPath and stores it in self.rawData. Raises an exception if an invalid suffix
is specified in self.rawDataPath. READ IN OF CSV HAS NOT BEEN EXTENSIVELY TESTED BEFORE BETA RELEASE.
:return: None
"""
# Future releases: Are potential error messages (.dta not being a stata file even as the ending matches)
# readable for the user? Should we have a manual error treatment here?
if self.rawDataPath.suffix == '.dta':
self.rawData = pd.read_stata(self.rawDataPath, convert_categoricals=False, convert_dates=False,
preserve_dtypes=False)
# This has not been tested before the beta release
elif self.rawDataPath.suffix == '.csv':
self.rawData = pd.read_csv(self.rawDataPath)
else:
            raise Exception(f"Data type {self.rawDataPath.suffix} not yet specified. Available types so far are .dta and "
f".csv")
print(f'Finished loading {len(self.rawData)} rows of raw data of type {self.rawDataPath.suffix}')
def loadEncryptedData(self, pathToZip, pathInZip):
"""
Since the MiD data sets are only accessible by an extensive data security contract, VencoPy provides the
possibility to access encrypted zip files. An encryption password has to be given in parseConfig.yaml in order
to access the encrypted file. Loaded data is stored in self.rawData
:param pathToZip: path from current working directory to the zip file or absolute path to zipfile
:param pathInZip: Path to trip data file within the encrypted zipfile
:return: None
"""
with ZipFile(pathToZip) as myzip:
if '.dta' in pathInZip:
self.rawData = pd.read_stata(myzip.open(pathInZip, pwd=bytes(self.parseConfig['encryptionPW'],
encoding='utf-8')),
convert_categoricals=False, convert_dates=False, preserve_dtypes=False)
else: # if '.csv' in pathInZip:
self.rawData = pd.read_csv(myzip.open(pathInZip, pwd=bytes(self.parseConfig['encryptionPW'],
encoding='utf-8')), sep=';', decimal=',')
print(f'Finished loading {len(self.rawData)} rows of raw data of type {self.rawDataPath.suffix}')
def selectColumns(self):
"""
Function to filter the rawData for only relevant columns as specified by parseConfig and cleaned in
self.compileVariablesList(). Stores the subset of data in self.data
:return: None
"""
self.data = self.rawData.loc[:, self.columns]
def harmonizeVariables(self):
"""
Harmonizes the input data variables to match internal VencoPy names given as specified in the mapping in
parseConfig['dataVariables']. So far mappings for MiD08 and MiD17 are given. Since the MiD08 doesn't provide
a combined household and person unique identifier, it is synthesized of the both IDs.
:return: None
"""
replacementDict = self.createReplacementDict(self.datasetID, self.parseConfig['dataVariables'])
dataRenamed = self.data.rename(columns=replacementDict)
if self.datasetID == 'MiD08':
dataRenamed['hhPersonID'] = (dataRenamed['hhID'].astype('string') +
dataRenamed['personID'].astype('string')).astype('int')
self.data = dataRenamed
print('Finished harmonization of variables')
def createReplacementDict(self, datasetID: str, dictRaw: dict) -> dict:
"""
Creates the mapping dictionary from raw data variable names to VencoPy internal variable names as specified
in parseConfig.yaml for the specified data set.
:param datasetID: list of strings declaring the datasetIDs to be read in
:param dictRaw: Contains dictionary of the raw data
:return: Dictionary with internal names as keys and raw data column names as values.
"""
if datasetID in dictRaw['datasetID']:
listIndex = dictRaw['datasetID'].index(datasetID)
return {val[listIndex]: key for (key, val) in dictRaw.items()}
else:
raise ValueError(f'Data set {datasetID} not specified in parseConfig variable dictionary.')
def convertTypes(self):
"""
Convert raw column types to predefined python types as specified in parseConfig['inputDTypes'][datasetID]. This is mainly
done for performance reasons. But also in order to avoid index values that are of type int to be cast to float.
The function operates only on self.data and writes back changes to self.data
:return: None
"""
# Filter for dataset specific columns
conversionDict = self.parseConfig['inputDTypes'][self.datasetID]
keys = {iCol for iCol in conversionDict.keys() if iCol in self.data.columns}
self.subDict = {key: conversionDict[key] for key in conversionDict.keys() & keys}
self.data = self.data.astype(self.subDict)
def returnDictBottomValues(self, baseDict: dict, lst: list = []) -> list:
"""
Returns a list of all dictionary values of the last dictionary level (the bottom) of baseDict. The parameter
lst is used as an interface between recursion levels.
:param baseDict: Dictionary of variables
:param lst: empty list, is used as interface to next recursion
:return: Returns a list with all the bottom dictionary values
"""
for iKey, iVal in baseDict.items():
if isinstance(iVal, dict):
lst = self.returnDictBottomValues(iVal, lst)
else:
if iVal is not None:
lst.append(iVal)
return lst
def checkFilterDict(self):
"""
Checking if all values of filter dictionaries are of type list. Currently only checking if list of list str
not typechecked all(map(self.__checkStr, val). Conditionally triggers an assert.
:return: None
"""
assert all(isinstance(val, list) for val in self.returnDictBottomValues(self.__filterDict[self.datasetID])), \
f'All values in filter dictionaries have to be lists, but are not'
def returnDictBottomKeys(self, baseDict: dict, lst: list = None) -> list:
"""
Returns the lowest level keys of baseDict and returns all of them as a list. The parameter lst is used as
interface between recursion levels.
:param baseDict: Dictionary of variables
:param lst: empty list, used as interface between recursion levels
:return: Returns a list with all the bottom level dictionary keys
"""
if lst is None:
lst = []
for iKey, iVal in baseDict.items():
if isinstance(iVal, dict):
lst = self.returnDictBottomKeys(iVal, lst)
else:
if iVal is not None:
lst.append(iKey)
return lst
def filter(self):
"""
Wrapper function to carry out filtering for the four filter logics of including, excluding, greaterThan and
smallerThan. If a filterDict is defined with a different key, a warning is thrown. The function operates on
self.data class-internally.
:return: None
"""
print(f'Starting filtering, applying {len(self.returnDictBottomKeys(self.__filterDict[self.datasetID]))} filters.')
ret = pd.DataFrame(index=self.data.index)
# Future releases: as discussed before we could indeed work here with a plug and pray approach.
# we would need to introduce a filter manager and a folder structure where to look for filters.
# this is very similar code than the one from ioproc. If we want to go down this route we should
# take inspiration from the code there. It was not easy to get it right in the first place. This
# might be easy to code but hard to implement correctly.
for iKey, iVal in self.__filterDict[self.datasetID].items():
if iKey == 'include':
ret = ret.join(self.setIncludeFilter(iVal, self.data.index))
elif iKey == 'exclude':
ret = ret.join(self.setExcludeFilter(iVal, self.data.index))
elif iKey == 'greaterThan':
ret = ret.join(self.setGreaterThanFilter(iVal, self.data.index))
elif iKey == 'smallerThan':
ret = ret.join(self.setSmallerThanFilter(iVal, self.data.index))
else:
warnings.warn(f'A filter dictionary was defined in the parseConfig with an unknown filtering key. '
                              f'Current filtering keys comprise include, exclude, smallerThan and greaterThan. '
                              f'Continuing and ignoring the dictionary {iKey}.')
self.data = self.data[ret.all(axis='columns')]
self.filterAnalysis(ret)
def setIncludeFilter(self, includeFilterDict: dict, dataIndex) -> pd.DataFrame:
"""
Read-in function for include filter dict from parseConfig.yaml
:param includeFilterDict: Dictionary of include filters defined in parseConfig.yaml
:param dataIndex: Index for the data frame
:return: Returns a data frame with individuals using car as a mode of transport
"""
incFilterCols = pd.DataFrame(index=dataIndex, columns=includeFilterDict.keys())
for incCol, incElements in includeFilterDict.items():
incFilterCols[incCol] = self.data[incCol].isin(incElements)
return incFilterCols
def setExcludeFilter(self, excludeFilterDict: dict, dataIndex) -> pd.DataFrame:
"""
Read-in function for exclude filter dict from parseConfig.yaml
:param excludeFilterDict: Dictionary of exclude filters defined in parseConfig.yaml
:param dataIndex: Index for the data frame
:return: Returns a filtered data frame with exclude filters
"""
exclFilterCols = pd.DataFrame(index=dataIndex, columns=excludeFilterDict.keys())
for excCol, excElements in excludeFilterDict.items():
exclFilterCols[excCol] = ~self.data[excCol].isin(excElements)
return exclFilterCols
def setGreaterThanFilter(self, greaterThanFilterDict: dict, dataIndex):
"""
Read-in function for greaterThan filter dict from parseConfig.yaml
:param greaterThanFilterDict: Dictionary of greater than filters defined in parseConfig.yaml
:param dataIndex: Index for the data frame
        :return: Returns a data frame with the greater-than filter results
"""
greaterThanFilterCols = pd.DataFrame(index=dataIndex, columns=greaterThanFilterDict.keys())
for greaterCol, greaterElements in greaterThanFilterDict.items():
greaterThanFilterCols[greaterCol] = self.data[greaterCol] >= greaterElements.pop()
if len(greaterElements) > 0:
warnings.warn(f'You specified more than one value as lower limit for filtering column {greaterCol}.'
f'Only considering the last element given in the parseConfig.')
return greaterThanFilterCols
def setSmallerThanFilter(self, smallerThanFilterDict: dict, dataIndex) -> pd.DataFrame:
"""
Read-in function for smallerThan filter dict from parseConfig.yaml
:param smallerThanFilterDict: Dictionary of smaller than filters defined in parseConfig.yaml
:param dataIndex: Index for the data frame
:return: Returns a data frame of trips covering a distance of less than 1000 km
"""
smallerThanFilterCols = pd.DataFrame(index=dataIndex, columns=smallerThanFilterDict.keys())
for smallerCol, smallerElements in smallerThanFilterDict.items():
smallerThanFilterCols[smallerCol] = self.data[smallerCol] <= smallerElements.pop()
if len(smallerElements) > 0:
warnings.warn(f'You specified more than one value as upper limit for filtering column {smallerCol}.'
f'Only considering the last element given in the parseConfig.')
return smallerThanFilterCols
def filterAnalysis(self, filterData: pd.DataFrame):
"""
Function supplies some aggregate info of the data after filtering to the user Function does not change any
class attributes
:param filterData:
:return: None
"""
lenData = sum(filterData.all(axis='columns'))
boolDict = {iCol: sum(filterData[iCol]) for iCol in filterData}
print(f'The following values were taken into account after filtering:')
pprint.pprint(boolDict)
        print(f'All filters combined yielded a total of {lenData} rows that were taken into account.')
        print(f'This corresponds to {lenData / len(filterData) * 100} percent of the original data.')
def filterConsistentHours(self):
"""
Filtering out records where starting hour is after end hour but trip takes place on the same day.
These observations are data errors.
:return: No returns, operates only on the class instance
"""
if self.datasetID == 'MiD17' or self.datasetID == 'MiD08':
dat = self.data
self.data = dat.loc[(dat['tripStartClock'] <= dat['tripEndClock']) | (dat['tripEndNextDay'] == 1), :]
# If we want to get rid of tripStartClock and tripEndClock (they are redundant variables)
# self.data = dat.loc[pd.to_datetime(dat.loc[:, 'tripStartHour']) <= pd.to_datetime(dat.loc[:, 'tripEndHour']) |
# (dat['tripEndNextDay'] == 1), :]
def addStrColumnFromVariable(self, colName: str, varName: str):
"""
        Replaces each occurrence of a MiD/KiD variable, e.g. 1,2,...,7 for weekdays, with an explicitly mapped string, e.g.
'MON', 'TUE',...,'SUN'.
:param colName: Name of the column in self.data where the explicit string info is stored
:param varName: Name of the VencoPy internal variable given in config/parseConfig['dataVariables']
:return: None
"""
self.data.loc[:, colName] \
= self.data.loc[:, varName].replace(self.parseConfig['Replacements'][self.datasetID][varName])
def addStrColumns(self, weekday=True, purpose=True):
"""
Adds string columns for either weekday or purpose.
:param weekday: Boolean identifier if weekday string info should be added in a separate column
:param purpose: Boolean identifier if purpose string info should be added in a separate column
:return: None
"""
if weekday:
self.addStrColumnFromVariable(colName='weekdayStr', varName='tripStartWeekday')
if purpose:
self.addStrColumnFromVariable(colName='purposeStr', varName='tripPurpose')
def composeTimestamp(self, data: pd.DataFrame = None,
colYear: str = 'tripStartYear',
colWeek: str = 'tripStartWeek',
colDay: str = 'tripStartWeekday',
colHour: str = 'tripStartHour',
colMin: str = 'tripStartMinute',
colName: str = 'timestampStart') -> np.datetime64:
"""
:param data: a data frame
:param colYear: year of start of a particular trip
:param colWeek: week of start of a particular trip
:param colDay: weekday of start of a particular trip
:param colHour: hour of start of a particular trip
:param colMin: minute of start of a particular trip
:param colName:
:return: Returns a detailed time stamp
"""
data[colName] = pd.to_datetime(data.loc[:, colYear], format='%Y') + \
                        pd.to_timedelta(data.loc[:, colWeek] * 7, unit='days')
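# --- Editor's illustrative sketch (not part of the original class) ---
# A minimal, hypothetical example of the timestamp composition used in
# composeTimestamp(): calendar year plus week, weekday, hour and minute offsets.
if __name__ == '__main__':
    import pandas as _pd

    _year, _week, _weekday, _hour, _minute = 2017, 10, 3, 8, 15
    _ts = (_pd.to_datetime(str(_year), format='%Y')
           + _pd.to_timedelta(_week * 7, unit='days')
           + _pd.to_timedelta(_weekday, unit='days')
           + _pd.to_timedelta(_hour, unit='hours')
           + _pd.to_timedelta(_minute, unit='minutes'))
    print(_ts)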
import numpy as np
import pandas as pd
from tqdm import tqdm
import datetime as dt
from collections import defaultdict
from dateutil.relativedelta import relativedelta
def collect_dates_for_cohort(df_pop, control_reservoir, control_dates, col_names=None):
'''
    Fill 'control_reservoir' and 'control_dates' with the dates of each person
    (represented by their CPF) regarding the main events considered in the analysis.
    Args:
        df_pop:
            pandas.DataFrame.
        control_reservoir:
            collections.defaultdict.
control_dates:
collections.defaultdict.
col_names:
dictionary.
Return:
None.
'''
if col_names is None:
col_names = {
"D1": "data D1(VACINADOS)",
"D2": "data D2(VACINADOS)",
"OBITO COVID": "data_obito(OBITO COVID)",
"OBITO GERAL": "data falecimento(CARTORIOS)",
"HOSPITALIZACAO COVID": "DATA HOSPITALIZACAO",
}
for j in tqdm(range(df_pop.shape[0])):
cpf = df_pop["CPF"].iat[j]
sex, age = df_pop["SEXO"].iat[j], df_pop["IDADE"].iat[j]
# Different outcomes' dates
dt_d1 = df_pop[col_names["D1"]].iat[j]
dt_d2 = df_pop[col_names["D2"]].iat[j]
dt_death = df_pop[col_names["OBITO COVID"]].iat[j]
dt_death_general = df_pop[col_names["OBITO GERAL"]].iat[j]
dt_hosp_covid = df_pop[col_names["HOSPITALIZACAO COVID"]].iat[j]
control_reservoir[(age,sex)].append(cpf)
if pd.notna(dt_d1):
control_dates["D1"][cpf] = dt_d1
if pd.notna(dt_d2):
control_dates["D2"][cpf] = dt_d2
if pd.notna(dt_death):
control_dates["DEATH COVID"][cpf] = dt_death
if pd.notna(dt_death_general):
control_dates["DEATH GENERAL"][cpf] = dt_death_general
if pd.notna(dt_hosp_covid):
control_dates["HOSPITALIZATION COVID"][cpf] = dt_hosp_covid
def rearrange_controls(control_reservoir, seed):
'''
Shuffle the order of the controls in the structure containing all
control candidates.
Args:
control_reservoir:
collections.defaultdict.
seed:
Integer.
Return:
None.
'''
np.random.seed(seed)
for key in control_reservoir.keys():
np.random.shuffle(control_reservoir[key])
def perform_matching(datelst, df_vac, control_reservoir, control_used, control_dates, col_names):
'''
Description.
Args:
datelst:
List of datetime.date.
df_vac:
pandas.DataFrame.
control_reservoir:
collections.defaultdict.
control_used:
collections.defaultdict.
control_dates:
collections.defaultdict.
col_names:
dictionary.
Return:
pareados:
pandas.DataFrame.
matched:
dictionary.
'''
if col_names is None:
col_names = {
"D1": "data D1(VACINADOS)",
"D2": "data D2(VACINADOS)",
"OBITO COVID": "data_obito(OBITO COVID)",
"OBITO GERAL": "data falecimento(CARTORIOS)"
}
matchings = defaultdict(lambda:-1)
matched = defaultdict(lambda:False)
for current_date in tqdm(datelst):
# Select all people who was vaccinated at the current date
df_vac["compare_date"] = df_vac[col_names["D1"]].apply(lambda x: True if x==current_date else False)
current_vaccinated = df_vac[df_vac["compare_date"]==True]
cpf_list = current_vaccinated["CPF"].tolist()
age_list = current_vaccinated["IDADE"].tolist()
sex_list = current_vaccinated["SEXO"].tolist()
# For each person vaccinated at the current date, check if there is a control for he/she.
for j in range(0, len(cpf_list)):
pair = find_pair(current_date, age_list[j], sex_list[j], control_reservoir, control_used, control_dates)
if pair!=-1:
matchings[cpf_list[j]] = pair
items_matching = matchings.items()
pareados = pd.DataFrame({"CPF CASO": [ x[0] for x in items_matching ], "CPF CONTROLE": [ x[1] for x in items_matching ]})
for cpf in [ x[0] for x in items_matching ]+[ x[1] for x in items_matching ]:
matched[cpf]=True
return pareados, matched
def get_events(df_pop, pareados, matched, col_names):
'''
    Collect per-person event dates (vaccination doses and deaths) for the matched individuals.
Args:
df_pop:
pareados:
matched:
col_names:
Return:
datas:
pandas.DataFrame.
'''
if col_names is None:
col_names = {
"D1": "data D1(VACINADOS)",
"D2": "data D2(VACINADOS)",
"OBITO COVID": "data_obito(OBITO COVID)",
"OBITO GERAL": "data falecimento(CARTORIOS)"
}
data_obito = defaultdict(lambda:np.nan)
data_obito_geral = defaultdict(lambda:np.nan)
data_d1 = defaultdict(lambda:np.nan)
data_d2 = defaultdict(lambda:np.nan)
for j in range(df_pop.shape[0]):
cpf = df_pop["CPF"].iat[j]
d1_dt = df_pop[col_names["D1"]].iat[j]
d2_dt = df_pop[col_names["D2"]].iat[j]
obito = df_pop[col_names["OBITO COVID"]].iat[j]
obito_geral = df_pop[col_names["OBITO GERAL"]].iat[j]
#teste = df_pop["DATA SOLICITACAO(TESTES)"].iat[j]
if not pd.isna(obito):
data_obito[cpf] = obito
elif not pd.isna(obito_geral):
data_obito_geral[cpf] = obito_geral
        if not pd.isna(d1_dt):
            data_d1[cpf] = d1_dt
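# --- Editor's illustrative sketch (not part of the original module) ---
# A toy call of collect_dates_for_cohort() on a two-row cohort, assuming the
# default column names defined above.
if __name__ == '__main__':
    _df = pd.DataFrame({
        "CPF": ["111", "222"], "SEXO": ["F", "M"], "IDADE": [34, 60],
        "data D1(VACINADOS)": [dt.date(2021, 3, 1), pd.NaT],
        "data D2(VACINADOS)": [pd.NaT, pd.NaT],
        "data_obito(OBITO COVID)": [pd.NaT, pd.NaT],
        "data falecimento(CARTORIOS)": [pd.NaT, pd.NaT],
        "DATA HOSPITALIZACAO": [pd.NaT, dt.date(2021, 2, 10)],
    })
    _reservoir, _dates = defaultdict(list), defaultdict(dict)
    collect_dates_for_cohort(_df, _reservoir, _dates)
    print(dict(_reservoir), {k: dict(v) for k, v in _dates.items()})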
# coding: utf-8
# # ASSIGNMENT 1
# In[5]:
import pandas as pd
# In[6]:
from matplotlib import pyplot as plt
get_ipython().magic(u'matplotlib inline')
import numpy as np
# In[7]:
import seaborn as sns
# In[ ]:
#In the above cells, I imported libraries required for this assignment.
# In[9]:
df_math = pd.read_clipboard(header=None)
# In[ ]:
#In the above cell, I created a dataframe with the clipboard text.
# In[11]:
df_math = df_math.rename(index=int, columns={0:"Rank",1:"Country",2:"Score"})
# In[ ]:
#In the above cell, I changed the name of the columns. In this scenario, [0,1,2] are integers.
# In[13]:
df_science = pd.read_clipboard(header=None)
# In[15]:
df_science = df_science.rename(index=int, columns={0:"Rank",1:"Country",2:"Score"})
# In[19]:
df_reading = pd.read_clipboard(header=None)
# In[21]:
df_reading = df_reading.rename(index=int, columns={0:"Rank",1:"Country",2:"Score"})
# In[23]:
temp = pd.merge(df_math,df_science, on='Country', how='outer')
# In[ ]:
#In the above cell, I merged the df_math and df_science dataframes on 'Country' using an outer join.
# In[25]:
temp = pd.merge(temp,df_reading, on='Country', how='outer')
# In[ ]:
#In the above cell, I merged the temp and df_reading dataframes on 'Country' using an outer join.
# In[35]:
del temp['Rank']
# In[ ]:
#In the above cell, I deleted the column with name 'Rank'.
# In[43]:
df = temp.rename(index=int, columns={"Score_x":"Math","Score_y":"Science","Score":"Reading"})
# In[45]:
df['Math'] = pd.to_numeric(df['Math'])
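# In[ ]:
# --- Editor's illustrative sketch (not part of the original assignment) ---
# Minimal example of the outer-merge pattern used above: rows from both frames
# are kept and missing scores become NaN.
left = pd.DataFrame({'Country': ['A', 'B'], 'Math': [500, 480]})
right = pd.DataFrame({'Country': ['B', 'C'], 'Science': [510, 470]})
print(pd.merge(left, right, on='Country', how='outer'))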
import time
import logging
from TwitterAPI import TwitterAPI
from twython import Twython
from twython import TwythonError, TwythonRateLimitError, TwythonAuthError
import pandas as pd
from datetime import datetime, timedelta
from spikexplore.NodeInfo import NodeInfo
from spikexplore.graph import add_node_attributes, add_edges_attributes
logger = logging.getLogger(__name__)
class TwitterCredentials:
def __init__(self, app_key, access_token, consumer_key=None, consumer_secret=None):
self.app_key = app_key
self.access_token = access_token
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
class TweetsGetterV1:
def __init__(self, credentials, config):
# Instantiate an object
self.app_key = credentials.app_key
self.access_token = credentials.access_token
self.config = config
self.twitter_handle = Twython(self.app_key, access_token=self.access_token)
pass
def _filter_old_tweets(self, tweets):
max_day_old = self.config.max_day_old
if not max_day_old:
return tweets
days_limit = datetime.now() - timedelta(days=max_day_old)
tweets_filt = filter(lambda t: datetime.strptime(t['created_at'], '%a %b %d %H:%M:%S +0000 %Y') >= days_limit,
tweets)
return list(tweets_filt)
def get_user_tweets(self, username):
# Collect tweets from a username
count = self.config.max_tweets_per_user
# Test if ok
try:
user_tweets_raw = self.twitter_handle.get_user_timeline(screen_name=username,
count=count, include_rts=True,
tweet_mode='extended', exclude_replies=False)
# remove old tweets
user_tweets_filt = self._filter_old_tweets(user_tweets_raw)
# make a dictionary
user_tweets = {x['id']: x for x in user_tweets_filt}
tweets_metadata = \
map(lambda x: (x[0], {'user': x[1]['user']['screen_name'],
'name': x[1]['user']['name'],
'user_details': x[1]['user']['description'],
'mentions': list(
map(lambda y: y['screen_name'], x[1]['entities']['user_mentions'])),
'hashtags': list(map(lambda y: y['text'], x[1]['entities']['hashtags'])),
'retweet_count': x[1]['retweet_count'],
'favorite_count': x[1]['favorite_count'], 'created_at': x[1]['created_at'],
'account_creation': x[1]['user']['created_at'],
'account_followers': x[1]['user']['followers_count'],
'account_following': x[1]['user']['friends_count'],
'account_statuses': x[1]['user']['statuses_count'],
'account_favourites': x[1]['user']['favourites_count'],
'account_verified': x[1]['user']['verified'],
'account_default_profile': x[1]['user']['default_profile'],
'account_default_profile_image': x[1]['user']['default_profile_image']}),
user_tweets.items())
return user_tweets, dict(tweets_metadata)
except TwythonAuthError as e_auth:
if e_auth.error_code == 401:
logger.warning('Unauthorized access to user {}. Skipping.'.format(username))
return {}, {}
else:
logger.error('Cannot access to twitter API, authentification error. {}'.format(e_auth.error_code))
raise
except TwythonRateLimitError as e_lim:
logger.warning('API rate limit reached')
logger.warning(e_lim)
remainder = float(self.twitter_handle.get_lastfunction_header(header='x-rate-limit-reset')) - time.time()
logger.warning('Retry after {} seconds.'.format(remainder))
time.sleep(remainder + 1)
del self.twitter_handle
self.twitter_handle = Twython(self.app_key, access_token=self.access_token) # seems you need this
return {}, {} # best way to handle it ?
except TwythonError as e:
logger.error('Twitter API returned error {} for user {}.'.format(e.error_code, username))
return {}, {}
def reshape_node_data(self, node_df):
# user name user_details mentions hashtags retweet_count favorite_count
# created_at account_creation account_followers account_following account_statuses account_favourites
# account_verified account_default_profile account_default_profile_image spikyball_hop
node_df = node_df[
['user', 'name', 'user_details', 'spikyball_hop', 'account_creation', 'account_default_profile',
'account_default_profile_image', 'account_favourites', 'account_followers', 'account_following',
'account_statuses', 'account_verified']]
node_df = node_df.reset_index().groupby('user').max().rename(columns={'index': 'max_tweet_id'})
return node_df
class TweetsGetterV2:
def __init__(self, credentials, config):
self.twitter_handle = TwitterAPI(credentials.consumer_key, credentials.consumer_secret,
api_version='2', auth_type='oAuth2')
self.config = config
self.start_time = None
if config.max_day_old:
days_limit = datetime.now() - timedelta(days=config.max_day_old)
# date format: 2010-11-06T00:00:00Z
self.start_time = days_limit.strftime('%Y-%m-%dT%H:%M:%SZ')
self.user_cache = {}
def _safe_twitter_request(self, request_str, params):
res = self.twitter_handle.request(request_str, params)
while res.status_code == 429: # rate limit reached
logger.warning('API rate limit reached')
remainder = float(res.headers['x-rate-limit-reset']) - time.time()
logger.warning('Retry after {} seconds.'.format(remainder))
time.sleep(remainder + 1)
res = self.twitter_handle.request(request_str, params)
if res.status_code != 200:
logger.warning('API returned with code {}'.format(res.status_code))
return res
def _get_user_info(self, username):
if username not in self.user_cache:
params = {'user.fields': 'created_at,verified,description,public_metrics,protected,profile_image_url'}
res = dict(self._safe_twitter_request('users/by/username/:{}'.format(username), params).json())
if 'errors' in res:
self.user_cache[username] = None
for e in res['errors']:
logger.info(e['detail'])
else:
self.user_cache[username] = res['data']
return self.user_cache[username]
def _fill_user_info(self, includes):
if 'users' not in includes:
return
for u in includes['users']:
if u['username'] not in self.user_cache:
self.user_cache[u['username']] = u
def _get_user_tweets(self, username, num_tweets, next_token):
assert(num_tweets <= 100 and num_tweets > 0)
params = {'max_results': num_tweets, 'expansions': 'author_id,entities.mentions.username,referenced_tweets.id',
'tweet.fields': 'entities,created_at,public_metrics,lang,referenced_tweets',
'user.fields': 'verified,description,created_at,public_metrics,protected,profile_image_url'}
if self.start_time:
params['start_time'] = self.start_time
if next_token:
params['pagination_token'] = next_token
user_info = self._get_user_info(username)
if not user_info: # not found
return {}, {}, None
if user_info['protected']:
logger.info('Skipping user {} - protected account'.format(username))
return {}, {}, None
tweets_raw = dict(self._safe_twitter_request('users/:{}/tweets'.format(user_info['id']), params).json())
if 'errors' in tweets_raw:
err_details = set([e['detail'] for e in tweets_raw['errors']])
for e in err_details:
logger.info(e)
if 'data' not in tweets_raw:
logger.info('Empty results for {}'.format(username))
return {}, {}, None
user_tweets = {int(x['id']): x for x in tweets_raw['data']}
referenced_tweets = {x['id']: x for x in tweets_raw['includes'].get('tweets', {})}
# make the tweets dict similar to the one retrieved using APIv1
for k in user_tweets.keys():
user_tweets[k]['id_str'] = user_tweets[k]['id']
user_tweets[k]['id'] = k # preserve 'id' as int (used as index)
user_tweets[k]['full_text'] = user_tweets[k].pop('text')
user_tweets[k]['user'] = {'id': int(user_info['id']), 'id_str': user_info['id'],
'screen_name': user_info['username'], 'name': user_info['name'],
'description': user_info['description'], 'verified': user_info['verified'],
'protected': user_info['protected'],
'created_at': user_info['created_at'],
'followers_count': user_info['public_metrics']['followers_count'],
'friends_count': user_info['public_metrics']['following_count'],
'statuses_count': user_info['public_metrics']['tweet_count']}
# handle retweet info
if 'referenced_tweets' in user_tweets[k]:
ref = list(filter(lambda x: x['type'] == 'quoted' or x['type'] == 'retweeted',
user_tweets[k]['referenced_tweets']))
if ref:
ref_type = ref[0]['type']
ref_txt = ''
if ref_type == 'quoted':
ref_txt = user_tweets[k]['full_text'] + " "
ref_txt += referenced_tweets[ref[0]['id']]['text']
user_tweets[k]['retweeted_status'] = {'full_text': ref_txt}
tweets_metadata = \
dict(map(lambda x: (x[0], {'user': user_info['username'],
'name': user_info['name'],
'user_details': user_info['description'],
'mentions': list(
map(lambda y: y['username'], x[1].get('entities', {}).get('mentions', {}))),
'hashtags': list(
map(lambda y: y['tag'], x[1].get('entities', {}).get('hashtags', {}))),
'retweet_count': x[1]['public_metrics']['retweet_count'],
'favorite_count': x[1]['public_metrics']['like_count'],
'created_at': x[1]['created_at'],
'account_creation': user_info['created_at'],
'account_followers': user_info['public_metrics']['followers_count'],
'account_following': user_info['public_metrics']['following_count'],
'account_statuses': user_info['public_metrics']['tweet_count'],
'account_verified': user_info['verified']}),
user_tweets.items()))
if 'includes' in tweets_raw:
self._fill_user_info(tweets_raw['includes'])
return user_tweets, tweets_metadata, tweets_raw['meta'].get('next_token', None)
def get_user_tweets(self, username):
remaining_number_of_tweets = self.config.max_tweets_per_user
next_token = None
user_tweets_acc = {}
tweets_metadata_acc = {}
while remaining_number_of_tweets > 0:
number_of_tweets = 100 if remaining_number_of_tweets > 100 else remaining_number_of_tweets
remaining_number_of_tweets -= number_of_tweets
user_tweets, tweets_metadata, next_token = self._get_user_tweets(username, number_of_tweets, next_token)
user_tweets_acc.update(user_tweets)
tweets_metadata_acc.update(tweets_metadata)
if not next_token:
break
return user_tweets_acc, tweets_metadata_acc
def reshape_node_data(self, node_df):
node_df = node_df[
['user', 'name', 'user_details', 'spikyball_hop', 'account_creation',
'account_followers', 'account_following',
'account_statuses', 'account_verified']]
node_df = node_df.reset_index().groupby('user').max().rename(columns={'index': 'max_tweet_id'})
return node_df
class TwitterNetwork:
class TwitterNodeInfo(NodeInfo):
def __init__(self, user_hashtags=None, user_tweets=None, tweets_meta=pd.DataFrame()):
self.user_hashtags = user_hashtags if user_hashtags else {}
self.user_tweets = user_tweets if user_tweets else {}
self.tweets_meta = tweets_meta
def update(self, new_info):
self.user_hashtags.update(new_info.user_hashtags)
self.user_tweets.update(new_info.user_tweets)
def get_nodes(self):
return self.tweets_meta
def __init__(self, credentials, config):
if config.api_version == 1:
self.tweets_getter = TweetsGetterV1(credentials, config)
elif config.api_version == 2:
self.tweets_getter = TweetsGetterV2(credentials, config)
else:
raise ValueError("Invalid api version")
self.config = config
def create_node_info(self):
return self.TwitterNodeInfo()
def get_neighbors(self, user):
if not isinstance(user, str):
            return self.TwitterNodeInfo(), pd.DataFrame()
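# --- Editor's illustrative sketch (not part of the original module) ---
# Hypothetical wiring of the classes above. The config object is assumed to be a
# simple namespace carrying the attributes this module reads (api_version,
# max_day_old, max_tweets_per_user); real Twitter credentials are required for
# any actual API call, so the network construction is left commented out.
if __name__ == '__main__':
    from types import SimpleNamespace

    _creds = TwitterCredentials(app_key='APP_KEY', access_token='ACCESS_TOKEN',
                                consumer_key='CONSUMER_KEY', consumer_secret='CONSUMER_SECRET')
    _cfg = SimpleNamespace(api_version=2, max_day_old=7, max_tweets_per_user=200)
    # network = TwitterNetwork(_creds, _cfg)  # would authenticate against the Twitter API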
import os
import tempfile
import dask.dataframe as dd
import numpy as np
import pandas as pd
import pytest
from dask.datasets import timeseries
from dask.distributed import Client
from pandas.testing import assert_frame_equal
@pytest.fixture()
def timeseries_df(c):
pdf = timeseries(freq="1d").compute().reset_index(drop=True)
# impute nans in pandas dataframe
col1_index = np.random.randint(0, 30, size=int(pdf.shape[0] * 0.2))
col2_index = np.random.randint(0, 30, size=int(pdf.shape[0] * 0.3))
pdf.loc[col1_index, "x"] = np.nan
pdf.loc[col2_index, "y"] = np.nan
c.create_table("timeseries", pdf, persist=True)
return pdf
@pytest.fixture()
def df_simple():
    return pd.DataFrame({"a": [1, 2, 3], "b": [1.1, 2.2, 3.3]})
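# --- Editor's illustrative sketch (not part of the original fixture module) ---
# A hypothetical test consuming the fixtures above; it assumes the usual dask-sql
# Context fixture `c`, whose .sql() call returns a dask DataFrame.
def test_select_simple_sketch(c, df_simple):
    c.create_table("simple", df_simple)
    result = c.sql("SELECT a, b FROM simple").compute().reset_index(drop=True)
    # the SQL backend may widen dtypes, so compare values only
    assert_frame_equal(result, df_simple, check_dtype=False)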
from __future__ import division
import matplotlib
matplotlib.use('TkAgg')
import multiprocessing as mp
import itertools
import numpy as np
from scipy import interpolate
from pylab import flipud
import pandas as pd
try:
from pandas import Categorical
except ImportError:
from pandas.core.categorical import Categorical
import re
from collections import defaultdict
from multiflexxlib import plotting
from multiflexxlib import ub
from multiflexxlib.ub import UBMatrix, etok, ktoe, angle_to_q
import pyclipper
import matplotlib.pyplot as plt
import matplotlib.patches as mpl_patches
import matplotlib.path as mpl_path
from matplotlib.collections import PatchCollection
from matplotlib.colors import LogNorm
from matplotlib.widgets import Button
from mpl_toolkits.axisartist import Subplot
from mpl_toolkits.axisartist.grid_helper_curvelinear import GridHelperCurveLinear
import pickle
import sys
import os
import pkg_resources
from multiflexxlib._version import __version__
try:
import tkinter
from tkinter import filedialog
except ImportError:
import Tkinter as tkinter
import tkFileDialog as filedialog
import logging
logger = logging.getLogger()
logger.setLevel('INFO')
logger.addHandler(logging.StreamHandler(sys.stdout))
BIN_ADAPTIVE = 'adaptive'
BIN_REGULAR = 'regular'
NUM_CHANNELS = 31
EF_LIST = [2.5, 3.0, 3.5, 4.0, 4.5]
CHANNEL_SEPARATION = 2.5
NORM_FACTOR = [1.0, 1.16, 1.23, 1.30, 1.27]
# Apeture angle correction
try:
DETECTOR_WORKING = np.loadtxt(pkg_resources.resource_filename(__name__, 'res/alive.csv'))
except IOError:
print('Dead detector map not found - assuming all working.')
DETECTOR_WORKING = np.ones([NUM_CHANNELS, len(EF_LIST)])
try:
WEIGHTS = np.loadtxt(pkg_resources.resource_filename(__name__, 'res/weights.csv'), delimiter=',')
except IOError:
print('Boundary angle channel strategy not defined - assuming equal weights.')
WEIGHTS = np.ones([NUM_CHANNELS, len(EF_LIST)])
try:
INTENSITY_COEFFICIENT = np.loadtxt(pkg_resources.resource_filename(__name__, 'res/int_corr.csv'), delimiter=',')
except IOError:
print('Intensity correction matrix not found - assuming all ones.')
INTENSITY_COEFFICIENT = np.ones([NUM_CHANNELS, len(EF_LIST)])
# TODO: do something with this abomination
INTENSITY_COEFFICIENT = INTENSITY_COEFFICIENT / NORM_FACTOR
def _nan_float(string):
try:
return float(string)
except ValueError:
if '*' in string:
return np.NaN
else:
raise
def _nan_int(string):
try:
return int(string)
except ValueError:
if '*' in string:
return np.NaN
else:
raise
def _extract_ki_from_header(en, fx, kfix):
e_fix = ktoe(kfix)
if fx == 2:
ei = e_fix + en
return etok(ei)
elif fx == 1:
ei = e_fix - en
return etok(ei)
else:
raise ValueError('Invalid FX value: 2 for fix kf, 1 for fix ki, got %d' % fx)
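def _extract_ki_example_sketch():
    # Editor's illustrative sketch (not part of the original module): for a
    # fixed-kf configuration (fx=2) with kfix = 1.55 A^-1 (Ef ~ 5 meV, assuming
    # the usual conversion E[meV] ~ 2.072 * k[A^-1]^2) and an energy transfer
    # of en = 1.0 meV, the incident wavevector follows from Ei = Ef + en.
    return _extract_ki_from_header(en=1.0, fx=2, kfix=1.55)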
def _number_to_scan(num):
if isinstance(num, int):
return '{:06d}'.format(num)
else:
return num
def _parse_flatcone_line(line):
data = np.array([_nan_int(x) for x in line.split()])
array = np.reshape(data, (-1, len(EF_LIST)))[0: -1, :] # throws out last line which is only artifact
ang_channels = np.asarray([np.arange(1, NUM_CHANNELS + 1)]).T # starts at 1 to match stickers
array_with_ch_no = np.hstack([ang_channels, array])
dataframe_flatcone = pd.DataFrame(data=array_with_ch_no, columns=['aCh', 'e1', 'e2', 'e3', 'e4', 'e5'])
dataframe_flatcone.set_index('aCh', inplace=True)
return dataframe_flatcone
def _parse_param_line(line):
line_name = line[0:5]
line_body = line[6:].strip()
if line_name == 'COMND':
no_points = int(re.findall('(?<=NP)[\s\t0-9]*', line_body)[0].strip())
return line_name, {'value': line_body, 'NP': no_points}
elif '=' not in line_body:
return line_name, line_body
else:
equations = line_body.split(',')
line_dict = {}
for eq in equations:
param_name, value_raw = [x.strip() for x in eq.split('=')]
try:
value = _nan_float(value_raw)
except ValueError:
value = value_raw
line_dict[param_name] = value
return line_name, line_dict
def parse_ill_data(file_object, start_flag='DATA_:\n'):
"""
Parses ILL TASMAD scan files.
    :param file_object: Handle to an opened file or stream, or alternatively a path to the scan file.
:param start_flag: Start flag of data section. Omit for default.
:return: (header_dict, dataframe)
"""
# first parse headers
try:
file_object.seek(0, 0)
except AttributeError:
file_object = open(file_object, 'r')
text_data = file_object.read()
headers = re.findall('^[A-Z_]{5}:.*', text_data, re.MULTILINE)
header_dict = defaultdict(dict)
for line in headers:
line_name, line_body = _parse_param_line(line)
if type(line_body) is dict:
header_dict[line_name].update(line_body)
else:
header_dict[line_name].update({'value': line_body})
# then parse scan parameters and counts
data_section = text_data[text_data.find(start_flag) + len(start_flag) + 1:]
column_names = data_section.splitlines()[0].split()
    # match lines containing only digits, '*', '-', '.', spaces and tabs
parameters_text_lines = re.findall('^[0-9*\-\s\t.]+?$', data_section, re.MULTILINE)
parameters_value_array = np.asarray([[_nan_float(num) for num in line.split()] for line in parameters_text_lines])
data_frame = pd.DataFrame(data=parameters_value_array, columns=column_names)
data_frame['PNT'] = data_frame['PNT'].astype('int16')
df_clean = data_frame.T.drop_duplicates().T
# parse flatcone data if present
flat_all = re.findall('(?<=flat: )[0-9w\s\t\n*]+(?=endflat)', text_data, re.MULTILINE)
flat_number_lines = len(flat_all)
if len(df_clean) == 0:
        raise ValueError('file %s does not contain any data.' % file_object.name)
if len(df_clean) - flat_number_lines <= 1: # sanity check: only 1 missing flatcone line is acceptable
flat_frames = []
for nth, line in enumerate(flat_all):
try:
flat_frames.append(_parse_flatcone_line(line))
except ValueError:
raise ValueError('point %d in file %s is faulty.' % (nth + 1, file_object.name))
if len(df_clean) - flat_number_lines == 1:
df_clean.drop(df_clean.index[-1], inplace=True) # if only one line is missing then just drop last line
df_clean = df_clean.assign(flat=flat_frames)
else:
pass
return dict(header_dict), df_clean
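def _parse_example_sketch(scan_path='068577'):
    # Editor's illustrative sketch (not part of the original module): parse a
    # TASMAD scan file (placeholder path) and look at a few common columns.
    # The exact columns present depend on the scan definition.
    header, data = parse_ill_data(scan_path)
    return header['COMND'], data[['PNT', 'A3', 'M1']].head()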
def ub_from_header(scan_header):
# type: ((dict, Scan)) -> UBMatrix
"""
Make a UBMatrix object from TASMAD scan header.
:param scan_header:
:return: UBMatrix object
"""
if isinstance(scan_header, Scan):
scan_header = scan_header.header
param = scan_header['PARAM']
lattice_parameters = [param['AS'], param['BS'], param['CS'], param['AA'], param['BB'], param['CC']]
hkl1 = [float(param['AX']), float(param['AY']), float(param['AZ'])]
hkl2 = [float(param['BX']), float(param['BY']), float(param['BZ'])]
ub_matrix = UBMatrix(lattice_parameters, hkl1, hkl2)
return ub_matrix
class Scan(object):
"""
Reads a TASMAD scan file, extracts metadata and do essential conversions. Assumes const-Ei scan!
Usually not instantiated on its own. Use read_mf_scan() or read_mf_scans() instead.
"""
def __init__(self, file_name, ub_matrix=None, intensity_matrix=None, a3_offset=0.0, a4_offset=0.0):
"""
Scan object.
:param file_name: File name of TASMAD scan file.
:param ub_matrix: UBMatrix object to be used. Omit to generate from file header.
:param intensity_matrix: Intensity correction matrix to be used. Omit to use default.
:return: Scan object.
Examples:
>>> import multiflexxlib as mfl
>>> s1 = mfl.Scan('068577') # opens scan file 068577
>>> s2 = mfl.Scan(68577) # also possible to provide filename in number form. Will be padded to full length.
>>> u = mfl.UBMatrix([4.05, 4.05, 4.05, 90, 90, 90], [1, 0, 0], [0, 0, 1])
>>> s3 = mfl.Scan(68577, ub_matrix=u, a3_offset=1.2) # Applies a custom UBMatrix and add 1.2 degrees to all A3
angles.
>>> s3.a3_offset = 1.95 # a3_offset and a4_offset can be set after creation.
"""
file_name = _number_to_scan(file_name)
f = open(file_name)
self.header, self.data = parse_ill_data(f)
self.file_name = os.path.abspath(file_name)
self._a3_offset = a3_offset
self._a4_offset = a4_offset
self._apply_offsets(a3_offset, a4_offset)
if 'flat' not in self.data.columns:
raise AttributeError('%s does not contain MultiFLEXX data.' % file_name)
elif 'A3' not in self.header['STEPS'].keys():
raise AttributeError('%s is not A3 scan.' % file_name)
elif 'EI' in self.header['STEPS'].keys():
raise AttributeError('%s is not a const-E scan.' % file_name)
if intensity_matrix:
self.intensity_matrix = intensity_matrix
else:
self.intensity_matrix = INTENSITY_COEFFICIENT
if not ub_matrix:
self.ub_matrix = ub_from_header(self.header)
else:
self.ub_matrix = ub_matrix
self.converted_dataframes = []
self._update_data_array()
print('finished loading %s, a3_offset = %.2f, a4_offset = %.2f' %
(file_name, self.a3_offset, self.a4_offset))
@property
def ki(self):
try:
ki = self.data.iloc[0]['KI']
except KeyError:
try:
ki = etok(self.data.iloc[0]['EI'])
except KeyError:
ki = _extract_ki_from_header(self.header['POSQE']['EN'], self.header['PARAM']['FX'],
self.header['PARAM']['KFIX'])
return ki
@property
def tt(self):
try:
tt = self.data.iloc[-1]['TT'] # takes final value as signature value for the scan
except KeyError:
tt = None
return tt
@property
def mag(self):
try:
mag = self.data.iloc[-1]['MAG']
except KeyError:
mag = None
return mag
@property
def ei(self):
"""
Initial Energy (Ei) of scan.
:return: Ei in meV
"""
return ktoe(self.ki)
@property
def np_planned(self):
"""
Total planned points in scan based on command.
:return: Integer steps.
"""
return self.header['COMND']['NP']
@property
def np_actual(self):
"""
Actual finished points. Different from planned if scan is unfinished.
:return: Integer steps.
"""
return len(self.data)
@property
def scan_number(self):
"""
Scan number.
:return: String of scan file name, which should be numeric for TASMAD files.
"""
return os.path.split(self.file_name)[1]
@property
def a3_offset(self):
return self._a3_offset
@property
def a4_offset(self):
return self._a4_offset
@a3_offset.setter
def a3_offset(self, value):
a3_offset_old = self.a3_offset
a3_offset_new = value
a3_add = a3_offset_new - a3_offset_old
self._apply_offsets(a3_add, 0.0)
self._update_data_array()
self._a3_offset = a3_offset_new
@a4_offset.setter
def a4_offset(self, value):
        a4_offset_old = self.a4_offset
a4_offset_new = value
a4_add = a4_offset_new - a4_offset_old
self._apply_offsets(0.0, a4_add)
self._update_data_array()
self._a4_offset = a4_offset_new
@property
def planned_locus_list(self):
kf_list = [etok(e) for e in EF_LIST]
a3_start, a3_end_actual, a3_end_planned = self.a3_ranges
a4_start, a4_end_actual, a4_end_planned = self.a4_ranges
return [calculate_locus(self.ki, kf, a3_start, a3_end_planned, a4_start, a4_end_planned,
self.ub_matrix, expand_a3=True) for kf in kf_list]
@property
def actual_locus_list(self):
kf_list = [etok(e) for e in EF_LIST]
a3_start, a3_end_actual, a3_end_planned = self.a3_ranges
a4_start, a4_end_actual, a4_end_planned = self.a4_ranges
return [calculate_locus(self.ki, kf, a3_start, a3_end_actual, a4_start, a4_end_actual,
self.ub_matrix) for kf in kf_list]
def _apply_offsets(self, a3_offset, a4_offset):
self.data.A3 = self.data.A3 + a3_offset
self.data.A4 = self.data.A4 + a4_offset
def _update_data_array(self):
num_ch = NUM_CHANNELS
channel_separation = CHANNEL_SEPARATION
num_flat_frames = len(self.data)
        # a numpy array caching a3, a4 angles and monitor counts, shared across all energy channels
a3_a4_mon_array = np.zeros([num_flat_frames * num_ch, 3])
a4_angle_mask = np.linspace(-channel_separation * (num_ch - 1) / 2,
channel_separation * (num_ch - 1) / 2, num_ch)
for i in range(num_flat_frames):
a3_a4_mon_array[i * num_ch: (i + 1) * num_ch, 0] = self.data.loc[i, 'A3']
a3_a4_mon_array[i * num_ch: (i + 1) * num_ch, 1] = self.data.loc[i, 'A4'] + a4_angle_mask
a3_a4_mon_array[i * num_ch: (i + 1) * num_ch, 2] = self.data.loc[i, 'M1']
data_template = pd.DataFrame(index=range(num_flat_frames * num_ch),
columns=['A3', 'A4', 'MON', 'px', 'py', 'pz', 'h', 'k', 'l',
'counts', 'valid', 'coeff', 'ach', 'point'], dtype='float64')
data_template.loc[:, ['A3', 'A4', 'MON']] = a3_a4_mon_array
self.converted_dataframes = [data_template.copy() for _ in range(len(EF_LIST))]
for ef_channel_num, ef in enumerate(EF_LIST):
qs = self.ub_matrix.angle_to_q(self.ki, etok(ef), a3_a4_mon_array[:, 0], a3_a4_mon_array[:, 1])
self.converted_dataframes[ef_channel_num].loc[:, ['px', 'py', 'pz']] = self.ub_matrix.convert(qs, 'sp').T
self.converted_dataframes[ef_channel_num].loc[:, ['h', 'k', 'l']] = self.ub_matrix.convert(qs, 'sr').T
coefficient = INTENSITY_COEFFICIENT
detector_working = DETECTOR_WORKING
for ef_channel_num in range(len(EF_LIST)):
dataframe = self.converted_dataframes[ef_channel_num]
counts = np.zeros(num_ch * num_flat_frames, dtype='float64')
valid = np.zeros(num_ch * num_flat_frames, dtype='float64')
coeff = np.zeros(num_ch * num_flat_frames, dtype='float64')
point = np.zeros(num_ch * num_flat_frames, dtype='float64')
ach = np.zeros(num_ch * num_flat_frames, dtype='float64')
for point_num in range(num_flat_frames):
flatcone_array = np.asarray(self.data.loc[point_num, 'flat'])
# START direct access to DataFrame
# rows = slice(point_num * num_ch, (point_num + 1) * num_ch - 1, None)
# dataframe.at[rows, 'counts'] = flatcone_array[:, ef_channel_num]
# dataframe.at[rows, 'valid'] = detector_working[:, ef_channel_num]
# dataframe.at[rows, 'coeff'] = coefficient[:, ef_channel_num]
# dataframe.at[rows, 'point'] = self.data.loc[point_num, 'PNT']
# dataframe.at[rows, 'ach'] = range(1, num_ch + 1)
# END direct access to DataFrame
                # Buffer results into ndarrays first; roughly 4x faster than writing to the DataFrame row by row.
rows = slice(point_num * num_ch, (point_num + 1) * num_ch, None)
counts[rows] = flatcone_array[:, ef_channel_num]
valid[rows] = detector_working[:, ef_channel_num]
coeff[rows] = coefficient[:, ef_channel_num]
point[rows] = self.data.loc[point_num, 'PNT']
ach[rows] = range(1, num_ch + 1)
dataframe.counts = counts
dataframe.valid = valid
dataframe.coeff = coeff
dataframe.point = point
dataframe.ach = ach
@property
def a3_ranges(self):
a3_start = self.data.iloc[0]['A3']
a3_end_actual = self.data.iloc[-1]['A3']
try:
a3_end_planned = self.header['VARIA']['A3'] + \
self.header['STEPS']['A3'] * (self.header['COMND']['NP'] - 1) + self._a3_offset
except KeyError:
a3_end_planned = a3_end_actual
return a3_start, a3_end_actual, a3_end_planned
@property
def a4_ranges(self):
a4_start = self.header['VARIA']['A4'] + self._a4_offset # A4 is not necessarily outputted in data
if 'A4' not in self.header['STEPS']:
a4_end_planned = a4_start
a4_end_actual = a4_start
else:
a4_end_planned = self.header['VARIA']['A4'] + \
self.header['STEPS']['A4'] * (self.header['COMND']['NP'] - 1) + self._a4_offset
a4_end_actual = self.data.iloc[-1]['A4']
return a4_start, a4_end_actual, a4_end_planned
def to_csv(self, file_name=None, channel=None):
raise NotImplementedError('Not yet implemented, please export from BinnedData class instead.')
def make_bin_edges(values, tolerance=0.2, strategy=BIN_ADAPTIVE, detect_diffuse=True):
# type: ((list, pd.Series), float) -> list
"""
:param values: An iterable list of all physical quantities, repetitions allowed.
:param tolerance: maximum difference in value for considering two points to be the same.
:param strategy: (str, iterable) 'adaptive' to bin points based on proximity, 'regular' to bin points into a regular
set of bins. Provide an iterable to manually set bin EDGES.
:param detect_diffuse: Raise an exception if a bin is striding over a diffuse group of points.
:return: a list of bin edges
    Walks through the sorted unique values; if a point is further than tolerance away from the next, a bin edge is
    placed between the two points, otherwise no edge is added. Beginning and ending edges are added tolerance / 2
    beyond either end.
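    Example (illustrative numbers, adaptive strategy): 0.0 and 0.1 fall within tolerance of each other, as do 0.9
    and 1.0, while the large gap in between opens a new bin edge.
    >>> make_bin_edges([0.0, 0.1, 0.9, 1.0], tolerance=0.3)
    [-0.15, 0.5, 1.15]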
"""
if isinstance(strategy, str):
if strategy == BIN_ADAPTIVE:
values_array = np.asarray(values).ravel()
unique_values = np.asarray(list(set(values_array)))
unique_values.sort()
bin_edges = [unique_values[0] - tolerance / 2] # First bin edge should be to the 'left' of smallest value.
current_walk = 0
for i in range(len(unique_values) - 1):
if unique_values[i+1] - unique_values[i] > tolerance: # New bin edge if two points further than tol.
bin_edges.append((unique_values[i] + unique_values[i+1]) / 2)
current_walk = 0
else:
# Keep track of how much this bin is spanning.
current_walk = current_walk + unique_values[i+1] - unique_values[i]
if current_walk > 2 * tolerance and detect_diffuse:
raise ValueError('Bin edge creation failed due to diffuse clustering of values.')
bin_edges.append(unique_values[-1] + tolerance / 2)
return bin_edges
elif strategy == BIN_REGULAR:
values_array = np.asarray(values).ravel()
unique_values = np.asarray(list(set(values_array)))
unique_values.sort()
bin_edges = list(np.arange(unique_values[0] - tolerance / 2, unique_values[-1], tolerance))
bin_edges.append(unique_values[-1] + tolerance / 2)
return bin_edges
else:
raise ValueError('Invalid binning strategy provided: (\'%s \', \'%s\', list) expected, got %s' %
(BIN_ADAPTIVE, BIN_REGULAR, strategy))
else: # if strategy is not a string
return [x for x in strategy] # it will at least be an iterable
def _merge_locus(locus_list):
clipper = pyclipper.Pyclipper()
for locus in locus_list:
clipper.AddPath(pyclipper.scale_to_clipper(locus), pyclipper.PT_SUBJECT)
merged_locus = pyclipper.scale_from_clipper(clipper.Execute(pyclipper.CT_UNION, pyclipper.PFT_NONZERO))
return merged_locus
def _merge_scan_points(data_frames, a3_tolerance=0.2, a4_tolerance=0.2, a3_bins=BIN_ADAPTIVE, a4_bins=BIN_ADAPTIVE):
"""
Bins actual detector counts together from multiple runs.
:param data_frames: Pandas data frames from Scan objects.
    :param a3_tolerance: Max angle difference before two A3 angles are considered discrete.
:param a4_tolerance: See a3_tolerance.
    :return: A DataFrame of merged points with summed counts/monitor and averaged angles and coordinates.
"""
joined_frames = pd.concat(data_frames, axis=0, ignore_index=True)
joined_frames = joined_frames.assign(counts_norm=joined_frames.counts/joined_frames.coeff)
joined_frames = joined_frames.drop(joined_frames[joined_frames.valid != 1].index) # delete dead detectors
a3_cuts = bin_and_cut(joined_frames.A3, tolerance=a3_tolerance, strategy=a3_bins)
try:
a4_cuts = bin_and_cut(joined_frames.A4, tolerance=a4_tolerance, strategy=a4_bins)
result = _decoupled_angle_merge(joined_frames, a3_cuts, a4_cuts)
return result
except ValueError as err: # If A4 is diffused across entire range due to small yet non-zero A4 step.
if type(a4_bins) is str:
if a4_bins == BIN_ADAPTIVE: # Decided not to rely on 'and' condition shortcut.
result = _coupled_angle_merge(joined_frames, a3_tolerance, a3_bins, a4_tolerance, a4_bins)
return result
raise err
def _decoupled_angle_merge(joined_frames, a3_cuts, a4_cuts):
# helper function for merging scan points. Used if A3 and A4 angles can be binned independently.
group = joined_frames.groupby([a3_cuts, a4_cuts])
sums = group[['counts', 'counts_norm', 'MON']].sum()
means = group[['A3', 'A4', 'px', 'py', 'pz', 'h', 'k', 'l']].mean()
error_bars = np.sqrt(sums.counts)
per_monitor = sums.counts_norm / sums.MON
result = pd.concat([sums, means], axis=1)
result = result.assign(err=error_bars, permon=per_monitor)
result = result.dropna().reset_index(drop=True)
return result
def _coupled_angle_merge(joined_frames, a3_tolerance, a3_bins, a4_tolerance, a4_bins):
# Used if A4 angle has a non-zero step that is smaller than precision, and there are enough steps to make A4 angles
# seem 'continuous'. MUCH SLOWER than decoupled binning!
a3_bin_edges = make_bin_edges(joined_frames.A3, tolerance=a3_tolerance, strategy=a3_bins)
fragments = []
for i in range(len(a3_bin_edges) - 1):
a3_left = a3_bin_edges[i]
a3_right = a3_bin_edges[i+1]
filtered = joined_frames.loc[joined_frames.A3.between(a3_left, a3_right)]
a4_cuts = bin_and_cut(filtered.A4, tolerance=a4_tolerance, strategy=a4_bins)
group = filtered.groupby([a4_cuts])
sums = group[['counts', 'counts_norm', 'MON']].sum()
means = group[['A3', 'A4', 'px', 'py', 'pz', 'h', 'k', 'l']].mean()
error_bars = np.sqrt(sums.counts)
per_monitor = sums.counts_norm / sums.MON
fragment = pd.concat([sums, means], axis=1)
fragment = fragment.assign(err=error_bars, permon=per_monitor)
fragment = fragment.dropna().reset_index(drop=True)
fragments.append(fragment)
result = pd.concat(fragments, axis=0).reset_index(drop=True)
return result
def bin_and_cut(data, tolerance=0.2, strategy=BIN_ADAPTIVE, detect_diffuse=True):
# type: (pd.Series, float) -> Categorical
"""
    Applies adaptive binning and returns a pandas.Categorical cut object.
:param data: a series or list of numbers. Repetition allowed.
:param tolerance: Binning tolerance.
:param strategy: 'adaptive', 'regular' or a list describing bin edges.
    :param detect_diffuse: Detect if the values are semi-continuous and cannot be cut into bins using adaptive mode.
:return: pd.cut
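    Example (illustrative angles): ``bin_and_cut(pd.Series([10.0, 10.05, 11.0]), tolerance=0.2)`` puts the first
    two values in one category and 11.0 in another.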
"""
bin_edges = make_bin_edges(data, tolerance, strategy=strategy, detect_diffuse=detect_diffuse)
cut = pd.cut(data, bin_edges)
return cut
def series_to_binder(items):
"""
Helper function for converting list to _DataBinder object. The _DataBinder class is just for overriding str method.
:param items: Anything that makes sense with list(items).
:return:
"""
# type: (pd.Series)->_DataBinder
return _DataBinder(list(items))
def bin_scans(list_of_data, # type: ['Scan']
nan_fill=0, ignore_ef=False,
en_tolerance=0.05, tt_tolerance=1.0, mag_tolerance=0.05, a3_tolerance=0.2, a4_tolerance=0.2,
en_bins=BIN_ADAPTIVE, tt_bins=BIN_ADAPTIVE, mag_bins=BIN_ADAPTIVE, a3_bins=BIN_ADAPTIVE,
a4_bins=BIN_ADAPTIVE,
angle_voronoi=False):
# type: (...)-> BinnedData
"""
Bin raw Scan objects into BinnedData object.
:param list_of_data: a list of Scan objects.
    :param nan_fill: how to deal with NaNs in metadata such as temperature. Default is to fill with 0.
:param ignore_ef: Not implemented, default is False.
:param en_tolerance: Energy binning tolerance.
:param tt_tolerance: Temperature binning tolerance.
:param mag_tolerance: Magnetic field binning tolerance.
:param a3_tolerance: A3 angle binning tolerance of data points.
:param a4_tolerance: A4 angle binning tolerance of data points.
:param en_bins: (str, iterable) Strategy for bin creation. 'adaptive' to bin points based on proximity; 'regular'
creates a regular grid of bins. Provide an iterable to manually set bin EDGES.
:param mag_bins: see en_bins.
:param tt_bins: see en_bins.
:param a3_bins: see en_bins.
:param a4_bins: see en_bins.
:param angle_voronoi: Performs Voronoi partition in angle plane instead of reciprocal plane.
:return: BinnedData object.
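    Example (illustrative; assumes s1 and s2 are Scan objects loaded with read_mf_scan):
    >>> data = bin_scans([s1, s2], a3_tolerance=0.3)  # doctest: +SKIP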
"""
all_data = pd.DataFrame(index=range(len(list_of_data) * len(EF_LIST)),
columns=['name', 'ei', 'ef', 'en', 'tt', 'mag', 'points', 'locus_a', 'locus_p'],
dtype=np.object)
file_names = [data.file_name for data in list_of_data]
for i, scan in enumerate(list_of_data):
for j in range(len(EF_LIST)):
ef = EF_LIST[j]
all_data.loc[i * len(EF_LIST) + j, ['name', 'ei', 'ef', 'en']] = [scan.file_name, scan.ei, ef, scan.ei - ef]
all_data.loc[i * len(EF_LIST) + j, ['tt', 'mag']] = [scan.tt, scan.mag]
all_data.loc[i * len(EF_LIST) + j]['points'] = scan.converted_dataframes[j]
all_data.loc[i * len(EF_LIST) + j]['locus_a'] = scan.actual_locus_list[j]
all_data.loc[i * len(EF_LIST) + j]['locus_p'] = scan.planned_locus_list[j]
all_data = all_data.fillna(nan_fill)
cut_ei = bin_and_cut(all_data.ei, en_tolerance, strategy=en_bins)
cut_en = bin_and_cut(all_data.en, en_tolerance, strategy=en_bins)
cut_tt = bin_and_cut(all_data.tt, tt_tolerance, strategy=tt_bins)
cut_mag = bin_and_cut(all_data.mag, mag_tolerance, strategy=mag_bins)
if ignore_ef:
raise NotImplementedError('For the love of god do not try to mix data from different final energies!')
else:
grouped = all_data.groupby([cut_ei, cut_en, cut_tt, cut_mag])
grouped_meta = grouped[['ei', 'ef', 'en', 'tt', 'mag']].mean()
grouped_data = grouped['points'].\
apply(series_to_binder).\
apply(lambda x: _MergedDataPoints(x, a3_tolerance, a4_tolerance, a3_bins, a4_bins) if np.any(pd.notna(x)) else np.NaN)
grouped_locus_a = grouped['locus_a'].\
apply(series_to_binder).apply(lambda x: _MergedLocus(x) if np.any(pd.notna(x)) else np.NaN)
grouped_locus_p = grouped['locus_p'].\
apply(series_to_binder).apply(lambda x: _MergedLocus(x) if np.any(pd.notna(x)) else np.NaN)
joined = pd.concat([grouped_meta, grouped_data, grouped_locus_a, grouped_locus_p], axis=1)
index_reset = joined.dropna().reset_index(drop=True)
return BinnedData(index_reset, file_names=file_names, ub_matrix=list_of_data[0].ub_matrix,
angle_voronoi=angle_voronoi)
def read_mf_scan(filename, ub_matrix=None, intensity_matrix=None, a3_offset=0.0, a4_offset=0.0):
# type: (str, UBMatrix, np.ndarray, float ,float) -> Scan
"""
Reads TASMAD scan files.
:param filename: TASMAD file name to read.
:param ub_matrix: UBMatrix to be used. Omit to generate automatically.
:param intensity_matrix: Int. matrix to use. Omit to use default.
:param a3_offset: Value to be added to A3 angles in this scan file.
:param a4_offset: Value to be added to A4 angles in this scan file.
:return: Scan object
"""
scan_object = Scan(filename, ub_matrix, intensity_matrix, a3_offset=a3_offset, a4_offset=a4_offset)
return scan_object
def read_mf_scans(filename_list=None, # type: ['str']
ub_matrix=None, intensity_matrix=None, processes=1, a3_offset=None, a4_offset=None):
"""
# type: (...) -> ['Scan']
Reads TASMAD scan files.
:param filename_list: A list of TASMAD file names to read. User will be prompted for a folder if omitted.
:param ub_matrix: UBMatrix to be used. Omit to generate automatically.
:param intensity_matrix: Int. matrix to use. Omit to use default.
:param processes: Number of processes.
:param a3_offset: Number, list or None. Will be added to A3 angles if provided. Each element will be added to
corresponding scan file if a list is provided. List length must match number of files.
:param a4_offset: Number, list or None. Will be added to A4 angles if provided. Each element will be added to
corresponding scan file if a list is provided. List length must match number of files.
:return: A list containing resulting Scan objects.
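    Example (illustrative file names; the dict form applies an offset only to the named scan):
    >>> scans = read_mf_scans(['068577', '068578'], a3_offset={'068578': 1.2})  # doctest: +SKIP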
"""
if filename_list is None:
path = ask_directory('Folder containing data')
filename_list = list_flexx_files(path)
if len(filename_list) == 0:
raise FileNotFoundError('No file to read.')
a3_offset_list = _expand_offset_parameter(a3_offset, filename_list)
a4_offset_list = _expand_offset_parameter(a4_offset, filename_list)
arg_list = []
for name, a3o, a4o in zip(filename_list, a3_offset_list, a4_offset_list):
arg_list.append((name, ub_matrix, intensity_matrix, a3o, a4o))
if processes > 1:
pool = mp.Pool(processes=processes)
data_list = pool.starmap(read_mf_scan, arg_list)
else:
data_list = list(itertools.starmap(read_mf_scan, arg_list))
return data_list
def _expand_offset_parameter(param, filename_list):
length = len(filename_list)
if param is None:
return [0.0 for _ in range(length)]
elif isinstance(param, (int, float)):
return [param for _ in range(length)]
elif isinstance(param, (list, tuple)):
if len(filename_list) == len(param):
return param
else:
raise ValueError('Offset list length and number of files mismatch.')
elif isinstance(param, dict):
param_filtered = {_number_to_scan(key): param[key] for key in param.keys()}
offset_list = []
for filename in filename_list:
filename = os.path.split(filename)[1]
try:
offset_list.append(param_filtered[filename])
except KeyError:
offset_list.append(0.0)
return offset_list
else:
raise TypeError('Offset should be either None, a number, a list or a dict.')
def read_and_bin(filename_list=None, ub_matrix=None, intensity_matrix=None, processes=1,
en_tolerance=0.05, tt_tolerance=1.0, mag_tolerance=0.05, a3_tolerance=0.2, a4_tolerance=0.2,
en_bins=BIN_ADAPTIVE, tt_bins=BIN_ADAPTIVE, mag_bins=BIN_ADAPTIVE, a3_bins=BIN_ADAPTIVE,
a4_bins=BIN_ADAPTIVE,
a3_offset=None, a4_offset=None, angle_voronoi=False):
"""
Reads and bins MultiFLEXX scan files.
:param filename_list: A list containing absolute or relative paths of TASMAD scan files to read.
Integer type elements will be padded to full FLEXX scan file names. User will be prompted to choose a directory if
omitted.
:param ub_matrix: UBMatrix object to be used. Omit to generate from data headers.
:param intensity_matrix: Intensity correction matrix to be used. Omit to use the default one.
:param processes: Number of processes to use.
:param en_tolerance: Energy tolerance before two values are considered discrete, default to 0.05meV.
:param tt_tolerance: Temperature tolerance, default to 1.0K.
:param mag_tolerance: Magnetic field tolerance, default to 0.05T.
:param a3_tolerance: A3 angle tolerance, default is 0.2deg.
:param a4_tolerance: A4 angle tolerance, default is 0.2deg.
:param en_bins: (str, list) Strategy for bin creation. 'adaptive' to bin points based on proximity; 'regular'
creates a regular grid of bins.
:param mag_bins: see en_bins.
:param tt_bins: see en_bins.
:param a3_bins: see en_bins.
:param a4_bins: see en_bins.
:param a3_offset: Angle value to be added into raw A3 angles, in degrees.
:param a4_offset: Angle value to be added into raw A4 angles, in degrees.
:param angle_voronoi: Whether to perform Voronoi tessellation in angles instead of Q-coordinates.
:return: BinnedData object.
Examples:
>>> import multiflexxlib as mfl
>>> df1 = mfl.read_and_bin() # Prompts for a path, reads and bins all found data.
>>> u = mfl.UBMatrix([4.05, 4.05, 4.05, 90, 90, 90], [1, 0, 0], [0, 0, 1]) # creates an UBMatrix
>>> df2 = mfl.read_and_bin(ub_matrix=u) # loads data but apply supplied UBMatrix instead of auto generation.
>>> df3 = mfl.read_and_bin(a3_offset=1.2, a4_tolerance=0.4) # There is an A3 angle offset and A4 angle error
due to aging Tanzboden. We wish to loosen A4 angle binning tolerance. Apply these numbers to loaded data.
>>> df4 = mfl.read_and_bin(a3_tolerance=1, a3_bins='regular') # A3 rotation is a huge mess and lands on large,
# random error. Falls back to regular bins using 'regular' bin mode.
>>> df5 = mfl.read_and_bin(angle_voronoi=True) # Performs Voronoi partition in angle space instead of Q-space.
# Useful when you need regions with identical angle values fully line up.
"""
if filename_list is None:
items = read_mf_scans(filename_list, ub_matrix, intensity_matrix, processes, a3_offset, a4_offset)
else:
if isinstance(filename_list, list):
items = read_mf_scans(filename_list, ub_matrix, intensity_matrix, processes, a3_offset, a4_offset)
elif os.path.isdir(filename_list):
filename_list = list_flexx_files(filename_list)
items = read_mf_scans(filename_list, ub_matrix, intensity_matrix, processes, a3_offset, a4_offset)
else:
raise ValueError('Got a parameter that is neither a list nor a directory (got %s)' % str(filename_list))
df = bin_scans(items, en_tolerance=en_tolerance, tt_tolerance=tt_tolerance, mag_tolerance=mag_tolerance,
a3_tolerance=a3_tolerance, a4_tolerance=a4_tolerance, en_bins=en_bins, tt_bins=tt_bins,
mag_bins=mag_bins, a3_bins=a3_bins, a4_bins=a4_bins, angle_voronoi=angle_voronoi)
return df
class _DataBinder(list):
"""
Helper class to override __str__ behaviour.
"""
def __str__(self):
return '%d items' % len(self)
class _MergedLocus(list):
"""
Helper class to override __str__ behaviour.
"""
def __init__(self, items):
# type: (_DataBinder) -> None
binned_locus = _merge_locus(items)
super(_MergedLocus, self).__init__(binned_locus)
def __str__(self):
patches = len(self)
total_vertices = float(np.sum([len(patch) for patch in self]))
return '%dp %dv' % (patches, total_vertices)
class _MergedDataPoints(pd.DataFrame):
# Helper class to override __str__ behaviour.
def __init__(self, items, a3_tolerance=0.2, a4_tolerance=0.2, a3_bins=BIN_ADAPTIVE, a4_bins=BIN_ADAPTIVE):
# type: (_DataBinder, float) -> None
binned_points = _merge_scan_points(items, a3_tolerance=a3_tolerance, a4_tolerance=a4_tolerance,
a3_bins=a3_bins, a4_bins=a4_bins)
super(_MergedDataPoints, self).__init__(binned_points)
def __str__(self):
return '%d pts' % len(self)
class BinnedData(object):
def __init__(self, source_dataframe, file_names, ub_matrix=None, angle_voronoi=False):
# type: (pd.DataFrame, [str], UBMatrix) -> None
"""
Should not be instantiated on its own.
:param source_dataframe:
:param file_names:
:param ub_matrix:
"""
self._file_names = file_names
self.data = source_dataframe
self.ub_matrix = ub_matrix
if 'voro' not in self.data.columns:
self.data.loc[:, 'voro'] = pd.Series([[] for _ in self.data.index], index=self.data.index)
self.data.voro = self.data.voro.astype(object)
self.update_voronoi(angle_voronoi=angle_voronoi)
self.angle_voronoi = angle_voronoi
def file_names(self):
"""
Files used in this dataset.
:return: List of strings.
"""
return self._file_names
def __str__(self):
return str(pd.concat((self.data[['ei', 'en', 'ef', 'tt', 'mag']],
self.data[['locus_a', 'locus_p', 'points']].astype('str')), axis=1))
def update_voronoi(self, indices=None, angle_voronoi=False):
"""
Update Voronoi tessellation polygons.
:param indices: Which entries to update. Omit to update all.
:param angle_voronoi: Whether to perform Voronoi tessellation in angles instead of absolute reciprocal lengths.
:return: None
"""
if indices is None:
indices = self.data.index
elif isinstance(indices, int):
indices = [indices]
        elif not hasattr(indices, '__iter__'):
            raise TypeError('Index must be a list, a number or None')
if not angle_voronoi:
for ind in indices:
points = self.data.loc[ind, 'points']
list_of_polygons = plotting.voronoi_polygons(points['px'], points['py'],
self.ub_matrix.figure_aspect, max_cell=0.2)
self.data.loc[ind, 'voro'][:] = []
self.data.loc[ind, 'voro'].extend(list_of_polygons)
else:
for ind in indices:
points = self.data.loc[ind, 'points']
angle_to_q = self.ub_matrix.angle_to_q
lop_angle = plotting.voronoi_polygons(points['A3'], points['A4'],
self.ub_matrix.figure_aspect, max_cell=2.5)
lop_p = [angle_to_q(etok(self.data.ei[ind]), etok(self.data.ef[ind]),
poly[:, 0], poly[:, 1], system='p') for poly in lop_angle]
lop_p_filtered = [poly.T[:, 0:2] for poly in lop_p]
self.data.loc[ind, 'voro'][:] = []
self.data.loc[ind, 'voro'].extend(lop_p_filtered)
def cut_voronoi(self, start, end, subset=None, label_precision=2, labels=None, monitor=True, plot=True):
"""
1D-cut through specified start and end points by cutting through Voronoi tessellation polygons.
:param start: starting point in r.l.u., vector.
:param end: ending point in r.l.u., vector.
:param subset: a list of indices to cut. Omit to cut all available data.
:param label_precision: refer to make_label method.
:param labels: refer to make_label method.
        :param monitor: whether to normalize by monitor count.
        :param plot: whether to spawn a plot automatically.
:return: ECut object.
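        Example (illustrative r.l.u. endpoints on a BinnedData object named data):
        >>> cut = data.cut_voronoi([1, 0, 0], [1.1, 0, 0])  # doctest: +SKIP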
"""
start_p = self.ub_matrix.convert(start, 'rp')[0:2]
end_p = self.ub_matrix.convert(end, 'rp')[0:2]
seg = np.vstack([start_p, end_p])
if subset is None:
subset = self.data.index
cut_results = []
point_indices = []
list_bin_polygons = []
for index in subset:
df = self.data.loc[index, 'points']
voro = self.data.loc[index, 'voro']
included = plotting.segment_intersect_polygons(seg, voro)
bin_polygons = [v for v, include in zip(voro, included) if include]
list_bin_polygons.append(bin_polygons)
df_filtered = df.loc[included]
point_indices.append(df_filtered.index)
points = df_filtered[['px', 'py']]
if monitor:
intensities = df_filtered['permon']
else:
intensities = df_filtered['counts_norm']
yerr = intensities / np.sqrt(df_filtered['counts'])
percentiles = plotting.projection_on_segment(np.asarray(points), seg, self.ub_matrix.figure_aspect)
result = | pd.DataFrame({'x': percentiles, 'y': intensities, 'yerr': yerr, 'bins': bin_polygons}) | pandas.DataFrame |
import pandas as pd
from collections import OrderedDict
import frappe
# TODO
# 1. create a transaction doctype list
# 2. Get all transactions
# 3. Sort all transactions by their posting dates
# 4.
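# Illustrative sketch of steps 2-3 only (hypothetical helper; assumes the doctype exposes a
# 'posting_date' field, which is not true for every doctype listed below, e.g. Material Request
# uses 'transaction_date'):
#
# def _fetch_sorted(doctype):
#     rows = frappe.get_all(doctype, fields=['name', 'posting_date'])
#     return pd.DataFrame(rows).sort_values('posting_date')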
Transaction_Type_List = [
'Purchase Invoice',
'Sales Invoice',
'Stock Entry',
'Delivery Note',
'Purchase Receipt',
'Sales Order',
'Purchase Order',
'Production Plan',
'Material Request',
'Work Order',
'Job Card'
]
Months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def get_all_transactions(month):
all_transactions = {}
all_transactions_df_dict = {}
all_transactions_clubbed_df = | pd.DataFrame() | pandas.DataFrame |
import contextlib
import os
import traceback
from itertools import chain
from typing import Any, Callable, Dict, Optional, Type
from unittest.mock import MagicMock, _CallList
import pandas as pd
import pytest
from _pytest.doctest import DoctestModule
from _pytest.python import Module
from sklearn.linear_model import LogisticRegression
from ebonite.core.objects.artifacts import Blobs, InMemoryBlob
from ebonite.core.objects.core import Model
from ebonite.core.objects.dataset_source import Dataset
from ebonite.core.objects.dataset_type import DatasetType
from ebonite.core.objects.wrapper import FilesContextManager, ModelIO, ModelWrapper
from ebonite.ext.docker.utils import is_docker_running
from ebonite.repository.artifact.local import LocalArtifactRepository
from ebonite.repository.dataset.artifact import DatasetReader, DatasetWriter
class _CallList2(_CallList):
def append(self, object) -> None:
super(_CallList2, self).append(object)
class MoreMagicMock(MagicMock):
# __fields = {'mock_call_stacks'}
def __init__(self, *args, **kwargs):
super(MoreMagicMock, self).__init__(*args, **kwargs)
self.mock_call_stacks = []
# def __setattr__(self, key, value):
# if key in self.__fields:
# object.__setattr__(self, key, value)
# else:
# super(MoreMagicMock, self).__setattr__(key, value)
#
# def __getattr__(self, item):
# if item in self.__fields:
# return self.__dict__[item]
# return super(MoreMagicMock, self).__getattr__(item)
def _mock_call(self, *args, **kwargs):
self.mock_call_stacks.append(traceback.extract_stack()[:-3])
return super(MoreMagicMock, self)._mock_call(*args, **kwargs)
@contextlib.contextmanager
def called_within_context(self, first=True, times=1):
if first:
self.assert_not_called()
times_called = self.call_count
yield
if first and times > 0:
self.assert_called()
if self.call_count != times_called + times:
frames_summary = []
for frame in self.mock_call_stacks[times_called:]:
summary = '\n'.join(f'{f.filename}:{f.lineno}' for f in frame if 'site-packages' not in f.filename)
frames_summary.append(summary)
frames_summary = '\n\n'.join(frames_summary)
raise AssertionError(f"Expected '{self._mock_name}' to have been called {times} times "
f"(got {self.call_count - times_called})\n"
f"Mock calls: \n{frames_summary}")
class MockMethod:
def __init__(self, method, proxy_mode=True):
self.proxy_mode = proxy_mode
self.method = method
@property
def __name(self):
return f'_{self.method.__name__}_mock'
def _side_effect(self, instance):
return lambda *args, **kwargs: self.method(instance, *args, **kwargs)
def __get__(self, instance, owner):
if instance is None:
return self
self._ensure_mock(instance)
return getattr(instance, self.__name)
def _ensure_mock(self, instance):
if self.__name not in instance.__dict__:
setattr(instance, self.__name,
MoreMagicMock(side_effect=self._side_effect(instance) if self.proxy_mode else None,
name=self.method.__name__))
def mock_method(method):
return MockMethod(method)
class MockMixin:
def __init_subclass__(cls, proxy_mode=True):
super().__init_subclass__()
cls.__original = dict()
for base in cls.mro():
for name, item in base.__dict__.items():
if name.startswith('_') or name in cls.__original or not callable(item):
continue
cls.__original[name] = item
setattr(cls, name, MockMethod(getattr(cls, name), proxy_mode))
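# Illustrative sketch (hypothetical FakeRepo, RealRepo and save names): mixing MockMixin into a
# class wraps each public method in a MoreMagicMock that still proxies to the real implementation,
# so behaviour is unchanged while calls can be asserted with called_within_context().
#
# class FakeRepo(RealRepo, MockMixin):
#     pass
#
# repo = FakeRepo()
# with repo.save.called_within_context(times=1):
#     repo.save(obj)  # the real save() runs; the call count is checked on context exit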
class DummyModelIO(ModelIO):
@contextlib.contextmanager
def dump(self, model) -> FilesContextManager:
yield Blobs({'test.bin': InMemoryBlob(b'test')})
def load(self, path):
return None
class DummyModelWrapper(ModelWrapper):
def __init__(self):
super().__init__(DummyModelIO())
def _exposed_methods_mapping(self) -> Dict[str, Optional[str]]:
return {
'predict': '_predict'
}
def _predict(self, data):
return data
@pytest.fixture
def dummy_model_wrapper():
return DummyModelWrapper()
@pytest.fixture
def artifact_repository(tmpdir):
return LocalArtifactRepository(tmpdir)
@pytest.fixture
def sklearn_model_obj(pandas_data):
reg = LogisticRegression()
reg.fit(pandas_data, [1, 0])
return reg
@pytest.fixture
def pandas_data():
return | pd.DataFrame([[1, 0], [0, 1]], columns=['a', 'b']) | pandas.DataFrame |
import torch
import os
import pandas as pd
import numpy as np
from TLA.Analysis.lang_mapping import mapping
from distutils.sysconfig import get_python_lib
def analysis_table():
lang_dict = mapping()
directory = "analysis"
parent_dir = get_python_lib() + "/TLA/Analysis"
p = os.path.join(parent_dir, directory)
if os.path.isdir(p) == False:
os.mkdir(p)
df=pd.DataFrame( columns=["language","total_tweets","pos","neg","percentage_positive","percentage_negative"],dtype=float)
for filename in os.listdir(get_python_lib() +"/TLA/Datasets"):
sumpos=0
sumneg=0
f = os.path.join(get_python_lib() +"/TLA/Datasets", filename)
df1= | pd.read_csv(f) | pandas.read_csv |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Implement Resampler public API."""
import numpy as np
import pandas
import pandas.core.resample
from pandas._typing import (
TimedeltaConvertibleTypes,
TimestampConvertibleTypes,
)
from pandas.core.dtypes.common import is_list_like
from typing import Optional, Union
from modin.utils import _inherit_docstrings
@_inherit_docstrings(pandas.core.resample.Resampler)
class Resampler(object):
def __init__(
self,
dataframe,
rule,
axis=0,
closed=None,
label=None,
convention="start",
kind=None,
loffset=None,
base=0,
on=None,
level=None,
origin: Union[str, TimestampConvertibleTypes] = "start_day",
offset: Optional[TimedeltaConvertibleTypes] = None,
):
self._dataframe = dataframe
self._query_compiler = dataframe._query_compiler
axis = self._dataframe._get_axis_number(axis)
self.resample_kwargs = {
"rule": rule,
"axis": axis,
"closed": closed,
"label": label,
"convention": convention,
"kind": kind,
"loffset": loffset,
"base": base,
"on": on,
"level": level,
"origin": origin,
"offset": offset,
}
self.__groups = self.__get_groups(**self.resample_kwargs)
def __getitem__(self, key):
"""
Get ``Resampler`` based on `key` columns of original dataframe.
Parameters
----------
key : str or list
String or list of selections.
Returns
-------
        modin.pandas.resample.Resampler
New ``Resampler`` based on `key` columns subset
of the original dataframe.
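        Examples
        --------
        Illustrative only (assumes ``df`` is a modin DataFrame with a DatetimeIndex and
        columns "price" and "volume"):
        >>> df.resample("2h")["price"]  # doctest: +SKIP
        >>> df.resample("2h")[["price", "volume"]]  # doctest: +SKIP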
"""
def _get_new_resampler(key):
subset = self._dataframe[key]
resampler = type(self)(subset, **self.resample_kwargs)
return resampler
from .series import Series
if isinstance(
key, (list, tuple, Series, pandas.Series, pandas.Index, np.ndarray)
):
if len(self._dataframe.columns.intersection(key)) != len(set(key)):
missed_keys = list(set(key).difference(self._dataframe.columns))
raise KeyError(f"Columns not found: {str(sorted(missed_keys))[1:-1]}")
return _get_new_resampler(list(key))
if key not in self._dataframe:
raise KeyError(f"Column not found: {key}")
return _get_new_resampler(key)
def __get_groups(
self,
rule,
axis,
closed,
label,
convention,
kind,
loffset,
base,
on,
level,
origin,
offset,
):
if axis == 0:
df = self._dataframe
else:
df = self._dataframe.T
groups = df.groupby(
pandas.Grouper(
key=on,
freq=rule,
closed=closed,
label=label,
convention=convention,
loffset=loffset,
base=base,
level=level,
origin=origin,
offset=offset,
)
)
return groups
@property
def groups(self):
return self._query_compiler.default_to_pandas(
lambda df: pandas.DataFrame.resample(df, **self.resample_kwargs).groups
)
@property
def indices(self):
return self._query_compiler.default_to_pandas(
lambda df: pandas.DataFrame.resample(df, **self.resample_kwargs).indices
)
def get_group(self, name, obj=None):
if self.resample_kwargs["axis"] == 0:
result = self.__groups.get_group(name)
else:
result = self.__groups.get_group(name).T
return result
def apply(self, func, *args, **kwargs):
from .dataframe import DataFrame
if isinstance(self._dataframe, DataFrame):
query_comp_op = self._query_compiler.resample_app_df
else:
query_comp_op = self._query_compiler.resample_app_ser
dataframe = DataFrame(
query_compiler=query_comp_op(
self.resample_kwargs,
func,
*args,
**kwargs,
)
)
if is_list_like(func) or isinstance(self._dataframe, DataFrame):
return dataframe
else:
if len(dataframe.index) == 1:
return dataframe.iloc[0]
else:
return dataframe.squeeze()
def aggregate(self, func, *args, **kwargs):
from .dataframe import DataFrame
if isinstance(self._dataframe, DataFrame):
query_comp_op = self._query_compiler.resample_agg_df
else:
query_comp_op = self._query_compiler.resample_agg_ser
dataframe = DataFrame(
query_compiler=query_comp_op(
self.resample_kwargs,
func,
*args,
**kwargs,
)
)
if | is_list_like(func) | pandas.core.dtypes.common.is_list_like |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
# In[2]:
train = pd.read_csv("D:/ML/Dataset/MedicalInsurance/Train-1542865627584.csv")
beneficiary = pd.read_csv("D:/ML/Dataset/MedicalInsurance/Train_Beneficiarydata-1542865627584.csv")
inpatient = pd.read_csv("D:/ML/Dataset/MedicalInsurance/Train_Inpatientdata-1542865627584.csv")
outpatient = pd.read_csv("D:/ML/Dataset/MedicalInsurance/Train_Outpatientdata-1542865627584.csv")
tt = pd.read_csv("D:/ML/Dataset/MedicalInsurance/Test-1542969243754.csv")
tb = pd.read_csv("D:/ML/Dataset/MedicalInsurance/Test_Beneficiarydata-1542969243754.csv")
ti = pd.read_csv("D:/ML/Dataset/MedicalInsurance/Test_Inpatientdata-1542969243754.csv")
to = pd.read_csv("D:/ML/Dataset/MedicalInsurance/Test_Outpatientdata-1542969243754.csv")
# In[3]:
df_procedures1 = pd.DataFrame(columns = ['Procedures'])
df_procedures1['Procedures'] = pd.concat([inpatient["ClmProcedureCode_1"], inpatient["ClmProcedureCode_2"], inpatient["ClmProcedureCode_3"], inpatient["ClmProcedureCode_4"], inpatient["ClmProcedureCode_5"], inpatient["ClmProcedureCode_6"]], axis=0, sort=True).dropna()
df_procedures1['Procedures'].head(10)
# In[4]:
df_procedures1.shape
# In[5]:
grouped_procedure_df = df_procedures1['Procedures'].value_counts()
grouped_procedure_df
# In[6]:
df_diagnosis = pd.DataFrame(columns = ['Diagnosis'])
df_diagnosis['Diagnosis'] = pd.concat([inpatient["ClmDiagnosisCode_1"], inpatient["ClmDiagnosisCode_2"], inpatient["ClmDiagnosisCode_3"], inpatient["ClmDiagnosisCode_4"], inpatient["ClmDiagnosisCode_5"], inpatient["ClmDiagnosisCode_6"], inpatient["ClmDiagnosisCode_7"], inpatient["ClmDiagnosisCode_8"], inpatient["ClmDiagnosisCode_9"], inpatient["ClmDiagnosisCode_10"]], axis=0, sort=True).dropna()
df_diagnosis['Diagnosis'].head(10)
# In[7]:
df_diagnosis.shape
# In[8]:
grouped_diagnosis_df = df_diagnosis['Diagnosis'].value_counts()
grouped_diagnosis_df
# In[9]:
grouped_procedure_df1 = grouped_procedure_df.to_frame()
grouped_procedure_df1
# In[10]:
grouped_procedure_df1.columns = ['count']
grouped_procedure_df1
# In[11]:
grouped_procedure_df1['Procedure'] = grouped_procedure_df1.index
grouped_procedure_df1
# In[12]:
grouped_procedure_df1['Percentage'] = (grouped_procedure_df1['count']/sum(grouped_procedure_df1['count']))*100
grouped_procedure_df1['Percentage']
# In[13]:
grouped_diagnosis_df = grouped_diagnosis_df.to_frame()
grouped_diagnosis_df.columns = ['count']
grouped_diagnosis_df['Diagnosis'] = grouped_diagnosis_df.index
grouped_diagnosis_df['Percentage'] = (grouped_diagnosis_df['count']/sum(grouped_diagnosis_df['count']))*100
grouped_diagnosis_df['Percentage']
# In[14]:
# taking only top 20
plot_procedure_df1 = grouped_procedure_df1.head(20)
plot_diagnosis_df1 = grouped_diagnosis_df.head(20)
# In[15]:
# Plotting the most commonly used diagnosis and procedures
from matplotlib import pyplot as plt
plot_procedure_df1['Procedure'] = plot_procedure_df1['Procedure'].astype(str)
plot_procedure_df1.sort_values(by=['Percentage'])
plot_procedure_df1.plot(x ='Procedure', y='Percentage', kind='bar', color ='green',
title='Procedure Distribution- Inpatient', figsize=(15,10));
# In[16]:
plot_diagnosis_df1['Diagnosis'] = plot_diagnosis_df1['Diagnosis'].astype(str)
plot_diagnosis_df1.sort_values(by=['Percentage'])
plot_diagnosis_df1.plot(x ='Diagnosis', y='Percentage', kind='bar', color ='green',
title='Diagnosis Distribution- Inpatient', figsize=(15,10));
# In[17]:
df_procedures2 = pd.DataFrame(columns = ['Procedures'])
df_procedures2['Procedures'] = pd.concat([outpatient["ClmProcedureCode_1"], outpatient["ClmProcedureCode_2"], outpatient["ClmProcedureCode_3"], outpatient["ClmProcedureCode_4"], outpatient["ClmProcedureCode_5"], outpatient["ClmProcedureCode_6"]], axis=0, sort=True).dropna()
df_procedures2['Procedures'].head(10)
# In[18]:
grouped_procedure_df2 = df_procedures2['Procedures'].value_counts()
# In[19]:
df_diagnosis2 = pd.DataFrame(columns = ['Diagnosis'])
df_diagnosis2['Diagnosis'] = pd.concat([outpatient["ClmDiagnosisCode_1"], outpatient["ClmDiagnosisCode_2"], outpatient["ClmDiagnosisCode_3"], outpatient["ClmDiagnosisCode_4"], outpatient["ClmDiagnosisCode_5"], outpatient["ClmDiagnosisCode_6"], outpatient["ClmDiagnosisCode_7"], outpatient["ClmDiagnosisCode_8"], outpatient["ClmDiagnosisCode_9"], outpatient["ClmDiagnosisCode_10"]], axis=0, sort=True).dropna()
df_diagnosis2['Diagnosis'].head(10)
grouped_diagnosis_df2 = df_diagnosis2['Diagnosis'].value_counts()
# In[20]:
grouped_procedure_df_op = grouped_procedure_df2.to_frame()
grouped_procedure_df_op.columns = ['count']
grouped_procedure_df_op['Procedure'] = grouped_procedure_df_op.index
grouped_procedure_df_op['Percentage'] = (grouped_procedure_df_op['count']/sum(grouped_procedure_df_op['count']))*100
grouped_procedure_df_op['Percentage']
# In[21]:
grouped_diagnosis_df_op = grouped_diagnosis_df2.to_frame()
grouped_diagnosis_df_op.columns = ['count']
grouped_diagnosis_df_op['Diagnosis'] = grouped_diagnosis_df_op.index
grouped_diagnosis_df_op['Percentage'] = (grouped_diagnosis_df_op['count']/sum(grouped_diagnosis_df_op['count']))*100
grouped_diagnosis_df_op['Percentage']
# In[22]:
# taking only top 20
plot_procedure_df2 = grouped_procedure_df_op.head(20)
plot_diagnosis_df2 = grouped_diagnosis_df_op.head(20)
# In[23]:
# Plotting the most commonly used diagnosis and procedures
from matplotlib import pyplot as plt
plot_procedure_df2['Procedure'] = plot_procedure_df2['Procedure'].astype(str)
plot_procedure_df2.sort_values(by=['Percentage'])
plot_procedure_df2.plot(x ='Procedure', y='Percentage', kind='bar', color ='yellow',
title='Procedure Distribution- Outpatient', figsize=(15,7));
# In[24]:
plot_diagnosis_df2['Diagnosis'] = plot_diagnosis_df2['Diagnosis'].astype(str)
plot_diagnosis_df2.sort_values(by=['Percentage'])
plot_diagnosis_df2.plot(x ='Diagnosis', y='Percentage', kind='bar', color ='yellow',
title='Diagnosis Distribution- Outpatient', figsize=(15,7))
# In[25]:
T_fraud = train['PotentialFraud'].value_counts()
grouped_train_df = T_fraud.to_frame()
grouped_train_df.columns = ['count']
grouped_train_df['Fraud'] = grouped_train_df.index
grouped_train_df['Percentage'] = (grouped_train_df['count']/sum(grouped_train_df['count']))*100
grouped_train_df['Percentage'].plot( kind='bar',color = "blue", title = 'Distribution')
# In[26]:
Train_f = pd.DataFrame(columns = ['PotentialFraud', 'Provider'])
Train_f = train.loc[(train['PotentialFraud'] == 'Yes')]
Train_f
# In[27]:
fraud_provider_ip_df = pd.merge(inpatient, Train_f, how='inner', on='Provider')
fraud_provider_ip_df
# In[28]:
len(fraud_provider_ip_df)
# In[29]:
(len(fraud_provider_ip_df)/len(inpatient)) * 100
# In[30]:
fraud_provider_op_df = pd.merge(outpatient, Train_f, how='inner', on='Provider')
fraud_provider_op_df
# In[31]:
len(fraud_provider_op_df)
# In[32]:
(len(fraud_provider_op_df)/len(outpatient))*100
# In[33]:
df_procedures2 = pd.DataFrame(columns = ['Procedures'])
df_procedures2['Procedures'] = pd.concat([fraud_provider_ip_df["ClmProcedureCode_1"], fraud_provider_ip_df["ClmProcedureCode_2"], fraud_provider_ip_df["ClmProcedureCode_3"], fraud_provider_ip_df["ClmProcedureCode_4"], fraud_provider_ip_df["ClmProcedureCode_5"], fraud_provider_ip_df["ClmProcedureCode_6"]], axis=0, sort=True).dropna()
df_procedures2['Procedures'].head(10)
# In[34]:
grouped_F_procedure_df = df_procedures2['Procedures'].value_counts()
grouped_F_procedure_df
# In[35]:
grouped_F_procedure_df2 = grouped_F_procedure_df.to_frame()
grouped_F_procedure_df2.columns = ['count']
grouped_F_procedure_df2['Procedure'] = grouped_F_procedure_df2.index
grouped_F_procedure_df2['Percentage'] = (grouped_F_procedure_df2['count']/sum(grouped_F_procedure_df2['count']))*100
grouped_F_procedure_df2['Percentage']
# In[36]:
df_diagnosis2 = pd.DataFrame(columns = ['Diagnosis'])
df_diagnosis2['Diagnosis'] = pd.concat([fraud_provider_ip_df["ClmDiagnosisCode_1"], fraud_provider_ip_df["ClmDiagnosisCode_2"], fraud_provider_ip_df["ClmDiagnosisCode_3"], fraud_provider_ip_df["ClmDiagnosisCode_4"], fraud_provider_ip_df["ClmDiagnosisCode_5"], fraud_provider_ip_df["ClmDiagnosisCode_6"], fraud_provider_ip_df["ClmDiagnosisCode_7"], fraud_provider_ip_df["ClmDiagnosisCode_8"], fraud_provider_ip_df["ClmDiagnosisCode_9"], fraud_provider_ip_df["ClmDiagnosisCode_10"]], axis=0, sort=True).dropna()
df_diagnosis2['Diagnosis'].head(10)
# In[37]:
grouped_F_diagnosis_df = df_diagnosis2['Diagnosis'].value_counts()
grouped_F_diagnosis_df
# In[38]:
grouped_F_diagnosis_df2 = grouped_F_diagnosis_df.to_frame()
grouped_F_diagnosis_df2.columns = ['count']
grouped_F_diagnosis_df2['Diagnosis'] = grouped_F_diagnosis_df2.index
grouped_F_diagnosis_df2['Percentage'] = (grouped_F_diagnosis_df2['count']/sum(grouped_F_diagnosis_df2['count']))*100
grouped_F_diagnosis_df2['Percentage']
# In[39]:
plot_F_procedure_df1 = grouped_F_procedure_df2.head(20)
plot_F_diagnosis_df1 = grouped_F_diagnosis_df2.head(20)
# In[40]:
plot_F_procedure_df1.plot(x ='Procedure', y='Percentage', kind = 'bar', color ='g', figsize=(15,7))
# In[41]:
plot_F_diagnosis_df1.plot(x ='Diagnosis', y='Percentage', kind = 'bar', color ='y', figsize=(15,7))
# In[42]:
df_procedures_op2 = pd.DataFrame(columns = ['Procedures'])
df_procedures_op2['Procedures'] = pd.concat([fraud_provider_op_df["ClmProcedureCode_1"], fraud_provider_op_df["ClmProcedureCode_2"], fraud_provider_op_df["ClmProcedureCode_3"], fraud_provider_op_df["ClmProcedureCode_4"], fraud_provider_op_df["ClmProcedureCode_5"], fraud_provider_op_df["ClmProcedureCode_6"]], axis=0, sort=True).dropna()
df_procedures_op2['Procedures'].head(10)
# In[43]:
grouped_F_procedure_op_df = df_procedures_op2['Procedures'].value_counts()
grouped_F_procedure_op_df.head()
# In[44]:
grouped_F_procedure_opdf2 = grouped_F_procedure_op_df.to_frame()
grouped_F_procedure_opdf2.columns = ['count']
grouped_F_procedure_opdf2['Procedure'] = grouped_F_procedure_opdf2.index
grouped_F_procedure_opdf2['Percentage'] = (grouped_F_procedure_opdf2['count']/sum(grouped_F_procedure_opdf2['count']))*100
grouped_F_procedure_opdf2['Percentage'].head()
# In[45]:
df_diagnosis_op2 = pd.DataFrame(columns = ['Diagnosis'])
df_diagnosis_op2['Diagnosis'] = pd.concat([fraud_provider_op_df["ClmDiagnosisCode_1"], fraud_provider_op_df["ClmDiagnosisCode_2"], fraud_provider_op_df["ClmDiagnosisCode_3"], fraud_provider_op_df["ClmDiagnosisCode_4"], fraud_provider_op_df["ClmDiagnosisCode_5"], fraud_provider_op_df["ClmDiagnosisCode_6"], fraud_provider_op_df["ClmDiagnosisCode_7"], fraud_provider_op_df["ClmDiagnosisCode_8"], fraud_provider_op_df["ClmDiagnosisCode_9"], fraud_provider_op_df["ClmDiagnosisCode_10"]], axis=0, sort=True).dropna()
df_diagnosis_op2['Diagnosis'].head()
# In[46]:
grouped_F_diagnosis_op_df = df_diagnosis2['Diagnosis'].value_counts()
grouped_F_diagnosis_op_df.head()
# In[47]:
grouped_F_diagnosis_opdf2 = grouped_F_diagnosis_op_df.to_frame()
grouped_F_diagnosis_opdf2.columns = ['count']
grouped_F_diagnosis_opdf2['Diagnosis'] = grouped_F_diagnosis_opdf2.index
grouped_F_diagnosis_opdf2['Percentage'] = (grouped_F_diagnosis_opdf2['count']/sum(grouped_F_diagnosis_opdf2['count']))*100
grouped_F_diagnosis_opdf2['Percentage'].head()
# In[48]:
plot_F_procedure_opdf1 = grouped_F_procedure_opdf2.head(20)
plot_F_diagnosis_opdf1 = grouped_F_diagnosis_opdf2.head(20)
# In[49]:
plot_F_procedure_opdf1.plot(x ='Procedure', y='Percentage', kind = 'bar', color ='g', figsize=(15,7))
# In[50]:
plot_F_diagnosis_opdf1.plot(x ='Diagnosis', y='Percentage', kind = 'bar', color ='y', figsize=(15,7))
# In[51]:
beneficiary.head()
# In[52]:
fraud_beneficiary_ip_op_df = pd.merge(beneficiary, fraud_provider_ip_df, how='inner', on='BeneID')
fraud_beneficiary_ip_op_df.head()
# In[53]:
Train_F_Beneficiary_grouped = fraud_beneficiary_ip_op_df['State'].value_counts()
Train_F_Beneficiary_grouped.head()
# In[54]:
Train_F_Beneficiary_grouped1 = Train_F_Beneficiary_grouped.to_frame()
Train_F_Beneficiary_grouped1['Count'] = Train_F_Beneficiary_grouped1['State']
Train_F_Beneficiary_grouped1['STATE'] = Train_F_Beneficiary_grouped1.index
Train_F_Beneficiary_grouped1 = Train_F_Beneficiary_grouped1.drop(['State'], axis = 1)
Train_F_Beneficiary_grouped1 = Train_F_Beneficiary_grouped1.head(20)
Train_F_Beneficiary_grouped1
# In[55]:
Train_F_Beneficiary_grouped1.plot(x ='STATE', y='Count', kind = 'bar', figsize= (15,7));
# In[56]:
fraud_beneficiary_ip_op_df['DOB'] = pd.to_datetime(fraud_beneficiary_ip_op_df['DOB'], format='%Y-%m-%d')
now = pd.to_datetime('2009-12-01' , format = '%Y-%m-%d') # Assuming this is 2009 data as the last recorded death is for 2009
fraud_beneficiary_ip_op_df['DOB'] = fraud_beneficiary_ip_op_df['DOB'].where(fraud_beneficiary_ip_op_df['DOB'] < now)
fraud_beneficiary_ip_op_df['age'] = (now - fraud_beneficiary_ip_op_df['DOB']).astype('<m8[Y]')
ax = fraud_beneficiary_ip_op_df['age'].plot.hist(bins=20, alpha=0.5, figsize=(8, 6), edgecolor='b')
# In[57]:
beneficiary['DOB'] = pd.to_datetime(beneficiary['DOB'], format='%Y-%m-%d')
now = pd.to_datetime('2009-12-01' , format = '%Y-%m-%d') # Assuming this is 2009 data as the last recorded death is for 2009
beneficiary['DOB'] = beneficiary['DOB'].where(beneficiary['DOB'] < now)
beneficiary['age'] = (now - beneficiary['DOB']).astype('<m8[Y]')
ax = beneficiary['age'].plot.hist(bins=20, alpha=0.5, figsize=(8, 6), edgecolor='b')
# In[59]:
ax = inpatient['InscClaimAmtReimbursed'].plot.hist(bins=20, alpha=0.5, figsize=(8, 6), facecolor='g', edgecolor='g')
# Insurance Claim amount reimbursed.
# In[60]:
import seaborn as sns
inpatient_1 = pd.merge(inpatient, train, how='inner', on='Provider')
g = sns.FacetGrid(inpatient_1, col='PotentialFraud', height=8)
g.map(plt.hist, 'InscClaimAmtReimbursed', bins=20, color = 'g')
# In[61]:
inpatient_1 = inpatient_1.loc[(inpatient_1['PotentialFraud'] == 'Yes')]
Total = inpatient_1['InscClaimAmtReimbursed'].sum()
print(Total)
# In[62]:
ax = outpatient['InscClaimAmtReimbursed'].plot.hist(bins=100,range=[0,5000], alpha=0.5, figsize=(8, 6), facecolor='c', edgecolor='k')
# In[63]:
outpatient_1 = pd.merge(outpatient, train, how='inner', on='Provider')
g = sns.FacetGrid(outpatient_1, col='PotentialFraud', height=8)
g.map(plt.hist, 'InscClaimAmtReimbursed', bins=20, range=[0, 5000], color ='c')
# In[64]:
beneficiary.isna().sum()
# In[65]:
beneficiary['DOB'] = pd.to_datetime(beneficiary['DOB'] , format = '%Y-%m-%d')
beneficiary['DOD'] = | pd.to_datetime(beneficiary['DOD'],format = '%Y-%m-%d',errors='ignore') | pandas.to_datetime |
from contextlib import nullcontext as does_not_raise
from functools import partial
import pandas as pd
from pandas.testing import assert_series_equal
from solarforecastarbiter import datamodel
from solarforecastarbiter.reference_forecasts import persistence
from solarforecastarbiter.conftest import default_observation
import pytest
def load_data_base(data, observation, data_start, data_end):
# slice doesn't care about closed or interval label
# so here we manually adjust start and end times
if 'instant' in observation.interval_label:
pass
elif observation.interval_label == 'ending':
data_start += pd.Timedelta('1s')
elif observation.interval_label == 'beginning':
data_end -= pd.Timedelta('1s')
return data[data_start:data_end]
@pytest.fixture
def powerplant_metadata():
"""1:1 AC:DC"""
modeling_params = datamodel.FixedTiltModelingParameters(
ac_capacity=200, dc_capacity=200, temperature_coefficient=-0.3,
dc_loss_factor=3, ac_loss_factor=0,
surface_tilt=30, surface_azimuth=180)
metadata = datamodel.SolarPowerPlant(
name='Albuquerque Baseline', latitude=35.05, longitude=-106.54,
elevation=1657.0, timezone='America/Denver',
modeling_parameters=modeling_params)
return metadata
@pytest.mark.parametrize('interval_label,closed,end', [
('beginning', 'left', '20190404 1400'),
('ending', 'right', '20190404 1400'),
('instant', None, '20190404 1359')
])
def test_persistence_scalar(site_metadata, interval_label, closed, end):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
data = pd.Series(100., index=data_index)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp(end, tz=tz)
interval_length = pd.Timedelta('5min')
load_data = partial(load_data_base, data)
fx = persistence.persistence_scalar(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data=load_data)
expected_index = pd.date_range(
start='20190404 1300', end=end, freq='5min', tz=tz,
closed=closed)
expected = pd.Series(100., index=expected_index)
assert_series_equal(fx, expected)
@pytest.mark.parametrize('obs_interval_label', ('beginning', 'ending',
'instant'))
@pytest.mark.parametrize('interval_label,closed,end', [
('beginning', 'left', '20190406 0000'),
('ending', 'right', '20190406 0000'),
('instant', None, '20190405 2359')
])
def test_persistence_interval(site_metadata, obs_interval_label,
interval_label, closed, end):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min',
interval_label=obs_interval_label)
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
# each element of data is equal to the hour value of its label
data = pd.Series(data_index.hour, index=data_index, dtype=float)
if obs_interval_label == 'ending':
# e.g. timestamp 12:00:00 should be equal to 11
data = data.shift(1).fillna(0)
data_start = pd.Timestamp('20190404 0000', tz=tz)
data_end = pd.Timestamp(end, tz=tz) - pd.Timedelta('1d')
forecast_start = pd.Timestamp('20190405 0000', tz=tz)
interval_length = pd.Timedelta('60min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(
start='20190405 0000', end=end, freq='60min', tz=tz, closed=closed)
expected_vals = list(range(0, 24))
expected = pd.Series(expected_vals, index=expected_index, dtype=float)
# handle permutations of parameters that should fail
if data_end.minute == 59 and obs_interval_label != 'instant':
expectation = pytest.raises(ValueError)
elif data_end.minute == 0 and obs_interval_label == 'instant':
expectation = pytest.raises(ValueError)
else:
expectation = does_not_raise()
with expectation:
fx = persistence.persistence_interval(
observation, data_start, data_end, forecast_start,
interval_length, interval_label, load_data)
assert_series_equal(fx, expected)
def test_persistence_interval_missing_data(site_metadata):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min',
interval_label='ending')
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404T1200', end='20190406', freq='5min', tz=tz)
# each element of data is equal to the hour value of its label
end = '20190406 0000'
data = pd.Series(data_index.hour, index=data_index, dtype=float)
data = data.shift(1)
data_start = pd.Timestamp('20190404 0000', tz=tz)
data_end = pd.Timestamp(end, tz=tz) - pd.Timedelta('1d')
forecast_start = pd.Timestamp('20190405 0000', tz=tz)
interval_length = pd.Timedelta('60min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(
start='20190405 0000', end=end, freq='60min', tz=tz, closed='right')
expected_vals = [None] * 12 + list(range(12, 24))
expected = pd.Series(expected_vals, index=expected_index, dtype=float)
fx = persistence.persistence_interval(
observation, data_start, data_end, forecast_start,
interval_length, 'ending', load_data)
assert_series_equal(fx, expected)
@pytest.fixture
def uniform_data():
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
data = pd.Series(100., index=data_index)
return data
@pytest.mark.parametrize(
'interval_label,expected_index,expected_ghi,expected_ac,obsscale', (
('beginning',
['20190404 1300', '20190404 1330'],
[96.41150694741889, 91.6991546408236],
[96.60171202566896, 92.074796727846],
1),
('ending',
['20190404 1330', '20190404 1400'],
[96.2818141290749, 91.5132934827808],
[96.47816752344607, 91.89460837042301],
1),
# test clipped at 2x clearsky
('beginning',
['20190404 1300', '20190404 1330'],
[1926.5828549018618, 1832.4163238767312],
[383.1524464326973, 365.19729186262526],
50)
)
)
def test_persistence_scalar_index(
powerplant_metadata, uniform_data, interval_label,
expected_index, expected_ghi, expected_ac, obsscale):
# ac_capacity is 200 from above
observation = default_observation(
powerplant_metadata, interval_length='5min',
interval_label='beginning')
observation_ac = default_observation(
powerplant_metadata, interval_length='5min',
interval_label='beginning', variable='ac_power')
data = uniform_data * obsscale
tz = data.index.tzinfo
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
interval_length = pd.Timedelta('30min')
load_data = partial(load_data_base, data)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
expected_index, tz=tz, freq=interval_length)
expected = pd.Series(expected_ghi, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
fx = persistence.persistence_scalar_index(
observation_ac, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected = pd.Series(expected_ac, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
def test_persistence_scalar_index_instant_obs_fx(
site_metadata, powerplant_metadata, uniform_data):
# instantaneous obs and fx
interval_length = pd.Timedelta('30min')
interval_label = 'instant'
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
observation_ac = default_observation(
powerplant_metadata, interval_length='5min',
interval_label=interval_label, variable='ac_power')
data = uniform_data
tz = data.index.tzinfo
load_data = partial(load_data_base, data)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1259', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1359', tz=tz)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
['20190404 1300', '20190404 1330'], tz=tz, freq=interval_length)
expected_values = [96.59022431746838, 91.99405501672328]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
fx = persistence.persistence_scalar_index(
observation_ac, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_values = [96.77231379880752, 92.36198028963426]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
# instant obs and fx, but with offset added to starts instead of ends
data_start = pd.Timestamp('20190404 1201', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1301', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
['20190404 1300', '20190404 1330'], tz=tz, freq=interval_length)
expected_values = [96.55340033645147, 91.89662922267517]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
def test_persistence_scalar_index_invalid_times_instant(site_metadata):
data = pd.Series(100., index=[0])
load_data = partial(load_data_base, data)
tz = 'America/Phoenix'
interval_label = 'instant'
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
# instant obs that cover the whole interval - not allowed!
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
interval_length = pd.Timedelta('30min')
with pytest.raises(ValueError):
persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
@pytest.mark.parametrize('data_start,data_end,forecast_start,forecast_end', (
('20190404 1201', '20190404 1300', '20190404 1300', '20190404 1400'),
('20190404 1200', '20190404 1259', '20190404 1300', '20190404 1400'),
('20190404 1200', '20190404 1300', '20190404 1301', '20190404 1400'),
('20190404 1200', '20190404 1300', '20190404 1300', '20190404 1359'),
))
def test_persistence_scalar_index_invalid_times_interval(
site_metadata, interval_label, data_start, data_end, forecast_start,
forecast_end):
data = pd.Series(100., index=[0])
load_data = partial(load_data_base, data)
tz = 'America/Phoenix'
interval_length = pd.Timedelta('30min')
# base times to mess with
    data_start = pd.Timestamp(data_start, tz=tz)
""" ``xrview.handlers`` """
import asyncio
import numpy as np
import pandas as pd
from bokeh.document import without_document_lock
from bokeh.models import ColumnDataSource
from pandas.core.indexes.base import InvalidIndexError
from tornado import gen
from tornado.platform.asyncio import AnyThreadEventLoopPolicy
# TODO this fixes issues with tornado>=5, but it might also be the reason for
# the backed up range update callbacks
# see https://github.com/tornadoweb/tornado/issues/2531
asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
class DataHandler(object):
"""
Parameters
----------
data : pandas DataFrame
"""
def __init__(self, data):
self.source = ColumnDataSource(data)
self.source.add(data.index, "index")
class InteractiveDataHandler(DataHandler):
def __init__(self, data, context=None, verbose=False):
super(InteractiveDataHandler, self).__init__(data)
self.data = data
self.source_data = self.source.data
self.context = context
self.verbose = verbose
self.selection_bounds = None
self.selection = []
self.pending_update = False
self.update_buffer = None
self.callbacks = {
"update_data": [],
"reset_data": [],
"update_source": [],
}
def get_dict(self):
""" Get data as a dict. """
new_source_data = self.data.to_dict(orient="list")
new_source_data["index"] = self.data.index
for k in list(new_source_data):
if isinstance(k, tuple):
new_source_data["_".join(k)] = new_source_data.pop(k)
return new_source_data
@without_document_lock
@gen.coroutine
def update(self, **kwargs):
""" Update callback for handler. """
self.pending_update = True
self.update_data(**kwargs)
self.update_selection()
if self.context is not None and self.context.doc is not None:
self.context.doc.add_next_tick_callback(self.update_source)
@without_document_lock
@gen.coroutine
def reset(self):
""" Reset data and selection to be displayed. """
self.selection_bounds = None
self.selection = []
for c in self.callbacks["reset_data"]:
c()
if self.context is not None and self.context.doc is not None:
self.context.doc.add_next_tick_callback(self.update_source)
@without_document_lock
@gen.coroutine
def update_data(self, **kwargs):
""" Update data and selection to be displayed. """
self.source_data = self.get_dict()
for c in self.callbacks["update_data"]:
c()
@without_document_lock
@gen.coroutine
def update_selection(self):
""" Update selection. """
if (
self.source.selected is not None
and self.selection_bounds is not None
):
self.selection = list(
np.where(
(self.source_data["index"] >= self.selection_bounds[0])
& (self.source_data["index"] <= self.selection_bounds[1])
)[0]
)
else:
self.selection = []
@gen.coroutine
def update_source(self):
""" Update data and selected.indices of self.source """
if self.verbose:
print("Updating source")
self.source.data = self.source_data
if self.source.selected is not None:
self.source.selected.indices = self.selection
for c in self.callbacks["update_source"]:
c()
self.pending_update = False
if self.update_buffer is not None:
self.context.doc.add_next_tick_callback(self.update_buffer)
self.update_buffer = None
def add_callback(self, method, callback):
""" Add a callback to one of this instance's methods.
Parameters
----------
method : str
The name of the method this callback will be attached to.
callback : callable
The callback function.
"""
if method not in self.callbacks:
raise ValueError("Unrecognized method name: " + str(method))
if callback in self.callbacks[method]:
raise ValueError(
str(callback) + " has already been attached to this instance."
)
self.callbacks[method].append(callback)
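# Hedged usage sketch (not part of the original module): wiring a callback to
# an InteractiveDataHandler so it fires each time the ColumnDataSource has been
# refreshed.  The handler and callback names below are illustrative only.
def _example_register_callback(handler):
    def on_source_updated():
        print("source updated with %d rows" % len(handler.source.data["index"]))
    handler.add_callback("update_source", on_source_updated)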
class ResamplingDataHandler(InteractiveDataHandler):
"""
Parameters
----------
data : pandas DataFrame
factor : numeric
lowpass : bool, default False
context : TimeseriesViewer, optional
with_range : bool, default True
"""
def __init__(
self,
data,
factor,
lowpass=False,
context=None,
with_range=True,
verbose=False,
):
self.data = data
self.factor = factor
self.lowpass = lowpass
self.context = context
self.verbose = verbose
if with_range:
self.source_data = self.get_dict_from_range(
self.data.index[0], self.data.index[-1]
)
self.source = ColumnDataSource(self.source_data)
else:
self.source = ColumnDataSource(self.data)
self.source.add(self.data.index, "index")
self.source_data = self.source.data
self.selection_bounds = None
self.selection = []
self.pending_update = False
self.update_buffer = None
self.callbacks = {
"update_data": [],
"reset_data": [],
"update_source": [],
}
@staticmethod
def from_range(data, max_samples, start, end, lowpass):
""" Get sub-sampled pandas DataFrame from index range.
Parameters
----------
data : pandas DataFrame
The data to be sub-sampled
        max_samples : numeric
            The maximum number of points in the returned slice; the
            decimation step is derived from it.
        start : numeric
            The start of the range to be sub-sampled.
        end : numeric
            The end of the range to be sub-sampled.
        lowpass : bool
            If True, apply a low-pass (Butterworth) filter before decimating.
Returns
-------
data_new : pandas DataFrame
A sub-sampled slice of the data.
"""
# handle the case of no data
if data.shape[0] == 0:
return data
if start is None:
start = 0
else:
try:
start = data.index.get_loc(start, method="nearest")
except InvalidIndexError:
# handle non-ordered/non-unique index
start = np.argmin(np.abs(data.index - start))
if end is None:
end = data.shape[0]
else:
try:
end = data.index.get_loc(end, method="nearest") + 1
except InvalidIndexError:
# handle non-ordered/non-unique index
end = np.argmin(np.abs(data.index - end)) + 1
step = int(np.ceil((end - start) / max_samples))
# TODO: handle NaNs at start/end
if step == 0:
# hacky solution for range reset
data_new = pd.concat((data.iloc[:1], data.iloc[-1:]))
else:
data_new = data.iloc[start:end]
if step > 1 and lowpass:
# TODO make this work
from scipy.signal import butter, filtfilt
for c in data_new.columns:
if c != "selected":
coefs = butter(3, 1 / step)
data_new[c] = filtfilt(
coefs[0], coefs[1], data_new.loc[:, c]
)
data_new = data_new.iloc[::step]
# hacky solution for range reset
if start > 0:
data_new = pd.concat((data.iloc[:1], data_new))
if end < data.shape[0] - 1:
            data_new = pd.concat((data_new, data.iloc[-1:]))
return data_new
def get_range(self, start=None, end=None):
""" Get the range of valid indexes for the data to be displayed.
Parameters
----------
start : numeric
The start of the range to be displayed.
end : numeric
The end of the range to be displayed.
Returns
-------
start : numeric
The adjusted start.
end : numeric
The adjusted end.
"""
# handle the case of no data
if self.data.shape[0] == 0 or self.source.data["index"].shape[0] == 0:
return None, None
first_source_idx = self.source.data["index"][0]
last_source_idx = self.source.data["index"][-1]
# convert to timestamp if necessary
if isinstance(self.data.index, pd.DatetimeIndex):
            start = pd.to_datetime(start, unit="ms")
# -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2020
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
# System import
import unittest
import copy
import sys
import numpy as np
import pandas as pd
import unittest.mock as mock
from unittest.mock import patch
# Package import
from pynet.datasets.core import DataManager, ArrayDataset
class TestDataManager(unittest.TestCase):
""" Test the DataManager class.
"""
def setUp(self):
""" Setup test.
"""
self.input_arr = np.ones((10, 1, 4, 4))
for item in range(self.input_arr.shape[0]):
self.input_arr[item] *= item
self.output_arr = np.ones((10, 2, 4, 4))
offset = self.input_arr.shape[0]
for item in range(offset, self.output_arr.shape[0] + offset):
self.output_arr[item - offset] *= item
data = {
"label": ["group1"] * 5 + ["group2"] * 5,
"age": np.random.randint(20, 60, 10),
"sex": ["M"] * 6 + ["F"] * 4
}
        self.metadata = pd.DataFrame.from_dict(data)
#! python3
# 2019-03-27 by recs
# ===check the current owner of type licenses===
import os
import pandas as pd
from spareparts.lib.settings import temp_jde, tempo_local
index_manual = ["How to fill fileds in the Data Tab", "Unnamed: 1", "Unnamed: 2"]
index_auto = [
"Item Number",
"Number(Drawing)",
"Quantity",
"Equipment",
"Module",
"Level of significance",
"Category",
"Other Information",
"UOM",
"ST",
"Description 1",
"Description 2",
"Search Text",
"Unit Cost",
"Extended Cost",
"jdelitm",
"prp1",
"prp2",
"file_name",
"Type",
"DIM",
"Comm Class",
"Supplier",
"Item Pool",
]
# Path to temporary_jde.csv in windows OS.
if os.path.exists(os.path.join(tempo_local, temp_jde)):
path_to_jde = os.path.join(tempo_local, temp_jde)
else:
print("the temporary jde file is not in the TEMPO of RECS")
def extract_items_auto(file):
"""
Extraction column: item number
"""
data = pd.read_excel(file, sheet_name="spl", header=0, usecols="A", dtype={0: str})
data["Item Number"] = data["Item Number"].str.strip()
data = data.dropna(how="all")
serie = pd.Series(data["Item Number"])
serie = serie.unique().tolist()
return set(serie)
def extract_items_manual(file):
"""
Extraction column: item number
"""
data = pd.read_excel(file, sheet_name="Data", header=0, usecols="A", dtype={0: str})
data.columns = ["items"]
data["items"] = data["items"].str.strip()
data = data.dropna(how="all")
serie = pd.Series(data["items"])
serie = serie.unique().tolist()[1:]
return set(serie)
def parsing_items(name_file):
name_file = str(name_file)
if pd.read_excel(name_file).columns.tolist() == index_manual:
return extract_items_manual(name_file)
    elif pd.read_excel(name_file).columns.tolist() == index_auto:
        return extract_items_auto(name_file)
import pandas as pd
from unittest import TestCase # or `from unittest import ...` if on Python 3.4+
import numpy as np
import category_encoders as encoders
X = pd.DataFrame({
'none': [
'A', 'A', 'B', None, None, 'C', None, 'C', None, 'B',
'A', 'A', 'C', 'B', 'B', 'A', 'A', None, 'B', None
],
'na_categorical': [
'A', 'A', 'C', 'A', 'B', 'C', 'C', 'A', np.nan, 'B', 'A',
'C', 'C', 'A', 'B', 'C', np.nan, 'A', np.nan, np.nan
]
})
X_t = pd.DataFrame({
'none': [
'A', 'C', None, 'B', 'C', 'C', None, None, 'A',
'A', 'C', 'A', 'B', 'A', 'A'
],
'na_categorical': [
'C', 'C', 'A', 'B', 'C', 'A', np.nan, 'B', 'A', 'A',
'B', np.nan, 'A', np.nan, 'A'
]
})
class TestCountEncoder(TestCase):
def test_count_defaults(self):
"""Test the defaults are working as expected on 'none' and 'categorical'
which are the most extreme edge cases for the count encoder."""
enc = encoders.CountEncoder(verbose=1)
enc.fit(X)
out = enc.transform(X_t)
self.assertTrue(pd.Series([5, 3, 6]).isin(out['none'].unique()).all())
self.assertTrue(out['none'].unique().shape == (3,))
self.assertTrue(out['none'].isnull().sum() == 0)
self.assertTrue(pd.Series([6, 3]).isin(out['na_categorical']).all())
self.assertTrue(out['na_categorical'].unique().shape == (4,))
self.assertTrue(enc.mapping is not None)
def test_count_handle_missing_string(self):
"""Test the handle_missing string on 'none' and 'na_categorical'."""
enc = encoders.CountEncoder(
handle_missing='return_nan'
)
enc.fit(X)
out = enc.transform(X_t)
self.assertIn('none', enc._handle_missing)
self.assertTrue(pd.Series([6, 5, 3]).isin(out['none']).all())
self.assertTrue(out['none'].unique().shape == (4,))
self.assertTrue(out['none'].isnull().sum() == 3)
self.assertTrue(pd.Series([6, 7, 3]).isin(out['na_categorical']).all())
self.assertFalse(pd.Series([4]).isin(out['na_categorical']).all())
self.assertTrue(out['na_categorical'].unique().shape == (4,))
self.assertTrue(out['na_categorical'].isnull().sum() == 3)
def test_count_handle_missing_dict(self):
"""Test the handle_missing dict on 'none' and 'na_categorical'.
        We want to see differing behaviour between 'none' and 'na_cat' cols."""
enc = encoders.CountEncoder(
handle_missing={'na_categorical': 'return_nan'}
)
enc.fit(X)
out = enc.transform(X_t)
self.assertIn('none', enc._handle_missing)
self.assertTrue(pd.Series([5, 3, 6]).isin(out['none']).all())
self.assertTrue(out['none'].unique().shape == (3,))
self.assertTrue(out['none'].isnull().sum() == 0)
self.assertTrue(pd.Series([6, 7, 3]).isin(out['na_categorical']).all())
self.assertFalse(pd.Series([4]).isin(out['na_categorical']).all())
self.assertTrue(out['na_categorical'].unique().shape == (4,))
self.assertTrue(out['na_categorical'].isnull().sum() == 3)
def test_count_handle_unknown_string(self):
"""Test the handle_unknown string on 'none' and 'na_categorical'.
The 'handle_missing' must be set to 'return_nan' in order to test
        'handle_unknown' correctly."""
enc = encoders.CountEncoder(
handle_missing='return_nan',
handle_unknown='return_nan',
)
enc.fit(X)
out = enc.transform(X_t)
self.assertIn('none', enc._handle_unknown)
self.assertTrue(pd.Series([6, 5, 3]).isin(out['none']).all())
self.assertTrue(out['none'].unique().shape == (4,))
self.assertTrue(out['none'].isnull().sum() == 3)
self.assertTrue(pd.Series([3, 6, 7]).isin(out['na_categorical']).all())
self.assertTrue(out['na_categorical'].unique().shape == (4,))
self.assertTrue(out['na_categorical'].isnull().sum() == 3)
def test_count_handle_unknown_dict(self):
"""Test the 'handle_unkown' dict with all non-default options."""
enc = encoders.CountEncoder(
handle_missing='return_nan',
handle_unknown={
'none': -1,
'na_categorical': 'return_nan'
},
)
enc.fit(X)
out = enc.transform(X_t)
self.assertIn('none', enc._handle_unknown)
self.assertTrue(pd.Series([6, 5, 3, -1]).isin(out['none']).all())
self.assertTrue(out['none'].unique().shape == (4,))
self.assertTrue(out['none'].isnull().sum() == 0)
self.assertTrue(pd.Series([3, 6, 7]).isin(out['na_categorical']).all())
self.assertTrue(out['na_categorical'].unique().shape == (4,))
self.assertTrue(out['na_categorical'].isnull().sum() == 3)
def test_count_min_group_size_int(self):
"""Test the min_group_size int on 'none' and 'na_categorical'."""
enc = encoders.CountEncoder(min_group_size=7)
enc.fit(X)
out = enc.transform(X_t)
self.assertTrue(pd.Series([6, 5, 3]).isin(out['none']).all())
self.assertTrue(out['none'].unique().shape == (3,))
self.assertTrue(out['none'].isnull().sum() == 0)
self.assertIn(np.nan, enc.mapping['none'])
self.assertTrue(pd.Series([13, 7]).isin(out['na_categorical']).all())
self.assertTrue(out['na_categorical'].unique().shape == (2,))
self.assertIn('B_C_nan', enc.mapping['na_categorical'])
self.assertFalse(np.nan in enc.mapping['na_categorical'])
def test_count_min_group_size_dict(self):
"""Test the min_group_size dict on 'none' and 'na_categorical'."""
enc = encoders.CountEncoder(
min_group_size={'none': 6, 'na_categorical': 7}
)
enc.fit(X)
out = enc.transform(X_t)
self.assertIn('none', enc._min_group_size)
self.assertTrue(pd.Series([6, 8]).isin(out['none']).all())
self.assertEqual(out['none'].unique().shape[0], 2)
self.assertTrue(out['none'].isnull().sum() == 0)
self.assertIn(np.nan, enc.mapping['none'])
self.assertTrue(pd.Series([13, 7]).isin(out['na_categorical']).all())
self.assertTrue(out['na_categorical'].unique().shape == (2,))
self.assertIn('B_C_nan', enc.mapping['na_categorical'])
self.assertFalse(np.nan in enc.mapping['na_categorical'])
def test_count_combine_min_nan_groups_bool(self):
"""Test the min_nan_groups_bool on 'none' and 'na_categorical'."""
enc = encoders.CountEncoder(
min_group_size=7,
combine_min_nan_groups=False
)
enc.fit(X)
out = enc.transform(X_t)
self.assertTrue(pd.Series([6, 5, 3]).isin(out['none']).all())
self.assertEqual(out['none'].unique().shape[0], 3)
self.assertEqual(out['none'].isnull().sum(), 0)
self.assertTrue(pd.Series([9, 7, 4]).isin(out['na_categorical']).all())
self.assertEqual(out['na_categorical'].unique().shape[0], 3)
self.assertTrue(enc.mapping is not None)
self.assertIn(np.nan, enc.mapping['na_categorical'])
def test_count_combine_min_nan_groups_dict(self):
"""Test the combine_min_nan_groups dict on 'none' and 'na_categorical'."""
enc = encoders.CountEncoder(
min_group_size={
'none': 6,
'na_categorical': 7
},
combine_min_nan_groups={
'none': 'force',
'na_categorical': False
}
)
enc.fit(X)
out = enc.transform(X_t)
self.assertIn('none', enc._combine_min_nan_groups)
self.assertTrue(pd.Series([14, 6]).isin(out['none']).all())
self.assertEqual(out['none'].unique().shape[0], 2)
self.assertEqual(out['none'].isnull().sum(), 0)
self.assertTrue(pd.Series([9, 7, 4]).isin(out['na_categorical']).all())
self.assertEqual(out['na_categorical'].unique().shape[0], 3)
self.assertTrue(enc.mapping is not None)
self.assertIn(np.nan, enc.mapping['na_categorical'])
def test_count_min_group_name_string(self):
"""Test the min_group_name string on 'none' and 'na_categorical'."""
enc = encoders.CountEncoder(
min_group_size=6,
min_group_name='dave'
)
enc.fit(X)
self.assertIn('dave', enc.mapping['none'])
self.assertEqual(enc.mapping['none']['dave'], 8)
self.assertIn('dave', enc.mapping['na_categorical'])
self.assertEqual(enc.mapping['na_categorical']['dave'], 7)
def test_count_min_group_name_dict(self):
"""Test the min_group_name dict on 'none' and 'na_categorical'."""
enc = encoders.CountEncoder(
min_group_size={
'none': 6, 'na_categorical': 6
},
min_group_name={
'none': 'dave', 'na_categorical': None
}
)
enc.fit(X)
self.assertIn('none', enc._min_group_name)
self.assertIn('dave', enc.mapping['none'])
self.assertEqual(enc.mapping['none']['dave'], 8)
self.assertIn('B_nan', enc.mapping['na_categorical'])
self.assertEqual(enc.mapping['na_categorical']['B_nan'], 7)
def test_count_normalize_bool(self):
"""Test the normalize bool on 'none' and 'na_categorical'."""
enc = encoders.CountEncoder(
min_group_size=6,
normalize=True
)
enc.fit(X)
out = enc.transform(X_t)
self.assertIn('none', enc._normalize)
self.assertTrue(out['none'].round(5).isin([0.3, 0.4]).all())
self.assertEqual(out['none'].unique().shape[0], 2)
self.assertEqual(out['none'].isnull().sum(), 0)
self.assertTrue(pd.Series([0.3, 0.35]).isin(out['na_categorical']).all())
self.assertEqual(out['na_categorical'].unique().shape[0], 2)
self.assertTrue(enc.mapping is not None)
def test_count_normalize_dict(self):
"""Test the normalize dict on 'none' and 'na_categorical'."""
enc = encoders.CountEncoder(
min_group_size=7,
normalize={
'none': True, 'na_categorical': False
}
)
enc.fit(X)
out = enc.transform(X_t)
self.assertIn('none', enc._normalize)
self.assertTrue(out['none'].round(5).isin([0.3 , 0.15, 0.25]).all())
self.assertEqual(out['none'].unique().shape[0], 3)
self.assertEqual(out['none'].isnull().sum(), 0)
        self.assertTrue(pd.Series([13, 7]).isin(out['na_categorical']).all())
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from math import ceil, floor
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
train_data = pd.read_csv('train.csv')
train_labels = train_data['ACTION']
del train_data['ACTION']
train_data.shape
# role_title and role_code give the same information
# check this
len(set(train_data['ROLE_CODE'].values))
len(set(train_data['ROLE_TITLE'].values))
# check that there is a unique title for each code
for code in set(train_data['ROLE_CODE']):
code_ind = np.equal(train_data['ROLE_CODE'], code)
titles = set(train_data[code_ind]['ROLE_TITLE'])
if len(titles) > 1:
print(f'Mismatch for code {code}')
break
print(f'Code {code} = title {titles}')
del train_data['ROLE_CODE']
train_data.shape
# if a factor level has 20 or fewer members, change the level to a value of zero
for col in range(0, train_data.shape[1]):
levels = train_data.iloc[:, col].value_counts().index.values
vals_to_change = [level for level in levels if train_data.iloc[:, col].value_counts()[level] <= 20]
n_rows = (train_data.iloc[:, col]).shape[0]
for row in range(n_rows):
print(f'Column {col}, row {row} of {n_rows}')
if train_data.iat[row, col] in vals_to_change:
train_data.iat[row, col] = 0
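# Hedged aside (not from the original script): the cell-by-cell loop above can
# also be written with vectorized pandas operations.  This sketch assumes the
# same rule (levels with 20 or fewer occurrences collapse to 0) and is shown
# for illustration only; the script itself keeps using the loop's result.
def collapse_rare_levels(df, min_count=20):
    out = df.copy()
    for col in out.columns:
        counts = out[col].value_counts()
        rare = counts[counts <= min_count].index
        # keep frequent levels, replace rare ones with 0
        out[col] = out[col].where(~out[col].isin(rare), 0)
    return out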
# encode features as categorical variables
train_categorical = pd.get_dummies(train_data, columns=train_data.columns)
train_categorical.shape
'''
# compress the data with factor analysis
from sklearn.decomposition import FactorAnalysis
compressor = FactorAnalysis(n_components=200)
train_compressed = compressor.fit_transform(train_categorical, y_train)
'''
# check class distribution
train_labels.value_counts() # data is unbalanced – don't optimize accuracy directly
# test/train split
X_train, X_test, y_train, y_test = train_test_split(train_categorical, train_labels.values, test_size=0.3)
# X_train, X_test, y_train, y_test = train_test_split(train_data, train_labels, test_size=0.3)
# duplicate data points from the minority class
# y_train.value_counts()
n_maj, n_min = list(pd.DataFrame(y_train).value_counts())
# number of times to replicate each minority class memeber
n_repeat = floor(n_maj / n_min)
# minority class indices (indices with 0)
min_ind = np.equal(y_train, 0)
X_train_aug = X_train
for _ in range(n_repeat):
X_train_aug = np.concatenate((X_train_aug, X_train[min_ind]), axis=0)
X_train_aug.shape
y_train_aug = y_train
for _ in range(n_repeat):
y_train_aug = np.concatenate((y_train_aug, y_train[min_ind]), axis=0)
# shuffle the indices
perm = np.random.permutation(range(X_train_aug.shape[0]))
X_train_aug = X_train_aug[perm, :]
y_train_aug = y_train_aug[perm]
#####################################
# fit a random forest classifier
#####################################
rf = RandomForestClassifier(verbose=1, n_jobs=4, n_estimators=300)
rf.fit(X_train_aug, y_train_aug)
# make predictions using fitted model
predictions = rf.predict(X_test)
# class balance of predictions
pd.DataFrame(predictions).value_counts()
# calculate the cost-adjusted accuracy
print(metrics.classification_report(y_test, predictions))
print(metrics.confusion_matrix(y_test, predictions))
print(metrics.roc_auc_score(y_test, predictions))
#####################################
# build a neural network to fit
#####################################
import tensorflow as tf
# ensure arrays are in numpy form; reshape the labels into a column vector
X_train_aug = np.asarray(X_train_aug)
y_train_aug = y_train_aug.reshape((y_train_aug.shape[0], 1))
'''
X_train_nn = X_train
y_train_nn = y_train.reshape((y_train.shape[0], 1))
'''
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(100, input_shape=(X_train_aug.shape[1],), activation='relu'),
tf.keras.layers.Dropout(0.3),
tf.keras.layers.Dense(1000, activation='relu'),
tf.keras.layers.Dropout(0.3),
tf.keras.layers.Dense(1, activation='sigmoid')
])
'''
# logistic regression
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(1, input_shape=(X_train_aug.shape[1], ), activation='sigmoid'),
])
'''
model.summary()
model.compile(optimizer='Adam', loss='binary_crossentropy', weighted_metrics=['acc'])
history = model.fit(X_train_aug, y_train_aug, batch_size=256, epochs=100, verbose=1, validation_split=0.2,
class_weight={1: n_repeat / (n_repeat + 1), 0: 1 / n_repeat})
# make predictions
predictions_nn = [1 if y > 0.5 else 0 for y in model.predict(X_test)]
# check class balance
pd.DataFrame(predictions_nn).value_counts()
# calculate area under ROC curve
print(metrics.classification_report(y_test, predictions_nn))
print(metrics.confusion_matrix(y_test, predictions_nn))
print(metrics.roc_auc_score(y_test, predictions_nn))
############################
# cross validation to get a better estimate of the performance
############################
def convert_train_to_categorical(X_train_raw):
X_train_processed = X_train_raw.copy(deep=True)
for col in range(0, X_train_processed.shape[1]):
levels = X_train_processed.iloc[:, col].value_counts().index.values
vals_to_change = [level for level in levels if X_train_processed.iloc[:, col].value_counts()[level] <= 20]
print(f'Column {col}')
n_rows = (X_train_processed.iloc[:, col]).shape[0]
for row in range(n_rows):
if X_train_processed.iat[row, col] in vals_to_change:
X_train_processed.iat[row, col] = 0
# encode features as categorical variables
    train_categorical = pd.get_dummies(X_train_processed, columns=X_train_processed.columns)
    return train_categorical
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import json
import logging
from typing import List, Optional, Any, Dict, Union, Tuple
import numpy as np
import torch
from kats.consts import TimeSeriesData
from kats.tsfeatures.tsfeatures import TsFeatures
from numba import jit
from torch import Tensor
from torch.nn.modules.loss import _Loss
import pandas as pd
all_validation_metric_name = ["smape", "sbias", "exceed"]
"""
A module for utility functions of global models, including:
1) Helper functions for preprocessing and calculating error metrics.
2) NN cells Classes and RNN Class: :class:`LSTM2Cell`, :class:`S2Cell`, and :class:`DilatedRNNStack`.
3) Loss function Classes: :class:`PinballLoss` and :class:`AdjustedPinballLoss`.
4) Basic Classes for global model hyper-parameters and time series features: :class:`GMParam` and :class:`GMFeature`.
"""
# for jit
NoneT = torch.FloatTensor([-1e38])
# Define all possible gmfeatures
all_possible_gmfeatures = [
"last_date",
"simple_date",
"tsfeatures",
"ts2vec",
"last_hour",
"last_hour_minute",
"last_month",
]
@jit
def get_filters(isna_idx, seasonality) -> np.ndarray:
"""Helper function for adding NaNs to time series.
Args:
isna_idx: A np.ndarry indicating whether the corresponding element is NaN or not.
seasonality: An integer indicating the seasonality period.
Returns:
A `numpy.ndarray` object representing whether or not to keep the corresponding element.
"""
n = len(isna_idx)
i = 0
flips = []
while i < n:
if isna_idx[i]:
cnt = 1
j = i + 1
while j < n and isna_idx[j]:
cnt += 1
j += 1
if cnt >= seasonality:
diff = cnt % seasonality
flips.append((i + diff, j))
i = j
else:
i += 1
filters = np.array([True] * n)
for (i, j) in flips:
filters[i:j] = False
return filters
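# Hedged illustration (not part of the original module): how get_filters trims
# a run of NaNs so that the number of dropped NaNs is a multiple of seasonality.
def _example_get_filters():
    isna = np.array([False, True, True, True, True, True, False])
    # the run of 5 NaNs with seasonality 2 keeps 5 % 2 = 1 NaN and drops 4,
    # so the returned mask is [True, True, False, False, False, False, True]
    return get_filters(isna, 2)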
def fill_missing_value_na(
ts: TimeSeriesData,
seasonality: int,
freq: Optional[Union[str, pd.Timedelta]] = None,
) -> TimeSeriesData:
"""Padding holes in time series with NaNs, such that the timestamp difference between any two consecute timestamps is either zero or a multipler of seasonality.
Args:
ts: A :class:`kats.consts.TimeSeriesData` object representing the time series to be padded.
        seasonality: An integer representing the period of seasonality; it should be a positive integer.
freq: A string or a `pandas.Timedelta` object representing the frequency of time series data.
Returns:
A :class:`kats.consts.TimeSeriesData` object representing the padded time series.
"""
if freq is None:
freq = ts.infer_freq_robust()
elif isinstance(freq, str):
try:
if freq[0].isdigit():
freq = pd.to_timedelta(freq)
else:
freq = pd.to_timedelta("1" + freq)
except Exception as e:
msg = f"Fail to convert freq to pd.Timedelta with error message {e}."
logging.error(msg)
raise ValueError(msg)
elif not isinstance(freq, pd.Timedelta):
msg = f"freq should be either str or pd.Timedela but receives {type(freq)}."
logging.error(msg)
raise ValueError(msg)
if len(ts) == (ts.time.max() - ts.time.min()) / freq or seasonality == 1:
return ts
else:
df = ts.to_dataframe()
col_name = [t for t in df.columns.values if t != ts.time_col_name][0]
time_name = ts.time_col_name
all_ds = pd.DataFrame(
pd.date_range(df.time.iloc[0], df.time.iloc[-1], freq=freq),
columns=[time_name],
)
all_ds = all_ds.merge(df, on=time_name, how="left")
isna_idx = all_ds[col_name].isna().values
filters = get_filters(isna_idx, seasonality)
return TimeSeriesData(all_ds.loc[filters])
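# Hedged illustration (not part of the original module): padding a daily series
# that has a four-day hole.  With seasonality 2 the hole is a clean multiple of
# the season length, so after padding and filtering no NaN rows are retained.
def _example_fill_missing_value_na():
    times = pd.to_datetime(["2021-01-01", "2021-01-02", "2021-01-07", "2021-01-08"])
    ts = TimeSeriesData(pd.DataFrame({"time": times, "value": [1.0, 2.0, 3.0, 4.0]}))
    return fill_missing_value_na(ts, seasonality=2, freq="D")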
def split(
splits: int,
overlap: bool,
train_TSs: Union[Dict[Any, TimeSeriesData], List[TimeSeriesData]],
valid_TSs: Union[Dict[Any, TimeSeriesData], List[TimeSeriesData], None],
) -> List[Tuple[Dict[Any, TimeSeriesData], Optional[Dict[Any, TimeSeriesData]]]]:
"""Split dataset into sub-datasets.
Args:
splits: An integer representing the number of sub-datasets to create.
overlap: A boolean indicating whether the sub-datasets overlap with each other.
train_TSs: A dictionary or a list of :class:`kats.consts.TimeSeriesData` objects representing the training time series.
valid_TSs: A dictionary or a list of :class:`kats.consts.TimeSeriesData` objects representing the validation time series.
Return:
A list of tuples of dictionaries of :class:`kats.consts.TimeSeriesData` objects. Each element t is a tuple, t[0] is a dictionary of training time series and t[1] is a dictionary of validation time series.
"""
n = len(train_TSs)
keys = (
np.array(list(train_TSs.keys()))
if isinstance(train_TSs, dict)
else np.arange(n)
)
if splits == 1: # no need to split the dataset
return [
(
{t: train_TSs[t] for t in keys},
{t: valid_TSs[t] for t in keys} if valid_TSs is not None else None,
)
]
m = n // splits
if m == 0:
msg = f"Fail to split {n} time series into {splits} subsets."
logging.error(msg)
raise ValueError(msg)
seps = list(range(0, n, m))
if len(seps) == splits + 1:
seps[-1] = n
else:
seps.append(n)
index = []
for i in range(splits):
tmp = np.array([False] * n)
tmp[seps[i] : seps[i + 1]] = True
index.append(tmp)
if overlap:
split_data = [
(
{t: train_TSs[t] for t in keys[~index[i]]},
{t: valid_TSs[t] for t in keys[~index[i]]}
if valid_TSs is not None
else None,
)
for i in range(splits)
]
else:
split_data = [
(
{t: train_TSs[t] for t in keys[index[i]]},
{t: valid_TSs[t] for t in keys[index[i]]}
if valid_TSs is not None
else None,
)
for i in range(splits)
]
return split_data
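# Hedged illustration (not part of the original module): splitting four toy
# series into two non-overlapping subsets; the keys and values used here are
# placeholders for real training data.
def _example_split_usage():
    ts = {
        name: TimeSeriesData(
            pd.DataFrame({"time": pd.date_range("2021-01-01", periods=5), "value": np.arange(5.0)})
        )
        for name in ["a", "b", "c", "d"]
    }
    parts = split(2, False, ts, None)
    # parts[0][0] and parts[1][0] each hold two of the four series; the
    # validation entries are None because valid_TSs was None.
    return parts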
class LSTM2Cell(torch.nn.Module):
"""A modified version of LSTM cell where the output (of size=state_size) is split between h state (of size=h_size) and
the real output that goes to the next layer (of size=state_size-h_size)
Attributes:
input_size: An integer representing the number of expected features in the input tensor.
h_size: An integer representing h state size.
state_size: An integer representing c state size.
"""
def __init__(self, input_size: int, h_size: int, state_size: int):
super(LSTM2Cell, self).__init__()
self.lxh = torch.nn.Linear(input_size + 2 * h_size, 4 * state_size)
self.h_size = h_size
self.out_size = state_size - h_size
# jit does not like Optional, so we have to use bool variables and NoneT
def forward(
self,
input_t: Tensor,
has_prev_state: bool,
has_delayed_state: bool,
prev_h_state: Tensor = NoneT,
delayed_h_state: Tensor = NoneT,
c_state: Tensor = NoneT,
) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
"""Forward function of LSTM2Cell.
Args:
input_t: A `torch.Tensor` object representing input features of shape (batch_size, input_size)
has_prev_state : A boolean specifying whether or not to have previous state.
has_delayed_state: A boolean specifying whether or not to have delayed state.
            prev_h_state: Optional; A `torch.Tensor` object representing previous h_state of shape (batch_size, h_size). Default is NoneT = torch.FloatTensor([-1e38]) (i.e., will not be used).
delayed_h_state: Optional; A `torch.Tensor` object representing delayed h_state of shape (batch_size, h_size). Default is NoneT = torch.FloatTensor([-1e38]) (i.e., will not be used).
c_state: Optional; A `torch.Tensor` object representing c_state of shape (batch_size, state_size). Default is NoneT = torch.FloatTensor([-1e38]) (i.e., will not be used).
Returns:
output_t, (h_state, new_state), where output_t is `torch.Tensor` object representing outputs of shape (batch_size, state_size-h_size);
h_state is a `torch.Tensor` object representing the next h_state of shape (batch_size, h_size);
new_state is a `torch.Tensor` object representing the next c_state of shape (batch_size, state_size).
"""
if has_delayed_state:
xh = torch.cat([input_t, prev_h_state, delayed_h_state], dim=1)
elif has_prev_state:
xh = torch.cat([input_t, prev_h_state, prev_h_state], dim=1)
else:
empty_h_state = torch.zeros(
input_t.shape[0], 2 * self.h_size, dtype=torch.float32
)
xh = torch.cat([input_t, empty_h_state], dim=1)
gates = self.lxh(xh)
chunked_gates = torch.chunk(gates, 4, dim=1)
forget_gate = (chunked_gates[0] + 1).sigmoid()
in_gate = chunked_gates[1].sigmoid()
out_gate = chunked_gates[2].sigmoid()
new_state = chunked_gates[3].tanh()
if has_prev_state:
new_state = (forget_gate * c_state) + (in_gate * new_state)
whole_output = out_gate * new_state.tanh()
output_t, h_state = torch.split(
whole_output, [self.out_size, self.h_size], dim=1
)
return output_t, (h_state, new_state)
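# Hedged illustration (not part of the original module): a single LSTM2Cell
# step without any previous or delayed state.  Sizes are arbitrary examples.
def _example_lstm2cell_step():
    cell = LSTM2Cell(input_size=4, h_size=3, state_size=8)
    x = torch.randn(2, 4)  # batch of 2 inputs
    out, (h, c) = cell(x, has_prev_state=False, has_delayed_state=False)
    # out has state_size - h_size = 5 features, h has 3, c has 8
    return out.shape, h.shape, c.shape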
class S2Cell(torch.nn.Module):
"""Slawek's S2 cell.
    An NN cell that mixes GRU and LSTM behavior, and which also splits its output into the h state and the "real output".
Attributes:
input_size: int
The number of expected features in the input tensor.
h_size: int
The number of expected features in the h_state.
state_size: int
The number of expected features in the c_state.
"""
def __init__(self, input_size: int, h_size: int, state_size: int):
super(S2Cell, self).__init__()
self.lxh = torch.nn.Linear(input_size + 2 * h_size, 4 * state_size)
self.h_size = h_size
self.state_size = state_size
self.out_size = state_size - h_size
# jit does not like Optional, so we have to use bool variables and NoneT
def forward(
self,
input_t: Tensor,
has_prev_state: bool,
has_delayed_state: bool,
prev_h_state: Tensor = NoneT,
delayed_h_state: Tensor = NoneT,
prev_c_state: Tensor = NoneT,
delayed_c_state: Tensor = NoneT,
) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
"""Forward method of S2Cell module.
Args:
input_t: A `torch.Tensor` object representing input features of shape (batch_size, input_size).
has_prev_state : A boolean specifying whether or not to have previous state.
has_delayed_state: A boolean specifying whether or not to have delayed state.
            prev_h_state: Optional; A `torch.Tensor` object representing previous h_state of shape (batch_size, h_size). Default is NoneT = torch.FloatTensor([-1e38]) (i.e., will not be used).
delayed_h_state: Optional; A `torch.Tensor` object representing delayed h_state of shape (batch_size, h_size). Default is NoneT = torch.FloatTensor([-1e38]) (i.e., will not be used).
prev_c_state: Optional; A `torch.Tensor` object representing previous c_state of shape (batch_size, state_size). Default is NoneT = torch.FloatTensor([-1e38]) (i.e., will not be used).
delayed_c_state: A `torch.Tensor` object representing delayed c_state of shape (batch_size, state_size). Default is NoneT = torch.FloatTensor([-1e38]) (i.e., will not be used).
Returns:
A tuple of `torch.tensor` objects, (i.e., output_t, (h_state, new_stat)), where output_t is `torch.Tensor` object representing outputs of shape (batch_size, state_size-h_size);
h_state is a `torch.Tensor` object representing the next h_state of shape (batch_size, h_size); new_state is a `torch.Tensor` object representing the next c_state of shape (batch_size, state_size).
"""
if has_delayed_state:
xh = torch.cat([input_t, prev_h_state, delayed_h_state], dim=1)
elif has_prev_state:
xh = torch.cat([input_t, prev_h_state, prev_h_state], dim=1)
else:
empty_h_state = torch.zeros(
input_t.shape[0], 2 * self.h_size, dtype=torch.float
)
xh = torch.cat([input_t, empty_h_state], dim=1)
gates = self.lxh(xh)
chunked_gates = torch.chunk(gates, 4, dim=1)
forget_gate = (chunked_gates[0] + 1).sigmoid()
new_stat = chunked_gates[1].tanh()
out_gate = chunked_gates[3].sigmoid()
if has_prev_state:
if has_delayed_state:
alpha = chunked_gates[2].sigmoid()
weighted_c_state = alpha * prev_c_state + (1 - alpha) * delayed_c_state
else:
weighted_c_state = prev_c_state
new_stat = forget_gate * weighted_c_state + (1 - forget_gate) * new_stat
whole_output = out_gate * new_stat
output_t, h_state = torch.split(
whole_output, [self.out_size, self.h_size], dim=1
)
return output_t, (h_state, new_stat)
class DilatedRNNStack(torch.nn.Module):
"""The recurrent neural network module for global model.
Attributes:
        nn_structure: A list of lists of integers representing the structure of the neural network. For example, [[1,3],[6,12]] defines 2 blocks of 2 layers each and an output adaptor layer, with a resNet-style shortcut between output of the first block (output of the second layer)
and output of the second block (output of 4th layer). The positive integers are the dilation number.
cell_name: A string representing the name of the cells, can be 'LSTM', 'LSTM2Cell' or 'S2Cell'.
input_size: An integer representing the number of expected features in the input tensor.
state_size: An integer representing the c state size (which is hidden_size for a standard LSTM cell).
output_size: An integer representing the number of expected features in the final output.
h_size: Optional; An integer representing the number of expected features in h_state. Default is None (i.e., not specified).
jit: Optional; A boolean specifying whether or not to jit each cell. Default is False.
"""
def __init__(
self,
nn_structure: List[List[int]],
cell_name: str,
input_size: int,
state_size: int,
output_size: Optional[int] = None,
h_size=None,
jit=False,
) -> None:
super(DilatedRNNStack, self).__init__()
block_num = len(nn_structure)
self.nn_structure = nn_structure
self.cell_name = cell_name
self.input_size = input_size
self.h_size = h_size
self.jit = jit
self.h_state_store = []
self.c_state_store = []
self.max_dilation = np.max([np.max(t) for t in nn_structure])
self.reset_state()
out_size = self._validate(cell_name, state_size, h_size)
self.cells = []
layer = 0
iblock = 0
for iblock in range(block_num):
for lay in range(len(nn_structure[iblock])):
if lay == 0 and iblock == 0:
tmp_input_size = input_size
else:
tmp_input_size = out_size
if cell_name == "LSTM2Cell":
if jit:
cell = torch.jit.script(
LSTM2Cell(tmp_input_size, h_size, state_size)
)
else:
cell = LSTM2Cell(tmp_input_size, h_size, state_size)
elif cell_name == "S2Cell":
if jit:
cell = torch.jit.script(
S2Cell(tmp_input_size, h_size, state_size)
)
else:
cell = S2Cell(tmp_input_size, h_size, state_size)
else:
cell = torch.nn.LSTMCell(tmp_input_size, state_size)
self.add_module("Cell_{}".format(layer), cell)
self.cells.append(cell)
layer += 1
if isinstance(output_size, int) and output_size > 0:
self.adaptor = torch.nn.Linear(out_size, output_size)
elif output_size is None:
self.adaptor = None
else:
msg = f"output_size should be either None (for encoder) or a positive integer, but receives {output_size}."
logging.error(msg)
raise ValueError(msg)
self.block_num = block_num
self.out_size = out_size
def _validate(self, cell_name: str, state_size: int, h_size: Optional[int]) -> int:
if cell_name not in ["LSTM2Cell", "S2Cell", "LSTM"]:
msg = f"Only support cells 'S2Cell', 'LSTM2Cell', 'LSTM' but receive {cell_name}."
logging.error(msg)
raise ValueError(msg)
if cell_name == "LSTM2Cell" or cell_name == "S2Cell":
if h_size is None:
msg = "h_size should be a positive integer smaller than state_size for LSTM2Cell or S2Cell."
logging.error(msg)
raise ValueError(msg)
if h_size >= state_size:
msg = "h_size should be smaller than state_size."
logging.error(msg)
raise ValueError(msg)
out_size = state_size - h_size
else:
out_size = state_size
return out_size
def prepare_decoder(self, decoder) -> None:
"""Prepare a DilatedRNNStack object used as decoder.
This function copies the last max_dilation tensors in h_state_store and c_state_store to decoder.
Args:
decoder: A :class:`DilatedRNNStack` object representing the decoder.
"""
decoder.h_state_store = self.h_state_store[-self.max_dilation :]
decoder.c_state_store = self.c_state_store[-self.max_dilation :]
return
def _forward_S2Cell(
self,
tmp_input: Tensor,
layer: int,
has_prev_state: bool,
has_delayed_state: bool,
t: int,
ti_1: int,
) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
"""forward function for S2Cell (to avoid lint warning)."""
if has_delayed_state:
output_t, (h_state, new_state) = self.cells[layer](
tmp_input,
has_prev_state,
has_delayed_state,
prev_h_state=self.h_state_store[t - 1][layer],
delayed_h_state=self.h_state_store[ti_1][layer],
prev_c_state=self.c_state_store[t - 1][layer],
delayed_c_state=self.c_state_store[ti_1][layer],
)
elif has_prev_state:
output_t, (h_state, new_state) = self.cells[layer](
tmp_input,
has_prev_state,
has_delayed_state,
prev_h_state=self.h_state_store[t - 1][layer],
prev_c_state=self.c_state_store[t - 1][layer],
)
else:
output_t, (h_state, new_state) = self.cells[layer](tmp_input, False, False)
return output_t, (h_state, new_state)
def _forward_LSTM2Cell(
self,
tmp_input: Tensor,
layer: int,
has_prev_state: bool,
has_delayed_state: bool,
t: int,
ti_1: int,
) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
"""Forward function for LSTM2Cell (to avoid lint warning)."""
if has_delayed_state:
output_t, (h_state, new_state) = self.cells[layer](
tmp_input,
has_prev_state,
has_delayed_state,
prev_h_state=self.h_state_store[t - 1][layer],
delayed_h_state=self.h_state_store[ti_1][layer],
c_state=self.c_state_store[ti_1][layer],
)
elif has_prev_state:
output_t, (h_state, new_state) = self.cells[layer](
tmp_input,
has_prev_state,
has_delayed_state,
prev_h_state=self.h_state_store[t - 1][layer],
c_state=self.c_state_store[t - 1][layer],
)
else:
output_t, (h_state, new_state) = self.cells[layer](tmp_input, False, False)
return output_t, (h_state, new_state)
def forward(self, input_t: Tensor) -> Tensor:
"""Forward method of DilatedRNNStack
Args:
input_t: A `torch.Tensor` object representing input features of shape (batch_size, input_size).
Returns:
A `torch.Tensor` object representing outputs of shape (batch_size, output_size).
"""
prev_block_output = torch.zeros(
input_t.shape[0], self.out_size, dtype=torch.float
)
t = len(self.h_state_store)
self.h_state_store.append([])
self.c_state_store.append([])
output_t = NoneT # just to initialize output_t
has_prev_state = t > 0
layer = 0
for iblock in range(self.block_num):
for lay in range(len(self.nn_structure[iblock])):
if lay == 0:
if iblock == 0:
tmp_input = input_t
else:
tmp_input = prev_block_output
else:
tmp_input = output_t
ti_1 = t - self.nn_structure[iblock][lay]
has_delayed_state = ti_1 >= 0
if self.cell_name == "S2Cell":
output_t, (h_state, new_state) = self._forward_S2Cell(
tmp_input, layer, has_prev_state, has_delayed_state, t, ti_1
)
elif self.cell_name == "LSTM2Cell":
output_t, (h_state, new_state) = self._forward_LSTM2Cell(
tmp_input, layer, has_prev_state, has_delayed_state, t, ti_1
)
else: # LSTM
if has_delayed_state:
h_state, new_state = self.cells[layer](
tmp_input,
(
self.h_state_store[ti_1][layer],
self.c_state_store[ti_1][layer],
),
)
elif has_prev_state:
h_state, new_state = self.cells[layer](
tmp_input,
(
self.h_state_store[t - 1][layer],
self.c_state_store[t - 1][layer],
),
)
else:
h_state, new_state = self.cells[layer](tmp_input)
output_t = h_state
self.h_state_store[t].append(h_state)
self.c_state_store[t].append(new_state)
layer += 1
prev_block_output = output_t + prev_block_output
if self.adaptor is not None:
output_t = self.adaptor(prev_block_output)
else:
output_t = prev_block_output
return output_t
def reset_state(self) -> None:
"""Clear all stored state tensors."""
self.h_state_store = []
self.c_state_store = []
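# Hedged illustration (not part of the original module): building the stack
# described in the docstring example ([[1, 3], [6, 12]]) with S2Cell cells and
# running one forward step.  All sizes below are arbitrary examples.
def _example_dilated_rnn_stack():
    rnn = DilatedRNNStack(
        nn_structure=[[1, 3], [6, 12]],
        cell_name="S2Cell",
        input_size=10,
        state_size=32,
        output_size=5,
        h_size=8,
    )
    x = torch.randn(4, 10)   # batch of 4 feature vectors
    out = rnn(x)             # shape (4, 5); h/c states are stored internally
    rnn.reset_state()        # clear stored states before a new sequence
    return out.shape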
class PinballLoss(_Loss):
"""Pinball Loss function module.
For quantile q (0<q<1), forecast value y_hat and true value y, the pinball loss function is defined as:
pinball(y_hat, y, q)=max((y-y_hat)*q, (y-y_hat)*(q-1)).
For quantiles Q = [q_1, q_2, ..., q_n] and weights W = [w_1, w_2, ..., w_n], forecasts Y_hat=[y_hat_1, ..., yhat_n] and true value y, the weighted pinball loss is defined as:
PinballLoss(Y_hat, Y) = Sum_i^n pinball(y_hat_i, y, q_i)*w_i.
This module provides functionality for computing weighted pinball loss.
Attributes:
quantile: A 1-dimensional `torch.Tensor` object representing the quantiles to be calculated.
weight: Optional; A 1-dimensional `torch.Tensor` object representing the weights for quantiles. Default is torch.Tensor([1/n,..,1/n]) where n the number of quantiles.
reduction: Optional; A string representing the reduction method. Can be 'mean' or 'sum'. Default is 'mean'.
"""
def __init__(
self, quantile: Tensor, weight: Optional[Tensor] = None, reduction: str = "mean"
) -> None:
super(PinballLoss, self).__init__(
size_average=None, reduce=None, reduction=reduction
)
if len(quantile) < 1:
msg = "quantile should not be empty."
logging.error(msg)
raise ValueError(msg)
if len(quantile.size()) != 1:
msg = "quantile should be a 1-dimentional tensor."
logging.error(msg)
raise ValueError(msg)
self.quantile = quantile
self.quantile.requires_grad = False
if weight is None:
d = len(quantile)
weight = torch.ones(d) / d
else:
if weight.size() != quantile.size():
msg = "weight and quantile should have the same size."
logging.error(msg)
raise ValueError(msg)
self.register_buffer("weight", weight)
self.reduction = reduction
def _check(self, input: Tensor, target: Tensor) -> None:
"""
Check input tensor and target tensor size.
"""
if target.size()[0] != input.size()[0]:
msg = "Input batch size is not equal to target batch size."
logging.error(msg)
raise ValueError(msg)
num_feature = target.size()[1] * len(self.quantile)
if input.size()[1] != num_feature:
msg = f"Input should contain {num_feature} features but receive {input.size()[1]}."
logging.error(msg)
raise ValueError(msg)
def forward(self, input: Tensor, target: Tensor) -> Tensor:
"""
Args:
input: A `torch.Tensor` object representing forecasted values of shape (num, n_steps * n_quantiles), where n_quantiles is the length of quantile.
            target: A `torch.Tensor` object containing true values of shape (num, n_steps).
Returns:
A 1-dimensional `torch.Tensor` object representing the computed pinball loss of length the number of quantiles.
"""
self._check(input, target)
n = len(input)
m = len(self.quantile)
horizon = target.size()[1]
nans = torch.isnan(target).detach()
# clean up NaNs to avoid NaNs in gradient
target[nans] = 1.0
num_not_nan = (~nans).float().sum(dim=1)
num_not_nan[num_not_nan == 0] += 1
target = target.repeat(1, m)
nans = nans.repeat(1, m)
quants = self.quantile.repeat(horizon, 1).t().flatten()
weights = self.weight.repeat(horizon, 1).t().flatten()
diff = target - input
res = torch.max(diff * quants, diff * (quants - 1.0))
res[nans] = 0.0
res = res * weights
res = (
res.view(n, -1, horizon).sum(dim=2) / num_not_nan[:, None]
) # row_wise operation
if self.reduction == "mean":
return res.mean(dim=0)
if self.reduction == "sum":
return res.sum(dim=0)
return res
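# Hedged worked example (not part of the original module): the loss layout
# expects forecasts for all quantiles to be concatenated along dim 1, grouped
# quantile by quantile.  Numbers below are arbitrary illustrations.
def _example_pinball_loss():
    loss_fn = PinballLoss(quantile=torch.tensor([0.5, 0.9]))
    target = torch.tensor([[10.0, 12.0]])           # one series, two steps
    fcst = torch.tensor([[8.0, 12.0, 9.0, 13.0]])   # q50 forecasts, then q90 forecasts
    # e.g. for q=0.9, y=10, y_hat=9: max(1*0.9, 1*(-0.1)) = 0.9
    return loss_fn(fcst, target)                    # one averaged value per quantile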
class AdjustedPinballLoss(_Loss):
"""Adjusted Pinball Loss function.
This is an adjusted version of pinball loss function in that when for the first quantile (i.e., should be 0.5 or close to 0.5), we normalize the original pinball loss with the average value of target and forecasts.
The idea is to optimize for sMAPE for the median forecasts (i.e., the forecasts for quantile 50). For the other quantile q (0<q<1), pinball loss is defined as:
pinball(y_hat, y, q)=max((y-y_hat)*q, (y-y_hat)*(q-1)),
where y_hat is the forecast value and y is the true value.
For quantiles Q = [q_1, q_2, ..., q_n] and weights W = [w_1, w_2, ..., w_n], forecasts Y_hat=[y_hat_1, ..., yhat_n] and true value y, the adjusted weighted pinball loss is defined as:
    PinballLoss(Y_hat, y) = 2*pinball(y_hat_1, y, q_1)/(y_hat_1+y)*w_1 + Sum_{i=2}^n pinball(log(y_hat_i), log(y), q_i)*w_i.
Attributes:
quantil: A 1-dimensional `torch.Tensor` object representing quantiles to be calculated.
weight: Optional; A 1-dimensional `torch.Tensor` object representing the weights for quantiles. Default is torch.Tensor([1/n,..,1/n]) where n the number of quantiles.
reduction: Optional; A string representing the reduction method. Can be 'mean' or 'sum'. Default is 'mean'.
input_log: Optional; A boolean specifying whether or not the target and the forecast are of logarithmic scale. Default is True.
"""
def __init__(
self,
quantile: Tensor,
weight: Optional[Tensor] = None,
reduction: str = "mean",
input_log: bool = True,
) -> None:
super(AdjustedPinballLoss, self).__init__(
size_average=None, reduce=None, reduction=reduction
)
if len(quantile) < 1:
msg = "quantile should not be empty."
logging.error(msg)
raise ValueError(msg)
if len(quantile.size()) != 1:
msg = "quantile should be a 1-dimentional tensor."
logging.error(msg)
raise ValueError(msg)
self.quantile = quantile
self.quantile.requires_grad = False
if weight is None:
d = len(quantile)
weight = torch.ones(d) / d
else:
if weight.size() != quantile.size():
msg = "weight and quantile should have the same size."
logging.error(msg)
raise ValueError(msg)
self.register_buffer("weight", weight)
self.reduction = reduction
self.input_log = input_log
def _check(self, input: Tensor, target: Tensor) -> None:
"""
Check input tensor and target tensor size.
"""
if target.size()[0] != input.size()[0]:
msg = "Input batch size is not equal to target batch size."
logging.error(msg)
raise ValueError(msg)
num_feature = target.size()[1] * len(self.quantile)
if input.size()[1] != num_feature:
msg = f"Input should contain {num_feature} features but receive {input.size()[1]}."
logging.error(msg)
raise ValueError(msg)
def forward(self, input: Tensor, target: Tensor) -> Tensor:
"""Forward method of AdjustedPinballLoss module.
Args:
input: A `torch.Tensor` object representing the forecasts of shape (num, n_steps * n_quantiles), where n_quantiles is the length of quantile.
target: A `torch.Tensor` object representing true values of shape (num, n_steps)
Returns:
A 1-dimensional `torch.Tensor` object representing the computed pinball loss of length the number of quantiles.
"""
self._check(input, target)
n = len(input)
m = len(self.quantile)
horizon = target.size()[1]
nans = torch.isnan(target).detach()
        # replace NaNs so they do not appear in the loss
target[nans] = 1.0
num_not_nan = (~nans).float().sum(dim=1)
num_not_nan[num_not_nan == 0] += 1
if self.input_log:
target_exp = torch.exp(target)
fcst_exp = torch.exp(input[:, :horizon])
else:
target_exp = target
fcst_exp = input[:, :horizon]
diff = target_exp - fcst_exp
res = (
torch.max(diff * self.quantile[0], diff * (self.quantile[0] - 1.0))
/ (target_exp + fcst_exp)
* 2
)
res[nans] = 0.0
if m > 1:
if self.input_log:
fcst = input[:, horizon:]
else:
fcst = torch.log(input[:, horizon:])
m -= 1
target = target.repeat(1, m)
nans = nans.repeat(1, m)
quants = self.quantile[1:].repeat(horizon, 1).t().flatten()
diff_q = target - fcst
res_q = torch.max(diff_q * quants, diff_q * (quants - 1.0))
res_q[nans] = 0.0
res = torch.cat([res, res_q], dim=1)
weights = self.weight.repeat(horizon, 1).t().flatten()
res = res * weights
res = res.view(n, -1, horizon).sum(dim=2) / num_not_nan[:, None]
if self.reduction == "mean":
return res.mean(dim=0)
if self.reduction == "sum":
return res.sum(dim=0)
return res
class GMFeature:
"""Module for computing time series features for global model
We currently support the following features:
1) last date feature: a binary features computed on the last timestamp
2) simple date feature: such as date of week/month/year, etc
3) tsfeatures: features defined in Kats tsfeature module
4) time series embedding: embedding from Kats time2vec model # TODO
This class provides methods including get_base_features and get_on_the_fly_features.
Attributes:
feature_type: A string or a list of strings representing the feature names. Each string should be in ['last_date', 'simple_date', 'tsfeatures', 'ts2vec', 'last_hour'].
"""
def __init__(self, feature_type: Union[List[str], str]) -> None:
self.all_possible_gmfeatures = all_possible_gmfeatures
if isinstance(feature_type, str):
feature_type = [feature_type]
if not set(feature_type).issubset(set(self.all_possible_gmfeatures)):
msg = f"feature_type must from {self.all_possible_gmfeatures}."
logging.error(msg)
raise ValueError(msg)
self.feature_type = feature_type
def get_feature_size(self, ts_length: int) -> int:
"""Calculate the length of feature matrix (i.e., dim 1 of feature matrix) of a time series of length ts_length.
Args:
ts_length: An integer representing the length of the time series.
Returns:
An integer representing the length of the feature.
"""
fixed_feature_lengths = {
"tsfeatures": 40,
"ts2vec": 0,
"last_date": 7 + 27 + 31,
"last_hour": 24,
"last_hour_minute": 2,
"last_month": 12,
}
varied_feature_lengths = {"simple_date": 4}
ans = 0
for f in self.feature_type:
ans += fixed_feature_lengths.get(f, 0)
ans += varied_feature_lengths.get(f, 0) * ts_length
return int(ans)
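# Worked example (editor's addition), assuming "last_date" and "simple_date" are
# both listed in all_possible_gmfeatures:
#   >>> gm = GMFeature(["last_date", "simple_date"])
#   >>> gm.get_feature_size(10)   # (7 + 27 + 31) fixed + 4 * 10 varied
#   105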
@staticmethod
def _get_tsfeatures(
x: np.ndarray,
time: np.ndarray,
) -> torch.Tensor:
"""
private method to get Kats tsfeatures
please refer to kats.tsfeatures for more details
"""
features = []
for i in range(len(x)):
features.append(
np.log(
np.abs(
list(
# pyre-fixme[16]: `List` has no attribute `values`.
TsFeatures()
.transform(
TimeSeriesData(
pd.DataFrame(
{"time": time[i], "value": x[i]}
).dropna() # NaNs would mess up tsfeatures
)
)
.values()
)
)
)
)
features = torch.tensor(features)
# filter out NaN and inf
features[torch.isnan(features)] = 0.0
features[torch.isinf(features)] = 0.0
return features
@staticmethod
def _get_date_feature(
x: np.ndarray,
time: np.ndarray,
) -> torch.Tensor:
"""Private method to get simple date features
We leverage the properties from `pandas.DatetimeIndex`, and the feature includes:
- day
- month
- dayofweek
- dayofyear
"""
feature = []
for i in range(len(x)):
pdt = pd.to_datetime(
time[i]
) # converting data type only once to speed up computation
feature.append(
np.concatenate(
[
pdt.day.values,
pdt.month.values,
pdt.dayofweek.values,
pdt.dayofyear.values,
]
)
)
feature = (torch.tensor(feature) + 1.0).log()
return feature
@staticmethod
def _get_last_date_feature(
x: np.ndarray,
time: np.ndarray,
) -> torch.Tensor:
"""Compute date features for the last timestamp."""
n = len(time)
m = 7 + 27 + 31
offset = np.arange(0, n * m, m)
ans = np.zeros(n * m)
pdt = pd.to_datetime(time[:, -1])
indices = []
# compute day of week indices
indices.append(pdt.dayofweek.values + offset)
# compute bi-week indices
indices.append((pdt.weekofyear.values - 1) // 2 + 7 + offset)
# compute day of month indices
indices.append(pdt.day.values + 6 + 27 + offset)
indices = np.concatenate(indices)
ans[indices] = 1.0
return torch.tensor(ans.reshape(n, -1), dtype=torch.get_default_dtype())
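# Layout note with a small example (editor's addition): each row of the returned
# tensor is a 65-dim indicator vector -- positions 0-6 encode day of week,
# positions 7-33 encode the bi-week of year ((weekofyear - 1) // 2), and
# positions 34-64 encode day of month. For 2020-01-15 (a Wednesday in ISO week 3),
# the set positions are 2, 7 + 1 = 8 and 6 + 27 + 15 = 48.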
@staticmethod
def _get_last_hour_feature(
x: np.ndarray,
time: np.ndarray,
) -> torch.Tensor:
"""
Compute hour features for the last timestamp.
"""
n = len(time)
ans = np.zeros(n * 24)
indices = pd.to_datetime(time[:, -1]).hour.values + np.arange(0, n * 24, 24)
ans[indices] = 1.0
return torch.tensor(ans.reshape(n, -1), dtype=torch.get_default_dtype())
@staticmethod
def _get_last_month_feature(
x: np.ndarray,
time: np.ndarray,
) -> torch.Tensor:
"""
Compute month features for the last timestamp.
"""
n = len(time)
ans = np.zeros(n * 12)
indices = pd.to_datetime(time[:, -1]).month.values + np.arange(0, n * 12, 12)
ans[indices] = 1.0
return torch.tensor(ans.reshape(n, -1), dtype=torch.get_default_dtype())
@staticmethod
def _get_ts2vec(
x: np.ndarray,
time: np.ndarray,
):
# TODO after ts2vec model lands
pass
@staticmethod
def _get_last_hour_minute_feature(
x: np.ndarray,
time: np.ndarray,
) -> torch.Tensor:
"""
Compute minute features for the last timestamp.
"""
pdt = | pd.to_datetime(time[:, -1]) | pandas.to_datetime |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over, so nothing gets defined; the values should
# remain unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
| tm.assert_almost_equal(rs, xp) | pandas.util.testing.assert_almost_equal |
import pandas as pd
import numpy as np
import os
import sys
import pdb
from scipy.stats import binom_test
from statsmodels.stats import multitest
from collections import Counter
from GLOBAL_VAR import *
alignmetn_dir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/datasets/TFBS_ChIP_seq/STAR_output'
SNP_in_TFBS_dir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/datasets/TFBS_ChIP_seq/STAR_output_GTExSNPs/'
outdir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/downstream/ChIP_ASB'
def get_ASB_ratio(save_read_counts_fn, reads_filter = 10):
reads_count = | pd.read_csv('%s/reads_count/%s' % (outdir, save_read_counts_fn), sep='\t', index_col = [0,1]) | pandas.read_csv |
'''
Created on Dec 14, 2016
Purpose: Given a list of KEGG KO results from the "Detail Page", create a map which contains
further information besides the protein ID (e.g. HOG membership)
Purpose 2: For each individual list containing this secondary information, extract all of its
genes by ID and all of its annotated genes and pathways
@author: bardya
'''
import os
import subprocess
import csv
import sqlite3
import pandas as pd
import argparse
def parse_args():
parser = argparse.ArgumentParser(description='generate fasta files of all groups, naming scheme: OG<#species>_<groupID>.fa')
parser.add_argument('-omawd', dest='oma_workpath', metavar='<oma_working_directory_path>', required= True,
help='path to the OMA working directory')
parser.add_argument('-omaout', dest='oma_output_dirname', metavar='<oma_output_dirname>', default = "Results",
help='base directory of the OMA output')
parser.add_argument('-m', '--mode', dest='mode', metavar='<hog|og|hogaware>', type=str,
choices=["hog", "hogs", "HOG", "HOGs", "og", "ogs", "OG", "OGS", "HOGAware","HOGAWARE","hogaware","hogAware"],
default="ogs", help='based on selected mode parse the OrthologousGroups.orthoxml or the HierarchicalGroups.orthoxml file located in OMA output directory.')
parser.add_argument('-f', '--force', dest='force_flag', metavar='<force overwrite flag>', action='store_const', const=1, default=0,
help='if set, the output in the directory "./Bins" will be overwritten')
parser.add_argument('--no-stats', dest='stats_flag', metavar='<stats_to_stdout_flag>', action='store_const', const=0, default=1,
help='if set, script does not give out a statistical overview to stdout')
parser.add_argument('--no-accessory', dest='accesory_flag', metavar='<produce accessory genomes flag>', action='store_const', const=1, default=0,
help='if set, script gives out the accessory genomes into the directory "./Accessory" relativ to omawd')
parser.add_argument('-t', '--speciestree', dest='nwcktree', metavar='<path/to/tree.file>', type=argparse.FileType('rt'),
help='path to a file containing the species tree in string representation')
parser.add_argument('--version', action='version', version='0.1')
return parser.parse_args()
def clearCheckPath(outdir, force=0):
if os.path.isdir(outdir):
if force:
import shutil
shutil.rmtree(outdir, ignore_errors=True)
else:
raise IOError("Output Directory already exiting. Specify '-f' option to force overwrite")
os.makedirs(outdir)
def clearCheckFile(filepath, force=0):
if os.path.isfile(filepath):
if force:
import shutil
shutil.rmtree(filepath, ignore_errors=True)
else:
raise IOError("Output File already exiting. Specify '-f' option to force overwrite")
def createSqlTable(con, filepath, tablename, columnnames, sep='\t', mode='fail', primary_key=()):
'''creates a database table '''
try:
df = | pd.read_csv(filepath, sep=sep, names=columnnames, index_col=False) | pandas.read_csv |
from __future__ import division
__author__ = 'saeedamen' # <NAME> / <EMAIL>
#
# Copyright 2017 Cuemacro Ltd. - http//www.cuemacro.com / @cuemacro
#
# See the License for the specific language governing permissions and limitations under the License.
#
import numpy as np
import pandas as pd
import pytz
import re
from datetime import timedelta
from bisect import bisect # operate as sorted container
import copy
from random import randint
from tcapy.conf.constants import Constants
from tcapy.util.utilfunc import UtilFunc
from tcapy.util.loggermanager import LoggerManager
constants = Constants()
from tcapy.util.customexceptions import *
class TimeSeriesOps(object):
"""TimeSeriesOps provides generalised time series operations on DataFrame objects, which are used throughout the
library. These include filtering a DataFrame by start/finish dates, joining DataFrames, doing VLOOKUP style operations,
calculating open/high/low/close prices for defined resampling periods etc.
"""
day_of_week_ordinals = {'mon': 0, 'tue': 1, 'wed': 2, 'thu': 3, 'fri': 4, 'sat': 5, 'sun': 6}
month_of_year_ordinals = {'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6, 'jul': 7, 'aug': 8,
'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12}
def __init__(self):
self._util_func = UtilFunc()
# self.logger = LoggerManager().getLogger(__name__)
def concat_dataframe_list(self, df_list, sort=True):
"""Concatenates a list of DataFrames into a single DataFrame and sorts them. Removes any empty or None elements
from the list and optionally sorts them)
Parameters
----------
df_list : DataFrame (list)
DataFrames to be concatenated
sort : bool (default: True)
Sorts final concatenated DataFrame by index
Returns
-------
DataFrame
"""
if df_list is not None:
# Remove None entries and empty dataframes from the list
if isinstance(df_list, list):
df_list = [x for x in df_list if x is not None]
df_list = [x for x in df_list if not x.empty]
else:
return df_list
# Only concatenate if any non-empty dataframes are left
if len(df_list) > 0:
# Careful: concatenating DataFrames can change the order, so insist on arranging by old cols
old_cols = df_list[0].columns
if len(df_list) == 1:
df_list = df_list[0]
if sort:
df_list = df_list.sort_index()
else:
df_list = pd.concat(df_list, sort=sort)
return df_list[old_cols]
return None
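# Illustrative usage sketch (editor's addition; assumes ops = TimeSeriesOps()):
#     df1 = pd.DataFrame({'mid': [1.0]}, index=[pd.Timestamp('2020-01-02')])
#     df2 = pd.DataFrame({'mid': [2.0]}, index=[pd.Timestamp('2020-01-01')])
#     ops.concat_dataframe_list([df1, None, pd.DataFrame(), df2])
#     # -> two rows, sorted by index, original column order preserved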
def nanify_array_based_on_other(self, array_to_match, matching_value, array_to_filter):
"""Make elements of an array NaN, depending on matches in another (same-sized) array.
Parameters
----------
array_to_match : np.array
Array to match one
matching_value : double
What matching array should filter to
array_to_filter : np.array
Array to be NaNified where matches
Returns
-------
np.array
"""
return np.where(array_to_match == matching_value, np.nan, array_to_filter) # ie. put NaN for sells
def downsample_time_series_floats(self, df, do_downsample):
"""Downsamples numerical values in a DataFrame to float32
Parameters
----------
df : DataFrame
Data to be downsampled
do_downsample : bool
Flag to activate function
Returns
-------
DataFrame
"""
if do_downsample:
for i, j in zip(df.columns, df.dtypes):
if str(j) == 'float64':
df[i] = df[i].astype(np.float32)
return df
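# Quick sketch (editor's addition, reusing ops = TimeSeriesOps() from above):
#     df = pd.DataFrame({'mid': np.random.rand(5), 'flag': [1, 0, 1, 0, 1]})
#     df = ops.downsample_time_series_floats(df, do_downsample=True)
#     # df['mid'].dtype -> float32; df['flag'] (int64) is left untouched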
def downsample_time_series_usable(self, df, start_date=None, finish_date=None, field='mid'):
"""Creates a downsampled version of data ready for plotting.
Parameters
----------
df : DataFrame
Time series data, typically containing the mid price for a ticker
start_date : str
Start date of plot
finish_date : str
Finish date of plot
Returns
-------
pd.DataFrame,
"""
# Curtail the time series we plot to a specific date range
if start_date is not None and finish_date is not None:
df = self.filter_between_dates(df, start_date, finish_date)
# Get the resampling rate which will fit the maximum number of chart data points
seconds = self.calculate_resample_period(df)
# Resample mid into open/high/low/close and everything else (ie. bid/ask/mid) into mean
downsampled_df = pd.concat(
[self.resample_time_series(df[field], resample_amount=seconds, how='ohlc',
unit='seconds'),
self.resample_time_series(df, resample_amount=seconds, how='mean',
unit='seconds')],
axis=1
)
return downsampled_df
def filter_between_dates(self, df, start, finish):
"""Filters a DataFrame between two specific dates (on an inclusive basis)
Parameters
----------
df : DataFrame
DataFrame to be filtered
start : str
Start date
finish : str
Finish date
Returns
-------
DataFrame
"""
return df.loc[(start <= df.index) & (df.index <= finish)]
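# Small sketch (editor's addition): the filtering is inclusive at both ends.
#     idx = pd.date_range('2020-01-01', periods=5, freq='D')
#     df = pd.DataFrame({'mid': range(5)}, index=idx)
#     ops.filter_between_dates(df, '2020-01-02', '2020-01-04')
#     # -> keeps the rows for Jan 2, 3 and 4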
def remove_between_dates(self, df, start=None, finish=None):
"""Removes the entries between a defined start/finish date/time (on an inclusive basis)
Parameters
----------
df : DataFrame
Data to be filtered
start : Timestamp
Date to start deletion
finish : Timestamp
Date to finish deletion
Returns
-------
DataFrame
"""
if start is not None and finish is not None:
if isinstance(df.index, pd.DatetimeIndex):
start_df = df.loc[df.index[0]:start]
finish_df = df.loc[finish:df.index[len(df.index)-1]]
df = | pd.concat([start_df, finish_df]) | pandas.concat |
import json
import os
import tempfile
import shutil
import pandas as pd
from sample_sheet import Sample
from unittest import main, TestCase
from metapool import KLSampleSheet
from metapool.count import (_extract_name_and_lane, _parse_samtools_counts,
_parse_fastp_counts, bcl2fastq_counts,
fastp_counts, minimap2_counts, run_counts,
_parsefier)
class TestCount(TestCase):
def setUp(self):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
self.run_dir = os.path.join(data_dir, 'runs',
'200318_A00953_0082_AH5TWYDSXY')
self.ss = KLSampleSheet(os.path.join(self.run_dir, 'sample-sheet.csv'))
self.stats = | pd.DataFrame(RUN_STATS) | pandas.DataFrame |
import numpy as np
import pandas as pd
import os
from settings import *
"""
Augment the original training examples by adding anti-symmetrical ones in terms of left/right motion.
Doubles the quantity of examples. The new examples have reversed sign for the joint values of
'HeadYaw', 'HipRoll', swapped values for Arms (RArm to LArm and vice versa).
"""
# Original keyframes and destination file to write
data_x_set = 'df3_25fps.csv'
dest_x_set = 'df31_25fps.csv'
data_y_set = 'y_va_cat.csv'
dest_y_set = 'y_va_cat_aug.csv'
# If True, valence and arousal labels are also augmented
augment_labels = False
# Augment the training examples first
path = os.path.join(ROOT_PATH, RAW_DATA, data_x_set)
df = pd.read_csv(path, index_col=0)
# Reverse the sign of HeadYaw and HipRoll values
df_tr = df.copy(deep=True)
df_tr.loc[:, ['HeadYaw', 'HipRoll']] = -df_tr.loc[:, ['HeadYaw', 'HipRoll']]
# Swap sides: RArm to LArm and vice versa
swap_l = df_tr.loc[:, ['LShoulderPitch', 'LShoulderRoll', 'LElbowRoll', 'LElbowYaw', 'LWristYaw', 'LHand']]
swap_r = df_tr.loc[:, ['RShoulderPitch', 'RShoulderRoll', 'RElbowRoll', 'RElbowYaw', 'RWristYaw', 'RHand']]
df_tr[['LShoulderPitch', 'LShoulderRoll', 'LElbowRoll', 'LElbowYaw', 'LWristYaw', 'LHand']] = swap_r
df_tr[['RShoulderPitch', 'RShoulderRoll', 'RElbowRoll', 'RElbowYaw', 'RWristYaw', 'RHand']] = swap_l
# Inverse signs for arms roll and yaw joints
inv_joints = ['LShoulderRoll', 'LElbowRoll', 'LElbowYaw', 'LWristYaw', 'RShoulderRoll', 'RElbowRoll', 'RElbowYaw', 'RWristYaw']
df_tr.loc[:, inv_joints] = -df_tr.loc[:, inv_joints]
# Add a '_tr' suffix (stands for 'transformed') to the anim id
df_tr['id'] = df_tr['id'] + '_tr'
# Add to previous dataframe
augm_df = pd.concat([df, df_tr], ignore_index=True)
dest = os.path.join(ROOT_PATH, DATA_X_PATH, dest_x_set)
augm_df.to_csv(dest)
# Augment the labels
if augment_labels:
path = os.path.join(ROOT_PATH, DATA_Y_PATH, data_y_set)
df_y = | pd.read_csv(path, index_col=0) | pandas.read_csv |
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.generic import ABCIndexClass
import pandas as pd
import pandas._testing as tm
from pandas.api.types import is_float, is_float_dtype, is_integer, is_scalar
from pandas.core.arrays import IntegerArray, integer_array
from pandas.core.arrays.integer import (
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
)
from pandas.tests.extension.base import BaseOpsUtil
def make_data():
return list(range(8)) + [np.nan] + list(range(10, 98)) + [np.nan] + [99, 100]
@pytest.fixture(
params=[
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
]
)
def dtype(request):
return request.param()
@pytest.fixture
def data(dtype):
return integer_array(make_data(), dtype=dtype)
@pytest.fixture
def data_missing(dtype):
return integer_array([np.nan, 1], dtype=dtype)
@pytest.fixture(params=["data", "data_missing"])
def all_data(request, data, data_missing):
"""Parametrized fixture giving 'data' and 'data_missing'"""
if request.param == "data":
return data
elif request.param == "data_missing":
return data_missing
def test_dtypes(dtype):
# smoke tests on auto dtype construction
if dtype.is_signed_integer:
assert np.dtype(dtype.type).kind == "i"
else:
assert np.dtype(dtype.type).kind == "u"
assert dtype.name is not None
@pytest.mark.parametrize(
"dtype, expected",
[
(Int8Dtype(), "Int8Dtype()"),
(Int16Dtype(), "Int16Dtype()"),
(Int32Dtype(), "Int32Dtype()"),
(Int64Dtype(), "Int64Dtype()"),
(UInt8Dtype(), "UInt8Dtype()"),
(UInt16Dtype(), "UInt16Dtype()"),
(UInt32Dtype(), "UInt32Dtype()"),
(UInt64Dtype(), "UInt64Dtype()"),
],
)
def test_repr_dtype(dtype, expected):
assert repr(dtype) == expected
def test_repr_array():
result = repr(integer_array([1, None, 3]))
expected = "<IntegerArray>\n[1, <NA>, 3]\nLength: 3, dtype: Int64"
assert result == expected
def test_repr_array_long():
data = integer_array([1, 2, None] * 1000)
expected = (
"<IntegerArray>\n"
"[ 1, 2, <NA>, 1, 2, <NA>, 1, 2, <NA>, 1,\n"
" ...\n"
" <NA>, 1, 2, <NA>, 1, 2, <NA>, 1, 2, <NA>]\n"
"Length: 3000, dtype: Int64"
)
result = repr(data)
assert result == expected
class TestConstructors:
def test_uses_pandas_na(self):
a = pd.array([1, None], dtype=pd.Int64Dtype())
assert a[1] is pd.NA
def test_from_dtype_from_float(self, data):
# construct from our dtype & string dtype
dtype = data.dtype
# from float
expected = pd.Series(data)
result = pd.Series(
data.to_numpy(na_value=np.nan, dtype="float"), dtype=str(dtype)
)
tm.assert_series_equal(result, expected)
# from int / list
expected = pd.Series(data)
result = pd.Series(np.array(data).tolist(), dtype=str(dtype))
tm.assert_series_equal(result, expected)
# from int / array
expected = pd.Series(data).dropna().reset_index(drop=True)
dropped = np.array(data.dropna()).astype(np.dtype((dtype.type)))
result = pd.Series(dropped, dtype=str(dtype))
tm.assert_series_equal(result, expected)
class TestArithmeticOps(BaseOpsUtil):
def _check_divmod_op(self, s, op, other, exc=None):
super()._check_divmod_op(s, op, other, None)
def _check_op(self, s, op_name, other, exc=None):
op = self.get_op_from_name(op_name)
result = op(s, other)
# compute expected
mask = s.isna()
# if s is a DataFrame, squeeze to a Series
# for comparison
if isinstance(s, pd.DataFrame):
result = result.squeeze()
s = s.squeeze()
mask = mask.squeeze()
# other array is an Integer
if isinstance(other, IntegerArray):
omask = getattr(other, "mask", None)
mask = getattr(other, "data", other)
if omask is not None:
mask |= omask
# 1 ** na is na, so need to unmask those
if op_name == "__pow__":
mask = np.where(~s.isna() & (s == 1), False, mask)
elif op_name == "__rpow__":
other_is_one = other == 1
if isinstance(other_is_one, pd.Series):
other_is_one = other_is_one.fillna(False)
mask = np.where(other_is_one, False, mask)
# float result type or float op
if (
is_float_dtype(other)
or is_float(other)
or op_name in ["__rtruediv__", "__truediv__", "__rdiv__", "__div__"]
):
rs = s.astype("float")
expected = op(rs, other)
self._check_op_float(result, expected, mask, s, op_name, other)
# integer result type
else:
rs = pd.Series(s.values._data, name=s.name)
expected = op(rs, other)
self._check_op_integer(result, expected, mask, s, op_name, other)
def _check_op_float(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in float dtypes
expected[mask] = np.nan
if "floordiv" in op_name:
# Series op sets 1//0 to np.inf, which IntegerArray does not do (yet)
mask2 = np.isinf(expected) & np.isnan(result)
expected[mask2] = np.nan
tm.assert_series_equal(result, expected)
def _check_op_integer(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in integer dtypes
# to compare properly, we convert the expected
# to float, mask to nans and convert infs
# if we have uints then we process as uints
# then convert to float
# and we ultimately want to create a IntArray
# for comparisons
fill_value = 0
# mod/rmod turn floating 0 into NaN while
# integer works as expected (no nan)
if op_name in ["__mod__", "__rmod__"]:
if is_scalar(other):
if other == 0:
expected[s.values == 0] = 0
else:
expected = expected.fillna(0)
else:
expected[
(s.values == 0).fillna(False)
& ((expected == 0).fillna(False) | expected.isna())
] = 0
try:
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
except ValueError:
expected = expected.astype(float)
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
expected[mask] = pd.NA
# assert that the expected astype is ok
# (skip for unsigned as they have wrap around)
if not s.dtype.is_unsigned_integer:
original = pd.Series(original)
# we need to fill with 0's to emulate what an astype('int') does
# (truncation) for certain ops
if op_name in ["__rtruediv__", "__rdiv__"]:
mask |= original.isna()
original = original.fillna(0).astype("int")
original = original.astype("float")
original[mask] = np.nan
tm.assert_series_equal(original, expected.astype("float"))
# assert our expected result
tm.assert_series_equal(result, expected)
def test_arith_integer_array(self, data, all_arithmetic_operators):
# we operate with a rhs of an integer array
op = all_arithmetic_operators
s = pd.Series(data)
rhs = pd.Series([1] * len(data), dtype=data.dtype)
rhs.iloc[-1] = np.nan
self._check_op(s, op, rhs)
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
# scalar
op = all_arithmetic_operators
s = pd.Series(data)
self._check_op(s, op, 1, exc=TypeError)
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
# frame & scalar
op = all_arithmetic_operators
df = pd.DataFrame({"A": data})
self._check_op(df, op, 1, exc=TypeError)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
# ndarray & other series
op = all_arithmetic_operators
s = pd.Series(data)
other = np.ones(len(s), dtype=s.dtype.type)
self._check_op(s, op, other, exc=TypeError)
def test_arith_coerce_scalar(self, data, all_arithmetic_operators):
op = all_arithmetic_operators
s = pd.Series(data)
other = 0.01
self._check_op(s, op, other)
@pytest.mark.parametrize("other", [1.0, np.array(1.0)])
def test_arithmetic_conversion(self, all_arithmetic_operators, other):
# if we have a float operand we should have a float result
# if that is equal to an integer
op = self.get_op_from_name(all_arithmetic_operators)
s = pd.Series([1, 2, 3], dtype="Int64")
result = op(s, other)
assert result.dtype is np.dtype("float")
def test_arith_len_mismatch(self, all_arithmetic_operators):
# operating with a list-like with non-matching length raises
op = self.get_op_from_name(all_arithmetic_operators)
other = np.array([1.0])
s = pd.Series([1, 2, 3], dtype="Int64")
with pytest.raises(ValueError, match="Lengths must match"):
op(s, other)
@pytest.mark.parametrize("other", [0, 0.5])
def test_arith_zero_dim_ndarray(self, other):
arr = integer_array([1, None, 2])
result = arr + np.array(other)
expected = arr + other
tm.assert_equal(result, expected)
def test_error(self, data, all_arithmetic_operators):
# invalid ops
op = all_arithmetic_operators
s = pd.Series(data)
ops = getattr(s, op)
opa = getattr(data, op)
# invalid scalars
msg = (
r"(:?can only perform ops with numeric values)"
r"|(:?IntegerArray cannot perform the operation mod)"
)
with pytest.raises(TypeError, match=msg):
ops("foo")
with pytest.raises(TypeError, match=msg):
ops(pd.Timestamp("20180101"))
# invalid array-likes
with pytest.raises(TypeError, match=msg):
ops(pd.Series("foo", index=s.index))
if op != "__rpow__":
# TODO(extension)
# rpow with a datetimelike coerces the integer array incorrectly
msg = (
"can only perform ops with numeric values|"
"cannot perform .* with this index type: DatetimeArray|"
"Addition/subtraction of integers and integer-arrays "
"with DatetimeArray is no longer supported. *"
)
with pytest.raises(TypeError, match=msg):
ops(pd.Series(pd.date_range("20180101", periods=len(s))))
# 2d
result = opa(pd.DataFrame({"A": s}))
assert result is NotImplemented
msg = r"can only perform ops with 1-d structures"
with pytest.raises(NotImplementedError, match=msg):
opa(np.arange(len(s)).reshape(-1, len(s)))
@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)])
def test_divide_by_zero(self, zero, negative):
# https://github.com/pandas-dev/pandas/issues/27398
a = pd.array([0, 1, -1, None], dtype="Int64")
result = a / zero
expected = np.array([np.nan, np.inf, -np.inf, np.nan])
if negative:
expected *= -1
tm.assert_numpy_array_equal(result, expected)
def test_pow_scalar(self):
a = pd.array([-1, 0, 1, None, 2], dtype="Int64")
result = a ** 0
expected = pd.array([1, 1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** 1
expected = pd.array([-1, 0, 1, None, 2], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** pd.NA
expected = pd.array([None, None, 1, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** np.nan
expected = np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
# reversed
a = a[1:] # Can't raise integers to negative powers.
result = 0 ** a
expected = pd.array([1, 0, None, 0], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = 1 ** a
expected = pd.array([1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = pd.NA ** a
expected = pd.array([1, None, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = np.nan ** a
expected = np.array([1, np.nan, np.nan, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
def test_pow_array(self):
a = integer_array([0, 0, 0, 1, 1, 1, None, None, None])
b = integer_array([0, 1, None, 0, 1, None, 0, 1, None])
result = a ** b
expected = integer_array([1, 0, None, 1, 1, 1, 1, None, None])
tm.assert_extension_array_equal(result, expected)
def test_rpow_one_to_na(self):
# https://github.com/pandas-dev/pandas/issues/22022
# https://github.com/pandas-dev/pandas/issues/29997
arr = integer_array([np.nan, np.nan])
result = np.array([1.0, 2.0]) ** arr
expected = np.array([1.0, np.nan])
tm.assert_numpy_array_equal(result, expected)
class TestComparisonOps(BaseOpsUtil):
def _compare_other(self, data, op_name, other):
op = self.get_op_from_name(op_name)
# array
result = pd.Series(op(data, other))
expected = pd.Series(op(data._data, other), dtype="boolean")
# fill the nan locations
expected[data._mask] = pd.NA
tm.assert_series_equal(result, expected)
# series
s = pd.Series(data)
result = op(s, other)
expected = op(pd.Series(data._data), other)
# fill the nan locations
expected[data._mask] = pd.NA
expected = expected.astype("boolean")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("other", [True, False, pd.NA, -1, 0, 1])
def test_scalar(self, other, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([1, 0, None], dtype="Int64")
result = op(a, other)
if other is pd.NA:
expected = pd.array([None, None, None], dtype="boolean")
else:
values = op(a._data, other)
expected = pd.arrays.BooleanArray(values, a._mask, copy=True)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = pd.NA
tm.assert_extension_array_equal(a, pd.array([1, 0, None], dtype="Int64"))
def test_array(self, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([0, 1, 2, None, None, None], dtype="Int64")
b = pd.array([0, 1, None, 0, 1, None], dtype="Int64")
result = op(a, b)
values = op(a._data, b._data)
mask = a._mask | b._mask
expected = pd.arrays.BooleanArray(values, mask)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = pd.NA
tm.assert_extension_array_equal(
a, pd.array([0, 1, 2, None, None, None], dtype="Int64")
)
tm.assert_extension_array_equal(
b, pd.array([0, 1, None, 0, 1, None], dtype="Int64")
)
def test_compare_with_booleanarray(self, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([True, False, None] * 3, dtype="boolean")
b = pd.array([0] * 3 + [1] * 3 + [None] * 3, dtype="Int64")
other = pd.array([False] * 3 + [True] * 3 + [None] * 3, dtype="boolean")
expected = op(a, other)
result = op(a, b)
tm.assert_extension_array_equal(result, expected)
def test_no_shared_mask(self, data):
result = data + 1
assert np.shares_memory(result._mask, data._mask) is False
def test_compare_to_string(self, any_nullable_int_dtype):
# GH 28930
s = pd.Series([1, None], dtype=any_nullable_int_dtype)
result = s == "a"
expected = pd.Series([False, pd.NA], dtype="boolean")
self.assert_series_equal(result, expected)
def test_compare_to_int(self, any_nullable_int_dtype, all_compare_operators):
# GH 28930
s1 = pd.Series([1, None, 3], dtype=any_nullable_int_dtype)
s2 = pd.Series([1, None, 3], dtype="float")
method = getattr(s1, all_compare_operators)
result = method(2)
method = getattr(s2, all_compare_operators)
expected = method(2).astype("boolean")
expected[s2.isna()] = pd.NA
self.assert_series_equal(result, expected)
class TestCasting:
@pytest.mark.parametrize("dropna", [True, False])
def test_construct_index(self, all_data, dropna):
# ensure that we do not coerce to Float64Index, rather
# keep as Index
all_data = all_data[:10]
if dropna:
other = np.array(all_data[~all_data.isna()])
else:
other = all_data
result = pd.Index(integer_array(other, dtype=all_data.dtype))
expected = pd.Index(other, dtype=object)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
def test_astype_index(self, all_data, dropna):
# as an int/uint index to Index
all_data = all_data[:10]
if dropna:
other = all_data[~all_data.isna()]
else:
other = all_data
dtype = all_data.dtype
idx = pd.Index(np.array(other))
assert isinstance(idx, ABCIndexClass)
result = idx.astype(dtype)
expected = idx.astype(object).astype(dtype)
tm.assert_index_equal(result, expected)
def test_astype(self, all_data):
all_data = all_data[:10]
ints = all_data[~all_data.isna()]
mixed = all_data
dtype = Int8Dtype()
# coerce to same type - ints
s = pd.Series(ints)
result = s.astype(all_data.dtype)
expected = pd.Series(ints)
tm.assert_series_equal(result, expected)
# coerce to same other - ints
s = pd.Series(ints)
result = s.astype(dtype)
expected = pd.Series(ints, dtype=dtype)
tm.assert_series_equal(result, expected)
# coerce to same numpy_dtype - ints
s = pd.Series(ints)
result = s.astype(all_data.dtype.numpy_dtype)
expected = pd.Series(ints._data.astype(all_data.dtype.numpy_dtype))
tm.assert_series_equal(result, expected)
# coerce to same type - mixed
s = pd.Series(mixed)
result = s.astype(all_data.dtype)
expected = pd.Series(mixed)
tm.assert_series_equal(result, expected)
# coerce to same other - mixed
s = pd.Series(mixed)
result = s.astype(dtype)
expected = pd.Series(mixed, dtype=dtype)
tm.assert_series_equal(result, expected)
# coerce to same numpy_dtype - mixed
s = pd.Series(mixed)
msg = r"cannot convert to .*-dtype NumPy array with missing values.*"
with pytest.raises(ValueError, match=msg):
s.astype(all_data.dtype.numpy_dtype)
# coerce to object
s = pd.Series(mixed)
result = s.astype("object")
expected = pd.Series(np.asarray(mixed))
tm.assert_series_equal(result, expected)
def test_astype_to_larger_numpy(self):
a = pd.array([1, 2], dtype="Int32")
result = a.astype("int64")
expected = np.array([1, 2], dtype="int64")
tm.assert_numpy_array_equal(result, expected)
a = pd.array([1, 2], dtype="UInt32")
result = a.astype("uint64")
expected = np.array([1, 2], dtype="uint64")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [Int8Dtype(), "Int8", UInt32Dtype(), "UInt32"])
def test_astype_specific_casting(self, dtype):
s = pd.Series([1, 2, 3], dtype="Int64")
result = s.astype(dtype)
expected = pd.Series([1, 2, 3], dtype=dtype)
tm.assert_series_equal(result, expected)
s = pd.Series([1, 2, 3, None], dtype="Int64")
result = s.astype(dtype)
expected = pd.Series([1, 2, 3, None], dtype=dtype)
tm.assert_series_equal(result, expected)
def test_construct_cast_invalid(self, dtype):
msg = "cannot safely"
arr = [1.2, 2.3, 3.7]
with pytest.raises(TypeError, match=msg):
integer_array(arr, dtype=dtype)
with pytest.raises(TypeError, match=msg):
pd.Series(arr).astype(dtype)
arr = [1.2, 2.3, 3.7, np.nan]
with pytest.raises(TypeError, match=msg):
integer_array(arr, dtype=dtype)
with pytest.raises(TypeError, match=msg):
pd.Series(arr).astype(dtype)
@pytest.mark.parametrize("in_series", [True, False])
def test_to_numpy_na_nan(self, in_series):
a = pd.array([0, 1, None], dtype="Int64")
if in_series:
a = pd.Series(a)
result = a.to_numpy(dtype="float64", na_value=np.nan)
expected = np.array([0.0, 1.0, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
result = a.to_numpy(dtype="int64", na_value=-1)
expected = np.array([0, 1, -1], dtype="int64")
tm.assert_numpy_array_equal(result, expected)
result = a.to_numpy(dtype="bool", na_value=False)
expected = np.array([False, True, False], dtype="bool")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("in_series", [True, False])
@pytest.mark.parametrize("dtype", ["int32", "int64", "bool"])
def test_to_numpy_dtype(self, dtype, in_series):
a = pd.array([0, 1], dtype="Int64")
if in_series:
a = pd.Series(a)
result = a.to_numpy(dtype=dtype)
expected = np.array([0, 1], dtype=dtype)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", ["float64", "int64", "bool"])
def test_to_numpy_na_raises(self, dtype):
a = pd.array([0, 1, None], dtype="Int64")
with pytest.raises(ValueError, match=dtype):
a.to_numpy(dtype=dtype)
def test_astype_str(self):
a = pd.array([1, 2, None], dtype="Int64")
expected = np.array(["1", "2", "<NA>"], dtype=object)
tm.assert_numpy_array_equal(a.astype(str), expected)
tm.assert_numpy_array_equal(a.astype("str"), expected)
def test_astype_boolean(self):
# https://github.com/pandas-dev/pandas/issues/31102
a = pd.array([1, 0, -1, 2, None], dtype="Int64")
result = a.astype("boolean")
expected = pd.array([True, False, True, True, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_frame_repr(data_missing):
df = pd.DataFrame({"A": data_missing})
result = repr(df)
expected = " A\n0 <NA>\n1 1"
assert result == expected
def test_conversions(data_missing):
# astype to object series
df = pd.DataFrame({"A": data_missing})
result = df["A"].astype("object")
expected = pd.Series(np.array([np.nan, 1], dtype=object), name="A")
tm.assert_series_equal(result, expected)
# convert to object ndarray
# we assert that we are exactly equal
# including type conversions of scalars
result = df["A"].astype("object").values
expected = np.array([pd.NA, 1], dtype=object)
tm.assert_numpy_array_equal(result, expected)
for r, e in zip(result, expected):
if pd.isnull(r):
assert pd.isnull(e)
elif is_integer(r):
assert r == e
assert is_integer(e)
else:
assert r == e
assert type(r) == type(e)
def test_integer_array_constructor():
values = np.array([1, 2, 3, 4], dtype="int64")
mask = np.array([False, False, False, True], dtype="bool")
result = IntegerArray(values, mask)
expected = integer_array([1, 2, 3, np.nan], dtype="int64")
tm.assert_extension_array_equal(result, expected)
msg = r".* should be .* numpy array. Use the 'integer_array' function instead"
with pytest.raises(TypeError, match=msg):
IntegerArray(values.tolist(), mask)
with pytest.raises(TypeError, match=msg):
IntegerArray(values, mask.tolist())
with pytest.raises(TypeError, match=msg):
IntegerArray(values.astype(float), mask)
msg = r"__init__\(\) missing 1 required positional argument: 'mask'"
with pytest.raises(TypeError, match=msg):
IntegerArray(values)
@pytest.mark.parametrize(
"a, b",
[
([1, None], [1, np.nan]),
([None], [np.nan]),
([None, np.nan], [np.nan, np.nan]),
([np.nan, np.nan], [np.nan, np.nan]),
],
)
def test_integer_array_constructor_none_is_nan(a, b):
result = integer_array(a)
expected = integer_array(b)
tm.assert_extension_array_equal(result, expected)
def test_integer_array_constructor_copy():
values = np.array([1, 2, 3, 4], dtype="int64")
mask = np.array([False, False, False, True], dtype="bool")
result = IntegerArray(values, mask)
assert result._data is values
assert result._mask is mask
result = IntegerArray(values, mask, copy=True)
assert result._data is not values
assert result._mask is not mask
@pytest.mark.parametrize(
"values",
[
["foo", "bar"],
["1", "2"],
"foo",
1,
1.0,
pd.date_range("20130101", periods=2),
np.array(["foo"]),
[[1, 2], [3, 4]],
[np.nan, {"a": 1}],
],
)
def test_to_integer_array_error(values):
# error in converting existing arrays to IntegerArrays
msg = (
r"(:?.* cannot be converted to an IntegerDtype)"
r"|(:?values must be a 1D list-like)"
)
with pytest.raises(TypeError, match=msg):
integer_array(values)
def test_to_integer_array_inferred_dtype():
# if values has dtype -> respect it
result = integer_array(np.array([1, 2], dtype="int8"))
assert result.dtype == Int8Dtype()
result = integer_array(np.array([1, 2], dtype="int32"))
assert result.dtype == Int32Dtype()
# if values have no dtype -> always int64
result = integer_array([1, 2])
assert result.dtype == Int64Dtype()
def test_to_integer_array_dtype_keyword():
result = integer_array([1, 2], dtype="int8")
assert result.dtype == Int8Dtype()
# if values has dtype -> override it
result = integer_array(np.array([1, 2], dtype="int8"), dtype="int32")
assert result.dtype == Int32Dtype()
def test_to_integer_array_float():
result = integer_array([1.0, 2.0])
expected = integer_array([1, 2])
tm.assert_extension_array_equal(result, expected)
with pytest.raises(TypeError, match="cannot safely cast non-equivalent"):
integer_array([1.5, 2.0])
# for float dtypes, the itemsize is not preserved
result = integer_array(np.array([1.0, 2.0], dtype="float32"))
assert result.dtype == Int64Dtype()
@pytest.mark.parametrize(
"bool_values, int_values, target_dtype, expected_dtype",
[
([False, True], [0, 1], Int64Dtype(), Int64Dtype()),
([False, True], [0, 1], "Int64", Int64Dtype()),
([False, True, np.nan], [0, 1, np.nan], Int64Dtype(), Int64Dtype()),
],
)
def test_to_integer_array_bool(bool_values, int_values, target_dtype, expected_dtype):
result = integer_array(bool_values, dtype=target_dtype)
assert result.dtype == expected_dtype
expected = integer_array(int_values, dtype=target_dtype)
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize(
"values, to_dtype, result_dtype",
[
(np.array([1], dtype="int64"), None, Int64Dtype),
(np.array([1, np.nan]), None, Int64Dtype),
(np.array([1, np.nan]), "int8", Int8Dtype),
],
)
def test_to_integer_array(values, to_dtype, result_dtype):
# convert existing arrays to IntegerArrays
result = integer_array(values, dtype=to_dtype)
assert result.dtype == result_dtype()
expected = integer_array(values, dtype=result_dtype())
tm.assert_extension_array_equal(result, expected)
def test_cross_type_arithmetic():
df = pd.DataFrame(
{
"A": pd.Series([1, 2, np.nan], dtype="Int64"),
"B": pd.Series([1, np.nan, 3], dtype="UInt8"),
"C": [1, 2, 3],
}
)
result = df.A + df.C
expected = pd.Series([2, 4, np.nan], dtype="Int64")
tm.assert_series_equal(result, expected)
result = (df.A + df.C) * 3 == 12
expected = pd.Series([False, True, None], dtype="boolean")
tm.assert_series_equal(result, expected)
result = df.A + df.B
expected = pd.Series([2, np.nan, np.nan], dtype="Int64")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("op", ["sum", "min", "max", "prod"])
def test_preserve_dtypes(op):
# TODO(#22346): preserve Int64 dtype
# for ops that enable (mean would actually work here
# but generally it is a float return value)
df = pd.DataFrame(
{
"A": ["a", "b", "b"],
"B": [1, None, 3],
"C": integer_array([1, None, 3], dtype="Int64"),
}
)
# op
result = getattr(df.C, op)()
assert isinstance(result, int)
# groupby
result = getattr(df.groupby("A"), op)()
expected = pd.DataFrame(
{"B": np.array([1.0, 3.0]), "C": integer_array([1, 3], dtype="Int64")},
index=pd.Index(["a", "b"], name="A"),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("op", ["mean"])
def test_reduce_to_float(op):
# some reduce ops always return float, even if the result
# is a rounded number
df = pd.DataFrame(
{
"A": ["a", "b", "b"],
"B": [1, None, 3],
"C": integer_array([1, None, 3], dtype="Int64"),
}
)
# op
result = getattr(df.C, op)()
assert isinstance(result, float)
# groupby
result = getattr(df.groupby("A"), op)()
expected = pd.DataFrame(
{"B": np.array([1.0, 3.0]), "C": integer_array([1, 3], dtype="Int64")},
index=pd.Index(["a", "b"], name="A"),
)
tm.assert_frame_equal(result, expected)
def test_astype_nansafe():
# see gh-22343
arr = integer_array([np.nan, 1, 2], dtype="Int8")
msg = "cannot convert to 'uint32'-dtype NumPy array with missing values."
with pytest.raises(ValueError, match=msg):
arr.astype("uint32")
@pytest.mark.parametrize("ufunc", [np.abs, np.sign])
# np.sign emits a warning with nans, <https://github.com/numpy/numpy/issues/15127>
@pytest.mark.filterwarnings("ignore:invalid value encountered in sign")
def test_ufuncs_single_int(ufunc):
a = integer_array([1, 2, -3, np.nan])
result = ufunc(a)
expected = integer_array(ufunc(a.astype(float)))
tm.assert_extension_array_equal(result, expected)
s = pd.Series(a)
result = ufunc(s)
expected = pd.Series(integer_array(ufunc(a.astype(float))))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ufunc", [np.log, np.exp, np.sin, np.cos, np.sqrt])
def test_ufuncs_single_float(ufunc):
a = integer_array([1, 2, -3, np.nan])
with np.errstate(invalid="ignore"):
result = ufunc(a)
expected = ufunc(a.astype(float))
tm.assert_numpy_array_equal(result, expected)
s = pd.Series(a)
with np.errstate(invalid="ignore"):
result = ufunc(s)
expected = ufunc(s.astype(float))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ufunc", [np.add, np.subtract])
def test_ufuncs_binary_int(ufunc):
# two IntegerArrays
a = integer_array([1, 2, -3, np.nan])
result = ufunc(a, a)
expected = integer_array(ufunc(a.astype(float), a.astype(float)))
tm.assert_extension_array_equal(result, expected)
# IntegerArray with numpy array
arr = np.array([1, 2, 3, 4])
result = ufunc(a, arr)
expected = integer_array(ufunc(a.astype(float), arr))
tm.assert_extension_array_equal(result, expected)
result = ufunc(arr, a)
expected = integer_array(ufunc(arr, a.astype(float)))
tm.assert_extension_array_equal(result, expected)
# IntegerArray with scalar
result = ufunc(a, 1)
expected = integer_array(ufunc(a.astype(float), 1))
tm.assert_extension_array_equal(result, expected)
result = ufunc(1, a)
expected = integer_array(ufunc(1, a.astype(float)))
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize("values", [[0, 1], [0, None]])
def test_ufunc_reduce_raises(values):
a = integer_array(values)
msg = r"The 'reduce' method is not supported."
with pytest.raises(NotImplementedError, match=msg):
np.add.reduce(a)
@td.skip_if_no("pyarrow", min_version="0.15.0")
def test_arrow_array(data):
# protocol added in 0.15.0
import pyarrow as pa
arr = pa.array(data)
expected = np.array(data, dtype=object)
expected[data.isna()] = None
expected = pa.array(expected, type=data.dtype.name.lower(), from_pandas=True)
assert arr.equals(expected)
@td.skip_if_no("pyarrow", min_version="0.16.0")
def test_arrow_roundtrip(data):
# roundtrip possible from arrow 0.16.0
import pyarrow as pa
df = pd.DataFrame({"a": data})
table = pa.table(df)
assert table.field("a").type == str(data.dtype.numpy_dtype)
result = table.to_pandas()
| tm.assert_frame_equal(result, df) | pandas._testing.assert_frame_equal |
# -*- coding:Utf-8 -*-
"""
This module handles CORMORAN measurement data
CorSer Class
============
.. autoclass:: CorSer
:members:
Notes
-----
Useful members
distdf : distance between radio nodes (122 columns)
devdf : device data frame
"""
#import mayavi.mlab as mlabc
import os
import pdb
import sys
import pandas as pd
import numpy as np
import numpy.ma as ma
import scipy.io as io
from pylayers.util.project import *
from pylayers.util.pyutil import *
from pylayers.mobility.ban.body import *
from pylayers.gis.layout import *
import pylayers.antprop.antenna as antenna
from matplotlib.widgets import Slider, CheckButtons, Button, Cursor
from pylayers.signal.DF import *
# from moviepy.editor import *
from skimage import img_as_ubyte
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import pickle
try:
from tvtk.api import tvtk
from mayavi.sources.vtk_data_source import VTKDataSource
from mayavi import mlab
except:
print('Layout:Mayavi is not installed')
# These lines handle an incompatibility between mayavi and VTK
# and redirect noisy warning messages into a log file
# import vtk
# output=vtk.vtkFileOutputWindow()
# output.SetFileName("mayaviwarninglog.tmp")
# vtk.vtkOutputWindow().SetInstance(output)
def cor_log(short=True):
""" display cormoran measurement campaign logfile
Parameters
----------
short : boolean
enable short version
Examples
--------
>>> from pylayers.measures.cormoran import *
>>> cor_log(short=True)
"""
filelog = os.path.join(os.environ['CORMORAN'],'RAW','Doc','MeasurementLog.csv')
log = pd.read_csv(filelog)
if short :
log['day'] = [x.split('/')[0] for x in log['Date'].values]
log['serie']=log['Meas Serie']
return log[['serie','day','Subject','techno','Short Notes']]
else:
return log
def time2npa(lt):
""" convert pd.datetime.time to numpy array
Parameters
----------
lt : pd.datetime.time
Returns
-------
ta : numpy array
time in seconds
"""
ta = (lt.microsecond*1e-6+
lt.second+
lt.minute*60+
lt.hour*3600)
return(ta)
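# The helper below is a minimal usage sketch of time2npa (illustration only, the
# timestamps are invented): it mirrors what _loadBS does when it turns BeSpoon
# millisecond timestamps into elapsed seconds.
def _example_time2npa():
    """ illustrative sketch, not used by the CorSer class """
    ind = np.array([1000., 1500., 2250.])/1e3       # hypothetical timestamps in s
    dti = pd.to_datetime(ind, unit='s')             # DatetimeIndex
    ta = time2npa(dti)                              # seconds since midnight
    return ta - ta[0]                               # elapsed seconds: [0., 0.5, 1.25]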
class CorSer(PyLayers):
""" Handle CORMORAN measurement data
Hikob data handling from CORMORAN measurement campaign
11/06/2014
single subject (Bernard and Nicolas)
12/06/2014
        several subjects (Jihad, Eric, Nicolas)
"""
def __init__(self,serie=6,day=11,source='CITI',layout=False):
"""
Parameters
----------
serie : int
day : int
source : string
Notes
-----
        The environment variable CORMORAN indicates the location of the data directory
"""
assert (day in [11,12]),"wrong day"
try:
self.rootdir = os.environ['CORMORAN']
except:
            raise NameError('Please add a CORMORAN environment variable \
                            pointing to the data')
# infos
self.serie = serie
self.day = day
self.loadlog()
if day == 11:
if serie in [7,8]:
                raise AttributeError('Serie '+str(serie) +
                                     ' has no hkb data and will not be loaded')
if day ==12:
if serie in [17,18,19,20]:
raise AttributeError('Serie '+str(serie) + \
' has no hkb data and will not be loaded')
#Measures
if day==11:
self.stcr = [1,2,3,4,10,11,12,32,33,34,35,9,17,18,19,20,25,26]
self.shkb = [5,6,13,14,15,16,21,22,23,24,27,28,29,30,31,32,33,34,35]
self.sbs = [5,6,7,8,13,14,15,16,21,22,23,24,27,28,29,30,31,32,33,34,35]
self.mocap = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35]
self.mocapinterf=[]
if day==12:
self.stcr = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]
self.shkb = [9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24]
self.sbs = [9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24]
self.mocap =[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24]
self.mocapinterf = [5,6,7,8,13,14,15,16,21,22,23,24,]
self.typ=''
# HIKOB
if serie in self.shkb:
self._loadhkb(serie=serie,day=day,source=source)
# IR-UWB TCR
if serie in self.stcr:
self._loadTCR(serie=serie,day=day)
# BeSpoon
if serie in self.sbs:
self._loadBS(serie=serie,day=day)
# set filename
if self.typ=='FULL':
self._filename = 'Sc' + self.scenario + '_S' + str(self.serie) + '_R' + str(self.run) + '_' + self.typ.capitalize()
else:
self._filename = 'Sc' + self.scenario + '_S' + str(self.serie) + '_R' + str(self.run) + '_' + self.typ
#Layout
if layout:
self.L= Layout('MOCAP-small2.lay')
# Load Infrastructure Nodes
self._loadinfranodes()
# Load cameras
self._loadcam()
#BODY & interferers
self.subject = str(self.log['Subject'].values[0].replace('jihad','Jihad')).split(' ')
#filter typos in self.subject
self.subject = [ x for x in self.subject if len(x)!=0 ]
if 'Jihad' in self.subject :
uj = self.subject.index('Jihad')
self.subject[uj]='Jihan'
if serie in self.mocap :
# load bodies from mocap file
self._loadbody(serie=serie,day=day)
self._distancematrix()
self._computedevpdf()
if isinstance(self.B,dict):
for b in self.B:
if hasattr(self,'L'):
self.B[b].traj.Lfilename=copy.copy(self.L._filename)
else:
self.B[b].traj.Lfilename='notloaded'
else :
self.B.traj.Lfilename=copy.copy(self.L._filename)
# reference time is tmocap
self.tmocap = self.B[self.subject[0]].time
# load offset dict
self.offset= self._load_offset_dict()
########################
#realign Radio on mocap
########################
# 1 - Resample radio time => mocap time
# 2 - (if available) apply offset
if ('BS' in self.typ) or ('FULL' in self.typ):
print( '\nBS data frame index: ',)
self._align_on_devdf(typ='BS')
print( 'Align on mocap OK...',)
try:
self._apply_offset('BS')
print ('time-offset applied OK')
except:
print ('WARNING time-offset NOT applied')
print ('No BS offset not yet set => use self.offset_setter ')
if ('TCR' in self.typ) or ('FULL' in self.typ):
print ('\nTCR data frame index:', )
self._align_on_devdf(typ='TCR')
print ('Align on mocap OK...',)
try:
self._apply_offset('TCR')
print ('time-offset applied OK')
except:
print ('WARNING time-offset NOT applied')
print ('No TCR offset not yet set => use self.offset_setter')
if ('HK' in self.typ) or ('FULL' in self.typ):
print ('\nHKB data frame index:',)
self._align_on_devdf(typ='HKB')
print ('Align on mocap OK...',)
try:
# self._apply_offset('HKB')
print ('time-offset applied OK')
except:
print ('WARNING time-offset NOT applied')
print ('No HKB offset not yet set => use self.offset_setter')
print ('\nCreate distance Dataframe...',)
self._computedistdf()
print ('OK',)
def __repr__(self):
st = ''
st = st + 'filename : ' + self._filename + '\n'
st = st + 'filewear : ' + self.filewear + '\n'
st = st + 'filebody : ' + self.filebody + '\n'
st = st + 'filemocap : ' + self.filemocap + '\n'
st = st + 'Day : '+ str(self.day)+'/06/2014'+'\n'
st = st + 'Serie : '+ str(self.serie)+'\n'
st = st + 'Scenario : '+str(self.scenario)+'\n'
st = st + 'Run : '+ str(self.run)+'\n'
st = st + 'Type : '+ str(self.typ)+'\n'
st = st + 'Original Video Id : '+ str(self.video)+'\n'
st = st + 'Subject(s) : '
for k in self.subject:
st = st + k + ' '
st = st + '\n\n'
st = st+'Body available: ' + str('B' in dir(self)) + '\n\n'
try :
st = st+'BeSPoon : '+self._fileBS+'\n'
except:
pass
try :
st = st+'HIKOB : '+self._filehkb+'\n'
except:
pass
try :
st = st+'TCR : '+self._fileTCR+'\n'
except:
pass
st = st + '----------------------\n\n'
for k in self.log.columns:
st = st + k + ' :' + str(self.log[k].values)+'\n'
return(st)
# @property
# def dev(self):
# """ display device techno, id , id on body, body owner,...
# """
# title = '{0:21} | {1:7} | {2:8} | {3:10} '.format('Name in Dataframe', 'Real Id', 'Body Id', 'Subject')
# print title + '\n' + '-'*len(title)
# if ('HK' in self.typ) or ('FULL' in self.typ):
# hkbkeys = self.idHKB.keys()
# hkbkeys.sort()
# for d in hkbkeys:
# dev = self.devmapper(self.idHKB[d],'HKB')
# print '{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3])
# if ('TCR' in self.typ) or ('FULL' in self.typ):
# tcrkeys = self.idTCR.keys()
# tcrkeys.sort()
# for d in tcrkeys:
# dev = self.devmapper(self.idTCR[d],'TCR')
# print '{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3])
@property
def dev(self):
""" display device techno, id , id on body, body owner,...
"""
title = '{0:21} | {1:7} | {2:8} | {3:10} '.format('Name in Dataframe', 'Real Id', 'Body Id', 'Subject')
print( title + '\n' + '='*len(title))
# access points HKB
for d in self.din:
if ('HK' in d) :
dev = self.devmapper(d,'HKB')
print('{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
if 'FULL' in self.typ:
print ('{0:21} | {1:7} | {2:8} | {3:10} '.format('','','',''))
for d in self.din:
if ('BS' in d) :
dev = self.devmapper(d,'BS')
print ('{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
if 'FULL' in self.typ:
print ('{0:21} | {1:7} | {2:8} | {3:10} '.format('','','',''))
# access points TCR
for d in self.din:
if ('TCR' in d) :
dev = self.devmapper(d,'TCR')
print ('{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
print ('{0:66}'.format('-'*len(title) ))
#device per RAT per body
for b in self.B:
if b not in self.interf:
#HKB per body
for d in self.B[b].dev.keys():
if ('HK' in d):
dev = self.devmapper(d,'HKB')
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
#bespoon
if ('FULL' in self.typ) or ('HKB' in self.typ):
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format('','','',''))
for d in self.B[b].dev.keys():
if ('BS' in d):
dev = self.devmapper(d,'BS')
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
# print '{0:66}'.format('-'*len(title) )
#TCR per body
if 'FULL' in self.typ:
print ('{0:21} | {1:7} | {2:8} | {3:10} '.format('','','',''))
for d in self.B[b].dev.keys():
if ('TCR' in d):
dev = self.devmapper(d,'TCR')
print ('{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
print ('{0:66}'.format('-'*len(title) ))
@property
def ant(self):
""" display device techno, id , id on body, body owner,...
"""
title = '{0:21} | {1:7} | {2:8} | {3:10} '.format('Name in Dataframe', 'Real Id', 'Body Id', 'Subject')
print (title + '\n' + '='*len(title) )
# access points HKB
for d in self.din:
if ('HK' in d) :
dev = self.devmapper(d,'HKB')
print ('{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
if 'FULL' in self.typ:
print ('{0:21} | {1:7} | {2:8} | {3:10} '.format('','','',''))
for d in self.din:
if ('BS' in d) :
dev = self.devmapper(d,'BS')
print ('{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
if 'FULL' in self.typ:
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format('','','',''))
# access points TCR
for d in self.din:
if ('TCR' in d) :
dev = self.devmapper(d,'TCR')
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
print ('{0:66}'.format('-'*len(title) ))
#device per RAT per body
for b in self.B:
if b not in self.interf:
#HKB per body
for d in self.B[b].dev.keys():
if ('HK' in d):
dev = self.devmapper(d,'HKB')
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
#bespoon
if ('FULL' in self.typ) or ('HKB' in self.typ):
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format('','','',''))
for d in self.B[b].dev.keys():
if ('BS' in d):
dev = self.devmapper(d,'BS')
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
# print '{0:66}'.format('-'*len(title) )
#TCR per body
if 'FULL' in self.typ:
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format('','','',''))
for d in self.B[b].dev.keys():
if ('TCR' in d):
dev = self.devmapper(d,'TCR')
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
print( '{0:66}'.format('-'*len(title) ))
def _loadcam(self):
""" load camera position
Returns
-------
update self.cam
"""
self.cam = np.array([
[-6502.16643961174,5440.97951452912,2296.44437108561],
[-7782.34866625776,4998.47624994092,2417.5861326688],
[8308.82897665828,3618.50516290547,2698.07710953287],
[5606.68337709102,-6354.17891528277,2500.27779697402],
[-8237.91886515041,-2332.98639475305,4765.31798299242],
[5496.0942989988,6216.91946236788,2433.30012872688],
[-8296.19706598514,2430.07325486109,4794.01607841197],
[7718.37527064615,-4644.26760522485,2584.75330667172],
[8471.27154730777,-3043.74550832061,2683.45089703377],
[-8213.04824602894,-4034.57371591121,2368.54548665579],
[-7184.66711497403,-4950.49444503781,2317.68563412347],
[7531.66103727189,5279.02353243886,2479.36291603544],
[-6303.08628709464,-7057.06193926342,2288.84938553817],
[-5441.17834354692,6637.93014323586,2315.15657646861],
[8287.79937470615,59.1614281340528,4809.14535447027]
])*1e-3
def _loadinfranodes(self):
""" load infrastructure nodes
nico
A4
mpts[6,7,8]
X
A3 A1
mpts[9,10,11] mpts[3,4,5]
X X
A2
mpts[0,1,2]
X
TCR = mpts[0,3,6,9]
HKB = mpts[1,2,
4,5,
7,8,
10,11]
bernard
A3
mpts[3,4,5]
X
A2 A4
mpts[6,7,8] mpts[0,1,2]
X X
A1
mpts[9,10,11]
X
TCR = mpts[0,3,6,9]
HKB = mpts[1,2,
4,5,
7,8,
10,11]
"""
filename = os.path.join(self.rootdir,'RAW','11-06-2014','MOCAP','scene.c3d')
print( "\nload infrastructure node position:",)
a, self.infraname, pts, i = c3d.ReadC3d(filename)
pts = pts/1000.
mpts = np.mean(pts, axis=0)
self.din={}
if ('HK' in self.typ) or ('FULL' in self.typ):
uhkb = np.array([[1,2], [4,5], [7,8], [10,11]])
mphkb = np.mean(mpts[uhkb], axis=1)
self.din.update(
{'HKB:1':{'p' : mphkb[3],
# 'T' : np.eye(3),
's3off' : 0.},
'HKB:2':{'p' : mphkb[2],
# 'T': np.array([[-0.44807362, 0.89399666, 0.],
# [-0.89399666, -0.44807362, 0.],
# [ 0.,0.,1. ]]),
's3off':0.} ,
'HKB:3':{'p':mphkb[1],
# 'T':array([[-0.59846007, -0.80115264, 0.],
# [ 0.80115264, -0.59846007, 0.],
# [ 0.,0., 1.]]),
's3off':0.},
'HKB:4':{'p':mphkb[0],
# 'T':array([[-0.44807362, -0.89399666, 0.],
# [ 0.89399666, -0.44807362, 0.],
# [ 0.,0., 1.]]),
's3off':0.}
})
# TCR:31 is the coordinator which was not captured.
# The position has been determined via optimization
if ('TCR' in self.typ) or ('FULL' in self.typ):
self.din.update({'TCR:32':{'p':mpts[9],
'T':np.eye(3),
's3off':0.1},
'TCR:24':{'p':mpts[6],
# 'T': np.array([[-0.44807362, 0.89399666, 0.],
# [-0.89399666, -0.44807362, 0.],
# [ 0.,0.,1. ]]),
's3off':0.1},
'TCR:27':{'p':mpts[3],
# 'T':array([[-0.59846007, -0.80115264, 0.],
# [ 0.80115264, -0.59846007, 0.],
# [ 0.,0., 1.]]),
's3off':0.1},
'TCR:28':{'p':mpts[0],
# 'T':array([[-0.44807362, -0.89399666, 0.],
# [ 0.89399666, -0.44807362, 0.],
# [ 0.,0., 1.]]),
's3off':0.1},
                         'TCR:31':{'p':np.array([1.7719,-3.2655,1.74]),
# 'T':array([[-0.44807362, -0.89399666, 0.],
# [ 0.89399666, -0.44807362, 0.],
# [ 0.,0., 1.]]),
's3off':0.0}
})
if self.day == 12:
#BS idem HKB:1 and HKB:2
if ('BS' in self.typ) or ('FULL' in self.typ):
self.din.update(
{'BS:74':{'p':mphkb[3],
# 'T':np.eye(3),
's3off':-0.2},
'BS:157':{'p':mphkb[2],
# 'T': np.array([[-0.44807362, 0.89399666, 0.],
# [-0.89399666, -0.44807362, 0.],
# [ 0.,0.,1. ]]),
's3off':-0.2} ,
})
#load extra information from inifile (antenna, rotation matrix,...)
inifile = os.path.join(self.rootdir,'POST-TREATED',str(self.day)+'-06-2014','BodyandWear','AccesPoints.ini')
config = ConfigParser.ConfigParser()
config.read(inifile)
for d in self.din:
self.din[d]['antname']=config.get(d,'file')
self.din[d]['ant']=antenna.Antenna(config.get(d,'file'))
self.din[d]['T']=eval(config.get(d,'t'))
self.din[d]['comment']=config.get(d,'comment')
# self.pts= np.empty((12,3))
# self.pts[:,0]= -mpts[:,1]
# self.pts[:,1]= mpts[:,0]
# self.pts[:,2]= mpts[:,2]
# return mpts
# self.dist = np.sqrt(np.sum((mpts[:,np.newaxis,:]-mpts[np.newaxis,:])**2,axis=2))
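    # The static helper below is an illustration (invented coordinates) of the
    # marker grouping used above in _loadinfranodes: fancy indexing with a
    # (n_anchor x n_marker) index array followed by a mean over the marker axis
    # yields one position per infrastructure anchor.
    @staticmethod
    def _example_marker_grouping():
        """ illustrative sketch, not used elsewhere in the class """
        mpts = np.arange(36, dtype=float).reshape(12, 3)   # 12 markers, xyz
        uhkb = np.array([[1, 2], [4, 5], [7, 8], [10, 11]])
        mphkb = np.mean(mpts[uhkb], axis=1)                # (4 x 3) anchor positions
        return mphkb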
def loadlog(self):
""" load in self.log the log of the current serie
from MeasurementLog.csv
"""
filelog = os.path.join(self.rootdir,'RAW','Doc','MeasurementLog.csv')
log = pd.read_csv(filelog)
date = str(self.day)+'/06/14'
self.log = log[(log['Meas Serie'] == self.serie) & (log['Date'] == date)]
def _loadbody(self,day=11,serie=''):
""" load body from motion capture file
Parameters
----------
day :
serie :
"""
assert day in [11,12],"wrong day in _loadbody"
self.B={}
color=['LightBlue','YellowGreen','PaleVioletRed','white','white','white','white','white','white','white']
for us,subject in enumerate(self.subject):
print( "\nload ",subject, " body:",)
seriestr = str(self.serie).zfill(3)
if day == 11:
self.filemocap = os.path.join(self.rootdir,'RAW',str(self.day)+'-06-2014','MOCAP','serie_'+seriestr+'.c3d')
elif day == 12:
self.filemocap = os.path.join(self.rootdir,'RAW',str(self.day)+'-06-2014','MOCAP','Nav_serie_'+seriestr+'.c3d')
# body and wear directory
baw = os.path.join(self.rootdir,'POST-TREATED',str(self.day)+'-06-2014','BodyandWear')
if subject =='Jihad':
subject ='Jihan'
#
# Load body cylinder description : "Subject.ini"
# Load wearable device description (contains antenna filename) :
#
self.filebody = os.path.join(baw, subject + '.ini')
self.filewear = os.path.join(baw,subject + '_' +str(self.day)+'-06-2014_' + self.typ + '.ini')
if len(self.subject) >1 or self.mocapinterf:
multi_subject=True
else:
multi_subject=False
self.B.update({subject:Body(_filebody=self.filebody,
_filemocap=self.filemocap,unit = 'mm', loop=False,
_filewear=self.filewear,
centered=False,
multi_subject_mocap=multi_subject,
color=color[us])})
if self.serie in self.mocapinterf:
self.interf = ['Anis_Cylindre:',
'Benoit_Cylindre:',
'Bernard_Cylindre:',
'Claude_Cylindre:',
'Meriem_Cylindre:']
intertmp=[]
if self.serie==13:
self.interf.remove('Bernard_Cylindre:')
for ui,i in enumerate(self.interf):
#try:
print( "load ",i, " interfering body:",)
_filemocap = pyu.getshort(self.filemocap)
self.B.update({i:Cylinder(name=i,
_filemocap=_filemocap,
unit = 'mm',
color = color[ui])})
intertmp.append(i)
#except:
# print "Warning ! load ",i, " FAIL !"
self.interf=intertmp
else :
self.interf=[]
# if len(self.subject) == 1:
# self.B = self.B[self.subject]
def _loadTCR(self,day=11,serie='',scenario='20',run=1):
""" load TCR data
Parameters
----------
day :
serie :
scenario :
run :
"""
#
# TNET : (NodeId,MAC)
#
self.TNET={0:31,
1:2,
7:24,
8:25,
9:26,
10:27,
11:28,
12:30,
14:32,
15:33,
16:34,
17:35,
18:36,
19:37,
20:48,
21:49}
if day==11:
self.dTCR ={'Unused':49,
'COORD':31,
'AP1':32,
'AP2':24,
'AP3':27,
'AP4':28,
'HeadRight':34,
'TorsoTopRight':25,
'TorsoTopLeft':30,
'BackCenter':35,
'HipRight':2,
'WristRight':26,
'WristLeft':48,
'KneeLeft':33,
'AnkleRight':36,
'AnkleLeft':37}
dirname = os.path.join(self.rootdir,'POST-TREATED','11-06-2014','TCR')
if day==12:
dirname = os.path.join(self.rootdir,'POST-TREATED','12-06-2014','TCR')
self.dTCR ={ 'COORD':31,
'AP1':32,
'AP2':24,
'AP3':27,
'AP4':28,
'Jihad:TorsoTopRight':35,
'Jihad:TorsoTopLeft':2,
'Jihad:BackCenter':33,
'Jihad:ShoulderLeft':37,
'Nicolas:TorsoTopRight':34,
'Nicolas:TorsoTopLeft':49,
'Nicolas:BackCenter':48,
'Nicolas:ShoulderLeft':36,
'Eric:TorsoCenter':30,
'Eric:BackCenter':25,
'Eric:ShoulderLeft':26}
#
# TCR : (Name , MAC)
# iTCR : (MAC , Name)
# dTCR : (NodeId, Name)
#
self.idTCR={}
for k in self.dTCR:
self.idTCR[self.dTCR[k]]=k
dTCRni={}
for k in self.TNET.keys():
dTCRni[k]=self.idTCR[self.TNET[k]]
files = os.listdir(dirname)
if serie != '':
            try:
                self._fileTCR = [ x for x in files if '_S'+str(serie)+'_' in x ][0]
            except:
                self._fileTCR = [ x for x in files if '_s'+str(serie)+'_' in x ][0]
tt = self._fileTCR.split('_')
self.scenario=tt[0].replace('Sc','')
self.run = tt[2].replace('R','')
self.typ = tt[3].replace('.csv','').upper()
self.video = 'NA'
else:
            filesc = [ x for x in files if 'Sc'+scenario in x ]
            self._fileTCR = [ x for x in filesc if 'R'+str(run) in x ][0]
self.scenario= scenario
self.run = str(run)
filename = os.path.join(dirname,self._fileTCR)
dtTCR = pd.read_csv(filename)
tcr={}
for k in dTCRni:
for l in dTCRni:
if k!=l:
d = dtTCR[((dtTCR['ida']==k) & (dtTCR['idb']==l))]
d.drop_duplicates('time',inplace=True)
del d['lqi']
del d['ida']
del d['idb']
d = d[d['time']!=-1]
d.index = d['time']
del d['time']
if len(d)!=0:
sr = pd.Series(d['dist']/1000,index=d.index)
tcr[dTCRni[k]+'-'+dTCRni[l]]= sr
self.tcr = pd.DataFrame(tcr)
self.tcr = self.tcr.fillna(0)
ts = 75366400./1e9
t = np.array(self.tcr.index)*ts
t = t-t[0]
self.tcr.index = t
self.ttcr=self.tcr.index
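    # The static helper below sketches the index rebasing done at the end of
    # _loadTCR (the values are invented): the raw TCR index counts timer ticks,
    # so it is scaled by the tick duration and shifted so the serie starts at t=0.
    @staticmethod
    def _example_tcr_rebase():
        """ illustrative sketch, not used elsewhere in the class """
        ticks = np.array([100, 150, 220])           # hypothetical raw TCR timestamps
        dist_mm = np.array([3120., 3075., 2990.])   # hypothetical ranges in mm
        df = pd.DataFrame({'AP1-WristRight': dist_mm/1000.}, index=ticks)
        ts = 75366400./1e9                          # tick duration in seconds
        t = np.array(df.index)*ts
        df.index = t - t[0]                         # elapsed seconds from serie start
        return df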
def _loadBS(self,day=11,serie='',scenario='20',run=1):
""" load BeSpoon data
Parameters
----------
day : int
serie : string
scenario : string
run : int
"""
if day == 11:
self.dBS = {'WristRight':157,'AnkleRight':74,'HandRight':0}
elif day == 12:
self.dBS = {'AP1':157,'AP2':74,'HandRight':0}
self.idBS={}
for k in self.dBS:
self.idBS[self.dBS[k]]=k
if day==11:
dirname = os.path.join(self.rootdir,'POST-TREATED','11-06-2014','BeSpoon')
if day==12:
dirname = os.path.join(self.rootdir,'POST-TREATED','12-06-2014','BeSpoon')
files = os.listdir(dirname)
if serie != '':
#self._fileBS = filter(lambda x : 'S'+str(serie) in x ,files)[0]
self._fileBS = [ x for x in files if 'S'+str(serie) in x ][0]
        else:
            filesc = [ x for x in files if 'Sc'+scenario in x ]
            self._fileBS = [ x for x in filesc if 'R'+str(run) in x ][0]
bespo = pd.read_csv(os.path.join(dirname,self._fileBS),index_col='ts')
gb = bespo.groupby(['Sensor'])
#get device id
devid,idevid = np.unique(bespo['Sensor'],return_index=True)
# get index of each group
dgb={d:gb.get_group(d) for d in devid}
lgb=[]
for i in dgb:
ind = dgb[i].index/1e3
dti = pd.to_datetime(ind,unit='s')
npai = time2npa(dti)
npai = npai - npai[0]
dgb[i].index=pd.Index(npai)
lgb.append(pd.DataFrame(dgb[i]['d'].values,columns=[self.idBS[0]+'-'+self.idBS[i]],index=dgb[i].index))
df = lgb[0].join(lgb[1])
self.bespo = df
#self.s157 = self.bespo[self.bespo['Sensor']==157]
#self.s157.set_index(self.s157['tu'].values/1e9)
#self.s74 = self.bespo[self.bespo['Sensor']==74]
#self.s74.set_index(self.s74['tu'].values/1e9)
#t157 = np.array(self.s157['tu']/(1e9))
#self.t157 = t157-t157[0]
#t74 = np.array(self.s74['tu']/(1e9))
#self.t74 = t74 - t74[0]
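    # The static helper below is a sketch (invented sensor ids and distances) of
    # the groupby/rebase/join pattern used in _loadBS: one column per BeSpoon
    # link, indexed by elapsed seconds.
    @staticmethod
    def _example_bs_groupby():
        """ illustrative sketch, not used elsewhere in the class """
        raw = pd.DataFrame({'Sensor': [157, 74, 157, 74],
                            'd':      [2.1, 3.4, 2.0, 3.5]},
                           index=[1000, 1000, 1500, 1500])     # ms timestamps
        cols = []
        for s, g in raw.groupby('Sensor'):
            t = g.index.values/1e3                  # ms -> s
            t = t - t[0]                            # elapsed time
            cols.append(pd.DataFrame(g['d'].values,
                                     columns=['HandRight-'+str(s)],
                                     index=t))
        return cols[0].join(cols[1])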
def _loadhkb(self,day=11,serie='',scenario='20',run=1,source='CITI'):
""" load hkb measurement data
Parameters
----------
day : string
serie : string
scenario : string
run : int
source : 'string'
Returns
-------
update self.hkb
"""
if day == 11:
if serie == 5:
source = 'UR1'
if day==11:
self.dHKB ={'AP1':1,'AP2':2,'AP3':3,'AP4':4,
'HeadRight':5,'TorsoTopRight':6,'TorsoTopLeft':7,'BackCenter':8,'ElbowRight':9,'ElbowLeft':10,'HipRight':11,'WristRight':12,'WristLeft':13,'KneeLeft':14,'AnkleRight':16,'AnkleLeft':15}
if source=='UR1' :
dirname = os.path.join(self.rootdir,'POST-TREATED','11-06-2014','HIKOB')
elif source=='CITI':
dirname = os.path.join(self.rootdir,'POST-TREATED','11-06-2014','HIKOB','CITI')
if day==12:
            self.dHKB= {'AP1':1,'AP2':2,'AP3':3,'AP4':4,'Jihad:TorsoTopRight':10,'Jihad:TorsoTopLeft':9,'Jihad:BackCenter':11,'Jihad:ShoulderLeft':12,
                        'Nicolas:TorsoTopRight':6,'Nicolas:TorsoTopLeft':5,'Nicolas:BackCenter':7,'Nicolas:ShoulderLeft':8,
                        'Eric:TorsoTopRight':15,'Eric:TorsoTopLeft':13,'Eric:BackCenter':16,'Eric:ShoulderLeft':14}
#if source=='UR1':
dirname = os.path.join(self.rootdir,'POST-TREATED','12-06-2014','HIKOB')
files = os.listdir(dirname)
self.idHKB={}
for k in self.dHKB:
self.idHKB[self.dHKB[k]]=k
if serie != '':
self._filehkb = [ x for x in files if 'S'+str(serie) in x][0]
tt = self._filehkb.split('_')
if source == 'UR1':
self.scenario=tt[0].replace('Sc','')
self.run = tt[2].replace('R','')
self.typ = tt[3]
self.video = tt[4].replace('.mat','')
elif source == 'CITI':
self.scenario=tt[0].replace('Sc','')+tt[1]
self.run = tt[3].replace('r','')
self.typ = tt[4]
if self.typ == 'HKB':
self.typ = 'HKBS'
self.video = tt[5].replace('.mat','')
else:
            filesc = [ x for x in files if 'Sc'+scenario in x ]
            if source=='UR1':
                self._filehkb = [ x for x in filesc if 'R'+str(run) in x ][0]
            else:
                self._filehkb = [ x for x in filesc if 'r'+str(run) in x ][0]
data = io.loadmat(os.path.join(dirname,self._filehkb))
if source=='UR1':
self.rssi = data['rssi']
self.thkb = data['t']
else:
self.rssi = data['val']
self.thkb = np.arange(np.shape(self.rssi)[2])*25.832e-3
def topandas():
try:
self.hkb = pd.DataFrame(index=self.thkb[0])
except:
self.hkb = pd.DataFrame(index=self.thkb)
for k in self.idHKB:
for l in self.idHKB:
if k!=l:
col = self.idHKB[k]+'-'+self.idHKB[l]
rcol = self.idHKB[l]+'-'+self.idHKB[k]
if rcol not in self.hkb.columns:
rssi = self.rssi[k-1,l-1,:]
self.hkb[col] = rssi
topandas()
self.hkb = self.hkb[self.hkb!=0]
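    # The static helper below sketches what the nested topandas() helper does in
    # _loadhkb (node names and levels are invented): a (n_node x n_node x n_time)
    # rssi array is flattened into one DataFrame column per link, keeping a
    # single direction per node pair.
    @staticmethod
    def _example_rssi_to_dataframe():
        """ illustrative sketch, not used elsewhere in the class """
        names = {1: 'AP1', 2: 'AP2', 3: 'HeadRight'}
        t = np.arange(4)*25.832e-3                  # HKB sampling period (s)
        rssi = np.random.randn(3, 3, 4) - 60.       # hypothetical levels in dBm
        df = pd.DataFrame(index=t)
        for k in names:
            for l in names:
                if k != l:
                    col = names[k]+'-'+names[l]
                    rcol = names[l]+'-'+names[k]
                    if rcol not in df.columns:
                        df[col] = rssi[k-1, l-1, :]
        return df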
def compute_visibility(self,techno='HKB',square_mda=True,all_links=True):
""" determine visibility of links for a given techno
Parameters
----------
techno string
select the given radio technology of the nodes to determine
the visibility matrix
        square_mda boolean
            select output format
            True : (device x device x timestamp)
            False : (link x timestamp)
        all_links : bool
            compute all links or just those for which data is available
        Returns
        -------
        if square_mda = True
            intersection : (ndevice x ndevice x nb_timestamp)
                matrix of intersection (1 if link is cut 0 otherwise)
            links : (ndevice)
                name of the links
        if square_mda = False
            intersection : (nblink x nb_timestamp)
                matrix of intersection (1 if link is cut 0 otherwise)
            links : (nblink x 2)
                name of the links
Example
-------
>>> from pylayers.measures.cormoran import *
>>> import matplotlib.pyplot as plt
>>> C=CorSer(serie=14,day=12)
>>> inter,links=C.compute_visibility(techno='TCR',square_mda=True)
>>> inter.shape
(15, 15, 12473)
        >>> C.imshowvisibility_i(inter,links)
"""
if techno == 'TCR':
if not ((self.typ == 'TCR') or (self.typ == 'FULL')):
                raise AttributeError('Serie has no data for techno: '+techno)
hname = self.tcr.keys()
dnode=copy.copy(self.dTCR)
dnode.pop('COORD')
prefix = 'TCR:'
elif techno=='HKB':
if not ((self.typ == 'HKBS') or (self.typ == 'FULL')):
                raise AttributeError('Serie has no data for techno: '+techno)
hname = self.hkb.keys()
dnode=self.dHKB
prefix = 'HKB:'
# get link list
if all_links:
import itertools
links =[l for l in itertools.combinations(dnode.keys(),2)]
else:
links=[n.split('-') for n in hname]
links = [l for l in links if ('COORD' not in l[0]) and ('COORD' not in l[1])]
#mapping between device name in self.hkb and on body/in self.devdf
dev_bid = [self.devmapper(k,techno=techno)[2] for k in dnode.keys()]
nb_totaldev=len(np.unique(self.devdf['id']))
# extract all dev position on body
# Mpdev : (3 x (nb devices and nb infra nodes) x nb_timestamp)
        Mpdev = np.empty((3,len(dev_bid),len(self.devdf.index)//nb_totaldev))
# get all positions
for ik,i in enumerate(dev_bid) :
if i in self.din:
Mpdev[:,ik,:] = self.din[i]['p'][:,np.newaxis]
else:
pts = self.devdf[self.devdf['id']==i][['x','y','z']].values.T
if np.prod(pts.shape)!=0:
Mpdev[:,ik,:] = pts
# create A and B from links
nA = np.array([prefix+ str(dnode[l[0]]) for l in links])
nB = np.array([prefix+ str(dnode[l[1]]) for l in links])
dma = dict(zip(dev_bid,range(len(dev_bid))))
mnA = [dma[n] for n in nA]
mnB = [dma[n] for n in nB]
A=Mpdev[:,mnA]
B=Mpdev[:,mnB]
# intersect2D matrix is
# d_0: nb links
#d_1: (cylinder number) * nb body + 1 * nb cylinder_object
# d_2 : nb frame
intersect2D = np.zeros((len(links),
11*len(self.subject) + len(self.interf),
Mpdev.shape[-1]))
# usub : index axes subject
usub_start=0
usub_stop=0
# C-D correspond to bodies segments
#C or D : 3 x 11 body segments x time
# radius of cylinders are (nb_cylinder x time)
for b in self.B:
print( 'processing shadowing from ',b)
# if b is a body not a cylinder
if not 'Cylindre' in b:
uta = self.B[b].sl[:,0].astype('int')
uhe = self.B[b].sl[:,1].astype('int')
rad = self.B[b].sl[:,2]
C = self.B[b].d[:,uta,:]
D = self.B[b].d[:,uhe,:]
try:
radius = np.concatenate((radius,rad[:,np.newaxis]*np.ones((1,C.shape[2]))),axis=0)
except:
radius = rad[:,np.newaxis]*np.ones((1,C.shape[2]))
usub_start=usub_stop
usub_stop=usub_stop+11
else:
cyl = self.B[b]
# top of cylinder
top = cyl.d[:,cyl.topnode,:]
# bottom of cylinder =top with z =0
bottom = copy.copy(cyl.d[:,cyl.topnode,:])
bottom[2,:]=0.02
#top 3 x 1 X time
C=top[:,np.newaxis,:]
D=bottom[:,np.newaxis,:]
radius = np.concatenate((radius,cyl.radius[np.newaxis]))
usub_start=usub_stop
usub_stop=usub_stop+1
f,g,X,Y,alpha,beta,dmin=seg.segdist(A,B,C,D,hard=True)
intersect2D[:,usub_start:usub_stop,:]=g
# import ipdb
# ipdb.set_trace()
#USEFUL Lines for debug
#########################
# def plt3d(ndev=53,ncyl=0,kl=11499):
# fig=plt.figure()
# ax=fig.add_subplot(111,projection='3d')
# if not isinstance(kl,list):
# kl=[kl]
# for ktime in kl:
# ax.plot([A[0,ndev,ktime],B[0,ndev,ktime]],[A[1,ndev,ktime],B[1,ndev,ktime]],[A[2,ndev,ktime],B[2,ndev,ktime]])
# [ax.plot([C[0,k,ktime],D[0,k,ktime]],[C[1,k,ktime],D[1,k,ktime]],[C[2,k,ktime],D[2,k,ktime]],'k') for k in range(11) ]
# ax.plot([X[0,ndev,ncyl,ktime],Y[0,ndev,ncyl,ktime]],[X[1,ndev,ncyl,ktime],Y[1,ndev,ncyl,ktime]],[X[2,ndev,ncyl,ktime],Y[2,ndev,ncyl,ktime]])
# ax.auto_scale_xyz([-5, 5], [-5, 5], [0, 2])
# plt.show()
# import ipdb
# ipdb.set_trace()
uinter1 = np.where((intersect2D<=(radius-0.01)))
uinter0 = np.where((intersect2D>(radius-0.01)))
# intersect2D_=copy.copy(intersect2D)
intersect2D[uinter1[0],uinter1[1],uinter1[2]]=1
intersect2D[uinter0[0],uinter0[1],uinter0[2]]=0
# #integrate the effect of all bodies by summing on axis 1
intersect = np.sum(intersect2D,axis=1)>0
if square_mda:
dev= np.unique(links)
ddev = dict(zip(dev,range(len(dev))))
            lmap = np.array([ (ddev[x[0]],ddev[x[1]]) for x in links ])
M = np.nan*np.ones((len(dev),len(dev),intersect.shape[-1]))
for i in range(len(intersect)):
id1 = lmap[i][0]
id2 = lmap[i][1]
M[id1,id2,:]=intersect[i,:]
M[id2,id1,:]=intersect[i,:]
intersect=M
links = dev
self._visilinks = links
self._visiintersect = intersect
return intersect,links
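    # The static helper below sketches the square_mda folding performed at the end
    # of compute_visibility (link names and values are invented): a
    # (n_link x n_time) shadowing indicator is scattered into a symmetric
    # (n_dev x n_dev x n_time) array, NaN marking pairs that have no link.
    @staticmethod
    def _example_square_mda():
        """ illustrative sketch, not used elsewhere in the class """
        links = [('AP1', 'HeadRight'), ('AP1', 'WristLeft')]
        inter = np.array([[0, 1, 1], [0, 0, 1]])    # hypothetical (n_link x n_time)
        dev = np.unique(links)
        ddev = dict(zip(dev, range(len(dev))))
        M = np.nan*np.ones((len(dev), len(dev), inter.shape[-1]))
        for i, (a, b) in enumerate(links):
            M[ddev[a], ddev[b], :] = inter[i, :]
            M[ddev[b], ddev[a], :] = inter[i, :]
        return M, dev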
def imshowvisibility(self,techno='HKB',t=0,**kwargs):
""" imshow visibility mda
Parameters
----------
techno : (HKB|TCR)
t : float
time in second
Examples
--------
>>> from pylayers.measures.cormoran import *
>>> import matplotlib.pyplot as plt
>>> C=CorSer(serie=6,day=12)
>>> inter,links=C.compute_visibility(techno='TCR',square_mda=True)
>>> i,l=C.imshowvisibility_i(inter,links)
See Also
--------
pylayers.measures.CorSer.compute_visibility()
"""
defaults = { 'grid':True,
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
if 'fig' not in kwargs:
fig = plt.figure()
else:
fig = kwargs.pop('fig')
if 'ax' not in kwargs:
ax = fig.add_subplot(111)
else:
ax = kwargs.pop('ax')
if not '_visiintersect' in dir(self):
print( 'Visibility computed only once')
self.compute_visibility(techno=techno)
links = self._visilinks
inter = self._visiintersect
kt=np.where(self.tmocap <= t)[0][-1]
plt.xticks(np.arange(0, len(links), 1.0))
plt.yticks(np.arange(0, len(links), 1.0))
ax.set_xlim([-0.5,len(links)-0.5])
ax.set_ylim([len(links)-0.5,-0.5])
ax.xaxis.set_ticks_position('top')
xtickNames = plt.setp(ax, xticklabels=links)
ytickNames = plt.setp(ax, yticklabels=links)
plt.setp(xtickNames, rotation=90, fontsize=8)
plt.setp(ytickNames, rotation=0, fontsize=8)
ims=[]
ax.imshow(inter[:,:,kt],interpolation='nearest')
if kwargs['grid']:
ax.grid()
return fig,ax
def _show3i(self,t=0,**kwargs):
""" show3 interactive
"""
fig =plt.figure(num='Jog',figsize=(5,1.5))
        #Setting the body time to index -10 is a trick to make the interferer
        #cylinders appear, because __refreshshow3i only updates the cylinder data:
        #if a cylinder is not present in the first _show3 call, it is never
        #displayed later.
time=self.B[self.subject[0]].time
fId = np.where(time<= t)[0][-1]
kwargs['bodytime']=[self.tmocap[-10]]
kwargs['returnfig']=True
kwargs['tagtraj']=False
mayafig = self._show3(**kwargs)
self.__refreshshow3i(fId)
# ax.grid()
# matplotlib Widgets
slax=plt.axes([0.1, 0.5, 0.8, 0.3])
slax.set_title('t='+str(time[fId]),loc='left')
sliderx = Slider(slax, "time", 0, len(time),
valinit=fId, color='#AAAAAA')
def update_x(val):
value = int(sliderx.val)
self.__refreshshow3i(val)
slax.set_title('t='+str(time[val]),loc='left')
fig.canvas.draw_idle()
sliderx.on_changed(update_x)
def plus(event):
sliderx.set_val(sliderx.val +1)
fig.canvas.draw_idle()
def minus(event):
sliderx.set_val(sliderx.val -1)
fig.canvas.draw_idle()
def pplus(event):
sliderx.set_val(sliderx.val +10)
fig.canvas.draw_idle()
def mminus(event):
sliderx.set_val(sliderx.val -10)
fig.canvas.draw_idle()
#QUIT by pressing 'q'
def press(event):
if event.key == 'q':
mlab.close(mayafig)
plt.close(fig)
fig.canvas.mpl_connect('key_press_event', press)
#-1 frame axes
axm = plt.axes([0.2, 0.05, 0.1, 0.15])
bm = Button(axm, '-1')
bm.on_clicked(minus)
#+1 frame axes
axp = plt.axes([0.7, 0.05, 0.1, 0.15])
bp = Button(axp, '+1')
bp.on_clicked(plus)
#-10 frames axes
axmm = plt.axes([0.1, 0.05, 0.1, 0.15])
bmm = Button(axmm, '-10')
bmm.on_clicked(mminus)
#+10 frames axes
axpp = plt.axes([0.8, 0.05, 0.1, 0.15])
bpp = Button(axpp, '+10')
bpp.on_clicked(pplus)
plt.show()
def _show3idemo(self,t=0,**kwargs):
""" show3 interactive
"""
defaults={'nodename':'TorsoTopLeft'}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
fig =plt.figure(num='Jog',figsize=(5,1.5))
        #Setting the body time to index -10 is a trick to make the interferer
        #cylinders appear, because __refreshshow3i only updates the cylinder data:
        #if a cylinder is not present in the first _show3 call, it is never
        #displayed later.
time=self.B[self.subject[0]].time
fId = np.where(time<= t)[0][-1]
kwargs['bodytime']=[self.tmocap[-10]]
kwargs['returnfig']=True
kwargs['tagtraj']=False
mayafig = self._show3(**kwargs)
self.__refreshshow3i(fId)
# ax.grid()
# matplotlib Widgets
slax=plt.axes([0.1, 0.5, 0.8, 0.3])
slax.set_title('t='+str(time[fId]),loc='left')
sliderx = Slider(slax, "time", 0, len(time),
valinit=fId, color='#AAAAAA')
def update_x(val):
value = int(sliderx.val)
self.__refreshshow3i(val)
slax.set_title('t='+str(time[val]),loc='left')
vline0.set_data(([time[value],time[value]],[0,1]))
vline1.set_data(([time[value],time[value]],[0,1]))
vline2.set_data(([time[value],time[value]],[0,1]))
vline3.set_data(([time[value],time[value]],[0,1]))
fig.canvas.draw_idle()
fig2.canvas.draw_idle()
sliderx.on_changed(update_x)
def plus(event):
sliderx.set_val(sliderx.val +1)
fig.canvas.draw_idle()
def minus(event):
sliderx.set_val(sliderx.val -1)
fig.canvas.draw_idle()
def pplus(event):
sliderx.set_val(sliderx.val +10)
fig.canvas.draw_idle()
def mminus(event):
sliderx.set_val(sliderx.val -10)
fig.canvas.draw_idle()
#QUIT by pressing 'q'
def press(event):
if event.key == 'q':
mlab.close(mayafig)
plt.close(fig)
plt.close(fig2)
fig.canvas.mpl_connect('key_press_event', press)
#-1 frame axes
axm = plt.axes([0.2, 0.05, 0.1, 0.15])
bm = Button(axm, '-1')
bm.on_clicked(minus)
#+1 frame axes
axp = plt.axes([0.7, 0.05, 0.1, 0.15])
bp = Button(axp, '+1')
bp.on_clicked(plus)
#-10 frames axes
axmm = plt.axes([0.1, 0.05, 0.1, 0.15])
bmm = Button(axmm, '-10')
bmm.on_clicked(mminus)
#+10 frames axes
axpp = plt.axes([0.8, 0.05, 0.1, 0.15])
bpp = Button(axpp, '+10')
bpp.on_clicked(pplus)
fig2,ax2 = plt.subplots(4,1,figsize=(12,6))
ax2=ax2.ravel()
df0 = self.getlink(kwargs['nodename'],'AP1',techno='HKB')
df0.plot(ax=ax2[0],fig=fig2)
df1 = self.getlink(kwargs['nodename'],'AP2',techno='HKB')
df1.plot(ax=ax2[1],fig=fig2)
df2 = self.getlink(kwargs['nodename'],'AP3',techno='HKB')
df2.plot(ax=ax2[2],fig=fig2)
df3 = self.getlink(kwargs['nodename'],'AP4',techno='HKB')
df3.plot(ax=ax2[3],fig=fig2)
ax2[0].set_ylabel('AP1')
ax2[1].set_ylabel('AP2')
ax2[2].set_ylabel('AP3')
ax2[3].set_ylabel('AP4')
vline0 = ax2[0].axvline(x=time[fId], color='red')
vline1 = ax2[1].axvline(x=time[fId], color='red')
vline2 = ax2[2].axvline(x=time[fId], color='red')
vline3 = ax2[3].axvline(x=time[fId], color='red')
fig2.suptitle(kwargs['nodename'])
plt.show()
def __refreshshow3i(self,kt):
""" show3 update for interactive mode
USED in imshowvisibility_i
"""
t=self.tmocap[kt]
for ib,b in enumerate(self.B):
self.B[b].settopos(t=t,cs=True)
try:
# body
X=np.hstack((self.B[b]._pta,self.B[b]._phe))
self.B[b]._mayapts.mlab_source.set(x=X[0,:], y=X[1,:], z=X[2,:])
# device
udev = [self.B[b].dev[i]['uc3d'][0] for i in self.B[b].dev]
Xd=self.B[b]._f[kt,udev,:].T
self.B[b]._mayadev.mlab_source.set(x=Xd[0,:], y=Xd[1,:], z=Xd[2,:])
# name
uupper = np.where(X[2]==X[2].max())[0]
self.B[b]._mayaname.actors.pop()
self.B[b]._mayaname = mlab.text3d(X[0,uupper][0],X[1,uupper][0],X[2,uupper][0],self.B[b].name,scale=0.05,color=(1,0,0))
# s = np.hstack((cylrad,cylrad))
except:
# cylinder
X=np.vstack((self.B[b].top,self.B[b].bottom))
self.B[b]._mayapts.mlab_source.set(x=X[:,0], y=X[:,1], z=X[:,2])
# name
self.B[b]._mayaname.actors.pop()
self.B[b]._mayaname = mlab.text3d(self.B[b].top[0],self.B[b].top[1],self.B[b].top[2],self.B[b].name,scale=0.05,color=(1,0,0))
#vdict
V = self.B[b].traj[['vx','vy','vz']].iloc[self.B[b].toposFrameId].values
self.B[b]._mayavdic.mlab_source.set(x= self.B[b].top[0],y=self.B[b].top[1],z=self.B[b].top[2],u=V[ 0],v=V[ 1],w=V[ 2])
def imshowvisibility_i(self,techno='HKB',t=0,**kwargs):
""" imshow visibility mda interactive
Parameters
----------
inter : (nb link x nb link x timestamps)
links : (nblinks)
time : intial time (s)
Example
-------
>>> from pylayers.measures.cormoran import *
>>> import matplotlib.pyplot as plt
>>> C=CorSer(serie=6,day=12)
        >>> inter,links=C.compute_visibility(techno='TCR',square_mda=True)
>>> i,l=C.imshowvisibility_i(inter,links)
"""
# if in_ipynb():
# notebook = False #program launch in ipyhon notebook
# from IPython.html import widgets # Widget definitions
# from IPython.display import display, clear_output# Used to display widgets in the notebook
# else :
# notebook = False
if not '_visiintersect' in dir(self):
print( 'Visibility is computed only once, Please wait\n')
self.compute_visibility(techno=techno)
links = self._visilinks
inter = self._visiintersect
fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.3)
time=self.tmocap
fId = np.where(time<=t)[0][-1]
vertc = [(0,-10),(0,-10),(0,10),(0,-10)]
poly = plt.Polygon(vertc)
pp = ax.add_patch(poly)
plt.xticks(np.arange(0, len(links), 1.0))
plt.yticks(np.arange(0, len(links), 1.0))
ax.set_xlim([-0.5,len(links)-0.5])
ax.set_ylim([len(links)-0.5,-0.5])
ax.xaxis.set_ticks_position('top')
xtickNames = plt.setp(ax, xticklabels=links)
ytickNames = plt.setp(ax, yticklabels=links)
plt.setp(xtickNames, rotation=90, fontsize=8)
plt.setp(ytickNames, rotation=0, fontsize=8)
ims=[]
l=ax.imshow(inter[:,:,fId],interpolation='nearest')
        #Setting the body time to index -10 is a trick to make the interferer
        #cylinders appear, because __refreshshow3i only updates the cylinder data:
        #if a cylinder is not present in the first _show3 call, it is never
        #displayed later.
kwargs['bodytime']=[self.tmocap[-10]]
kwargs['returnfig']=True
kwargs['tagtraj']=False
mayafig = self._show3(**kwargs)
self.__refreshshow3i(fId)
# ax.grid()
# matplotlib Widgets
slax=plt.axes([0.1, 0.15, 0.8, 0.05])
slax.set_title('t='+str(time[fId]),loc='left')
sliderx = Slider(slax, "time", 0, inter.shape[-1],
valinit=fId, color='#AAAAAA')
# else :
# int_range = widgets.IntSliderWidget(min=0,max=inter.shape[-1],step=1,value=fId)
# display(int_range)
def update_x(val):
value = int(sliderx.val)
sliderx.valtext.set_text('{}'.format(value))
l.set_data(inter[:,:,value])
self.__refreshshow3i(val)
slax.set_title('t='+str(time[val]),loc='left')
fig.canvas.draw_idle()
sliderx.on_changed(update_x)
# else:
# def update_x(name,value):
# clear_output(wait=True)
# display(plt.gcf())
# plt.imshow(inter[:,:,value],interpolation='nearest')
# # l.set_data(inter[:,:,value])
# kwargs['bodytime']=[self.tmocap[value]]
# self._show3(**kwargs)
# myu.inotshow('fig1',width=200,height=200,magnification=1)
# # slax.set_title('t='+str(time[val]),loc='left')
# # fig.canvas.draw_idle()
# int_range.on_trait_change(update_x, 'value')
def plus(event):
sliderx.set_val(sliderx.val +1)
fig.canvas.draw_idle()
# if not notebook:
sliderx.on_changed(update_x)
def minus(event):
sliderx.set_val(sliderx.val -1)
fig.canvas.draw_idle()
# if not notebook:
sliderx.on_changed(update_x)
def pplus(event):
sliderx.set_val(sliderx.val +10)
fig.canvas.draw_idle()
# if not notebook:
sliderx.on_changed(update_x)
def mminus(event):
sliderx.set_val(sliderx.val -10)
fig.canvas.draw_idle()
# if not notebook:
sliderx.on_changed(update_x)
# #QUIT by pressing 'q'
# def press(event):
# if event.key == 'q':
# mlab.close(mayafig)
# plt.close(fig)
# fig.canvas.mpl_connect('key_press_event', press)
# if not notebook:
#-1 frame axes
axm = plt.axes([0.3, 0.05, 0.1, 0.075])
bm = Button(axm, '-1')
bm.on_clicked(minus)
#+1 frame axes
axp = plt.axes([0.7, 0.05, 0.1, 0.075])
bp = Button(axp, '+1')
bp.on_clicked(plus)
#-10 frames axes
axmm = plt.axes([0.1, 0.05, 0.1, 0.075])
bmm = Button(axmm, '-10')
bmm.on_clicked(mminus)
#+10 frames axes
axpp = plt.axes([0.9, 0.05, 0.1, 0.075])
bpp = Button(axpp, '+10')
bpp.on_clicked(pplus)
plt.show()
def _distancematrix(self):
"""Compute the distance matrix between the nodes
self.dist : (nb frame x nb_node x nb_node)
self.dist_nodesmap : list of used nodes (useful to make the association ;) )
"""
if not isinstance(self.B,dict):
B={self.subject[0]:self.B}
else :
B=self.B
bn= []
for b in B:
if 'dev' in dir(B[b]):
tdev=[]
for k in B[b].dev:
bn.append(k)
tdev.append(B[b].dev[k]['uc3d'][0])
tdev=np.array(tdev)
try:
pnb = np.concatenate((pnb,B[b]._f[:,tdev,:]),axis=1)
except:
pnb = B[b]._f[:,tdev,:]
ln = []
uin = []
# infrastructure nodes
if ('HK' in self.typ) or ('FULL' in self.typ):
uin.extend(['HKB:1','HKB:2','HKB:3','HKB:4'])
if ('TCR' in self.typ) or ('FULL' in self.typ):
# TCR:31 is the coordinator (1.7719,-3.26)
uin.extend(['TCR:32','TCR:24','TCR:27','TCR:28','TCR:31'])
if self.day == 12:
if ('BS' in self.typ) or ('FULL' in self.typ):
uin.extend(['BS:74','BS:157'])
ln = uin + bn
pin = np.array([self.din[d]['p'] for d in uin])
pin2 = np.empty((pnb.shape[0],pin.shape[0],pin.shape[1]))
pin2[:,:,:] = pin
p = np.concatenate((pin2,pnb),axis=1)
self.points = p
self.dist = np.sqrt(np.sum((p[:,:,np.newaxis,:]-p[:,np.newaxis,:,:])**2,axis=3))
self.dist_nodesmap = ln
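    # The static helper below is a sketch (random points) of the broadcasting
    # trick used above in _distancematrix: complementary new axes turn a
    # (n_frame x n_node x 3) point array into a (n_frame x n_node x n_node)
    # matrix of pairwise distances.
    @staticmethod
    def _example_pairwise_distances():
        """ illustrative sketch, not used elsewhere in the class """
        p = np.random.rand(5, 4, 3)                 # 5 frames, 4 nodes, xyz
        diff = p[:, :, np.newaxis, :] - p[:, np.newaxis, :, :]
        dist = np.sqrt(np.sum(diff**2, axis=3))
        assert dist.shape == (5, 4, 4)
        return dist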
def _computedistdf(self):
"""Compute the distance dataframe from distance matrix
"""
# HIKOB
if ('HK' in self.typ) or ('FULL' in self.typ):
devmap = {self.devmapper(k,'hkb')[0]:self.devmapper(k,'hkb')[2] for k in self.dHKB}
udev = np.array([[self.dist_nodesmap.index(devmap[k.split('-')[0]]),self.dist_nodesmap.index(devmap[k.split('-')[1]])] for k in self.hkb.keys()])
iudev =np.array([(self.dist_nodesmap[u[0]]+'-'+self.dist_nodesmap[u[1]]) for u in udev])
df = pd.DataFrame(self.dist[:,udev[:,0],udev[:,1]],columns=iudev,index=self.tmocap)
# BE Spoon
if ('BS' in self.typ) or ('FULL' in self.typ):
devmap = {self.devmapper(k,'BS')[0]:self.devmapper(k,'BS')[2] for k in self.dBS}
udev = np.array([[self.dist_nodesmap.index(devmap[k.split('-')[0]]),self.dist_nodesmap.index(devmap[k.split('-')[1]])] for k in self.bespo.keys()])
iudev =np.array([(self.dist_nodesmap[u[0]]+'-'+self.dist_nodesmap[u[1]]) for u in udev])
dfb = pd.DataFrame(self.dist[:,udev[:,0],udev[:,1]],columns=iudev,index=self.tmocap)
df = df.join(dfb)
del dfb
if ('TCR' in self.typ) or ('FULL' in self.typ):
devmap = {self.devmapper(k,'tcr')[0]:self.devmapper(k,'tcr')[2] for k in self.dTCR}
udev = np.array([[self.dist_nodesmap.index(devmap[k.split('-')[0]]),
self.dist_nodesmap.index(devmap[k.split('-')[1]])]
for k in self.tcr.keys() ])
# for k in self.tcr.keys() if not 'COORD' in k])
iudev =np.array([(self.dist_nodesmap[u[0]]+'-'+self.dist_nodesmap[u[1]]) for u in udev])
dft = pd.DataFrame(self.dist[:,udev[:,0],udev[:,1]],columns=iudev,index=self.tmocap)
if ('FULL' in self.typ):
df = df.join(dft)
else :
df = dft
del dft
self.distdf=df
# def accessdm(self,a,b,techno=''):
# """ access to the distance matrix
# give name|id of node a and b and a given techno. retrun Groung truth
# distance between the 2 nodes
# # """
# # a,ia,bia,subja=self.devmapper(a,techno)
# # b,ib,bib,subjb=self.devmapper(b,techno)
# if 'HKB' in techno :
# if isinstance(a,str):
# ia = self.dHKB[a]
# else:
# ia = a
# a = self.idHKB[a]
# if isinstance(b,str):
# ib = self.dHKB[b]
# else:
# ib = b
# b = self.idHKB[b]
# elif 'TCR' in techno :
# if isinstance(a,str):
# ia = self.dTCR[a]
# else:
# ia = a
# a = self.idTCR[a]
# if isinstance(b,str):
# ib = self.dTCR[b]
# else:
# ib = b
# b = self.idTCR[b]
# else :
# raise AttributeError('please give only 1 techno or radio node')
# ka = techno+':'+str(ia)
# kb = techno+':'+str(ib)
# ua = self.dist_nodesmap.index(ka)
# ub = self.dist_nodesmap.index(kb)
# return(ua,ub)
# c3ds = self.B._f.shape
# if 'Full' in self.typ:
# pdev= np.empty((c3ds[0],len(self.dHKB)+len(self.tcr)+len(bs),3))
# elif 'HK' in self.typ:
# pdev= np.empty((c3ds[0],len(self.dHKB)+len(bs),3))
# elif 'TCR' in self.typ:
# pdev= np.empty((c3ds[0],len(self.tcr),3))
# else:
# raise AttributeError('invalid self.typ')
# self.B.network()
# DB = self.B.D2
# ludev = np.array([[i,self.B.dev[i]['uc3d'][0]] for i in self.B.dev])
# for i in ludev:
# pdev[:,eval(i[0])-1,:] = self.B._f[:,i[1],:]
# # self.dist = np.sqrt(np.sum((mpts[:,np.newaxis,:]-mpts[np.newaxis,:])**2,axis=2))
def vlc(self):
""" play video of the associated serie
"""
videofile = os.path.join(self.rootdir,'POST-TREATED', str(self.day)+'-06-2014','Videos')
ldir = os.listdir(videofile)
        luldir = [ self._filename in x for x in ldir ]
try:
uldir = luldir.index(True)
_filename = ldir[uldir]
filename = os.path.join(videofile,_filename)
os.system('vlc '+filename +'&' )
except:
raise AttributeError('file '+ self._filename + ' not found')
def snapshot(self,t0=0,offset=15.5,title=True,save=False,fig=[],ax=[],figsize=(10,10)):
""" single snapshot plot
Parameters
----------
t0: float
offset : float
title : boolean
save : boolean
fig
ax
figsize : tuple
"""
if fig ==[]:
fig=plt.figure(figsize=figsize)
if ax == []:
ax = fig.add_subplot(111)
if 'video_sec' in self.offset[self._filename]:
offset = self.offset[self._filename]['video_sec']
elif offset != '':
offset = offset
else:
offset=0
videofile = os.path.join(self.rootdir,'POST-TREATED',str(self.day)+'-06-2014','Videos')
ldir = os.listdir(videofile)
        luldir = [ self._filename in x for x in ldir ]
uldir = luldir.index(True)
_filename = ldir[uldir]
filename = os.path.join(videofile,_filename)
        # local import: the module-level moviepy import is commented out
        from moviepy.editor import VideoFileClip
        vc = VideoFileClip(filename)
F0 = vc.get_frame(t0+offset)
I0 = img_as_ubyte(F0)
ax.imshow(F0)
if title:
ax.set_title('t = '+str(t0)+'s')
if save :
plt.savefig(self._filename +'_'+str(t0) + '_snap.png',format='png')
return fig,ax
def snapshots(self,t0=0,t1=10,offset=15.5):
""" take snapshots
Parameters
----------
t0 : float
t1 : float
"""
if 'video_sec' in self.offset[self._filename]:
offset = self.offset[self._filename]['video_sec']
elif offset != '':
offset = offset
else:
offset=0
videofile = os.path.join(self.rootdir,'POST-TREATED',str(self.day)+'-06-2014','Videos')
ldir = os.listdir(videofile)
luldir = [ self._filename in x for x in ldir ]
uldir = luldir.index(True)
_filename = ldir[uldir]
filename = os.path.join(videofile,_filename)
        # local import, see snapshot()
        from moviepy.editor import VideoFileClip
        vc = VideoFileClip(filename)
F0 = vc.get_frame(t0+offset)
F1 = vc.get_frame(t1+offset)
I0 = img_as_ubyte(F0)
I1 = img_as_ubyte(F1)
plt.subplot(121)
plt.imshow(F0)
plt.title('t = '+str(t0)+'s')
plt.subplot(122)
plt.imshow(F1)
plt.title('t = '+str(t1)+'s')
def _show3(self,**kwargs):
""" mayavi 3d show of scenario
Parameters
----------
L : boolean
display layout (True)
body :boolean
display bodytime(True)
bodyname : boolean
display body name
bodytime: list
list of time instant where body topos has to be shown
devsize : float
device on body size (100)
devlist : list
list of device name to show on body
pattern : boolean
display devices pattern
trajectory : boolean
display trajectory (True)
tagtraj : boolean
tag on trajectory at the 'bodytime' instants (True)
tagname : list
name of the tagtrajs
tagpoffset : ndarray
offset of the tag positions (nb_of_tags x 3)
fontsizetag : float
size of the tag names
inodes : boolean
display infrastructure nodes
inname : boolean
            display infrastructure node name
innamesize : float,
size of name of infrastructure nodes (0.1)
incolor: str
color of infrastructure nodes ('r')
insize
size of infrastructure nodes (0.1)
camera : boolean
display Vicon camera position (True)
cameracolor : str
color of camera nodes ('b')
camerasize : float
size of camera nodes (0.1)
Examples
--------
>>> S = Corser(6)
>>> S._show3()
"""
defaults = { 'L':True,
'body':True,
'bodyname':True,
'subject':[],
'interf':True,
'trajectory' :False,
'trajectory_list' :[],
'devsize':100,
'devlist':[],
'pattern':False,
'inodes' : True,
'inname' : True,
'innamesize' : 0.1,
'incolor' : 'r',
'insize' : 0.1,
'camera':True,
'cameracolor' :'k',
'camerasize' :0.1,
'bodytime':[],
'tagtraj':True,
'tagname':[],
'tagpoffset':[],
'fontsizetag':0.1,
'trajectory_color_range':True,
'trajectory_linewidth':0.01
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
cold = pyu.coldict()
camhex = cold[kwargs['cameracolor']]
cam_color = tuple(pyu.rgb(camhex)/255.)
inhex = cold[kwargs['incolor']]
in_color = tuple(pyu.rgb(inhex)/255.)
if kwargs['subject'] == []:
subject = self.subject
else:
subject = kwargs['subject']
if kwargs['L']:
self.L._show3(opacity=0.5)
        v = list(self.din.items())
if kwargs['inodes']:
X= np.array([v[i][1]['p'] for i in range(len(v))])
mlab.points3d(X[:,0],X[:,1], X[:,2],scale_factor=kwargs['insize'],color=in_color)
if kwargs['pattern']:
for i in range(len(v)):
if not hasattr(self.din[v[i][0]]['ant'],'SqG'):
self.din[v[i][0]]['ant'].eval()
self.din[v[i][0]]['ant']._show3(po=v[i][1]['p'],
T=self.din[v[i][0]]['T'],
ilog=False,
minr=0.01,
maxr=0.2,
newfig=False,
title=False,
colorbar=False,
)
if kwargs['inname']:
[mlab.text3d(v[i][1]['p'][0],
v[i][1]['p'][1],
v[i][1]['p'][2]+v[i][1]['s3off'],
v[i][0],
scale=kwargs['innamesize'],color=in_color) for i in range(len(v))]
if kwargs['body']:
if kwargs['bodytime']==[]:
time =np.linspace(0,self.B[subject[0]].time[-1],5).astype(int)
# time=range(10,100,20)
else :
time=kwargs['bodytime']
for ki, i in enumerate(time):
for ib,b in enumerate(subject):
self.B[b].settopos(t=i,cs=True)
self.B[b]._show3(dev=True,
name = kwargs['bodyname'],
devlist=kwargs['devlist'],
devsize=kwargs['devsize'],
tube_sides=12,
pattern=kwargs['pattern'])
if kwargs['tagtraj']:
X=self.B[b].traj[['x','y','z']].values[self.B[b].toposFrameId]
if kwargs['tagpoffset']==[]:
X[2]=X[2]+0.2
else :
X=X+kwargs['tagpoffset'][ki]
if kwargs['tagname']==[]:
name = 't='+str(i)+'s'
else :
name = str(kwargs['tagname'][ki])
mlab.text3d(X[0],X[1],X[2],name,scale=kwargs['fontsizetag'])
if kwargs['interf']:
for ib,b in enumerate(self.interf):
self.B[b].settopos(t=i,cs=True)
self.B[b]._show3(name=kwargs['bodyname'],tube_sides=12)
if kwargs['trajectory']:
if kwargs['trajectory_list']==[]:
tr_subject = subject
else:
tr_subject = kwargs['trajectory_list']
for b in tr_subject:
self.B[b].traj._show3(color_range=kwargs['trajectory_color_range'],
linewidth=kwargs['trajectory_linewidth'])
if kwargs['camera'] :
mlab.points3d(self.cam[:,0],self.cam[:,1], self.cam[:,2],scale_factor=kwargs['camerasize'],color=cam_color)
mlab.view(-111.44127634143871,
60.40674368088245,
24.492297713984197,
                  np.array([-0.07235499, 0.04868631, -0.00314969]))
mlab.view(-128.66519195313163,
50.708933839573511,
24.492297713984247,
np.array([-0.07235499, 0.04868631, -0.00314969]))
    def anim(self):
        """ animate the bodies of the serie (mayavi)
        """
        self._show3(body=False,inname=False,trajectory=False)
[self.B[b].anim() for b in self.B]
mlab.view(-43.413544538477254,
74.048193730704611,
11.425837641867618,
                  np.array([ 0.48298163, 0.67806043, 0.0987967 ]))
def imshow(self,time=100,kind='time'):
""" DEPRECATED
Parameters
----------
kind : string
'mean','std'
"""
fig = plt.figure(figsize=(10,10))
self.D = self.rssi-self.rssi.swapaxes(0,1)
try:
timeindex = np.where(self.thkb[0]-time>0)[0][0]
except:
timeindex = np.where(self.thkb-time>0)[0][0]
if kind=='time':
dt1 = self.rssi[:,:,timeindex]
dt2 = self.D[:,:,timeindex]
if kind == 'mean':
dt1 = ma.masked_invalid(self.rssi).mean(axis=2)
dt2 = ma.masked_invalid(self.D).mean(axis=2)
if kind == 'std':
dt1 = ma.masked_invalid(self.rssi).std(axis=2)
dt2 = ma.masked_invalid(self.D).std(axis=2)
ax1 = fig.add_subplot(121)
#img1 = ax1.imshow(self.rssi[:,:,timeindex],interpolation='nearest',origin='lower')
img1 = ax1.imshow(dt1,interpolation='nearest')
labels = [ self.idHKB[x] for x in range(1,17)]
plt.xticks(range(16),labels,rotation=80,fontsize=14)
plt.yticks(range(16),labels,fontsize=14)
if kind=='time':
plt.title('t = '+str(time)+ ' s')
if kind=='mean':
plt.title(u'$mean(\mathbf{L})$')
if kind=='std':
plt.title(u'$std(\mathbf{L})$')
divider = make_axes_locatable(ax1)
cax1 = divider.append_axes("right", size="5%", pad=0.05)
clb1 = fig.colorbar(img1,cax1)
clb1.set_label('level dBm',fontsize=14)
ax2 = fig.add_subplot(122)
#img2 = ax2.imshow(self.D[:,:,timeindex],interpolation='nearest',origin='lower')
img2 = ax2.imshow(dt2,interpolation='nearest')
plt.title(u'$\mathbf{L}-\mathbf{L}^T$')
divider = make_axes_locatable(ax2)
plt.xticks(range(16),labels,rotation=80,fontsize=14)
plt.yticks(range(16),labels,fontsize=14)
cax2 = divider.append_axes("right", size="5%", pad=0.05)
clb2 = fig.colorbar(img2,cax2)
clb2.set_label('level dBm',fontsize=14)
plt.tight_layout()
plt.show()
#for k in range(1,17):
# for l in range(1,17):
# self.dHKB[(k,l)]=iHKB[k]+' - '+iHKB[l]
# cpt = cpt + 1
return fig,(ax1,ax2)
def lk2nd(self,lk):
""" transcode a lk from Id to real name
Parameters
----------
lk : string
Examples
--------
        >>> C=CorSer(6)
>>> lk = 'HKB:15-HKB:7'
>>> C.lk2nd(lk)
"""
u = lk.replace('HKB:','').split('-')
v = [ self.idHKB[int(x)] for x in u ]
return(v)
def _load_offset_dict(self):
""" load offset_dictionnary.bin
Returns
-------
d : dict
{'Sc20_S5_R1_HKBS': {'hkb_index': -148, 'video_sec': 32.622087273809527},
'Sc20_S6_R2_HKBS': {'bs_index': -124, 'hkb_index': -157},
'Sc21a_S13_R1_HKBS': {'hkb_index': 537},
'Sc21a_S14_R2_HKBS': {'hkb_index': 752},
'Sc21a_S15_R3_HKBS': {'hkb_index': 438},
'Sc21a_S16_R4_HKBS': {'hkb_index': 224},
'Sc21b_S21_R1_HKBS': {'hkb_index': 368},
'Sc21b_S22_R2_HKBS': {'hkb_index': -333},
'Sc21b_S23_R3_HKBS': {'hkb_index': 136},
'Sc22a_S9_R1_Full': {'hkb_index': 678}}
Notes
-----
This is used for synchronization purpose
"""
path = os.path.join(os.environ['CORMORAN'],'POST-TREATED')
d = pickle.load( open( os.path.join(path,'offset_dictionnary.bin'), "rb" ) )
return d
    def _save_offset_dict(self,d):
        """ save the offset dictionary into offset_dictionnary.bin
        """
        path = os.path.join(os.environ['CORMORAN'],'POST-TREATED')
        pickle.dump( d, open( os.path.join(path,'offset_dictionnary.bin'), "wb" ) )
def _save_data_off_dict(self,filename,typ,value):
""" save
- a given "value" of an for,
- a serie/run "filename",
- of a given typ (video|hkb|tcr|...)
"""
d = self._load_offset_dict()
try:
d[filename].update({typ:value})
except:
d[filename]={}
d[filename][typ]=value
self._save_offset_dict(d)
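    # The static helper below sketches the offset dictionary structure handled by
    # _load_offset_dict / _save_offset_dict / _save_data_off_dict. The entry reuses
    # the example shown in the _load_offset_dict docstring; only the mechanics of
    # updating one serie entry are illustrated.
    @staticmethod
    def _example_offset_entry():
        """ illustrative sketch, not used elsewhere in the class """
        d = {}
        filename, typ, value = 'Sc20_S5_R1_HKBS', 'hkb_index', -148
        try:
            d[filename].update({typ: value})
        except KeyError:
            d[filename] = {typ: value}
        return d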
def offset_setter_video(self,a='AP1',b='WristRight',**kwargs):
""" video offset setter
"""
defaults = { 'inverse':True
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
fig, axs = plt.subplots(nrows=2,ncols=1)
fig.subplots_adjust(bottom=0.3)
if isinstance(a,str):
ia = self.dHKB[a]
else:
ia = a
a = self.idHKB[a]
if isinstance(b,str):
ib = self.dHKB[b]
else:
            ib = b
b = self.idHKB[b]
time = self.thkb
if len(time) == 1:
time=time[0]
sab = self.hkb[a+'-'+b].values
sabt = self.hkb[a+'-'+b].index
hkb = axs[1].plot(sabt,sab,label = a+'-'+b)
axs[1].legend()
try :
init = self.offset[self._filename]['video_sec']
except:
init=time[0]
videofile = os.path.join(self.rootdir,'POST-TREATED',str(self.day)+'-06-2014','Videos')
ldir = os.listdir(videofile)
luldir = [ self._filename in x for x in ldir ]
uldir = luldir.index(True)
_filename = ldir[uldir]
filename = os.path.join(videofile,_filename)
        # local import, see snapshot()
        from moviepy.editor import VideoFileClip
        vc = VideoFileClip(filename)
F0 = vc.get_frame(init)
I0 = img_as_ubyte(F0)
axs[0].imshow(F0)
########
# slider
########
slide_xoffset_ax = plt.axes([0.1, 0.15, 0.8, 0.05])
sliderx = Slider(slide_xoffset_ax, "video offset", 0, self.hkb.index[-1],
valinit=init, color='#AAAAAA')
# vertc = [(0,-10),(0,-10),(0,10),(0,-10)]
# poly = plt.Polygon(vertc)
# pp = axs[1].add_patch(poly)
def update_x(val):
F0 = vc.get_frame(val)
I0 = img_as_ubyte(F0)
axs[0].imshow(F0)
fig.canvas.draw_idle()
sliderx.on_changed(update_x)
# def cursor(val):
# try :
# pp.remove()
# except:
# pass
# vertc = [(sabt[0]+val,min(sab)-10),(sabt[0]+val,min(sab)-10),(sabt[0]+val,max(sab)+10),(sabt[0]+val,max(sab)-10)]
# poly = plt.Polygon(vertc)
# pp = axs[1].add_patch(poly)
# sliderx.on_changed(cursor)
def plus(event):
sliderx.set_val(sliderx.val +0.2)
fig.canvas.draw_idle()
sliderx.on_changed(update_x)
def minus(event):
sliderx.set_val(sliderx.val -0.2)
fig.canvas.draw_idle()
sliderx.on_changed(update_x)
def setter(event):
self._save_data_off_dict(self._filename,'video_sec',sliderx.val)
self.offset= self._load_offset_dict()
axp = plt.axes([0.3, 0.05, 0.1, 0.075])
axset = plt.axes([0.5, 0.05, 0.1, 0.075])
axm = plt.axes([0.7, 0.05, 0.1, 0.075])
bp = Button(axp, '<-')
bp.on_clicked(minus)
bset = Button(axset, 'SET offs.')
bset.on_clicked(setter)
bm = Button(axm, '->')
bm.on_clicked(plus)
plt.show()
def offset_setter(self,a='HKB:1',b='HKB:12',techno='',**kwargs):
""" offset setter
"""
defaults = { 'inverse':True
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
if plt.isinteractive():
interactive = True
plt.ioff()
else :
interactive = False
fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.2, left=0.3)
a,ia,bia,subja,techno=self.devmapper(a,techno)
b,ib,bib,subjb,techno=self.devmapper(b,techno)
time = self.tmocap
if len(time.shape) == 2:
time = time[0,:]
try :
init = time[0]#self.offset[self._filename]['hkb_index']
except:
init=time[0]
var = self.getlinkd(ia,ib,techno).values
if kwargs['inverse']:
var = 10*np.log10(1./(var)**2)
gt = ax.plot(time,var)
ab = self.getlink(ia,ib,techno)
sab = ab.values
sabt = ab.index.values
technoval = ax.plot(sabt,sab)
########
# slider
########
slide_xoffset_ax = plt.axes([0.1, 0.15, 0.8, 0.02])
sliderx = Slider(slide_xoffset_ax, techno + " offset", -(len(sabt)/16), (len(sabt)/16),
valinit=init, color='#AAAAAA')
slide_yoffset_ax = plt.axes([0.1, 0.10, 0.8, 0.02])
slidery = Slider(slide_yoffset_ax, "gt_yoff", -100, 0,
valinit=0, color='#AAAAAA')
slide_alpha_ax = plt.axes([0.1, 0.05, 0.8, 0.02])
slideralpha = Slider(slide_alpha_ax, "gt_alpha", 0, 60,
valinit=30, color='#AAAAAA')
def update_x(val):
value = int(sliderx.val)
rtechnoval = np.roll(sab,value)
sliderx.valtext.set_text('{}'.format(value))
technoval[0].set_xdata(sabt)
technoval[0].set_ydata(rtechnoval)
fig.canvas.draw_idle()
sliderx.on_changed(update_x)
sliderx.drawon = False
def update_y(val):
yoff = slidery.val
alpha = slideralpha.val
gt[0].set_ydata(alpha*var + yoff)
fig.canvas.draw_idle()
#initpurpose
update_y(5)
slidery.on_changed(update_y)
slideralpha.on_changed(update_y)
def setter(event):
value = int(sliderx.val)
try :
nval = self.offset[self._filename][techno.lower()+'_index'] + value
except :
nval = value
self._save_data_off_dict(self._filename,techno.lower()+'_index',nval)
self.offset= self._load_offset_dict()
            ax.set_title('WARNING : Please reload the serie to validate the offset change',color='r',weight='bold')
axset = plt.axes([0.0, 0.5, 0.2, 0.05])
bset = Button(axset, 'SET ' +techno+' offs.')
bset.on_clicked(setter)
plt.show()
if interactive :
plt.ion()
# def offset_setter_hkb(self,a='AP1',b='WristRight',**kwargs):
# """ offset setter
# """
# defaults = { 'inverse':True
# }
# for k in defaults:
# if k not in kwargs:
# kwargs[k] = defaults[k]
# if plt.isinteractive():
# interactive = True
# plt.ioff()
# else :
# interactive = False
# fig, ax = plt.subplots()
# fig.subplots_adjust(bottom=0.2, left=0.3)
# a,ia,bia,subja,techno=self.devmapper(a,'HKB')
# b,ib,bib,subjb,techno=self.devmapper(b,'HKB')
# time = self.thkb
# if len(time.shape) == 2:
# time = time[0,:]
# try :
# init = time[0]#self.offset[self._filename]['hkb_index']
# except:
# init=time[0]
# var = self.getlinkd(ia,ib,'HKB').values
# if kwargs['inverse']:
# var = 10*np.log10(1./(var)**2)
# gt = ax.plot(self.B[self.B.keys()[0]].time,var)
# sab = self.hkb[a+'-'+b].values
# sabt = self.hkb[a+'-'+b].index
# hkb = ax.plot(sabt,sab)
# ########
# # slider
# ########
# slide_xoffset_ax = plt.axes([0.1, 0.15, 0.8, 0.02])
# sliderx = Slider(slide_xoffset_ax, "hkb offset", -(len(sabt)/16), (len(sabt)/16),
# valinit=init, color='#AAAAAA')
# slide_yoffset_ax = plt.axes([0.1, 0.10, 0.8, 0.02])
# slidery = Slider(slide_yoffset_ax, "gt_yoff", -100, 0,
# valinit=0, color='#AAAAAA')
# slide_alpha_ax = plt.axes([0.1, 0.05, 0.8, 0.02])
# slideralpha = Slider(slide_alpha_ax, "gt_alpha", 0, 10,
# valinit=5, color='#AAAAAA')
# def update_x(val):
# value = int(sliderx.val)
# rhkb = np.roll(sab,value)
# sliderx.valtext.set_text('{}'.format(value))
# hkb[0].set_xdata(sabt)
# hkb[0].set_ydata(rhkb)
# fig.canvas.draw_idle()
# sliderx.on_changed(update_x)
# sliderx.drawon = False
# def update_y(val):
# yoff = slidery.val
# alpha = slideralpha.val
# gt[0].set_ydata(alpha*var + yoff)
# fig.canvas.draw_idle()
# #initpurpose
# update_y(5)
# slidery.on_changed(update_y)
# slideralpha.on_changed(update_y)
# def setter(event):
# value = int(sliderx.val)
# try :
# nval = self.offset[self._filename]['hkb_index'] + value
# except :
# nval = value
# self._save_data_off_dict(self._filename,'hkb_index',nval)
# self.offset= self._load_offset_dict()
# ax.set_title('WARNING : Please Reload serie to Valide offset change',color='r',weight='bold')
# axset = plt.axes([0.0, 0.5, 0.2, 0.05])
# bset = Button(axset, 'SET offs.')
# bset.on_clicked(setter)
# plt.show()
# if interactive:
# plt.ion()
def mtlbsave(self):
""" Matlab format save
S{day}_{serie}
node_name
node_place
node_coord
HKB.{linkname}.tr
HKB.{linkname}.rssi
HKB.{linkname}.td
HKB.{linkname}.dist
HKB.{linkname}.sh
HKB.{linkname}.dsh
TCR.{linkname}.tr
TCR.{linkname}.range
TCR.{linkname}.td
TCR.{linkname}.dist
TCR.{linkname}.sh
"""
key = 'S'+str(self.day)+'_'+str(self.serie)
filemat = key+'.mat'
d = {}
d[key]={}
d[key]['node_name'] = self.dist_nodesmap
d[key]['node_place'] = [ self.devmapper(x)[0] for x in self.dist_nodesmap ]
d[key]['node_coord'] = self.points
for subject in self.interf:
sub = subject.replace(':','')
d[key][sub]=np.mean(self.B[subject].d,axis=1)
if ('HKB' in self.typ.upper()) or ('FULL' in self.typ.upper()):
d[key]['HKB']={}
links = list(self.hkb.columns)
inter,lks = self.compute_visibility(techno='HKB')
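# inter[i,j,:] : per-frame shadowing/visibility state between nodes i and j
# lks : corresponding node labels, used below to index each link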
for l in links:
ls = l.split('-')
nl = ls[0]+'_'+ls[1]
nl=nl.replace('Jihad','J').replace('Nicolas','N').replace('Eric','E')
d[key]['HKB'][nl] = {}
ix0 = np.where(lks==ls[0])[0]
ix1 = np.where(lks==ls[1])[0]
Ssh = inter[ix0,ix1,:]
Srssi= self.getlink(ls[0],ls[1],techno='HKB')
# get distances between nodes
Sdist = self.getlinkd(ls[0],ls[1],techno='HKB')
dsh = dist_sh2rssi(Sdist,Ssh,15)
# rssi
d[key]['HKB'][nl]['rssi'] = Srssi.values
# dsh
d[key]['HKB'][nl]['dsh'] = dsh
#d['S6'][nl]['rssi_dec'] = np.roll(Srssi.values,-dec)
d[key]['HKB'][nl]['sh'] = Ssh
# time rssi
#d[key]['HKB'][nl]['trh'] = np.array(Srssi.index)
d[key]['trh'] = np.array(Srssi.index)
# distance
d[key]['HKB'][nl]['dist'] = Sdist.values
# time mocap
#d[key]['HKB'][nl]['td'] = np.array(Sdist.index)
d[key]['tm'] = np.array(Sdist.index)
if ('TCR' in self.typ.upper()) or ('FULL' in self.typ.upper()):
d[key]['TCR']={}
links = list(self.tcr.columns)
inter,lks = self.compute_visibility(techno='TCR')
for l in links:
ls = l.split('-')
# to shorten matlab keys, surnames are replaced by their first letter
nl = ls[0]+'_'+ls[1]
nl=nl.replace('Jihad','J').replace('Nicolas','N').replace('Eric','E')
d[key]['TCR'][nl] = {}
ix0 = np.where(lks==ls[0])[0]
ix1 = np.where(lks==ls[1])[0]
# intersection on the link
Ssh = inter[ix0,ix1,:]
Srange= self.getlink(ls[0],ls[1],techno='TCR')
# get distances between nodes
Sdist = self.getlinkd(ls[0],ls[1],techno='TCR')
# rssi
d[key]['TCR'][nl]['range'] = Srange.values
# dsh
#d['S6'][nl]['rssi_dec'] = np.roll(Srssi.values,-dec)
d[key]['TCR'][nl]['sh'] = Ssh
# time rssi
#d[key]['TCR'][nl]['tr'] = np.array(Srange.index)
d[key]['trt'] = np.array(Srange.index)
# distance
d[key]['TCR'][nl]['dist'] = Sdist.values
# time mocap
#d[key]['TCR'][nl]['td'] = np.array(Sdist.index)
d[key]['tm'] = np.array(Sdist.index)
self.matlab = d
io.savemat(filemat,d)
def pltvisi(self,a,b,techno='',**kwargs):
""" plot visibility between link a and b
Attributes
----------
color:
fill color
hatch:
hatch type
label_pos: ('top'|'bottom'|'')
position of the label
label_pos_off: float
offset of the position of the label
label_vis: str
prefix of the visibility labels
label_hide: str
prefix of the hidden (complementary) labels
Examples
--------
>>> from pylayers.measures.cormoran import *
>>> S = CorSer(6)
>>> f,ax = S.plthkb('AP1','TorsoTopLeft',techno='HKB')
>>> f,ax = S.pltvisi('AP1','TorsoTopLeft',techno='HKB',fig=f,ax=ax)
>>> f,ax = S.pltmob(fig=f,ax=ax)
>>> plt.title('hatch = visibility / gray= mobility')
>>> plt.show()
"""
defaults = { 'fig':[],
'figsize':(10,10),
'ax':[],
'color':'',
'hatch':'//',
'label_pos':'',
'label_pos_off':5,
'label_vis':'V',
'label_hide':'H'
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
if kwargs['fig']==[]:
fig = plt.figure(figsize=kwargs['figsize'])
else :
fig=kwargs['fig']
if kwargs['ax'] ==[]:
ax = fig.add_subplot(111)
else :
ax = kwargs['ax']
aa= ax.axis()
a,ia,nna,subjecta,technoa = self.devmapper(a,techno)
b,ib,nnb,subjectb,technob = self.devmapper(b,techno)
vv,tv,tseg,itseg = self._visiarray(nna,nnb)
# vv.any() : NLOS regions exist
if vv.any():
if kwargs['color']=='':
fig,ax=plu.rectplot(tv,tseg,ylim=aa[2:],
fill=False,
hatch=kwargs['hatch'],
fig=fig,ax=ax)
else :
fig,ax=plu.rectplot(tv,tseg,ylim=aa[2:],
color=kwargs['color'],
hatch=kwargs['hatch'],
fig=fig,ax=ax)
if kwargs['label_pos']!='':
if kwargs['label_pos'] == 'top':
yposV = aa[3]-kwargs['label_pos_off']+0.5
yposH = aa[3]-kwargs['label_pos_off']-0.5
elif kwargs['label_pos'] == 'bottom':
yposV = aa[2]+kwargs['label_pos_off']+0.5
yposH = aa[2]+kwargs['label_pos_off']+0.5
xposV= tv[tseg.mean(axis=1).astype(int)]
xposH= tv[itseg.mean(axis=1).astype(int)]
[ax.text(x,yposV,kwargs['label_vis']+str(ix+1)) for ix,x in enumerate(xposV)]
[ax.text(x,yposH,kwargs['label_hide']+str(ix+1)) for ix,x in enumerate(xposH)]
return fig,ax
def pltmob(self,**kwargs):
""" plot mobility
Parameters
----------
subject: str
subject to display (if '', take the first one from self.subject)
showvel : boolean
display filtered velocity
velth: float (0.7)
velocity threshold
fo : int (5)
filter order
fw: float (0.02)
0 < fw < 1 (fN <=> 1)
time_offset : int
add time_offset to start later
Examples
--------
>>> from pylayers.measures.cormoran import *
>>> S = CorSer(6)
>>> f,ax = S.plthkb('AP1','TorsoTopLeft',techno='HKB')
>>> #f,ax = S.pltvisi('AP1','TorsoTopLeft',techno='HKB',fig=f,ax=ax)
>>> f,ax = S.pltmob(fig=f,ax=ax)
>>> plt.title('hatch = visibility / gray= mobility')
>>> plt.show()
"""
defaults = { 'subject':'',
'fig':[],
'figsize':(10,10),
'ax':[],
'showvel':False,
'velth':0.07,
'fo':5,
'fw':0.02,
'ylim':(),
'time_offset':0,
'color':'gray',
'hatch':'',
'label_pos':'top',
'label_pos_off':2,
'label_mob':'M',
'label_stat':'S'
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
if kwargs['fig']==[]:
fig = plt.figure(figsize=kwargs['figsize'])
else :
fig=kwargs['fig']
if kwargs['ax'] ==[]:
ax = fig.add_subplot(111)
else :
ax = kwargs['ax']
if kwargs['subject']=='':
subject=list(self.B.keys())[0]
else:
subject=kwargs['subject']
V=self.B[subject].traj[['vx','vy']].values
Vi=np.sqrt((V[:,0]**2+V[:,1]**2))
f=DF()
f.butter(kwargs['fo'],kwargs['fw'],'lowpass')
Vif=f.filter(Vi)
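# the velocity magnitude is low-pass filtered (order fo, normalized cutoff
# fw) before being thresholded by velth to separate static from mobile
# periods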
if kwargs['time_offset']>=0:
zmo = np.zeros(kwargs['time_offset'])
tmp = np.insert(Vif,zmo,0)
Vif = tmp[:len(Vif)]
else:
zmo = np.zeros(-kwargs['time_offset'])
tmp = np.concatenate((Vif,zmo))
Vif = tmp[-kwargs['time_offset']:len(Vif)-kwargs['time_offset']]
if kwargs['showvel']:
fig2 = plt.figure()
ax2=fig2.add_subplot(111)
ax2.plot(self.B[subject].time[:-2],Vif)
ax2.plot(Vif)
cursor2 = Cursor(ax2, useblit=True, color='gray', linewidth=1)
null = np.where(Vif<kwargs['velth'])[0]
unu1 = np.where(np.diff(null)!=1)[0]
unu2 = np.where(np.diff(null[::-1])!=-1)[0]
unu2 = len(null)-unu2
unu = np.concatenate((unu1,unu2))
unu = np.sort(unu)
sunu = unu.shape
if sunu[0]%2:
unu=np.insert(unu,-1,len(null)-1)
sunu = unu.shape
nullr=null[unu].reshape(sunu[0]//2,2)
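# nullr : [start,stop] index pairs bracketing the mobility periods (gaps
# between runs of below-threshold velocity) ; inullr built further down is
# the complementary set of static periods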
if kwargs['ylim'] != ():
ylim = kwargs['ylim']
else :
axlim = ax.axis()
ylim = [axlim[2],axlim[3]]
fig , ax =plu.rectplot(self.B[subject].time,nullr,ylim=ylim,
color=kwargs['color'],
hatch=kwargs['hatch'],
fig=fig,ax=ax)
inullr = copy.copy(nullr)
bb = np.insert(inullr[:,1],0,0)
ee = np.hstack((inullr[:,0],null[-1]))
inullr = np.array((bb,ee)).T
# remove last
inullr = inullr[:-1,:]
if kwargs['label_pos']!='':
if kwargs['label_pos'] == 'top':
yposM = ylim[1]-kwargs['label_pos_off']+0.5
yposS = ylim[1]-kwargs['label_pos_off']-0.5
elif kwargs['label_pos'] == 'bottom':
yposM = ylim[0]+kwargs['label_pos_off']+0.5
yposS = ylim[0]+kwargs['label_pos_off']+0.5
xposM= self.B[subject].time[nullr.mean(axis=1).astype(int)]
xposS= self.B[subject].time[inullr.mean(axis=1).astype(int)]
[ax.text(x,yposM,kwargs['label_mob']+str(ix+1),
horizontalalignment='center',
verticalalignment='center')
for ix,x in enumerate(xposM)]
[ax.text(x,yposS,kwargs['label_stat']+str(ix+1),
horizontalalignment='center',
verticalalignment='center')
for ix,x in enumerate(xposS)]
return fig,ax
def animhkb(self,a,b,interval=10,save=False):
"""
Parameters
----------
a : node name |number
b : node name | number
save : bool
"""
import matplotlib.animation as animation
x = self.hkb.index
link = a+'-'+b
y = self.hkb[link].values
fig, ax = plt.subplots()
plt.xlim(0,x[-1])
line = [ax.plot(x, y, animated=True)[0]]
def animate(i):
line[0].set_ydata(y[:i])
line[0].set_xdata(x[:i])
return line
ani = animation.FuncAnimation(fig, animate, range(1, len(x)),
interval=interval, blit=True)
if save:
ani.save(link+'.mp4')
plt.title(link)
plt.xlabel('time (s)')
plt.ylabel('RSS (dBm)')
plt.show()
def animhkbAP(self,a,AP_list,interval=1,save=False,**kwargs):
"""
Parameters
----------
a : node name
AP_list : list of access point names
save : bool
Example
-------
>>> from pylayers.measures.cormoran import *
>>> S = CorSer(6)
>>> S.animhkbAP('TorsoTopLeft',['AP1','AP2','AP3','AP4'],interval=100,xstart=58,figsize=(20,2))
"""
import matplotlib.animation as animation
defaults = { 'fig':[],
'figsize':(10,10),
'ax':[],
'label':'',
'xstart':0
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
if kwargs['fig']==[]:
fig = plt.figure(figsize=kwargs['figsize'])
else :
fig=kwargs['fig']
if kwargs['ax'] ==[]:
ax = fig.add_subplot(111)
else :
ax = kwargs['ax']
ust = np.where(self.hkb.index>=kwargs['xstart'])[0][0]
x = self.hkb.index[ust:]
links = [l+'-'+a for l in AP_list]
ly = [self.hkb[l].values[ust:] for l in links]
color=['k','b','g','r']
plt.xlim(kwargs['xstart'],x[-1]+3)
line = [ax.plot(x, y, animated=True,
color=color[iy],
label=AP_list[iy]+'-'+kwargs['label'])[0] for iy,y in enumerate(ly)]
def animate(i):
for iy,y in enumerate(ly):
line[iy].set_ydata(y[:i])
line[iy].set_xdata(x[:i])
return line
plt.legend()
plt.xlabel('time (s)')
plt.ylabel('RSS (dBm)')
ani = animation.FuncAnimation(fig, animate, range(0, len(x)),
interval=interval, blit=True)
if save:
ani.save(a+'.mp4')
#plt.title(links)
plt.show()
def plot(self,a,b,techno='',t='',**kwargs):
""" ploting
Parameters
----------
a : str | int
name |id
b : str | int
name |id
techno : str (optional)
radio techno
t : float | list (optional)
given time
or [start,stop] time
color : color
distance : boolean (False)
plot distance instead of value
lin : boolean (False)
display linear value instead of dB
sqrtinv : boolean (False)
apply : "sqrt (1/ dataset)"
xoffset : float (0)
add an offset on x axis
yoffset : float (1|1e3|1e6)
multiplicative scale factor applied on the y axis (1 -> mW, 1e3 -> uW, 1e6 -> nW)
title : boolean (True)
display title
shortlabel : boolean (True)
enable short labelling
fontsize : int (18)
font size
returnlines : boolean
if True return the matplotlib plotted lines
Examples
--------
>>> from pylayers.measures.cormoran import *
>>> S = CorSer(6)
>>> f,ax = S.plot('AP1','TorsoTopLeft',techno='HKB')
>>> f,ax = S.pltvisi('AP1','TorsoTopLeft',techno='HKB',fig=f,ax=ax)
>>> #f,ax = S.pltmob(fig=f,ax=ax)
>>> #plt.title('hatch = visibility / gray= mobility')
>>> plt.show()
"""
defaults = { 'fig':[],
'ax':[],
'figsize':(6,4),
'color':'g',
'distance':False,
'lin':False,
'xoffset':0,
'yoffset': 1e6,
'sqrtinv':False,
'title':True,
'shortlabel':True,
'fontsize':18,
'returnlines':False
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
a,ia,bia,subja,techno=self.devmapper(a,techno)
b,ib,bib,subjb,techno=self.devmapper(b,techno)
###create a short labeling
if kwargs['shortlabel']:
#find uppercase position
uu = np.nonzero([l.isupper() or l.isdigit() for l in a])[0]
# create string from list
labela = ''.join([a[i] for i in uu])
uu = np.nonzero([l.isupper() or l.isdigit() for l in b])[0]
# create string from list
labelb = ''.join([b[i] for i in uu])
label = labela +'-'+labelb
else:
label = a+'-'+b
if kwargs['distance']:
label = 'dist ' + label
if kwargs['fig']==[]:
fig = plt.figure(figsize=kwargs['figsize'])
else :
fig=kwargs['fig']
if kwargs['ax'] ==[]:
ax = fig.add_subplot(111)
else :
ax = kwargs['ax']
# get dataframe
if not kwargs['distance']:
df = self.getlink(a,b,techno,t)
title = 'Received Power between ' + label
ylabel = 'Received Power dBm'
else :
df = self.getlinkd(a,b,techno,t)
title = 'Distance between ' + label
ylabel = 'distance (m)'
#post processing on dataframe
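# with 'lin' the received power in dBm is converted to linear power (mW)
# and scaled by yoffset (1 -> mW, 1e3 -> uW, 1e6 -> nW) ; 'sqrtinv' then
# maps it to (mW)^(-1/2)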
if kwargs['lin']:
df = 10**(df/10) * kwargs['yoffset']
if kwargs['sqrtinv']:
df = np.sqrt(1./df)
ylabel = u'$ (mW)^{-1/2} linear scale$'
lines = df.plot(ax=ax,color=kwargs['color'],label=label)
# Managing labelling
if kwargs['title']:
ax.set_title(label=title,fontsize=kwargs['fontsize'])
if kwargs['lin']:
if kwargs['yoffset']==1:
ylabel = 'mW'
if kwargs['yoffset']==1e3:
ylabel = r'$\mu$W'
if kwargs['yoffset']==1e6:
ylabel = u'nW'
ax.set_ylabel(ylabel)
# if kwargs['data']==True:
# #ax.plot(self.thkb[0],self.rssi[ia,ib,:])
# #ax.plot(self.thkb[0],self.rssi[ib,ia,:])
# sab = self.hkb[a+'-'+b]
# if not(kwargs['dB']):
# sab = 10**(sab/10) * kwargs['yoffset']
# if kwargs['distance']:
# sab = np.sqrt(1/sab)
# if kwargs['reciprocal']:
# sba = 10**(sba/10 ) * kwargs['yoffset']
# sba = np.sqrt(1/sba)
# sab[t0:t1].plot(ax=ax,color=kwargs['colorab'],label=label,xlim=(t0,t1))
# if kwargs['reciprocal']:
# sba[t0:t1].plot(ax=ax,color=kwargs['colorba'],label=label)
# #title = 'Received Power ' + self.title1
# if kwargs['dis_title']:
# #title = self.title1+kwargs['tit']
# title = kwargs['tit']
# ax.set_title(label=title,fontsize=kwargs['fontsize'])
# if not kwargs['distance']:
# if kwargs['dB']:
# ax.set_ylabel('Received Power dBm')
# else:
# if kwargs['yoffset']==1:
# ax.set_ylabel('mW')
# if kwargs['yoffset']==1e3:
# ax.set_ylabel(u'$\micro$W')
# if kwargs['yoffset']==1e6:
# ax.set_ylabel(u'nW')
# else:
# ax.set_ylabel(u'$\prop (mW)^{-1/2} linear scale$')
# if kwargs['reciprocal']==True:
# # if kwargs['data']==True:
# # ax2=fig.add_subplot(212)
# r = self.hkb[a+'-'+b][self.hkb[a+'-'+b]!=0]- self.hkb[b+'-'+a][self.hkb[b+'-'+a]!=0]
# r[t0:t1].plot(ax=ax2)
# ax2.set_title('Reciprocity offset',fontsize=kwargs['fontsize'])
if not kwargs['returnlines']:
return fig,ax
else:
return fig,ax,lines
def plthkb(self,a,b,techno='HKB',**kwargs):
""" plot Hikob devices
DEPRECATED
Parameters
----------
a : node name |number
b : node name | number
t0 : start time
t1 : stop time
Examples
--------
>>> from pylayers.measures.cormoran import *
>>> S = CorSer(6)
>>> f,ax = S.plthkb('AP1','TorsoTopLeft',techno='HKB')
>>> f,ax = S.pltvisi('AP1','TorsoTopLeft',techno='HKB',fig=f,ax=ax)
>>> f,ax = S.pltmob(fig=f,ax=ax)
>>> plt.title('hatch = visibility / gray= mobility')
>>> plt.show()
"""
defaults = { 't0':0,
't1':-1,
'fig':[],
'ax':[],
'figsize':(8,8),
'xoffset':0,
'yoffset': 1e6,
'reciprocal':False,
'dB':True,
'data':True,
'colorab':'g',
'colorba':'b',
'distance':False,
'fontsize':18,
'shortlabel':True,
'dis_title':True,
'xlim':(),
'tit':''
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
t0 =kwargs['t0']
t1 =kwargs['t1']
if t1 ==-1:
try:
t1=self.thkb[0][-1]
except:
t1=self.thkb[-1]
a,ia,bia,subja,technoa=self.devmapper(a,techno)
b,ib,bib,subjb,technob=self.devmapper(b,techno)
if kwargs['shortlabel']:
#find uppercase position
uu = np.nonzero([l.isupper() or l.isdigit() for l in a])[0]
# create string from list
labela = ''.join([a[i] for i in uu])
uu = np.nonzero([l.isupper() or l.isdigit() for l in b])[0]
# create string from list
labelb = ''.join([b[i] for i in uu])
label = labela +'-'+labelb
else:
label = a+'-'+b
if kwargs['fig']==[]:
fig = plt.figure(figsize=kwargs['figsize'])
else :
fig=kwargs['fig']
if kwargs['ax'] ==[]:
if kwargs['reciprocal']:
ax = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
else :
ax = fig.add_subplot(111)
else :
ax = kwargs['ax']
if kwargs['data']==True:
#ax.plot(self.thkb[0],self.rssi[ia,ib,:])
#ax.plot(self.thkb[0],self.rssi[ib,ia,:])
sab = self.hkb[a+'-'+b]
sba = self.hkb[b+'-'+a]
if not(kwargs['dB']):
sab = 10**(sab/10) * kwargs['yoffset']
if kwargs['distance']:
sab = np.sqrt(1/sab)
if kwargs['reciprocal']:
sba = 10**(sba/10 ) * kwargs['yoffset']
sba = np.sqrt(1/sba)
sab[t0:t1].plot(ax=ax,color=kwargs['colorab'],label=label,xlim=(t0,t1))
if kwargs['reciprocal']:
sba[t0:t1].plot(ax=ax,color=kwargs['colorba'],label=label)
#title = 'Received Power ' + self.title1
if kwargs['dis_title']:
#title = self.title1+kwargs['tit']
title = kwargs['tit']
ax.set_title(label=title,fontsize=kwargs['fontsize'])
if not kwargs['distance']:
if kwargs['dB']:
ax.set_ylabel('Received Power dBm')
else:
if kwargs['yoffset']==1:
ax.set_ylabel('mW')
if kwargs['yoffset']==1e3:
ax.set_ylabel(r'$\mu$W')
if kwargs['yoffset']==1e6:
ax.set_ylabel(u'nW')
else:
ax.set_ylabel(r'$\propto (mW)^{-1/2}$ linear scale')
if kwargs['reciprocal']==True:
# if kwargs['data']==True:
# ax2=fig.add_subplot(212)
r = self.hkb[a+'-'+b][self.hkb[a+'-'+b]!=0]- self.hkb[b+'-'+a][self.hkb[b+'-'+a]!=0]
r[t0:t1].plot(ax=ax2)
ax2.set_title('Reciprocity offset',fontsize=kwargs['fontsize'])
return fig,ax
def plttcr(self,a,b,**kwargs):
""" plot TCR devices
Parameters
----------
a : node name |number
b : node name | number
t0 : start time
t1 : stop time
"""
defaults = { 't0':0,
't1':-1,
'fig':[],
'ax':[],
'figsize':(8,8),
'data':True,
'colorab':'g',
'colorba':'b',
'linestyle':'default',
'inverse':False
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
t0 =kwargs['t0']
t1 =kwargs['t1']
if t1 ==-1:
t1=self.ttcr[-1]
if isinstance(a,str):
ia = self.dTCR[a]
else:
ia = a
a = self.idTCR[a]
if isinstance(b,str):
ib = self.dTCR[b]
else:
ib = b
b = self.idTCR[b]
if kwargs['fig']==[]:
fig = plt.figure(figsize=kwargs['figsize'])
else:
fig = kwargs['fig']
if kwargs['ax'] ==[]:
ax = fig.add_subplot(111)
else :
ax=kwargs['ax']
if kwargs['data']==True:
#ax.plot(self.thkb[0],self.rssi[ia,ib,:])
#ax.plot(self.thkb[0],self.rssi[ib,ia,:])
if kwargs['inverse']:
sab = 1./(self.tcr[a+'-'+b])**2
sba = 1./(self.tcr[b+'-'+a])**2
else:
sab = self.tcr[a+'-'+b]
sba = self.tcr[b+'-'+a]
sab[t0:t1].plot(ax=ax,color=kwargs['colorab'],marker='o',linestyle=kwargs['linestyle'])
sba[t0:t1].plot(ax=ax,color=kwargs['colorba'],marker='o',linestyle=kwargs['linestyle'])
ax.set_title(a+'-'+b)
return fig,ax
def pltgt(self,a,b,**kwargs):
""" plt ground truth
Parameters
----------
t0
t1
fig
ax
figsize: tuple
linestyle : str
inverse : boolean (False)
display 1/distance instead of distance
log : boolean
display log of distance instead of distance
gamma : float
multiplication factor for log : gamma*log(distance)
this can be used to fit RSS
mode : string
'HKB' | 'TCR' | 'FULL'
visi : boolean,
display visibility
color: string color ('k'|'m'|'g'),
color to display the visibility area
hatch : string hatch type ('//')
hatch type to hatch visibility area
fontsize: int
title fontsize
Example
-------
>>> from pylayers.measures.cormoran import *
>>> S=CorSer(6)
>>> S.pltgt('AP1','TorsoTopLeft')
"""
defaults = { 'subject':'',
't0':0,
't1':-1,
'fig':[],
'ax':[],
'figsize':(8,8),
'linestyle':'default',
'inverse':False,
'log':True,
'gamma':-40,
'mode':'HKB',
'visi': True,
'fontsize': 14,
'color':'k',
'hatch':''
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
#t0 =kwargs.pop('t0')
#t1 =kwargs.pop('t1')
#if t1 ==-1:
#t1=self.thkb[-1]
# t1=self.ttcr[-1]
label = a+'-'+b
mode = kwargs.pop('mode')
inverse = kwargs.pop('inverse')
log = kwargs.pop('log')
gamma = kwargs.pop('gamma')
visibility = kwargs.pop('visi')
fontsize = kwargs.pop('fontsize')
hatch = kwargs.pop('hatch')
subject = kwargs.pop('subject')
if subject=='':
subject=list(self.B.keys())[0]
else:
subject=subject
if kwargs['fig']==[]:
figsize = kwargs.pop('figsize')
kwargs.pop('fig')
fig = plt.figure(figsize=figsize)
else:
kwargs.pop('figsize')
fig = kwargs.pop('fig')
if kwargs['ax'] ==[]:
kwargs.pop('ax')
ax = fig.add_subplot(111)
else :
ax=kwargs.pop('ax')
if mode == 'HKB' or mode == 'FULL':
if isinstance(a,str):
iahk = self.dHKB[a]
else:
iahk = a
a = self.idHKB[a]
if isinstance(b,str):
ibhk = self.dHKB[b]
else:
ibhk = b
b = self.idHKB[b]
var = self.getlink(iahk,ibhk,'HKB')
#var = U.values
#time = U.index
#pdb.set_trace()
if inverse:
var = 1./(var)
ax.set_ylabel(u'$m^{-2}$',fontsize=fontsize)
if log :
#var = gamma*10*np.log10(var)
var = 20*np.log10(var)+gamma
ax.set_ylabel(u'$- 20 \log_{10}(d)'+str(gamma)+'$ (dB)',fontsize=fontsize)
plt.ylim(-65,-40)
else:
ax.set_ylabel(u'meters',fontsize=fontsize)
if log :
var = gamma*10*np.log10(var)+gamma
ax.set_ylabel(u'$10log_{10}m^{-2}$',fontsize=fontsize)
#ax.plot(self.B[subject].time,var,label=label,**kwargs)
var.plot()
#
# TCR |Full
#
if mode == 'TCR' or mode == 'FULL':
if isinstance(a,str):
iatcr = self.dTCR[a]
else:
iatcr = a
a = self.idTCR[a]
if isinstance(b,str):
ibtcr = self.dTCR[b]
else:
ibtcr = b
b = self.idTCR[b]
var = self.getlink(iatcr,ibtcr,'TCR').values
#if inverse:
# var = 1./(var)**2
# if log :
# var = gamma*10*np.log10(var)
#else:
# if log :
# var = gamma*10*np.log10(var)
#pdb.set_trace()
#ax.plot(self.B[subject].time,var,**kwargs)
ax.plot(self.B[subject].ttcr,var,**kwargs)
if visibility:
aa= ax.axis()
vv,tv,tseg,itseg = self._visiarray(a,b)
# vv.any() : NLOS regions exist
if vv.any():
fig,ax=plu.rectplot(tv,tseg,ylim=aa[2:],color=kwargs['color'],hatch=hatch,fig=fig,ax=ax)
# for t in tseg:
#axs[cptax].plot(visi.index.values,visi.values,'r')
#if inverse:
# ax.set_title(u'Motion Capture Ground Truth : inverse of squared distance',fontsize=fontsize+1)
#else:
# ax.set_title('Motion Capture Ground Truth : evolution of distance (m)',fontsize=fontsize+1)
ax.set_xlabel('Time (s)',fontsize=fontsize)
plt.tight_layout()
return fig, ax
def pltlk(self,a,b,**kwargs):
""" plot links
Parameters
----------
a : string
node a name
b : string
node b name
display: list
techno to be displayed
figsize
t0: float
time start
t1 : float
time stop
colhk: plt.color
color of hk curve
colhk2:plt.color
color of hk curve2 (if reciprocal)
linestylehk:
linestyle hk
coltcr:
color tcr curve
coltcr2:
color of tcr curve2 (if reciprocal)
linestyletcr:
linestyle tcr
colgt:
color ground truth
inversegt:
invert ground truth
loggt: bool
apply a log10 factor to ground truth
gammagt:
apply a gamma factor to ground truth (if loggt)
fontsize:
font size of legend
visi:
display visibility indicator
axs :
list of matplotlib axes
Example
-------
>>> from pylayers.measures.cormoran import *
>>> S=CorSer(6)
>>> S.pltlk('AP1','TorsoTopLeft')
"""
defaults = { 'display':[],
'figsize':(8,8),
't0':0,
't1':-1,
'colhk':'g',
'colhk2':'b',
'linestylehk':'default',
'coltcr':'g',
'coltcr2':'b',
'linestyletcr':'step',
'colgt': 'k',
'inversegt':True,
'loggt':True,
'gammagt':-40,
'fontsize':14,
'visi':True,
'axs' :[],
'gt':True,
'tit':''
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
display = kwargs.pop('display')
if not isinstance(display,list):
display=[display]
if display == []:
if ('tcr' in dir(self)) and ('hkb' in dir(self)):
display.append('FULL')
elif 'tcr' in dir(self):
display.append('TCR')
elif 'hkb' in dir(self):
display.append('HKB')
display = [t.upper() for t in display]
if 'FULL' in display:
ld = 2
elif 'TCR' in display or 'HKB' in display:
ld = 2
#Axes management
if kwargs['axs'] == []:
kwargs.pop('axs')
fig,axs = plt.subplots(nrows=ld,ncols=1,figsize=kwargs['figsize'],sharex=True)
else :
fig =plt.gcf()
axs = kwargs.pop('axs')
cptax= 0
# HKB plot
if 'HKB' in display or 'FULL' in display:
if ('HKB' in self.typ.upper()) or ('FULL' in self.typ.upper()):
if isinstance(a,str):
iahk = self.dHKB[a]
else :
raise AttributeError('in self.pltlk, nodes id must be a string')
if isinstance(b,str):
ibhk = self.dHKB[b]
else :
raise AttributeError('in self.pltlk, nodes id must be a string')
else :
raise AttributeError('HKB not available for the given scenario')
kwargs['fig']=fig
kwargs['ax']=axs[cptax]
kwargs['colorab']=kwargs.pop('colhk')
kwargs['colorba']=kwargs.pop('colhk2')
kwargs['linestyle']=kwargs.pop('linestylehk')
kwargs['tit']=kwargs.pop('tit')
fig,axs[cptax]=self.plthkb(a,b,reciprocal=False,**kwargs)
cptax+=1
else :
kwargs.pop('colhk')
kwargs.pop('colhk2')
kwargs.pop('linestylehk')
#TCR plot
if 'TCR' in display or 'FULL' in display:
if ('TCR' in self.typ.upper()) or ('FULL' in self.typ.upper()):
if isinstance(a,str):
iatcr = self.dTCR[a]
else :
raise AttributeError('in self.pltlk, nodes id must be a string')
if isinstance(b,str):
ibtcr = self.dTCR[b]
else :
raise AttributeError('in self.pltlk, nodes id must be a string')
else :
raise AttributeError('TCR not available for the given scenario')
kwargs['fig']=fig
kwargs['ax']=axs[cptax]
kwargs['colorab']=kwargs.pop('coltcr')
kwargs['colorba']=kwargs.pop('coltcr2')
kwargs['linestyle']=kwargs.pop('linestyletcr')
tcrlink = a+'-'+b
#plot only if link exist
if tcrlink in self.tcr:
fig,axs[cptax]=self.plttcr(a,b,**kwargs)
else :
kwargs.pop('coltcr')
kwargs.pop('coltcr2')
kwargs.pop('linestyletcr')
#cptax+=1
#
# Ground Truth
#
#
# HKB |Full
#
if kwargs.pop('gt'):
kwargs['color'] = kwargs.pop('colgt')
kwargs.pop('colorab')
kwargs.pop('colorba')
kwargs['ax']=axs[cptax]
kwargs['inverse']=kwargs.pop('inversegt')
kwargs['log']=kwargs.pop('loggt')
kwargs['gamma']=kwargs.pop('gammagt')
kwargs.pop('tit')
if 'HKB' in display or 'FULL' in display:
kwargs['mode']= 'HKB'
fig,axs[cptax] = self.pltgt(a,b,**kwargs)
elif 'TCR' in display or 'FULL' in display:
kwargs['mode']= 'TCR'
fig,axs[cptax] = self.pltgt(a,b,**kwargs)
return fig,axs
# aa = axs[cptax].axis()
#
# calculates visibility and display NLOS region
# as a yellow patch over the shadowed region
#
def showpattern(self,a,techno='HKB',**kwargs):
""" show pattern configuation for a given link and frame
Parameters
----------
a : int
link index
technoa : string
'HKB'|'TCR'|'BS'
technob
default 'HKB'|'TCR'|'BS'
phi : float
antenna elevation in rad
fig :
ax :
t : float
phi : float
pi/2
ap : boolean
"""
defaults = { 'fig':[],
'ax':[],
't':0,
'phi':np.pi/2.,
'ap':False
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
if kwargs['fig'] == []:
fig=plt.figure()
else :
fig = kwargs['fig']
if kwargs['ax'] == []:
ax=fig.add_subplot(111)
else :
ax = kwargs['ax']
# display nodes
#
#
#
a,ia,ba,subjecta,techno = self.devmapper(a,techno)
pa = self.getdevp(a,techno=techno,t=kwargs['t']).values
if len(pa.shape) >1:
pa=pa[0]
ax.plot(pa[0],pa[1],'ob')
ax.text(pa[0],pa[1],ba)
if subjecta != '':
self.B[subjecta].settopos(t=kwargs['t'])
self.B[subjecta].dev[ba]['ant'].eval()
xa,ya,z,sa,v = self.B[subjecta].dev[ba]['ant']._computemesh(po=pa,T=self.B[subjecta].acs[ba],minr=0.01,maxr=0.1,ilog=False)
p2 = np.where(self.B[subjecta].dev[ba]['ant'].phi<=kwargs['phi'])[0][-1]
# ax.plot(xa[:,p2],ya[:,p2])
ax.plot(xa[p2,:],ya[p2,:])
else:
self.din[ba]['ant'].eval()
xa,ya,z,sa,v = self.din[ba]['ant']._computemesh(po=self.din[ba]['p'],T=self.din[ba]['T'],minr=0.01,maxr=0.1,ilog=False)
p2 = np.where(self.din[ba]['ant'].phi<=kwargs['phi'])[0][-1]
ax.plot(xa[:,p2],ya[:,p2])
return fig,ax
def showlink(self,a='AP1',b='BackCenter',technoa='HKB',technob='HKB',**kwargs):
""" show link configuation for a given frame
Parameters
----------
a : int
link index
b : int
link index
technoa : string
default 'HKB'|'TCR'|'BS'
technob
default 'HKB'|'TCR'|'BS'
phi : float
antenna elevation in rad
"""
defaults = { 'fig':[],
'ax':[],
't':0,
'phi':np.pi/2.,
'ap':False
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
if kwargs['fig'] == []:
fig=plt.figure()
else :
fig = kwargs['fig']
if kwargs['ax'] == []:
ax=fig.add_subplot(111)
else :
ax = kwargs['ax']
# display nodes
fig,ax=self.showpattern(a=a,techno=technoa,fig=fig,ax=ax)
fig,ax=self.showpattern(a=b,techno=technob,fig=fig,ax=ax)
plt.axis('equal')
p1 = self.din['HKB:1']['p']
p2 = self.din['HKB:2']['p']
p3 = self.din['HKB:3']['p']
p4 = self.din['HKB:4']['p']
plt.plot(p1[0],p1[1],'og')
plt.plot(p2[0],p2[1],'ob')
plt.plot(p3[0],p3[1],'or')
plt.plot(p4[0],p4[1],'ok')
plt.axis('equal')
# if A.ndim==2:
# plt.plot(A[iframe,0],A[iframe,1],'ob')
# plt.text(A[iframe,0],A[iframe,1],a)
# else:
# plt.plot(A[0],A[1],'or')
# #plt.text(A[0],A[1],a)
# if B.ndim==2:
# plt.plot(B[iframe,0],B[iframe,1],style)
# plt.text(B[iframe,0]+0.1,B[iframe,1]+0.1,b)
# else:
# plt.plot(B[0],B[1],'ob')
# plt.text(B[0],B[1],b)
# plt.xlim(-6,6)
# plt.ylim(-5,5)
# self.B[subjecta].settopos(t=t)
# self.B[subjectb].settopos(t=t)
#
# # display body
# #pc = self.B.d[:,2,iframe] + self.B.pg[:,iframe].T
# pc0 = self.B[subjecta].d[:,0,iframe] + self.B[subjecta].pg[:,iframe].T
# pc1 = self.B[subjecta].d[:,1,iframe] + self.B[subjecta].pg[:,iframe].T
# pc15 = self.B[subjecta].d[:,15,iframe] + self.B[subjecta].pg[:,iframe].T
# #plt.plot(pc0[0],pc0[1],'og')
# #plt.text(pc0[0]+0.1,pc0[1],str(iframe))
# #plt.plot(pc1[0],pc1[1],'og')
# #plt.plot(pc15[0],pc15[1],'og')
# #ci00 = plt.Circle((pc0[0],pc0[1]),self.B[subjecta].sl[0,2],color='green',alpha=0.6)
# #ci01 = plt.Circle((pc1[0],pc1[1]),self.B[subjecta].sl[0,2],color='green',alpha=0.1)
# #ci100 = plt.Circle((pc0[0],pc0[1]),self.B[subjecta].sl[10,2],color='red',alpha=0.1)
# ci1015 = plt.Circle((pc15[0],pc15[1]),self.B[subjecta].sl[10,2],color='green',alpha=0.5)
# plt.axis('equal')
# ax = plt.gca()
# ax.add_patch(ci1015)
# #ax.add_patch(ci01)
# #ax.add_patch(ci100)
# #ax.add_patch(ci1015)
# #its = self.B[subjecta].intersectBody(A[iframe,:],B[iframe,:],topos=False,frameId=iframe)
# #x.set_title('frameId :'+str(iframe)+' '+str(its.T))
def visidev(self,a,b,technoa='HKB',technob='HKB',dsf=10):
""" get link visibility status
Returns
-------
visi : pandas Series
0 : LOS
1 : NLOS
"""
A,B = self.getlinkp(a,b,technoa=technoa,technob=technob)
A=A.values
B=B.values
aa,ia,ba,subjecta,technoa= self.devmapper(a,technoa)
ab,ib,bb,subjectb,technob= self.devmapper(b,technob)
if 'AP' not in aa:
Nframe = A.shape[0]
if 'AP' not in ab:
Nframe = B.shape[0]
else:
Nframe = len(self.B[list(self.B.keys())[0]].time)
iframe = np.arange(0,Nframe-1,dsf)
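# the trajectory is down-sampled by a factor dsf before running the costly
# per-frame body intersection test below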
tvisi = []
#
# A : Nframe x 3
# B : Nframe x 3
# B.pg : 3 x Nframe
#
if subjecta != '':
subject = subjecta
elif subjectb != '':
subject = subjectb
else :
raise AttributeError('Visibility can only be determined on a body for now')
if self.B[subject].centered:
A = A-self.B[subject].pg.T
B = B-self.B[subject].pg.T
for k in iframe:
if len(np.shape(A))<2:
A=A[np.newaxis,:]*np.ones((len(B),3))
if len(np.shape(B))<2:
B=B[np.newaxis,:]*np.ones((len(A),3))
its = self.B[subject].intersectBody(A[k,:],B[k,:],topos=False,frameId=k)
tvisi.append(its.any())
visi = pd.Series(tvisi,index=iframe/100.)
#return(visi,iframe)
return(visi)
def visidev2(self,a,b,technoa='HKB',technob='HKB',trange=[]):
""" get link visibility status
Returns
-------
trange : nd array
time range
visi : pandas Series
0 : LOS
1 : NLOS
"""
A,B = self.getlinkp(a,b,technoa,technob)
A=A.values
B=B.values
aa,ia,ba,subjecta,technoa= self.devmapper(a,technoa)
ab,ib,bb,subjectb,technob= self.devmapper(b,technob)
if 'AP' not in a:
Nframe = A.shape[0]
if 'AP' not in b:
Nframe = B.shape[0]
# iframe = np.arange(0,Nframe-1,dsf)
tvisi = []
#
# A : Nframe x 3
# B : Nframe x 3
# B.pg : 3 x Nframe
#
if subjecta != '':
subject = subjecta
elif subjectb != '':
subject = subjectb
else :
raise AttributeError('Visibility can only be determined on a body for now')
if self.B[subject].centered:
A = A-self.B[subject].pg.T
B = B-self.B[subject].pg.T
for t in trange:
fid = self.B[subject].posvel(self.B[subjecta].traj,t)[0]
its = self.B[subject].intersectBody(A[fid,:],B[fid,:],topos=False,frameId=fid)
tvisi.append(its.any())
visi = pd.Series(tvisi,index=trange)
#return(visi,iframe)
return(visi)
def _visiarray(self,a,b,technoa='HKB',technob='HKB'):
""" create entries for plu.rectplot
"""
visi = self.visidev(a,b,technoa=technoa,technob=technob)
tv = visi.index.values
vv = visi.values.astype(int)
if (not(vv.all()) and vv.any()):
df = vv[1:]-vv[0:-1]
um = np.where(df==1)[0]
ud = np.where(df==-1)[0]
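# um / ud : rising / falling edges of the NLOS indicator ; they are paired
# below into [start,stop] segments suitable for plu.rectplot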
lum = len(um)
lud = len(ud)
#
# impose the same size: segments start
# on a leading edge (um) and end on
# a falling edge (ud)
#
if lum==lud:
if ud[0]<um[0]:
um = np.hstack((np.array([0]),um))
ud = np.hstack((ud,np.array([len(vv)-1])))
else:
if ((lum<lud) & (vv[0]==1)):
um = np.hstack((np.array([0]),um))
if ((lud<lum) & (vv[len(vv)-1]==1)):
ud = np.hstack((ud,np.array([len(vv)-1])))
tseg = np.array(list(zip(um,ud)))
#else:
# tseg = np.array(zip(ud,um))
else:
if vv.all():
tseg = np.array(list(zip(np.array([0]),np.array([len(vv)-1]))))
else :
tseg = np.array([[0,0]])
itseg = copy.copy(tseg)
bb = np.insert(itseg[:,1],0,0)
ee = np.hstack((itseg[:,0],len(vv)))
itseg = np.array((bb,ee)).T
# bb = np.hstack((bb,len(vv)))
return vv,tv,tseg,itseg
# def _computedevpdf(self):
# """ create a timestamped data frame
# with all positions of devices
# """
# t=self.B.traj.time()
# pos = np.empty((len(t),12,3))
# for ik,k in enumerate(t):
# self.B.settopos(t=k)
# pos[ik,:,:]=self.B.getlinkp()
# df=[]
# for d in range(pos.shape[1]):
# df_tmp=pd.DataFrame(pos[:,d,:],columns=['x','y','z'],index=t)
# df_tmp['id']=self.B.dev.keys()[d]
# try :
# df = pd.concat([df,df_tmp])
# except:
# df = df_tmp
# df = df.sort_index()
# cols=['id','x','y','z']
# self.devdf=df[cols]
def _computedevpdf(self):
""" create a timestamped data frame
with positions of all devices
"""
if not isinstance(self.B,dict):
B={self.subject[0]:self.B}
else :
B=self.B
for b in B:
if 'dev' in dir(B[b]):
dev = list(B[b].dev.keys())
udev=[B[b].dev[d]['uc3d'] for d in dev]
postmp = np.array([np.mean(B[b]._f[:,u,:],axis=1) for u in udev])
pos = postmp.swapaxes(0,1)
t = B[b].time
for d in range(len(dev)):
df_tmp= | pd.DataFrame(pos[:,d,:],columns=['x','y','z'],index=t) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime as dt
from argparse import ArgumentTypeError
import japandas as jpd
import pandas as pd
def next_bday(day=None, n=1):
"""Returns the next business day after argument day.
"""
if day is None:
day = dt.date.today()
calendar = jpd.JapaneseHolidayCalendar()
cday = pd.offsets.CDay(n=n, calendar=calendar)
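# CDay with the Japanese holiday calendar gives a custom business-day
# offset that skips weekends and Japanese national holidays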
return | pd.to_datetime(day) | pandas.to_datetime |
import shutil
from pathlib import Path
import itertools
import math
import numpy as np
import pandas as pd
from statistics import mean
from scipy.optimize import minimize_scalar
def removeChars(s):
for c in [' ', '\\', '/', '^']:
s = s.replace(c, '')
return s
def rchop(s, suffix):
if suffix and s.endswith(suffix):
return s[:-len(suffix)]
return s
def IsValid(row):
comps = row['Filename'].split('_')
assert len(comps) == 5 or len(comps) == 6
lang = comps[0]
if lang.startswith('norm'):
return True
if lang.startswith('S'):
if int(comps[3]) == 13:
return False
return 'S'+comps[4]+'='+row['Annotation'] in ('Sa=a1', 'Sb=a1', 'Sb=a2')
if lang.startswith('M') or lang.startswith('B'):
if int(comps[3]) in [6,12,13]:
return False
return row['Annotation'] == 'a2'
print(row)
raise NotImplementedError
def GetVowel(row):
comps = row['Filename'].split('_')
assert len(comps) == 5 or len(comps) == 6
lang = comps[0]
if lang.startswith('norm'):
assert row['Annotation'] in ['a', 'i', 'u']
return 'norm@' + row['Annotation']
if lang.startswith('M'):
assert row['Annotation'] in ['a2']
return 'M@a2'
if lang.startswith('B'):
assert row['Annotation'] in ['a2']
return 'B@a2'
row_pos = comps[4]
assert row_pos in ['a', 'b']
if row['Annotation'] in [ # 'a', 'b', 'c', 'd', 'e',
'a1', 'a2',
'c1', 'c2', 'c2vs', 'c3', 'c4',
'd1', 'd2', 'd3',
]:
return lang + row_pos + '@' + row['Annotation']
elif row['Annotation'] in ['d1n', 'd1h']:
return lang + row_pos + '@' + 'd1'
elif row['Annotation'] in ['d2n', 'd2h']:
return lang + row_pos + '@' + 'd2'
elif row['Annotation'] in ['d3n', 'd3h']:
return lang + row_pos + '@' + 'd3'
else:
print(row)
raise NotImplementedError
def GetPersonLang(row):
comps = row['Filename'].split('_')
return '@'.join(comps[0:3])
def GetGenderLang(row):
comps = row['Filename'].split('_')
return '@'.join([comps[0], GetGender(row)])
def GetGender(row):
age_gender = int(row['Filename'].split('_')[2])
if age_gender % 2 == 1:
return 'M'
else:
return 'F'
def GetIsFiRows(row):
age_gender = int(row['Filename'].split('_')[2])
# female norm rows are skipped in Fi computation
if age_gender % 2 == 0:
return 'No'
lang = row['Filename'].split('_')[0]
# replace with norm after real data is ready
if 'norm' in lang:
return 'Yes'
else:
return 'No'
def LoadFormantData(lang):
all_data = []
for input in sorted(input_base_dir.glob(lang+'*.CSV')):
print(input)
single_df = pd.read_csv(input, converters={
'Annotation': removeChars}, na_values=['--undefined--', 'null'], skipinitialspace=True, sep=r"\s*[,]\s*", engine='python')
single_df.drop(single_df.filter(regex="Unname"), axis=1, inplace=True)
clean_df = single_df.dropna(subset=['Annotation'] + kCols)
clean_df = clean_df.copy()
clean_df['Table'] = input
num_nan = len(single_df) - len(clean_df)
if num_nan > 0:
print(input, 'Dropped', num_nan)
all_data.append(clean_df)
df = | pd.concat(all_data, ignore_index=True) | pandas.concat |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from kneed import KneeLocator
from jupyter_utils import AllDataset
data_dir = '../drp-data/'
GDSC_GENE_EXPRESSION = 'preprocessed/gdsc_tcga/gdsc_rma_gene_expr.csv'
TCGA_GENE_EXPRESSION = 'preprocessed/gdsc_tcga/tcga_log2_gene_expr.csv'
TCGA_CANCER = 'preprocessed/cancer_type/TCGA_cancer_one_hot.csv'
GDSC_CANCER = 'preprocessed/cancer_type/GDSC_cancer_one_hot.csv'
GDSC_lnIC50 = 'preprocessed/drug_response/gdsc_lnic50.csv'
TCGA_DR = 'preprocessed/drug_response/tcga_drug_response.csv'
gdsc_dr = pd.read_csv(data_dir + GDSC_lnIC50, index_col=0)
tcga_dr = | pd.read_csv(data_dir + TCGA_DR, index_col=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from pandas.compat import range
import pandas as pd
import pandas.util.testing as tm
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons(object):
def test_df_boolean_comparison_error(self):
# GH#4576
# boolean comparisons with a tuple/list give unexpected results
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
# not shape compatible
with pytest.raises(ValueError):
df == (2, 2)
with pytest.raises(ValueError):
df == [2, 2]
def test_df_float_none_comparison(self):
df = pd.DataFrame(np.random.randn(8, 3), index=range(8),
columns=['A', 'B', 'C'])
with pytest.raises(TypeError):
df.__eq__(None)
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH#15077, non-empty DataFrame
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1., 2., 3.]})
const = 2
result = getattr(df, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, pd.Series([2], ['bool']))
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH#15077 empty DataFrame
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1., 2., 3.]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, pd.Series([2], ['bool']))
@pytest.mark.parametrize('timestamps', [
[pd.Timestamp('2012-01-01 13:00:00+00:00')] * 2,
[pd.Timestamp('2012-01-01 13:00:00')] * 2])
def test_tz_aware_scalar_comparison(self, timestamps):
# Test for issue #15966
df = pd.DataFrame({'test': timestamps})
expected = pd.DataFrame({'test': [False, False]})
tm.assert_frame_equal(df == -1, expected)
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic(object):
def test_df_add_flex_filled_mixed_dtypes(self):
# GH#19611
dti = pd.date_range('2016-01-01', periods=3)
ser = pd.Series(['1 Day', 'NaT', '2 Days'], dtype='timedelta64[ns]')
df = pd.DataFrame({'A': dti, 'B': ser})
other = pd.DataFrame({'A': ser, 'B': ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{'A': pd.Series(['2016-01-02', '2016-01-03', '2016-01-05'],
dtype='datetime64[ns]'),
'B': ser * 2})
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
import keras
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.layers import *
from keras.models import Sequential
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
# Load the dataset
iris = load_iris()
# Create the data table
data = | pd.DataFrame(data=np.c_[iris['data'], iris['target']], columns=iris['feature_names'] + ['target']) | pandas.DataFrame |
import numpy as np
import pandas as pd
from datetime import datetime
import pytest
import empyrical
import vectorbt as vbt
from vectorbt import settings
from tests.utils import isclose
day_dt = np.timedelta64(86400000000000)
ts = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [5, 4, 3, 2, 1],
'c': [1, 2, 3, 2, 1]
}, index=pd.DatetimeIndex([
datetime(2018, 1, 1),
datetime(2018, 1, 2),
datetime(2018, 1, 3),
datetime(2018, 1, 4),
datetime(2018, 1, 5)
]))
ret = ts.pct_change()
settings.returns['year_freq'] = '252 days' # same as empyrical
seed = 42
np.random.seed(seed)
benchmark_rets = pd.DataFrame({
'a': ret['a'] * np.random.uniform(0.8, 1.2, ret.shape[0]),
'b': ret['b'] * np.random.uniform(0.8, 1.2, ret.shape[0]) * 2,
'c': ret['c'] * np.random.uniform(0.8, 1.2, ret.shape[0]) * 3
})
# ############# accessors.py ############# #
class TestAccessors:
def test_freq(self):
assert ret.vbt.returns.wrapper.freq == day_dt
assert ret['a'].vbt.returns.wrapper.freq == day_dt
assert ret.vbt.returns(freq='2D').wrapper.freq == day_dt * 2
assert ret['a'].vbt.returns(freq='2D').wrapper.freq == day_dt * 2
assert | pd.Series([1, 2, 3]) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# In[26]:
"""
LICENSE MIT
2021
<NAME>
Website : http://www.covidtracker.fr
Mail : <EMAIL>
README:
This file contains scripts that download data from data.gouv.fr and then process it to build many graphs.
I'm currently cleaning the code, please ask me if something is not clear enough.
The charts are exported to 'charts/images/france'.
Data is downloaded to/imported from 'data/france'.
Requirements: please see the imports below (use pip3 to install them).
"""
# In[27]:
import pandas as pd
import json
import france_data_management as data
import math
show_charts = False
PATH_STATS = "../../data/france/stats/"
PATH = "../../"
# In[28]:
df_regions_meta = pd.read_csv(PATH+"data/france/population_grandes_regions.csv")
# In[29]:
data.download_data_obepine()
df_obepine = data.import_data_obepine()
df_obepine_france = df_obepine.groupby("Date").mean().reset_index()
# In[52]:
data_adm_hosp_clage = data.import_data_hosp_ad_age()
# In[30]:
data.download_data()
# In[31]:
df, df_confirmed, dates, df_new, df_tests, df_deconf, df_sursaud, df_incid, df_tests_viros = data.import_data()
# In[32]:
df_new_france = data.import_data_new().groupby("jour").sum().reset_index()
# In[33]:
data.download_data_vue_ensemble()
df_vue_ensemble = data.import_data_vue_ensemble()
# In[34]:
#df_vacsi_a = data.import_data_vacsi_a_fra()
#df_vacsi_a_reg = data.import_data_vacsi_a_reg()
#df_vacsi_a_dep = data.import_data_vacsi_a_dep()
df_vacsi = data.import_data_vacsi_fra() #df_vacsi_a.groupby("jour").sum().reset_index()
df_vacsi_reg = data.import_data_vacsi_reg() #df_vacsi_a_reg.groupby(["jour", "reg"]).sum().reset_index()
df_vacsi_reg = df_vacsi_reg.merge(df_regions_meta, left_on="reg", right_on="code").rename({"n_tot_dose1": "n_cum_dose1"}, axis=1)
df_vacsi_dep = data.import_data_vacsi_dep().rename({"n_tot_dose1": "n_cum_dose1"}, axis=1)
#df_vacsi_a_dep.groupby(["jour", "dep"]).sum().reset_index().rename({"n_tot_dose1": "n_cum_dose1"}, axis=1)
# In[35]:
df_metro = data.import_data_metropoles()
df_metro["jour"] = df_metro["semaine_glissante"].map(lambda x: x[11:])
df_metro_65 = df_metro[df_metro["clage_65"] == 65]
df_metro_0 = df_metro[df_metro["clage_65"] == 0]
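# clage_65 == 65 selects the 65-and-over age class, clage_65 == 0 keeps all
# ages (encoding used in the source data)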
metropoles = list(dict.fromkeys(list(df_metro['Metropole'].dropna().values)))
# In[36]:
df_tests_viros_enrichi = data.import_data_tests_viros()
df_tests_viros_enrichi = df_tests_viros_enrichi.drop("regionName_y", axis=1).rename({"regionName_x": "regionName"}, axis=1)
# In[37]:
df_incid_clage = df_incid.copy()
df_incid_fra_clage = data.import_data_tests_sexe()
df_incid_fra = df_incid_fra_clage[df_incid_fra_clage["cl_age90"]==0]
df_france = df.groupby(["jour"]).sum().reset_index()
df_incid = df_incid[df_incid.cl_age90 == 0]
df_sursaud_france = df_sursaud.groupby(["date_de_passage"]).sum().reset_index()
df_sursaud_regions = df_sursaud.groupby(["date_de_passage", "regionName"]).sum().reset_index()
#df_new_france = df_new.groupby(["jour"]).sum().reset_index()
df_new_regions = df_new.groupby(["jour", "regionName"]).sum().reset_index()
# In[38]:
df_incid_clage_regions = df_incid_clage.groupby(["regionName", "jour", "cl_age90"]).sum().reset_index()
# In[39]:
df_tests_viros_regions = df_tests_viros_enrichi.groupby(["regionName", "jour", "cl_age90"]).sum().reset_index()
df_tests_viros_france = df_tests_viros_enrichi.groupby(["jour", "cl_age90"]).sum().reset_index()
# In[40]:
df_hosp_clage = data.import_data_hosp_clage()
df_hosp_clage_france = df_hosp_clage.groupby(["jour", "cl_age90"]).sum().reset_index()
df_hosp_clage_regions = df_hosp_clage.groupby(["regionName", "jour", "cl_age90"]).sum().reset_index()
# In[41]:
departements = list(dict.fromkeys(list(df_incid['dep'].values)))
regions = list(dict.fromkeys(list(df_incid['regionName'].dropna().values)))
clage_list = list(dict.fromkeys(list(df_incid_fra_clage['cl_age90'].dropna().values)))
df_regions = df.groupby(["jour", "regionName"]).sum().reset_index()
df_incid_regions = df_incid.groupby(["jour", "regionName"]).sum().reset_index()
zone_a = ["zone_a", "01", "03", "07", "15", "16", "17", "19", "21", "23", "24", "25", "26", "33", "38", "39", "40", "42", "43", "47", "58", "63", "64", "69", "70", "71", "73", "74", "79", "86", "90"]
zone_b = ["zone_b", "02", "04", "05", "06", "08", "10", "13", "14", "18", "22", "27", "28", "29", "35", "36", "37", "41", "44", "45", "49", "50", "51", "52", "53", "54", "55", "56", "57", "59", "60", "61", "62", "67", "68", "72", "76", "80", "83", "84", "85", "88"]
zone_c = ["zone_c", "09", "11", "12", "30", "31", "32", "34", "46", "48", "65", "66", "75", "77", "78", "81", "82", "91", "92", "93", "94", "95"]
confines_mars_2021 = ["confines_mars_2021", "02", "06", "27", "59", "60", "62", "75", "76", "77", "78", "80", "91", "92", "93", "94", "95"]
# In[42]:
def generate_data(data_incid=pd.DataFrame(), data_hosp=pd.DataFrame(), data_sursaud=pd.DataFrame(), data_new= | pd.DataFrame() | pandas.DataFrame |
import act
import requests
import json
import glob
import pandas as pd
import datetime as dt
import numpy as np
import xarray as xr
import dask
import matplotlib.pyplot as plt
import textwrap
import argparse
import importlib
from scipy import stats
from matplotlib.dates import DateFormatter
from matplotlib.dates import HourLocator
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.colors import ListedColormap
from matplotlib import cm
def get_dqr(ds):
"""
Queries DQR webservice for the datastream name passed in
Parameters
----------
ds : str
ARM datastream name (ie, sgpmetE13.b1).
"""
# Build URL and call through requests
url = ''.join(("https://www.archive.arm.gov/dqrws/ARMDQR?datastream=", ds,
"&dqrfields=dqrid,starttime,endtime,metric,subject&timeformat=YYYYMMDD.hhmmss",
"&searchmetric=incorrect,suspect,missing"))
r = requests.get(url=url)
# Run through the returns and compile data
num = []
sdate = []
edate = []
code = []
sub = []
for line in r.iter_lines():
# filter out keep-alive new lines
if line:
decoded_line = line.decode('utf-8')
result = decoded_line.split('|')
num.append(result[0])
sdate.append(result[1])
edate.append(result[2])
code.append(result[3])
sub.append(result[4])
return {'dqr_num': num, 'sdate': sdate, 'edate': edate, 'code': code, 'subject': sub}
def get_doi(site, dsname, c_start, c_end):
# Get DOI Information from ARM's API
doi_url = 'https://adc.arm.gov/citationservice/citation/inst-class?id=' + inst[ii] + '&citationType=apa'
doi_url += '&site=' + site
doi_url += '&dataLevel=' + dsname.split('.')[-1]
doi_url += '&startDate=' + c_start
doi_url += '&endDate=' + c_end
doi = requests.get(url=doi_url)
if len(doi.text) > 0:
doi = doi.json()['citation']
else:
doi = 'N/A'
return doi
def get_metadata(ds, return_fac=False):
# Get Metadata Information, particularly the description
metadata_url = 'https://adc.arm.gov/solr8/metadata/select?q=datastream%3A' + ds
r = requests.get(url=metadata_url)
response = r.json()['response']
try:
response = response['docs'][0]
description = response['instrument_name_text']
if return_fac:
description = response['facility_name']
except:
description = ds
return description
def get_da(site, dsname, dsname2, data_path, t_delta, d, dqr, c_start, c_end):
"""
Function to calculate data availability for a particular instrument
Parameters
----------
site : str
ARM Site ID
dsname : str
Datastream name to use, minus site
dsname2 : str
Secondary datastream name to use, minus site
For instance if dsname = dlfptM1.b1, dsname2 = dlppiM1.b1
t_delta : float
Pre-defined time delta to use, otherwise resample to 1 minute
d : str
Date to process DA for
dqr : dict
Dictionary from get_dqr. This allows for DQRing of data without
multiple pings of the DQR web service at once
c_start : str
Campaign start date
c_end : str
Campaign end date
Returns
-------
dict
returns a dictionary of data and time deltas to use for plotting
"""
# Get files for particular day, defaults to archive area for now
ds = site + dsname
files = glob.glob('/'.join([data_path, site, ds, ds + '*' + d + '*nc']))
if len(files) == 0:
files = glob.glob('/'.join([data_path, site, ds, ds + '*' + d + '*cdf']))
files = sorted(files)
# Set time delta to 1 minute if not specified
if t_delta is None:
t_delta = 1
# Read data for primary datastream
if len(files) > 0:
try:
obj = act.io.armfiles.read_netcdf(files)
except ValueError:
obj = act.io.armfiles.read_netcdf(files[0])
obj = obj.sortby('time')
else:
obj = None
# Read data for secondary datastream
if dsname2 is not None:
ds2 = site + dsname2
files2 = glob.glob('/data/archive/' + site + '/' + ds2 + '/' + ds2 + '*' + d + '*nc')
if len(files2) == 0:
files2 = glob.glob('/data/archive/' + site + '/' + ds2 + '/' + ds2 + '*' + d + '*cdf')
files2 = sorted(files2)
if len(files2) > 0:
obj2 = act.io.armfiles.read_netcdf(files2, combine='nested', coords=['time'])
obj2 = obj2.sortby('time')
if obj is not None:
obj = obj['time'].combine_first(obj2['time'])
obj2.close()
else:
obj = obj2
else:
dsname2 = None
# Set up dataframe with all expected times for day
d0 = pd.to_datetime(d)
d1 = d0 + dt.timedelta(days=1)
d_range = pd.date_range(d0, d1, freq=str(t_delta) + 'T', closed='left')
df1 = pd.DataFrame({'counts': np.zeros(len(d_range))}, index=d_range)
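# df1 holds every expected time bin of the day at t_delta resolution ;
# joining the observed counts onto it makes missing periods stand out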
# Join datasets with dataframe
code_map = {'suspect': 2, 'incorrect': 3, 'missing': 4}
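# DQR assessments are mapped to integer severity codes that are overlaid
# on the availability image when plotting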
if len(files) > 0:
counts = obj['time'].resample(time=str(t_delta) + 'min').count().to_dataframe()
counts[counts > 1] = 1
dqr_counts = counts * 0.
# Flag data for DQRs
# Work on passing DQR times to get_da to flag
for jj, d in enumerate(dqr['dqr_num']):
dqr_start = dt.datetime.strptime(dqr['sdate'][jj], '%Y%m%d.%H%M%S')
dqr_end = dt.datetime.strptime(dqr['edate'][jj], '%Y%m%d.%H%M%S')
# Check for open-ended DQRs
if dt.datetime(3000, 1, 1) < dqr_end:
dqr_end = dt.datetime.strptime(c_end, '%Y-%m-%d') + dt.timedelta(days=1)
idx = (counts.index > dqr_start) & (counts.index < dqr_end)
idx = np.where(idx)[0]
assessment = dqr['code'][jj]
if len(idx) > 0:
dqr_counts.iloc[idx] = code_map[assessment]
data = df1.join(counts)
data.loc[data['time'] > 0, 'time'] = 1
r_data = np.nan_to_num(data['time'].tolist())
dqr_data = df1.join(dqr_counts)
dqr_data.loc[dqr_data['time'] == 0, 'time'] = np.nan
dqr_data = dqr_data['time'].tolist()
obj.close()
else:
counts = df1
counts[counts > 1] = 1
dqr_counts = counts * 0.
# Flag data for DQRs
# Work on passing DQR times to get_da to flag
for jj, d in enumerate(dqr['dqr_num']):
dqr_start = dt.datetime.strptime(dqr['sdate'][jj], '%Y%m%d.%H%M%S')
dqr_end = dt.datetime.strptime(dqr['edate'][jj], '%Y%m%d.%H%M%S')
# Check for open-ended DQRs
if dt.datetime(3000, 1, 1) < dqr_end:
dqr_end = dt.datetime.strptime(c_end, '%Y-%m-%d') + dt.timedelta(days=1)
idx = (counts.index > dqr_start) & (counts.index < dqr_end)
idx = np.where(idx)[0]
assessment = dqr['code'][jj]
if len(idx) > 0:
dqr_counts.iloc[idx] = code_map[assessment]
data = df1
r_data = np.nan_to_num(data['counts'].tolist())
dqr_data = dqr_counts
dqr_data.loc[dqr_data.counts == 0, 'counts'] = np.nan
dqr_data = dqr_data.counts.tolist()
return {'data': r_data, 't_delta': t_delta, 'date': d0, 'dqr_data': dqr_data}
if __name__ == '__main__':
"""
Main function to get information from configuration file and create DA plots
Author : <NAME>
"""
# Time trials
now = pd.Timestamp.now()
# Get configuration file passed in from command line
parser = argparse.ArgumentParser(description='Create campaign summary plots.')
parser.add_argument('-c', '--conf', type=str, required=True,
help='Conf file to get information from')
args = parser.parse_args()
# Executes the config file so that the variables are accessible to this program
exec(open(args.conf).read())
# Get configuration information
site = conf['site']
inst = list(conf['instruments'].keys())
c_start = conf['start_date']
c_end = conf['end_date']
if 'data_path' in conf:
data_path = conf['data_path']
else:
data_path = '/data/archive'
if 'chart_style' in conf:
chart_style = conf['chart_style']
else:
chart_style = '2D'
# Set date range for plots
start = pd.to_datetime(c_start)
end = pd.to_datetime(c_end)
c_dates = pd.date_range(start, end + dt.timedelta(days=1), freq='d')
#c_dates = c_dates[0:2]
# Set up plot layout. Since it's a PDF, it's 8 plots per page
if 'info_style' not in conf:
conf['info_style'] = 'complex'
if chart_style == 'linear':
nrows = 20
ncols = 4
tw = 40
yi_spacing = 0.2
fs = 6
share_x = True
if conf['info_style'] == 'simple':
fs = 9
tw = 50
yi_spacing = 0.275
elif chart_style == '2D':
nrows = 8
ncols = 3
tw = 47
fs = 8
yi_spacing = 0.1
share_x = False
else:
raise ValueError('Please select linear or 2D for chart_style')
ct = 0
# Create pdf file
if 'outname' in conf:
filename = conf['outname']
ext = filename.split('.')[-1]
pdf_pages = PdfPages(filename)
# Process each instrument
doi_tab = []
dqr_tab = []
axes = None
for ii in range(len(inst)):
if ct == 0:
fig = plt.figure(figsize=(8.27, 11.69), constrained_layout=True, dpi=100)
gs = fig.add_gridspec(nrows, ncols)
dsname = conf['instruments'][inst[ii]]['dsname']
ds = conf['site'] + dsname
print(ds)
dqr = get_dqr(ds)
dqr_no = []
if conf['dqr_table'] is True:
for jj, d in enumerate(dqr['dqr_num']):
if dqr['dqr_num'][jj] in dqr_no:
continue
dqr_no.append(dqr['dqr_num'][jj])
dqr_tab.append([ds, dqr['dqr_num'][jj], dqr['code'][jj], '\n'.join(textwrap.wrap(dqr['subject'][jj], width=50)),
dqr['sdate'][jj], dqr['edate'][jj]])
dsname2 = None
ds2 = None
# Get secondary datastream if specified
if 'dsname2' in conf['instruments'][inst[ii]]:
dsname2 = conf['instruments'][inst[ii]]['dsname2']
ds2 = site + dsname2
# Get time delta if specified
t_delta = None
if 't_delta' in conf['instruments'][inst[ii]]:
t_delta = conf['instruments'][inst[ii]]['t_delta']
if 'data_path' in conf['instruments'][inst[ii]]:
data_path = conf['instruments'][inst[ii]]['data_path']
# Get number of workers if defined. Should be 1 worker for radars to
# avoid core dumps
workers = None
if 'workers' in conf['instruments'][inst[ii]]:
workers = conf['instruments'][inst[ii]]['workers']
# Set up the initial title of the doc
if ii == 0:
ax0 = fig.add_subplot(gs[ct, :])
ax0.set_frame_on(False)
ax0.get_xaxis().set_visible(False)
ax0.get_yaxis().set_visible(False)
description = get_metadata(ds, return_fac=True)
ax0.text(0.5, 0.99, '\n'.join(textwrap.wrap(description, width=70)), size=14, ha='center')
ax0.text(0.5, 0.45, 'Atmospheric Radiation Measurement User Facility', size=12,
ha='center')
ct += 2
# Dask loop for multiprocessing
# workers should be set to 1 in the conf file for radars
task = []
for jj, d in enumerate(c_dates):
#task.append(get_da(site, dsname, dsname2, t_delta, d.strftime('%Y%m%d'), dqr))
task.append(dask.delayed(get_da)(site, dsname, dsname2, data_path, t_delta, d.strftime('%Y%m%d'), dqr, c_start, c_end))
results = dask.compute(*task, num_workers=workers)
# Get data from dask and create images for display
t_delta = int(stats.mode([r['t_delta'] for r in results])[0][0])
y_times = pd.date_range(start, start + dt.timedelta(days=1), freq=str(t_delta) + 'T', closed='left')
y_times_time = np.array([ti.time() for ti in y_times])
img = [list(r['data']) for r in results]
dqr_img = [list(r['dqr_data']) for r in results]
# Get DOI Information
doi = get_doi(site, dsname, c_start, c_end)
if conf['doi_table'] is True:
doi_tab.append([inst[ii].upper(), '\n'.join(textwrap.wrap(doi, width=90))])
description = get_metadata(ds)
# Add Subplot and start adding text
# Just text on this plot
ax0 = fig.add_subplot(gs[ct, 0])
ax0.set_frame_on(False)
ax0.get_xaxis().set_visible(False)
ax0.get_yaxis().set_visible(False)
yi = 0.95
if conf['info_style'] == 'simple':
ax0.text(0, yi, inst[ii].upper(), size=fs, va='top', weight='bold')
yi -= yi_spacing
ds_str = ds
if dsname2 is not None:
ds_str += ', ' + ds2
ds_str = '\n'.join(textwrap.wrap(ds_str, width=tw))
ax0.text(0, yi, ds_str, size=fs, va='top')
else:
ax0.text(0, yi, '\n'.join(textwrap.wrap(description, width=tw)), size=fs, va='top')
yi -= yi_spacing
if len(description) > tw:
yi -= yi_spacing * np.floor(len(description)/tw)
ax0.text(0, yi, 'ARM Name: ' + inst[ii].upper(), size=fs, va='top')
yi -= yi_spacing
ds_str = ds
if dsname2 is not None:
ds_str += ', ' + ds2
ds_str = '\n'.join(textwrap.wrap(ds_str, width=tw))
ax0.text(0, yi, 'Datastream: ' + ds_str, size=fs, va='top')
yi -= yi_spacing * 1.1
if len(ds_str) > tw:
yi -= yi_spacing * np.floor(len(ds_str)/tw)
if conf['doi_table'] is False:
ax0.text(0, yi, '\n'.join(textwrap.wrap(doi, width=tw)), va='top', size=fs)
# Plot out the DA on the right plots
newcmp = ListedColormap(['white', 'cornflowerblue', 'yellow', 'red'])
ax1 = fig.add_subplot(gs[ct, 1:], rasterized=True, sharex=axes)
if axes is None:
axes = ax1
if chart_style == '2D':
ax1.pcolormesh(c_dates, y_times, np.transpose(img), vmin=0, vmax=3,
cmap=newcmp, shading='flat', zorder=0, edgecolors='face')
ax1.pcolor(c_dates, y_times, np.transpose(dqr_img), hatch='/', zorder=0, alpha=0)
ax1.yaxis.set_major_locator(HourLocator(interval=6))
ax1.yaxis.set_major_formatter(DateFormatter('%H:%M'))
elif chart_style == 'linear':
img = np.array(img).flatten()
x_times = [np.datetime64(c + dt.timedelta(hours=yt.hour, minutes=yt.minute)) for c in c_dates for yt in y_times]
idx = np.where(img > 0)[0]
time_delta = act.utils.determine_time_delta(np.array(x_times))
if len(idx) > 0:
barh_list_green = act.utils.reduce_time_ranges(np.array(x_times)[idx], time_delta=time_delta,
broken_barh=True)
ax1.broken_barh(barh_list_green, (0, 1), facecolors='green')
dqr_img = np.array(dqr_img).flatten()
code_map = {'suspect': 2, 'incorrect': 3, 'missing': 4}
code_colors = {'suspect': 'yellow', 'incorrect': 'red', 'missing': 'grey'}
for code in code_map:
idx = np.where(dqr_img == code_map[code])[0]
if len(idx) == 0:
continue
time_delta = act.utils.determine_time_delta(np.array(x_times))
barh_list = act.utils.reduce_time_ranges(np.array(x_times)[idx], time_delta=time_delta,
broken_barh=True)
ax1.broken_barh(barh_list, (0, 1), facecolors=code_colors[code])
ax1.set_ylim([0,1])
ax1.get_yaxis().set_visible(False)
if ct == 0 or ii == 0:
ax1.xaxis.tick_top()
plt.xticks(fontsize=8)
else:
ax1.get_xaxis().set_visible(False)
plt.subplots_adjust(top=0.95, left=0.02, right=0.96, hspace=0)
ax1.set_xlim([pd.to_datetime(c_start), pd.to_datetime(c_end) + | pd.Timedelta('1 days') | pandas.Timedelta |
import pandas as pd
def extract_forecast(orders: pd.DataFrame):
df = orders.iloc[:, -12:].copy()
df.drop(df[df.sum(axis=1) == 0].index, inplace=True)
return df
class OrderHistory:
def __init__(self):
self.orders = pd.DataFrame(columns=["date", "product_no", "amount"])
def initialize(self):
df1 = pd.read_csv("C://sl_data//2100_tuketim.csv", low_memory=False)
df2 = pd.read_csv("C://sl_data//2200_tuketim.csv", low_memory=False)
df3 = | pd.read_csv("C://sl_data//2019_kalan_tuketim.csv", low_memory=False) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 24 16:22:22 2020
@author: trucdo
"""
#%% Define path, file names, and other parameters
# indicate name (exclude file extension) and path of .csv file containing TFBS hits
in_path = "path_to_directory"
tfbs_file_name = "Burkholderia_cepacia_ATCC_25416_CDS-IC-filteredfragments"
# indicate information content or other value for cutoff (float)
cutoff = 0
#%% Import modules
# to interact with OS-dependent functionality
import os
# to store transcription factor binding site as a Pandas dataframe and to work with arrays
import pandas as pd
#import numpy as np
# import matplotlib and seaborn packages for plotting data
import matplotlib.pyplot as plt
#%% Class definitions
class tfbs(object):
# class to store information for a TFBS
# input: a single TFBS hit from PWMmodel.py
# constructor to call an instance of the class and parse input to calculate attributes
# input: self variable referring to PWMmodel.py result for a single TFBS hit (str)
# all attributes are string type
def __init__(self, full_result):
# split full TFBS result into a list using comma delimiter ","
parsed_result = full_result.split(",")
self.index = parsed_result[0]
self.sequence = parsed_result[1]
self.chromosome_no = parsed_result[2]
self.start_location = parsed_result[3]
self.PWM_score = parsed_result[4]
self.CDS_neighbor = parsed_result[5]
self.CDS_annotation = parsed_result[6]
#%% Main program
# navigate to directory where results are stored
os.chdir(in_path)
# open TFBS results .csv file, read in each TFBS as a separate line, remove header
tfbs_file_handle = open(tfbs_file_name + ".csv")
tfbs_file_readlines = tfbs_file_handle.readlines()
header = tfbs_file_readlines.pop(0)
# iterate through each TFBS, convert it to a tfbs object, and then dict of dict
# dict key is arbitrary index
# dict values are the original index, sequence, chromosome_no, start_location, PWM_score of TFBS hit, CDS info
# use inner dict to separately store attributes of each TFBS
all_tfbs_dict = {}
count = 1
out_file = open(tfbs_file_name + "_scores.txt", "w")
for line in tfbs_file_readlines:
inner_dict = {}
tfbs_obj = tfbs(line.rstrip("\n"))
inner_dict["index"] = tfbs_obj.index
inner_dict["sequence"] = tfbs_obj.sequence
inner_dict["chromosome_no"] = tfbs_obj.chromosome_no
inner_dict["start_location"] = int(tfbs_obj.start_location)
inner_dict["PWM_score"] = float(tfbs_obj.PWM_score)
inner_dict["CDS_neighbor"] = tfbs_obj.CDS_neighbor
inner_dict["CDS_annotation"] = tfbs_obj.CDS_annotation
all_tfbs_dict[count] = inner_dict
count = count + 1
out_file.write(str(tfbs_obj.PWM_score + "\n"))
out_file.close()
# convert TFBS dict to dataframe and transpose so each extracted sequence is a row and its info is a column
all_tfbs_df = | pd.DataFrame(all_tfbs_dict) | pandas.DataFrame |
import pandas as pd
import pandas as pd
sample1 = pd.read_table('MUT-1_2.annotate.csv', sep='\t', index_col=0)["score"]
sample2 = pd.read_table('MUT-2_2.annotate.csv', sep='\t', index_col=0)["score"]
sample3 = pd.read_table('MUT-4_2.annotate.csv', sep='\t', index_col=0)["score"]
sample4 = pd.read_table('MUT-5_2.annotate.csv', sep='\t', index_col=0)["score"]
sample5 = pd.read_table('MUT-6_2.annotate.csv', sep='\t', index_col=0)["score"]
sample6 = pd.read_table('WT-1_2.annotate.csv', sep='\t', index_col=0)["score"]
sample7 = pd.read_table('WT-2_2.annotate.csv', sep='\t', index_col=0)["score"]
sample8 = pd.read_table('WT-3_2.annotate.csv', sep='\t', index_col=0)["score"]
sample9 = pd.read_table('WT-4_2.annotate.csv', sep='\t', index_col=0)["score"]
sample10 = pd.read_table('WT-5_2.annotate.csv', sep='\t', index_col=0)["score"]
#
meta1 = pd.read_table('MUT-1_2.annotate.csv', sep='\t', index_col=0).loc[:,['splice_site','intron_size', 'anchor','genes', 'exons_skipped','transcripts']]
meta2 = pd.read_table('MUT-2_2.annotate.csv', sep='\t', index_col=0).loc[:,['splice_site','intron_size', 'anchor','genes', 'exons_skipped','transcripts']]
meta3 = pd.read_table('MUT-4_2.annotate.csv', sep='\t', index_col=0).loc[:,['splice_site','intron_size', 'anchor','genes', 'exons_skipped','transcripts']]
meta4 = pd.read_table('MUT-5_2.annotate.csv', sep='\t', index_col=0).loc[:,['splice_site','intron_size', 'anchor','genes', 'exons_skipped','transcripts']]
meta5 = pd.read_table('MUT-6_2.annotate.csv', sep='\t', index_col=0).loc[:,['splice_site','intron_size', 'anchor','genes', 'exons_skipped','transcripts']]
meta6 = pd.read_table('WT-1_2.annotate.csv', sep='\t', index_col=0).loc[:,['splice_site','intron_size', 'anchor','genes', 'exons_skipped','transcripts']]
meta7= pd.read_table('WT-2_2.annotate.csv', sep='\t', index_col=0).loc[:,['splice_site','intron_size', 'anchor','genes', 'exons_skipped','transcripts']]
meta8 = pd.read_table('WT-3_2.annotate.csv', sep='\t', index_col=0).loc[:,['splice_site','intron_size', 'anchor','genes', 'exons_skipped','transcripts']]
meta9 = | pd.read_table('WT-4_2.annotate.csv', sep='\t', index_col=0) | pandas.read_table |
""" ecospold2matrix - Class for recasting ecospold2 dataset in matrix form.
The module provides function to parse ecospold2 data, notably ecoinvent 3, as
Leontief A-matrix and extensions, or alternatively as supply and use tables for
the unallocated version of ecoinvent.
:PythonVersion: 3
:Dependencies: pandas 0.14.1 or more recent, scipy, numpy, lxml and xml
License: BDS
Authors:
<NAME>
<NAME>
<NAME>
<NAME>
Credits:
This module re-uses/adapts code from brightway2data, more specifically the
Ecospold2DataExtractor class in import_ecospold2.py, changeset:
271:7e67a75ed791; Wed Sep 10; published under BDS-license:
Copyright (c) 2014, <NAME> and ETH Zürich
Neither the name of ETH Zürich nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE
COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import pdb
import os
import glob
import io
import pkgutil
import subprocess
from lxml import objectify
import xml.etree.ElementTree as ET
from lxml import etree
import pandas as pd
_df = pd.DataFrame
import numpy as np
import scipy.sparse
import scipy.io
import logging
import pickle
import gzip
import csv
import shelve
import hashlib
import sqlite3
try:
import IPython
except:
pass
import re
import xlrd
import xlwt
import copy
# pylint: disable-msg=C0103
class Ecospold2Matrix(object):
"""
Defines a parser object that holds all project parameters and processes the
ecospold-formatted data into matrices of choice.
The two main functions of this class are ecospold_to_Leontief() and
ecospold_to_sut()
"""
# Some hardcoded stuff
__PRE = '{http://www.EcoInvent.org/EcoSpold02}'
__ELEXCHANGE = 'ElementaryExchanges.xml'
__INTERMEXCHANGE = 'IntermediateExchanges.xml'
__ACTIVITYINDEX = 'ActivityIndex.xml'
__DB_CHARACTERISATION = 'characterisation.db'
rtolmin = 1e-16 # 16 significant digits being roughly the limit of float64
__TechnologyLevels = pd.Series(
['Undefined', 'New', 'Modern', 'Current', 'Old', 'Outdated'],
index=[0, 1, 2, 3, 4, 5])
def __init__(self, sys_dir, project_name, out_dir='.', lci_dir=None,
positive_waste=False, prefer_pickles=False, nan2null=False,
save_interm=True, PRO_order=['ISIC', 'activityName'],
STR_order=['comp', 'name', 'subcomp'],
verbose=True, version_name='ecoinvent31',
unlinked = True, remove_markets=True):
""" Defining an ecospold2matrix object, with key parameters that
determine how the data will be processes.
Args:
-----
* sys_dir: directory containing the system description,i.e., ecospold
dataset and master XML files
* project_name: Name used to log progress and save results
* out_dir: Directory where to save result matrices and logs
* lci_dir: Directory where official cummulative LCI ecospold files are
* positive_waste: Whether or not to change sign convention and make
waste flows positive
[default false]
* prefer_pickles: If sys_dir contains pre-processed data in form of
pickle-files, whether or not to use those
[Default: False, don't use]
* nan2null: Whether or not to replace Not-a-Number by 0.0
[Default: False, don't replace anything]
* save_interm: Whether or not to save intermediate results as pickle
files for potential re-use
[Default: True, do it]
* PRO_order: List of meta-data used for sorting processes in the
different matrices.
[Default: first sort by order of ISIC code, then, within
each code, by order of activity name]
* PRO_order: List of meta-data used for sorting stressors (elementary
flows) in the different matrices.
[Default: first sort by order of compartment,
subcompartment and then by name]
* unlinked: Whether or not the datasets are linked/allocated.
[Default: True, the data are unlinked]
Main functions and worflow:
---------------------------
self.ecospold_to_Leontief(): Turn ecospold files into Leontief matrix
representation
* Parse ecospold files, get products, activities, flows, emissions
* If need be, correct inconsistencies in system description
* After corrections, create "final" labels for matrices
* Generate symmetric, normalized system description (A-matrix,
extension F-matrix)
* Save to file (many different formats)
* Optionally, read cummulative lifecycle inventories (slow) and
compare to calculated LCI for sanity testing
self.ecospold_to_sut(): Turn unallocated ecospold into Suppy and Use
Tables
* Parse ecospold files, get products, activities, flows, emissions
* Organize in supply and use
* optionally, aggregate sources to generate a fully untraceable SUT
* Save to file
"""
# INTERMEDIATE DATA/RESULTS, TO BE GENERATED BY OBJECT METHODS
self.products = None # products, with IDs and descriptions
self.activities = None # activities, w IDs and description
self.inflows = None # intermediate-exchange input flows
self.outflows = None # intermediate-exchange output flows
self.prices = None
self.elementary_flows = None # elementary flows
self.q = None # total supply of each product
self.PRO_old=None
self.STR_old = None
self.IMP_old=None
# FINAL VARIABLES: SYMMETRIC SYSTEM, NORMALIZED AND UNNORMALIZED
self.PRO = None # Process labels, rows/cols of A-matrix
self.STR = None # Factors labels, rows extensions
self.IMP = pd.DataFrame([]) # impact categories
self.A = None # Normalized Leontief coefficient matrix
self.F = None # Normalized factors of production,i.e.,
# elementary exchange coefficients
self.Z = None # Intermediate unnormalized process flows
self.G_pro = None # Unnormalized Process factor requirements
self.C = pd.DataFrame([]) # characterisation matrix
# Final variables, unallocated and unnormalized inventory
self.U = None # Table of use of products by activities
self.V = None # Table of supply of product by activities
# (ammounts for which use is recorded)
self.G_act = None # Table of factor use by activities
self.V_prodVol = None # Table of supply production volumes
# (potentially to rescale U, V and G)
# QUALITY CHECKS VARIABLES, TO BE GENERATED BY OBJECT METHODS.
self.E = None # cummulative LCI matrix (str x pro)
self.unsourced_flows = None # product flows without clear source
self.missing_activities = None # cases of no incomplete dataset, i.e.,
# no producer for a product
# PROJECT NAME AND DIRECTORIES, FROM ARGUMENTS
self.sys_dir = os.path.abspath(sys_dir)
self.project_name = project_name
self.out_dir = os.path.abspath(out_dir)
if lci_dir:
self.lci_dir = os.path.abspath(lci_dir)
else:
self.lci_dir = lci_dir
self.version_name = version_name
self.char_method = None # characterisation method set by
# read_characterisation function
self.data_version = None
# PROJECT-WIDE OPTIONS
self.positive_waste = positive_waste
self.prefer_pickles = prefer_pickles
self.nan2null = nan2null
self.save_interm = save_interm
self.PRO_order = PRO_order
self.STR_order = STR_order
# DATASETS UNLINKED/UNALLOCATED
self.unlinked = unlinked # Is the data (spold files) linked and allocated or not. Default = True data is NOT linked
self.remove_markets = remove_markets # If the data is unlinked, remove the markets see function self.remove_Markets
# CREATE DIRECTORIES IF NOT IN EXISTENCE
if out_dir and not os.path.exists(self.out_dir):
os.makedirs(self.out_dir)
self.log_dir = os.path.join(self.out_dir, self.project_name + '_log')
# Fresh new log
os.system('rm -Rf ' + self.log_dir)
os.makedirs(self.log_dir)
# DEFINE LOG TOOL
self.log = logging.getLogger(self.project_name)
self.log.setLevel(logging.INFO)
self.log.handlers = [] # reset handlers
if verbose:
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
fh = logging.FileHandler(os.path.join(self.log_dir,
project_name + '.log'))
fh.setLevel(logging.INFO)
aformat = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
formatter = logging.Formatter(aformat)
fh.setFormatter(formatter)
self.log.addHandler(fh)
if verbose:
ch.setFormatter(formatter)
self.log.addHandler(ch)
# RECORD OBJECT/PROJECT IDENTITY TO LOG
self.log.info('Ecospold2Matrix Processing')
try:
gitcommand = ["git", "log", "--pretty=format:%H", "-n1"]
githash = subprocess.check_output(gitcommand).decode("utf-8")
self.log.info("Current git commit: {}".format(githash))
except:
pass
self.log.info('Project name: ' + self.project_name)
# RECORD PROJECT PARAMETERS TO LOG
self.log.info('Unit process and Master data directory: ' + sys_dir)
self.log.info('Data saved in: ' + self.out_dir)
if self.lci_dir:
self.log.info('Official rolled-up life cycle inventories in: ' +
self.lci_dir)
if self.positive_waste:
self.log.info('Sign conventions changed to make waste flows '
'positive')
if self.prefer_pickles:
self.log.info('When possible, loads pickled data instead of'
' parsing ecospold files')
if self.nan2null:
self.log.info('Replace Not-a-Number instances with 0.0 in all'
' matrices')
if self.save_interm:
self.log.info('Pickle intermediate results to files')
self.log.info('Order processes based on: ' +
', '.join([i for i in self.PRO_order]))
self.log.info('Order elementary exchanges based on: ' +
', '.join([i for i in self.STR_order]))
database_name = self.project_name + '_' + self.__DB_CHARACTERISATION
os.system('rm ' + database_name)
try:
self.conn = sqlite3.connect(
self.project_name + '_' + self.__DB_CHARACTERISATION)
self.initialize_database()
except:
self.log.warning("Could not establish connection to database")
pass
self.conn.commit()
# =========================================================================
# MAIN FUNCTIONS
def ecospold_to_Leontief(self, fileformats=None, with_absolute_flows=False,
lci_check=False, rtol=5e-2, atol=1e-5, imax=3,
characterisation_file=None,
ardaidmatching_file=None):
""" Recasts an full ecospold dataset into normalized symmetric matrices
Args:
-----
* fileformats : List of file formats in which to save data
[Default: None, save to all possible file formats]
Options: 'Pandas' --> pandas dataframes
'csv' --> text with separator = '|'
'SparsePandas' --> sparse pandas dataframes
'SparseMatrix' --> scipy AND matlab sparse
'SparseMatrixForArda' --> with special
background
variable names
* with_absolut_flow: If true, produce not only coefficient matrices (A
and F) but also scale them up to production
volumes to get absolute flows in separate
matrices. [default: false]
* lci_check : If true, and if lci_dir is not None, parse cummulative
lifecycle inventory data as self.E matrix (str x pro),
and use it for sanity check against calculated
cummulative LCI
* rtol : Initial (max) relative tolerance for comparing E with
calculated E
* atol : Initial (max) absolute tolerance for comparing E with
calculated E
* characterisation_file: name of file containing characterisation
factors
* ardaidmatching_file: name of file matching Arda Ids, Ecoinvent2 DSIDs
and ecoinvent3 UUIDs. Only useful for the Arda
project.
Generates:
----------
* Intermediate data: products, activities, flows, labels
* A matrix: Normalized, intermediate exchange Leontief coefficients
(pro x pro)
* F matrix: Normalized extensions, factor requirements (elementary
exchanges) for each process (str x pro)
* E matrix: [optionally] cummulative normalized lci data (str x pro)
(for quality check)
Returns:
-------
* None, save all matrices in the object, and to file
"""
# Read in system description
self.extract_products()
self.extract_activities()
self.get_flows()
self.get_labels()
# Clean up if necessary
self.__find_unsourced_flows()
if self.unsourced_flows is not None:
self.__fix_flow_sources()
self.__fix_missing_activities()
# Once all is well, add extra info to PRO and STR, and order nicely
self.complement_labels()
# Finally, assemble normalized, symmetric matrices
self.build_AF()
if with_absolute_flows:
self.scale_up_AF()
if characterisation_file is not None:
print("starting characterisation")
if 'LCIA_implementation' in characterisation_file:
self.log.info("Characterisation file seems to be ecoinvent"
" LCIA implementation. Will apply simple name"
" matching")
self.simple_characterisation_matching(characterisation_file)
else:
self.prepare_matching_load_parameters()
self.process_inventory_elementary_flows()
self.read_characterisation(characterisation_file)
self.populate_complementary_tables()
self.characterize_flows()
self.generate_characterized_extensions()
if ardaidmatching_file:
self.make_compatible_with_arda(ardaidmatching_file)
# Save system to file
self.save_system(fileformats)
# Read/load lci cummulative emissions and perform quality check
if lci_check:
self.get_cummulative_lci()
self.cummulative_lci_check(rtol, atol, imax)
self.log.info('Done running ecospold2matrix.ecospold_to_Leontief')
def ecospold_to_sut(self, fileformats=None, make_untraceable=False):
""" Recasts an unallocated ecospold dataset into supply and use tables
Args:
-----
* fileformats : List of file formats in which to save data
[Default: None, save to all possible file formats]
Options: 'Pandas' --> pandas dataframes
'SparsePandas' --> sparse pandas dataframes,
'SparseMatrix' --> scipy AND matlab sparse
'csv' --> text files
* make_untraceable: Whether or not to aggregate away the source
activity dimension, yielding a use table in which
products are no longer linked to their providers
[default: False; don't do it]
Generates:
----------
* Intermediate data: Products, activities, flows, labels
* V table Matrix of supply of product by activities
* U table Matrix of use of products by activities
(recorded for a given supply amount, from V)
* G_act Matrix of factor use by activities
(recorded for a given supply amount, from V)
* V_prodVol Matrix of estimated real production volumes,
arranged as suply table (potentially useful
to rescale U, V and G)
Returns:
-------
* None, save all matrices in the object, and to file
"""
# Extract data on producs and activities
self.extract_products()
self.extract_activities()
# Extract or load data on flows and labels
self.get_flows()
self.get_labels()
self.complement_labels()
# Arrange as supply and use
if self.remove_markets is True:
self.remove_Markets()
self.build_sut(make_untraceable)
# Save to file
self.save_system(fileformats)
self.log.info("Done running ecospold2matrix.ecospold_to_sut")
# =========================================================================
# INTERMEDIATE WRAPPER METHODS: parse or load data + pickle results or not
def get_flows(self):
""" Wrapper: load from pickle or call extract_flows() to read ecospold
files.
Behavious determined by:
------------------------
prefer_pickles: Whether or not to load flow lists from previous run
instead of (re)reading XML Ecospold files
save_interm: Whether or not to pickle flows to file for use in
another project run.
Generates:
----------
self.inflows
self.outflows
self.elementary_flows
self.prices
Returns:
--------
None, only defines within object
"""
filename = os.path.join(self.sys_dir, 'flows.pickle')
# EITHER LOAD FROM PREVIOUS ROUND...
if self.prefer_pickles and os.path.exists(filename):
# Read all flows
with open(filename, 'rb') as f:
[self.inflows,
self.elementary_flows,
self.outflows,
self.prices] = pickle.load(f)
# Log event
sha1 = self.__hash_file(f)
msg = "{} loaded from {} with SHA-1 of {}"
self.log.info(msg.format('Flows', filename, sha1))
# ...OR EXTRACT FROM ECOSPOLD DATA..
else:
self.extract_flows()
# optionally, pickle for further use
if self.save_interm:
with open(filename, 'wb') as f:
pickle.dump([self.inflows,
self.elementary_flows,
self.outflows,
self.prices], f)
# Log event
sha1 = self.__hash_file(filename)
msg = "{} saved in {} with SHA-1 of {}"
self.log.info(msg.format('Flows', filename, sha1))
def get_labels(self):
"""
Wrapper: load from pickle, or call methods to build labels from scratch
Behaviour determined by:
------------------------
* prefer_pickles: Whether or not to load flow lists from previous run
instead of (re)reading XML Ecospold files
* save_interm: Whether or not to pickle flows to file for use in
another project run.
Generates:
----------
* PRO: metadata on each process, i.e. production of each product
by each activity.
* STR: metadata on each stressor (or elementary exchange, factor of
production)
Returns:
--------
* None, only defines within object
NOTE:
-----
* At this stage, labels are at the strict minimum (ID, name) to
facilitate the addition of new processes or stressors, if need be, to
"patch" inconsistencies in the dataset. Once all is sorted out, more
data from product, activities, and elementary_flow descriptions are
added to the labels in self.complement_labels()
"""
filename = os.path.join(self.sys_dir, 'rawlabels.pickle')
# EITHER LOAD FROM PREVIOUS ROUND...
if self.prefer_pickles and os.path.exists(filename):
# Load from pickled file
with open(filename, 'rb') as f:
self.PRO, self.STR = pickle.load(f)
# Log event
sha1 = self.__hash_file(f)
msg = "{} loaded from {} with SHA-1 of {}"
self.log.info(msg.format('Labels', filename, sha1))
# OR EXTRACT FROM ECOSPOLD DATA...
else:
self.build_PRO()
self.build_STR()
# and optionally pickle for further use
if self.save_interm:
with open(filename, 'wb') as f:
pickle.dump([self.PRO, self.STR], f)
# Log event
sha1 = self.__hash_file(filename)
msg = "{} saved in {} with SHA-1 of {}"
self.log.info(msg.format('Labels', filename, sha1))
def get_cummulative_lci(self):
""" Wrapper: load from pickle or call build_E() to read ecospold files.
Behaviour determined by:
------------------------
* prefer_pickles: Whether or not to load flow lists from previous run
instead of (re)reading XML Ecospold files
* save_interm: Whether or not to pickle flows to file for use in
another project run.
* lci_dir: Directory where cummulative LCI ecospold are
Generates:
----------
* E: cummulative LCI emissions matrix
Returns:
--------
* None, only defines within object
"""
filename = os.path.join(self.lci_dir, 'lci.pickle')
# EITHER LOAD FROM PREVIOUS ROUND...
if self.prefer_pickles and os.path.exists(filename):
with open(filename, 'rb') as f:
self.E = pickle.load(f)
# log event
sha1 = self.__hash_file(f)
msg = "{} loaded from {} with SHA-1 of {}"
self.log.info(msg.format('Cummulative LCI', filename, sha1))
# OR BUILD FROM ECOSPOLD DATA...
else:
self.build_E()
# optionally, pickle for further use
if self.save_interm:
with open(filename, 'wb') as f:
pickle.dump(self.E, f)
# log event
sha1 = self.__hash_file(filename)
msg = "{} saved in {} with SHA-1 of {}"
self.log.info(msg.format('Cummulative LCI', filename, sha1))
# =========================================================================
# PARSING METHODS: the hard work with xml files
def extract_products(self):
""" Parses INTERMEDIATEEXCHANGE file to extract core data on products:
Id's, name, unitID, unitName.
Args: None
----
Returns: None
-------
Generates: self.products
----------
Credit:
------
This function incorporates/adapts code from Brightway2data, i.e., the
method extract_technosphere_metadata from class Ecospold2DataExtractor
"""
# The file to parse
fp = os.path.join(self.sys_dir, 'MasterData', self.__INTERMEXCHANGE)
assert os.path.exists(fp), "Can't find " + self.__INTERMEXCHANGE
def extract_metadata(o):
""" Subfunction to get the data from lxml root object """
# Get list of id, name, unitId, and unitName for all intermediate
# exchanges
# Added: CPC code (AJ)
try:
cpc = [o.classification[i].classificationValue for i in
range(len(o.classification)) if
o.classification[i].classificationSystem == 'CPC'][0]
except IndexError:
cpc = ''
return {'productName': o.name.text,
'unitName': o.unitName.text,
'productId': o.get('id'),
'unitId': o.get('unitId'),
'CPCCode': str(cpc)}
# Parse XML file
with open(fp, 'r', encoding="utf-8") as fh:
root = objectify.parse(fh).getroot()
pro_list = [extract_metadata(ds) for ds in root.iterchildren()]
# Convert this list into a dataFrame
self.products = pd.DataFrame(pro_list)
self.products.index = self.products['productId']
# Log event
sha1 = self.__hash_file(fp)
msg = "Products extracted from {} with SHA-1 of {}"
self.log.info(msg.format(self.__INTERMEXCHANGE, sha1))
def extract_activities(self):
""" Parses ACTIVITYINDEX file to extract core data on activities:
Id's, activity type, startDate, endDate
Args: None
----
Returns: None
--------
Generates: self.activities
---------
"""
# Parse XML file describing activities
activity_file = os.path.join(self.sys_dir,
'MasterData',
self.__ACTIVITYINDEX)
root = ET.parse(activity_file).getroot()
# Get list of activities and their core attributes
act_list = []
for act in root:
act_list.append([act.attrib['id'],
act.attrib['activityNameId'],
act.attrib['specialActivityType'],
act.attrib['startDate'],
act.attrib['endDate']])
# Remove any potential duplicates
act_list, _, _, _ = self.__deduplicate(act_list, 0, 'activity_list')
# Convert to dataFrame
self.activities = pd.DataFrame(act_list,
columns=('activityId',
'activityNameId',
'activityType',
'startDate',
'endDate'),
index=[row[0] for row in act_list])
self.activities['activityType'
] = self.activities['activityType'].astype(int)
# Log event
sha1 = self.__hash_file(activity_file)
msg = "{} extracted from {} with SHA-1 of {}"
self.log.info(msg.format('Activities', self.__ACTIVITYINDEX, sha1))
def extract_flows(self):
""" Extracts of all intermediate and elementary flows
Args: None
----
Returns: None
-------
Generates:
----------
self.inflows: normalized product (intermediate) inputs
self.elementary_flows: normalized elementary flows
self.outflows: normalized product (intermediate) outputs
"""
# Initialize empty lists
inflow_list = []
outflow_list = []
elementary_flows = []
product_price_list = []
# Get list of ecoSpold files to process
data_folder = os.path.join(self.sys_dir, 'datasets')
spold_files = glob.glob(os.path.join(data_folder, '*.spold'))
# Log event
self.log.info('Processing {} files in {}'.format(len(spold_files),
data_folder))
# ONE FILE AT A TIME
for sfile in spold_files:
# Get activityId from file name
current_file = os.path.basename(sfile)
current_id = os.path.splitext(current_file)[0]
# For each file, find flow data
root = etree.parse(sfile).getroot()
child_ds = root.find(self.__PRE + 'childActivityDataset')
if child_ds is None:
child_ds = root.find(self.__PRE + 'activityDataset')
flow_ds = child_ds.find(self.__PRE + 'flowData')
# GO THROUGH EACH FLOW IN TURN
for entry in flow_ds:
# Get magnitude of flow
try:
_amount = float(entry.attrib.get('amount'))
except:
# Get ID of failed amount
_fail_id = entry.attrib.get('elementaryExchangeId',
'not found')
if _fail_id == 'not found':
_fail_id = entry.attrib.get('intermediateExchangeId',
'not found')
# Log failure
self.log.warn("Parser warning: flow in {0} cannot be"
" converted' 'to float. Id: {1} - amount:"
" {2}".format(str(current_file),
_fail_id,
_amount))
continue
if _amount == 0: # Ignore entries of magnitude zero
continue
# GET OBJECT, DESTINATION AND/OR ORIGIN OF EACH FLOW
# ... for elementary flows
if entry.tag == self.__PRE + 'elementaryExchange':
elementary_flows.append([
current_id,
entry.attrib.get('elementaryExchangeId'),
_amount])
elif entry.tag == self.__PRE + 'intermediateExchange':
# ... or product use
if entry.find(self.__PRE + 'inputGroup') is not None:
inflow_list.append([
current_id,
entry.attrib.get('activityLinkId'),
entry.attrib.get('intermediateExchangeId'),
_amount])
# ... or product supply.
elif entry.find(self.__PRE + 'outputGroup') is not None:
outflow_list.append([
current_id,
entry.attrib.get('intermediateExchangeId'),
_amount,
entry.attrib.get('productionVolumeAmount'),
entry.find(self.__PRE + 'outputGroup').text])
# ... if output get the price info if it is a primary product
product_property_path = self.__PRE+'property[@propertyId ="38f94dd1-d5aa-41b8-b182-c0c42985d9dc"]'
if entry.findall(product_property_path) is not None:
for elem in entry.findall(product_property_path):
price = elem.attrib.get('amount')
for nextlevelelem in elem:
if nextlevelelem.tag == self.__PRE+'name':
name = nextlevelelem.text
elif nextlevelelem.tag == self.__PRE+'unitName':
unit = nextlevelelem.text
#print(nextlevelelem.tag,': ', nextlevelelem.text)
product_price_list.append([current_id,
entry.attrib.get('intermediateExchangeId'),
name, price, unit,
entry.find(self.__PRE + 'outputGroup').text])
#print(current_id,entry.attrib.get('intermediateExchangeId'),name, price, unit, entry.find(self.__PRE + 'outputGroup').text])
# Check for duplicates in outputflows
# there should really only be one output flow per activity
outflow_list, _, _, _ = self.__deduplicate(outflow_list,
0,
'outflow_list')
# CONVERT TO DATAFRAMES
self.inflows = pd.DataFrame(inflow_list, columns=['fileId',
'sourceActivityId',
'productId',
'amount'])
self.elementary_flows = pd.DataFrame(elementary_flows,
columns=['fileId',
'elementaryExchangeId',
'amount'])
out = pd.DataFrame(outflow_list,
columns=['fileId',
'productId',
'amount',
'productionVolume',
'outputGroup'],
index=[row[0] for row in outflow_list])
out['productionVolume'] = out['productionVolume'].astype(float)
out['outputGroup'] = out['outputGroup'].astype(int)
self.outflows = out
prices = pd.DataFrame(product_price_list,
columns = ['fileId',
'productId',
'name',
'amount',
'unit',
'outputGroup'],
index=[row[0] for row in product_price_list])
prices['amount'] = prices['amount'].astype(float)
prices['outputGroup'] = prices['outputGroup'].astype(int)
self.prices = prices
def build_STR(self):
""" Parses ElementaryExchanges.xml to builds stressor labels
Args: None
----
Behaviour influenced by:
------------------------
* self.STR_order: Determines how labels are ordered
Returns: None
-------
Generates: self.STR: DataFrame with stressor Id's for index
Credit:
-------
This function incorporates/adapts code from Brightway2data, that is,
the classmethod extract_biosphere_metadata from Ecospold2DataExtractor
"""
# File to parse
fp = os.path.join(self.sys_dir, 'MasterData', self.__ELEXCHANGE)
assert os.path.exists(fp), "Can't find ElementaryExchanges.xml"
def extract_metadata(o):
""" Subfunction to extract data from lxml root object """
return {
'id': o.get('id'),
'name': o.name.text,
'unit': o.unitName.text,
'cas': o.get('casNumber'),
'comp': o.compartment.compartment.text,
'subcomp': o.compartment.subcompartment.text
}
# Extract data from file
with open(fp, 'r', encoding="utf-8") as fh:
root = objectify.parse(fh).getroot()
self.STR = _df([extract_metadata(i) for i in root.iterchildren()])
# organize in pandas DataFrame
self.STR.index = self.STR['id']
self.STR = self.STR.reindex_axis(['id',
'name',
'unit',
'cas',
'comp',
'subcomp'], axis=1)
self.STR = self.STR.sort_values(by=self.STR_order)
# Log event
sha1 = self.__hash_file(fp)
msg = "{} extracted from {} with SHA-1 of {}"
self.log.info(msg.format('Elementary flows', self.__ELEXCHANGE, sha1))
def build_PRO(self):
""" Builds minimalistic intermediate exchange process labels
This functions parses all files in dataset folder. The list is
returned as pandas DataFrame. The index of the DataFrame is the
filename of the files in the DATASET folder.
Args: None
----
Behaviour influenced by:
------------------------
* self.PRO_order: Determines how labels are ordered
Returns: None
-------
Generates: self.PRO: DataFrame with file_Id's for index
----------
"""
# INITIALIZE
# ----------
# Use ecospold filenames as indexes (they combine activity Id and
# reference-product Id)
data_folder = os.path.join(self.sys_dir, 'datasets')
spold_files = glob.glob(os.path.join(data_folder, '*.spold'))
_in = [os.path.splitext(os.path.basename(fn))[0] for fn in spold_files]
# Initialize empty DataFrame
PRO = pd.DataFrame(index=_in, columns=('activityId',
'productId',
'activityName',
'ISIC',
'EcoSpoldCategory',
'geography',
'technologyLevel',
'macroEconomicScenario'))
# LOOP THROUGH ALL FILES TO EXTRACT ADDITIONAL DATA
# -------------------------------------------------
# Log event
if len(spold_files) > 1000:
msg_many_files = 'Processing {} files - this may take a while ...'
self.log.info(msg_many_files.format(len(spold_files)))
#print('One step further')
for sfile in spold_files:
#print(sfile)
# Remove filename extension
file_index = os.path.splitext(os.path.basename(sfile))[0]
#print(file_index)
# Parse xml tree
root = ET.parse(sfile).getroot()
# Record product Id
if self.unlinked == True:
#objectify is a very handy way to parse an xml tree
#into a python object which is more easily accsesible
rroot = objectify.parse(sfile).getroot()
if hasattr(rroot, "activityDataset"):
stem = rroot.activityDataset
else:
stem = rroot.childActivityDataset
#loop through the intermediate exchanges to find the ref. flow
#might be a better/smarter way but don't know it yet
for flow in stem.flowData.intermediateExchange:
if hasattr(flow, 'outputGroup'):
if flow.outputGroup == 0:
PRO.loc[file_index, 'productId'] = flow.attrib[
'intermediateExchangeId']
#For the unlnked data the file name does not
#feature the _productID anymore, so loop through the
#flow data to find the reference flow.
break #An activity has only one reference flow by
#construction so break out of loop after we found it
del rroot
else:
PRO.ix[file_index, 'productId'] = file_index.split('_')[1]
#if the data are linked, the product name is in the spold files
#print(file_index, sfile)
# Find activity dataset
child_ds = root.find(self.__PRE + 'childActivityDataset')
if child_ds is None:
child_ds = root.find(self.__PRE + 'activityDataset')
activity_ds = child_ds.find(self.__PRE + 'activityDescription')
# Loop through activity dataset
for entry in activity_ds:
# Get name, id, etc
if entry.tag == self.__PRE + 'activity':
PRO.ix[file_index, 'activityId'] = entry.attrib['id']
PRO.ix[file_index, 'activityName'] = entry.find(
self.__PRE + 'activityName').text
continue
# Get classification codes
if entry.tag == self.__PRE + 'classification':
if 'ISIC' in entry.find(self.__PRE +
'classificationSystem').text:
PRO.ix[file_index, 'ISIC'] = entry.find(
self.__PRE + 'classificationValue').text
if 'EcoSpold' in entry.find(
self.__PRE + 'classificationSystem').text:
PRO.ix[file_index, 'EcoSpoldCategory'
] = entry.find(self.__PRE +
'classificationValue').text
continue
# Get geography
if entry.tag == self.__PRE + 'geography':
PRO.ix[file_index, 'geography'
] = entry.find(self.__PRE + 'shortname').text
continue
# Get Technology
try:
if entry.tag == self.__PRE + 'technology':
PRO.ix[file_index, 'technologyLevel'
] = entry.attrib['technologyLevel']
continue
except:
# Apparently it is not a mandatory field in ecospold2.
# Skip if absent
pass
# Find MacroEconomic scenario
if entry.tag == self.__PRE + 'macroEconomicScenario':
PRO.ix[file_index, 'macroEconomicScenario'
] = entry.find(self.__PRE + 'name').text
continue
# quality check of id and index
if file_index.split('_')[0] != PRO.ix[file_index, 'activityId']:
self.log.warn('Index based on file {} and activityId in the'
' xml data are different'.format(str(sfile)))
# Final touches and save to self
PRO['technologyLevel'] = PRO['technologyLevel'].fillna(0).astype(int)
for i in self.__TechnologyLevels.index:
bo = PRO['technologyLevel'] == i
PRO.ix[bo, 'technologyLevel'] = self.__TechnologyLevels[i]
self.PRO = PRO.sort_values(by=self.PRO_order)
def extract_old_labels(self, old_dir, sep='|'):
""" Read in old PRO, STR and IMP labels csv-files from directory
self.STR_old must be defined with:
* with THREE name columns called name, name2, name3
* cas, comp, subcomp, unit
* ardaid, i.e., the Id that we wish to re-use in the new dataset
"""
# Read in STR
path = glob.glob(os.path.join(old_dir, '*STR*.csv'))[0]
self.STR_old = pd.read_csv(path, sep=sep)
# Read in PRO
path = glob.glob(os.path.join(old_dir, '*PRO*.csv'))[0]
self.PRO_old = pd.read_csv(path, sep=sep)
# Read in IMP
path = glob.glob(os.path.join(old_dir, '*IMP*.csv'))[0]
self.IMP_old = | pd.read_csv(path, sep=sep) | pandas.read_csv |
"""This module contains functions for using LDA topic modeling."""
import os
import datetime
import pandas as pd
import pickle
import json
from gensim.models import Phrases
from gensim.corpora import Dictionary
from gensim.models import TfidfModel, LdaModel
from gensim.models.coherencemodel import CoherenceModel
from gensim.models.callbacks import PerplexityMetric, ConvergenceMetric, CoherenceMetric
from gensim.corpora.mmcorpus import MmCorpus
import spacy
import plotnine as p9
import logging
from usrightmedia.shared.loggers import get_logger
# python -m spacy download en_core_web_lg
nlp = spacy.load("en_core_web_lg")
INPUTS_DIR = os.path.join(
"..", "..", "data", "02-intermediate", "08-topic-models", "01-inputs"
)
MODELS_DIR = os.path.join(
"..", "..", "data", "02-intermediate", "08-topic-models", "02-models"
)
from usrightmedia.shared.loggers import get_logger
# =============================================================================================================
# PREPARE INPUTS: TEXT PRE-PROCESSING
"""
https://explosion.ai/demos/displacy-ent https://github.com/explosion/spaCy/blob/ed561cf428494c2b7a6790cd4b91b5326102b59d/spacy/glossary.py
# POS tags
# Universal POS Tags
# http://universaldependencies.org/u/pos/
"ADJ": "adjective",
"ADP": "adposition",
"ADV": "adverb",
"AUX": "auxiliary",
"CONJ": "conjunction",
"CCONJ": "coordinating conjunction",
"DET": "determiner",
"INTJ": "interjection",
"NOUN": "noun",
"NUM": "numeral",
"PART": "particle",
"PRON": "pronoun",
"PROPN": "proper noun",
"PUNCT": "punctuation",
"SCONJ": "subordinating conjunction",
"SYM": "symbol",
"VERB": "verb",
"X": "other",
"EOL": "end of line",
"SPACE": "space",
# Named Entity Recognition
# OntoNotes 5
# https://catalog.ldc.upenn.edu/docs/LDC2013T19/OntoNotes-Release-5.0.pdf
"PERSON": "People, including fictional",
"NORP": "Nationalities or religious or political groups",
"FACILITY": "Buildings, airports, highways, bridges, etc.",
"FAC": "Buildings, airports, highways, bridges, etc.",
"ORG": "Companies, agencies, institutions, etc.",
"GPE": "Countries, cities, states",
"LOC": "Non-GPE locations, mountain ranges, bodies of water",
"PRODUCT": "Objects, vehicles, foods, etc. (not services)",
"EVENT": "Named hurricanes, battles, wars, sports events, etc.",
"WORK_OF_ART": "Titles of books, songs, etc.",
"LAW": "Named documents made into laws.",
"LANGUAGE": "Any named language",
"DATE": "Absolute or relative dates or periods",
"TIME": "Times smaller than a day",
"PERCENT": 'Percentage, including "%"',
"MONEY": "Monetary values, including unit",
"QUANTITY": "Measurements, as of weight or distance",
"ORDINAL": '"first", "second", etc.',
"CARDINAL": "Numerals that do not fall under another type",
# Named Entity Recognition
# Wikipedia
# http://www.sciencedirect.com/science/article/pii/S0004370212000276
# https://pdfs.semanticscholar.org/5744/578cc243d92287f47448870bb426c66cc941.pdf
"PER": "Named person or family.",
"MISC": "Miscellaneous entities, e.g. events, nationalities, products or works of art",
# https://github.com/ltgoslo/norne
"EVT": "Festivals, cultural events, sports events, weather phenomena, wars, etc.",
"PROD": "Product, i.e. artificially produced entities including speeches, radio shows, programming languages, contracts, laws and ideas",
"DRV": "Words (and phrases?) that are dervied from a name, but not a name in themselves, e.g. 'Oslo-mannen' ('the man from Oslo')",
"GPE_LOC": "Geo-political entity, with a locative sense, e.g. 'John lives in Spain'",
"GPE_ORG": "Geo-political entity, with an organisation sense, e.g. 'Spain declined to meet with Belgium'",
}
"""
def preprocess_docs(docs, docs_type, INPUTS_DIR):
"""Pre-process which removes empty documents.
citation: based off of https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/atmodel_tutorial.ipynb
"""
processed_docs = []
for doc in nlp.pipe(docs):
# Process document using Spacy NLP pipeline
# Lemmatize tokens, remove punctuation, remove stopwords, remove certain entity types
doc = [
token.lemma_.lower()
for token in doc
if token.is_alpha
and not token.is_stop
and token.pos_ in ["NOUN", "ADJ", "ADV"]
and token.ent_type_ not in ["PERSON", "DATE", "TIME", "PERCENT", "QUANTITY"]
and token.text not in ["trump", "Trump"]
]
# pre-processing can result in some docs having no tokens (i.e., length is 0)
if len(doc) > 0:
processed_docs.append(doc)
docs = processed_docs
del processed_docs
# Add bigrams to docs (only ones that appear 20 times or more).
bigram = Phrases(docs, min_count=20)
for idx in range(len(docs)):
for token in bigram[docs[idx]]:
if "_" in token:
# Token is a bigram, add to document.
docs[idx].append(token)
with open(os.path.join(INPUTS_DIR, "docs", f"docs_{docs_type}.pkl"), "wb") as handle:
pickle.dump(docs, handle)
return docs
def preprocess_docs_with_doc_ids(doc_ids, docs, docs_type, INPUTS_DIR):
"""Pre-process which includes doc_id field.
*Patch-up: should have included the ID information in preprocess_docs();
this is a fix so topic assignments can be associated back to their doc_ids from INCA.
Args:
doc_ids (list of str)
docs (list of str)
docs_type (str): "titles", "leads", "texts"
INPUTS_DIR (constant)
Returns:
df (dataframe): 'doc_id' and 'processed_doc' columns
citation: based off of https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/atmodel_tutorial.ipynb
"""
processed_ids = []
processed_docs = []
for n, doc in enumerate(nlp.pipe(docs)):
# Process document using Spacy NLP pipeline
# Lemmatize tokens, remove punctuation, remove stopwords, remove certain entity types
doc = [
token.lemma_.lower()
for token in doc
if token.is_alpha
and not token.is_stop
and token.pos_ in ["NOUN", "ADJ", "ADV"]
and token.ent_type_ not in ["PERSON", "DATE", "TIME", "PERCENT", "QUANTITY"]
and token.text not in ["trump", "Trump"]
]
# pre-processing can result in some docs having no tokens (i.e., length is 0)
if len(doc) > 0:
processed_ids.append(doc_ids[n])
processed_docs.append(doc)
docs = processed_docs
del processed_docs
# Add bigrams to docs (only ones that appear 20 times or more).
bigram = Phrases(docs, min_count=20)
for idx in range(len(docs)):
for token in bigram[docs[idx]]:
if "_" in token:
# Token is a bigram, add to document.
docs[idx].append(token)
df = pd.DataFrame()
df['doc_id'] = processed_ids
df['processed_doc'] = docs
with open(os.path.join(INPUTS_DIR, "docs", f"docs_{docs_type}_with_inca_ids.pkl"), "wb") as handle:
pickle.dump(df, handle)
return df
# Create a dictionary representation of the documents, and filter out frequent and rare words.
def save_dictionary(docs, docs_type, INPUTS_DIR):
dictionary = Dictionary(docs)
# https://radimrehurek.com/gensim/corpora/dictionary.html#gensim.corpora.dictionary.Dictionary.filter_extremes
# Filter out words that occur too frequently or too rarely
dictionary.filter_extremes(
no_below=10, no_above=0.5, keep_n=100000, keep_tokens=None
)
dictionary.compactify()
dictionary.save(
os.path.join(INPUTS_DIR, "dictionaries", f"dictionary_{docs_type}.dict")
)
return dictionary
def save_corpus(dictionary, docs, docs_type, INPUTS_DIR):
# Vectorize data: bag-of-words representation of the documents.
corpus = [dictionary.doc2bow(doc) for doc in docs]
tfidf = TfidfModel(corpus)
tc = tfidf[corpus]
corpus_tfidf = [bow for bow in tc]
MmCorpus.serialize(
os.path.join(INPUTS_DIR, "corpora", f"corpus_count_{docs_type}.mm"), corpus
)
MmCorpus.serialize(
os.path.join(INPUTS_DIR, "corpora", f"corpus_tfidf_{docs_type}.mm"), corpus_tfidf
)
return corpus, corpus_tfidf
# =============================================================================================================
# LDA TOPIC MODELING
def load_inputs(corpus_type, docs_type, INPUTS_DIR):
"""Load inputs for computing topic model.
Args:
corpus_type (str): "count", "tfidf"
docs_type (str): "leads", "titles", "texts"
Returns:
inputs (dict): keys - "dictionary", "corpus", "docs", "corpus_type", "docs_type"
"""
inputs = {}
inputs["dictionary"] = Dictionary.load(
os.path.join(INPUTS_DIR, "dictionaries", f"dictionary_{docs_type}.dict")
)
inputs["corpus"] = MmCorpus(
os.path.join(INPUTS_DIR, "corpora", f"corpus_{corpus_type}_{docs_type}.mm")
)
with open(os.path.join(INPUTS_DIR, "docs", f"docs_{docs_type}.pkl"), "rb") as handle:
inputs["docs"] = pickle.load(handle)
inputs["corpus_type"] = corpus_type
inputs["docs_type"] = docs_type
return inputs
def compute_topic_models(corpus_type, docs_type, min_topics, max_topics, step_topics):
"""Compute topic model by corpus type and document type.
Args:
corpus_type (str): "count", "tfidf"
docs_type (str): "leads", "titles", "texts"
min_topics (int): minimum number of topics
max_topics (int): maximum number of topics
step_topics (int): number of topics to increment by between each model
Returns:
None
Output:
One subdirectory of files per model (f'{corpus_type}_{docs_type}_{topics_n}')
References:
https://www.machinelearningplus.com/nlp/topic-modeling-gensim-python/#17howtofindtheoptimalnumberoftopicsforlda
https://radimrehurek.com/gensim/auto_examples/tutorials/run_lda.html
https://markroxor.github.io/gensim/static/notebooks/lda_training_tips.html
https://www.meganstodel.com/posts/callbacks/
https://miningthedetails.com/blog/python/lda/GensimLDA/
https://groups.google.com/g/gensim/c/ojySenxQHi4
"""
# 1. load inputs
inputs = load_inputs(corpus_type, docs_type, INPUTS_DIR)
dictionary = inputs['dictionary']
corpus = inputs['corpus']
docs = inputs["docs"]
# 2. set LDA model arguments
distributed=False # default
chunksize=100000
# check logs: passes and iterations should be adjusted if convergence is low
# https://groups.google.com/g/gensim/c/ojySenxQHi4/m/jjX1RbbHERgJ
# `passes` is the number of training passes through the corpus.
# For example, if the training corpus has 50,000 documents, chunksize is 10,000, passes is 2, then online training is done in 10 updates
passes = 5
# https://groups.google.com/g/gensim/c/ojySenxQHi4/m/Ctny4H5lZXAJ
# By default, update_every=1, so that the update happens after each batch of `chunksize` documents.
update_every = 1
# Computational Analysis of Communication (p. 275 in book draft)
# we would generally recommend a relatively low and asymmetric alpha, and in fact Gensim by default uses an algorithm to find an alpha that corresponds to this recommendation.
# http://dirichlet.net/pdf/wallach09rethinking.pdf
# "Since the priors we advocate (an asymmetric Dirichlet over the document–topic distributions and a
# symmetric Dirichlet over the topic–word distributions) have significant modeling benefits and can
# be implemented using highly efficient algorithms, we recommend them as a new standard for LDA."
alpha="asymmetric"
eta="symmetric"
decay = 0.5 # default
offset = 1.0 # default
# Log perplexity is estimated every that many updates. Setting this to one slows down training by ~2x.
eval_every = 10
iterations = 50 # default
gamma_threshold = 0.001 # default
minimum_probability = 0.01 # default
# reproducibility
random_state=42
ns_conf = None # default
minimum_phi_value = 0.01 # default
per_word_topics = False # default
# 3. compute models
for n_topics in range(min_topics, max_topics, step_topics):
model_name = f'lda_corpus_{corpus_type}_docs_{docs_type}_topics_{n_topics}'
# Set up the callbacks loggers
LOGGER = get_logger(filename = f'{model_name}', logger_type='main')
convergence_logger = ConvergenceMetric(logger='shell')
coherence_cv_logger = CoherenceMetric(corpus=corpus, logger='shell', coherence = 'c_v', texts = docs)
perplexity_logger = PerplexityMetric(corpus=corpus, logger='shell')
LOGGER.debug(f'Start of LDA model: {model_name}')
LOGGER.debug(f'n_topics: {n_topics}')
LOGGER.debug(f'distributed: {distributed}')
LOGGER.debug(f'chunksize: {chunksize}')
LOGGER.debug(f'passes: {passes}')
LOGGER.debug(f'update_every: {update_every}')
LOGGER.debug(f'alpha: {alpha}')
LOGGER.debug(f'eta: {eta}')
LOGGER.debug(f'decay: {decay}')
LOGGER.debug(f'offset: {offset}')
LOGGER.debug(f'eval_every: {eval_every}')
LOGGER.debug(f'iterations: {iterations}')
LOGGER.debug(f'gamma_threshold: {gamma_threshold}')
LOGGER.debug(f'minimum_probability: {minimum_probability}')
LOGGER.debug(f'random_state: {random_state}')
LOGGER.debug(f'ns_conf: {ns_conf}')
LOGGER.debug(f'minimum_phi_value: {minimum_phi_value}')
LOGGER.debug(f'per_word_topics: {per_word_topics}')
# Create model
model = LdaModel(corpus=corpus,
num_topics=n_topics,
id2word=dictionary,
distributed=distributed,
chunksize=chunksize,
passes=passes,
update_every=update_every,
alpha=alpha,
eta=eta,
decay=decay,
offset=offset,
eval_every=eval_every,
iterations=iterations,
gamma_threshold=gamma_threshold,
random_state=random_state,
ns_conf=ns_conf,
minimum_phi_value=minimum_phi_value,
per_word_topics=per_word_topics,
callbacks=[convergence_logger, coherence_cv_logger, perplexity_logger])
# 4. Save model
if not os.path.exists(os.path.join(MODELS_DIR, f"{model_name}/")):
os.makedirs(os.path.join(MODELS_DIR, f"{model_name}/"))
model.save(os.path.join(MODELS_DIR, model_name, f"{model_name}.model"))
LOGGER.debug(f'End of LDA model: {model_name}')
LOGGER.debug(f'Start of coherence model: {model_name}')
coherence_c_v = CoherenceModel(model=model, texts=docs, dictionary=dictionary, coherence='c_v')
with open(os.path.join(MODELS_DIR, model_name, f"{model_name}_coherence_c_v.pkl"), "wb") as fn:
pickle.dump(coherence_c_v, fn)
LOGGER.debug(f'End of coherence model: {model_name}')
def compute_coherence_values(corpus_type, docs_type, min_topics, max_topics, step_topics):
"""Compute coherence values for models.
Args:
corpus_type (str): "count", "tfidf"
docs_type (str): "leads", "titles", "texts"
min_topics (int): minimum number of topics
max_topics (int): maximum number of topics
step_topics (int): number of topics to increment by between each model
Returns:
None (writes to jsonl file)
"""
try:
for n_topics in range(min_topics, max_topics, step_topics):
model_name = f"lda_corpus_{corpus_type}_docs_{docs_type}_topics_{n_topics}"
LOGGER = get_logger(filename = f'{model_name}_coherence_c_v', logger_type='main')
json_file = os.path.join(MODELS_DIR, "coherence_c_v_summary", "coherence_c_v_summary.jsonl")
coherence_dicts = []
if json_file and os.path.exists(json_file):
with open(file=json_file, mode="r", encoding="utf-8") as file:
for line in file:
coherence_dicts.append(json.loads(line))
# only compute coherence value for a model if it doesn't exist in the json file
if len([d for d in coherence_dicts if d['model_name']==model_name]) == 0:
LOGGER.info(f"{model_name} with {n_topics} topics does not exist in the json file")
# create dict to hold model info
d = {}
d["model_name"] = model_name
d["corpus_type"] = corpus_type
d["docs_type"] = docs_type
d["model_group"] = d["docs_type"] + "_" + d["corpus_type"]
d["n_topics"] = n_topics
# Compute coherence using c_v
with open(os.path.join(MODELS_DIR, model_name, f"{model_name}_coherence_c_v.pkl"), "rb") as handle:
coherence_c_v = pickle.load(handle)
LOGGER.info(f"Starting coherence calculation for {model_name} with {n_topics} topics")
coherence_score_c_v = coherence_c_v.get_coherence()
d["coherence_score_c_v"] = coherence_score_c_v
LOGGER.info(f"Completed coherence calculation for {model_name} with {n_topics} topics: coherence_score_c_v = {coherence_score_c_v}")
d_json = json.dumps(d)
with open(file=os.path.join(MODELS_DIR, "coherence_c_v_summary", "coherence_c_v_summary.jsonl"), mode="a", encoding="utf-8") as f_out:
f_out.write(d_json + "\n")
LOGGER.info(f"Wrote coherence calculation for {model_name} with {n_topics} topics to {os.path.join(MODELS_DIR, 'coherence_c_v_summary', 'coherence_c_v_summary.jsonl')}")
except FileNotFoundError as e:
LOGGER.info(f"Combo ({docs_type}, {corpus_type}) with n_topics {n_topics} has not been computed yet.")
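# Sketch (not part of the original pipeline; the file name and keys mirror the
# jsonl written by compute_coherence_values, everything else is an assumption):
# load the coherence summary back and keep the best-scoring model per
# docs/corpus combination.
def _load_best_models_by_coherence():
    summary_path = os.path.join(MODELS_DIR, "coherence_c_v_summary", "coherence_c_v_summary.jsonl")
    records = []
    with open(file=summary_path, mode="r", encoding="utf-8") as infile:
        for line in infile:
            records.append(json.loads(line))
    df_summary = pd.DataFrame(records)
    # Highest c_v coherence per (docs_type, corpus_type) group.
    return (df_summary.sort_values("coherence_score_c_v", ascending=False)
            .groupby("model_group")
            .head(1))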
# adapted from https://www.machinelearningplus.com/nlp/topic-modeling-gensim-python/
def extract_top_topic_per_doc(model, corpus_type, corpus, docs_type, docs):
"""For each document, extract the topic with the highest probability.
Args:
model (obj): gensim LDA model
corpus_type (str): "tfidf", "count"
corpus (obj): gensim corpus
docs_type (str): "leads", "texts", "titles"
docs (list): list of strings
Returns:
df_topics (dataframe): dataframe where each row is a document.
- columns: 'top_topic', 'top_topic_pct', 'topic_tokens', 'doc_tokens'
Reference: https://nlp.stanford.edu/events/illvi2014/papers/sievert-illvi2014.pdf
- LDAvis: A method for visualizing and interpreting topics
- Terms are sorted by pyLDAvis' method of relevance
        (p. 66) we define the relevance of a term w to a topic k given a weight parameter λ (where 0 ≤ λ ≤ 1) as:
            r(w, k | λ) = λ · log(φ_kw) + (1 - λ) · log(φ_kw / p_w)
        Let φ_kw denote the probability of term w ∈ {1, ..., V} for topic k ∈ {1, ..., K}, where V denotes the number of terms in the vocabulary, and let p_w denote the marginal probability of term w in the corpus.
λ determines the weight given to the probability of term w under topic k relative to its lift (measuring both on the log scale).
Setting λ = 1 results in the familiar ranking of terms in decreasing order of their topic-specific probability,
and setting λ = 0 ranks terms solely by their lift.
(p. 65): Taddy (2011) uses an intrinsic measure to rank terms within topics: a quantity called lift, defined as the ratio of a term’s probability within a topic to its marginal probability across the corpus.
This generally decreases the rankings of globally frequent terms, which can be helpful.
We find that it can be noisy, however, by giving high rankings to very rare terms that occur in only a single topic, for instance.
While such terms may contain useful topical content, if they are very rare the topic may remain difficult to interpret.
"""
model_name = f"lda_corpus_{corpus_type}_docs_{docs_type}_topics_{model.num_topics}"
LOGGER = get_logger(filename = f'{model_name}_top_topic', logger_type='main')
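    # Illustrative sketch only (not executed here): given a topic-term matrix
    # `phi` (n_topics x vocab_size) and marginal term probabilities `p`, the
    # relevance ranking quoted in the docstring above could be computed as
    #   relevance = lam * np.log(phi) + (1 - lam) * np.log(phi / p)
    # for a chosen lam in [0, 1]; pyLDAvis applies this ranking internally.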
# Init output
df_topics = pd.DataFrame()
# Get main topic in each document
for i, row in enumerate(model.get_document_topics(corpus)):
if i % 10000 == 0:
LOGGER.info(f"processing top topic for document {i}")
row = sorted(row, key=lambda x: (x[1]), reverse=True)
# Get the Dominant topic, Perc Contribution and Keywords for each document
for j, (topic_num, prop_topic) in enumerate(row):
if j == 0: # => dominant topic
wp = model.show_topic(topic_num)
topic_keywords = ", ".join([word for word, prop in wp])
df_topics = df_topics.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)
else:
break
# Add original text to the end of the output
contents = | pd.Series(docs) | pandas.Series |
from itertools import product
import numpy as np
from numpy import ma
import pandas as pd
import pytest
from scipy import sparse as sp
from scipy.sparse import csr_matrix, issparse
from anndata import AnnData
from anndata.tests.helpers import assert_equal, gen_adata
# some test objects that we use below
adata_dense = AnnData(np.array([[1, 2], [3, 4]]))
adata_dense.layers["test"] = adata_dense.X
adata_sparse = AnnData(
csr_matrix([[0, 2, 3], [0, 5, 6]]),
dict(obs_names=["s1", "s2"], anno1=["c1", "c2"]),
dict(var_names=["a", "b", "c"]),
)
def test_creation():
AnnData(np.array([[1, 2], [3, 4]]))
AnnData(np.array([[1, 2], [3, 4]]), {}, {})
AnnData(ma.array([[1, 2], [3, 4]]), uns=dict(mask=[0, 1, 1, 0]))
AnnData(sp.eye(2))
X = np.array([[1, 2, 3], [4, 5, 6]])
adata = AnnData(
X=X,
obs=dict(Obs=["A", "B"]),
var=dict(Feat=["a", "b", "c"]),
obsm=dict(X_pca=np.array([[1, 2], [3, 4]])),
raw=dict(X=X, var=dict(var_names=["a", "b", "c"])),
)
assert adata.raw.X.tolist() == X.tolist()
assert adata.raw.var_names.tolist() == ["a", "b", "c"]
with pytest.raises(ValueError):
AnnData(np.array([[1, 2], [3, 4]]), dict(TooLong=[1, 2, 3, 4]))
# init with empty data matrix
shape = (3, 5)
adata = AnnData(None, uns=dict(test=np.array((3, 3))), shape=shape)
assert adata.X is None
assert adata.shape == shape
assert "test" in adata.uns
def test_create_with_dfs():
X = np.ones((6, 3))
obs = pd.DataFrame(dict(cat_anno=pd.Categorical(["a", "a", "a", "a", "b", "a"])))
obs_copy = obs.copy()
adata = AnnData(X=X, obs=obs)
assert obs.index.equals(obs_copy.index)
assert obs.index.astype(str).equals(adata.obs.index)
def test_create_from_df():
df = pd.DataFrame(np.ones((3, 2)), index=["a", "b", "c"], columns=["A", "B"])
ad = AnnData(df)
assert df.values.tolist() == ad.X.tolist()
assert df.columns.tolist() == ad.var_names.tolist()
assert df.index.tolist() == ad.obs_names.tolist()
def test_create_from_sparse_df():
s = sp.random(20, 30, density=0.2)
obs_names = [f"obs{i}" for i in range(20)]
var_names = [f"var{i}" for i in range(30)]
df = pd.DataFrame.sparse.from_spmatrix(s, index=obs_names, columns=var_names)
a = AnnData(df)
b = AnnData(s, obs=pd.DataFrame(index=obs_names), var=pd.DataFrame(index=var_names))
assert_equal(a, b)
assert issparse(a.X)
def test_create_from_df_with_obs_and_var():
df = pd.DataFrame(np.ones((3, 2)), index=["a", "b", "c"], columns=["A", "B"])
obs = pd.DataFrame(np.ones((3, 1)), index=df.index, columns=["C"])
var = pd.DataFrame(np.ones((2, 1)), index=df.columns, columns=["D"])
ad = AnnData(df, obs=obs, var=var)
assert df.values.tolist() == ad.X.tolist()
assert df.columns.tolist() == ad.var_names.tolist()
assert df.index.tolist() == ad.obs_names.tolist()
assert obs.equals(ad.obs)
assert var.equals(ad.var)
with pytest.raises(ValueError, match=r"Index of obs must match index of X."):
AnnData(df, obs=obs.reset_index())
with pytest.raises(ValueError, match=r"Index of var must match columns of X."):
AnnData(df, var=var.reset_index())
def test_from_df_and_dict():
df = pd.DataFrame(dict(a=[0.1, 0.2, 0.3], b=[1.1, 1.2, 1.3]))
adata = AnnData(df, dict(species=pd.Categorical(["a", "b", "a"])))
assert adata.obs["species"].values.tolist() == ["a", "b", "a"]
def test_df_warnings():
df = pd.DataFrame(dict(A=[1, 2, 3], B=[1.0, 2.0, 3.0]), index=["a", "b", "c"])
with pytest.warns(UserWarning, match=r"X.*dtype float64"):
adata = AnnData(df)
with pytest.warns(UserWarning, match=r"X.*dtype float64"):
adata.X = df
def test_attr_deletion():
full = gen_adata((30, 30))
# Empty has just X, obs_names, var_names
empty = AnnData(None, obs=full.obs[[]], var=full.var[[]])
for attr in ["X", "obs", "var", "obsm", "varm", "obsp", "varp", "layers", "uns"]:
delattr(full, attr)
assert_equal(getattr(full, attr), getattr(empty, attr))
assert_equal(full, empty, exact=True)
def test_names():
adata = AnnData(
np.array([[1, 2, 3], [4, 5, 6]]),
dict(obs_names=["A", "B"]),
dict(var_names=["a", "b", "c"]),
)
assert adata.obs_names.tolist() == "A B".split()
assert adata.var_names.tolist() == "a b c".split()
adata = AnnData(np.array([[1, 2], [3, 4], [5, 6]]), var=dict(var_names=["a", "b"]))
assert adata.var_names.tolist() == ["a", "b"]
@pytest.mark.parametrize(
"names,after",
[
pytest.param(["a", "b"], None, id="list"),
pytest.param(
pd.Series(["AAD", "CCA"], name="barcodes"), "barcodes", id="Series-str"
),
pytest.param(pd.Series(["x", "y"], name=0), None, id="Series-int"),
],
)
@pytest.mark.parametrize("attr", ["obs_names", "var_names"])
def test_setting_index_names(names, after, attr):
adata = adata_dense.copy()
assert getattr(adata, attr).name is None
setattr(adata, attr, names)
assert getattr(adata, attr).name == after
if hasattr(names, "name"):
assert names.name is not None
# Testing for views
new = adata[:, :]
assert new.is_view
setattr(new, attr, names)
assert_equal(new, adata, exact=True)
assert not new.is_view
@pytest.mark.parametrize("attr", ["obs_names", "var_names"])
def test_setting_index_names_error(attr):
orig = adata_sparse[:2, :2]
adata = adata_sparse[:2, :2]
assert getattr(adata, attr).name is None
with pytest.raises(ValueError, match=fr"AnnData expects \.{attr[:3]}\.index\.name"):
setattr(adata, attr, pd.Index(["x", "y"], name=0))
assert adata.is_view
assert getattr(adata, attr).tolist() != ["x", "y"]
assert getattr(adata, attr).tolist() == getattr(orig, attr).tolist()
assert_equal(orig, adata, exact=True)
@pytest.mark.parametrize("dim", ["obs", "var"])
def test_setting_dim_index(dim):
index_attr = f"{dim}_names"
mapping_attr = f"{dim}m"
orig = gen_adata((5, 5))
orig.raw = orig
curr = orig.copy()
view = orig[:, :]
new_idx = pd.Index(list("abcde"), name="letters")
setattr(curr, index_attr, new_idx)
pd.testing.assert_index_equal(getattr(curr, index_attr), new_idx)
pd.testing.assert_index_equal(getattr(curr, mapping_attr)["df"].index, new_idx)
pd.testing.assert_index_equal(getattr(curr, mapping_attr).dim_names, new_idx)
pd.testing.assert_index_equal(curr.obs_names, curr.raw.obs_names)
# Testing view behaviour
setattr(view, index_attr, new_idx)
assert not view.is_view
pd.testing.assert_index_equal(getattr(view, index_attr), new_idx)
pd.testing.assert_index_equal(getattr(view, mapping_attr)["df"].index, new_idx)
pd.testing.assert_index_equal(getattr(view, mapping_attr).dim_names, new_idx)
with pytest.raises(AssertionError):
pd.testing.assert_index_equal(
getattr(view, index_attr), getattr(orig, index_attr)
)
assert_equal(view, curr, exact=True)
# test case in #459
fake_m = pd.DataFrame(curr.X.T, index=getattr(curr, index_attr))
getattr(curr, mapping_attr)["df2"] = fake_m
def test_indices_dtypes():
adata = AnnData(
np.array([[1, 2, 3], [4, 5, 6]]),
dict(obs_names=["A", "B"]),
dict(var_names=["a", "b", "c"]),
)
adata.obs_names = ["ö", "a"]
assert adata.obs_names.tolist() == ["ö", "a"]
def test_slicing():
adata = AnnData(np.array([[1, 2, 3], [4, 5, 6]]))
# assert adata[:, 0].X.tolist() == adata.X[:, 0].tolist() # No longer the case
assert adata[0, 0].X.tolist() == np.reshape(1, (1, 1)).tolist()
assert adata[0, :].X.tolist() == np.reshape([1, 2, 3], (1, 3)).tolist()
assert adata[:, 0].X.tolist() == np.reshape([1, 4], (2, 1)).tolist()
assert adata[:, [0, 1]].X.tolist() == [[1, 2], [4, 5]]
assert adata[:, np.array([0, 2])].X.tolist() == [[1, 3], [4, 6]]
assert adata[:, np.array([False, True, True])].X.tolist() == [
[2, 3],
[5, 6],
]
assert adata[:, 1:3].X.tolist() == [[2, 3], [5, 6]]
assert adata[0:2, :][:, 0:2].X.tolist() == [[1, 2], [4, 5]]
assert adata[0:1, :][:, 0:2].X.tolist() == np.reshape([1, 2], (1, 2)).tolist()
assert adata[0, :][:, 0].X.tolist() == np.reshape(1, (1, 1)).tolist()
assert adata[:, 0:2][0:2, :].X.tolist() == [[1, 2], [4, 5]]
assert adata[:, 0:2][0:1, :].X.tolist() == np.reshape([1, 2], (1, 2)).tolist()
assert adata[:, 0][0, :].X.tolist() == np.reshape(1, (1, 1)).tolist()
def test_boolean_slicing():
adata = AnnData(np.array([[1, 2, 3], [4, 5, 6]]))
obs_selector = np.array([True, False], dtype=bool)
vars_selector = np.array([True, False, False], dtype=bool)
assert adata[obs_selector, :][:, vars_selector].X.tolist() == [[1]]
assert adata[:, vars_selector][obs_selector, :].X.tolist() == [[1]]
assert adata[obs_selector, :][:, 0].X.tolist() == [[1]]
assert adata[:, 0][obs_selector, :].X.tolist() == [[1]]
assert adata[0, :][:, vars_selector].X.tolist() == [[1]]
assert adata[:, vars_selector][0, :].X.tolist() == [[1]]
obs_selector = np.array([True, False], dtype=bool)
vars_selector = np.array([True, True, False], dtype=bool)
assert adata[obs_selector, :][:, vars_selector].X.tolist() == [[1, 2]]
assert adata[:, vars_selector][obs_selector, :].X.tolist() == [[1, 2]]
assert adata[obs_selector, :][:, 0:2].X.tolist() == [[1, 2]]
assert adata[:, 0:2][obs_selector, :].X.tolist() == [[1, 2]]
assert adata[0, :][:, vars_selector].X.tolist() == [[1, 2]]
assert adata[:, vars_selector][0, :].X.tolist() == [[1, 2]]
obs_selector = np.array([True, True], dtype=bool)
vars_selector = np.array([True, True, False], dtype=bool)
assert adata[obs_selector, :][:, vars_selector].X.tolist() == [
[1, 2],
[4, 5],
]
assert adata[:, vars_selector][obs_selector, :].X.tolist() == [
[1, 2],
[4, 5],
]
assert adata[obs_selector, :][:, 0:2].X.tolist() == [[1, 2], [4, 5]]
assert adata[:, 0:2][obs_selector, :].X.tolist() == [[1, 2], [4, 5]]
assert adata[0:2, :][:, vars_selector].X.tolist() == [[1, 2], [4, 5]]
assert adata[:, vars_selector][0:2, :].X.tolist() == [[1, 2], [4, 5]]
def test_oob_boolean_slicing():
len1, len2 = np.random.choice(100, 2, replace=False)
with pytest.raises(IndexError) as e:
AnnData(np.empty((len1, 100)))[np.random.randint(0, 2, len2, dtype=bool), :]
assert str(len1) in str(e.value)
assert str(len2) in str(e.value)
len1, len2 = np.random.choice(100, 2, replace=False)
with pytest.raises(IndexError) as e:
AnnData(np.empty((100, len1)))[:, np.random.randint(0, 2, len2, dtype=bool)]
assert str(len1) in str(e.value)
assert str(len2) in str(e.value)
def test_slicing_strings():
adata = AnnData(
np.array([[1, 2, 3], [4, 5, 6]]),
dict(obs_names=["A", "B"]),
dict(var_names=["a", "b", "c"]),
)
assert adata["A", "a"].X.tolist() == [[1]]
assert adata["A", :].X.tolist() == [[1, 2, 3]]
assert adata[:, "a"].X.tolist() == [[1], [4]]
assert adata[:, ["a", "b"]].X.tolist() == [[1, 2], [4, 5]]
assert adata[:, np.array(["a", "c"])].X.tolist() == [[1, 3], [4, 6]]
assert adata[:, "b":"c"].X.tolist() == [[2, 3], [5, 6]]
with pytest.raises(KeyError):
_ = adata[:, "X"]
with pytest.raises(KeyError):
_ = adata["X", :]
with pytest.raises(KeyError):
_ = adata["A":"X", :]
with pytest.raises(KeyError):
_ = adata[:, "a":"X"]
# Test if errors are helpful
with pytest.raises(KeyError, match=r"not_in_var"):
adata[:, ["A", "B", "not_in_var"]]
with pytest.raises(KeyError, match=r"not_in_obs"):
adata[["A", "B", "not_in_obs"], :]
def test_slicing_graphs():
# Testing for deprecated behaviour of connectivity matrices in .uns["neighbors"]
with pytest.warns(FutureWarning, match=r".obsp\['connectivities'\]"):
adata = AnnData(
np.array([[1, 2], [3, 4], [5, 6]]),
uns=dict(neighbors=dict(connectivities=sp.csr_matrix(np.ones((3, 3))))),
)
adata_sub = adata[[0, 1], :]
with pytest.warns(FutureWarning):
assert adata_sub.uns["neighbors"]["connectivities"].shape[0] == 2
assert adata.uns["neighbors"]["connectivities"].shape[0] == 3
assert adata_sub.copy().uns["neighbors"]["connectivities"].shape[0] == 2
def test_slicing_series():
adata = AnnData(
np.array([[1, 2], [3, 4], [5, 6]]),
dict(obs_names=["A", "B", "C"]),
dict(var_names=["a", "b"]),
)
df = pd.DataFrame(dict(a=["1", "2", "2"]))
df1 = pd.DataFrame(dict(b=["1", "2"]))
assert adata[df["a"].values == "2"].X.tolist() == adata[df["a"] == "2"].X.tolist()
assert (
adata[:, df1["b"].values == "2"].X.tolist()
== adata[:, df1["b"] == "2"].X.tolist()
)
def test_strings_to_categoricals():
adata = AnnData(
np.array([[1, 2], [3, 4], [5, 6], [7, 8]]), dict(k=["a", "a", "b", "b"])
)
adata.strings_to_categoricals()
assert adata.obs["k"].cat.categories.tolist() == ["a", "b"]
def test_slicing_remove_unused_categories():
adata = AnnData(
np.array([[1, 2], [3, 4], [5, 6], [7, 8]]), dict(k=["a", "a", "b", "b"])
)
adata._sanitize()
assert adata[2:4].obs["k"].cat.categories.tolist() == ["b"]
def test_get_subset_annotation():
adata = AnnData(
np.array([[1, 2, 3], [4, 5, 6]]),
dict(S=["A", "B"]),
dict(F=["a", "b", "c"]),
)
assert adata[0, 0].obs["S"].tolist() == ["A"]
assert adata[0, 0].var["F"].tolist() == ["a"]
def test_append_col():
adata = AnnData(np.array([[1, 2, 3], [4, 5, 6]]))
adata.obs["new"] = [1, 2]
# this worked in the initial AnnData, but not with a dataframe
# adata.obs[['new2', 'new3']] = [['A', 'B'], ['c', 'd']]
with pytest.raises(ValueError):
adata.obs["new4"] = "far too long".split()
def test_delete_col():
adata = AnnData(np.array([[1, 2, 3], [4, 5, 6]]), dict(o1=[1, 2], o2=[3, 4]))
assert ["o1", "o2"] == adata.obs_keys()
del adata.obs["o1"]
assert ["o2"] == adata.obs_keys()
assert [3, 4] == adata.obs["o2"].tolist()
def test_set_obs():
adata = AnnData(np.array([[1, 2, 3], [4, 5, 6]]))
adata.obs = pd.DataFrame(dict(a=[3, 4]))
assert adata.obs_names.tolist() == [0, 1]
with pytest.raises(ValueError):
adata.obs = pd.DataFrame(dict(a=[3, 4, 5]))
adata.obs = dict(a=[1, 2])
def test_multicol():
adata = AnnData(np.array([[1, 2, 3], [4, 5, 6]]))
# 'c' keeps the columns as should be
adata.obsm["c"] = np.array([[0.0, 1.0], [2, 3]])
assert adata.obsm_keys() == ["c"]
assert adata.obsm["c"].tolist() == [[0.0, 1.0], [2, 3]]
def test_n_obs():
adata = AnnData(np.array([[1, 2], [3, 4], [5, 6]]))
assert adata.n_obs == 3
adata1 = adata[:2]
assert adata1.n_obs == 2
def test_equality_comparisons():
adata1 = AnnData(np.array([[1, 2], [3, 4], [5, 6]]))
adata2 = AnnData(np.array([[1, 2], [3, 4], [5, 6]]))
with pytest.raises(NotImplementedError):
adata1 == adata1
with pytest.raises(NotImplementedError):
adata1 == adata2
with pytest.raises(NotImplementedError):
adata1 != adata2
with pytest.raises(NotImplementedError):
adata1 == 1
with pytest.raises(NotImplementedError):
adata1 != 1
def test_rename_categories():
X = np.ones((6, 3))
obs = pd.DataFrame(dict(cat_anno=pd.Categorical(["a", "a", "a", "a", "b", "a"])))
adata = AnnData(X=X, obs=obs)
adata.uns["tool"] = {}
adata.uns["tool"]["cat_array"] = np.rec.fromarrays(
[np.ones(2) for cat in adata.obs["cat_anno"].cat.categories],
dtype=[(cat, "float32") for cat in adata.obs["cat_anno"].cat.categories],
)
adata.uns["tool"]["params"] = dict(groupby="cat_anno")
new_categories = ["c", "d"]
adata.rename_categories("cat_anno", new_categories)
assert list(adata.obs["cat_anno"].cat.categories) == new_categories
assert list(adata.uns["tool"]["cat_array"].dtype.names) == new_categories
def test_pickle():
import pickle
adata = AnnData()
adata2 = pickle.loads(pickle.dumps(adata))
assert adata2.obsm.parent is adata2
def test_to_df_dense():
X_df = adata_dense.to_df()
layer_df = adata_dense.to_df(layer="test")
np.testing.assert_array_equal(adata_dense.layers["test"], layer_df.values)
np.testing.assert_array_equal(adata_dense.X, X_df.values)
pd.testing.assert_index_equal(X_df.columns, layer_df.columns)
pd.testing.assert_index_equal(X_df.index, layer_df.index)
def test_convenience():
adata = adata_sparse.copy()
adata.layers["x2"] = adata.X * 2
adata.var["anno2"] = ["p1", "p2", "p3"]
adata.raw = adata
adata.X = adata.X / 2
adata_dense = adata.copy()
adata_dense.X = adata_dense.X.toarray()
def assert_same_op_result(a1, a2, op):
r1 = op(a1)
r2 = op(a2)
assert np.all(r1 == r2)
assert type(r1) is type(r2)
assert np.allclose(adata.obs_vector("b"), np.array([1.0, 2.5]))
assert np.allclose(adata.raw.obs_vector("c"), np.array([3, 6]))
assert np.all(adata.obs_vector("anno1") == np.array(["c1", "c2"]))
assert np.allclose(adata.var_vector("s1"), np.array([0, 1.0, 1.5]))
assert np.allclose(adata.raw.var_vector("s2"), np.array([0, 5, 6]))
for obs_k, layer in product(["a", "b", "c", "anno1"], [None, "x2"]):
assert_same_op_result(
adata, adata_dense, lambda x: x.obs_vector(obs_k, layer=layer)
)
for obs_k in ["a", "b", "c"]:
assert_same_op_result(adata, adata_dense, lambda x: x.raw.obs_vector(obs_k))
for var_k, layer in product(["s1", "s2", "anno2"], [None, "x2"]):
assert_same_op_result(
adata, adata_dense, lambda x: x.var_vector(var_k, layer=layer)
)
for var_k in ["s1", "s2", "anno2"]:
assert_same_op_result(adata, adata_dense, lambda x: x.raw.var_vector(var_k))
def test_1d_slice_dtypes():
N, M = 10, 20
obs_df = pd.DataFrame(
dict(
cat=pd.Categorical(np.arange(N, dtype=int)),
int=np.arange(N, dtype=int),
float=np.arange(N, dtype=float),
obj=[str(i) for i in np.arange(N, dtype=int)],
),
index=[f"cell{i}" for i in np.arange(N, dtype=int)],
)
var_df = pd.DataFrame(
dict(
cat=pd.Categorical(np.arange(M, dtype=int)),
int=np.arange(M, dtype=int),
float=np.arange(M, dtype=float),
obj=[str(i) for i in np.arange(M, dtype=int)],
),
index=[f"gene{i}" for i in np.arange(M, dtype=int)],
)
adata = AnnData(X=np.random.random((N, M)), obs=obs_df, var=var_df)
new_obs_df = pd.DataFrame(index=adata.obs_names)
for k in obs_df.columns:
new_obs_df[k] = adata.obs_vector(k)
assert new_obs_df[k].dtype == obs_df[k].dtype
assert np.all(new_obs_df == obs_df)
new_var_df = | pd.DataFrame(index=adata.var_names) | pandas.DataFrame |
"""
Includes classes and functions to test and select the optimal
betting strategy on historical and current data.
"""
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
from argparse import ArgumentParser
from ast import literal_eval
from itertools import product
from os.path import join
from sqlite3 import connect
from abc import abstractmethod
from importlib import import_module
from joblib import delayed, Parallel
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.model_selection import ParameterGrid
from sklearn.utils import check_random_state, check_X_y, check_array
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from sportsbet import SOCCER_PATH
from sportsbet.externals import TimeSeriesSplit
from sportsbet.soccer import TARGETS
from sportsbet.soccer.config import CONFIG
DB_CONNECTION = connect(join(SOCCER_PATH, 'soccer.db'))
def extract_multi_labels(score1, score2, targets):
"""Extract multi-labels matrix for multi-output classification."""
# Check input data
score1 = check_array(score1, dtype=int, ensure_2d=False)
score2 = check_array(score2, dtype=int, ensure_2d=False)
targets = check_array(targets, dtype=object, ensure_2d=False)
# Generate multi-labels
multi_labels = np.column_stack([TARGETS[target](score1, score2) for target in targets]).astype(int)
return multi_labels
def extract_class_labels(score1, score2, odds, targets):
"""Extract class labels for multi-class classification."""
# Check input data
odds = check_array(odds)
# Generate class labels
multi_labels = extract_multi_labels(score1, score2, targets)
indices = (multi_labels * odds).argmax(axis=1)
class_labels = np.array([targets[ind] for ind in indices])
class_labels[multi_labels.sum(axis=1) == 0] = '-'
return class_labels
def calculate_yields(score1, score2, bets, odds, targets):
"""Calculate the yields."""
# Check odds
odds = check_array(odds)
targets = check_array(targets, dtype=object, ensure_2d=False)
# Generate yields
bets = MultiLabelBinarizer(classes=['-'] + targets.tolist()).fit_transform([[bet] for bet in bets])[:, 1:]
yields = ((extract_multi_labels(score1, score2, targets) * odds - 1.0) * bets).sum(axis=1)
return yields
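# Worked example (illustrative numbers, hypothetical target names): for one
# match with targets ['home_win', 'away_win'], odds [[2.5, 3.0]] and a placed
# bet 'home_win', a final score of 2-1 (home win) gives a yield of
# 2.5 * 1 - 1.0 = +1.5; a lost bet yields -1.0 and a skipped bet ('-') yields 0.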
def extract_yields_stats(yields):
"""Extract coverage, mean and std of yields."""
coverage_mask = (yields != 0.0)
return coverage_mask.mean(), yields[coverage_mask].mean(), yields[coverage_mask].std()
def check_random_states(random_state, repetitions):
"""Create random states for experiments."""
random_state = check_random_state(random_state)
return [random_state.randint(0, 2 ** 32 - 1, dtype='uint32') for _ in range(repetitions)]
def fit_bet(bettor, params, risk_factors, random_state, X, scores, odds, train_indices, test_indices):
"""Parallel fit and bet"""
# Unpack scores
avg_score1, avg_score2, score1, score2 = scores
# Set random state
for param_name in bettor.get_params():
if 'random_state' in param_name:
bettor.set_params(**{param_name: random_state})
    # Fit bettor
bettor.set_params(**params).fit(X[train_indices], avg_score1[train_indices], avg_score2[train_indices], odds[train_indices])
# Generate data
data = []
for risk_factor in risk_factors:
bets = bettor.bet(X[test_indices], risk_factor)
yields = calculate_yields(score1[test_indices], score2[test_indices], bets, odds[test_indices], bettor.targets_)
data.append((str(params), random_state, risk_factor, yields))
data = pd.DataFrame(data, columns=['parameters', 'experiment', 'risk_factor', 'yields'])
return data
def apply_backtesting(bettor, param_grid, risk_factors, X, scores, odds, cv, random_state, n_runs, n_jobs):
"""Apply backtesting to evaluate bettor."""
# Check random states
random_states = check_random_states(random_state, n_runs)
# Check arrays
X = check_array(X, dtype=None, force_all_finite=False)
normalized_scores = []
for score in scores:
normalized_scores.append(check_array(score, dtype=None, ensure_2d=False))
odds = check_array(odds, dtype=None)
# Extract parameters
parameters = ParameterGrid(param_grid)
# Run backtesting
data = Parallel(n_jobs=n_jobs)(delayed(fit_bet)(bettor, params, risk_factors, random_state, X, normalized_scores, odds, train_indices, test_indices)
for params, random_state, (train_indices, test_indices) in tqdm(list(product(parameters, random_states, cv.split(X))), desc='Tasks'))
# Combine data
data = pd.concat(data, ignore_index=True)
data = data.groupby(['parameters', 'risk_factor', 'experiment']).apply(lambda df: np.concatenate(df.yields.values)).reset_index()
data[['coverage', 'mean_yield', 'std_yield']] = pd.DataFrame(data[0].apply(lambda yields: extract_yields_stats(yields)).values.tolist())
# Calculate results
results = data.drop(columns=['experiment', 0]).groupby(['parameters', 'risk_factor']).mean().reset_index()
results['std_mean_yield'] = data.groupby(['parameters', 'risk_factor'])['mean_yield'].std().values
results = results.sort_values('mean_yield', ascending=False).reset_index(drop=True)
return results
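# Usage sketch (illustrative only -- the classifier, grid, risk factors and the
# split arguments are assumptions, not part of this module's API):
#
#   bettor = Bettor(classifier=some_sklearn_classifier)
#   results = apply_backtesting(
#       bettor,
#       param_grid={'classifier__random_state': [0]},
#       risk_factors=[0.0, 0.5],
#       X=X, scores=(avg_score1, avg_score2, score1, score2), odds=odds,
#       cv=TimeSeriesSplit(...), random_state=0, n_runs=5, n_jobs=-1,
#   )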
class BettorMixin:
"""Mixin class for all bettors."""
_estimator_type = 'bettor'
def __init__(self, targets):
self.targets = targets
@abstractmethod
def predict(self, X):
"""Predict class labels."""
pass
@abstractmethod
def predict_proba(self, X):
"""Predict probabilities."""
pass
def fit(self):
"""Fit base bettor."""
# Check targets
if self.targets is None:
self.targets_ = np.array(list(TARGETS.keys()))
else:
if not set(self.targets).issubset(TARGETS.keys()):
                raise ValueError(f'Targets should be any of {", ".join(TARGETS.keys())}')
else:
self.targets_ = check_array(self.targets, dtype=None, ensure_2d=False)
return self
def bet(self, X, risk_factor):
"""Generate bets."""
# Check risk factor
if not isinstance(risk_factor, float) or risk_factor > 1.0 or risk_factor < 0.0:
raise ValueError('Risk factor should be a float in the [0.0, 1.0] interval.')
# Generate bets
bets = self.predict(X)
# Apply no bets
bets[self.predict_proba(X).max(axis=1) <= risk_factor] = '-'
return bets
class Bettor(BaseEstimator, BettorMixin):
"""Bettor class that uses a multi-class classifier."""
def __init__(self, classifier, targets=None):
super(Bettor, self).__init__(targets)
self.classifier = classifier
def fit(self, X, score1, score2, odds):
"""Fit the classifier."""
super(Bettor, self).fit()
# Extract targets
y = extract_class_labels(score1, score2, odds, self.targets_)
# Fit classifier
self.classifier_ = clone(self.classifier).fit(X, y)
return self
def predict(self, X):
"""Predict class labels."""
return self.classifier_.predict(X)
def predict_proba(self, X):
"""Predict class probabilities."""
return self.classifier_.predict_proba(X)
class MultiBettor(BaseEstimator, BettorMixin):
"""Bettor class that uses a multi-output classifier."""
def __init__(self, multi_classifier, meta_classifier, test_size=0.5, random_state=None, targets=None):
super(MultiBettor, self).__init__(targets)
self.multi_classifier = multi_classifier
self.meta_classifier = meta_classifier
self.test_size = test_size
self.random_state = random_state
def fit(self, X, score1, score2, odds):
"""Fit the multi-output classifier."""
super(MultiBettor, self).fit()
# Split data
X_multi, X_meta, score1_multi, score1_meta, score2_multi, score2_meta, _, odds_meta = train_test_split(
X, score1, score2, odds,
test_size=self.test_size,
random_state=self.random_state
)
# Extract targets
Y_multi = extract_multi_labels(score1_multi, score2_multi, self.targets_)
y_meta = extract_class_labels(score1_meta, score2_meta, odds_meta, self.targets_)
# Fit multi-classifier
self.multi_classifier_ = clone(self.multi_classifier).fit(X_multi, Y_multi)
# Fit meta-classifier
X_meta = np.column_stack([probs[:, 0] for probs in self.multi_classifier_.predict_proba(X_meta)])
self.meta_classifier_ = clone(self.meta_classifier).fit(X_meta, y_meta)
return self
def predict(self, X):
"""Predict class labels."""
X_meta = np.column_stack([probs[:, 0] for probs in self.multi_classifier_.predict_proba(X)])
return self.meta_classifier_.predict(X_meta)
def predict_proba(self, X):
"""Predict class probabilities."""
X_meta = np.column_stack([probs[:, 0] for probs in self.multi_classifier_.predict_proba(X)])
return self.meta_classifier_.predict_proba(X_meta)
def extract_bettor():
"""Extract bettor from configuration file."""
bettor_name = CONFIG['bettor']['type']
bettor_params = CONFIG['bettor']['parameters']
bettor_class = getattr(import_module(__name__), bettor_name)
if bettor_name == 'Bettor':
bettor = bettor_class(bettor_params['classifier'], bettor_params['targets'])
elif bettor_name == 'MultiBettor':
bettor = bettor_class(bettor_params['multi_classifier'], bettor_params['meta_classifier'], bettor_params['test_size'], targets=bettor_params['targets'])
return bettor
def load_X(training=True):
"""Load input data."""
tbl = "X" if training else "X_test"
X_cols = [f'"{col}"' for col in pd.read_sql(f'PRAGMA table_info({tbl})', DB_CONNECTION)['name'] if col not in CONFIG['excluded_features']]
X = pd.read_sql(f'select {", ".join(X_cols)} from {tbl}', DB_CONNECTION)
return X
def load_odds(bettor, training=True):
"""Load odds data."""
tbl = "odds" if training else "odds_test"
odds_cols = [f'"{col}"' for col in (TARGETS.keys() if bettor.targets is None else bettor.targets)]
odds = pd.read_sql(f'select {", ".join(odds_cols)} from {tbl}', DB_CONNECTION)
return odds
def load_scores():
"""Load scores data."""
y = | pd.read_sql('select * from y', DB_CONNECTION) | pandas.read_sql |
# Custom Modules
import avaxtar
from avaxtar import Avax_NN
from avaxtar import DF_from_DICT
# Py Data Stack
import numpy as np
import pandas as pd
# Neural Network
import torch
import torch.nn as nn
import torch.nn.functional as F
# Feature Engineering
import sent2vec
# File Manipulation
from glob import glob
import joblib
import os
import gdown
#from google_drive_downloader import GoogleDriveDownloader as gdd
# Feature Scaling
from sklearn.preprocessing import RobustScaler
# Twitter
import tweepy
import requests
# NLP
from nltk.tokenize import TweetTokenizer
from nltk.corpus import stopwords
class AvaxModel():
def __init__(self, consumer_key=None, consumer_secret=None, access_token=None, access_secret=None, bearer_token=None):
super(AvaxModel, self).__init__()
# Connect to Twitter
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.access_token = access_token
self.access_secret = access_secret
self.api_v1_connection = False
if consumer_key and consumer_secret and access_token and access_secret:
self.auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
self.auth.set_access_token(access_token,access_secret)
self.api = tweepy.API(self.auth, retry_count=5, retry_delay=2, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
self.api_v1_connection = True
self.api_v2_connection = False
if bearer_token:
self.headers = {"Authorization": f"Bearer {bearer_token}"}
self.api_v2_connection = True
self.package_path = os.path.dirname(avaxtar.__file__)
# Load sent2vec model
if "wiki_unigrams.bin" not in os.listdir(self.package_path):
print("Downloading sent2vec model...")
#url = 'https://drive.google.com/u/0/uc?id=0B6VhzidiLvjSa19uYWlLUEkzX3c'
url = 'https://drive.google.com/uc?id=1f_XhwJvJek5qXQUlODHqBidcxqJpw0IW'
output = self.package_path + '/wiki_unigrams.bin'
gdown.download(url, output, quiet=False)
self.sent2vec_model = sent2vec.Sent2vecModel()
self.sent2vec_model.load_model('/' + self.package_path + '/' + 'wiki_unigrams.bin')#, inference_mode=True)
# Load trained scaler
self.scaler = joblib.load(self.package_path + '/' + 'scaler1.joblib')
# Tokenizer
#self.tknzr = TweetTokenizer(preserve_case=False, reduce_len=False, strip_handles=False)
self.stopW = stopwords.words('english')
self.stopW.append("https://t.co")
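    # Usage sketch (placeholder credentials and user id, shown only to indicate
    # the intended call pattern):
    #   model = AvaxModel(consumer_key, consumer_secret, access_token, access_secret)
    #   pred_proba = model.predict_from_userid_api_v1(some_user_id)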
def predict_from_userid_api_v1(self, userid):
if self.api_v1_connection:
            # Given a user ID, crawl its most recent tweets (up to 100 here) as a list of dictionaries
user_timeline = [status._json for status in tweepy.Cursor(self.api.user_timeline, id=userid).items(100)]
#print(f'User: {userid}. Timeline length: {len(user_timeline)}')
# Extract all the features from the list of dictionaries and convert it to a datadframe
df = DF_from_DICT.main(user_timeline)
# Generate timeseries features based on a user's tweets
#timeseries_features = Feature_Engineering.prediction_timeseries_generator(df, self.scaler, sent2vec_model=self.sent2vec_model)
timeseries_features = self.prediction_timeseries_generator_text_only(df.token.to_list(), self.scaler, sent2vec_model=self.sent2vec_model)
# Setting the device
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Load trained model
model = Avax_NN.AvaxNN(num_features=timeseries_features.shape[1],
learning_rate=1e-4,
optimizer=torch.optim.AdamW,
loss_fn=nn.BCELoss(),
device=device)
model.load_state_dict(torch.load(self.package_path + '/' + 'model_pytorch1.pt', map_location=torch.device(device)))
model.eval()
# Send model to the device
model = model.to(device)
# Predict
pred_proba = model.predict_proba(timeseries_features)
return pred_proba
else:
print("In order to predict from a user id you need to input your twitter credentials to the class constructor. Please pass 'consumer_key', 'consumer_secret', 'access_token', 'access_secret'.")
def predict_from_userid_api_v2(self, userid):
if self.api_v2_connection:
# If a screen name was passed, convert to user id
            if not str(userid).isdigit():
if self.api_v1_connection:
user = self.api.get_user(userid)
userid = user.id_str
#print(f"Converted user id {userid}")
else:
raise ValueError("The input is not an user id. If you are trying to predict from a screen name, please connect to the v1 api.")
# Df to store user tweets
df_all = | pd.DataFrame() | pandas.DataFrame |
import plotly.express as px
import plotly.figure_factory as ff
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from collections import Counter
from nltk import word_tokenize
from nltk.corpus import stopwords
import numpy as np
import pandas as pd
import datetime as dt
import time
import datetime
import streamlit as st
def getOrderedDictionary(data):
"""
:param data: (Series) --> sentences
    :return: (list, list) --> list with the words and
                              list with their frequencies
"""
dict_sentences = {}
for sentence in data:
tokens = word_tokenize(sentence)
        for token in tokens:
            # Skip English stopwords so they never enter the frequency dict.
            if token in stopwords.words('english'):
                continue
            dict_sentences[token] = dict_sentences.get(token, 0) + 1
ordered_dict = {k: v for k, v in sorted(dict_sentences.items(), key=lambda item: item[1], reverse=True)}
return list(ordered_dict.keys()), list(ordered_dict.values())
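# Example (illustrative): getOrderedDictionary(pd.Series(["good stock", "good call"]))
# returns (["good", "stock", "call"], [2, 1, 1]) -- tokens ordered by frequency,
# with English stopwords excluded from the counts.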
def get_dic_sentiment(labels):
"""
:param labels: (Series) --> predictions of the sentiment analysis
:return: (dic) --> dictionary with the number of positive, negative and neutral values
"""
dic_sentiment = dict(Counter(labels))
if -1 in dic_sentiment:
dic_sentiment["Negative"] = dic_sentiment.pop(-1)
else:
dic_sentiment["Negative"] = 0
if 0 in dic_sentiment:
dic_sentiment["Neutral"] = dic_sentiment.pop(0)
else:
dic_sentiment["Neutral"] = 0
if 1 in dic_sentiment:
dic_sentiment["Positive"] = dic_sentiment.pop(1)
else:
dic_sentiment["Positive"] = 0
return dic_sentiment
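# Example (illustrative): get_dic_sentiment([-1, -1, 0, 1]) returns
# {"Negative": 2, "Neutral": 1, "Positive": 1}; classes missing from the
# predictions are filled with 0 so the pie chart always has all three slices.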
def getCounters(data):
"""
:param data: (dataframe) --> dataframe containing the phrases and
their sentiment
    :return: (Series) --> value counts of the lengths for negative/neutral/positive samples
"""
x_neg = data[data['sentiment'] == -1]["length"]
x_neutr = data[data['sentiment'] == 0]["length"]
x_pos = data[data['sentiment'] == 1]["length"]
    x_neg_dic = pd.Series(x_neg.value_counts())
    x_neutr_dic = pd.Series(x_neutr.value_counts())
    x_pos_dic = pd.Series(x_pos.value_counts())
    return x_neg_dic, x_neutr_dic, x_pos_dic
def unpackSeries(x):
"""
:param x: (Series) --> index : length of the titles
--> values : counter
:return:
"""
return x.index, x.values
def plot_piechart(labels):
"""
:param labels: (series) --> sentiment labels
:return: (plt)
"""
dic_sentiment = get_dic_sentiment(labels)
# negative, neutral and positive
colors = ['#FF4E11', '#FAB733', '#69B34C']
fig = make_subplots(rows=1, cols=1,
specs=[[{'type':'domain'}]],
subplot_titles=("POLYGLON"))
fig.add_trace(go.Pie(labels=list(dic_sentiment.keys()),
values=list(dic_sentiment.values()),
marker_colors=colors,
),
1, 1)
fig.update_traces(hoverinfo='label+percent')
fig.update(#layout_title_text='SENTIMENT ANALYSIS',
layout_showlegend=True)
return fig
def plot_informative_table(data):
"""
:param data: (Series) --> dates of the news
:return:
"""
# get minimum and maximum date
dates = [dt.datetime.strptime(date.split("T")[0], '%Y-%m-%d') for date in data.index]
n_samples = [len(data)]
min_dates = [str(min(dates)).split(" ")[0]]
max_dates = [str(max(dates)).split(" ")[0]]
df = pd.DataFrame({ 'Number of samples' : n_samples, 'From' : min_dates, 'To' : max_dates})
df = df.set_index([pd.Index(['Polyglon'])])
return df
def plot_most_frequent(data_twitter, data_yahoo, k=10):
"""
    :param data_twitter: (Series) --> text of the tweets
    :param data_yahoo: (Series) --> text of the Yahoo headlines
    :param k: (int) --> number of top words to show
    :return: plot of the top k elements (most frequent) for each source
"""
palette_twitter = px.colors.sequential.Teal_r
palette_yahoo = px.colors.sequential.BuPu_r
keys_twitter, values_twitter = getOrderedDictionary(data_twitter)
keys_yahoo, values_yahoo = getOrderedDictionary(data_yahoo)
fig = make_subplots(rows=1, cols=2,
subplot_titles=("Twitter", "Yahoo"))
fig.add_trace(go.Bar(x=values_twitter[:k][::-1], y=keys_twitter[:k][::-1], orientation='h'
#marker=dict(color=[4, 5, 6], coloraxis="coloraxis")
),
1, 1)
fig.add_trace(go.Bar(x=values_yahoo[:k][::-1], y=keys_yahoo[:k][::-1],orientation='h'
# marker=dict(color=[4, 5, 6], coloraxis="coloraxis")
),
1, 2)
#fig.update_layout(title="MOST FREQUENT WORDS")
return fig
def plot_length_distributions(data_t, labels_t, data_y, labels_y):
"""
:param data_t: (dataframe) --> twitter dataframe
:param labels_t: (series) --> twitter sentiment
:param data_y: (dataframe) --> yahoo dataframe
:param labels_y: (series) --> yahoo sentiment
:return: (graph) --> length distribution
"""
fig = make_subplots(subplot_titles=('Twitter', 'Yahoo'),
cols=1, rows=2,
vertical_spacing=0.1,
horizontal_spacing=0.1
)
# twitter
data_t = pd.DataFrame(data_t)
data_t['length'] = data_t.text.apply(lambda text: len(text))
data_t['sentiment'] = labels_t
twitter_neg, twitter_neutr, twitter_pos = getCounters(data_t)
twitter_hist = [twitter_neg, twitter_neutr, twitter_pos]
# yahoo
data_y = pd.DataFrame(data_y)
data_y['length'] = data_y.title.apply(lambda text: len(text))
data_y['sentiment'] = labels_y
yahoo_neg, yahoo_neutr, yahoo_pos = getCounters(data_y)
yahoo_hist = [yahoo_neg, yahoo_neutr, yahoo_pos]
group_labels = ["negative", "neutral", "positive"]
colors_twitter = ['#03045e', '#00b4d8', '#caf0f8']
colors_yahoo = ['#480ca8', '#7209b7', '#f72585']
# plots
fig_twitter = ff.create_distplot(twitter_hist, group_labels, colors=colors_twitter, curve_type='kde')
    fig_yahoo = ff.create_distplot(yahoo_hist, group_labels, colors=colors_yahoo, curve_type='kde')
distplot_left = fig_twitter['data']
distplot_right = fig_yahoo['data']
    # Each distplot holds 6 traces (3 histograms + 3 KDE curves); copy them all.
    for i in range(6):
        fig.append_trace(distplot_left[i], 1, 1)
        fig.append_trace(distplot_right[i], 2, 1)
fig.update_layout(#title_text=f'LENGTH DISTRIBUTION',
autosize=False, height=700)
return fig
def plot_length_distributionsV2(data_t, labels_t, data_y, labels_y):
"""
:param data_t: (dataframe) --> twitter dataframe
:param labels_t: (series) --> twitter sentiment
:param data_y: (dataframe) --> yahoo dataframe
:param labels_y: (series) --> yahoo sentiment
:return: (graph) --> length distribution
"""
# twitter
data_t = pd.DataFrame(data_t)
data_t['length'] = data_t.text.apply(lambda text: len(text))
data_t['sentiment'] = labels_t
data_t['source'] = data_t.text.apply(lambda text: 'Twitter')
# yahoo
data_y = pd.DataFrame(data_y)
data_y['length'] = data_y.text.apply(lambda text: len(text))
data_y['sentiment'] = labels_y
data_y['source'] = data_y.text.apply(lambda text: 'Yahoo')
dic_sentiment = {-1 : "negative", 0 : "neutral", 1:"positive"}
df_concat = data_t.append(data_y)
df_concat['sentiment'] = df_concat['sentiment'].apply(lambda x : dic_sentiment[x])
log_scale = st.checkbox("Logarithmic Scale")
fig = px.histogram(df_concat, x="length",
color="sentiment",
opacity=0.8,
facet_row="source",
log_y=log_scale, # represent bars with log scale
)
return fig
def plot_sentiment_trend(data, ticker):
"""
    :param data: (dataframe) --> news indexed by date, with a 'sentiment' column of predictions
    :param ticker: (str) ticker of the stock
    :return: (Figure) comparison between sentiment and price trends
"""
fig = make_subplots(2, 1, row_heights=[0.7, 0.3])
df = data.copy()
# filter just positive and negative news
# since we're showing the daily trend, we group by day
# we count the number of positive and negative news for each day
# to do so, we use three new columns
# |-- day -> feature on which we have to group
# |-- positive -> 1 if the news is positive, 0 otherwise
# |-- negative -> -1 if the news is negative, 0 otherwise
df['day'] = df.index
df['Positive'] = df.sentiment.apply(lambda s: 1 if s == 1 else 0)
df['Negative'] = df.sentiment.apply(lambda s: -1 if s == -1 else 0)
df['day'] = df['day'].apply(lambda d: d.split("T")[0])
aggregated = df.groupby('day').sum()[['Positive', 'Negative']]
start = int(time.mktime(datetime.datetime.strptime("2021-07-06", '%Y-%m-%d').timetuple()))
end = int(time.mktime(datetime.datetime.strptime("2021-07-16", '%Y-%m-%d').timetuple()))
interval = '1d'
query_string = f'https://query1.finance.yahoo.com/v7/finance/download/{ticker}?period1={start}&period2={end}&interval={interval}&events=history&includeAdjustedClose=true'
data_ticker_close = | pd.read_csv(query_string) | pandas.read_csv |
# -*- coding: utf-8 -*-
'''
Module for the analysis of frequency dependence ("line shape analysis")
Author:
<NAME>,
Max Planck Institute of Microstructure Physics, Halle
Weinberg 2
06120 Halle
<EMAIL>
'''
''' Input zone '''
# ____________________________________________________________________________
# SETTINGS
import numpy as np
g = 2.07 # Landé g-factor of the FM [default (Permalloy): 2.07]
t = 20. # SH material film thickness in nm
terr = 1. # Error in SH material film thickness in nm
d = 10. # FM thickness in nm
derr = 1. # Error in FM thickness in nm
Ms = 1040. # Saturation magnetization FM layer emu/cm3
Mserr = 25. # Error in saturation magnetization FM layer emu/cm3
plotDpi = 300 # Resolution of plots [default: 600]
''' Input zone ends here. '''
# ____________________________________________________________________________
# Constants
mu0 = 4*np.pi*1e-7
e = 1.60218e-19 # C
me = 9.10938e-31 # kg
hbar = 1.0545718e-34 # J s
gamma = e * g / (2 * me)
# ____________________________________________________________________________
# CODE
import tkinter as tk
from tkinter import filedialog
import pandas as pd
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from files import File
def kittel(H0, Meff, gamma, mu0):
''' With same unit system! '''
f = gamma * mu0 / (2*np.pi) * np.sqrt(H0 * (H0 + Meff))
return f
def gilbert(f, mu0, gamma, alpha, delta0):
''' Everything in SI '''
# print('GILBERT')
delta = f * 2*np.pi*alpha / (mu0 * gamma) + delta0
# print(f)
# print(gamma, alpha, delta0)
# print(delta)
return delta
def get_SHA(Vs, Va, Ms, t, d, hbar, Meff, H0):
''' all in SI '''
SHA = Vs/Va * (e*mu0*Ms*t*d) / (hbar) * np.sqrt(1 + Meff/H0)
return SHA
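# Rough sanity check (illustrative, SI units): with the input-zone film
# parameters (t = 20 nm, d = 10 nm, Ms = 1040 emu/cm3 ~ 1.04e6 A/m), Meff ~ Ms
# and a resonance field H0 ~ 1000 Oe (~8e4 A/m), the factor multiplying Vs/Va
# is e*mu0*Ms*t*d/hbar * sqrt(1 + Meff/H0) ~ 0.4 * 3.7 ~ 1.5, i.e. the spin
# Hall angle comes out roughly 1.5 times the symmetric-to-antisymmetric voltage ratio.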
def get_SHAerr(e, mu0, hbar, Vs, Va, Ms, t, d, Meff, H0,
d_Vs, d_Va, d_Ms, d_t, d_d, d_Meff, d_H0):
c = e * mu0 / hbar
sq = np.sqrt(1+Meff/H0)
T1 = ( Ms*t*d/Va*sq*d_Vs )**2
T2 = ( Vs/Va**2 *Ms*t*d*sq*d_Va)**2
T3 = ( Vs/Va*t*d*sq*d_Ms )**2
T4 = ( Vs/Va *Ms*d*sq*d_t)**2
T5 = ( Vs/Va*Ms*t*sq*d_d)**2
T6 = ( Vs/Va*Ms*t*d/2*(1+Meff/H0)**(-1/2)/H0*d_Meff )**2
    T7 = ( Vs/Va*Ms*t*d/2*(1+Meff/H0)**(-1/2)*Meff/H0**2*d_H0 )**2
return c * np.sqrt(T1+T2+T3+T4+T5+T6+T7)
def cgssi(quantity, value, conv):
if quantity == 'H':
c = 1e3/(4*np.pi)
elif quantity == 'B':
c = 1e-4
elif quantity == 'M':
c = 1e3
elif quantity == 'gamma':
c = 4*np.pi*1e-3
elif quantity == 'mu0':
c = 4*np.pi*1e-7
else:
raise ValueError('Quantity not defined')
if conv == 'fw': # forward
return value * c
elif conv == 'bw': # backward
return value / c
else:
raise ValueError('Sense of conversion not defined')
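# Example (illustrative): cgssi('H', 1000., 'fw') converts 1000 Oe to ~7.96e4 A/m,
# and cgssi('M', 1.04e6, 'bw') converts 1.04e6 A/m back to 1040 emu/cm3,
# matching the CGS values used in the input zone.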
def fit_kittel(H0SI, f, I, P, gamma, mu0):
fopt, fcov = curve_fit(lambda x, a: kittel(x, a, gamma, mu0), H0SI, f*1e9)
Meffopt = fopt[0]
Meffopterr = np.sqrt(np.diag(fcov))[0]
MeffoptCGS = cgssi('M', Meffopt, 'bw')
MeffopterrCGS = cgssi('M', Meffopterr, 'bw')
ffit = kittel(H0SI, Meffopt, gamma, mu0)
fig = plt.figure()
# plt.errorbar(H0, f, xerr=H0err, fmt='o')
plt.plot(H0, f, ':o', label='Data')
plt.plot(H0, ffit*1e-9, '', label='Fit')
plt.xlim(left=0)
plt.ylim(bottom=0)
plt.xlabel('$H_0$ (Oe)')
plt.ylabel('f (GHz)')
plt.legend(loc='lower right')
plt.title('Fit to Kittel formula')
boxtext = '\n'.join((
'I = {} mA \nP = {} dBm'.format(I, P),
r'$M_{eff}^{fit}=$'+'$({:.0f} \pm {:.0f})$ '.format(MeffoptCGS, MeffopterrCGS)+'emu/cm$^3$'))
props = dict(boxstyle='round', facecolor='white', alpha=0.5)
ax = plt.gca()
ax.text(0.03, 0.97, boxtext, verticalalignment='top',
transform=ax.transAxes, bbox=props, fontsize=10)
plt.show()
fig.savefig(outputFileKittel.fileDirName, bbox_inches="tight", dpi=plotDpi)
return MeffoptCGS, MeffopterrCGS
def fit_gilbert(f, DeltaSI, gamma):
''' f in GHz, rest in SI '''
popt, pcov = curve_fit(lambda x, a, b: gilbert(x, mu0, gamma, a, b), f*1e9, DeltaSI)
alphaopt = popt[0]
alphaopterr = np.sqrt(np.diag(pcov))[0]
delta0opt = popt[1]
delta0opterr = np.sqrt(np.diag(pcov))[1]
delta0optCGS = cgssi('H', delta0opt, 'bw')
delta0opterrCGS = cgssi('H', delta0opterr, 'bw')
Deltafit = gilbert(f*1e9, mu0, gamma, alphaopt, delta0opt)
fig = plt.figure(dpi=600)
plt.plot(f, cgssi('H', DeltaSI, 'bw'), ':o', label='Data')
plt.plot(f, cgssi('H', Deltafit, 'bw'), label='Fit')
# plt.plot(f, DeltaSI, ':o', label='Data')
# plt.plot(f, Deltafit, label='Fit')
plt.xlim(left=0)
plt.ylim(bottom=0)
plt.xlabel('$f$ (GHz)')
plt.ylabel('$\Delta$ (Oe)')
plt.legend(loc='lower right')
plt.title('Gilbert fitting')
boxtext = '\n'.join((
'I = {} mA \nP = {} dBm'.format(I, P),
r'$\alpha^{fit}=$'+'$({:.4f} \pm {:.4f})$ '.format(alphaopt, alphaopterr),
r'$\Delta_{0}^{fit}=$'+'$({:.1f} \pm {:.1f})$ '.format(delta0optCGS, delta0opterrCGS)+'Oe'))
props = dict(boxstyle='round', facecolor='white', alpha=0.5)
ax = plt.gca()
ax.text(0.03, 0.97, boxtext, verticalalignment='top',
transform=ax.transAxes, bbox=props, fontsize=10)
plt.show()
fig.savefig(outputFileGilbert.fileDirName, bbox_inches="tight", dpi=plotDpi)
return alphaopt, alphaopterr, popt, pcov
def lineshapeAnalysis(Vs, Va, MsSI, t, d, hbar, MeffoptSI, H0SI, Vserr, Vaerr, Mserr, derr, terr, MeffopterrSI, H0errSI):
SHA = get_SHA(Vs, Va, MsSI, t, d, hbar, MeffoptSI, H0SI)
    SHAerr = get_SHAerr(e, mu0, hbar, Vs, Va, MsSI, t, d, MeffoptSI, H0SI, Vserr, Vaerr, Mserr, terr, derr, MeffopterrSI, H0errSI)  # terr/derr ordered to match get_SHAerr's (d_t, d_d) parameters
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
# ax1.scatter(H0, Vs*1e6, label='Vs')
ax1.plot(H0, Vs*1e6, ':o', label='Vs')
ax1.plot(H0, Va*1e6, ':o', label='Va')
ax2.errorbar(H0, SHA, yerr=SHAerr, ls=':', marker='o', c='g', capsize=2, label='SHA')
plt.xlim(left=0)
# plt.ylim(bottom=0)
ax1.set_xlabel('$H_0$ (Oe)')
ax1.set_ylabel('$V_{i}$ ($\mu$V)')
ax2.set_ylabel('$\Theta_{sh}$')
fig.legend(bbox_to_anchor=(1,1), bbox_transform=ax1.transAxes)
plt.title('Line shape analysis')
plt.show()
fig.savefig(outputFileLS1.fileDirName, bbox_inches="tight", dpi=plotDpi)
return SHA, SHAerr
# ____________________________________________________________________________
root = tk.Tk()
root.withdraw()
inputFile = File(filedialog.askopenfilename(parent=root, title='Choose .csv file with fitting summary'))
# inputFile = File('D:/ANALYSIS/Mn3SnN/ST-FMR/MA2427-1/210401/003_lineshape_15dBm/fittingOutput/000_fittingSummary.csv')
outputSubdir = 'lineshapeAnaOutput/'
outputFileGilbert = File(inputFile.fileDir + '/' + outputSubdir + inputFile.fileNameWOExt + '_gilbertFit.png')
outputFileGilbert.makeDirIfNotExist()
outputFileKittel = File(inputFile.fileDir + '/' + outputSubdir + inputFile.fileNameWOExt + '_kittelFit.png')
outputFileLS1 = File(inputFile.fileDir + '/' + outputSubdir + inputFile.fileNameWOExt + '_shaPlot.png')
inputData = | pd.read_csv(inputFile.fileDirName,index_col=False) | pandas.read_csv |
import numpy as np
import pytest
from pandas import DataFrame, Index, MultiIndex, Series, concat, date_range
import pandas._testing as tm
import pandas.core.common as com
@pytest.fixture
def four_level_index_dataframe():
arr = np.array(
[
[-0.5109, -2.3358, -0.4645, 0.05076, 0.364],
[0.4473, 1.4152, 0.2834, 1.00661, 0.1744],
[-0.6662, -0.5243, -0.358, 0.89145, 2.5838],
]
)
index = MultiIndex(
levels=[["a", "x"], ["b", "q"], [10.0032, 20.0, 30.0], [3, 4, 5]],
codes=[[0, 0, 1], [0, 1, 1], [0, 1, 2], [2, 1, 0]],
names=["one", "two", "three", "four"],
)
return DataFrame(arr, index=index, columns=list("ABCDE"))
@pytest.mark.parametrize(
"key, level, exp_arr, exp_index",
[
("a", "lvl0", lambda x: x[:, 0:2], Index(["bar", "foo"], name="lvl1")),
("foo", "lvl1", lambda x: x[:, 1:2], Index(["a"], name="lvl0")),
],
)
def test_xs_named_levels_axis_eq_1(key, level, exp_arr, exp_index):
# see gh-2903
arr = np.random.randn(4, 4)
index = MultiIndex(
levels=[["a", "b"], ["bar", "foo", "hello", "world"]],
codes=[[0, 0, 1, 1], [0, 1, 2, 3]],
names=["lvl0", "lvl1"],
)
df = DataFrame(arr, columns=index)
result = df.xs(key, level=level, axis=1)
expected = DataFrame(exp_arr(arr), columns=exp_index)
tm.assert_frame_equal(result, expected)
def test_xs_values(multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data
result = df.xs(("bar", "two")).values
expected = df.values[4]
tm.assert_almost_equal(result, expected)
def test_xs_loc_equality(multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data
result = df.xs(("bar", "two"))
expected = df.loc[("bar", "two")]
tm.assert_series_equal(result, expected)
def test_xs_missing_values_in_index():
# see gh-6574
# missing values in returned index should be preserved
acc = [
("a", "abcde", 1),
("b", "bbcde", 2),
("y", "yzcde", 25),
("z", "xbcde", 24),
("z", None, 26),
("z", "zbcde", 25),
("z", "ybcde", 26),
]
df = DataFrame(acc, columns=["a1", "a2", "cnt"]).set_index(["a1", "a2"])
expected = DataFrame(
{"cnt": [24, 26, 25, 26]},
index=Index(["xbcde", np.nan, "zbcde", "ybcde"], name="a2"),
)
result = df.xs("z", level="a1")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("key, level", [("one", "second"), (["one"], ["second"])])
def test_xs_with_duplicates(key, level, multiindex_dataframe_random_data):
# see gh-13719
frame = multiindex_dataframe_random_data
df = concat([frame] * 2)
assert df.index.is_unique is False
expected = concat([frame.xs("one", level="second")] * 2)
result = df.xs(key, level=level)
tm.assert_frame_equal(result, expected)
def test_xs_level(multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data
result = df.xs("two", level="second")
expected = df[df.index.get_level_values(1) == "two"]
expected.index = Index(["foo", "bar", "baz", "qux"], name="first")
tm.assert_frame_equal(result, expected)
def test_xs_level_eq_2():
arr = np.random.randn(3, 5)
index = MultiIndex(
levels=[["a", "p", "x"], ["b", "q", "y"], ["c", "r", "z"]],
codes=[[2, 0, 1], [2, 0, 1], [2, 0, 1]],
)
df = DataFrame(arr, index=index)
expected = DataFrame(arr[1:2], index=[["a"], ["b"]])
result = df.xs("c", level=2)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"indexer",
[
lambda df: df.xs(("a", 4), level=["one", "four"]),
lambda df: df.xs("a").xs(4, level="four"),
],
)
def test_xs_level_multiple(indexer, four_level_index_dataframe):
df = four_level_index_dataframe
expected_values = [[0.4473, 1.4152, 0.2834, 1.00661, 0.1744]]
expected_index = MultiIndex(
levels=[["q"], [20.0]], codes=[[0], [0]], names=["two", "three"]
)
expected = DataFrame(expected_values, index=expected_index, columns=list("ABCDE"))
result = indexer(df)
tm.assert_frame_equal(result, expected)
def test_xs_setting_with_copy_error(multiindex_dataframe_random_data):
# this is a copy in 0.14
df = multiindex_dataframe_random_data
result = df.xs("two", level="second")
# setting this will give a SettingWithCopyError
# as we are trying to write a view
msg = "A value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
result[:] = 10
def test_xs_setting_with_copy_error_multiple(four_level_index_dataframe):
# this is a copy in 0.14
df = four_level_index_dataframe
result = df.xs(("a", 4), level=["one", "four"])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
msg = "A value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
result[:] = 10
def test_xs_integer_key():
# see gh-2107
dates = range(20111201, 20111205)
ids = list("abcde")
index = MultiIndex.from_product([dates, ids], names=["date", "secid"])
df = DataFrame(np.random.randn(len(index), 3), index, ["X", "Y", "Z"])
result = df.xs(20111201, level="date")
expected = df.loc[20111201, :]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"indexer", [lambda df: df.xs("a", level=0), lambda df: df.xs("a")]
)
def test_xs_level0(indexer, four_level_index_dataframe):
df = four_level_index_dataframe
expected_values = [
[-0.5109, -2.3358, -0.4645, 0.05076, 0.364],
[0.4473, 1.4152, 0.2834, 1.00661, 0.1744],
]
expected_index = MultiIndex(
levels=[["b", "q"], [10.0032, 20.0], [4, 5]],
codes=[[0, 1], [0, 1], [1, 0]],
names=["two", "three", "four"],
)
expected = DataFrame(expected_values, index=expected_index, columns=list("ABCDE"))
result = indexer(df)
tm.assert_frame_equal(result, expected)
def test_xs_level_series(multiindex_dataframe_random_data):
# this test is not explicitly testing .xs functionality
# TODO: move to another module or refactor
df = multiindex_dataframe_random_data
s = df["A"]
result = s[:, "two"]
expected = df.xs("two", level=1)["A"]
tm.assert_series_equal(result, expected)
def test_xs_level_series_ymd(multiindex_year_month_day_dataframe_random_data):
# this test is not explicitly testing .xs functionality
# TODO: move to another module or refactor
df = multiindex_year_month_day_dataframe_random_data
s = df["A"]
result = s[2000, 5]
expected = df.loc[2000, 5]["A"]
tm.assert_series_equal(result, expected)
def test_xs_level_series_slice_not_implemented(
multiindex_year_month_day_dataframe_random_data,
):
# this test is not explicitly testing .xs functionality
# TODO: move to another module or refactor
# not implementing this for now
df = multiindex_year_month_day_dataframe_random_data
s = df["A"]
msg = r"\(2000, slice\(3, 4, None\)\)"
with pytest.raises(TypeError, match=msg):
s[2000, 3:4]
def test_series_getitem_multiindex_xs():
# GH6258
dt = list(date_range("20130903", periods=3))
idx = MultiIndex.from_product([list("AB"), dt])
s = Series([1, 3, 4, 1, 3, 4], index=idx)
expected = Series([1, 1], index=list("AB"))
result = s.xs("20130903", level=1)
tm.assert_series_equal(result, expected)
def test_series_getitem_multiindex_xs_by_label():
# GH5684
idx = MultiIndex.from_tuples(
[("a", "one"), ("a", "two"), ("b", "one"), ("b", "two")]
)
s = Series([1, 2, 3, 4], index=idx)
s.index.set_names(["L1", "L2"], inplace=True)
expected = Series([1, 3], index=["a", "b"])
expected.index.set_names(["L1"], inplace=True)
    result = s.xs("one", level="L2")
    tm.assert_series_equal(result, expected)
import numpy as np
import pandas as pd
from tests.datasets import numerical
class TestRandomIntegerGenerator:
def test(self):
output = numerical.RandomIntegerGenerator.generate(10)
assert len(output) == 10
assert output.dtype == int
assert len(pd.unique(output)) > 1
assert np.isnan(output).sum() == 0
class TestRandomIntegerNaNsGenerator:
def test(self):
output = numerical.RandomIntegerNaNsGenerator.generate(10)
assert len(output) == 10
assert output.dtype == float
assert len(pd.unique(output)) > 1
assert np.isnan(output).sum() > 0
class TestConstantIntegerGenerator:
def test(self):
output = numerical.ConstantIntegerGenerator.generate(10)
assert len(output) == 10
assert output.dtype == int
assert len(pd.unique(output)) == 1
assert np.isnan(output).sum() == 0
class TestConstantIntegerNaNsGenerator:
def test(self):
output = numerical.ConstantIntegerNaNsGenerator.generate(10)
assert len(output) == 10
assert output.dtype == float
assert len(pd.unique(output)) == 2
assert np.isnan(output).sum() >= 1
class TestAlmostConstantIntegerGenerator:
def test(self):
output = numerical.AlmostConstantIntegerGenerator.generate(10)
assert len(output) == 10
assert output.dtype == int
assert len(pd.unique(output)) == 2
assert np.isnan(output).sum() == 0
class TestAlmostConstantIntegerNaNsGenerator:
def test(self):
output = numerical.AlmostConstantIntegerNaNsGenerator.generate(10)
assert len(output) == 10
assert output.dtype == float
assert len(pd.unique(output)) == 3
assert np.isnan(output).sum() >= 1
class TestNormalGenerator:
def test(self):
output = numerical.NormalGenerator.generate(10)
assert len(output) == 10
assert output.dtype == float
assert len(pd.unique(output)) == 10
assert np.isnan(output).sum() == 0
class TestNormalNaNsGenerator:
def test(self):
output = numerical.NormalNaNsGenerator.generate(10)
assert len(output) == 10
assert output.dtype == float
assert 1 < len(pd.unique(output)) <= 10
assert np.isnan(output).sum() >= 1
class TestBigNormalGenerator:
def test(self):
output = numerical.BigNormalGenerator.generate(10)
assert len(output) == 10
assert output.dtype == float
        assert len(pd.unique(output)) == 10
import sys
import os
import codecs
import glob
import configparser
import pandas as pd
from datetime import datetime
from docopt import docopt
from jinja2 import Environment, FileSystemLoader
from lib.Util.util import *
# Type of printing.
OK = 'ok' # [*]
NOTE = 'note' # [+]
FAIL = 'fail' # [-]
WARNING = 'warn' # [!]
NONE = 'none' # No label.
# Create report.
class CreateReport:
def __init__(self):
self.util = Utilty()
# Read config file.
full_path = os.path.dirname(os.path.abspath(__file__))
config = configparser.ConfigParser()
try:
config.read(os.path.join(full_path, 'config.ini'))
except Exception as err:
self.util.print_exception(err, 'File exists error')
sys.exit(1)
self.report_date_format = config['Report']['date_format']
self.report_test_path = os.path.join(
full_path, config['Report']['report_test'])
self.report_test_file = os.path.join(
self.report_test_path, config['Report']['report_test_file'])
self.template_test = config['Report']['template_test']
self.report_train_path = os.path.join(
self.report_test_path, config['Report']['report_train'])
self.report_train_file = os.path.join(
self.report_train_path, config['Report']['report_train_file'])
self.template_train = config['Report']['template_train']
self.header_train = str(config['Report']['header_train']).split('@')
self.header_test = str(config['Report']['header_test']).split('@')
def create_report(self, mode='train', start_date=None):
# Check mode.
if mode not in ['train', 'test']:
self.util.print_message(FAIL, 'Invalid mode: {}'.format(mode))
exit(1)
# Gather reporting items.
if mode == 'train':
self.util.print_message(NOTE, 'Creating training report.')
csv_file_list = glob.glob(os.path.join(
self.report_train_path, '*.csv'))
# Create DataFrame.
content_list = []
for file in csv_file_list:
df = pd.read_csv(file, names=self.header_train, sep=',')
df['date'] = pd.to_datetime(df['date'])
selected_df = df[(start_date < df['date'])]
content_list.append(selected_df)
if len(content_list) != 0:
df_csv = pd.concat(content_list).drop_duplicates().sort_values(by=['ip', 'port'],
ascending=True).reset_index(drop=True,
col_level=1)
items = []
for idx in range(len(df_csv)):
items.append({'ip_addr': df_csv.loc[idx, 'ip'],
'port': df_csv.loc[idx, 'port'],
'prod_name': df_csv.loc[idx, 'service'],
'vuln_name': df_csv.loc[idx, 'vuln_name'],
'description': df_csv.loc[idx, 'description'],
'type': df_csv.loc[idx, 'type'],
'exploit': df_csv.loc[idx, 'exploit'],
'target': df_csv.loc[idx, 'target'],
'payload': df_csv.loc[idx, 'payload'],
'ref': str(df_csv.loc[idx, 'reference']).replace('@', '<br>')})
try:
# Setting template.
env = Environment(
loader=FileSystemLoader(self.report_train_path))
template = env.get_template(self.template_train)
                    pd.set_option('display.max_colwidth', None)
html = template.render(
{'title': 'Deep Exploit Scan Report', 'items': items})
# Write report.
with codecs.open(self.report_train_file, 'w', 'utf-8') as fout:
fout.write(html)
except Exception as err:
self.util.print_exception(err, 'Creating report error.')
else:
self.util.print_message(
WARNING, 'Exploitation result is not found.')
self.util.print_message(OK, 'Creating training report done.')
else:
self.util.print_message(NOTE, 'Creating testing report.')
csv_file_list = glob.glob(
os.path.join(self.report_test_path, '*.csv'))
# Create DataFrame.
content_list = []
for file in csv_file_list:
                df = pd.read_csv(file, names=self.header_test, sep=',')
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
    --inFile: Path to the input CSV file containing the time series data values
    --outFile: Path to the output INI configuration file for the time series data values
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
            self.fileLocation = os.path.abspath(inputFileName)
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
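    # Illustrative usage (a sketch, not part of the original module; "header.csv"
    # is a hypothetical file whose first row is "Serial_Number,LogTime0,Id0"):
    #
    #   meta = TransformMetaData()
    #   names, dtypes = meta.getHeaderFromFile("header.csv", method=1)  # pandas, columns only
    #   names, dtypes = meta.getHeaderFromFile("header.csv", method=2)  # csv.DictReader fieldnames
    #   names, dtypes = meta.getHeaderFromFile("header.csv", method=3)  # pandas, first row
    #   # names  -> ['Serial_Number', 'LogTime0', 'Id0']
    #   # dtypes -> {'Serial_Number': pandas.StringDtype(), ...}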
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
            return (None, None, None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
            if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
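    # Sketch of the INI layout frameToINI writes (section and column names here
    # are hypothetical): each DataFrame column becomes one key whose value is a
    # bracketed list of that column's cells quoted as strings, e.g.
    #
    #   [Unknown]
    #   serial_number = ['SN001', 'SN002']
    #   temperature = ['35', '36']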
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
        Calculates the mean using a multiplication-based method, since division can produce an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
        Calculates the standard deviation using a multiplication-based method, since division can produce an infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
            sd = numpy.float128(0.0)  # accumulator for the sum of squared deviations
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
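    # Worked example (sketch): for data = pandas.Series([2.0, 4.0, 6.0]),
    # _calculateMean weights every element by 1/size and returns 4.0, and
    # _calculateStd accumulates (2-4)**2 + (4-4)**2 + (6-4)**2 = 8, divides by
    # n - 1 = 2 and takes the square root, giving 2.0 (the sample standard
    # deviation).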
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
            # Clean out anomalies due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(float(sigmaValue)):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(float(sigmaRangeValue)):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
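    # Example (sketch): with meanValue = 100, sigmaValue = 10 and the default
    # multiplierSigma = 3.0, sigmaRangeValue = 30 and topValue = 130, so the
    # cleanup helper below keeps rows with 1 <= value < 130 and drops anything
    # beyond the 3-sigma cutoff as a suspected timeout/outlier.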
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
            # Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            logicVector = (dataAnalysisFrame[columnName] >= 1)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
            replacementList = [pandas.NaT, numpy.inf, -numpy.inf, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": pandas.StringDtype(),
"blankCount": pandas.StringDtype(),
"cleanBands": pandas.StringDtype(),
"avgTprog": pandas.StringDtype(),
"avgEraseCount": pandas.StringDtype(),
"edtcHandledBandCnt": pandas.StringDtype(),
"bandReloForNLBA": pandas.StringDtype(),
"bandCrossingDuringPliCount": pandas.StringDtype(),
"bitErrBucketNum": pandas.StringDtype(),
"sramCorrectablesTotal": pandas.StringDtype(),
"l1SramCorrErrCnt": pandas.StringDtype(),
"l2SramCorrErrCnt": pandas.StringDtype(),
"parityErrorValue": pandas.StringDtype(),
"parityErrorType": pandas.StringDtype(),
"mrr_LutValidDataSize": pandas.StringDtype(),
"pageProvisionalDefects": pandas.StringDtype(),
"plisWithErasesInProgress": pandas.StringDtype(),
"lastReplayDebug": pandas.StringDtype(),
"externalPreReadFatals": pandas.StringDtype(),
"hostReadCmd": pandas.StringDtype(),
"hostWriteCmd": pandas.StringDtype(),
"trimmedSectors": pandas.StringDtype(),
"trimTokens": pandas.StringDtype(),
"mrrEventsInCodewords": pandas.StringDtype(),
"mrrEventsInSectors": pandas.StringDtype(),
"powerOnMicroseconds": pandas.StringDtype(),
"mrrInXorRecEvents": pandas.StringDtype(),
"mrrFailInXorRecEvents": pandas.StringDtype(),
"mrrUpperpageEvents": pandas.StringDtype(),
"mrrLowerpageEvents": pandas.StringDtype(),
"mrrSlcpageEvents": pandas.StringDtype(),
"mrrReReadTotal": pandas.StringDtype(),
"powerOnResets": pandas.StringDtype(),
"powerOnMinutes": pandas.StringDtype(),
"throttleOnMilliseconds": pandas.StringDtype(),
"ctxTailMagic": pandas.StringDtype(),
"contextDropCount": pandas.StringDtype(),
"lastCtxSequenceId": pandas.StringDtype(),
"currCtxSequenceId": pandas.StringDtype(),
"mbliEraseCount": pandas.StringDtype(),
"pageAverageProgramCount": pandas.StringDtype(),
"bandAverageEraseCount": pandas.StringDtype(),
"bandTotalEraseCount": pandas.StringDtype(),
"bandReloForXorRebuildFail": pandas.StringDtype(),
"defragSpeculativeMiss": pandas.StringDtype(),
"uncorrectableBackgroundScan": pandas.StringDtype(),
"BitErrorsHost57": pandas.StringDtype(),
"BitErrorsHost58": pandas.StringDtype(),
"BitErrorsHost59": pandas.StringDtype(),
"BitErrorsHost60": pandas.StringDtype(),
"BitErrorsHost61": pandas.StringDtype(),
"BitErrorsHost62": pandas.StringDtype(),
"BitErrorsHost63": pandas.StringDtype(),
"BitErrorsHost64": pandas.StringDtype(),
"BitErrorsHost65": pandas.StringDtype(),
"BitErrorsHost66": pandas.StringDtype(),
"BitErrorsHost67": pandas.StringDtype(),
"BitErrorsHost68": pandas.StringDtype(),
"BitErrorsHost69": pandas.StringDtype(),
"BitErrorsHost70": pandas.StringDtype(),
"BitErrorsHost71": pandas.StringDtype(),
"BitErrorsHost72": pandas.StringDtype(),
"BitErrorsHost73": pandas.StringDtype(),
"BitErrorsHost74": pandas.StringDtype(),
"BitErrorsHost75": pandas.StringDtype(),
"BitErrorsHost76": pandas.StringDtype(),
"BitErrorsHost77": pandas.StringDtype(),
"BitErrorsHost78": pandas.StringDtype(),
"BitErrorsHost79": pandas.StringDtype(),
"BitErrorsHost80": pandas.StringDtype(),
"bitErrBucketArray1": pandas.StringDtype(),
"bitErrBucketArray2": pandas.StringDtype(),
"bitErrBucketArray3": pandas.StringDtype(),
"bitErrBucketArray4": pandas.StringDtype(),
"bitErrBucketArray5": pandas.StringDtype(),
"bitErrBucketArray6": pandas.StringDtype(),
"bitErrBucketArray7": pandas.StringDtype(),
"bitErrBucketArray8": pandas.StringDtype(),
"bitErrBucketArray9": pandas.StringDtype(),
"bitErrBucketArray10": pandas.StringDtype(),
"bitErrBucketArray11": pandas.StringDtype(),
"bitErrBucketArray12": pandas.StringDtype(),
"bitErrBucketArray13": pandas.StringDtype(),
"bitErrBucketArray14": pandas.StringDtype(),
"bitErrBucketArray15": pandas.StringDtype(),
"bitErrBucketArray16": pandas.StringDtype(),
"bitErrBucketArray17": pandas.StringDtype(),
"bitErrBucketArray18": pandas.StringDtype(),
"bitErrBucketArray19": pandas.StringDtype(),
"bitErrBucketArray20": pandas.StringDtype(),
"bitErrBucketArray21": pandas.StringDtype(),
"bitErrBucketArray22": pandas.StringDtype(),
"bitErrBucketArray23": pandas.StringDtype(),
"bitErrBucketArray24": pandas.StringDtype(),
"bitErrBucketArray25": pandas.StringDtype(),
"bitErrBucketArray26": pandas.StringDtype(),
"bitErrBucketArray27": pandas.StringDtype(),
"bitErrBucketArray28": pandas.StringDtype(),
"bitErrBucketArray29": pandas.StringDtype(),
"bitErrBucketArray30": pandas.StringDtype(),
"bitErrBucketArray31": pandas.StringDtype(),
"bitErrBucketArray32": pandas.StringDtype(),
"bitErrBucketArray33": pandas.StringDtype(),
"bitErrBucketArray34": pandas.StringDtype(),
"bitErrBucketArray35": pandas.StringDtype(),
"bitErrBucketArray36": pandas.StringDtype(),
"bitErrBucketArray37": pandas.StringDtype(),
"bitErrBucketArray38": pandas.StringDtype(),
"bitErrBucketArray39": pandas.StringDtype(),
"bitErrBucketArray40": pandas.StringDtype(),
"bitErrBucketArray41": pandas.StringDtype(),
"bitErrBucketArray42": pandas.StringDtype(),
"bitErrBucketArray43": pandas.StringDtype(),
"bitErrBucketArray44": pandas.StringDtype(),
"bitErrBucketArray45": pandas.StringDtype(),
"bitErrBucketArray46": pandas.StringDtype(),
"bitErrBucketArray47": pandas.StringDtype(),
"bitErrBucketArray48": pandas.StringDtype(),
"bitErrBucketArray49": pandas.StringDtype(),
"bitErrBucketArray50": pandas.StringDtype(),
"bitErrBucketArray51": pandas.StringDtype(),
"bitErrBucketArray52": pandas.StringDtype(),
"bitErrBucketArray53": pandas.StringDtype(),
"bitErrBucketArray54": pandas.StringDtype(),
"bitErrBucketArray55": pandas.StringDtype(),
"bitErrBucketArray56": pandas.StringDtype(),
"bitErrBucketArray57": pandas.StringDtype(),
"bitErrBucketArray58": pandas.StringDtype(),
"bitErrBucketArray59": pandas.StringDtype(),
"bitErrBucketArray60": pandas.StringDtype(),
"bitErrBucketArray61": pandas.StringDtype(),
"bitErrBucketArray62": pandas.StringDtype(),
"bitErrBucketArray63": pandas.StringDtype(),
"bitErrBucketArray64": pandas.StringDtype(),
"bitErrBucketArray65": pandas.StringDtype(),
"bitErrBucketArray66": pandas.StringDtype(),
"bitErrBucketArray67": pandas.StringDtype(),
"bitErrBucketArray68": pandas.StringDtype(),
"bitErrBucketArray69": pandas.StringDtype(),
"bitErrBucketArray70": pandas.StringDtype(),
"bitErrBucketArray71": pandas.StringDtype(),
"bitErrBucketArray72": pandas.StringDtype(),
"bitErrBucketArray73": pandas.StringDtype(),
"bitErrBucketArray74": pandas.StringDtype(),
"bitErrBucketArray75": pandas.StringDtype(),
"bitErrBucketArray76": pandas.StringDtype(),
"bitErrBucketArray77": pandas.StringDtype(),
"bitErrBucketArray78": pandas.StringDtype(),
"bitErrBucketArray79": pandas.StringDtype(),
"bitErrBucketArray80": pandas.StringDtype(),
"mrr_successDistribution1": pandas.StringDtype(),
"mrr_successDistribution2": pandas.StringDtype(),
"mrr_successDistribution3": pandas.StringDtype(),
"mrr_successDistribution4": pandas.StringDtype(),
"mrr_successDistribution5": pandas.StringDtype(),
"mrr_successDistribution6": pandas.StringDtype(),
"mrr_successDistribution7": pandas.StringDtype(),
"mrr_successDistribution8": pandas.StringDtype(),
"mrr_successDistribution9": pandas.StringDtype(),
"mrr_successDistribution10": pandas.StringDtype(),
"mrr_successDistribution11": pandas.StringDtype(),
"mrr_successDistribution12": pandas.StringDtype(),
"mrr_successDistribution13": pandas.StringDtype(),
"mrr_successDistribution14": pandas.StringDtype(),
"mrr_successDistribution15": pandas.StringDtype(),
"mrr_successDistribution16": pandas.StringDtype(),
"mrr_successDistribution17": pandas.StringDtype(),
"mrr_successDistribution18": pandas.StringDtype(),
"mrr_successDistribution19": pandas.StringDtype(),
"mrr_successDistribution20": pandas.StringDtype(),
"mrr_successDistribution21": pandas.StringDtype(),
"mrr_successDistribution22": pandas.StringDtype(),
"mrr_successDistribution23": pandas.StringDtype(),
"mrr_successDistribution24": pandas.StringDtype(),
"mrr_successDistribution25": pandas.StringDtype(),
"mrr_successDistribution26": pandas.StringDtype(),
"mrr_successDistribution27": pandas.StringDtype(),
"mrr_successDistribution28": pandas.StringDtype(),
"mrr_successDistribution29": pandas.StringDtype(),
"mrr_successDistribution30": pandas.StringDtype(),
"mrr_successDistribution31": pandas.StringDtype(),
"mrr_successDistribution32": pandas.StringDtype(),
"mrr_successDistribution33": pandas.StringDtype(),
"mrr_successDistribution34": pandas.StringDtype(),
"mrr_successDistribution35": pandas.StringDtype(),
"mrr_successDistribution36": pandas.StringDtype(),
"mrr_successDistribution37": pandas.StringDtype(),
"mrr_successDistribution38": pandas.StringDtype(),
"mrr_successDistribution39": pandas.StringDtype(),
"mrr_successDistribution40": pandas.StringDtype(),
"mrr_successDistribution41": pandas.StringDtype(),
"mrr_successDistribution42": pandas.StringDtype(),
"mrr_successDistribution43": pandas.StringDtype(),
"mrr_successDistribution44": pandas.StringDtype(),
"mrr_successDistribution45": pandas.StringDtype(),
"mrr_successDistribution46": pandas.StringDtype(),
"mrr_successDistribution47": pandas.StringDtype(),
"mrr_successDistribution48": pandas.StringDtype(),
"mrr_successDistribution49": pandas.StringDtype(),
"mrr_successDistribution50": pandas.StringDtype(),
"mrr_successDistribution51": pandas.StringDtype(),
"mrr_successDistribution52": pandas.StringDtype(),
"mrr_successDistribution53": pandas.StringDtype(),
"mrr_successDistribution54": pandas.StringDtype(),
"mrr_successDistribution55": pandas.StringDtype(),
"mrr_successDistribution56": pandas.StringDtype(),
"mrr_successDistribution57": pandas.StringDtype(),
"mrr_successDistribution58": pandas.StringDtype(),
"mrr_successDistribution59": pandas.StringDtype(),
"mrr_successDistribution60": pandas.StringDtype(),
"mrr_successDistribution61": pandas.StringDtype(),
"mrr_successDistribution62": pandas.StringDtype(),
"mrr_successDistribution63": pandas.StringDtype(),
"mrr_successDistribution64": pandas.StringDtype(),
"blDowngradeCount": pandas.StringDtype(),
"snapReads": pandas.StringDtype(),
"pliCapTestTime": pandas.StringDtype(),
"currentTimeToFreeSpaceRecovery": pandas.StringDtype(),
"worstTimeToFreeSpaceRecovery": pandas.StringDtype(),
"rspnandReads": pandas.StringDtype(),
"cachednandReads": pandas.StringDtype(),
"spnandReads": pandas.StringDtype(),
"dpnandReads": pandas.StringDtype(),
"qpnandReads": pandas.StringDtype(),
"verifynandReads": pandas.StringDtype(),
"softnandReads": pandas.StringDtype(),
"spnandWrites": pandas.StringDtype(),
"dpnandWrites": pandas.StringDtype(),
"qpnandWrites": pandas.StringDtype(),
"opnandWrites": pandas.StringDtype(),
"xpnandWrites": pandas.StringDtype(),
"unalignedHostWriteCmd": pandas.StringDtype(),
"randomReadCmd": pandas.StringDtype(),
"randomWriteCmd": pandas.StringDtype(),
"secVenCmdCount": pandas.StringDtype(),
"secVenCmdCountFails": pandas.StringDtype(),
"mrrFailOnSlcOtfPages": pandas.StringDtype(),
"mrrFailOnSlcOtfPageMarkedAsMBPD": pandas.StringDtype(),
"lcorParitySeedErrors": pandas.StringDtype(),
"fwDownloadFails": pandas.StringDtype(),
"fwAuthenticationFails": pandas.StringDtype(),
"fwSecurityRev": pandas.StringDtype(),
"isCapacitorHealthly": pandas.StringDtype(),
"fwWRCounter": pandas.StringDtype(),
"sysAreaEraseFailCount": pandas.StringDtype(),
"iusDefragRelocated4DataRetention": pandas.StringDtype(),
"I2CTemp": pandas.StringDtype(),
"lbaMismatchOnNandReads": pandas.StringDtype(),
"currentWriteStreamsCount": pandas.StringDtype(),
"nandWritesPerStream1": pandas.StringDtype(),
"nandWritesPerStream2": pandas.StringDtype(),
"nandWritesPerStream3": pandas.StringDtype(),
"nandWritesPerStream4": pandas.StringDtype(),
"nandWritesPerStream5": pandas.StringDtype(),
"nandWritesPerStream6": pandas.StringDtype(),
"nandWritesPerStream7": pandas.StringDtype(),
"nandWritesPerStream8": pandas.StringDtype(),
"nandWritesPerStream9": pandas.StringDtype(),
"nandWritesPerStream10": pandas.StringDtype(),
"nandWritesPerStream11": pandas.StringDtype(),
"nandWritesPerStream12": pandas.StringDtype(),
"nandWritesPerStream13": pandas.StringDtype(),
"nandWritesPerStream14": pandas.StringDtype(),
"nandWritesPerStream15": pandas.StringDtype(),
"nandWritesPerStream16": pandas.StringDtype(),
"nandWritesPerStream17": pandas.StringDtype(),
"nandWritesPerStream18": pandas.StringDtype(),
"nandWritesPerStream19": pandas.StringDtype(),
"nandWritesPerStream20": pandas.StringDtype(),
"nandWritesPerStream21": pandas.StringDtype(),
"nandWritesPerStream22": pandas.StringDtype(),
"nandWritesPerStream23": pandas.StringDtype(),
"nandWritesPerStream24": pandas.StringDtype(),
"nandWritesPerStream25": pandas.StringDtype(),
"nandWritesPerStream26": pandas.StringDtype(),
"nandWritesPerStream27": pandas.StringDtype(),
"nandWritesPerStream28": pandas.StringDtype(),
"nandWritesPerStream29": pandas.StringDtype(),
"nandWritesPerStream30": pandas.StringDtype(),
"nandWritesPerStream31": pandas.StringDtype(),
"nandWritesPerStream32": pandas.StringDtype(),
"hostSoftReadSuccess": pandas.StringDtype(),
"xorInvokedCount": pandas.StringDtype(),
"comresets": pandas.StringDtype(),
"syncEscapes": pandas.StringDtype(),
"rErrHost": pandas.StringDtype(),
"rErrDevice": pandas.StringDtype(),
"iCrcs": pandas.StringDtype(),
"linkSpeedDrops": pandas.StringDtype(),
"mrrXtrapageEvents": pandas.StringDtype(),
"mrrToppageEvents": pandas.StringDtype(),
"hostXorSuccessCount": pandas.StringDtype(),
"hostXorFailCount": pandas.StringDtype(),
"nandWritesWithPreReadPerStream1": pandas.StringDtype(),
"nandWritesWithPreReadPerStream2": pandas.StringDtype(),
"nandWritesWithPreReadPerStream3": pandas.StringDtype(),
"nandWritesWithPreReadPerStream4": pandas.StringDtype(),
"nandWritesWithPreReadPerStream5": pandas.StringDtype(),
"nandWritesWithPreReadPerStream6": pandas.StringDtype(),
"nandWritesWithPreReadPerStream7": pandas.StringDtype(),
"nandWritesWithPreReadPerStream8": pandas.StringDtype(),
"nandWritesWithPreReadPerStream9": pandas.StringDtype(),
"nandWritesWithPreReadPerStream10": pandas.StringDtype(),
"nandWritesWithPreReadPerStream11": pandas.StringDtype(),
"nandWritesWithPreReadPerStream12": pandas.StringDtype(),
"nandWritesWithPreReadPerStream13": pandas.StringDtype(),
"nandWritesWithPreReadPerStream14": pandas.StringDtype(),
"nandWritesWithPreReadPerStream15": pandas.StringDtype(),
"nandWritesWithPreReadPerStream16": | pandas.StringDtype() | pandas.StringDtype |
"""
q1-final.py: for sub-challenge 1
"""
import sklearn.ensemble
import pandas
import step00
if __name__ == "__main__":
# clinical_data
clinical_data = pandas.read_csv("/data/clinical_data.csv")
clinical_data.set_index("patientID", inplace=True)
clinical_data["ECOGPS"] = list(map(lambda x: float(x) if step00.can_convert_to_float(x) else None, list(clinical_data["ECOGPS"])))
clinical_data["TMB"] = list(map(lambda x: float(x) if step00.can_convert_to_float(x) else None, list(clinical_data["TMB"])))
clinical_data.columns = list(map(lambda x: "Clinical_" + x, list(clinical_data.columns)))
clinical_data.sort_index(axis="index", inplace=True)
data_list = [clinical_data]
for i, f in enumerate(["/data/GRCh37ERCC_ensembl75_isoforms_tpm.csv"]):
tmp_data = pandas.read_csv(f)
tmp_data.set_index(list(tmp_data.columns)[0], inplace=True)
tmp_data = tmp_data.T
tmp_data.columns = list(map(lambda x: str(i) + "_" + x, list(tmp_data.columns)))
tmp_data.sort_index(axis="index", inplace=True)
data_list.append(tmp_data)
    given_data = pandas.concat(data_list, axis="columns", join="inner", verify_integrity=True)
import sys
import os
import logging
import datetime
import pandas as pd
from job import Job, Trace
from policies import ShortestJobFirst, FirstInFirstOut, ShortestRemainingTimeFirst, QuasiShortestServiceFirst
sys.path.append('..')
def simulate_vc(trace, vc, placement, log_dir, policy, logger, start_ts, *args):
if policy == 'sjf':
scheduler = ShortestJobFirst(
trace, vc, placement, log_dir, logger, start_ts)
elif policy == 'fifo':
scheduler = FirstInFirstOut(
trace, vc, placement, log_dir, logger, start_ts)
elif policy == 'srtf':
scheduler = ShortestRemainingTimeFirst(
trace, vc, placement, log_dir, logger, start_ts)
elif policy == 'qssf':
scheduler = QuasiShortestServiceFirst(
trace, vc, placement, log_dir, logger, start_ts, args[0])
scheduler.simulate()
logger.info(f'Finish {vc.vc_name}')
return True
def get_available_schedulers():
return ['fifo', 'sjf', 'srtf', 'qssf']
def get_available_placers():
return ['random', 'consolidate', 'consolidateFirst']
def trace_process(dir, date_range):
start = '2020-04-01 00:00:00'
df = pd.read_csv(dir+'/cluster_log.csv', parse_dates=['submit_time'], usecols=['job_id', 'user', 'vc', 'jobname', 'gpu_num',
'cpu_num', 'state', 'submit_time', 'duration'])
# Consider gpu jobs only
df = df[df['gpu_num'] > 0]
# VC filter
vc_dict = pd.read_pickle(dir+'/vc_dict_homo.pkl')
vc_list = vc_dict.keys()
df = df[df['vc'].isin(vc_list)]
df = df[df['submit_time'] >= pd.Timestamp(start)]
df['submit_time'] = df['submit_time'].apply(
lambda x: int(datetime.datetime.timestamp(pd.Timestamp(x))))
# Normalizing
df['submit_time'] = df['submit_time'] - df.iloc[0]['submit_time']
df['remain'] = df['duration']
df[['start_time', 'end_time']] = sys.maxsize
df[['ckpt_times', 'queue', 'jct']] = 0
df['status'] = None
# Slicing simulation part
begin = (pd.Timestamp(date_range[0])-pd.Timestamp(start)).total_seconds()
end = (pd.Timestamp(date_range[1])-pd.Timestamp(start)).total_seconds()
df = df[(df['submit_time'] >= begin) & (df['submit_time'] <= end)]
df.sort_values(by='submit_time', inplace=True)
df.reset_index(inplace=True, drop=True)
return df, begin
def trace_philly_process(dir, date_range):
start = '2017-10-01 00:00:00'
df = pd.read_csv(dir+'/cluster_log.csv', parse_dates=['submit_time'], usecols=['user', 'vc', 'jobname', 'gpu_num',
'state', 'submit_time', 'duration'])
# Consider gpu jobs only
df = df[df['gpu_num'] > 0]
# VC filter
vc_dict = pd.read_pickle(dir+'/vc_dict_homo.pkl')
vc_list = vc_dict.keys()
df = df[df['vc'].isin(vc_list)]
df = df[df['submit_time'] >= pd.Timestamp(start)]
df['submit_time'] = df['submit_time'].apply(
lambda x: int(datetime.datetime.timestamp(pd.Timestamp(x))))
df['state'] = df['state'].replace('Pass', 'COMPLETED')
df['state'] = df['state'].replace('Failed', 'FAILED')
df['state'] = df['state'].replace('Killed', 'CANCELLED')
# Normalizing
df['submit_time'] = df['submit_time'] - df.iloc[0]['submit_time']
df['remain'] = df['duration']
df[['start_time', 'end_time']] = sys.maxsize
df[['ckpt_times', 'queue', 'jct']] = 0
df['status'] = None
# Slicing simulation part
begin = (pd.Timestamp(date_range[0])-pd.Timestamp(start)).total_seconds()
end = (pd.Timestamp(date_range[1])-pd.Timestamp(start)).total_seconds()
df = df[(df['submit_time'] >= begin) & (df['submit_time'] <= end)]
df.sort_values(by='submit_time', inplace=True)
df.reset_index(inplace=True, drop=True)
return df, begin
def trace_parser(df):
trace = Trace()
for _, series in df.iterrows():
trace.append_job(Job(series))
trace.sort_jobs('submit_time')
return trace
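# Typical flow (a sketch; the paths, dates and VC objects are hypothetical and
# depend on how the cluster data and vc_dict_homo.pkl are produced elsewhere):
#
#   df, start_ts = trace_process('./data/Cluster', ['2020-04-01 00:00:00', '2020-04-15 00:00:00'])
#   trace = trace_parser(df)
#   logger = logger_init('./log/simulation')
#   for vc in vc_objects:  # VC instances built from vc_dict_homo.pkl, not shown here
#       simulate_vc(trace, vc, 'consolidate', './log', 'fifo', logger, start_ts)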
def logger_init(file):
logger = logging.getLogger()
handler_file = logging.FileHandler(f'{file}.log', 'w')
handler_stream = logging.StreamHandler() # sys.stdout
logger.setLevel(logging.INFO)
handler_file.setLevel(logging.INFO)
handler_stream.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s | %(processName)s | %(message)s', datefmt='%Y %b %d %H:%M:%S')
handler_file.setFormatter(formatter)
handler_stream.setFormatter(formatter)
logger.addHandler(handler_file)
logger.addHandler(handler_stream)
return logger
def cluster_concatenate(policy, placer, log_dir, dir):
prefix = f'{policy}_{placer}'
if not os.path.exists(log_dir+'/all'):
os.mkdir(log_dir+'/all')
vc_dict = pd.read_pickle(dir+'/vc_dict_homo.pkl')
vcs = list(vc_dict.keys())
'''Log'''
cluster_log = pd.DataFrame()
for vc in vcs:
vc_log = pd.read_csv(f'{log_dir}/{vc}/{prefix}_{vc}_log.csv')
cluster_log = pd.concat([cluster_log, vc_log])
cluster_log.sort_values(by='submit_time', inplace=True)
cluster_log.to_csv(f'{log_dir}/all/{prefix}_all_log.csv', index=False)
'''Seq'''
cluster_seq = pd.DataFrame()
add_list = ['total_gpu_num', 'idle_gpu_num', 'pending_gpu_num', 'running_gpujob_num', 'pending_gpujob_num',
'pending_job_num_less_8', 'total_node_num', 'consolidate_node_num', 'shared_node_num']
for vc in vcs:
vc_seq = pd.read_csv(f'{log_dir}/{vc}/{prefix}_{vc}_seq.csv')
if len(cluster_seq) == 0:
cluster_seq = vc_seq
continue
cluster_seq[add_list] = cluster_seq[add_list] + vc_seq[add_list]
cluster_seq.dropna(inplace=True)
cluster_seq = cluster_seq.astype(int)
cluster_seq['gpu_utilization'] = ((cluster_seq['total_gpu_num'] - cluster_seq['idle_gpu_num']) /
cluster_seq['total_gpu_num']).round(3)
cluster_seq.to_csv(f'{log_dir}/all/{prefix}_all_seq.csv', index=False)
def cluster_analysis(placer, log_dir, dir):
    '''Generate Algorithm Comparison CSV'''
# ignore_warm_up = start_ts + 7*24*3600
prefix_list = []
for i in get_available_schedulers():
prefix = f'{i}_{placer}'
prefix_list.append(prefix)
vc_dict = pd.read_pickle(dir+'/vc_dict_homo.pkl')
vcs = list(vc_dict.keys())
vcs.append('all')
    jct_avg = pd.DataFrame()
#!/usr/bin/env python
# coding: utf-8
# # Data Preprocessing
# ### Importing the libraries
# In[ ]:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# ### Reading the dataset
# In[ ]:
dataset = pd.read_csv('startups.csv')
dataset
# In[ ]:
dataset.describe()
# In[ ]:
# Separate Independent variables (X) and dependent variable (y)
X = dataset.iloc[:, 0:4].values
y = dataset.iloc[:, 4].values
print(X,'\n\n', y)
# ### Missing values
# In[ ]:
# Replace missing values by the column/variable average (mean)
# Import Class --> Create Object --> Fit Object to Data --> Transform Data
from sklearn.impute import SimpleImputer #import the SimpleImputer class
imputer = SimpleImputer(missing_values=np.nan, strategy='mean') #create the imputer object
imputer.fit(X[:, 0:3]) #fit the object to the data
X[:, 0:3] = imputer.transform(X[:, 0:3]) #transform data
print(X)
# ### Encoding categorical variables
# In[ ]:
# Encoding independent variables
# Import Class --> Create Object --> Fit Object to Data --> Transform Data
from sklearn.compose import ColumnTransformer # import class
from sklearn.preprocessing import OneHotEncoder # import class
ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(drop = 'first'), [3])], remainder='passthrough')
#create object
X = np.array(ct.fit_transform(X)) # fit object to data and transform data
print(X)
# ### Feature scaling
# In[ ]:
# Adjusting the scales of independent variables
# Import Class --> Create Object --> Fit Object to Data --> Transform Data
from sklearn.preprocessing import StandardScaler # import class
sc = StandardScaler() # create object
X = sc.fit_transform(X) # fit and transform
print(X)
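# (For reference, StandardScaler rescales each column to z = (x - mean) / std,
# so a value one standard deviation above its column mean becomes 1.0.)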
# In[ ]:
df = pd.DataFrame(X)
df.describe()
# ### Splitting the dataset into the Training set and Test set
# In[ ]:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# In[ ]:
print(X_train)
# # Artificial Neural Net (ANN) for Regression
# ### Training the Model
# In[ ]:
# We use ANNs.
# Can also use linear, polynomial, support vector, decision trees or random forest regression.
# Import Class --> Create Object --> Fit Object to Data --> Predict
import tensorflow as tf # import class
from tensorflow import keras
regressor = keras.Sequential([
tf.keras.layers.Dense(units = 20, input_dim=5, activation='relu'),
tf.keras.layers.Dense(units = 20, activation='relu'),
tf.keras.layers.Dense(units = 20, activation='relu'),
tf.keras.layers.Dense(units = 10, activation='relu'),
tf.keras.layers.Dense(units = 1, activation = 'linear')
]) # create object with the required layers
regressor.compile(
    loss='mean_squared_error',
    optimizer=tf.keras.optimizers.RMSprop(0.01),
    metrics=['mean_absolute_error']
) # compile object selecting options (batch_size belongs to fit(), not compile())
regressor.fit(X_train, y_train, batch_size=20, epochs=500) # fit object, choosing the batch size and number of epochs
# ### Testing the Model
# In[ ]:
# make predictions for test data
y_pred = regressor.predict(X_test).flatten() # predict (flatten() reshapes y_pred to a one-dimensional array)
err = abs(y_test - y_pred)
df = pd.DataFrame({'y_pred': y_pred, 'y_test': y_test, 'error': err})
import os
import fnmatch
import pandas
def load_config_yml(config_file, individual=False):
# loads a configuration YAML file
#
# input
# config_file: full filepath to YAML (.yml) file
#
# output
# config: Configuration object
import os
import yaml
import yamlordereddictloader
from CPAC.utils import Configuration
try:
config_path = os.path.realpath(config_file)
config_dict = yaml.safe_load(open(config_path, 'r'))
config = Configuration(config_dict)
except Exception as e:
err = "\n\n[!] CPAC says: Could not load or read the configuration " \
"YAML file:\n%s\nDetails: %s\n\n" % (config_file, e)
raise Exception(err)
if individual:
config.pipeline_setup['log_directory']['path'] = os.path.abspath(config.pipeline_setup['log_directory']['path'])
config.pipeline_setup['working_directory']['path'] = os.path.abspath(config.pipeline_setup['working_directory']['path'])
config.pipeline_setup['output_directory']['path'] = os.path.abspath(config.pipeline_setup['output_directory']['path'])
config.pipeline_setup['crash_log_directory']['path'] = os.path.abspath(config.pipeline_setup['crash_log_directory']['path'])
return config
def load_text_file(filepath, label="file"):
# loads a text file and returns the lines in a list
#
# input
# filepath: full filepath to the text file
#
# output
# lines_list: list of lines from text file
if not filepath.endswith(".txt"):
err = "\n\n[!] CPAC says: The %s should be a text file (.txt).\n" \
"Path provided: %s\n\n" % (label, filepath)
raise Exception(err)
try:
with open(filepath,"r") as f:
lines_list = f.readlines()
except Exception as e:
err = "\n\n[!] CPAC says: Could not load or read the %s:\n%s\n" \
"Details: %s\n\n" % (label, filepath, e)
raise Exception(err)
# get rid of those \n's that love to show up everywhere
lines_list = [i.rstrip("\n") for i in lines_list]
return lines_list
def grab_pipeline_dir_subs(pipeline_dir, ses=False):
import os
inclusion_list = []
if ses:
pipeline_list = [x for x in os.listdir(pipeline_dir) if os.path.isdir(os.path.join(pipeline_dir, x))]
else:
pipeline_list = [x.split('_')[0] for x in os.listdir(pipeline_dir) if os.path.isdir(os.path.join(pipeline_dir, x))]
for sub_id in pipeline_list:
if sub_id not in inclusion_list:
inclusion_list.append(sub_id)
inclusion_list = sorted(inclusion_list)
return inclusion_list
def read_pheno_csv_into_df(pheno_csv, id_label=None):
"""Read the phenotypic file CSV or TSV into a Pandas DataFrame."""
import pandas as pd
with open(pheno_csv, "r") as f:
if id_label:
if '.tsv' in pheno_csv or '.TSV' in pheno_csv:
pheno_df = pd.read_table(f, dtype={id_label: object})
else:
pheno_df = pd.read_csv(f, dtype={id_label: object})
else:
if '.tsv' in pheno_csv or '.TSV' in pheno_csv:
pheno_df = pd.read_table(f)
else:
pheno_df = pd.read_csv(f)
return pheno_df
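# Example (sketch; the file name and column label are hypothetical):
#
#   pheno_df = read_pheno_csv_into_df("pheno.tsv", id_label="participant_id")
#   # the id column is read with dtype=object, so IDs such as "0050123" keep
#   # their leading zeros instead of being parsed as integers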
def gather_nifti_globs(pipeline_output_folder, resource_list,
pull_func=False):
# the number of directory levels under each participant's output folder
# can vary depending on what preprocessing strategies were chosen, and
# there may be several output filepaths with varying numbers of directory
# levels
# this parses them quickly while also catching each preprocessing strategy
import os
import glob
import pandas as pd
import pkg_resources as p
from __builtin__ import any as b_any
ext = ".nii"
nifti_globs = []
keys_csv = p.resource_filename('CPAC', 'resources/cpac_outputs.csv')
try:
keys = pd.read_csv(keys_csv)
except Exception as e:
err = "\n[!] Could not access or read the cpac_outputs.csv " \
"resource file:\n{0}\n\nError details {1}\n".format(keys_csv, e)
raise Exception(err)
derivative_list = list(
keys[keys['Derivative'] == 'yes'][keys['Space'] == 'template'][
keys['Values'] == 'z-score']['Resource'])
derivative_list = derivative_list + list(
keys[keys['Derivative'] == 'yes'][keys['Space'] == 'template'][
keys['Values'] == 'z-stat']['Resource'])
if pull_func:
derivative_list = derivative_list + list(
keys[keys['Functional timeseries'] == 'yes']['Resource'])
if len(resource_list) == 0:
err = "\n\n[!] No derivatives selected!\n\n"
raise Exception(err)
# remove any extra /'s
pipeline_output_folder = pipeline_output_folder.rstrip("/")
print("\n\nGathering the output file paths from "
"{0}...".format(pipeline_output_folder))
# this is just to keep the fsl feat config file derivative_list entries
# nice and lean
dirs_to_grab = []
for derivative_name in derivative_list:
for resource_name in resource_list:
if resource_name in derivative_name:
dirs_to_grab.append(derivative_name)
# grab MeanFD_Jenkinson just in case
dirs_to_grab.append("power_params")
for resource_name in dirs_to_grab:
glob_string = os.path.join(pipeline_output_folder, "*",
resource_name, "*", "*")
# get all glob strings that result in a list of paths where every path
# ends with a NIFTI file
prog_string = ".."
while len(glob.glob(glob_string)) != 0:
if b_any(ext in x for x in glob.glob(glob_string)) == True:
nifti_globs.append(glob_string)
glob_string = os.path.join(glob_string, "*")
prog_string = prog_string + "."
print(prog_string)
if len(nifti_globs) == 0:
err = "\n\n[!] No output filepaths found in the pipeline output " \
"directory provided for the derivatives selected!\n\nPipeline " \
"output directory provided: %s\nDerivatives selected:%s\n\n" \
% (pipeline_output_folder, resource_list)
raise Exception(err)
return nifti_globs
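# Example (sketch; the derivative name is illustrative): for a selected resource
# such as "alff_to_standard_zstd", the globs collected above look like
#
#   <pipeline_output_folder>/*/alff_to_standard_zstd/*/*
#   <pipeline_output_folder>/*/alff_to_standard_zstd/*/*/*
#
# with one extra "/*" appended per nested strategy level, and a glob is added
# only while its expanded paths still include files containing ".nii".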
def grab_raw_score_filepath(filepath, resource_id):
# this lives in the output path collector
import os
import glob
if "vmhc" in resource_id:
raw_score_path = filepath.replace(resource_id,"vmhc_raw_score")
raw_score_path = raw_score_path.replace(raw_score_path.split("/")[-1],"")
raw_score_path = glob.glob(os.path.join(raw_score_path,"*"))[0]
else:
raw_score_path = filepath.replace("_zstd","")
raw_score_path = raw_score_path.replace("_fisher","")
raw_score_path = raw_score_path.replace("_zstat","")
if "sca_roi_files_to_standard" in resource_id:
sub_folder = raw_score_path.split("/")[-2] + "/"
if "z_score" in sub_folder:
raw_score_path = raw_score_path.replace(sub_folder,"")
elif "sca_tempreg_maps_zstat" in resource_id:
sca_filename = raw_score_path.split("/")[-1]
globpath = raw_score_path.replace(sca_filename, "*")
globpath = os.path.join(globpath, sca_filename)
raw_score_path = glob.glob(globpath)[0]
elif "dr_tempreg_maps" in resource_id:
raw_score_path = raw_score_path.replace("map_z_","map_")
raw_filename = raw_score_path.split("/")[-1]
raw_score_path = raw_score_path.replace(raw_filename,"")
raw_score_path = glob.glob(os.path.join(raw_score_path,"*",raw_filename))[0]
else:
# in case filenames are different between z-standardized and raw
raw_score_path = raw_score_path.replace(raw_score_path.split("/")[-1],"")
try:
raw_score_path = glob.glob(os.path.join(raw_score_path,"*"))[0]
except:
raw_score_path = os.path.join(raw_score_path,"*")
if (raw_score_path is None) or (not os.path.exists(raw_score_path)):
err = "\n\n[!] The filepath for the raw score of " \
"%s can not be found.\nFilepath: %s\n\nThis " \
"is needed for the Measure Mean calculation." \
"\n\n" % (resource_id, raw_score_path)
raise Exception(err)
return raw_score_path
def find_power_params_file(filepath, resource_id, series_id):
import os
try:
power_path = filepath.replace(resource_id, "power_params", 1)
series_id_string = "_scan_%s" % series_id
power_first_half = power_path.split(series_id_string)[0]
power_first_half = os.path.join(power_first_half, series_id_string)
participant_id = power_first_half.split("/")[-3]
except Exception as e:
err = "\n\n[!] Something went wrong with finding the power " \
"parameters file for at least one of the participants.\n\n" \
"Error details: %s\n\n" % e
raise Exception(err)
power_params_file = None
for root, dirs, files in os.walk(power_first_half):
for filename in files:
filepath = os.path.join(root, filename)
if "pow_params.txt" in filepath:
power_params_file = filepath
if not power_params_file:
err = "\n\n[!] Could not find the power parameters file for the " \
"following participant and series..\nParticipant: %s\n" \
"Series: %s\n\nIt should be available here: %s\n\n" \
% (participant_id, series_id, power_first_half)
raise Exception(err)
return power_params_file
def extract_power_params(power_params_lines, power_params_filepath):
# check formatting
if len(power_params_lines) != 2:
err = "\n\n[!] There is something wrong with the formatting of the " \
"power parameters file.\nFilepath: %s\n\n" \
% power_params_filepath
raise Exception(err)
names_list = power_params_lines[0].split(",")
values_list = power_params_lines[1].split(",")
# let's make extra sure
if (values_list[0].replace(" ", "") not in power_params_filepath) or \
(values_list[1].replace(" ", "") not in power_params_filepath):
err = "\n\n[!] There is a mismatch between the contents of the " \
"power parameters file and where it is located!\n" \
"Filepath: %s\n\n" % power_params_filepath
raise Exception(err)
if (names_list[2].replace(" ", "") != "MeanFD_Power") or \
(names_list[3].replace(" ", "") != "MeanFD_Jenkinson") or \
(names_list[-1].replace(" ", "") != "MeanDVARS"):
err = "\n\n[!] There is a mismatch between the power parameters " \
"format and what is expected!!\nFilepath: %s\n\n" \
% power_params_filepath
raise Exception(err)
meanfd_power = values_list[2]
meanfd_jenk = values_list[3]
meandvars = values_list[-1]
return meanfd_power, meanfd_jenk, meandvars
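# Sketch of the expected two-line pow_params.txt layout, inferred from the checks above
# (the values shown are made up): one comma-separated header row and one row of values,
# with MeanFD_Power and MeanFD_Jenkinson in columns 3-4 and MeanDVARS in the last column.
#
#   Subject,Scan,MeanFD_Power,MeanFD_Jenkinson,MeanDVARS
#   sub-0050002,rest_run-1,0.12,0.09,1.05
#
#   lines = load_text_file(power_params_file, "power parameters file")
#   meanfd_power, meanfd_jenkinson, meandvars = extract_power_params(lines, power_params_file)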
def create_output_dict_list(nifti_globs, pipeline_output_folder,
resource_list, get_motion=False,
get_raw_score=False, pull_func=False,
derivatives=None, exts=['nii', 'nii.gz']):
    import os
    import glob
    import fnmatch
    import itertools
    import pandas as pd
    import pkg_resources as p
if len(resource_list) == 0:
err = "\n\n[!] No derivatives selected!\n\n"
raise Exception(err)
if derivatives is None:
keys_csv = p.resource_filename('CPAC', 'resources/cpac_outputs.csv')
try:
keys = pd.read_csv(keys_csv)
except Exception as e:
err = "\n[!] Could not access or read the cpac_outputs.csv " \
"resource file:\n{0}\n\nError details {1}\n".format(keys_csv, e)
raise Exception(err)
        derivatives = list(
            keys[(keys['Derivative'] == 'yes') & (keys['Space'] == 'template') &
                 (keys['Values'] == 'z-score')]['Resource'])
        derivatives = derivatives + list(
            keys[(keys['Derivative'] == 'yes') & (keys['Space'] == 'template') &
                 (keys['Values'] == 'z-stat')]['Resource'])
if pull_func:
derivatives = derivatives + list(keys[keys['Functional timeseries'] == 'yes']['Resource'])
# remove any extra /'s
pipeline_output_folder = pipeline_output_folder.rstrip("/")
print("\n\nGathering the output file paths from "
"{0}...".format(pipeline_output_folder))
# this is just to keep the fsl feat config file derivatives entries
# nice and lean
search_dirs = []
for derivative_name in derivatives:
for resource_name in resource_list:
if resource_name in derivative_name:
search_dirs.append(derivative_name)
'''
search_dirs = [
resource_name
for resource_name in resource_list
if any([resource_name in derivative_name
for derivative_name in derivatives])
]
'''
# grab MeanFD_Jenkinson just in case
search_dirs += ["power_params"]
exts = ['.' + ext.lstrip('.') for ext in exts]
# parse each result of each "valid" glob string
output_dict_list = {}
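    # Assumed output-directory layout (inferred from how filepath_pieces is indexed below):
    #   <pipeline_output_folder>/<participant_session_id>/<resource_id>/<scan_id>/<strategy dirs...>/<file>.nii[.gz]
    # pieces[0] -> participant_session ID, pieces[1] -> resource/derivative name,
    # pieces[2] -> "_scan_<series>" string, pieces[3:] -> nuisance/strategy folders.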
for root, _, files in os.walk(pipeline_output_folder):
for filename in files:
filepath = os.path.join(root, filename)
if not any(fnmatch.fnmatch(filepath, pattern) for pattern in nifti_globs):
continue
            matched_ext = next((e for e in exts if filepath.endswith(e)), None)
            if matched_ext is None:
                continue
            relative_filepath = filepath.split(pipeline_output_folder)[1]
            filepath_pieces = [_f for _f in relative_filepath.split("/") if _f]
            resource_id = filepath_pieces[1]
            if resource_id not in search_dirs:
                continue
            series_id_string = filepath_pieces[2]
            strat_info = "_".join(filepath_pieces[3:])[:-len(matched_ext)]
unique_resource_id = (resource_id, strat_info)
if unique_resource_id not in output_dict_list.keys():
output_dict_list[unique_resource_id] = []
unique_id = filepath_pieces[0]
series_id = series_id_string.replace("_scan_", "")
series_id = series_id.replace("_rest", "")
new_row_dict = {}
new_row_dict["participant_session_id"] = unique_id
new_row_dict["participant_id"], new_row_dict["Sessions"] = \
unique_id.split('_')
new_row_dict["Series"] = series_id
new_row_dict["Filepath"] = filepath
print('{0} - {1} - {2}'.format(
unique_id,
series_id,
resource_id
))
if get_motion:
# if we're including motion measures
power_params_file = find_power_params_file(filepath,
resource_id, series_id)
power_params_lines = load_text_file(power_params_file,
"power parameters file")
meanfd_p, meanfd_j, meandvars = \
extract_power_params(power_params_lines,
power_params_file)
new_row_dict["MeanFD_Power"] = meanfd_p
new_row_dict["MeanFD_Jenkinson"] = meanfd_j
new_row_dict["MeanDVARS"] = meandvars
if get_raw_score:
# grab raw score for measure mean just in case
raw_score_path = grab_raw_score_filepath(filepath,
resource_id)
new_row_dict["Raw_Filepath"] = raw_score_path
# unique_resource_id is tuple (resource_id,strat_info)
output_dict_list[unique_resource_id].append(new_row_dict)
return output_dict_list
def create_output_df_dict(output_dict_list, inclusion_list=None):
import pandas as pd
output_df_dict = {}
# unique_resource_id is tuple (resource_id,strat_info)
for unique_resource_id in output_dict_list.keys():
# NOTE: this dataframe reflects what was found in the C-PAC output
# directory for individual-level analysis outputs,
# NOT what is in the pheno file
new_df = pd.DataFrame(output_dict_list[unique_resource_id])
# drop whatever is not in the inclusion lists
if inclusion_list:
new_df = new_df[new_df.participant_id.isin(inclusion_list)]
if new_df.empty:
print("No outputs found for {0} for the participants "
"listed in the the group analysis participant list you "
"used. Skipping generating a model for this "
"output.".format(unique_resource_id))
continue
# unique_resource_id is tuple (resource_id,strat_info)
if unique_resource_id not in output_df_dict.keys():
output_df_dict[unique_resource_id] = new_df
return output_df_dict
def gather_outputs(pipeline_folder, resource_list, inclusion_list,
get_motion, get_raw_score, get_func=False,
derivatives=None):
nifti_globs = gather_nifti_globs(
pipeline_folder,
resource_list,
get_func
)
output_dict_list = create_output_dict_list(
nifti_globs,
pipeline_folder,
resource_list,
get_motion,
get_raw_score,
get_func,
derivatives
)
output_df_dict = create_output_df_dict(output_dict_list, inclusion_list)
return output_df_dict
def pheno_sessions_to_repeated_measures(pheno_df, sessions_list):
    """Take in the selected session names, and match them to the unique
participant-session IDs appropriately for an FSL FEAT repeated measures
analysis.
More info:
https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FEAT/
UserGuide#Paired_Two-Group_Difference_.28Two-Sample_Paired_T-Test.29
Sample input:
pheno_df
sub01
sub02
sessions_list
[ses01, ses02]
Expected output:
pheno_df Sessions participant_sub01 participant_sub02
sub01 ses01 1 0
sub02 ses01 0 1
sub01 ses02 1 0
sub02 ses02 0 1
"""
# first, check to see if this design matrix setup has already been done
# in the pheno CSV file
# NOTE: this is mainly for PRESET GROUP ANALYSIS MODELS!!!
num_partic_cols = 0
for col_names in pheno_df.columns:
if "participant_" in col_names:
num_partic_cols += 1
if num_partic_cols > 1 and ("Sessions" in pheno_df.columns or "Sessions_column_one" in pheno_df.columns):
for part_id in pheno_df["participant_id"]:
if "participant_{0}".format(part_id) in pheno_df.columns:
continue
break
else:
# if it's already set up properly, then just send the pheno_df
# back and bypass all the machinery below
return pheno_df
else:
# if not an FSL model preset, continue as normal
        new_rows = []
        # grab the ordered sublist before we double the rows
        sublist = pheno_df['participant_id']
        for session in sessions_list:
            sub_pheno_df = pheno_df.copy()
            sub_pheno_df["Sessions"] = session
            sub_pheno_df["participant_session_id"] = \
                pheno_df.participant_id + '_ses-%s' % session
            new_rows.append(sub_pheno_df)
        pheno_df = pd.concat(new_rows)
sessions_col = []
part_ids_col = []
# participant IDs new columns
participant_id_cols = {}
i = 0
for participant_unique_id in pheno_df["participant_session_id"]:
part_col = [0] * len(pheno_df["participant_session_id"])
for session in sessions_list:
if session in participant_unique_id.split("_")[1]:
#print(participant_unique_id)# generate/update sessions categorical column
part_id = participant_unique_id.split("_")[0]
part_ids_col.append(str(part_id))
sessions_col.append(str(session))
header_title = "participant_%s" % part_id
# generate/update participant ID column (1's or 0's)
if header_title not in participant_id_cols.keys():
part_col[i] = 1
participant_id_cols[header_title] = part_col
else:
participant_id_cols[header_title][i] = 1
i += 1
pheno_df["Sessions"] = sessions_col
pheno_df["participant"] = part_ids_col
# add new participant ID columns
for sub_id in sublist:
new_col = 'participant_{0}'.format(sub_id)
pheno_df[new_col] = participant_id_cols[new_col]
pheno_df = pheno_df.astype('object')
return pheno_df
def pheno_series_to_repeated_measures(pheno_df, series_list,
repeated_sessions=False):
import pandas as pd
# take in the selected series/scans, and create all of the permutations
# of unique participant IDs (participant_site_session) and series/scans
# and populate the pheno
# this is so the user does not have to have a specially-formatted
# version of the phenotype CSV for repeated measures; they can just
# enter the regular one
# first, check to see if this design matrix setup has already been done
# in the pheno CSV file
num_partic_cols = 0
for col_names in pheno_df.columns:
if "participant_" in col_names:
num_partic_cols += 1
if num_partic_cols > 1 and "Series" in pheno_df.columns:
for part_id in pheno_df["participant_id"]:
if "participant_{0}".format(part_id) in pheno_df.columns:
continue
break
else:
# if it's already set up properly, then just send the pheno_df
# back and bypass all the machinery below
return pheno_df
new_rows = []
for series in series_list:
sub_pheno_df = pheno_df.copy()
sub_pheno_df["Series"] = series
new_rows.append(sub_pheno_df)
    pheno_df = pd.concat(new_rows)
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 23 14:57:38 2021
@author: kenhu
"""
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
df = pd.read_csv('BankChurners.csv')
df = df.iloc[: , :-2]
df['Attrition_Status'], index = pd.factorize(df['Attrition_Flag'])
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
import datetime as dtm
import json
pd.options.mode.chained_assignment = None
from scipy.optimize import curve_fit
from .data import _get_connection
from .plotting import _init_plot, _draw_plot, colormap
from .isotope import Isotope
from .spectrum import Spectrum
class DecayChain(object):
"""Radioactive Decay Chain
Uses the Bateman equations to calculate the activities and number of decays
from a radioactive decay chain as a function of time, both in production
and decay. Also, initial isotope activities and production rates can
be fit to observed count data, or directly fit to HPGe spectra using the
`get_counts()` function.
Parameters
----------
parent_isotope : str
Parent isotope in the chain.
R : array_like, dict, str or pd.DataFrame
Production rate for each isotope in the decay chain as a function of time.
If a Nx2 np.ndarray, element n gives the production rate R_n up until
time t_n for the parent isotope. E.g. If the production rate of the parent
is 5 for 1 hour, and 8 for 3 hours, the array will be [[5, 1], [8, 4]]. If
instead time intervals are preferred to a monotonically increasing grid
of timestamps, set 'timestamp=False'. In this case the production rate array
will be [[5, 1], [8, 3]]. (R=5 for 1 hour, R=8 for 3 hours).
If R is a dict, it specifies the production rate for multiple isotopes,
where the keys are the isotopes and the values are type np.ndarray.
If R is a pd.DataFrame, it must have columns 'R' and 'time', and optionally 'isotope'
if R>0 for any isotopes other than the parent. If R is a str, it must be a
path to a file where the same data is provided. Supported file types are
.csv, .json and .db files, where .json files must be in the 'records' format,
and .db files must have a table named 'R'. Also, each isotope must have
the same time grid, for both timestamp=True and timestamp=False.
A0 : float or dict
Initial activity. If a float, the initial activity of the parent isotope.
If a dict, the keys are the isotopes for which the values represent the
initial activity.
units : str, optional
Units of time for the chain. Options are 'ns', 'us', 'ms', 's', 'm', 'h',
'd', 'y', 'ky', 'My', 'Gy'. Default is 's'.
timestamp : bool, optional
Determines if the 'time' variable in R is to be read as a timestamp-like grid,
i.e. in monotonically increasing order, or as a series of time intervals.
Default is `True`.
Attributes
----------
R : pd.DataFrame
Production rate as a function of time, for each isotope in the chain. This
will be modified if `fit_R()` is called.
A0 : dict
Initial activity of each isotope in the chain.
isotopes : list
List of isotopes in the decay chain.
counts : pd.DataFrame
Observed counts from isotopes in the decay chain, which can be used
to determine the initial activities or average production rates using
the `fit_R()` or `fit_A0()` functions.
R_avg : pd.DataFrame
Time-averaged production rate for each isotope where R>0. This will be
modified if `fit_R()` is called.
Examples
--------
>>> dc = ci.DecayChain('Ra-225', R=[[1.0, 1.0], [0.5, 1.5], [2.0, 6]], units='d')
>>> print(dc.isotopes)
['225RAg', '225ACg', '221FRg', '217ATg', '213BIg', '217RNg', '209TLg', '213POg', '209PBg', '209BIg']
>>> print(dc.R_avg)
R_avg isotope
0 1.708333 225RAg
>>> dc = ci.DecayChain('152EU', A0=3.7E3, units='h')
>>> print(ci.isotopes)
['152EUg', '152GDg', '152SMg']
"""
def __init__(self, parent_isotope, R=None, A0=None, units='s', timestamp=True):
if units.lower() in ['hr','min','yr','sec']:
units = {'hr':'h','min':'m','yr':'y','sec':'s'}[units.lower()]
self.units = units
istps = [Isotope(parent_isotope)]
self.isotopes = [istps[0].name]
self.R, self.A0, self._counts = None, {self.isotopes[0]:0.0}, None
self._chain = [[istps[0].decay_const(units), [], []]]
stable_chain = [False]
while not all(stable_chain):
for n in [n for n,ch in enumerate(stable_chain) if not ch]:
stable_chain[n] = True
for prod in istps[n].decay_products:
br = istps[n].decay_products[prod]
I = Isotope(prod)
if I.name in self.isotopes:
self._chain[self.isotopes.index(I.name)][1].append(br)
self._chain[self.isotopes.index(I.name)][2].append(n)
else:
istps.append(I)
self.isotopes.append(I.name)
self._chain.append([I.decay_const(units), [br], [n]])
stable_chain.append(self._chain[-1][0]<1E-12)
if not stable_chain[-1]:
self.A0[self.isotopes[-1]] = 0.0
self._chain = np.array(self._chain, dtype=object)
self._branches = self._generate_branches()
if A0 is not None:
if type(A0)==float or type(A0)==int:
self.A0[self.isotopes[0]] = float(A0)
else:
for i in A0:
self.A0[self._filter_name(i)] = float(A0[i])
if R is not None:
if type(R)==str:
if R.endswith('.json'):
self.R = pd.DataFrame(json.loads(open(R).read()))
elif R.endswith('.csv'):
self.R = pd.read_csv(R, header=0).fillna(method='ffill')
elif R.endswith('.db'):
self.R = pd.read_sql('SELECT * FROM R', _get_connection(R))
if 'isotope' not in self.R.columns.to_list():
self.R['isotope'] = self.isotopes[0]
elif type(R)==pd.DataFrame:
self.R = R.copy(deep=True)
if 'isotope' not in self.R.columns.to_list():
self.R['isotope'] = self.isotopes[0]
elif type(R)==dict:
self.R = pd.DataFrame({'isotope':[], 'R':[], 'time':[]})
for ip in R:
rate = np.array(R[ip])
rt = pd.DataFrame({'isotope':self._filter_name(ip),'R':rate[:,0],'time':rate[:,1]})
self.R = pd.concat([self.R, rt], ignore_index=True).reset_index(drop=True)
elif type(R)==list or type(R)==np.ndarray:
R = np.asarray(R)
self.R = pd.DataFrame({'isotope':self.isotopes[0], 'R':R[:,0], 'time':R[:,1]})
self.R['isotope'] = [self._filter_name(i) for i in self.R['isotope']]
if not timestamp:
for ip in pd.unique(self.R['isotope']):
self.R.loc[self.R['isotope']==ip,'time'] = np.cumsum(self.R.loc[self.R['isotope']==ip,'time'])
time = np.insert(np.unique(self.R['time']), 0, [0.0])
for n,dt in enumerate(time[1:]-time[:-1]):
_R_dict = {p:self.R[self.R['isotope']==p].iloc[n]['R'] for p in pd.unique(self.R['isotope'])}
self.A0 = {p:self.activity(p, dt, _R_dict=_R_dict) for p in self.A0}
def __str__(self):
return self.isotopes[0]
def _filter_name(self, istp):
return Isotope(istp).name
def _index(self, istp):
return self.isotopes.index(self._filter_name(istp))
def _generate_branches(self):
daughters = {i:[] for i in range(len(self._chain))}
for i,pars in enumerate(self._chain[1:,2]):
for p in pars:
daughters[p].append(i+1)
branches = [[0]]
stable_chain = [len(daughters[br[-1]])==0 for br in branches]
while not all(stable_chain):
for par in list(set([b[-1] for b in branches])):
ds = daughters[par]
if len(ds)==0:
continue
if len(ds)>1:
to_dup = [br for br in branches if br[-1]==par]
for m in range(len(ds)-1):
for b in to_dup:
branches.append(b+[ds[m+1]])
for br in branches:
if br[-1]==par:
br.append(ds[0])
else:
for br in branches:
if br[-1]==par:
br.append(ds[0])
stable_chain = [len(daughters[br[-1]])==0 for br in branches]
br_ratios = []
for br in branches:
r = []
for n,i in enumerate(br[:-1]):
r.append(self._chain[br[n+1]][1][self._chain[br[n+1]][2].index(i)])
br_ratios.append(r+[0.0])
return branches, br_ratios
def _get_branches(self, istp):
if self._filter_name(istp) not in self.isotopes:
return [], []
m = self._index(istp)
branches, br_ratios = [], []
for n,br in enumerate(self._branches[0]):
if m in br:
k = br.index(m)
new_br = np.array(br[:k+1])
if not any([np.array_equal(b, new_br) for b in branches]):
branches.append(new_br)
br_ratios.append(np.array(self._branches[1][n][:k] + [0.0]))
return br_ratios, branches
def _r_lm(self, units=None, r_half_conv=False):
if units is None:
return 1.0
if units.lower() in ['hr','min','yr','sec']:
units = {'hr':'h','min':'m','yr':'y','sec':'s'}[units.lower()]
half_conv = {'ns':1E-9, 'us':1E-6, 'ms':1E-3,
's':1.0, 'm':60.0, 'h':3600.0,
'd':86400.0, 'y':31557.6E3, 'ky':31557.6E6,
'My':31557.6E9, 'Gy':31557.6E12}
if r_half_conv:
return half_conv[units]
return half_conv[units]/half_conv[self.units]
def activity(self, isotope, time, units=None, _R_dict=None, _A_dict=None):
"""Activity of an isotope in the chain
Computes the activity of a given isotope in the decay chain at a
given time. Units of activity are in Bq. Units of time must be either
the units for the DecayChain (default 's'), or specified by the `units`
keyword.
Parameters
----------
isotope : str
Isotope for which the activity is calculated.
time : array_like
Time to calculate the activity. Units of time must be the same
as the decay chain, or be given by `units`. Note that if R!=0, time=0 is
defined as the end of production time. Else, if A0!=0, time=0
is defined as the time at which the specified activities equaled
A0. t<0 is not allowed.
units : str, optional
Units of time, if different from the units of the decay chain.
Returns
-------
activity : np.ndarray
Activity of the given isotope in Bq.
Examples
--------
>>> dc = ci.DecayChain('152EU', A0=3.7E3, units='h')
>>> print(dc.activity('152EU', time=0))
3700.0
>>> print(dc.activity('152EU', time=13.537, units='y'))
1849.999906346199
"""
time = np.asarray(time)
A = np.zeros(len(time)) if time.shape else np.array(0.0)
finished = []
for m,(BR, chain) in enumerate(zip(*self._get_branches(isotope))):
lm = self._r_lm(units)*self._chain[chain, 0]
L = len(chain)
for i in range(L):
sub = ''.join(map(str, chain[i:]))
if sub in finished:
continue
finished.append(sub)
# if i==L-1 and m>0: # only add A0 of end isotope once
# continue
ip = self.isotopes[chain[i]]
A0 = self.A0[ip] if _A_dict is None else _A_dict[ip]
if A0==0.0 and _R_dict is None:
continue
A_i = lm[-1]*(A0/lm[i])
B_i = np.prod(lm[i:-1]*BR[i:-1])
for j in range(i, L):
K = np.arange(i, L)
d_lm = lm[K[K!=j]]-lm[j]
C_j = np.prod(np.where(np.abs(d_lm)>1E-12, d_lm, 1E-12*np.sign(d_lm)))
A += A_i*B_i*np.exp(-lm[j]*time)/C_j
if _R_dict is not None:
if ip in _R_dict:
if lm[j]>1E-12:
A += _R_dict[ip]*lm[-1]*B_i*(1.0-np.exp(-lm[j]*time))/(lm[j]*C_j)
else:
A += _R_dict[ip]*lm[-1]*B_i*time/C_j
return A
def decays(self, isotope, t_start, t_stop, units=None, _A_dict=None):
"""Number of decays in a given time interval
Computes the number of decays from a given isotope in the
decay chain in the time interal t_start to t_stop. The
units of t_start and t_stop must be either the same units
as the decay chain, or be specified by the `units` keyword.
Parameters
----------
isotope : str
Isotope for which the number of decays is calculated.
t_start : array_like
Time of the start of the interval.
t_stop : array_like
Time of the end of the interval.
units : str, optional
Units of time, if different from the units of the decay chain.
Returns
-------
decays : np.ndarray
Number of decays
Examples
--------
>>> dc = ci.DecayChain('152EU', A0=3.7E3, units='h')
>>> print(dc.decays('152EU', t_start=1, t_stop=2))
13319883.293399204
>>> print(dc.decays('152EU', t_start=50, t_stop=50.1, units='y'))
900151618.5228329
"""
t_start, t_stop = np.asarray(t_start), np.asarray(t_stop)
D = np.zeros(len(t_start)) if t_start.shape else (np.zeros(len(t_stop)) if t_stop.shape else np.array(0.0))
for m,(BR, chain) in enumerate(zip(*self._get_branches(isotope))):
lm = self._r_lm(units)*self._chain[chain,0]
L = len(chain)
for i in range(L):
if i==L-1 and m>0:
continue
ip = self.isotopes[chain[i]]
A0 = self.A0[ip] if _A_dict is None else _A_dict[ip]
A_i = lm[-1]*(A0/lm[i])
B_i = np.prod(lm[i:-1]*BR[i:-1])
for j in range(i, len(chain)):
K = np.arange(i, len(chain))
d_lm = lm[K[K!=j]]-lm[j]
C_j = np.prod(np.where(np.abs(d_lm)>1E-12, d_lm, 1E-12*np.sign(d_lm)))
if lm[j]>1E-12:
D += A_i*B_i*(np.exp(-lm[j]*t_start)-np.exp(-lm[j]*t_stop))/(lm[j]*C_j)
else:
D += A_i*B_i*(t_stop-t_start)/C_j
return D*self._r_lm((self.units if units is None else units), True)
@property
def counts(self):
return self._counts
@counts.setter
def counts(self, N_c):
if N_c is not None:
if type(N_c)==pd.DataFrame:
self._counts = N_c.copy(deep=True)
elif type(N_c)!=dict:
N_c = np.asarray(N_c)
self._counts = pd.DataFrame({'isotope':self.isotopes[0],
'start':N_c[:,0],
'stop':N_c[:,1],
'counts':N_c[:,2],
'unc_counts':N_c[:,3]})
else:
self._counts = pd.DataFrame({'isotope':[],'start':[],'stop':[],'counts':[],'unc_counts':[]})
for ip in N_c:
ct = np.array(N_c[ip])
if len(ct.shape)==1:
ct = np.array([ct])
ct = pd.DataFrame({'isotope':self._filter_name(ip),
'start':ct[:,0],
'stop':ct[:,1],
'counts':ct[:,2],
'unc_counts':ct[:,3]})
self._counts = pd.concat([self._counts, ct], ignore_index=True).reset_index(drop=True)
self._counts['activity'] = [p['counts']*self.activity(p['isotope'], p['start'])/self.decays(p['isotope'], p['start'], p['stop']) for n,p in self._counts.iterrows()]
self._counts['unc_activity'] = self._counts['unc_counts']*self.counts['activity']/self._counts['counts']
def get_counts(self, spectra, EoB, peak_data=None):
"""Retrieves the number of measured decays
Takes the number of measured decays from one of the following: a list of spectra,
a file with peak data, or a pandas DataFrame with peak data.
Parameters
----------
spectra : list or str
List of ci.Spectrum objects, or str of spectra filenames. If list of str,
peak_data **must** be specified. In this case the filenames must be
an exact match of the filenames in `peak_data`. If spectra is a str,
it is assumed to be a regex match for the filenames in `peak_data`.
EoB : str or datetime.datetime
Date/time of end-of-bombardment (t=0). Must be a datetime object or
a string in the format '%m/%d/%Y %H:%M:%S'. This is used to calculate
the decay time for the count.
peak_data : str or pd.DataFrame, optional
Either a file path to a file that was created using
`ci.Spectrum.saveas()` or a DataFrame with the same
structure as `ci.Spectrum.peaks`.
Examples
--------
>>> sp = ci.Spectrum('eu_calib_7cm.Spe')
>>> sp.isotopes = ['152EU']
>>> sp.saveas('test_spec.json')
>>> dc = ci.DecayChain('152EU', A0=3.7E3, units='h')
>>> dc.get_counts([sp], EoB='01/01/2016 08:39:08')
>>> dc.get_counts(['eu_calib_7cm.Spe'], EoB='01/01/2016 08:39:08', peak_data='test_spec.json')
>>> print(dc.counts)
"""
counts = []
if type(EoB)==str:
EoB = dtm.datetime.strptime(EoB, '%m/%d/%Y %H:%M:%S')
if peak_data is not None:
if type(peak_data)==str:
if peak_data.endswith('.json'):
peak_data = pd.read_json(peak_data, orient='records')
elif peak_data.endswith('.csv'):
peak_data = pd.read_csv(peak_data, header=0)
elif peak_data.endswith('.db'):
peak_data = pd.read_sql('SELECT * FROM peaks', _get_connection(peak_data))
else:
peak_data = pd.DataFrame(peak_data)
if type(spectra)==str and peak_data is not None:
df = peak_data['filename']
spectra = list(set(map(str, df[df.str.contains(spectra)].to_list())))
for sp in spectra:
if type(sp)==str:
if peak_data is not None:
df = peak_data[peak_data['filename']==sp]
df['isotope'] = [self._filter_name(i) for i in df['isotope']]
df = df[df['isotope'].isin(self.isotopes)]
if len(df):
start_time = df.iloc[0]['start_time']
                        if isinstance(start_time, str):
start_time = dtm.datetime.strptime(start_time, '%m/%d/%Y %H:%M:%S')
start = (start_time-EoB).total_seconds()*self._r_lm('s')
stop = start+(df.iloc[0]['real_time']*self._r_lm('s'))
else:
raise ValueError('peak_data must be specified if type(spectra)==str')
else:
if peak_data is not None:
df = peak_data[peak_data['filename']==sp.filename]
else:
df = sp.peaks.copy()
df['isotope'] = [self._filter_name(i) for i in df['isotope']]
df = df[df['isotope'].isin(self.isotopes)]
if len(df):
start = (sp.start_time-EoB).total_seconds()*self._r_lm('s')
stop = start+(sp.real_time*self._r_lm('s'))
if len(df):
counts.append(pd.DataFrame({'isotope':df['isotope'], 'start':start, 'stop':stop, 'counts':df['decays'], 'unc_counts':df['unc_decays']}))
self.counts = pd.concat(counts, sort=True, ignore_index=True).sort_values(by=['start']).reset_index(drop=True)
@property
def R_avg(self):
df = []
for ip in np.unique(self.R['isotope']):
time = np.insert(np.unique(self.R['time']), 0, [0.0])
df.append({'isotope':ip, 'R_avg':np.average(self.R[self.R['isotope']==ip]['R'], weights=time[1:]-time[:-1])})
return pd.DataFrame(df)
def fit_R(self):
"""Fit the production rate to count data
Fits a scalar multiplier to the production rate (as a function of time) for
each isotope specified in self.R. The fit minimizes to the number of
measured decays (self.counts) as a function of time, rather than the
activity, because the activity at each time point may be sensitive to
the shape of the decay curve.
Returns
-------
isotopes: list
List of isotopes where R>0. Same indices as fit. (i.e. isotope[0] corresponds
to fit[0] and cov[0][0].)
fit : np.ndarray
The fitted time-averaged production rate for each isotope where R>0.
cov : np.ndarray
Covariance matrix on the fit.
Examples
--------
>>> sp = ci.Spectrum('eu_calib_7cm.Spe')
>>> sp.isotopes = ['152EU']
>>> dc = ci.DecayChain('152EU', R=[[3E5, 36.0]], units='d')
>>> dc.get_counts([sp], EoB='01/01/2016 08:39:08')
>>> print(dc.fit_R())
(array(['152EUg'], dtype=object), array([1291584.51735774]), array([[1.67412376e+09]]))
"""
if self.R is None:
raise ValueError('Cannot fit R: R=0.')
X = []
R_isotopes = [i for i in self.isotopes if i in pd.unique(self.R['isotope'])]
time = np.insert(np.unique(self.R['time']), 0, [0.0])
for ip in R_isotopes:
A0 = {p:0.0 for p in self.A0}
for n,dt in enumerate(time[1:]-time[:-1]):
_R_dict = {ip:self.R[self.R['isotope']==ip].iloc[n]['R']}
A0 = {p:self.activity(p, dt, _R_dict=_R_dict, _A_dict=A0) for p in self.A0}
X.append([self.decays(c['isotope'], c['start'], c['stop'], _A_dict=A0) for n,c in self.counts.iterrows()])
X = np.array(X)
Y = self.counts['counts'].to_numpy()
dY = self.counts['unc_counts'].to_numpy()
func = lambda X_f, *R_f: np.dot(np.asarray(R_f), X_f)
p0 = np.ones(len(X))
fit, cov = curve_fit(func, X, Y, sigma=dY, p0=p0, bounds=(0.0*p0, np.inf*p0))
for n,ip in enumerate(R_isotopes):
df_sub = self.R[self.R['isotope']==ip]
self.R.loc[df_sub.index, 'R'] = df_sub['R']*fit[n]
self.A0 = {i:0.0 for i in self.A0}
for n,dt in enumerate(time[1:]-time[:-1]):
            _R_dict = {p:self.R[self.R['isotope']==p].iloc[n]['R'] for p in pd.unique(self.R['isotope'])}
# SPDX-License-Identifier: Apache-2.0
# Licensed to the Ed-Fi Alliance under one or more agreements.
# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.
# See the LICENSE and NOTICES files in the project root for more information.
from typing import Tuple
from datetime import datetime
from pandas import DataFrame
import pytest
from unittest.mock import Mock, MagicMock
import sqlalchemy
from edfi_schoology_extractor.client_facade import ClientFacade
from edfi_schoology_extractor.api.request_client import RequestClient
from edfi_schoology_extractor.api.paginated_result import PaginatedResult
from edfi_schoology_extractor.mapping import users as usersMap
from edfi_schoology_extractor.mapping import sections as sectionsMap
from edfi_schoology_extractor.mapping import section_associations as sectionAssocMap
from edfi_schoology_extractor.mapping import assignments as assignmentsMap
from edfi_schoology_extractor.mapping import submissions as submissionsMap
from edfi_schoology_extractor.mapping import attendance as attendanceMap
from edfi_schoology_extractor.helpers import sync
def describe_when_getting_users():
def describe_given_one_user():
@pytest.fixture
def result() -> DataFrame:
request_client = Mock(spec=RequestClient)
db_engine = Mock(spec=sqlalchemy.engine.base.Engine)
page_size = 22
users = {
"user": [{"uid": 1234, "role_id": 321}],
"total": 1,
"links": {"self": "ignore"},
}
users_page = PaginatedResult(
request_client, page_size, users, "user", "ignore me"
)
roles = {"role": [{"id": 321, "title": "estudiante"}]}
roles_page = PaginatedResult(
request_client, page_size, roles, "role", "ignore me"
)
# Arrange
request_client.get_users.return_value = users_page
request_client.get_roles.return_value = roles_page
# Also want to mock the UDM mapper function, since it is well-tested elsewhere
usersMap.map_to_udm = Mock()
usersMap.map_to_udm.return_value = DataFrame()
# This method will be tested in a different test
sync.sync_resource = Mock(
side_effect=lambda v, w, x, y="", z="": DataFrame(x)
)
service = ClientFacade(request_client, page_size, db_engine)
# Act
result = service.get_users()
return result
def it_should_return_a_data_frame(result):
assert isinstance(result, DataFrame)
def describe_given_two_pages_of_users():
@pytest.fixture
def system() -> Tuple[DataFrame, Mock]:
request_client = Mock(spec=RequestClient)
db_engine = Mock(spec=sqlalchemy.engine.base.Engine)
page_size = 1
users = {
"user": [{"uid": 1234, "role_id": 321}],
"total": 1,
"links": {"self": "ignore", "next": "url"},
}
users_2 = {
"user": [{"uid": 1235, "role_id": 321}],
"total": 1,
"links": {"self": "ignore"},
}
users_page = PaginatedResult(
request_client, page_size, users, "user", "ignore me"
)
roles = {"role": [{"id": 321, "title": "estudiante"}]}
roles_page = PaginatedResult(
request_client, page_size, roles, "role", "ignore me"
)
request_client.get_users.return_value = users_page
request_client.get_roles.return_value = roles_page
request_client.base_url = ""
request_client.get.return_value = users_2
# Also want to mock the UDM mapper function, since it is well-tested
# elsewhere
usersMap.map_to_udm = Mock()
usersMap.map_to_udm.return_value = DataFrame()
# Arrange
service = ClientFacade(request_client, page_size, db_engine)
# Act
result = service.get_users()
return result, usersMap.map_to_udm
def it_should_return_the_user_in_a_data_frame(system):
result, _ = system
assert isinstance(result, DataFrame)
def it_should_use_first_users_page_when_mapping_to_udm(system):
_, mock_map_to_udm = system
args, _ = mock_map_to_udm.call_args
assert args[0]["uid"][0] == 1234
def it_should_use_second_users_page_when_mapping_to_udm(system):
_, mock_map_to_udm = system
args, _ = mock_map_to_udm.call_args
assert args[0]["uid"][1] == 1235
def it_should_use_the_given_roles_when_mapping_to_udm(system):
_, mock_map_to_udm = system
args, _ = mock_map_to_udm.call_args
assert args[1]["id"][0] == 321
def describe_when_getting_sections():
def describe_given_one_course_with_one_section():
@pytest.fixture
def system() -> Tuple[DataFrame, Mock]:
request_client = Mock(spec=RequestClient)
db_engine = Mock(spec=sqlalchemy.engine.base.Engine)
page_size = 22
courses = {
"course": [{"id": 3333}],
"total": 1,
"links": {"self": "ignore"},
}
courses_page = PaginatedResult(
request_client, page_size, courses, "course", "ignore me"
)
request_client.get_courses.return_value = courses_page
# Also want to mock the UDM mapper function, since it is well-tested
# elsewhere
sectionsMap.map_to_udm = Mock()
sectionsMap.map_to_udm.return_value = DataFrame()
sections = {
"section": [{"id": 1234}],
"total": 1,
"links": {"self": "ignore"},
}
sections_page = PaginatedResult(
request_client, page_size, sections, "section", "ignore me" # type: ignore
)
get_sections_mock = request_client.get_section_by_course_id
get_sections_mock.return_value = sections_page
# Arrange
service = ClientFacade(request_client, page_size, db_engine)
# Act
result = service.get_sections()
return result, sectionsMap.map_to_udm
def it_should_return_a_data_frame(system):
result, _ = system
assert isinstance(result, DataFrame)
def it_should_map_to_the_udm(system):
_, map_to_udm = system
map_to_udm.assert_called_once()
def describe_given_two_pages_of_courses():
@pytest.fixture
def system() -> Tuple[DataFrame, Mock]:
request_client = Mock(spec=RequestClient)
db_engine = Mock(spec=sqlalchemy.engine.base.Engine)
page_size = 1
courses = {
"course": [{"id": 3333}],
"total": 1,
"links": {"self": "ignore"},
}
courses_page = PaginatedResult(
request_client, page_size, courses, "course", "ignore me"
)
request_client.get_courses.return_value = courses_page
# Also want to mock the UDM mapper function, since it is well-tested
# elsewhere
sectionsMap.map_to_udm = Mock()
sectionsMap.map_to_udm.return_value = DataFrame()
sections = {
"section": [{"id": 1234}],
"total": 1,
"links": {"self": "ignore"},
}
sections_page = PaginatedResult(
request_client, page_size, sections, "section", "ignore me"
)
get_sections_mock = request_client.get_section_by_course_id
get_sections_mock.return_value = sections_page
# Arrange
service = ClientFacade(request_client, page_size, db_engine)
# Act
result = service.get_sections()
return result, get_sections_mock
def it_should_use_first_course_when_getting_sections(system):
_, get_sections_mock = system
args = get_sections_mock.call_args
print(get_sections_mock.call_args[0][0])
assert 3333 == args[0][0]
def describe_when_getting_assignments():
def describe_given_a_section_has_one_assignment():
@pytest.fixture
def system() -> Tuple[DataFrame, Mock, Mock]:
request_client = Mock(spec=RequestClient)
db_engine = Mock(spec=sqlalchemy.engine.base.Engine)
page_size = 22
section_id = 1234
assignments = [
{
"id": 3333,
"due": "3456-1-2 01:23:45",
"description": "",
"max_points": 4,
"title": "1",
"type": "assignment",
"section_id": section_id,
}
]
assignments_response_mock = {
"assignment": assignments,
"total": 1,
"links": {"self": "ignore"},
}
assignments_page = PaginatedResult(
request_client,
page_size,
assignments_response_mock,
"assignment",
"ignore me",
)
# Arrange
get_assignments_mock = request_client.get_assignments
get_assignments_mock.return_value = assignments_page
# Mock the UDM mapper
assignmentsMap.map_to_udm = Mock()
assignmentsMap.map_to_udm.return_value = DataFrame()
service = ClientFacade(request_client, page_size, db_engine)
# Act
result = service.get_assignments(section_id)
return result, get_assignments_mock, assignmentsMap.map_to_udm
def it_should_return_a_DataFrame(system):
result, _, _ = system
assert isinstance(result, DataFrame)
def it_should_query_for_the_given_section(system):
_, get_assignments_mock, _ = system
args = get_assignments_mock.call_args
assert 1234 == args[0][0]
def it_should_map_results_to_the_udm(system):
_, _, mapper = system
mapper.assert_called_once()
def it_should_map_first_assignment(system):
_, _, mapper = system
df = mapper.call_args[0][0]
assert df["id"].iloc[0] == 3333
def describe_when_getting_submissions():
def describe_given_one_assignment_and_one_submission():
@pytest.fixture
def result() -> DataFrame:
request_client = Mock(spec=RequestClient)
db_engine = Mock(spec=sqlalchemy.engine.base.Engine)
# This method will be tested in a different test
sync.sync_resource = Mock(
side_effect=lambda v, w, x, y="", z="": DataFrame(x)
)
# Mock the UDM mapper
submissionsMap.map_to_udm = Mock()
submissionsMap.map_to_udm.side_effect = lambda x: x
page_size = 22
assignment_id = 345
section_id = 123
submissions = {
"revision": [
{
"revision_id": 1,
"uid": 100032890,
}
],
"total": 1,
"links": {"self": "ignore"},
}
submissions_page = PaginatedResult(
request_client, page_size, submissions, "revision", "ignore me"
)
# Arrange
request_client.get_submissions_by_section_id_and_grade_item_id.return_value = (
submissions_page
)
service = ClientFacade(request_client, page_size, db_engine)
# Act
result = service.get_submissions(assignment_id, section_id)
return result
def it_should_return_the_submission(result: DataFrame):
assert result["revision_id"][0] == 1
def describe_when_getting_section_associations():
@pytest.fixture
def system() -> Tuple[DataFrame, Mock, Mock]:
request_client = Mock(spec=RequestClient)
page_size = 1
# Also want to mock the UDM mapper function, since it is well-tested
# elsewhere
sectionAssocMap.map_to_udm = Mock()
sectionAssocMap.map_to_udm.return_value = DataFrame()
# Mock the API calls
section_id = 1234
get_sections_mock = request_client.get_enrollments
sections_response_mock = {
"sections": [{"id": 1}, {"id": 2}],
"total": 1,
"links": {"self": "ignore"},
}
sections_page = PaginatedResult(
request_client, page_size, sections_response_mock, "sections", "ignore me"
)
get_sections_mock.return_value = sections_page
# Mock the Sync process
sync.sync_resource = Mock(side_effect=lambda v, w, x, y="", z="": DataFrame(x))
db_engine = Mock(spec=sqlalchemy.engine.base.Engine)
# Arrange
service = ClientFacade(request_client, page_size, db_engine)
# Act
result = service.get_section_associations(section_id)
return result, sectionAssocMap.map_to_udm, sync.sync_resource
def it_should_return_a_data_frame(system):
result, _, _ = system
assert isinstance(result, DataFrame)
def it_should_map_to_the_udm(system):
_, mapper, _ = system
mapper.assert_called_once()
def it_should_map_first_enrollment(system):
_, mapper, _ = system
df = mapper.call_args[0][0]
assert df["id"].iloc[0] == 1
def it_should_map_second_enrollment(system):
_, mapper, _ = system
df = mapper.call_args[0][0]
assert df["id"].iloc[1] == 2
def it_should_use_the_sync_process(system):
_, _, sync_mock = system
sync_mock.assert_called_once()
def describe_when_getting_attendance_events():
@pytest.fixture
def system() -> Tuple[DataFrame, Mock]:
request_client = Mock(spec=RequestClient)
page_size = 1
# Also want to mock the UDM mapper function, since it is well-tested
# elsewhere
attendanceMap.map_to_udm = Mock()
        attendanceMap.map_to_udm.return_value = DataFrame()
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import datetime as dt
import matplotlib.pyplot as plt
from matplotlib import style
import pandas as pd
import numpy as np
import xlrd
from sklearn import linear_model
from sklearn.linear_model import LinearRegression
from sklearn import datasets
import calendar
import json
import datetime
from pandas_datareader import data
class LRANALYSIS():
def __init__(self,start_date,symbols):
self.start_date = start_date
self.symbols = symbols
self.axx = len(symbols)
self.axy = len(start_date)
self.fig, self.axs = plt.subplots(self.axx,self.axy)
#self.fig.show()
def graphgen(self,start_date, symbol,m,n):
today = datetime.date.today()
end_date = "{}".format(today)
df = data.DataReader(symbol, "yahoo", start_date, end_date)
        df.reset_index(level=0, inplace=True)  # move the DatetimeIndex into a regular 'Date' column
        date = df["Date"]
price = df["Adj Close"]
X = date
y = price
X = X.rename_axis("Date")
print(X,"before adjustment")
Xaxis = X
y = y.rename_axis("Price")
X = X.values.astype("datetime64[D]").astype(int)
print(X,"after the adjustment")
        X = pd.Series(X)
import collections
import torch
import os
import pandas as pd
import torch.nn as nn
from tqdm import tqdm
import numpy as np
EPS = 1e-12
class AverageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
self.list = []
def update(self, val, n=1):
self.val = val
self.list.append(val)
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
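# Minimal usage sketch (assumed, mirroring how loss_test is used further below):
#   meter = AverageMeter()
#   meter.update(0.52)          # one batch loss
#   meter.update(0.48, n=32)    # weighted by batch size
#   meter.avg                   # running weighted average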
def average_params(params_list):
assert isinstance(params_list, (tuple, list, collections.deque))
n = len(params_list)
if n == 1:
return params_list[0]
new_params = collections.OrderedDict()
keys = None
for i, params in enumerate(params_list):
if keys is None:
keys = params.keys()
for k, v in params.items():
if k not in keys:
raise ValueError('the %d-th model has different params'%i)
if k not in new_params:
new_params[k] = v / n
else:
new_params[k] += v / n
return new_params
def zscore(x):
return (x - x.mean(dim=0, keepdim=True)) / x.std(dim=0, keepdim=True, unbiased=False)
def calc_loss(pred, label):
return torch.mean((zscore(pred) - label) ** 2)
def calc_corr(pred, label):
return (zscore(pred) * zscore(label)).mean()
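# Note: calc_corr is the Pearson correlation computed by hand -- z-scoring both series
# (zero mean, unit std) and averaging their elementwise product.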
def test_ic(model_list, data_list, device, verbose=True, ic_type='spearman'):
'''
model_list: [model1, model2, ...]
datalist: [loader1, loader2, ...]
return: unified ic, specific ic (all values), loss
'''
spec_ic = []
loss_test = AverageMeter()
loss_fn = torch.nn.MSELoss()
label_true, label_pred = torch.empty(0).to(device), torch.empty(0).to(device)
for i in range(len(model_list)):
label_spec_true, label_spec_pred = torch.empty(0).to(device), torch.empty(0).to(device)
model_list[i].eval()
with torch.no_grad():
for _, (feature, label_actual, _, _) in enumerate(data_list[i]):
# feature = torch.tensor(feature, dtype=torch.float32, device=device)
label_actual = label_actual.clone().detach().view(-1, 1)
label_actual, mask = handle_nan(label_actual)
label_predict = model_list[i].predict(feature).view(-1, 1)
label_predict = label_predict[mask]
loss = loss_fn(label_actual, label_predict)
loss_test.update(loss.item())
# Concat them for computing IC later
label_true = torch.cat([label_true, label_actual])
label_pred = torch.cat([label_pred, label_predict])
label_spec_true = torch.cat([label_spec_true, label_actual])
label_spec_pred = torch.cat([label_spec_pred, label_predict])
ic = calc_ic(label_spec_true, label_spec_pred, ic_type)
spec_ic.append(ic.item())
unify_ic = calc_ic(label_true, label_pred, ic_type).item()
# spec_ic.append(sum(spec_ic) / len(spec_ic))
loss = loss_test.avg
if verbose:
print('[IC] Unified IC: {:.6f}, specific IC: {}, loss: {:.6f}'.format(unify_ic, spec_ic, loss))
return unify_ic, spec_ic, loss
def test_ic_daily(model_list, data_list, device, verbose=True, ic_type='spearman'):
'''
model_list: [model1, model2, ...]
datalist: [loader1, loader2, ...]
return: unified ic, specific ic (all values + avg), loss
'''
spec_ic = []
loss_test = AverageMeter()
loss_fn = torch.nn.MSELoss()
label_true, label_pred = torch.empty(0).to(device), torch.empty(0).to(device)
for i in range(len(model_list)):
label_spec_true, label_spec_pred = torch.empty(0).to(device), torch.empty(0).to(device)
model_list[i].eval()
with torch.no_grad():
for slc in tqdm(data_list[i].iter_daily(), total=data_list[i].daily_length):
feature, label_actual, _, _ = data_list[i].get(slc)
# for _, (feature, label_actual, _, _) in enumerate(data_list[i]):
# feature = torch.tensor(feature, dtype=torch.float32, device=device)
label_actual = torch.tensor(label_actual, dtype=torch.float32, device=device).view(-1, 1)
label_actual, mask = handle_nan(label_actual)
label_predict = model_list[i].predict(feature).view(-1, 1)
label_predict = label_predict[mask]
loss = loss_fn(label_actual, label_predict)
loss_test.update(loss.item())
# Concat them for computing IC later
label_true = torch.cat([label_true, label_actual])
label_pred = torch.cat([label_pred, label_predict])
label_spec_true = torch.cat([label_spec_true, label_actual])
label_spec_pred = torch.cat([label_spec_pred, label_predict])
ic = calc_ic(label_spec_true, label_spec_pred, ic_type)
spec_ic.append(ic.item())
unify_ic = calc_ic(label_true, label_pred, ic_type).item()
# spec_ic.append(sum(spec_ic) / len(spec_ic))
loss = loss_test.avg
if verbose:
print('[IC] Unified IC: {:.6f}, specific IC: {}, loss: {:.6f}'.format(unify_ic, spec_ic, loss))
return unify_ic, spec_ic, loss
def test_ic_uni(model, data_loader, model_path=None, ic_type='spearman', verbose=False):
if model_path:
model.load_state_dict(torch.load(model_path))
model.eval()
loss_all = []
ic_all = []
for slc in tqdm(data_loader.iter_daily(), total=data_loader.daily_length):
data, label, _, _ = data_loader.get(slc)
with torch.no_grad():
pred = model.predict(data)
mask = ~torch.isnan(label)
pred = pred[mask]
label = label[mask]
loss = torch.mean(torch.log(torch.cosh(pred - label)))
if ic_type == 'spearman':
ic = spearman_corr(pred, label)
elif ic_type == 'pearson':
ic = pearson_corr(pred, label)
loss_all.append(loss.item())
ic_all.append(ic)
loss, ic = np.mean(loss_all), np.mean(ic_all)
if verbose:
print('IC: ', ic)
return loss, ic
def calc_ic(x, y, ic_type='pearson'):
ic = -100
if ic_type == 'pearson':
ic = pearson_corr(x, y)
elif ic_type == 'spearman':
ic = spearman_corr(x, y)
return ic
def create_dir(path):
if not os.path.exists(path):
os.makedirs(path)
def handle_nan(x):
mask = ~torch.isnan(x)
return x[mask], mask
class Log_Loss(nn.Module):
def __init__(self):
super(Log_Loss, self).__init__()
def forward(self, ytrue, ypred):
delta = ypred - ytrue
return torch.mean(torch.log(torch.cosh(delta)))
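# Log-cosh behaves like 0.5*delta**2 for small errors and like |delta| - log(2) for large
# ones, so it keeps MSE-style smooth gradients while being less sensitive to outliers.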
def spearman_corr(x, y):
X = pd.Series(x.cpu())
Y = pd.Series(y.cpu())
spearman = X.corr(Y, method='spearman')
return spearman
def spearman_corr2(x, y):
X = pd.Series(x)
    Y = pd.Series(y)
#
# Analysis of the hvorg_movies
#
import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import astropy.units as u
from sunpy.time import parse_time
import hvorg_style as hvos
plt.rc('text', usetex=True)
plt.rc('font', size=14)
figsize = (10, 5)
# Read in the data
directory = os.path.expanduser('~/Data/hvanalysis/derived')
# Image output location
img = hvos.img
# application
application = 'helioviewer.org'
# data product
data_product = 'screenshots'
# Type of data we are looking at
data_analyzed = '{:s} {:s}'.format(application, data_product)
data_type = '{:s}'.format(data_analyzed)
# Time difference
f = os.path.join(directory, 'hvorg_screenshot_time_difference_seconds.npy')
td = np.load(f) * u.s
topicality_subtitle = "{:s} = {:s} - {:s}".format(hvos.durations['tmtopicality'][0], hvos.dates['Tmrequest'], hvos.dates['Tsdate'])
# Screenshot request times
f = os.path.join(directory, "hvorg_screenshot_request_time.pkl")
screenshot_request_time = pickle.load(open(f, 'rb'))
# Number of screenshots
nmovies = len(td)
# Figure 1 : topicality
# Scale size we are interested in
topicality_unit = u.year
# Define the topicality on the scale size
topicality = td.to(topicality_unit).value
# Histogram bins
topicality_bins = 100
# make the plot
plt.close('all')
fig = plt.figure()
ax = fig.add_subplot(111)
plt.hist(topicality, bins=topicality_bins)
ax.grid(True, linestyle='dotted')
plt.yscale('log')
plt.xlabel(hvos.qlabel(hvos.durations['tmtopicality'][1], hvos.durations['tmtopicality'][0], str(topicality_unit)))
plt.ylabel(hvos.mlabel(len(td), data_type=data_product))
plt.title('{{{:s}}}\n{{{:s}}}'.format(data_type, topicality_subtitle))
plt.tight_layout()
filename = hvos.overleaf(os.path.join(data_type, 'topicality'))
filename = '{:s}.{:s}'.format(filename, hvos.imgfiletype)
filepath = os.path.join(img, filename)
plt.savefig(filepath)
# Figure 2: topicality < 30 days
# Scale size we are interested in
td_short_unit = u.day
# Longest possible topicality
td_short_limit = 30*u.day
# Find the topicalities less than the longest possible
topicality = td.to(td_short_unit)
these = np.abs(topicality) < td_short_limit
topicality = topicality[these].value
# Histogram bins
topicality_bins = int(td_short_limit.to(td_short_unit).value*24)
# Fix the bin size
td_short_fraction = 24
plt.close('all')
fig = plt.figure()
ax = fig.add_subplot(111)
plt.hist(topicality, bins=topicality_bins)
ax.grid(True, linestyle='dotted')
rl = hvos.relevant_lines(hvos.lines, tr=[0, 30]*u.day)
for key in list(rl.keys()):
kwargs = rl[key]
kwargs['label'] = str(key)
plt.axvline(key.to(td_short_unit).value, **kwargs)
plt.yscale('log')
plt.xlabel(hvos.qlabel(hvos.durations['tmtopicality'][1], hvos.durations['tmtopicality'][0], str(td_short_unit)))
plt.ylabel(hvos.mlabel(len(topicality), data_type=data_product))
plt.title('{{{:s}}}\n{{{:s}}} {{{:s}}} {{{:s}}}'.format(data_type, topicality_subtitle, "$\le$", td_short_limit))
plt.legend()
plt.tight_layout()
filename = hvos.overleaf(os.path.join(data_type, 'topicality_{:s}'.format(str(td_short_limit))))
filename = '{:s}.{:s}'.format(filename, hvos.imgfiletype)
filepath = os.path.join(img, filename)
plt.savefig(filepath)
# Figure 6
# Number of requests as a function of time
title = 'screenshots per quarter'
df = pd.DataFrame(screenshot_request_time, columns=['date'])
# Setting the date as the index since the TimeGrouper works on Index, the date column is not dropped to be able to count
df.set_index('date', drop=False, inplace=True)
plt.close('all')
fig = plt.figure()
ax = fig.add_subplot(111)
h = df.groupby(pd.TimeGrouper(freq='Q')).count()
h.rename(columns={'date': 'movies'}, inplace=True)
h.plot(kind='bar', ax=ax)
new_ticks = []
for dt in h.index:
new_ticks.append(dt.to_datetime())
ax.set_xticklabels([dt.strftime('%Y-%m-%d') for dt in new_ticks])
ax.set_title(title)
ax.set_ylabel(hvos.mlabel(len(screenshot_request_time)))
ax.set_xlabel('date')
ax.xaxis.set_tick_params(labelsize=10)
ax.grid(linestyle='dotted')
fig.autofmt_xdate(rotation=65)
plt.legend()
plt.tight_layout()
filename = hvos.overleaf(os.path.join(data_type, title))
filename = '{:s}.{:s}'.format(filename, hvos.imgfiletype)
filepath = os.path.join(img, filename)
plt.savefig(filepath)
# Figure 7
# Daily numbers as a plot
title = 'daily screenshots requested'
plt.close('all')
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
h = df.groupby(pd.TimeGrouper(freq='D')).count()
import logging
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_california_housing
from sklearn.datasets import load_boston
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import boston_housing
logger = logging.getLogger("ACE")
class BostonRegression:
def __init__(self):
self.k_fold_count = 4
self.num_epochs = 500
self.all_mae_histories = []
self.model = None
logger.info("Loading regression dataset")
(self.train_data, self.train_targets), (self.test_data, self.test_targets) = boston_housing.load_data()
self.mean = self.train_data.mean(axis=0)
self.train_data -= self.mean
self.std = self.train_data.std(axis=0)
self.train_data /= self.std
self.test_data -= self.mean
self.test_data /= self.std
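        # Note: the test split is standardized with the *training* mean/std computed above,
        # so no information from the test set leaks into the preprocessing.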
def showbostonmeta(self):
logger.info(f"Training data shape # {self.train_data.shape}")
logger.info(f"Test data shape # {self.test_data.shape}")
logger.info(f"Train targets # {self.train_targets}")
boston_dataset = load_boston()
boston = pd.DataFrame(boston_dataset.data, columns=boston_dataset.feature_names)
boston['MEDV'] = boston_dataset.target
head = boston.head()
logger.info(f"\n{head}")
def showmetadata(self):
housing = fetch_california_housing()
keys = housing.keys()
logger.info(f"Data set # {keys}")
def showhead(self):
housing = fetch_california_housing()
        cali = pd.DataFrame(housing.data, columns=housing.feature_names)
import unittest
from abc import ABC
import numpy as np
import pandas as pd
from toolbox.ml.ml_factor_calculation import ModelWrapper, calc_ml_factor, generate_indexes
from toolbox.utils.slice_holder import SliceHolder
class MyTestCase(unittest.TestCase):
def examples(self):
# index includes non trading days
# exactly 60 occurrences of each ticker
first = pd.Timestamp(year=2010, month=1, day=1)
self.date_index = pd.MultiIndex.from_product(
[pd.date_range(start=first, end=pd.Timestamp(year=2010, month=3, day=1)),
['BOB', 'JEFF', 'CARL']], names=['date', 'symbol'])
self.expected_index_e5_10_30 = [
(SliceHolder(first, first + pd.Timedelta(days=29)),
SliceHolder(first + pd.Timedelta(days=40), first + pd.Timedelta(days=44))),
(SliceHolder(first, first + pd.Timedelta(days=34)),
SliceHolder(first + pd.Timedelta(days=45), first + pd.Timedelta(days=49))),
(SliceHolder(first, first + pd.Timedelta(days=39)),
SliceHolder(first + pd.Timedelta(days=50), first + pd.Timedelta(days=54))),
(SliceHolder(first, first + pd.Timedelta(days=44)),
SliceHolder(first + pd.Timedelta(days=55), first + pd.Timedelta(days=59)))
]
self.expected_index_e7_8_30 = [
(SliceHolder(first, first + pd.Timedelta(days=29)),
SliceHolder(first + pd.Timedelta(days=37), first + pd.Timedelta(days=44))),
(SliceHolder(first, first + pd.Timedelta(days=37)),
SliceHolder(first + pd.Timedelta(days=45), first + pd.Timedelta(days=52))),
(SliceHolder(first, first + pd.Timedelta(days=45)),
SliceHolder(first + pd.Timedelta(days=53), first + pd.Timedelta(days=59))),
]
self.expected_index_e5_10_30 = self.turn_to_datetime64(self.expected_index_e5_10_30)
self.expected_index_e7_8_30 = self.turn_to_datetime64(self.expected_index_e7_8_30)
self.expected_index_r5_10_30 = [
(SliceHolder(first, first + pd.Timedelta(days=29)),
             SliceHolder(first + pd.Timedelta(days=40), first + pd.Timedelta(days=44))),
import pkg_resources
from unittest.mock import sentinel
import pandas as pd
import pytest
import osmo_jupyter.dataset.combine as module
@pytest.fixture
def test_picolog_file_path():
return pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_picolog.csv"
)
@pytest.fixture
def test_calibration_file_path():
return pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_calibration_log.csv"
)
class TestOpenAndCombineSensorData:
def test_interpolates_data_correctly(
self, test_calibration_file_path, test_picolog_file_path
):
combined_data = module.open_and_combine_picolog_and_calibration_data(
calibration_log_filepaths=[test_calibration_file_path],
picolog_log_filepaths=[test_picolog_file_path],
).reset_index() # move timestamp index to a column
# calibration log has 23 columns, but we only need to check that picolog data is interpolated correctly
subset_combined_data_to_compare = combined_data[
[
"timestamp",
"equilibration status",
"setpoint temperature (C)",
"PicoLog temperature (C)",
]
]
expected_interpolation = pd.DataFrame(
[
{
"timestamp": "2019-01-01 00:00:00",
"equilibration status": "waiting",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 39,
},
{
"timestamp": "2019-01-01 00:00:01",
"equilibration status": "equilibrated",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 39.5,
},
{
"timestamp": "2019-01-01 00:00:03",
"equilibration status": "equilibrated",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 40,
},
{
"timestamp": "2019-01-01 00:00:04",
"equilibration status": "waiting",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 40,
},
]
).astype(
subset_combined_data_to_compare.dtypes
) # coerce datatypes to match
pd.testing.assert_frame_equal(
subset_combined_data_to_compare, expected_interpolation
)
class TestGetEquilibrationBoundaries:
@pytest.mark.parametrize(
"input_equilibration_status, expected_boundaries",
[
(
{ # Use full timestamps to show that it works at second resolution
pd.to_datetime("2019-01-01 00:00:00"): "waiting",
pd.to_datetime("2019-01-01 00:00:01"): "equilibrated",
pd.to_datetime("2019-01-01 00:00:02"): "equilibrated",
pd.to_datetime("2019-01-01 00:00:03"): "waiting",
},
[
{
"start_time": pd.to_datetime("2019-01-01 00:00:01"),
"end_time": pd.to_datetime("2019-01-01 00:00:02"),
}
],
),
(
{ # Switch to using only years as the timestamp for terseness and readability
pd.to_datetime("2019"): "waiting",
pd.to_datetime("2020"): "equilibrated",
pd.to_datetime("2021"): "waiting",
},
[
{
"start_time": pd.to_datetime("2020"),
"end_time": pd.to_datetime("2020"),
}
],
),
(
{
pd.to_datetime("2020"): "equilibrated",
pd.to_datetime("2021"): "waiting",
pd.to_datetime("2022"): "equilibrated",
pd.to_datetime("2023"): "waiting",
},
[
{
"start_time": pd.to_datetime("2020"),
"end_time": pd.to_datetime("2020"),
},
{
"start_time": pd.to_datetime("2022"),
"end_time": pd.to_datetime("2022"),
},
],
),
(
{
| pd.to_datetime("2019") | pandas.to_datetime |
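# A minimal sketch of the boundary extraction these parametrized cases
# describe (module.get_equilibration_boundaries itself may differ in detail):
# collapse each contiguous run of 'equilibrated' rows into a start/end pair.
import pandas as pd

def equilibration_boundaries_sketch(status: pd.Series) -> pd.DataFrame:
    equilibrated = status == "equilibrated"
    # A new run starts wherever the equilibrated flag changes value.
    run_id = (equilibrated != equilibrated.shift()).cumsum()
    runs = status.index.to_series().groupby(run_id).agg(["min", "max"])
    runs = runs[equilibrated.groupby(run_id).first()]
    return (runs.rename(columns={"min": "start_time", "max": "end_time"})
                .reset_index(drop=True))

status = pd.Series({pd.to_datetime("2019"): "waiting",
                    pd.to_datetime("2020"): "equilibrated",
                    pd.to_datetime("2021"): "waiting"})
print(equilibration_boundaries_sketch(status))  # one run: 2020 through 2020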
"""
This script contains helper functions to make plots presented in the paper
"""
from itertools import product
from itertools import compress
import copy
from pickle import UnpicklingError
import dill as pickle
from adaptive.saving import *
from IPython.display import display, HTML
import scipy.stats as stats
from glob import glob
from time import time
from scipy.stats import norm
import seaborn as sns
from adaptive.compute import collect
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib import cm
from matplotlib.lines import Line2D
import numpy as np
from matplotlib.ticker import FormatStrFormatter
np.seterr(all='raise')
def read_files(file_name):
files = glob(file_name)
print(f'Found {len(files)} files.')
results = []
for file in files:
try:
with open(file, 'rb') as f:
r = pickle.load(f)
results.extend(r)
except Exception:  # typically UnpicklingError from a corrupted or partial file
print(f"Skipping corrupted file: {file}")
return results
def add_config(dfs, r):
dfs = pd.concat(dfs)
for key in r['config']:
if key == 'policy_names':
continue
dfs[key] = r['config'][key]
return dfs
def save_data_timepoints(data, timepoints, method, K, order):
data = data[timepoints, :]
return pd.DataFrame({
"time": np.tile(timepoints, K),
"policy": np.repeat(np.arange(K), len(timepoints)),
"value": data.flatten(order=order),
"method": [method] * data.size,
})
def generate_data_frames(results):
"""
Generate DataFrames from the raw saving results.
"""
df_stats = []
df_probs = []
df_covs = []
for r in results:
CONFIG_COLS = list(r['config'].keys())
CONFIG_COLS.remove('policy_value')
# get statistics table
tabs_stats = []
T = r['config']['T']
for weight, stats in r['stats'].items():
statistics = ['Bias', 'Var']
tab_stat = pd.DataFrame({"statistic": statistics,
"value": stats.flatten(),
'weight': [weight] * len(statistics)
})
tabs_stats.append(tab_stat)
df_stats.append(add_config(tabs_stats, r))
df_stats = pd.concat(df_stats)
# add true standard error, relative variance, relerrors and coverage in df_stats
confidence_level = np.array([0.9, 0.95])
quantile = norm.ppf(0.5+confidence_level/2)
new_stats = []
# group_keys = [*CONFIG_COLS, 'policy', 'weight',]
group_keys = ['experiment', 'policy', 'weight']
for *config, df_cfg in df_stats.groupby(group_keys):
weight = config[0][group_keys.index('weight')]
df_bias = df_cfg.query("statistic=='Bias'")
df_var = df_cfg.query("statistic=='Var'")
true_se = np.std(df_bias['value'])
if true_se < 1e-6:
print(
f"For config {dict(zip([*CONFIG_COLS, 'policy', 'weight'], config))} data is not sufficient, only has {len(df_bias)} samples.")
continue
# relative S.E.
df_relse = pd.DataFrame.copy(df_var)
df_relse['value'] = np.sqrt(np.array(df_relse['value'])) / true_se
df_relse['statistic'] = 'relative S.E.'
# true S.E.
df_truese = pd.DataFrame.copy(df_var)
df_truese['value'] = true_se
df_truese['statistic'] = 'true S.E.'
# relative error
df_relerror = pd.DataFrame.copy(df_bias)
df_relerror['value'] = np.array(df_relerror['value']) / true_se
df_relerror['statistic'] = 'R.E.'
# tstat
df_tstat = | pd.DataFrame.copy(df_bias) | pandas.DataFrame.copy |
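# The two-sided normal quantiles computed above for coverage are the familiar
# 1.645 and 1.960 critical values:
import numpy as np
from scipy.stats import norm

confidence_level = np.array([0.9, 0.95])
quantile = norm.ppf(0.5 + confidence_level / 2)
print(np.round(quantile, 3))  # [1.645 1.96 ]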
import pandas as pd
import numpy as np
from salescleanup import convert_currency
from salescleanup import convert_percent
df = pd.read_csv("https://github.com/chris1610/pbpython/blob/master/data/sales_data_types.csv?raw=True")
# Transforming data types
# Note: astype() returns a new object, so the unassigned calls below only
# preview the conversion; df is unchanged until a column is reassigned.
df['Customer Number'].astype('int')
df["Customer Number"] = df['Customer Number'].astype('int')
df['Active'].astype('bool')
df.astype({'Customer Number': 'int', 'Customer Name': 'str'}).dtypes
# Applying functions from salescleanup.py
df['2016'].apply(convert_currency)
df['2017'].apply(convert_currency)
df['2016'].apply(convert_currency) + df['2017'].apply(convert_currency)
# Assigning converted values back to the columns
df['2016'] = df['2016'].apply(convert_currency)
df['2017'] = df['2017'].apply(convert_currency)
# Handling invalid values
df["Jan Units"] = | pd.to_numeric(df['Jan Units'], errors='coerce') | pandas.to_numeric |
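# The convert_currency / convert_percent helpers imported from salescleanup.py
# are not shown in this snippet; a minimal sketch of what they are assumed to
# do (strip the string formatting and return numbers):
def convert_currency(val):
    """Turn a string like '$125,000.00' into a float."""
    return float(val.replace(",", "").replace("$", ""))

def convert_percent(val):
    """Turn a string like '30.00%' into a fraction (0.30)."""
    return float(val.replace("%", "")) / 100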
from rdkit import Chem
import pandas as pd
from pathlib import Path, PosixPath
import pickle
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--workpath", type=PosixPath, help="absolute path for pkl generation", required=True)
parser.add_argument("--sdf", type=str, help="absolute path of sdf file", required=True)
args = parser.parse_args()
# workpath = Path("/pubhome/qcxia02/git-repo/AI-CONF/datasets/GeoMol/test/drugs-plati")
workpath = args.workpath
sdffile = args.sdf
# mols = Chem.SDMolSupplier("platinum_diverse_dataset_2017_01.sdf", removeHs= False)
# molswoh = Chem.SDMolSupplier("platinum_diverse_dataset_2017_01.sdf")
mols = Chem.SDMolSupplier(sdffile, removeHs= False)
molswoh = Chem.SDMolSupplier(sdffile)
smis = list(map(Chem.MolToSmiles, mols))
smis_woh = list(map(Chem.MolToSmiles, molswoh))
molswoh_addh = list(map(Chem.AddHs, molswoh))
maxconfs = 25
# df = pd.DataFrame({'smiles':smis, 'n_conformers':maxconfs})
# df.to_csv("test_smiles.csv")
df = | pd.DataFrame({'smiles':smis_woh, 'n_conformers':maxconfs}) | pandas.DataFrame |
__author__ = 'brendan'
import main
import pandas as pd
import numpy as np
from datetime import datetime as dt
from matplotlib import pyplot as plt
import random
import itertools
import time
import dateutil
from datetime import timedelta
cols = ['BoP FA Net', 'BoP FA OI Net', 'BoP FA PI Net', 'CA % GDP']
raw_data = pd.read_csv('raw_data/BoP_UK.csv', index_col=0, parse_dates=True)
data = pd.DataFrame(raw_data.iloc[:240, :4].fillna(0)).astype(float)
data.columns = cols
data.index = pd.date_range('1955-01-01', '2014-12-31', freq='Q')
raw_eur = pd.read_csv('raw_data/EUR_CA.csv', index_col=0, parse_dates=True)
raw_eur = raw_eur[::-1]
raw_eur.index = pd.date_range('1999-01-01', '2015-03-01', freq='M')
raw_eur.index.name = 'Date'
raw_eur = raw_eur.resample('Q', how='sum')
data_eur_gdp_q = pd.read_csv('raw_data/MACRO_DATA.csv', index_col=0, parse_dates=True)['EUR_GDP_Q'].dropna()
data_eur_gdp_q.columns = ['EUR_GDP_Q']
data_eur_gdp_q.index.name = 'Date'
data_eur_gdp_q = data_eur_gdp_q.loc['1999-03-31':]
end_gdp = pd.DataFrame(data=[data_eur_gdp_q.iloc[-1], data_eur_gdp_q.iloc[-1],
data_eur_gdp_q.iloc[-1], data_eur_gdp_q.iloc[-1]],
index=pd.date_range('2014-06-30', '2015-03-31', freq='Q'))
eur_gdp = pd.concat([data_eur_gdp_q, end_gdp])
eur_gdp.columns = ['EUR_CA']
eur_ca = raw_eur.div(eur_gdp)
eur_ca.columns = ['EUR CA']
uk_ca = data['CA % GDP'] / 100.0
uk_ca.columns = ['UK CA']
uk_fa = pd.DataFrame(data.iloc[:, :3])
uk_gdp = pd.read_csv('raw_data/MACRO_DATA.csv', index_col=0, parse_dates=True)['UK_GDP_Q'].dropna()
uk_gdp_final = pd.concat([uk_gdp, pd.DataFrame(data=[uk_gdp.iloc[-1], uk_gdp.iloc[-1]],
index=pd.date_range('2014-09-01', '2014-12-31', freq='Q'))])
uk_fa_gdp = pd.DataFrame(index=uk_gdp_final.index)
uk_fa_gdp['UK FA Net'] = uk_fa['BoP FA Net'] / uk_gdp_final
uk_fa_gdp['UK FA OI'] = uk_fa['BoP FA OI Net'] / uk_gdp_final
uk_fa_gdp['UK FA PI'] = uk_fa['BoP FA PI Net'] / uk_gdp_final
print(eur_gdp)
eur_fa = pd.read_csv('raw_data/EUR_FA.csv', index_col=0, header=0, parse_dates=True).dropna().astype(float)
eur_fa = eur_fa.iloc[::-1]
print(eur_fa)
eur_fa.index = pd.date_range('2009-01-01', '2015-02-28', freq='M')
eur_fa = eur_fa.resample('Q', how='sum')
print(eur_fa)
eur_fa_gdp = | pd.DataFrame(index=eur_gdp.index) | pandas.DataFrame |
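# resample(..., how='sum'), used above, was removed in later pandas versions;
# the equivalent modern call chains the aggregation (illustrative series):
import numpy as np
import pandas as pd

monthly = pd.Series(np.arange(12.0),
                    index=pd.date_range("1999-01-31", periods=12, freq="M"))
quarterly = monthly.resample("Q").sum()
print(quarterly)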
import unittest
import os
from collections import defaultdict
from unittest import mock
import warnings
import pandas as pd
import numpy as np
from dataprofiler.profilers import FloatColumn
from dataprofiler.profilers.profiler_options import FloatOptions
test_root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
class TestFloatColumn(unittest.TestCase):
def test_base_case(self):
data = pd.Series([], dtype=object)
profiler = FloatColumn(data.name)
profiler.update(data)
self.assertEqual(profiler.match_count, 0)
self.assertEqual(profiler.min, None)
self.assertEqual(profiler.max, None)
self.assertEqual(profiler.sum, 0)
self.assertEqual(profiler.mean, 0)
self.assertTrue(profiler.median is np.nan)
self.assertEqual([np.nan], profiler.mode)
self.assertTrue(profiler.variance is np.nan)
self.assertTrue(profiler.skewness is np.nan)
self.assertTrue(profiler.kurtosis is np.nan)
self.assertTrue(profiler.stddev is np.nan)
self.assertIsNone(profiler.histogram_selection)
self.assertEqual(len(profiler.quantiles), 999)
self.assertIsNone(profiler.data_type_ratio)
def test_single_data_variance_case(self):
data = pd.Series([1.5]).apply(str)
profiler = FloatColumn(data.name)
profiler.update(data)
self.assertEqual(profiler.match_count, 1.0)
self.assertEqual(profiler.mean, 1.5)
self.assertTrue(profiler.variance is np.nan)
data = pd.Series([2.5]).apply(str)
profiler.update(data)
self.assertEqual(profiler.match_count, 2)
self.assertEqual(profiler.mean, 2.0)
self.assertEqual(profiler.variance, 0.5)
def test_profiled_precision(self):
"""
Checks whether the precision for the profiler is correct.
:return:
"""
df_1 = pd.Series([0.4, 0.3, 0.1, 0.1, 0.1]).apply(str)
df_2 = pd.Series([0.11, 0.11, 0.12, 2.11]).apply(str)
df_3 = pd.Series([4.114, 3.161, 2.512, 2.131]).apply(str)
df_mix = pd.Series([4.1, '3.', 2.52, 2.13143]).apply(str)
float_profiler = FloatColumn("Name")
float_profiler.update(df_3)
self.assertEqual(4, float_profiler.precision['min'])
self.assertEqual(4, float_profiler.precision['max'])
float_profiler.update(df_2)
self.assertEqual(2, float_profiler.precision['min'])
self.assertEqual(4, float_profiler.precision['max'])
float_profiler.update(df_1)
self.assertEqual(1, float_profiler.precision['min'])
self.assertEqual(4, float_profiler.precision['max'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_mix)
self.assertEqual(1, float_profiler.precision['min'])
self.assertEqual(6, float_profiler.precision['max'])
# edge cases #
# integer with 0s on right and left side
df_ints = pd.Series(['0013245678', '123456700', '0012345600'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_ints)
self.assertEqual(6, float_profiler.precision['min'])
self.assertEqual(8, float_profiler.precision['max'])
# scientific
df_scientific = pd.Series(['1.23e-3', '2.2344', '1.244e4'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_scientific)
self.assertEqual(3, float_profiler.precision['min'])
self.assertEqual(5, float_profiler.precision['max'])
# plus
df_plus = pd.Series(['+1.3e-3', '+2.244', '+1.3324e4'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_plus)
self.assertEqual(2, float_profiler.precision['min'])
self.assertEqual(5, float_profiler.precision['max'])
# minus
df_minus = pd.Series(['-1.3234e-3', '-0.244', '-1.3324e4'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_minus)
self.assertEqual(3, float_profiler.precision['min'])
self.assertEqual(5, float_profiler.precision['max'])
# spaces around values
df_spaces = pd.Series([' -1.3234e-3 ', ' -0.244 '])
float_profiler = FloatColumn("Name")
float_profiler.update(df_spaces)
self.assertEqual(3, float_profiler.precision['min'])
self.assertEqual(5, float_profiler.precision['max'])
# constant precision
df_constant = pd.Series(['1.34', '+1.23e-4', '00101',
'+100.', '0.234', '-432', '.954',
'+.342', '-123e1', '23.1'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_constant)
self.assertEqual(3, float_profiler.precision['min'])
self.assertEqual(3, float_profiler.precision['max'])
self.assertEqual(3, float_profiler.precision['mean'])
self.assertEqual(10, float_profiler.precision['sample_size'])
self.assertEqual(0, float_profiler.precision['var'])
self.assertEqual(0, float_profiler.precision['std'])
# random precision
df_random = pd.Series(['+ 9', '-.3', '-1e-3', '3.2343', '0',
'1230', '0.33', '4.3', '302.1', '-4.322'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_random)
self.assertEqual(0, float_profiler.precision['min'])
self.assertEqual(5, float_profiler.precision['max'])
self.assertEqual(2.4444, float_profiler.precision['mean'])
self.assertEqual(9, float_profiler.precision['sample_size'])
self.assertEqual(2.7778, float_profiler.precision['var'])
self.assertEqual(1.6667, float_profiler.precision['std'])
# Ensure order doesn't change anything
df_random_order = pd.Series(['1230', '0.33', '4.3', '302.1', '-4.322',
'+ 9', '-.3', '-1e-3', '3.2343', '0'])
float_profiler_order = FloatColumn("Name")
float_profiler_order.update(df_random)
self.assertDictEqual(
float_profiler.precision, float_profiler_order.precision
)
# check to make sure all formats of precision are correctly predicted
samples = [
# value, min expected precision
['10.01', 4],
['.01', 1],
['0.01', 1],
['-0.01', 1],
['+0.01', 1],
[' +0.013', 2],
[' -1.3234e-3 ', 5],
[' 0012345600 ', 6],
[' 0012345600. ', 8],
[' -0012345600. ', 8],
]
for sample in samples:
df_series = pd.Series([sample[0]])
min_expected_precision = sample[1]
precision = FloatColumn._get_float_precision(df_series)
self.assertEqual(min_expected_precision, precision['min'],
msg='Errored for: {}'.format(sample[0]))
def test_profiled_min(self):
# test with multiple values
data = np.linspace(-5, 5, 11)
df = pd.Series(data).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df[1:])
self.assertEqual(profiler.min, -4)
profiler.update(df)
self.assertEqual(profiler.min, -5)
profiler.update(pd.Series(['-4']))
self.assertEqual(profiler.min, -5)
# empty data
data = pd.Series([], dtype=object)
profiler = FloatColumn(data.name)
profiler.update(data)
self.assertEqual(profiler.min, None)
# data with None value
df = pd.Series([2.0, 3.0, None, np.nan]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.min, 2.0)
# data with one value
df = pd.Series([2.0]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.min, 2.0)
# data with unique value
df = pd.Series([2.0, 2.0, 2.0, 2.0, 2.0]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.min, 2.0)
# data with unique value as zero
df = pd.Series([0.0, 0.0, 0.0, 0.0, 0.0]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.min, 0.0)
def test_profiled_max(self):
data = np.linspace(-5, 5, 11)
df = pd.Series(data).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df[:-1])
self.assertEqual(profiler.max, 4)
profiler.update(df)
self.assertEqual(profiler.max, 5)
profiler.update( | pd.Series(['4']) | pandas.Series |
#!/usr/bin/env python
from collections import defaultdict
import math
import numpy as np
import os
import pandas as pd
import pickle
import pysam
import re
import sys
def get_gene_id(row):
# return row["attribute"].split(";")[0].split()[1][1:-1]
if "gene_name" in row["attribute"]:
return row["attribute"].split("gene_name")[-1].split('"')[1]
elif ";gene=" in row["attribute"]:
return row["attribute"].split(";gene=")[-1].split(";")[0]
def modify_refnames(CI, gtf_file, stranded_library):
gtf_df = pd.read_csv(
gtf_file,
sep="\t",
names=[
"seqname",
"source",
"feature",
"start",
"end",
"score",
"strand",
"frame",
"attribute",
],
comment="#",
)
gtf_df["gene_name"] = gtf_df.apply(get_gene_id, axis=1)
gtf_df = gtf_df[["seqname", "strand", "gene_name"]]
gene_strand_info = gtf_df.drop_duplicates().reset_index(drop=True)
swap_names = False
CI["HIR1B"] = CI["HIR1A"]
# CI = pd.read_csv("/oak/stanford/groups/horence/Roozbeh/single_cell_project/output/HLCA_171205_10X_cSM_10_cJOM_10_aSJMN_0_cSRGM_0/P1_3_S1_L001/test_class_input.tsv","\t")
CI_new = CI.drop_duplicates("refName_ABR1")
CI_new["geneR1A"] = CI_new["geneR1A"].fillna("")
CI_new["geneR1B"] = CI_new["geneR1B"].fillna("")
CI_new.loc[CI_new["fileTypeR1"] == "Aligned", "read_strandR1B"] = CI_new[
CI_new["fileTypeR1"] == "Aligned"
]["read_strandR1A"]
# CI_new["read_strandR1A_orig"] = CI_new["read_strandR1A"]
# CI_new["read_strandR1B_orig"] = CI_new["read_strandR1B"]
CI_new["gene_strandR1A"] = (
CI_new["refName_ABR1"].astype(str).str.split("|").str[0].str.split(":").str[-1]
)
CI_new["gene_strandR1B"] = (
CI_new["refName_ABR1"].astype(str).str.split("|").str[1].str.split(":").str[-1]
)
CI_new["numgeneR1A"] = (
CI_new["geneR1A"].astype(str).str.split(",").str.len()
) # .astype("Int32") # the number of overlapping genes on the R1A side
CI_new[["numgeneR1A"]] = CI_new[["numgeneR1A"]].fillna(0)
CI_new["numgeneR1B"] = (
CI_new["geneR1B"].astype(str).str.split(",").str.len()
) # .astype("Int32") # the number of overlapping genes on the R1B side
CI_new[["numgeneR1B"]] = CI_new[["numgeneR1B"]].fillna(0)
# display(CI_new[CI_new["id"] == "A00111:88:H55NYDMXX:1:1101:15365:8469_TATCAGGCATTATCTC_GCAACGGCAG"])
weird_genes = ["SNORA", "RP11", "RP4-", "SCARNA", "DLEU2", "SNORD", "CTSLP2"]
for weird_gene in weird_genes:
for suff in ["A", "B"]:
ind = CI_new[
(
(CI_new["numgeneR1" + suff] > 2)
& (
CI_new["geneR1" + suff]
.astype(str)
.str.contains(weird_gene, na=False)
)
)
| (
(CI_new["numgeneR1" + suff] > 1)
& ~(CI_new["gene_strandR1" + suff] == "?")
& (
CI_new["geneR1" + suff]
.astype(str)
.str.contains(weird_gene, na=False)
)
)
].index
CI_new.loc[ind, "geneR1" + suff] = (
CI_new.loc[ind, "geneR1" + suff]
.astype(str)
.str.replace("{}[^,]*[,]".format(weird_gene), "", regex=True)
.astype(str)
.str.replace(",{}.*".format(weird_gene), "")
)
CI_new.loc[ind, "numgeneR1" + suff] = (
CI_new.loc[ind, "geneR1" + suff].astype(str).str.split(",").str.len()
) # .astype("Int32")
CI_new["shared_gene"] = [
",".join([x for x in a.split(",") if x in b.split(",")])
for a, b in zip(CI_new["geneR1A"], CI_new["geneR1B"])
]
# display(CI_new[CI_new["id"] == "A00111:88:H55NYDMXX:1:1101:15365:8469_TATCAGGCATTATCTC_GCAACGGCAG"])
CI_new["num_shared_genes"] = (
CI_new["shared_gene"].astype(str).str.split(",").str.len()
)
CI_new.loc[CI_new["shared_gene"] == "", "num_shared_genes"] = 0
ind = CI_new[
(CI_new["num_shared_genes"] > 0)
& ((CI_new["numgeneR1A"] > 1) | (CI_new["numgeneR1B"] > 1))
].index
# display(CI_new.loc[[67],"geneR1A"])
CI_new.loc[ind, "geneR1A"] = (
CI_new.loc[ind]["shared_gene"].astype(str).str.split(",").str[-1]
)
CI_new.loc[ind, "geneR1B"] = (
CI_new.loc[ind]["shared_gene"].astype(str).str.split(",").str[-1]
)
CI_new["geneR1A_uniq"] = CI_new["geneR1A"]
CI_new["geneR1B_uniq"] = CI_new["geneR1B"]
# display(CI_new[CI_new["id"] == "A00111:88:H55NYDMXX:1:1101:15365:8469_TATCAGGCATTATCTC_GCAACGGCAG"])
ind = CI_new[(CI_new["numgeneR1A"] > 1) & (CI_new["num_shared_genes"] == 0)].index
CI_new.loc[ind, "geneR1A_uniq"] = (
CI_new.loc[ind]["geneR1A"].astype(str).str.split(",").str[-1]
)
ind = CI_new[(CI_new["numgeneR1B"] > 1) & (CI_new["num_shared_genes"] == 0)].index
CI_new.loc[ind, "geneR1B_uniq"] = (
CI_new.loc[ind]["geneR1B"].astype(str).str.split(",").str[-1]
)
for let in ["A", "B"]:
CI_new = CI_new.merge(
gene_strand_info,
how="left",
left_on=["geneR1{}_uniq".format(let), "chrR1{}".format(let)],
right_on=["gene_name", "seqname"],
)
CI_new = CI_new.rename(columns={"strand": "gene_strandR1{}_new".format(let)})
CI_new = CI_new.drop(["gene_name", "seqname"], axis=1)
# if the library is stranded, we want to keep the read strand; the genes should all come from that strand as well (when not, it seems to be due to strand ambiguity, i.e. the gene appears on both strands)
if stranded_library:
for let in ["A", "B"]:
CI_new["gene_strandR1{}_new".format(let)] = CI_new[
"read_strandR1{}".format(let)
]
ind = CI_new[
(
(
(
(CI_new["gene_strandR1A_new"] != CI_new["read_strandR1A"])
& (CI_new["gene_strandR1B_new"] == CI_new["read_strandR1B"])
)
| (
(CI_new["gene_strandR1A_new"] == CI_new["read_strandR1A"])
& (~CI_new["gene_strandR1B_new"].isna())
& (CI_new["gene_strandR1B_new"] != CI_new["read_strandR1B"])
)
)
& (CI_new["gene_strandR1A"] == "?")
& (CI_new["num_shared_genes"] == 0)
)
& (CI_new["numgeneR1A"] > 1)
].index
CI_new.loc[ind, "geneR1A_uniq"] = (
CI_new.loc[ind]["geneR1A"].astype(str).str.split(",").str[-2]
)
CI_new = CI_new.drop(["gene_strandR1A_new"], axis=1)
CI_new = CI_new.merge(
gene_strand_info,
how="left",
left_on=["geneR1A_uniq", "chrR1A"],
right_on=["gene_name", "seqname"],
)
CI_new = CI_new.drop(["gene_name", "seqname"], axis=1)
CI_new = CI_new.rename(columns={"strand": "gene_strandR1A_new"})
ind = CI_new[
(
(
(CI_new["gene_strandR1A_new"] != CI_new["read_strandR1A"])
& (~CI_new["gene_strandR1A_new"].isna())
& (CI_new["gene_strandR1B_new"] == CI_new["read_strandR1B"])
)
| (
(CI_new["gene_strandR1A_new"] == CI_new["read_strandR1A"])
& (CI_new["gene_strandR1B_new"] != CI_new["read_strandR1B"])
)
)
& (CI_new["gene_strandR1B"] == "?")
& (CI_new["num_shared_genes"] == 0)
& (CI_new["numgeneR1B"] > 1)
].index
CI_new.loc[ind, "geneR1B_uniq"] = (
CI_new.loc[ind]["geneR1B"].astype(str).str.split(",").str[-2]
)
CI_new = CI_new.drop(["gene_strandR1B_new"], axis=1)
CI_new = CI_new.merge(
gene_strand_info,
how="left",
left_on=["geneR1B_uniq", "chrR1B"],
right_on=["gene_name", "seqname"],
)
CI_new = CI_new.rename(columns={"strand": "gene_strandR1B_new"})
CI_new = CI_new.drop(["gene_name", "seqname"], axis=1)
if stranded_library:
for let in ["A", "B"]:
CI_new["gene_strandR1{}_new".format(let)] = CI_new[
"read_strandR1{}".format(let)
]
reverse = {"+": "-", "-": "+"}
same = {"-": "-", "+": "+"}
ind = CI_new[
(CI_new["gene_strandR1B_new"].isna())
& (CI_new["gene_strandR1A_new"] == CI_new["read_strandR1A"])
].index
CI_new.loc[ind, "gene_strandR1B_new"] = CI_new.loc[ind]["read_strandR1B"].map(same)
ind = CI_new[
(CI_new["gene_strandR1B_new"].isna())
& (CI_new["gene_strandR1A_new"] != CI_new["read_strandR1A"])
& (~CI_new["gene_strandR1A_new"].isna())
].index
CI_new.loc[ind, "gene_strandR1B_new"] = CI_new.loc[ind]["read_strandR1B"].map(
reverse
)
ind = CI_new[
(CI_new["gene_strandR1A_new"].isna())
& (CI_new["gene_strandR1B_new"] == CI_new["read_strandR1B"])
].index
CI_new.loc[ind, "gene_strandR1A_new"] = CI_new.loc[ind]["read_strandR1A"].map(same)
ind = CI_new[
(CI_new["gene_strandR1A_new"].isna())
& (CI_new["gene_strandR1B_new"] != CI_new["read_strandR1B"])
& (~CI_new["gene_strandR1B_new"].isna())
].index
CI_new.loc[ind, "gene_strandR1A_new"] = CI_new.loc[ind]["read_strandR1A"].map(
reverse
)
CI_new["refName_newR1"] = ""
CI_new["geneR1B_uniq"].fillna("", inplace=True)
CI_new["geneR1A_uniq"].fillna("", inplace=True)
CI_new["reverse"] = False
ind = CI_new[
(CI_new["fileTypeR1"] == "Aligned")
& (CI_new["gene_strandR1A_new"] == "-")
& (CI_new["juncPosR1A"] < CI_new["juncPosR1B"])
].index
CI_new.loc[ind, "refName_newR1"] = (
CI_new.loc[ind]["chrR1B"]
+ ":"
+ CI_new.loc[ind]["geneR1B_uniq"].astype(str)
+ ":"
+ CI_new.loc[ind]["juncPosR1B"].astype(str)
+ ":"
+ CI_new.loc[ind]["gene_strandR1B_new"]
+ "|"
+ CI_new.loc[ind]["chrR1A"]
+ ":"
+ CI_new.loc[ind]["geneR1A_uniq"].astype(str)
+ ":"
+ CI_new.loc[ind]["juncPosR1A"].astype(str)
+ ":"
+ CI_new.loc[ind]["gene_strandR1A_new"]
)
CI_new.loc[ind, "reverse"] = True
name_swap = {}
for c in CI_new.columns:
if "R1A" in c:
name_swap[c] = c.replace("R1A", "R1B")
name_swap[c.replace("R1A", "R1B")] = c
# CI_new = pickle.load(open("/scratch/PI/horence/JuliaO/single_cell/STAR_wrapper/output/test/CI_new.pkl","rb"))
if swap_names:
CI_new.loc[ind] = CI_new.loc[ind].rename(columns=name_swap)
ind = CI_new[
(CI_new["fileTypeR1"] == "Aligned") & (CI_new["gene_strandR1A_new"] == "+")
].index
CI_new.loc[ind, "refName_newR1"] = (
CI_new.loc[ind]["chrR1A"]
+ ":"
+ CI_new.loc[ind]["geneR1A_uniq"]
+ ":"
+ CI_new.loc[ind]["juncPosR1A"].astype(str)
+ ":"
+ CI_new.loc[ind]["gene_strandR1A_new"]
+ "|"
+ CI_new.loc[ind]["chrR1B"]
+ ":"
+ CI_new.loc[ind]["geneR1B_uniq"]
+ ":"
+ CI_new.loc[ind]["juncPosR1B"].astype(str)
+ ":"
+ CI_new.loc[ind]["gene_strandR1B_new"]
)
ind = CI_new[
(CI_new["fileTypeR1"] == "Chimeric")
& (CI_new["gene_strandR1A_new"] != CI_new["read_strandR1A"])
& (CI_new["gene_strandR1B_new"] != CI_new["read_strandR1B"])
].index
CI_new.loc[ind, "refName_newR1"] = (
CI_new.loc[ind]["chrR1B"]
+ ":"
+ CI_new.loc[ind]["geneR1B_uniq"]
+ ":"
+ CI_new.loc[ind]["juncPosR1B"].astype(str)
+ ":"
+ CI_new.loc[ind]["gene_strandR1B_new"]
+ "|"
+ CI_new.loc[ind]["chrR1A"]
+ ":"
+ CI_new.loc[ind]["geneR1A_uniq"]
+ ":"
+ CI_new.loc[ind]["juncPosR1A"].astype(str)
+ ":"
+ CI_new.loc[ind]["gene_strandR1A_new"]
)
CI_new.loc[ind, "reverse"] = True
if swap_names:
CI_new.loc[ind] = CI_new.loc[ind].rename(columns=name_swap)
ind = CI_new[
(CI_new["fileTypeR1"] == "Chimeric")
& (
(CI_new["gene_strandR1A_new"] == CI_new["read_strandR1A"])
| (CI_new["gene_strandR1B_new"] == CI_new["read_strandR1B"])
)
].index
CI_new.loc[ind, "refName_newR1"] = (
CI_new.loc[ind]["chrR1A"]
+ ":"
+ CI_new.loc[ind]["geneR1A_uniq"].astype(str)
+ ":"
+ CI_new.loc[ind]["juncPosR1A"].astype(str)
+ ":"
+ CI_new.loc[ind]["gene_strandR1A_new"]
+ "|"
+ CI_new.loc[ind]["chrR1B"]
+ ":"
+ CI_new.loc[ind]["geneR1B_uniq"].astype(str)
+ ":"
+ CI_new.loc[ind]["juncPosR1B"].astype(str)
+ ":"
+ CI_new.loc[ind]["gene_strandR1B_new"]
)
ind1 = CI_new[
(CI_new["refName_newR1"] == "") | (CI_new["refName_newR1"].isna())
].index # this ind1 is used to simply replace refName_newR1 with the refName_ABR1
CI_new.loc[ind1, "refName_newR1"] = CI_new.loc[ind1]["refName_ABR1"]
ref_dict = pd.Series(
CI_new.refName_newR1.values, index=CI_new.refName_ABR1
).to_dict()
rev_dict = | pd.Series(CI_new.reverse.values, index=CI_new.refName_ABR1) | pandas.Series |
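# The pd.Series(values, index=keys).to_dict() idiom used for ref_dict and
# rev_dict above is a compact way to build a lookup table from two columns
# (illustrative frame, not the real class input):
import pandas as pd

demo = pd.DataFrame({"refName_ABR1": ["a", "b"], "refName_newR1": ["A", "B"]})
mapping = pd.Series(demo.refName_newR1.values, index=demo.refName_ABR1).to_dict()
print(mapping)  # {'a': 'A', 'b': 'B'}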
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pandas
from .mapreducefunction import MapReduceFunction
from modin.utils import try_cast_to_pandas, hashable
class GroupbyReduceFunction(MapReduceFunction):
@classmethod
def call(cls, map_func, reduce_func=None, **call_kwds):
"""
Build GroupbyReduce function.
Parameters
----------
map_func: str, callable or dict,
If 'str' this parameter will be treated as a function name to register,
so 'map_func' and 'reduce_func' will be grabbed from 'groupby_reduce_functions'.
If dict or callable then this will be treated as a function to apply to each group
at the map phase.
reduce_func: callable or dict (optional),
A function to apply to each group at the reduce phase. If not specified
will be set the same as 'map_func'.
**call_kwds: kwargs,
Kwargs that will be passed to the returned function.
Returns
-------
Callable,
Function that executes GroupBy aggregation with MapReduce algorithm.
"""
if isinstance(map_func, str):
def build_fn(name):
return lambda df, *args, **kwargs: getattr(df, name)(*args, **kwargs)
map_func, reduce_func = map(build_fn, groupby_reduce_functions[map_func])
if reduce_func is None:
reduce_func = map_func
assert not (
isinstance(map_func, dict) ^ isinstance(reduce_func, dict)
) and not (
callable(map_func) ^ callable(reduce_func)
), "Map and reduce functions must be either both dict or both callable."
return lambda *args, **kwargs: cls.caller(
*args, map_func=map_func, reduce_func=reduce_func, **kwargs, **call_kwds
)
@classmethod
def map(
cls,
df,
other=None,
axis=0,
by=None,
groupby_args=None,
map_func=None,
map_args=None,
drop=False,
):
# Set `as_index` to True to track the metadata of the grouping object
# It is used to make sure that between phases we are constructing the
# right index and placing columns in the correct order.
groupby_args["as_index"] = True
groupby_args["observed"] = True
if other is not None:
# Other is a broadcasted partition that represents 'by' columns
# Concatenate it with 'df' to group on its columns names
other = other.squeeze(axis=axis ^ 1)
if isinstance(other, pandas.DataFrame):
df = pandas.concat(
[df] + [other[[o for o in other if o not in df]]],
axis=1,
)
other = list(other.columns)
by_part = other
else:
by_part = by
apply_func = cls.try_filter_dict(map_func, df)
result = apply_func(
df.groupby(by=by_part, axis=axis, **groupby_args), **map_args
)
# The result may not always be a DataFrame, so wrap it in one before returning
return | pandas.DataFrame(result) | pandas.DataFrame |
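# An illustration in plain pandas (not Modin internals) of the MapReduce
# groupby pattern this class implements: each partition is partially
# aggregated (map), then the partial results are combined by key (reduce).
import pandas as pd

partitions = [
    pd.DataFrame({"key": ["a", "b", "a"], "x": [1, 2, 3]}),
    pd.DataFrame({"key": ["b", "a"], "x": [4, 5]}),
]
mapped = [part.groupby("key").sum() for part in partitions]   # map phase
reduced = pd.concat(mapped).groupby(level="key").sum()        # reduce phase
print(reduced)  # a -> 9, b -> 6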
# import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, cross_val_score, KFold
from sklearn.metrics import accuracy_score, recall_score, precision_score, roc_auc_score
from sklearn.metrics import confusion_matrix, plot_confusion_matrix
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.model_selection import cross_val_score, cross_validate
from sklearn.naive_bayes import ComplementNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from xgboost import XGBClassifier
# define functions
def import_and_decode(file_name):
"""Imports sas files and converts columns of type 'bytes' to 'utf-8'.
Parameters
----------
file_name : String. File path and name with .xpt extension (sas file).
Returns
-------
DataFrame"""
df = pd.read_sas(file_name)
for col in df:
if df[col].dtype == 'object':
df[col] = df[col].map(lambda x: x.decode("utf-8"))
return df
def order_features(weights, X_train):
"""Helper function to put model coefficients in order according to the absolute value of their weights.
Parameters
----------
weights : ndarray of shape (1, n_features), for example the 'coef_' attribute of sklearn models.
X: DataFrame of predictors used to train the model.
Returns
-------
DataFrame of coefficients ordered from greatest to least weight"""
coef_dict = {}
for n, c in enumerate(X_train.columns):
coef_dict[c]=round(weights[0][n],4)
sorted_coef_dict = {k: v for k, v in sorted(coef_dict.items(), key=lambda item: item[1], reverse=True)}
df = pd.DataFrame.from_dict(sorted_coef_dict, orient='index', columns=['weight'])
df['abs_weight']=np.abs(df['weight'])
weights_df = df.sort_values(by = 'abs_weight', ascending=False)
return weights_df
def order_features_tree(weights, X_train):
"""Helper function to put model coefficients in order according to the absolute value of their weights.
Parameters
----------
weights : ndarray of shape (n_features,), for example the 'feature_importances_' attribute of tree-based sklearn models.
X: DataFrame of predictors used to train the model.
Returns
-------
DataFrame of coefficients ordered from greatest to least weight"""
coef_dict = {}
for n, c in enumerate(X_train.columns):
coef_dict[c]=round(weights[n],4)
sorted_coef_dict = {k: v for k, v in sorted(coef_dict.items(), key=lambda item: item[1], reverse=True)}
df = pd.DataFrame.from_dict(sorted_coef_dict, orient='index', columns=['weight'])
df['abs_weight']=np.abs(df['weight'])
weights_df = df.sort_values(by = 'abs_weight', ascending=False)
return weights_df
def k_fold_validator(X, y, classifier, cv=5):
"""Uses k-fold cross-validation to calculate the mean recall, precision, and ROC-AUC scores
for train and test sets for a model. Also prints the weights of the model coefficients
and plots a confusion matrix for each test set.
Parameters
----------
X : DataFrame, Predictors
y : series, Labels assigned
classifier : An instance of a classifier.
cv : int, How many folds to use when cross-validating. Default = 5.
Returns
-------
No objects returned.
Prints mean recall and precision scores for train and test sets.
Prints a list of the model coefficients and their weights.
Plots a confusion matrix for each test set."""
scaler = MinMaxScaler()
X_scaled = scaler.fit_transform(X)
X_scaled = pd.DataFrame(X_scaled, index=X.index, columns=X.columns)
kf = KFold(n_splits=cv, random_state=807, shuffle=True)
clf = classifier
train_recall_scores = []
train_precision_scores = []
train_roc_auc_scores = []
test_recall_scores = []
test_precision_scores = []
test_roc_auc_scores = []
print('Classifier:', clf)
print('Cross-validation folds:', cv)
for train_index, test_index in kf.split(X_scaled):
X_tr, X_test = X_scaled.iloc[train_index], X_scaled.iloc[test_index]
y_tr, y_test = y.iloc[train_index], y.iloc[test_index]
clf.fit(X_tr, y_tr)
y_pred_tr = clf.predict(X_tr)
y_pred_test = clf.predict(X_test)
train_recall_scores.append(recall_score(y_tr, y_pred_tr, pos_label=1.0))
train_precision_scores.append(precision_score(y_tr, y_pred_tr, pos_label=1.0))
train_roc_auc_scores.append(roc_auc_score(y_tr, y_pred_tr))
test_recall_scores.append(recall_score(y_test, y_pred_test, pos_label=1.0))
test_precision_scores.append(precision_score(y_test, y_pred_test, pos_label=1.0))
test_roc_auc_scores.append(roc_auc_score(y_test, y_pred_test))
plot_confusion_matrix(clf, X_test, y_test)
plt.title('Error Matrix - Test Set', fontsize=18, pad=15)
plt.xticks(ticks=(0,1), labels=['Not \nHospitalized', 'Hospitalized'], fontsize=12)
plt.yticks(ticks=(0,1), labels=['Not \nHospitalized', 'Hospitalized'], fontsize=12)
plt.xlabel('Predicted Label', labelpad=15)
plt.ylabel('True Label', labelpad=15)
print('\n')
print('Train mean recall: {} +/- {}'.format(round(pd.Series(train_recall_scores).mean(), 2),
round(pd.Series(train_recall_scores).std(), 2)))
print('Train mean precision: {} +/- {}'.format(round(pd.Series(train_precision_scores).mean(), 2),
round(pd.Series(train_precision_scores).std(), 2)))
print('Train mean ROC-AUC: {} +/- {}'.format(round(pd.Series(train_roc_auc_scores).mean(), 2),
round( | pd.Series(train_roc_auc_scores) | pandas.Series |
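# A hypothetical, self-contained usage of k_fold_validator (synthetic data;
# the real hospitalization dataset and its column names are not shown here):
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression

    X_arr, y_arr = make_classification(n_samples=500, n_features=10,
                                       weights=[0.8, 0.2], random_state=807)
    X_demo = pd.DataFrame(X_arr, columns=[f"f{i}" for i in range(10)])
    y_demo = pd.Series(y_arr)
    k_fold_validator(X_demo, y_demo, LogisticRegression(max_iter=1000), cv=5)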