| prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90) |
|---|---|---|
import pandas as pd
from pathlib import Path
from .utils import logging, get_cols, DATABASE
logger = logging.getLogger(__name__)
cwd = Path(__file__).parent
def main(_, name, level, row):
con = f'postgresql:///{DATABASE}'
file = (cwd / f'../../../inputs/cod/{name}.xlsx')
sheets = pd.ExcelFile(file, engine='openpyxl')
import pandas as pd
import numpy as np
from pathlib import Path
from datetime import datetime as dt
from datetime import timedelta as td
import sys
import interpolate_matrix_test as interpolate_helpers
###############################
# 1. Set parameters
# 2. READ IN FILES
# 3. SET DATA OBJECTS
# 4. WRITE OUT RESULTS TO REQUIRED TXT AND CSV FILES REQUIRED FOR OPTIMISATION
###############################
###############################
#TODO:
# 1. Change .xlsx files to .csv files
# 2. Fix folder reference files
# 3. interpolate_helpers.output_write_df writes multiple dfs when > 1 columns in 'input_df'
###############################
#### SET PARAMETERS
year_length = 8760
interval_length = 1
repeat_ren_signals = 20
repeat_other_signals = 20
if interval_length == 1:
interval_len = "_hourly"
elif interval_length == 4:
interval_len = "_15min"
elif interval_length == 12:
interval_len = "_5min"
month_vec = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
dir2 = 'C:/Users/w47147/misc_code/RODeO-master/RODeO-master/Create_tariff_files/Data_files/'
dir1 = dir2 + "CSV_data/"
dir0 = dir2 + "TXT_files/"
#### READ IN FILES
num1int = pd.read_csv(dir2 + "CSV_data/e_tou_8760.csv")
num1A = pd.read_csv(dir2 + "CSV_data/e_prices.csv")
num6A = pd.read_csv(dir1 + "d_tou_prices.csv")
numlBint = pd.read_excel(dir1 + "GAMS_Energy_Sale.xlsx", skiprows = 1, sheet_name = 'Sheet1')
num1B = interpolate_helpers.interpolate_matrix(numlBint, year_length, interval_length, "repeat")
numlBint = pd.read_excel(dir1 + "GAMS_Energy_Purchase.xlsx", skiprows = 1, sheet_name = 'Sheet1')
num1BB = interpolate_helpers.interpolate_matrix(numlBint, year_length, interval_length, "repeat")
numlBint = pd.read_excel(dir1 + "GAMS_AS.xlsx", skiprows = 1, sheet_name = 'Sheet1')
num2 = interpolate_helpers.interpolate_matrix(numlBint, year_length, interval_length, "repeat")
numlBint = pd.read_excel(dir1 + "GAMS_FUEL.xlsx", skiprows = 1, sheet_name = 'Sheet1')
num3 = interpolate_helpers.interpolate_matrix(numlBint, year_length, interval_length, "repeat")
numlBint = pd.read_excel(dir1 + "GAMS_renewables.xlsx", skiprows = 1, sheet_name = 'Sheet1')
num4 = interpolate_helpers.interpolate_matrix(numlBint, year_length, interval_length, "linear")
numlBint = pd.read_excel(dir1 + "GAMS_additional_load_profiles.xlsx", skiprows = 1, sheet_name = 'Sheet1')
num9 = interpolate_helpers.interpolate_matrix(numlBint, year_length, interval_length, "repeat")
numlBint = pd.read_excel(dir1 + "GAMS_product_price.xlsx", skiprows = 1, sheet_name = 'Sheet1')
num10A = interpolate_helpers.interpolate_matrix(numlBint, year_length, interval_length, "repeat")
numlBint = pd.read_excel(dir1 + "GAMS_product_consumed.xlsx", skiprows = 1, sheet_name = 'Sheet1')
for column in numlBint.columns:
if column not in ["Date", "Interval"]:
numlBint[column] = numlBint[column]/sum(numlBint[column]) * year_length/24
num11A = interpolate_helpers.interpolate_matrix(numlBint, year_length, interval_length, "repeat")
numlBint = pd.read_excel(dir1 + "GAMS_Max_input_cap.xlsx", skiprows = 1, sheet_name = 'Sheet1')
num12A = interpolate_helpers.interpolate_matrix(numlBint, year_length, interval_length, "repeat")
numlBint = pd.read_excel(dir1 + "GAMS_Max_output_cap.xlsx", skiprows = 1, sheet_name = 'Sheet1')
num13A = interpolate_helpers.interpolate_matrix(numlBint, year_length, interval_length, "repeat")
num1int_ = num1int.copy()
for column in num1int_.columns:
for price_integer in num1A.index:
num1int_.loc[num1int_[column] == price_integer, column] = num1A.loc[price_integer][str(float(column))]
num1 = interpolate_helpers.interpolate_matrix(num1int, year_length, interval_length, "repeat")
num1_ = interpolate_helpers.interpolate_matrix(num1int_, year_length, interval_length, "repeat")
num1A1 = num1_.fillna(0).copy()
num5 = pd.read_csv(dir2 + "CSV_data/d_flat_prices.csv")
num8 = pd.read_csv(dir1 + "fixed_charge.csv")
numA = pd.read_csv(dir1 + "tariff_property_list.csv")
Scenarios1 = numA['label']
num5int = pd.read_csv(dir2 + "CSV_data/d_flat_prices.csv")
import numpy as np
import arviz as az
import seaborn as sns
import pandas as pd
import pickle as pkl
import importlib
import anndata as ad
import ast
import matplotlib.pyplot as plt
from scdcdm.util import result_classes as res
from scdcdm.util import multi_parameter_sampling as mult
from scdcdm.util import multi_parameter_analysis_functions as ana
from scdcdm.util import data_generation as gen
from scdcdm.model import other_models as om
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', 500)
sns.set_style("ticks")
sns.set_context("paper", font_scale=1.4)
result_path = "C:\\Users\\Johannes\\Documents\\Uni\\Master's_Thesis\\compositionalDiff-johannes_tests_2\\data\\benchmark_results"
#%%
importlib.reload(ana)
# Get data:
# all_study_params: One line per data point
# all_study_params_agg: Combine identical simulation parameters
path = "C:\\Users\\Johannes\\Documents\\Uni\\Master's_Thesis\\compositionalDiff-johannes_tests_2\\data\\model_comparison"
results, all_study_params, all_study_params_agg = ana.multi_run_study_analysis_prepare(path)
#%%
all_study_params_agg = ana.get_scores(all_study_params_agg, models=4)
all_study_params = ana.get_scores(all_study_params, models=4)
# Set deprecated cases for poisson model (1 sample per group) to mcc=0
all_study_params.loc[all_study_params["n_samples"] == "[1, 1]", "mcc_0"] = 0
all_study_params_agg.loc[all_study_params_agg["n_samples"] == "[1, 1]", "mcc_0"] = 0
#%%
# Find relations between numerical values and composition/increase vectors
b = []
for y1_0 in [200, 400, 600, 800, 1000]:
b_i = np.round(gen.counts_from_first(y1_0, 5000, 5), 3)
b.append(np.round(np.log(b_i / 5000), 2))
print(b)
b_counts = dict(zip([b_i[0] for b_i in b], [200, 400, 600, 800, 1000]))
b2 = []
for y1_0 in [200, 400, 600, 800, 1000]:
b_i = np.round(gen.counts_from_first(y1_0, 5000, 5), 3)
b2.append(b_i)
b_w_dict = {}
w_rel_dict = {}
i = 0
w_all = []
for b_i in b2:
b_t = np.round(np.log(b_i / 5000), 3)
print(b_t)
w_d = {}
for change in [b_i[0]/3, b_i[0]/2, b_i[0], b_i[0]*2, b_i[0]*3]:
_, w = gen.b_w_from_abs_change(b_i, change, 5000)
w_0 = np.round(w[0], 3)
w_d[w_0] = change
rel_change = np.round(change/b_i[0], 2)
w_rel_dict[w_0] = rel_change
b_w_dict[b_t[0]] = w_d
i += 1
print(b_w_dict)
#%%
# Get some new metrics that make plotting more convenient
all_study_params_agg["n_controls"] = [ast.literal_eval(x)[0] for x in all_study_params_agg["n_samples"].tolist()]
all_study_params_agg["n_cases"] = [ast.literal_eval(x)[1] for x in all_study_params_agg["n_samples"].tolist()]
all_study_params_agg["n_total"] = all_study_params_agg["n_total"].astype("float")
all_study_params_agg["w"] = [ast.literal_eval(x)[0][0] for x in all_study_params_agg["w_true"]]
all_study_params_agg["b_0"] = [ast.literal_eval(x)[0] for x in all_study_params_agg["b_true"]]
all_study_params_agg["b_count"] = [b_counts[np.round(x, 2)] for x in all_study_params_agg["b_0"]]
bs = all_study_params_agg["b_0"].tolist()
ws = all_study_params_agg["w"].tolist()
rels = [0.33, 0.5, 1, 2, 3]
increases = []
rel_changes = []
for i in range(len(bs)):
increases.append(b_w_dict[bs[i]][ws[i]])
rel_changes.append(w_rel_dict[ws[i]])
all_study_params_agg["num_increase"] = increases
all_study_params_agg["rel_increase"] = rel_changes
all_study_params_agg["log_fold_increase"] = np.round(np.log2((all_study_params_agg["num_increase"] +
all_study_params_agg["b_count"]) /
all_study_params_agg["b_count"]), 2)
print(all_study_params_agg)
all_study_params["n_controls"] = [ast.literal_eval(x)[0] for x in all_study_params["n_samples"].tolist()]
all_study_params["n_cases"] = [ast.literal_eval(x)[1] for x in all_study_params["n_samples"].tolist()]
all_study_params["n_total"] = all_study_params["n_total"].astype("float")
all_study_params["w"] = [ast.literal_eval(x)[0][0] for x in all_study_params["w_true"]]
all_study_params["b_0"] = [ast.literal_eval(x)[0] for x in all_study_params["b_true"]]
all_study_params["b_count"] = [b_counts[np.round(x, 2)] for x in all_study_params["b_0"]]
bs = all_study_params["b_0"].tolist()
ws = all_study_params["w"].tolist()
rels = [0.33, 0.5, 1, 2, 3]
increases = []
rel_changes = []
for i in range(len(bs)):
increases.append(b_w_dict[bs[i]][ws[i]])
rel_changes.append(w_rel_dict[ws[i]])
all_study_params["num_increase"] = increases
all_study_params["rel_increase"] = rel_changes
all_study_params["log_fold_increase"] = np.round(np.log2((all_study_params["num_increase"] +
all_study_params["b_count"]) /
all_study_params["b_count"]), 2)
print(all_study_params)
#%%
result_path = "C:\\Users\\Johannes\\Documents\\Uni\\Master's_Thesis\\compositionalDiff-johannes_tests_2\\data\\benchmark_results"
with open(result_path + "\\model_comparison_results_aggregated.pkl", "wb") as f:
pkl.dump(all_study_params_agg, file=f, protocol=4)
with open(result_path + "\\model_comparison_results.pkl", "wb") as f:
pkl.dump(all_study_params, file=f, protocol=4)
#%%
# Convert data from wide to long
models = ["Poisson (Haber et al.)", "Simple DM", "scdcdm", "scDC (SydneyBioX)"]
param_cols = ["b_count", "num_increase", "n_controls", "n_cases", "log_fold_increase"]
metrics = ["tpr", "tnr", "precision", "accuracy", "youden", "f1_score", "mcc"]
final_df_agg = pd.DataFrame()
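# Hedged sketch (not part of the original script): one way the wide-to-long
# conversion could proceed, assuming the per-model metric columns are named
# "<metric>_<model index>" (e.g. "mcc_0", as used above).
for m, model_name in enumerate(models):
    model_cols = [f"{metric}_{m}" for metric in metrics]
    df_m = all_study_params_agg[param_cols + model_cols].copy()
    df_m.columns = param_cols + metrics
    df_m["model"] = model_name
    final_df_agg = pd.concat(
        [final_df_agg,
         df_m.melt(id_vars=param_cols + ["model"], value_vars=metrics,
                   var_name="metric", value_name="value")],
        ignore_index=True)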
'''
Module that contains functions for intergenic mode.
'''
import subprocess
import os
from multiprocessing import Pool
from .misc import load_exp
import functools
import pandas as pd
import numpy as np
'''
Define a function that can get the gene expression given a tag directory, a GTF file, a normalization method, and
strandedness.
'''
def get_gene_exp(args):
tag_directory,gtf_file,norm,stranded,out_file = args
if stranded:
strand = ['+']
else:
strand = ['both']
f = open(out_file,'w')
subprocess.call(['analyzeRepeats.pl',gtf_file,'none',norm,'-strand']+strand+['-count','genes','-d',tag_directory],
stdout=f,stderr=subprocess.PIPE)
f.close()
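'''
Hedged usage sketch (not part of the original module); the paths below are placeholders:
    get_gene_exp(('tag_dirs/sample_1','annotation.gtf','-fpkm',True,'sample_1.fpkm.txt'))
In practice this function is driven by get_multi_gene_exp below, which builds these
argument tuples and maps them over a process pool.
'''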
'''
Define a function that can get the gene expression (both normalized and un-normalized) given a list of tag directories,
a GTF file, and strandedness.
'''
def get_multi_gene_exp(tag_dirs,gtf_file,stranded,out_dir,cpu):
#Format and run commands for getting initial gene expression.
cmds = []
for norm in ['-raw','-fpkm']:
cmds += [(tag_dir,gtf_file,norm,stranded,os.path.join(out_dir,tag_dir.split('/')[-1]+f'.{norm[1:]}.txt'))
for tag_dir in tag_dirs]
pool = Pool(processes=min(len(cmds),cpu))
pool.map(get_gene_exp,cmds)
pool.close()
#Join all of these files together.
raw_dfs = []
fpkm_dfs = []
for y in [x[-1] for x in cmds]:
if y[-7:] == 'raw.txt':
raw_dfs.append(load_exp(y))
os.remove(y)
else:
fpkm_dfs.append(load_exp(y))
os.remove(y)
raw_df = functools.reduce(lambda x,y: pd.merge(x,y,on=['ID','Length']),raw_dfs)
raw_df = raw_df[['ID','Length']+sorted(raw_df.columns[2:])]
fpkm_df = functools.reduce(lambda x,y: pd.merge(x,y,on=['ID','Length']),fpkm_dfs)
fpkm_df = fpkm_df[['ID','Length']+sorted(fpkm_df.columns[2:])]
raw_df.to_csv(os.path.join(out_dir,'gene.exp.raw.txt'),sep='\t',index=False)
fpkm_df.to_csv(os.path.join(out_dir,'gene.exp.fpkm.txt'),sep='\t',index=False)
'''
Define a function that can get the maximum isoform for all genes when given a gene expression file and a
gene-to-transcript mapping.
'''
def get_max_isoform(gene_exp_file,gene_to_transcript_file,out_dir):
#Load gene expression file into dataframe.
gene_exp = pd.read_csv(gene_exp_file,sep='\t')
del gene_exp['Length']
gene_exp = gene_exp.set_index('ID')
#Get max expression.
gene_exp['Max Exp'] = gene_exp.max(axis=1)
#Load gene-to-transcript mapping.
gene_to_transcript = pd.read_csv(gene_to_transcript_file,sep='\t')
#Get maximum expression for each gene.
gene_exp = pd.merge(gene_to_transcript,gene_exp,left_on='Transcript ID',right_index=True)
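    # Hedged continuation (the original snippet is truncated here): keep, for each
    # gene, the transcript whose expression equals the gene maximum. The column name
    # 'Gene ID' and the output file name are assumptions, not taken from the source.
    max_isoform = gene_exp.loc[gene_exp.groupby('Gene ID')['Max Exp'].idxmax()]
    max_isoform.to_csv(os.path.join(out_dir,'max_isoform.txt'),sep='\t',index=False)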
from lib.allgemein import liste_in_floats_umwandeln
import pandas as pd
import untangle
from decimal import *
#written by <NAME>
def get_xml_RecordTime_excitationwl(dateiname):
obj = untangle.parse(dateiname)
RecordTime = obj.XmlMain.Documents.Document['RecordTime']
excitationwl = float(obj.XmlMain.Documents.Document.xDim.Calibration['LaserWave'])
return RecordTime, excitationwl
def get_timestamps(dateiname):
obj = untangle.parse(dateiname)
predf = []
for i in range(0, len(obj.XmlMain.Documents.Document.Data.Frame)):
timestamp = obj.XmlMain.Documents.Document.Data.Frame[i]['TimeStamp']
timestamp = Decimal(timestamp)
predf.append(timestamp)
posi = list(range(0, len(predf), 1))
colunames = []
for i in posi:
colu = 'Frame ' + str(i + 1)
colunames.append(colu)
df = pd.DataFrame(predf, index=colunames, columns=['timestamp'])
df_timestamps = df.transpose()
return df_timestamps
def get_positions(dateiname):
obj = untangle.parse(dateiname)
predf = []
for i in range(0,len(obj.XmlMain.Documents.Document.Data.Frame)):
positions = obj.XmlMain.Documents.Document.Data.Frame[i]['ValuePosition']
z = positions.split(";")
ft = liste_in_floats_umwandeln(z)
predf.append(ft)
posi=list(range(0, len(predf),1))
colunames = []
for i in posi:
colu = 'Frame ' + str(i + 1)
colunames.append(colu)
df = pd.DataFrame(predf, index=colunames, columns=['x [µm]','y [µm]','z [µm]'])
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.2'
# jupytext_version: 1.2.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Dynamic Allocation
#
# We really don't save money this way (defined amounts every month). In reality we would be better to set up a budget and allocate any leftover money to savings that is above and beyond our spending habits.
# %%
import pandas as pd
import datetime
# %%
me = {
'biweekly_income': 2500,
'expected_raise_pct': 0.05,
'salary_cap_pct': 0.30,
'biweekly_spend': 1500,
'monthly_rent': 1627
}
# %%
accounts_definition = [
{
'name':'chequing',
'rate': 0.05*0.01,
'starting_balance': 2000,
'max_value': 5000, #hold at this value,
'biweekly_contribution':50,
},
{
'name':'tfsa',
'registered':True,
'rate': 5*0.01,
'starting_balance': 20000,
'biweekly_contribution': 300,
'contribution_room': 27000,
'yearly_contrib': 5000 # contrib room added each year
},
{
'name':'rrsp',
'registered':True,
'rate': 5*0.01,
'starting_balance': 0,
'biweekly_contribution': 0,
'max_value': 35000 #for first-time home buy
},
{
'name':'high_interest_savings',
'rate': 2*0.01,
'starting_balance': 2000,
'max_value': 20000, #hold at this value
'biweekly_contribution':300
}
]
# %%
unregistered = {
'name':'unregistered',
'rate': 3*0.01,
'starting_balance': 0,
}
# %%
BW = 3 * 26 # years in 2-week chunks
# %%
#initialize accounts
accounts = {account['name']: account for account in accounts_definition}
# %% [markdown]
# ## Income
#
# Pure salary income, with option for it to grow annually
# %%
income = pd.np.ones(BW)*me.get("biweekly_income")
for year in range(BW//26):
if me.get("salary_cap_pct") and (1 + me.get("salary_cap_pct")) * me.get("biweekly_income") < income[year*26]:
break
else:
income[year*26:]*= 1 + me.get("expected_raise_pct",0)
# %% [markdown]
# ## Distribution
#
# How is this distributed to the accounts
# %%
unregistered_balance = pd.np.ones(BW)*unregistered.get('starting_balance',0)
unregsitered_rate = unregistered.get('rate',0)
# %% [markdown]
# go through time, do all accounts each step
# %%
spending = pd.np.ones(BW) * me.get("biweekly_spend",0)
rent = pd.np.ones(BW) * me.get("monthly_rent",0) * 12/26 # biweekly rent?
income -= spending + rent # this is our takehome
invest = sum(acct.get("biweekly_contribution", 0) for acct in accounts_definition)# amount invested biweekly
for account in accounts_definition:
amount = account.get("biweekly_contribution",0)/invest
account['ratio'] = amount
# %% [markdown]
# ### Rules for distribution (one way to apply these rules is sketched in the cell below)
#
# - Try to follow the user inputs, split up money based on the ratio between biweekly contributions
# - When all the *investment* accounts are full, fill up the *unregistered* account
# - Stop filling RRSP once it's above the 1st time homebuyer amount (would be better to stop before this happens, but ...)
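# %%
# Hedged sketch (not in the original notebook): one way to step through the pay
# periods and apply the rules above. Names such as 'balances' and 'leftover' are
# illustrative assumptions, not the author's.
import numpy as np
balances = {a['name']: np.ones(BW) * a.get('starting_balance', 0) for a in accounts_definition}
for t in range(1, BW):
    leftover = income[t]
    for a in accounts_definition:
        bal = balances[a['name']]
        bal[t] = bal[t - 1] * (1 + a.get('rate', 0) / 26)  # biweekly growth
        contrib = a['ratio'] * invest
        # hold an account at its max_value (e.g. the RRSP first-time home buyer cap)
        if 'max_value' in a and bal[t] + contrib > a['max_value']:
            contrib = max(a['max_value'] - bal[t], 0)
        bal[t] += contrib
        leftover -= contrib
    # whatever is left over flows into the unregistered account
    unregistered_balance[t] = (unregistered_balance[t - 1] * (1 + unregistered.get('rate', 0) / 26)
                               + max(leftover, 0))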
# %%
accts = []
for account in accounts_definition:
balance = pd.np.ones(BW)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import time
import seaborn as sns
import pandas as pd
import numpy as np
import os
from sklearn.cluster import KMeans, DBSCAN, Birch, SpectralClustering, AgglomerativeClustering
from sklearn import metrics
from sklearn import preprocessing
import matplotlib.pyplot as plt
from math import floor
# Function that preprocesses the given dataset, and then applies five clustering
# algorithms calculating their execution time, Calinski-Harabaz and Silhouette
# scores and saves their scatter matrix in a 'plots' directory in png format,
# using the given dataset name combined with the algorithm name as file name.
# If the dataset's size is under 100 samples, it also saves a heatmap
# for each algorithm.
def ClusteringAlgorithms(dataset, dataset_name):
# Normalization of the dataset
normalized_dataset = preprocessing.normalize(dataset, norm='l2')
# K-Means
k_means = KMeans(init='k-means++', n_clusters=4, n_init=5, random_state=101010)
# DBSCAN
dbscan = DBSCAN(eps=0.1)
# Birch
birch = Birch(threshold=0.1, n_clusters=4)
# Spectral Clustering
spectral = SpectralClustering(n_clusters=4, random_state=101010)
# Ward
hierarchical_clusters = 100
half_dataset_size = floor(dataset.shape[0]/2)
if hierarchical_clusters > half_dataset_size:
hierarchical_clusters = half_dataset_size
ward = AgglomerativeClustering(n_clusters=hierarchical_clusters, linkage='ward')
clustering_algorithms = [
("K-Means", k_means),
("DBSCAN", dbscan),
("Birch", birch),
("Spectral Clustering", spectral),
("Ward", ward)
]
print("-----------------------{}-----------------------".format(dataset_name))
for name, algorithm in clustering_algorithms:
#print("{:20s}".format(name), end='')
print("{} & ".format(name), end='')
t1 = time.time()
cluster_predict = algorithm.fit_predict(normalized_dataset)
t2 = time.time() - t1
k = len(set(cluster_predict))
print("{} & ".format(k), end='')
print("{:.3f} & ".format(t2), end='')
#print("| k: {:3.0f}, ".format(k),end='')
#print("{:0.3f} seconds, ".format(t2),end='')
if (k>1) and (name != "Ward"):
metric_CH = metrics.calinski_harabaz_score(normalized_dataset, cluster_predict)
metric_SC = metrics.silhouette_score(normalized_dataset, cluster_predict, metric='euclidean', sample_size=floor(0.1*len(normalized_dataset)), random_state=123456)
else:
metric_CH = 0
metric_SC = 0
#print("CH Index: {:9.3f}, ".format(metric_CH),end='')
#print("SC: {:.5f}".format(metric_SC))
print("{:.3f} & ".format(metric_CH), end='')
print("{:.5f} & ".format(metric_SC), end='')
# Assignment gets turned into DataFrame
column_name = 'cluster'
clusters = pd.DataFrame(cluster_predict,index=dataset.index,columns=[column_name])
# Clusters column gets added to dataset
modified_dataset = pd.concat([dataset, clusters], axis=1)
# Filter out clusters made up of outlier samples (clusters that represent less than 3% of the dataset)
if len(modified_dataset) > 100:
min_size = floor(modified_dataset.shape[0]*0.01)
else:
min_size = 2
filtered_dataset = modified_dataset[modified_dataset.groupby('cluster').cluster.transform(len) > min_size]
new_k = len(set(filtered_dataset[column_name]))
#print("De los {:.0f} clusters hay {:.0f} con más de {:.0f} elementos. Del total de {:.0f} elementos, se seleccionan {:.0f}".format(k,new_k,min_size,len(modified_dataset),len(filtered_dataset)))
print("{} & ".format(new_k), end='')
print("{} ".format(len(modified_dataset)-len(filtered_dataset)) + r"\\")
# Define directory path
script_dir = os.path.dirname(__file__)
# Now the scatter matrix is generated with the appended dataset
sns.set()
variables = list(filtered_dataset)
variables.remove(column_name)
sns_plot = sns.pairplot(filtered_dataset, vars=variables, hue=column_name, palette='Paired', plot_kws={"s": 25}, diag_kind="hist")
sns_plot.fig.subplots_adjust(wspace=.03, hspace=.03);
# Directory is created if does not already exist
plot_dir = os.path.join(script_dir, 'plots/')
plot_name = name+"-"+dataset_name+"-ScatterMatrix.png"
if not os.path.isdir(plot_dir):
os.makedirs(plot_dir)
# File plot is saved in 'plots' directory
sns_plot.savefig(plot_dir + plot_name)
sns_plot.fig.clear()
# And the heatmap comparing variables values in each cluster
# Directory is created if does not already exist
heatmap_dir = os.path.join(script_dir, 'heatmaps/')
heatmap_name = name+"-"+dataset_name+"-Heatmap.png"
if not os.path.isdir(heatmap_dir):
os.makedirs(heatmap_dir)
# List of clusters that have been defined by the algorithm
clusters_list = list(set(filtered_dataset[column_name]))
# Creation of an empty DataFrame that will be filled with a row for each cluster,
# each column equivalent to the mean value of the variable for the
# examples in the cluster
mean_dataframe = pd.DataFrame()
for cluster in clusters_list:
cluster_dataframe = filtered_dataset[filtered_dataset[column_name] == cluster]
mean_array = dict(np.mean(cluster_dataframe[variables],axis=0))
aux_mean_dataframe = pd.DataFrame(mean_array,index=[str(cluster)])
mean_dataframe = pd.concat([mean_dataframe,aux_mean_dataframe])
# -*- coding: utf-8 -*-
"""
Module containing logic for table and graph with Ownership History
"""
__author__ = '<NAME>'
__email__ = '<EMAIL>'
from pandas import DataFrame
from PyQt5.QtCore import QObject, pyqtSlot
from source.util import Assertor
from .table_model import TableModel
from .model import Model
from ..graphics import BarChartWithLine
class HistoryModel(Model):
"""
Implementation of model for Ownership history
"""
_finn_history_keys = ["finn_adresse", "eieform", "kommunenr", "gardsnr", "bruksnr",
"bruksenhetsnr", "seksjonsnr", "historikk"]
def __init__(self, parent: QObject):
"""
Constructor / Instantiation of class
Parameters
----------
parent : QObject
Parent view for which the model in to be linked
"""
Assertor.assert_data_types([parent], [QObject])
super().__init__(parent)
self.keys = None
self.values = None
self.bar_plot = None
self.table_view_mapping = None
@pyqtSlot()
def add_finn_history(self, postfix: str):
"""
method for adding Finn ownership history to front end
Parameters
----------
postfix : str
index if used in naming of widgets
"""
Assertor.assert_data_types([postfix], [str])
grandparent = self.parent.parent
history_data = {}
if grandparent.finn_model.finn_data:
for key, val in grandparent.finn_model.finn_data.items():
if key[:-len(postfix)] in self._finn_history_keys:
if key[:-len(postfix)] == "historikk":
history_data.update(
{key: val.to_dict() if isinstance(val, DataFrame) else val})
else:
history_data.update({key: val})
self.data.update(history_data)
for key in self._finn_history_keys:
if key == "historikk":
BarChartWithLine.clear_graphics(self.parent.ui.graphics_view_historikk,
self.parent.ui.table_view_historikk)
if key + postfix in self.data.keys() and self.data["historikk" + postfix]:
# table
history_data_model = TableModel(DataFrame(self.data[key + postfix]))
from graph_build.graph_noise_join.conf import GraphNoiseJoinConf
import logging
import os
import fiona
import math
from pyproj import CRS
import numpy as np
import pandas as pd
import geopandas as gpd
import graph_build.graph_noise_join.utils as utils
import common.igraph as ig_utils
from common.igraph import Edge as E
from graph_build.graph_noise_join.schema import SamplingGdf as S
from typing import Dict
log = logging.getLogger('noise_graph_join')
def noise_graph_join(
edge_gdf: gpd.GeoDataFrame,
sampling_interval: float,
noise_layers: Dict[str, gpd.GeoDataFrame],
nodata_layer: gpd.GeoDataFrame,
b_debug: bool = False,
debug_gpkg: str = ''
) -> gpd.GeoDataFrame:
# create sampling points
edge_gdf = utils.add_sampling_points_to_gdf(edge_gdf, sampling_interval=sampling_interval)
point_gdf = utils.explode_sampling_point_gdf(edge_gdf, points_geom_column=S.sampling_points)
# select only unique sampling points for sampling
point_gdf = utils.add_unique_geom_id(point_gdf, log)
uniq_point_gdf = point_gdf.drop_duplicates(S.xy_id, keep='first')
initial_sampling_count = len(uniq_point_gdf.index)
log.info(f'created {len(uniq_point_gdf)} unique sampling points ({round(len(point_gdf)/point_gdf[S.edge_id].nunique(),2)} per edge)')
# add boolean column indicating whether sampling point is within potential nodata zone
uniq_point_gdf = utils.add_inside_nodata_zone_column(uniq_point_gdf, nodata_layer, log)
# columns: edge_id, sample_len, xy_id, nodata_zone (1 / na)
if b_debug:
if os.path.exists(debug_gpkg):
os.remove(debug_gpkg)
log.info('exporting edges and sampling points for debugging')
edge_gdf.drop(columns=[S.sampling_points]).to_file(debug_gpkg, layer='graph_edges', driver='GPKG')
uniq_point_gdf.to_file(debug_gpkg, layer='sampling_points', driver='GPKG')
# spatially join noise values by sampling points from a set of noise surface layers
noise_samples = utils.sjoin_noise_values(uniq_point_gdf, noise_layers, log)
noise_samples[S.no_noise_values] = noise_samples.apply(lambda row: utils.all_noise_values_none(row, noise_layers), axis=1)
utils.log_none_noise_stats(log, noise_samples)
# add column indicating whether sampling point is both located in potential nodata_zone and missing noise values
noise_samples[S.missing_noises] = noise_samples.apply(lambda row: True if (row[S.nodata_zone] == 1) & (row[S.no_noise_values] == True) else False, axis=1)
normal_samples = noise_samples[noise_samples[S.missing_noises] == False].copy()
if b_debug:
noise_samples.to_file(debug_gpkg, layer='sampling_points_noise', driver='GPKG')
missing_noises_count = len(noise_samples[noise_samples[S.missing_noises] == True])
missing_share = round(100 * missing_noises_count/len(noise_samples.index), 2)
log.info(f'found {missing_noises_count} ({missing_share} %) sampling points for which noise values need to be interpolated')
# define columns for sampled values
sampling_columns = [S.xy_id, S.road, S.train, S.tram, S.metro, S.n_max, S.n_max_sources, S.n_max_adj]
if missing_noises_count == 0:
log.info('processing noise samples')
all_samples = utils.aggregate_noise_values(normal_samples)
all_samples = all_samples[sampling_columns]
else:
# interpolate noise values for sampling points missing them in nodata zones
interpolated_samples = noise_samples[noise_samples[S.missing_noises] == True][[S.xy_id, S.geometry]].copy()
interpolated_samples[S.offset_sampling_points] = [utils.get_sampling_points_around(point, distance=7, count=20) for point in interpolated_samples[S.geometry]]
offset_sampling_points = utils.explode_offset_sampling_point_gdf(interpolated_samples, S.offset_sampling_points)
if b_debug:
offset_sampling_points.to_file(debug_gpkg, layer='offset_sampling_points', driver='GPKG')
# join noise values to offset sampling points
offset_sampling_point_noises = utils.sjoin_noise_values(offset_sampling_points, noise_layers, log)
if b_debug:
offset_sampling_point_noises.to_file(debug_gpkg, layer='offset_sampling_point_noises', driver='GPKG')
# calculate average noise values per xy_id from offset sampling points
offset_samples_by_xy_id = offset_sampling_point_noises.groupby(by=S.xy_id)
row_accumulator = []
for xy_id, group in offset_samples_by_xy_id:
samples = group.copy()
samples = samples.fillna(0)
interpolated_sample = {name: samples[name].quantile(.7, interpolation='nearest') for name in noise_layers.keys()}
interpolated_sample[S.xy_id] = xy_id
row_accumulator.append(interpolated_sample)
interpolated_noise_samples = pd.DataFrame(row_accumulator)
interpolated_noise_samples = interpolated_noise_samples.replace(0, np.nan)
# add newly sampled noise values to sampling points missing them
interpolated_samples = pd.merge(interpolated_samples.drop(columns=[S.offset_sampling_points]), interpolated_noise_samples, on=S.xy_id, how='left')
if b_debug:
interpolated_samples.to_file(debug_gpkg, layer='interpolated_samples', driver='GPKG')
# add maximum noise values etc. to sampling points
log.info('processing noise samples')
normal_samples = utils.aggregate_noise_values(normal_samples)
interpolated_samples = utils.aggregate_noise_values(interpolated_samples, prefer_syke=True)
# combine sampling point dataframes to one
normal_samples = normal_samples[sampling_columns]
interpolated_samples = interpolated_samples[sampling_columns]
all_samples = pd.concat([normal_samples, interpolated_samples], ignore_index=True)
# -*- coding: utf-8 -*-
# Copyright (c) 2019 SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
import codecs
import datetime
import logging
import logging.config
import os
import re
import time
import numpy as np
import sharkpylib
from sharkpylib import mappinglib
from sharkpylib.file import txt_reader
from sharkpylib.file.file_handlers import Directory
from sharkpylib.file.file_handlers import ListDirectory
from sharkpylib.file.file_handlers import MappingDirectory
from sharkpylib.qc.mask_areas import MaskAreasDirectory
try:
import pandas as pd
except:
pass
import sys
parent_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if parent_directory not in sys.path:
sys.path.append(parent_directory)
from sharkpylib import gismo
class TavastlandException(Exception):
"""
Blueprint for error message.
code is for external mapping of exceptions. For example if a GUI wants to
handle the error text for different languages.
"""
code = None
message = ''
def __init__(self, message='', code=''):
self.message = '{}: {}'.format(self.message, message)
if code:
self.code = code
class TavastlandExceptionCorrupedFile(TavastlandException):
"""
"""
code = ''
message = 'Corrupted file'
class TavastlandExceptionNoCO2data(TavastlandException):
"""
"""
code = ''
message = ''
class TavastlandExceptionNoMatchWhenMerging(TavastlandException):
"""
"""
code = ''
message = ''
class File(object):
def __init__(self, file_path='', **kwargs):
self._set_logger(kwargs.get('logger'))
self.file_path = file_path
self.file_directory = os.path.dirname(self.file_path)
self.file_name = os.path.basename(self.file_path)
self.file_id = self.file_name
self.df = pd.DataFrame()
self.time_start = None
self.time_end = None
self.data_loaded = None
self.time_in_file_name_formats = ['TP_%Y%m%d%H%M%S.mit']
self._add_file_path_time()
self.time_frozen_between = []
if kwargs.get('load_file'):
self.load_file()
def _set_logger(self, logger):
if logger:
self.logger = logger
else:
logging.config.fileConfig('logging.conf')
self.logger = logging.getLogger('timedrotating')
def _len_header_equals_len_data(self, file_path):
with open(file_path) as fid:
for r, line in enumerate(fid):
split_line = line.split('\t')
if r==0:
header = split_line
else:
if len(header) == len(split_line):
return True
return False
def _add_file_path_time(self):
self.file_path_time = None
self.file_path_year = None
self.file_path_possible_years = []
for time_format in self.time_in_file_name_formats:
try:
time_object = datetime.datetime.strptime(self.file_name, time_format)
self.file_path_time = time_object
break
except ValueError:
# logger.debug('No time in file path for file: {}'.format(self.file_path))
pass
# Find year
result = re.findall('\d{4}', self.file_name)
if result:
self.file_path_year = int(result[0])
self.file_path_possible_years = [self.file_path_year-1, self.file_path_year, self.file_path_year+1]
def _delete_columns(self):
if 'Date' in self.df.columns:
self.df.drop(['Date'], axis=1, inplace=True)
# Time is removed in method _add_columns
elif 'PC Date' in self.df.columns:
self.df.drop(['PC Date', 'PC Time'], axis=1, inplace=True)
if 'Lat' in self.df.columns:
self.df.drop(['Lat', 'Lon'], axis=1, inplace=True)
elif 'latitude' in self.df.columns:
self.df.drop(['latitude', 'longitude'], axis=1, inplace=True)
def _add_columns(self):
# Time
if 'Date' in self.df.columns:
time_str = self.df['Date'] + ' ' + self.df['Time'].copy()
self.df.drop('Time', axis=1, inplace=True)
self.df['time'] = pd.to_datetime(time_str, format='%d.%m.%Y %H:%M:%S')
elif 'PC Date' in self.df.columns:
time_str = self.df['PC Date'] + ' ' + self.df['PC Time']
self.df['time'] = pd.to_datetime(time_str, format='%d/%m/%y %H:%M:%S')
# Position
if 'Lat' in self.df.columns:
self.df['lat'] = self.df['Lat'].apply(as_float)
self.df['lon'] = self.df['Lon'].apply(as_float)
elif 'latitude' in self.df.columns:
self.df['lat'] = self.df['latitude'].apply(as_float)
self.df['lon'] = self.df['longitude'].apply(as_float)
else:
self.df['lat'] = np.nan
self.df['lon'] = np.nan
self.df['source_file'] = self.file_name
def _remove_duplicates(self):
# print('REMOVE DUPLICATES', self.file_id)
# First save missing periods
dub_boolean = self.df.duplicated('time', keep=False)
between = []
missing_period = []
for i, t0, b0, t1, b1 in zip(self.df.index[:-1], self.df['time'].values[:-1], dub_boolean.values[:-1],
self.df['time'].values[1:], dub_boolean.values[1:]):
if i == 0 and b0:
missing_period.append('?')
if b1 and not b0:
# t0s = pd.to_datetime(t0).strftime('%Y%m%d%H%M%S')
# missing_period.append(t0s)
missing_period.append(t0)
elif b0 and not b1:
# t1s = pd.to_datetime(t1).strftime('%Y%m%d%H%M%S')
# missing_period.append(t1s)
missing_period.append(t1)
# print(missing_period)
if len(missing_period) == 2:
between.append(missing_period)
# between.append('-'.join(missing_period))
missing_period = []
if missing_period:
missing_period.append('?')
between.append(missing_period)
# between.append('-'.join(missing_period))
# print('between:', len(between))
self.time_frozen_between = between
# Now drop all duplicates
self.df.drop_duplicates('time', keep=False, inplace=True)
def valid_data_line(self, line):
if 'DD.MM.YYYY' in line:
# print('DD.MM.YYYY', self.file_path)
return False
if not line.strip():
# print('BLANK', self.file_path)
return False
return True
def load_file(self, **kwargs):
if not os.path.exists(self.file_path):
raise FileNotFoundError
header = []
data = []
with codecs.open(self.file_path, encoding=kwargs.get('encoding', 'cp1252')) as fid:
for row, line in enumerate(fid):
split_line = line.strip('\n\r').split(kwargs.get('sep', '\t'))
split_line = [item.strip() for item in split_line]
if row == 1 and header:
if len(header) != len(split_line):
header = header[:len(split_line)]
if not header:
header = split_line
else:
if len(header) != len(split_line):
raise TavastlandExceptionCorrupedFile
self.logger.warning('Invalid line {} in file: {}'.format(row, self.file_path))
self.data_loaded = False
return False
if not self.valid_data_line(line):
self.logger.warning('Removing invalid line {} from file: {}'.format(row, self.file_path))
continue
data.append(split_line)
self.original_columns = header[:]
self.df = pd.DataFrame(data, columns=header)
self._add_columns()
self._remove_duplicates()
self.filter_data()
self._delete_columns()
self.data_loaded = True
return True
def filter_data(self):
"""
Filters the data from unwanted lines etc.
:return:
"""
combined_keep_boolean = pd.Series([True]*len(self.df))
keep_boolean = ~self.df[self.original_columns[0]].str.contains('DD.MM.YYYY')
combined_keep_boolean = combined_keep_boolean & keep_boolean
keep_boolean = ~self.df[self.original_columns[0]].str.contains('.1904')
combined_keep_boolean = combined_keep_boolean & keep_boolean
keep_boolean = self.df['time'] <= datetime.datetime.now()
combined_keep_boolean = combined_keep_boolean & keep_boolean
removed = self.df.loc[~combined_keep_boolean]
if len(removed):
self.logger.warning('{} lines removed from file {}'.format(len(removed), self.file_path))
self.df = self.df.loc[combined_keep_boolean, :]
def clean_file(self, export_directory):
"""
Loads file (including filter data) and saves to the export directory.
:return
"""
# print(export_directory)
if export_directory == self.file_directory:
raise TavastlandException('Cannot export to the same directory!')
if not os.path.exists(export_directory):
os.makedirs(export_directory)
if self.data_loaded is None:
self.load_file()
export_file_path = os.path.join(export_directory, self.file_name)
self.df[self.original_columns].to_csv(export_file_path, index=False, sep='\t')
def get_df(self):
if self.data_loaded is None:
self.load_file()
return self.df
def get_time_range(self):
def get_time(line):
date = re.findall('\d{2}\.\d{2}\.\d{4}', line)
time = re.findall('\d{2}:\d{2}:\d{2}', line)
if date and time:
return datetime.datetime.strptime(date[0] + time[0], '%d.%m.%Y%H:%M:%S')
date = re.findall('\d{2}/\d{2}/\d{2}', line)
time = re.findall('\d{2}:\d{2}:\d{2}', line)
if date and time:
return datetime.datetime.strptime(date[0] + time[0], '%d/%m/%y%H:%M:%S')
self.time_start = None
self.time_end = None
if self.data_loaded:
self.time_start = self.df.time.values[0]
self.time_end = self.df.time.values[-1]
return self.time_start, self.time_end
else:
with codecs.open(self.file_path) as fid:
for r, line in enumerate(fid):
if self.valid_data_line(line):
if r == 0:
continue
elif not self.time_start:
time = get_time(line)
self.time_start = time
self.time_end = get_time(line)
return self.time_start, self.time_end
def in_time_range(self, datetime_object):
if not self.time_start:
self.get_time_range()
return (datetime_object >= self.time_start) & (datetime_object <= self.time_end)
def check_if_valid_file_name(self):
"""
External method.
Returns True if file_name follows the structure(s) described in method.
:param file_name:
:return:
"""
raise NotImplementedError
def warnings(self):
"""
Returns a list of strange things found in file. Strange things can be handled.
:return: list with description of the warnings.
"""
raise NotImplementedError
def get_file_errors(self):
"""
Returns a list of errors in file if any. Errors are obvious faults that can not be handled.
:return list with description of the errors.
"""
raise NotImplementedError
def _get_file_errors(self):
error_list = []
if not self._len_header_equals_len_data(self.file_path):
text = 'Header is not the same length as data in file: {}.'.format(self.file_name)
error_list.append(text)
return error_list
class MITfile(File):
def __init__(self, file_path='', **kwargs):
File.__init__(self, file_path, **kwargs)
def check_if_valid_file_name(self, file_name):
"""
External method.
Returns True if file_name follows the structure(s) described in method.
:param file_name:
:return:
"""
if not file_name.endswith('.mit'):
return False
return True
def warnings(self):
"""
Returns a list of strange things found in file. Strange things can be handled.
:return: list with description of the warnings.
"""
raise NotImplementedError
def get_file_errors(self):
"""
Returns a list of errors in file if any. Errors are obvious faults that can not be handled.
:return list with description of the errors.
"""
error_list = self._get_file_errors()
# Check time
start, end = self.get_time_range()
d = datetime.datetime(1980, 1, 1)
this_year = datetime.datetime.now().year
if not all([start, end]):
text = 'Could not find time in file {}.'.format(self.file_name)
error_list.append(text)
else:
if start < d:
text = 'Start date is too early in file {}. Before {}'.format(self.file_name, d.strftime('%Y%m%d'))
error_list.append(text)
# continue
if start > end:
text = 'Start time > end time in file {}.'.format(self.file_name)
error_list.append(text)
# continue
if any([start.year > this_year, end.year > this_year]):
text = 'Start year or end year is later than current year in file {}.'.format(self.file_name)
error_list.append(text)
# continue
if any([start.year == 1904, end.year == 1904]):
text = 'Start year or end year is 1904 in file {}.'.format(self.file_name)
self.logger.info(text)
error_list.append(text)
if error_list:
self.logger.info('; '.join(error_list))
return error_list
class CO2file(File):
def __init__(self, file_path='', **kwargs):
File.__init__(self, file_path, **kwargs)
def check_if_valid_file_name(self, file_name):
"""
External method.
Returns True if file_name follows the structure(s) described in method.
:param file_name:
:return:
"""
if not file_name.endswith('dat.txt'):
return False
return True
def warnings(self):
"""
Returns a list of strange things found in file. Strange things can be handled.
:return: list with description of the warnings.
"""
raise NotImplementedError
def get_file_errors(self):
"""
Returns a list of errors in file if any. Errors are obvious faults that can not be handled.
:return list with description of the errors.
"""
error_list = self._get_file_errors()
# Check time
start, end = self.get_time_range()
d = datetime.datetime(1980, 1, 1)
this_year = datetime.datetime.now().year
if not all([start, end]):
text = 'Could not find time in file {}.'.format(self.file_name)
error_list.append(text)
if error_list:
self.logger.info('; '.join(error_list))
return error_list
class FileHandler(object):
def __init__(self, **kwargs):
self._set_logger(kwargs.get('logger'))
self.logger.debug('Starting FileHandler for Tavastland')
self.directories = {}
self.directories['mit'] = kwargs.get('mit_directory', None)
self.directories['co2'] = kwargs.get('co2_directory', None)
self.export_directory = kwargs.get('export_directory', None)
self.save_directory = None
self.current_merge_data = pd.DataFrame()
import typing
import unittest
import numpy as np
import pandas as pd
import sklearn.datasets
import sklearn.model_selection
from autoPyTorch.datasets.tabular_dataset import DataTypes, TabularDataset
from autoPyTorch.utils.backend import create
from autoPyTorch.utils.pipeline import get_dataset_requirements
class DataFrameTest(unittest.TestCase):
def runTest(self):
df = pd.DataFrame([['a', 0.1, 1], ['b', 0.2, np.nan]])
target_df = pd.Series([1, 2])
__all__ = ["spectrometer_sensitivity"]
# standard library
from typing import List, Union
# dependent packages
import numpy as np
import pandas as pd
from .atmosphere import eta_atm_func
from .instruments import eta_Al_ohmic_850, photon_NEP_kid, window_trans
from .physics import johnson_nyquist_psd, rad_trans, T_from_psd
from .physics import c, h, k
from .filter import eta_filter_lorentzian, eta_filter_csv, weighted_average
# type aliases
ArrayLike = Union[np.ndarray, List[float], List[int], float, int]
# main functions
def spectrometer_sensitivity(
filter_transmission_csv: str = "",
F: ArrayLike = 350.0e9,
R: float = 500.0,
F_res: int = 30,
overflow: int = 80,
pwv: float = 0.5,
EL: float = 60.0,
eta_M1_spill: ArrayLike = 0.99,
eta_M2_spill: ArrayLike = 0.90,
eta_wo_spill: ArrayLike = 0.99,
n_wo_mirrors: int = 4.0,
window_AR: bool = True,
eta_co: ArrayLike = 0.65,
eta_lens_antenna_rad: ArrayLike = 0.81,
eta_circuit: ArrayLike = 0.32,
eta_IBF: ArrayLike = 0.5,
KID_excess_noise_factor: float = 1.1,
theta_maj: ArrayLike = 22.0 * np.pi / 180.0 / 60.0 / 60.0,
theta_min: ArrayLike = 22.0 * np.pi / 180.0 / 60.0 / 60.0,
eta_mb: ArrayLike = 0.6,
telescope_diameter: float = 10.0,
Tb_cmb: ArrayLike = 2.725,
Tp_amb: ArrayLike = 273.0,
Tp_cabin: ArrayLike = 290.0,
Tp_co: ArrayLike = 4.0,
Tp_chip: ArrayLike = 0.12,
snr: float = 5.0,
obs_hours: float = 10.0,
on_source_fraction: float = 0.4 * 0.9,
on_off: bool = True,
):
"""Calculate the sensitivity of a spectrometer.
Parameters which are functions of frequency can be a vector (see Parameters).
Output is a pandas DataFrame which contains the results of the simulation (see Returns).
Parameters
----------
filter_transmission_csv
Optional. File location of a .csv file with transmission for filter channels
Header: Frequencies
rows: filter channels with transmission per column frequency
F
Used when filter_transmission_csv isn't used.
Frequency of the astronomical signal. Units: Hz.
R
Used when filter_transmission_csv isn't used.
Spectral resolving power in F/W_F where W_F is equivalent bandwidth and HWHM
of filters.
Units: None. See also: http://www.astrosurf.com/buil/us/spe2/hresol7.htm
F_res
Used when filter_transmission_csv isn't used.
The number of frequency bins within a FWHM
Units: none.
Overflow
Used when filter_transmission_csv isn't used.
The number of extra FHWM's below the first and above the last channel
Units: none.
pwv
Precipitable water vapour. Units: mm.
EL
Telescope elevation angle. Units: degrees.
eta_M1_spill
Spillover efficiency at the telescope primary mirror. Units: None.
eta_M2_spill
Spillover efficiency at the telescope secondary mirror. Units: None.
eta_wo_spill
Product of all spillover losses in the warm optics in the cabin. Units: None.
n_wo_mirrors
Number of cabin optics excluding telescope M1 and M2. Units: None.
window_AR
Whether the window is supposed to be coated by Ar (True) or not (False).
eta_co
Product of following. Units: None.
(1) Cold spillover.
(2) Cold ohmic losses.
(3) Filter transmission loss.
eta_lens_antenna_rad
The loss at chip temperature, *that is not in the circuit.*
Product of the following. Units: None.
(1) Front-to-back ratio of the lens-antenna on the chip (default: 0.93).
(2) Reflection efficiency at the surface of the lens (default: 0.9).
(3) Matching efficiency, due to the mismatch (default: 0.98).
(4) Spillover efficiency of the lens-antenna (default: 0.993).
These values can be found in D2_2V3.pdf, p14.
eta_circuit
The loss at chip temperature, *in the circuit.*. Units: None.
eta_IBF
Fraction of the filter power transmission that is within the filter
channel bandwidth. Units: None. The rest of the power is cross talk,
picking up power that is in the bands of neighboring channels.
This efficiency applies to the coupling to astronomical line signals.
This efficiency does not apply to the coupling to continuum,
including the coupling to the atmosphere for calculating the NEP.
KID_excess_noise_factor
Need to be documented. Units: None.
theta_maj
The HPBW along the major axis, assuming a Gaussian beam. Units: radians.
theta_min
The HPBW along the minor axis, assuming a Gaussian beam. Units: radians.
eta_mb
Main beam efficiency. Units: None. Note that eta_mb includes
the following terms from D2_2V3.pdf from Shahab's report.
because a decrease in these will launch the beam to the sky
but not couple it to the point source (See also FAQ.).
(1) eta_Phi.
(2) eta_amp.
telescope_diameter
Diameter of the telescope. Units: m.
Tb_cmb
Brightness temperature of the CMB. Units: K.
Tp_amb
Physical temperature of the atmosphere and ambient environment
around the telescope. Units: K.
Tp_cabin
Physical temperature of the telescope cabin. Units: K.
Tp_co
Physical temperature of the cold optics inside the cryostat. Units: K.
Tp_chip
Physical temperature of the chip. Units: K.
snr
Target signal to noise to be reached (for calculating the MDLF). Units: None.
obs_hours
Observing hours, including off-source time and the slew overhead
between on- and off-source. Units: hours.
on_source_fraction
Fraction of the time on source (between 0. and 1.). Units: None.
on_off
If the observation involves on_off chopping, then the SNR degrades
by sqrt(2) because the signal difference includes the noise twice.
Returns
----------
F
Best-fit center frequencies from filter_transmission_csv.
Same as input if filter_transmission_csv isn't used. Units: Hz.
pwv
Same as input.
EL
Same as input
eta_atm
Atmospheric transmission within the FWHM of the channel. Units: None.
eta_atm_cont
Atmospheric transmission across the entire width of the filter. Units: None.
R
best-fit F/FWHM fitted from filter_transmission_csv
Equivalent bandwidth within F/R if filter_transmission_csv isn't used.
Units: None
W_F_spec
Best-fit Equivalent bandwidth within the FWHM from filter_transmission_csv
Equivalent bandwidth within F/R if filter_transmission_csv isn't used.
Units: Hz.
W_F_cont
Equivalent bandwidth of 1 channel including the power coupled
outside of the filter channel band. Units: Hz.
theta_maj
Same as input.
theta_min
Same as input.
eta_a
Aperture efficiency. Units: None.
See also: https://deshima.kibe.la/notes/324
eta_mb
Main beam efficiency. Units: None.
eta_forward
Forward efficiency within the FHWM of the channel. Units: None.
See also: https://deshima.kibe.la/notes/324
eta_forward_cont
Forward efficiency across the entire width of the filter. Units: None.
See also: https://deshima.kibe.la/notes/324
eta_sw
Coupling efficiency from a spectral point source to the cryostat window. Units: None.
eta_sw_cont
Coupling efficiency from a continuum point source to the cryostat window. Units: None.
eta_window
Transmission of the cryostat window within the FWHM of the channel.
Units: None.
eta_window_cont
Transmission of the cryostat window across the entire width of the filter.
Units: None.
eta_inst
Instrument optical efficiency within the FWHM of the channel. Units: None.
See also: https://arxiv.org/abs/1901.06934
eta_inst_cont
Instrument optical efficiency across the entire width of the filter. Units: None.
See also: https://arxiv.org/abs/1901.06934
eta_circuit
Equivalent efficiency of Lorentzian fit from filter_transmission.csv.
Same as input if filter_transmission.csv isn't used. Units: None
Tb_sky
Planck brightness temperature of the sky. Units: K.
Tb_M1
Planck brightness temperature looking into the telescope primary. Units: K.
Tb_M2
Planck brightness temperature looking into the telescope secondary,
including the spillover to the cold sky. Units: K.
Tb_wo
Planck brightness temperature looking into the warm optics. Units: K.
Tb_window
Planck brightness temperature looking into the window. Units: K.
Tb_co
Planck brightness temperature looking into the cold optis. Units: K.
Tb_filter
Planck brightness temperature looking into the lens from the filter. Units: K.
Tb_KID
Planck brightness temperature looking into the filter from the KID. Units: K.
Pkid
Power absorbed by the KID. Units: W.
Pkid_sky
Power of the sky loading to the KID. Units: W
Pkid_warm
Power of the warm optics loading to the KID. Units: W
Pkid_cold
Power of the cold optics and circuit loading to the KID. Units: W
n_ph
Photon occupation number within the FWHM of the channel. Units: None.
See also: http://adsabs.harvard.edu/abs/1999ASPC..180..671R
n_ph_cont
Photon occupation number across the entire width of the filter. Units: None.
See also: http://adsabs.harvard.edu/abs/1999ASPC..180..671R
NEPkid
Noise equivalent power at the KID with respect to the absorbed power.
Units: W Hz^0.5.
NEPinst
Instrument NEP within the FWHM of the channel. Units: W Hz^0.5.
See also: https://arxiv.org/abs/1901.06934
NEPinst_cont
Instrument NEP across the entire width of the filter. Units: W Hz^0.5.
See also: https://arxiv.org/abs/1901.06934
NEFD_line
Noise Equivalent Flux Density for coupling to a line that is not wider
than the filter bandwidth. Units: W/m^2/Hz * s^0.5.
NEFD_continuum
Noise Equivalent Flux Density for coupling to a continuum source.
Units: W/m^2/Hz * s^0.5.
NEF
Noise Equivalent Flux within the FWHM of the channel. Units: W/m^2 * s^0.5.
NEF_cont
Noise Equivalent Flux across the entire width of the filter. Units: W/m^2 * s^0.5.
MDLF
Minimum Detectable Line Flux. Units: W/m^2.
MS
Mapping Speed. Units: arcmin^2 mJy^-2 h^-1.
snr
Same as input.
obs_hours
Same as input.
on_source_fraction
Same as input.
on_source_hours
Observing hours on source. Units: hours.
equivalent_Trx
Equivalent receiver noise temperature within the FWHM of the channel. Units: K.
at the moment this assumes Rayleigh-Jeans!
equivalent_Trx_cont
Equivalent receiver noise temperature across the entire width of the filter.
Units: K.
at the moment this assumes Rayleigh-Jeans!
chi_sq
The Chi Squared value of the Lorentzian fit from filter_transmission_csv
Zero when filter_transmission_csv is not used. Units: None.
Notes
-----
The parameters to calculate the window transmission / reflection
is hard-coded in the function window_trans().
"""
# Filter approximation or read from csv?
if filter_transmission_csv == "":
# Generate filter
(
eta_filter,
eta_inband,
F,
F_int,
W_F_int,
box_height,
box_width,
chi_sq,
) = eta_filter_lorentzian(F, F / R, eta_circuit, F_res, overflow)
R = F / box_width
else:
# Read from csv
(
eta_filter,
eta_inband,
F,
F_int,
W_F_int,
box_height,
box_width,
chi_sq,
) = eta_filter_csv(filter_transmission_csv)
# Equivalent Bandwidth of 1 channel, modelled as a box filter.
# Used for calculating loading and coupling to a continuum source
W_F_cont = box_width / eta_IBF
# Used for calculating coupling to a line source,
# with a linewidth not wider than the filter channel
W_F_spec = box_width
# Efficiency of filter channels
eta_circuit = box_height
# #############################################################
# 1. Calculating loading power absorbed by the KID, and the NEP
# #############################################################
# .......................................................
# Efficiencies for calculating sky coupling
# .......................................................
# Ohmic loss as a function of frequency, from skin effect scaling
eta_Al_ohmic = 1.0 - (1.0 - eta_Al_ohmic_850) * np.sqrt(F_int / 850.0e9)
eta_M1_ohmic = eta_Al_ohmic
eta_M2_ohmic = eta_Al_ohmic
# Collect efficiencies at the same temperature
eta_M1 = eta_M1_ohmic * eta_M1_spill
eta_wo = eta_Al_ohmic**n_wo_mirrors * eta_wo_spill
# Forward efficiency: does/should not include window loss
# because it is defined as how much power out of
# the cryostat window couples to the cold sky.
eta_forward_spec = weighted_average(
eta_M1 * eta_M2_ohmic * eta_M2_spill * eta_wo + (1.0 - eta_M2_spill) * eta_wo,
eta_inband,
)
eta_forward_cont = weighted_average(
eta_M1 * eta_M2_ohmic * eta_M2_spill * eta_wo + (1.0 - eta_M2_spill) * eta_wo,
eta_filter,
)
# Calcuate eta at center of integration bin
eta_atm = eta_atm_func(F=F_int, pwv=pwv, EL=EL)
# Johnson-Nyquist Power Spectral Density (W/Hz)
# for the physical temperatures of each stage
psd_jn_cmb = johnson_nyquist_psd(F=F_int, T=Tb_cmb)
psd_jn_amb = johnson_nyquist_psd(F=F_int, T=Tp_amb)
psd_jn_cabin = johnson_nyquist_psd(F=F_int, T=Tp_cabin)
psd_jn_co = johnson_nyquist_psd(F=F_int, T=Tp_co)
psd_jn_chip = johnson_nyquist_psd(F=F_int, T=Tp_chip)
# Optical Chain
# Sequentially calculate the Power Spectral Density (W/Hz) at each stage.
# Uses only basic radiation transfer: rad_out = eta*rad_in + (1-eta)*medium
psd_sky = rad_trans(rad_in=psd_jn_cmb, medium=psd_jn_amb, eta=eta_atm)
psd_M1 = rad_trans(rad_in=psd_sky, medium=psd_jn_amb, eta=eta_M1)
psd_M2 = rad_trans(rad_in=psd_M1, medium=psd_jn_amb, eta=eta_M2_ohmic)
psd_M2_spill = rad_trans(rad_in=psd_M2, medium=psd_sky, eta=eta_M2_spill)
psd_wo = rad_trans(rad_in=psd_M2_spill, medium=psd_jn_cabin, eta=eta_wo)
[psd_window, eta_window] = window_trans(
F=F_int,
psd_in=psd_wo,
psd_cabin=psd_jn_cabin,
psd_co=psd_jn_co,
window_AR=window_AR,
)
psd_co = rad_trans(rad_in=psd_window, medium=psd_jn_co, eta=eta_co)
psd_filter = rad_trans(rad_in=psd_co, medium=psd_jn_chip, eta=eta_lens_antenna_rad)
# Instrument optical efficiency as in JATIS 2019
# (eta_inst can be calculated only after calculating eta_window)
eta_inst_spec = (
eta_lens_antenna_rad
* eta_co
* eta_circuit
* weighted_average(eta_window, eta_inband)
)
eta_inst_cont = (
eta_lens_antenna_rad
* eta_co
* eta_circuit
* weighted_average(eta_window, eta_filter)
)
# Calculating Sky loading, Warm loading and Cold loading individually for reference
# (Not required for calculating Pkid, but serves as a consistency check.)
# .................................................................................
# Sky loading
psd_KID_sky_1 = (
psd_sky
* eta_M1
* eta_M2_spill
* eta_M2_ohmic
* eta_wo
* eta_lens_antenna_rad
* eta_co
* eta_window
)
psd_KID_sky_2 = (
rad_trans(0, psd_sky, eta_M2_spill)
* eta_M2_ohmic
* eta_wo
* eta_lens_antenna_rad
* eta_co
* eta_window
)
psd_KID_sky = psd_KID_sky_1 + psd_KID_sky_2
skycoup = weighted_average(
psd_KID_sky / psd_sky, eta_filter
) # To compare with Jochem
# Warm loading
psd_KID_warm = (
window_trans(
F=F_int,
psd_in=rad_trans(
rad_trans(
rad_trans(
rad_trans(0, psd_jn_amb, eta_M1), 0, eta_M2_spill
), # sky spillover does not count for warm loading
psd_jn_amb,
eta_M2_ohmic,
),
psd_jn_cabin,
eta_wo,
),
psd_cabin=psd_jn_cabin,
psd_co=0,
window_AR=window_AR,
)[0]
* eta_co
* eta_lens_antenna_rad
)
# Cold loading
psd_KID_cold = rad_trans(
rad_trans(
window_trans(
F=F_int,
psd_in=0.0,
psd_cabin=0.0,
psd_co=psd_jn_co,
window_AR=window_AR,
)[0],
psd_jn_co,
eta_co,
),
psd_jn_chip,
eta_lens_antenna_rad,
)
# Loading power absorbed by the KID
# .............................................
""" if np.all(psd_filter != psd_KID_sky + psd_KID_warm + psd_KID_cold):
print("WARNING: psd_filter != psd_KID_sky + psd_KID_warm + psd_KID_cold")
"""
Pkid = np.sum(psd_filter * W_F_int * eta_filter, axis=1)
Pkid_sky = np.sum(psd_KID_sky * W_F_int * eta_filter, axis=1)
Pkid_warm = np.sum(psd_KID_warm * W_F_int * eta_filter, axis=1)
Pkid_cold = np.sum(psd_KID_cold * W_F_int * eta_filter, axis=1)
# Photon + R(ecombination) NEP of the KID
# .............................................
n_ph_spec = weighted_average(psd_filter / (h * F_int), eta_inband) * eta_circuit
n_ph_cont = weighted_average(psd_filter / (h * F_int), eta_filter) * eta_circuit
NEPkid = (
photon_NEP_kid(F_int, psd_filter * W_F_int * eta_filter, W_F_int)
* KID_excess_noise_factor
)
# Instrument NEP as in JATIS 2019
# .............................................
NEPinst_spec = NEPkid / eta_inst_spec # Instrument NEP
NEPinst_cont = NEPkid / eta_inst_cont # Instrument NEP
# ##############################################################
# 2. Calculating source coupling and sensitivity (MDLF and NEFD)
# ##############################################################
# Efficiencies
# .........................................................
Ag = np.pi * (telescope_diameter / 2.0) ** 2.0 # Geometric area of the telescope
omega_mb = np.pi * theta_maj * theta_min / np.log(2) / 4 # Main beam solid angle
omega_a = omega_mb / eta_mb # beam solid angle
Ae = (c / F) ** 2 / omega_a # Effective Aperture (m^2): lambda^2 / omega_a
eta_a = Ae / Ag # Aperture efficiency
# Coupling from the "S"ource to outside of "W"indow
eta_pol = 0.5 # Instrument is single polarization
eta_atm_spec = weighted_average(eta_atm, eta_inband)
eta_atm_cont = weighted_average(eta_atm, eta_filter)
eta_sw_spec = (
eta_pol * eta_a * eta_forward_spec * eta_atm_spec
) # Source-Window coupling
eta_sw_cont = (
eta_pol * eta_a * eta_forward_cont * eta_atm_cont
) # Source-Window coupling
# NESP: Noise Equivalent Source Power (an intermediate quantity)
# .........................................................
NESP_spec = NEPinst_spec / eta_sw_spec # Noise equivalent source power
NESP_cont = NEPinst_cont / eta_sw_cont # Noise equivalent source power
# NEF: Noise Equivalent Flux (an intermediate quantity)
# .........................................................
# From this point, units change from Hz^-0.5 to t^0.5
# sqrt(2) is because NEP is defined for 0.5 s integration.
NEF_spec = NESP_spec / Ag / np.sqrt(2) # Noise equivalent flux
NEF_cont = NESP_cont / Ag / np.sqrt(2) # Noise equivalent flux
# If the observation involves ON-OFF sky subtraction,
# subtracting two noisy measurements increases the noise by sqrt(2).
if on_off:
NEF_spec = np.sqrt(2) * NEF_spec
NEF_cont = np.sqrt(2) * NEF_cont
# MDLF (Minimum Detectable Line Flux)
# .........................................................
# Note that eta_IBF does not matter for the MDLF because it is an integrated line flux.
MDLF = NEF_spec * snr / np.sqrt(obs_hours * on_source_fraction * 60.0 * 60.0)
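# Worked example (illustrative numbers only): with snr = 5, obs_hours = 8 and
# on_source_fraction = 0.5, the denominator is sqrt(8 * 0.5 * 3600) = 120,
# so MDLF = NEF_spec * 5 / 120.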
# NEFD (Noise Equivalent Flux Density)
# .........................................................
continuum_NEFD = NEF_cont / W_F_cont
spectral_NEFD = NEF_spec / W_F_spec # ~ continuum_NEFD / eta_IBF, i.e. larger than continuum_NEFD
# Mapping Speed (line, 1 channel) (arcmin^2 mJy^-2 h^-1)
# .........................................................
MS = (
60.0
* 60.0
* 1.0
* omega_mb
* (180.0 / np.pi * 60.0) ** 2.0
/ (np.sqrt(2) * spectral_NEFD * 1e29) ** 2.0
)
# Equivalent Trx
# .........................................................
Trx_spec = NEPinst_spec / k / np.sqrt(2 * W_F_cont) - T_from_psd(
F, weighted_average(psd_wo, eta_inband)
) # assumes RJ!
Trx_cont = NEPinst_cont / k / np.sqrt(2 * W_F_cont) - T_from_psd(
F, weighted_average(psd_wo, eta_filter)
) # assumes RJ!
# ############################################
# 3. Output results as Pandas DataFrame
# ############################################
result = pd.concat(
[
pd.Series(F, name="F"),
pd.Series(pwv, name="PWV"),
pd.Series(EL, name="EL"),
pd.Series(eta_atm_spec, name="eta_atm"),
pd.Series(eta_atm_cont, name="eta_atm_cont"),
pd.Series(R, name="R"),
pd.Series(W_F_spec, name="W_F_spec"),
pd.Series(W_F_cont, name="W_F_cont"),
pd.Series(theta_maj, name="theta_maj"),
pd.Series(theta_min, name="theta_min"),
pd.Series(eta_a, name="eta_a"),
pd.Series(eta_mb, name="eta_mb"),
pd.Series(eta_forward_spec, name="eta_forward"),
pd.Series(eta_forward_cont, name="eta_forward_cont"),
pd.Series(eta_sw_spec, name="eta_sw"),
pd.Series(eta_sw_cont, name="eta_sw_cont"),
pd.Series(weighted_average(eta_window, eta_inband), name="eta_window"),
pd.Series(weighted_average(eta_window, eta_filter), name="eta_window_cont"),
pd.Series(eta_inst_spec, name="eta_inst"),
pd.Series(eta_inst_cont, name="eta_inst_cont"),
pd.Series(eta_circuit, name="eta_circuit"),
pd.Series(
weighted_average(T_from_psd(F_int, psd_sky), eta_filter), name="Tb_sky"
),
pd.Series(
weighted_average(T_from_psd(F_int, psd_M1), eta_filter), name="Tb_M1"
),
pd.Series(
weighted_average(T_from_psd(F_int, psd_M2), eta_filter), name="Tb_M2"
),
pd.Series(
weighted_average(T_from_psd(F_int, psd_wo), eta_filter), name="Tb_wo"
),
pd.Series(
weighted_average(T_from_psd(F_int, psd_window), eta_filter),
name="Tb_window",
),
pd.Series(
weighted_average(T_from_psd(F_int, psd_co), eta_filter), name="Tb_co"
),
pd.Series(
weighted_average(T_from_psd(F_int, psd_filter), eta_filter),
name="Tb_filter",
),
pd.Series(
T_from_psd(F, eta_circuit * weighted_average(psd_filter, eta_filter)),
name="Tb_KID",
),
pd.Series(Pkid, name="Pkid"),
pd.Series(Pkid_sky, name="Pkid_sky"),
pd.Series(Pkid_warm, name="Pkid_warm"),
pd.Series(Pkid_cold, name="Pkid_cold"),
pd.Series(n_ph_spec, name="n_ph"),
pd.Series(n_ph_cont, name="n_ph_cont"),
pd.Series(NEPkid, name="NEPkid"),
pd.Series(NEPinst_spec, name="NEPinst"),
pd.Series(NEPinst_cont, name="NEPinst_cont"),
pd.Series(spectral_NEFD, name="NEFD_line"),
pd.Series(continuum_NEFD, name="NEFD_continuum"),
pd.Series(NEF_spec, name="NEF"),
pd.Series(NEF_cont, name="NEF_cont"),
pd.Series(MDLF, name="MDLF"),
pd.Series(MS, name="MS"),
pd.Series(snr, name="snr"),
pd.Series(obs_hours, name="obs_hours"),
pd.Series(on_source_fraction, name="on_source_fraction"),
pd.Series(obs_hours * on_source_fraction, name="on_source_hours"),
#%%
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
df = pd.read_csv('data/cleanedData.csv')
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
import webbrowser
import warnings
from .file_download import update_index
from .file_tools import validate_version, get_version_files_paths
from .dataframe_tools import add_index_levels
from .exceptions import *
class DataSet:
"""
Note that all cancer datasets are class objects that inherit from cptac.dataset. Therefore
the same function calls exist for cptac.Endometrial, cptac.Colon, etc.
"""
def __init__(self, cancer_type, version, valid_versions, data_files):
"""Initialize variables for a DataSet object.
Parameters:
cancer_type (str): The cancer type requested for this dataset
version (str): The version number requested for this dataset
valid_versions (list of str): A list of all possible valid versions for this dataset
data_files (dict, keys of str, values of list of str): A dictionary where the keys are the existing version of the dataset, and the values are lists of the data file names for that version.
"""
# Initialize the _cancer_type instance variable
self._cancer_type = cancer_type.lower()
# Update the index, if possible. If there's no internet, that's fine.
try:
update_index(self._cancer_type)
except NoInternetError:
pass
# Validate the version
self._version = validate_version(version, self._cancer_type, use_context="init", valid_versions=valid_versions)
# Get the paths to the data files
version_data_files = data_files[self._version] # Get the data files for this version from the data files dictionary
self._data_files_paths = get_version_files_paths(self._cancer_type, self._version, version_data_files)
# Initialize dataframe and definitions dicts as empty for this parent class
self._data = {}
self._definitions = {}
# Assign the valid dfs lists, but make them instance variables so they're easy to override if needed
# These are the omics dataframes that are valid for use in the utilities functions
self._valid_omics_dfs = [
'acetylproteomics',
'circular_RNA',
'CNV',
'lincRNA',
'lipidomics',
'metabolomics',
'miRNA',
'phosphoproteomics',
'phosphoproteomics_gene',
'proteomics',
'somatic_mutation_binary',
'transcriptomics',
]
# These are the metadata dataframes that are valid for use in the utilities functions
self._valid_metadata_dfs = [
"clinical",
"derived_molecular",
"experimental_design",
#"followup", # Right now there are duplicate rows, so don't include follow up tables for joins.
] # We don't allow the treatment df, as in Ovarian, or medical_history df, as in Ccrcc, because they both have multiple rows for each sample.
# Methods to get metadata dataframes
def get_clinical(self):
"""Get the clinical dataframe."""
return self._get_dataframe("clinical")
def get_derived_molecular(self):
"""Get the derived_molecular dataframe."""
return self._get_dataframe("derived_molecular")
def get_experimental_design(self):
"""Get the experimental_design dataframe."""
return self._get_dataframe("experimental_design")
def get_medical_history(self):
"""Get the medical_history dataframe."""
return self._get_dataframe("medical_history")
def get_treatment(self):
"""Get the treatment dataframe."""
return self._get_dataframe("treatment")
def get_followup(self):
"""Get the followup dataframe."""
return self._get_dataframe("followup")
# Methods to get omics dataframes
def get_acetylproteomics(self):
"""Get the acetylproteomics dataframe."""
return self._get_dataframe("acetylproteomics")
def get_circular_RNA(self):
"""Get the circular_RNA dataframe."""
return self._get_dataframe("circular_RNA")
def get_CNV(self):
"""Get the CNV dataframe."""
return self._get_dataframe("CNV")
def get_lincRNA(self):
"""Get the lincRNA dataframe."""
return self._get_dataframe("lincRNA")
def get_lipidomics(self):
"""Get the lipidomics dataframe."""
return self._get_dataframe("lipidomics")
def get_metabolomics(self):
"""Get the metabolomics dataframe."""
return self._get_dataframe("metabolomics")
def get_methylation(self):
"""Get the methylation dataframe."""
return self._get_dataframe("methylation")
def get_miRNA(self):
"""Get the miRNA dataframe."""
return self._get_dataframe("miRNA")
def get_phosphoproteomics(self):
"""Get the phosphoproteomics dataframe."""
return self._get_dataframe("phosphoproteomics")
def get_phosphoproteomics_gene(self):
"""Get the phosphoproteomics_gene dataframe. The gene level phosphorylation measurement is an aggregate metric which potentially averages together individual measurements of different sites. Use get_phosphoproteomics() to view the data for individual sites."""
return self._get_dataframe("phosphoproteomics_gene")
def get_phosphosites(self, genes):
"""Returns dataframe with all phosphosites of specified gene or list of genes.
Parameters:
genes (str, or list or array-like of str): gene or list of genes to use to select phosphosites. str if single, list or array-like of str if multiple.
Returns:
pandas DataFrame: The phosphoproteomics for the specified gene(s).
"""
return self._get_omics_cols("phosphoproteomics", genes)
def get_proteomics(self):
"""Get the proteomics dataframe."""
return self._get_dataframe("proteomics")
def get_transcriptomics(self):
"""Get the transcriptomics dataframe."""
return self._get_dataframe("transcriptomics")
# Methods to get mutations dataframes
def get_gene_fusion(self):
"""Get the gene_fusion dataframe."""
return self._get_dataframe("gene_fusion")
def get_somatic_mutation(self):
"""Get the somatic_mutation dataframe."""
return self._get_dataframe("somatic_mutation")
def get_somatic_mutation_binary(self):
"""Get the somatic_mutation_binary dataframe, which has a binary value indicating, for each location on each gene, whether there was a mutation in that gene at that location, for each sample."""
return self._get_dataframe("somatic_mutation_binary")
# Help methods
def define(self, term):
"""Print the definition a term, if it is in the dataset's list of definitions.
Parameters:
term (str): term to be defined
Returns: None
"""
if len(self._definitions.keys()) == 0:
raise NoDefinitionsError("No definitions provided for this dataset.")
elif term in self._definitions.keys():
print(self._definitions[term])
else:
raise InvalidParameterError("{} not found in definitions. Check capitalization. Alternatively, the dataset's 'search(<your term>)' method can be used to perform a web search of the term provided.".format(term))
def get_cancer_type(self):
"""Return the cancer type for this dataset, as a string."""
return self._cancer_type
def version(self):
"""Return the dataset version of this instance, as a string."""
return self._version
def how_to_cite(self):
"""Print instructions for citing the data."""
print('Please include the following statement in publications using data accessed through this module:\n"Data used in this publication were generated by the Clinical Proteomic Tumor Analysis Consortium (NCI/NIH, <https://proteomics.cancer.gov/programs/cptac/>). Data were accessed through the Python module cptac, available at <https://pypi.org/project/cptac/>."')
def list_data(self):
"""Print list of loaded dataframes and dimensions."""
print("Below are the dataframes contained in this dataset:")
for name in sorted(self._data.keys(), key=str.lower):
df = self._data[name]
print("\t{}\n\t\tDimensions: {}".format(name, df.shape))
def list_definitions(self):
"""Print all terms defined in the dataset's list of definitions."""
if len(self._definitions.keys()) > 0:
for term in sorted(self._definitions.keys(), key=str.lower):
print(term)
else:
raise NoDefinitionsError("No definitions provided for this dataset.")
def search(self, term):
"""Search for a term in a web browser.
Parameters:
term (str): term to be searched
Returns: None
"""
url = "https://www.google.com/search?q=" + term
message = f"Searching for {term} in web browser..."
print(message, end='\r')
webbrowser.open(url)
print(" " * len(message), end='\r') # Erase the message
def reduce_multiindex(self, df, levels_to_drop=None, flatten=False, sep='_'):
"""Drop levels from and/or flatten the column axis of a dataframe with a column multiindex.
Parameters:
df (pandas DataFrame): The dataframe to make the changes to.
levels_to_drop (str, int, or list or array-like of str or int, optional): Levels, or indices of levels, to drop from the dataframe's column multiindex. These must match the names or indices of actual levels of the multiindex. Must be either all strings, or all ints. Default of None will drop no levels.
flatten (bool, optional): Whether or not to flatten the multiindex. Default of False will not flatten.
sep (str, optional): String to use to separate index levels when flattening. Default is underscore.
Returns:
pandas DataFrame: The dataframe, with the desired column index changes made.
"""
# Make a copy, so the original dataframe is preserved
df = df.copy(deep=True)
if levels_to_drop is not None:
if df.columns.nlevels < 2:
raise DropFromSingleIndexError("You attempted to drop level(s) from an index with only one level.")
if isinstance(levels_to_drop, (str, int)):
levels_to_drop = [levels_to_drop]
elif not isinstance(levels_to_drop, (list, pd.core.series.Series, pd.core.indexes.base.Index)):
raise InvalidParameterError(f"Parameter 'levels_to_drop' is of invalid type {type(levels_to_drop)}. Valid types: str, int, list or array-like of str or int, or NoneType.")
# Check that they're not trying to drop too many columns
existing_len = len(df.columns.names)
to_drop_len = len(levels_to_drop)
if to_drop_len >= existing_len:
raise InvalidParameterError(f"You tried to drop too many levels from the dataframe column index. The most levels you can drop is one less than however many exist. {existing_len} levels exist; you tried to drop {to_drop_len}.")
# Check that the levels they want to drop all exist
to_drop_set = set(levels_to_drop)
if all(isinstance(level, int) for level in to_drop_set):
existing_set_indices = set(range(len(df.columns.names)))
if not to_drop_set <= existing_set_indices:
raise InvalidParameterError(f"Some level indices in {levels_to_drop} do not exist in dataframe column index, so they cannot be dropped. Existing column level indices: {list(range(len(df.columns.names)))}")
else:
existing_set = set(df.columns.names)
if not to_drop_set <= existing_set:
raise InvalidParameterError(f"Some levels in {levels_to_drop} do not exist in dataframe column index, so they cannot be dropped. Existing column levels: {df.columns.names}")
df.columns = df.columns.droplevel(levels_to_drop)
num_dups = df.columns.duplicated(keep=False).sum()
if num_dups > 0:
warnings.warn(f"Due to dropping the specified levels, dataframe now has {num_dups} duplicated column headers.", DuplicateColumnHeaderWarning, stacklevel=2)
if flatten:
if df.columns.nlevels < 2:
warnings.warn("You tried to flatten an index that didn't have multiple levels, so we didn't actually change anything.", FlattenSingleIndexWarning, stacklevel=2)
return df
tuples = df.columns.to_flat_index() # Converts multiindex to an index of tuples
no_nan = tuples.map(lambda x: [item for item in x if pd.notnull(item)]) # Cut any NaNs out of tuples
joined = no_nan.map(lambda x: sep.join(x)) # Join each tuple
df.columns = joined
df.columns.name = "Name" # For consistency
return df
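# Example usage (illustrative; assumes a dataset instance `ds` whose proteomics
# table carries a two-level column index of Name and Database_ID):
#   prot = ds.get_proteomics()
#   dropped = ds.reduce_multiindex(prot, levels_to_drop="Database_ID")
#   flat = ds.reduce_multiindex(prot, flatten=True)  # ("A1BG", "NP_570602") -> "A1BG_NP_570602"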
def get_genotype_all_vars(self, mutations_genes, mutations_filter=None, show_location=True, mutation_hotspot=None):
"""Return a dataframe that has the mutation type and wheather or not it is a multiple mutation
Parameters:
mutations_genes (str, or list or array-like of str): The gene(s) to get mutation data for.
mutations_filter (list, optional): List of mutations to prioritize when filtering out multiple mutations, in order of priority.
show_location (bool, optional): Whether to include the Location column from the mutation dataframe. Defaults to True.
mutation_hotspot (optional): a list of hotspots
"""
#If they don't give us a filter, this is the default.
if mutations_filter == None:
mutations_filter = ["Deletion", 'Frame_Shift_Del', 'Frame_Shift_Ins', 'Nonsense_Mutation', 'Missense_Mutation_hotspot',
'Missense_Mutation', 'Amplification', 'In_Frame_Del', 'In_Frame_Ins', 'Wildtype']
#combine the cnv and mutations dataframe
combined = self.join_omics_to_mutations(omics_df_name="CNV", mutations_genes=mutations_genes, omics_genes=mutations_genes)
#drop the database index from ccrcc
if self.get_cancer_type() == "ccrcc":
cc = self.get_CNV()
drop = ['Database_ID']
combined = self.reduce_multiindex(df=combined, levels_to_drop=drop)
#If there are hotspot mutations, append 'hotspot' to the mutation type so that it's prioritized correctly
def mark_hotspot_locations(row):
#iterate through each location in the current row
mutations = []
for location in row[mutations_genes+'_Location']:
if location in mutation_hotspot: #if it's a hotspot mutation
#get the position of the location
position = row[mutations_genes+'_Location'].index(location)
#use that to change the correct mutation
mutations.append(row[mutations_genes+"_Mutation"][position] + "_hotspot")
else:
# get the position of the location
position = row[mutations_genes+'_Location'].index(location)
mutations.append(row[mutations_genes+"_Mutation"][position])
return mutations
if mutation_hotspot is not None:
combined['hotspot'] = combined.apply(mark_hotspot_locations, axis=1)
combined[mutations_genes+"_Mutation"] = combined['hotspot']
combined = combined.drop(columns='hotspot')
# Based on cnv make a new column with mutation type that includes deletions and amplifications
def add_del_and_amp(row):
if row[mutations_genes+"_CNV"] <= -.2:
mutations = row[mutations_genes+"_Mutation"] + ['Deletion']
locations = row[mutations_genes+'_Location']+['Deletion']
elif row[mutations_genes+"_CNV"] >= .2:
mutations = row[mutations_genes+"_Mutation"] + ['Amplification']
locations = row[mutations_genes+'_Location']+['Amplification']
else:
mutations = row[mutations_genes+"_Mutation"]
locations = row[mutations_genes+"_Location"]
return mutations, locations
combined['mutations'], combined['locations'] = zip(*combined.apply(add_del_and_amp, axis=1))
#now that we have the deletion and amplifications, we need to prioritize the correct mutations.
def sort(row):
sortedcol = []
location = []
chosen_indices = []
sample_mutations_list = row['mutations']
sample_locations_list = row['locations']
if len(sample_mutations_list) == 1: #if there's only one mutation in the list
sortedcol.append(sample_mutations_list[0])
location.append(sample_locations_list[0])
else:
for filter_val in mutations_filter: # This will start at the beginning of the filter list, thus filters earlier in the list are prioritized, like we want
if filter_val in sample_mutations_list:
chosen_indices = [index for index, value in enumerate(sample_mutations_list) if value == filter_val]
if len(chosen_indices) > 0: # We found at least one mutation from the filter to prioritize, so we don't need to worry about later values in the filter priority list
break
if len(chosen_indices) == 0: # None of the mutations for the sample were in the filter
for mutation in sample_mutations_list:
if mutation in truncations:
chosen_indices += [index for index, value in enumerate(sample_mutations_list) if value == mutation]
soonest_mutation = sample_mutations_list[chosen_indices[0]]
soonest_location = sample_locations_list[chosen_indices[0]]
chosen_indices.clear()
sortedcol.append(soonest_mutation)
location.append(soonest_location)
return pd.Series([sortedcol, location],index=['mutations', 'locations'])
df = combined.apply(sort, axis=1)
combined['Mutation'] = df['mutations']
combined['Location'] = df['locations']
#get a sample_status column that says if the gene has multiple mutations (including deletion and amplification)
def sample_status(row):
if len(row['mutations']) > 1: #if there's more than one mutation
if len(row['mutations']) == 2 and "Wildtype_Tumor" in row['mutations']: #one of the mutations might be a "wildtype tumor"
status ="Single_mutation"
elif len(row['mutations']) == 2 and "Wildtype_Normal" in row['mutations']:
status ="Single_mutation"
else:
status = "Multiple_mutation"
else:
if row["mutations"] == ["Wildtype_Normal"]:
status = "Wildtype_Normal"
elif row['mutations'] == ['Wildtype_Tumor']:
status = "Wildtype_Tumor"
else:
status = "Single_mutation"
return status
combined['Mutation_Status'] = combined.apply(sample_status, axis=1)
#drop all the unnecessary Columns
df = combined.drop(columns=[mutations_genes+"_CNV", mutations_genes+"_Mutation", mutations_genes+"_Location", mutations_genes+"_Mutation_Status", 'Sample_Status', 'mutations','locations'])
if show_location == False: df = df.drop(columns="Location") #if they don't want us to show the location, drop it
return df
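# Example usage (illustrative; gene, filter order, and hotspot strings are hypothetical):
#   geno = self.get_genotype_all_vars(
#       mutations_genes="TP53",
#       mutations_filter=["Deletion", "Nonsense_Mutation", "Missense_Mutation"],
#       mutation_hotspot=["p.R175H", "p.R273H"],
#   )
#   # One prioritized Mutation per sample, plus Location and Mutation_Status columns.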
# Join functions
def join_omics_to_omics(self, df1_name, df2_name, genes1=None, genes2=None):
"""Take specified column(s) from one omics dataframe, and join to specified columns(s) from another omics dataframe. Intersection (inner join) of indices is used.
Parameters:
df1_name (str): Name of first omics dataframe to select columns from.
df2_name (str): Name of second omics dataframe to select columns from.
genes1 (str, or list or array-like of str, optional): Gene(s) for column(s) to select from df1_name. str if one key, list or array-like of str if multiple. Default of None will select entire dataframe.
genes2 (str, or list or array-like of str, optional): Gene(s) for Column(s) to select from df2_name. str if one key, list or array-like of str if multiple. Default of None will select entire dataframe.
Returns:
pandas DataFrame: The selected columns from the two omics dataframes, joined into one dataframe.
"""
# Select the columns from each dataframe
selected1 = self._get_omics_cols(df1_name, genes1)
selected2 = self._get_omics_cols(df2_name, genes2)
# Make the multiindices the same
if selected1.columns.names != selected2.columns.names:
selected1.columns = add_index_levels(to=selected1.columns, source=selected2.columns)
selected2.columns = add_index_levels(to=selected2.columns, source=selected1.columns)
df = selected1.join(selected2, how='outer')
# Warn them about any NaNs that were inserted in the outer join
self._warn_inserted_nans(df1_name, df2_name, selected1.index, selected2.index)
df = df.sort_index() # Sort rows in ascending order
return df
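# Example usage (illustrative):
#   joined = self.join_omics_to_omics("proteomics", "transcriptomics", genes1="EGFR", genes2="EGFR")
#   # -> columns EGFR_proteomics and EGFR_transcriptomics, outer-joined on the sample index.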
def join_omics_to_mutations(self, omics_df_name, mutations_genes, omics_genes=None, mutations_filter=None, show_location=True):
"""Select all mutations for specified gene(s), and joins them to all or part of the given omics dataframe. Intersection (inner join) of indices is used. Each location or mutation cell contains a list, which contains the one or more location or mutation values corresponding to that sample for that gene, or a value indicating that the sample didn't have a mutation in that gene.
Parameters:
omics_df (str): Name of omics dataframe to join the mutation data to.
mutations_genes (str, or list or array-like of str): The gene(s) to get mutation data for. str if one gene, list or array-like of str if multiple.
omics_genes (str, or list or array-like of str, optional): Gene(s) to select from the omics dataframe. str if one gene, list or array-like of str if multiple. Default will select entire dataframe.
mutations_filter (list, optional): List of mutations to prioritize when filtering out multiple mutations, in order of priority. If none of the multiple mutations in a sample are included in filter_prefer, the function will automatically prioritize truncation over missense mutations, and then mutations earlier in the sequence over later mutations. Passing an empty list will cause this default hierarchy to be applied to all samples. Default parameter of None will cause no filtering to be done, and all mutation data will be included, in a list.
show_location (bool, optional): Whether to include the Location column from the mutation dataframe. Defaults to True.
Returns:
pandas DataFrame: The mutations for the specified gene, joined to all or part of the omics dataframe. Each location or mutation cell contains a list, which contains the one or more location or mutation values corresponding to that sample for that gene, or a value indicating that the sample didn't have a mutation in that gene.
"""
# Select the data from each dataframe
omics = self._get_omics_cols(omics_df_name, omics_genes)
mutations = self._get_genes_mutations(mutations_genes, mutations_filter)
mutations_were_filtered = mutations_filter is not None
joined = self._join_other_to_mutations(omics, mutations, mutations_were_filtered, show_location)
# Warn them about any NaNs that were inserted in the outer join
self._warn_inserted_nans(omics_df_name, "somatic_mutation", omics.index, mutations.index)
return joined
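# Example usage (illustrative):
#   joined = self.join_omics_to_mutations("proteomics", mutations_genes="KRAS", omics_genes="KRAS")
#   # Each KRAS_Mutation / KRAS_Location cell holds a list, e.g. ["Missense_Mutation"] / ["p.G12D"].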
def join_metadata_to_metadata(self, df1_name, df2_name, cols1=None, cols2=None):
"""Take specified column(s) from one metadata dataframe, and join to specified columns(s) from another metadata dataframe. Intersection (inner join) of indices is used.
Parameters:
df1_name (str): Name of first metadata dataframe to select columns from.
df2_name (str): Name of second metadata dataframe to select columns from.
cols1 (str, or list or array-like of str, optional): Column(s) to select from df1_name. str if one key, list or array-like of str if multiple. Default of None will select entire dataframe.
cols2 (str, or list or array-like of str, optional): Column(s) to select from df2_name. str if one key, list or array-like of str if multiple. Default of None will select entire dataframe.
Returns:
pandas DataFrame: The selected columns from the two metadata dataframes, joined into one dataframe.
"""
# Select the columns from each dataframe
selected1 = self._get_metadata_cols(df1_name, cols1)
selected2 = self._get_metadata_cols(df2_name, cols2)
df = selected1.join(selected2, how='outer', rsuffix='_from_' + df2_name) # Use suffix in case both dataframes have a particular column, such as Patient_ID
# Warn them about any NaNs that were inserted in the outer join
self._warn_inserted_nans(df1_name, df2_name, selected1.index, selected2.index)
df = df.sort_index() # Sort rows in ascending order
return df
def join_metadata_to_omics(self, metadata_df_name, omics_df_name, metadata_cols=None, omics_genes=None):
"""Joins columns from a metadata dataframe (clinical, derived_molecular, or experimental_design) to part or all of an omics dataframe. Intersection (inner join) of indices is used.
Parameters:
metadata_df_name (str): Name of metadata dataframe to select columns from.
omics_df_name (str): Name of omics dataframe to join the metadata columns to.
metadata_cols (str, or list or array-like of str, optional): Column(s) to select from the metadata dataframe. str if one gene, list or array-like of str if multiple. Default is None, which will select the entire metadata dataframe.
omics_genes (str, or list or array-like of str, optional): Gene(s) to select data for from the omics dataframe. str if one gene, list or array-like of str if multiple. Default is None, which will select entire dataframe.
Returns:
pandas DataFrame: The selected metadata columns, joined with all or part of the omics dataframe.
"""
# Select the columns from each dataframe
metadata_selected = self._get_metadata_cols(metadata_df_name, metadata_cols)
omics_selected = self._get_omics_cols(omics_df_name, omics_genes)
# Make the indices the same
if metadata_selected.columns.names != omics_selected.columns.names:
metadata_selected.columns = add_index_levels(to=metadata_selected.columns, source=omics_selected.columns)
joined = metadata_selected.join(omics_selected, how='outer')
# Warn them about any NaNs that were inserted in the outer join
self._warn_inserted_nans(metadata_df_name, omics_df_name, metadata_selected.index, omics_selected.index)
joined = joined.sort_index() # Sort rows in ascending order
return joined
def join_metadata_to_mutations(self, metadata_df_name, mutations_genes, metadata_cols=None, mutations_filter=None, show_location=True):
"""Select all mutations for specified gene(s), and joins them to all or part of the given metadata dataframe. Intersection (inner join) of indices is used. Each location or mutation cell contains a list, which contains the one or more location or mutation values corresponding to that sample for that gene, or a value indicating that the sample didn't have a mutation in that gene.
Parameters:
metadata_df_name (str): Name of metadata dataframe to join the mutation data to.
mutations_genes (str, or list or array-like of str): The gene(s) to get mutation data for. str if one gene, list or array-like of str if multiple.
metadata_cols (str, or list or array-like of str, optional): Gene(s) to select from the metadata dataframe. str if one gene, list or array-like of str if multiple. Default will select entire dataframe.
mutations_filter (list, optional): List of mutations to prioritize when filtering out multiple mutations, in order of priority. If none of the multiple mutations in a sample are included in filter_prefer, the function will automatically prioritize truncation over missense mutations, and then mutations earlier in the sequence over later mutations. Passing an empty list will cause this default hierarchy to be applied to all samples. Default parameter of None will cause no filtering to be done, and all mutation data will be included, in a list.
show_location (bool, optional): Whether to include the Location column from the mutation dataframe. Defaults to True.
Returns:
pandas DataFrame: The mutations for the specified gene, joined to all or part of the metadata dataframe. Each location or mutation cell contains a list, which contains the one or more location or mutation values corresponding to that sample for that gene, or a value indicating that the sample didn't have a mutation in that gene.
"""
# Select the data from each dataframe
metadata = self._get_metadata_cols(metadata_df_name, metadata_cols)
mutations = self._get_genes_mutations(mutations_genes, mutations_filter)
mutations_were_filtered = mutations_filter is not None
joined = self._join_other_to_mutations(metadata, mutations, mutations_were_filtered, show_location)
# Warn them about any NaNs that were inserted in the outer join
self._warn_inserted_nans(metadata_df_name, "somatic_mutation", metadata.index, mutations.index)
return joined
# "Private" methods
def _get_dataframe(self, name):
"""Check if a dataframe with the given name exists, and return a copy of it if it does.
Parameters:
name (str): The name of the dataframe to get.
Returns:
pandas DataFrame: A copy of the desired dataframe, if it exists in this dataset.
"""
if name in self._data.keys():
df = self._data[name]
return_df = df.copy(deep=True) # We copy it, with deep=True, so edits on their copy don't affect the master for this instance
return return_df
else:
raise DataframeNotIncludedError(f"{name} dataframe not included in the {self.get_cancer_type()} dataset.")
def _get_sample_status_map(self):
"""Get a pandas Series from the clinical dataframe, with sample ids as the index, and each sample's status (tumor or normal) as the values."""
clinical = self.get_clinical()
status_map = clinical["Sample_Tumor_Normal"]
status_map.name = "Sample_Status"
return status_map
def _check_df_valid(self, df_name, df_type):
"""Checks whether a dataframe with this name is valid for use as an omics or metadata dataframe in one of the utilties functions. Throws an InvalidParameterError if it isn't.
Parameters:
df_name (str): The dataframe name to check.
df_type (str): Which type of dataframe we're validating--either "omics" or "metadata"
Returns: None
"""
if not isinstance(df_name, str): # Check that they passed a str, since utilities functions used to directly accept dataframes
raise InvalidParameterError(f"Please pass a str for dataframe name parameter. You passed {df_name}, which is a {type(df_name)}")
if df_type == "omics":
valid_dfs = self._valid_omics_dfs
elif df_type == "metadata":
valid_dfs = self._valid_metadata_dfs
else:
raise CptacDevError(f"Invalid df_type of {df_type} passed to cptac.DataSet._check_df_valid.")
if df_name not in self._data.keys():
raise DataframeNotIncludedError(f"{df_name} dataframe not included in the {self.get_cancer_type()} dataset.")
elif df_name not in valid_dfs:
error_msg = f"{df_name} is not a valid {df_type} dataframe for this function in this dataset. Valid options:"
for valid_name in valid_dfs:
if valid_name in self._data.keys(): # Only print it if it's included in this dataset
error_msg = error_msg + '\n\t' + valid_name
raise InvalidParameterError(error_msg)
def _warn_inserted_nans(self, name1, name2, index1, index2):
"""Compare two indices from two dataframes, and warn the user that any rows with index values not in both indices were filled with NaNs in a join function.
Parameters:
name1 (str): Name of the dataframe the first index came from
name2 (str): Name of the dataframe the second index came from
index1 (pandas Index): First index to compare
index2 (pandas Index): Second index to compare
Returns: None
"""
unique1 = index1.difference(index2)
unique2 = index2.difference(index1)
self._issue_inserted_nans_warning(unique1, name2)
self._issue_inserted_nans_warning(unique2, name1)
def _issue_inserted_nans_warning(self, unique, other_name):
"""Issue a warning that the samples in unique were not found in the other_name dataframe, and those column(s) were filled with NaN.
Parameters:
unique (list or array-like of str): The samples that weren't in the other_name dataframe.
other_name (str): The name of the dataframe the samples weren't found in.
Returns: None
"""
if other_name == "somatic_mutation":
return # This will have separate fill warnings printed, because we use different fill values.
elif len(unique) > 0:
warnings.warn(f"{other_name} data was not found for the following samples, so {other_name} data columns were filled with NaN for these samples: {', '.join(unique)}", InsertedNanWarning, stacklevel=4)
def _get_omics_cols(self, omics_df_name, genes):
"""Based on a single gene, or a list or array-like of genes, select multiple columns from an omics dataframe, and return the selected columns as one dataframe.
Parameters:
omics_df_name (str): Name of omics dataframe to select column(s) from.
genes (str, or list or array-like of str): Gene(s) to use to select columns from omics_df. str if one gene, list or array-like if multiple. Passing None will select the entire omics dataframe.
Returns:
pandas DataFrame: The selected columns from the dataframe.
"""
# Check that they passed a valid omics df
self._check_df_valid(omics_df_name, "omics")
# Get our omics df, using _get_dataframe to catch invalid requests
omics_df = self._get_dataframe(omics_df_name)
# Process genes parameter
if isinstance(genes, str): # If it's a single gene, make it a list so we can treat everything the same
genes = [genes]
elif isinstance(genes, (list, pd.core.series.Series, pd.core.indexes.base.Index)): # If it's already a list or array-like, we're all good
pass
elif genes is None: # If it's the default of None, rename columns and return the entire dataframe
# Append the dataframe name to the end of each column header, to preserve info when we join dataframes.
if isinstance(omics_df.columns, pd.MultiIndex):
omics_df.columns = omics_df.columns.set_levels(omics_df.columns.levels[0] + '_' + omics_df_name, level=0)
else:
omics_df = omics_df.add_suffix('_' + omics_df_name)
return omics_df
else: # If it's none of those, they done messed up. Tell 'em.
raise InvalidParameterError("Genes parameter \n{}\nis of invalid type {}. Valid types: str, list or array-like of str, or NoneType.".format(genes, type(genes)))
genes = pd.Index(genes, name="Name")
if isinstance(omics_df.columns, pd.MultiIndex):
contained = genes.intersection(omics_df.columns.get_level_values("Name")).drop_duplicates() # Get the genes that actually exist in the dataframe's columns
mi_contained = omics_df.columns[omics_df.columns.get_level_values("Name").isin(genes)]
not_contained = genes.difference(contained).drop_duplicates() # So we can warn the user later
arrays = [not_contained] + [[np.nan] for i in range(omics_df.columns.nlevels - 1)]
mi_not_contained = pd.MultiIndex.from_product(arrays, names=omics_df.columns.names)
genes = mi_contained.union(mi_not_contained) # To use for reindexing the dataframe
else:
contained = genes.intersection(omics_df.columns).drop_duplicates() # Get the genes that actually exist in the dataframe's columns
not_contained = genes.difference(contained).drop_duplicates() # So we can warn the user later
selected = omics_df[contained]
selected = selected.reindex(columns=genes) # This will add the columns not included in the dataframe, and fill them with NaN.
# Warn the user about columns filled with NaN
if len(not_contained) > 0:
warnings.warn(f"The following columns were not found in the {omics_df_name} dataframe, so they were inserted into joined table, but filled with NaN: {', '.join(not_contained)}", ParameterWarning, stacklevel=3)
# Append dataframe name to end of each column header, to preserve info when we merge dataframes
if isinstance(omics_df.columns, pd.MultiIndex):
selected.columns = selected.columns.set_levels(selected.columns.levels[0] + '_' + omics_df_name, level=0)
else:
selected = selected.add_suffix('_' + omics_df_name)
return selected
def _get_metadata_cols(self, df_name, cols):
"""Select a single column or several columns from a metadata dataframe.
Parameters:
df_name (str): The name of the metadata dataframe to select the column(s) from.
cols (str, or list or array-like of str): The column(s) to select from the dataframe. str if single, list or array-like of str if multiple. Passing None will select the entire dataframe.
Returns:
pandas DataFrame: The specified columns from the given dataframe.
"""
# Check that they passed a valid metadata df
self._check_df_valid(df_name, "metadata")
# Get our dataframe, using _get_dataframe to catch invalid requests
df = self._get_dataframe(df_name)
# Process genes parameter
if isinstance(cols, str): # If it's a single column, make it a list so we can treat everything the same
cols = [cols]
elif isinstance(cols, (list, pd.core.series.Series, pd.core.indexes.base.Index)): # If it's already a list or array-like, we're all good
pass
elif cols is None: # If it's the default of None, return the entire dataframe
return df
else: # If it's none of those, they done messed up. Tell 'em.
raise InvalidParameterError("Columns parameter {} is of invalid type {}. Valid types: str, or list or array-like of str.".format(cols, type(cols)))
cols = pd.Index(cols).drop_duplicates()
# Check that they didn't pass any invalid columns
not_contained = cols.difference(df.columns)
if len(not_contained) > 0:
raise InvalidParameterError(f'The following columns were not found in the {df_name} dataframe: {", ".join(not_contained)}')
selected = df[cols]
return selected
def _get_genes_mutations(self, genes, mutations_filter):
"""Gets all the mutations for one or multiple genes, for all patients.
Parameters:
genes (str, or list or array-like of str): The gene(s) to grab mutations for. str if one, list or array-like of str if multiple.
mutations_filter (list, optional): List of mutations to prioritize when filtering out multiple mutations, in order of priority. If none of the multiple mutations in a sample are included in filter_prefer, the function will automatically prioritize truncation over missense mutations, and then mutations earlier in the sequence over later mutations. Passing an empty list will cause this default hierarchy to be applied to all samples. Passing None will cause no filtering to be done, and all mutation data will be included, in a list.
Returns:
pandas DataFrame: The mutations in each patient for the specified gene(s).
"""
somatic_mutation = self.get_somatic_mutation()
# Process genes parameter
if isinstance(genes, str): # If it's a single gene, make it a list so we can treat everything the same
genes = [genes]
elif isinstance(genes, (list, pd.core.series.Series, pd.core.indexes.base.Index)): # If it's already a list or array-like, we're all good
pass
else: # If it's neither of those, they done messed up. Tell 'em.
raise InvalidParameterError("Genes parameter {} is of invalid type {}. Valid types: str, or list or array-like of str.".format(genes, type(genes)))
# Set some column names for use later
gene_col = "Gene"
mutation_col = "Mutation"
location_col = "Location"
mutation_status_col = "Mutation_Status"
# Check that they didn't make any typos in specifying filter values
invalid_filter = False
if mutations_filter is not None:
for filter_val in mutations_filter:
if (filter_val not in somatic_mutation[mutation_col].values) and (filter_val not in somatic_mutation[location_col].values):
raise InvalidParameterError(f"Filter value {filter_val} does not exist in the mutations dataframe for this dataset. Check for typos and existence. Merge aborted.")
# Create an empty dataframe, which we'll fill with the columns we select using our genes, and then return.
df = pd.DataFrame(index=somatic_mutation.index.copy().drop_duplicates())
genes = pd.Series(genes)
# %%
import os
import pandas as pd
import numpy as np
from fcutils.plotting.colors import colorMap
from analysis.misc.paths import cellfinder_cells_folder, cellfinder_out_dir, injections_folder
from analysis.anatomy.utils import *
# %%
import matplotlib.pyplot as plt
for i in range(100):
color = colorMap(i, name='YlOrBr', vmin=0, vmax=100)
# plt.scatter(0, i, c=color, s=20)
print(color)
# %%
# Merge highest projecting regions in a summary dataframe
cell_files = dict(
# cc_136_0 = ('GRN', 'right', 'CC_136_0_ch0_cells.h5'),
# cc_136_1 = ('GRN', 'right', 'CC_136_1_ch0_cells.h5'),
cc_134_1 = ('SCm', 'left', 'CC_134_1_ch1_cells.h5'),
cc_134_2 = ('SCm', 'left', 'CC_134_2_ch1_cells.h5'),
)
data = {}
df = pd.DataFrame()
ipsidf, contradf = pd.DataFrame(), pd.DataFrame()
for mouse, (inj, hemi, path) in cell_files.items():
all_cells = pd.read_hdf(os.path.join(cellfinder_cells_folder, path), key='hdf')
all_cells = all_cells.loc[all_cells.region != inj]
n_cells = len(all_cells)
threshold = 2
ipsi = all_cells.loc[all_cells.hemisphere == hemi]
ipsi = (ipsi.groupby('region').count().sort_values('region_name')[::-1]/ n_cells) * 100
ipsi = ipsi.loc[ipsi.x > threshold].x.rename(f'{mouse}_{inj}_ipsi').round(2)
contra = all_cells.loc[all_cells.hemisphere != hemi]
contra = (contra.groupby('region').count().sort_values('region_name')[::-1]/ n_cells) * 100
contra = contra.loc[contra.x > threshold].x.rename(f'{mouse}_{inj}_contra').round(2)
df = pd.concat([df, ipsi, contra], axis=1).sort_index()
ipsidf = pd.concat([ipsidf, ipsi], axis=1).sort_index()
contradf = pd.concat([contradf, contra], axis=1).sort_index()
# print(df.to_markdown())
# %%
import networkx as nx
ipsi = ipsidf.sum(axis=1)/2
contra = contradf.sum(axis=1)/2
edges = []
regions = list(df.index)
for reg in regions:
# try:
# edges.append((f'{reg}_r', 'SC_r', {'weight':ipsi[reg]}))
# except:
# pass
try:
edges.append((f'{reg}_r', 'SC_l', {'weight':contra[reg]}))
except:
pass
# try:
# edges.append((f'{reg}_l', 'SC_r', {'weight':contra[reg]}))
# except:
# pass
try:
edges.append((f'{reg}_l', 'SC_l', {'weight':ipsi[reg]}))
except:
pass
# edges.append((f'{reg}_l', f'{reg}_r', {'weight':1}))
G=nx.Graph()
G.add_edges_from(edges)
nx.draw(G, with_labels=True, pos=nx.spring_layout(G))
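# Possible refinement (sketch, same G as above): scale edge width by projection
# strength so the strongest inputs to SC stand out.
# weights = np.array([d['weight'] for _, _, d in G.edges(data=True)])
# nx.draw(G, with_labels=True, pos=nx.spring_layout(G), width=5 * weights / weights.max())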
# %%
cell_files = dict(
cc_136_0 = ('GRN', 'right', 'CC_136_0_ch0_cells.h5'),
cc_136_1 = ('GRN', 'right', 'CC_136_1_ch0_cells.h5'),
# cc_134_1 = ('SCm', 'left', 'CC_134_1_ch1_cells.h5'),
# cc_134_2 = ('SCm', 'left', 'CC_134_2_ch1_cells.h5'),
)
data = {}
df = pd.DataFrame()
ipsidf, contradf = pd.DataFrame(), pd.DataFrame()
for mouse, (inj, hemi, path) in cell_files.items():
all_cells = pd.read_hdf(os.path.join(cellfinder_cells_folder, path), key='hdf')
all_cells = all_cells.loc[all_cells.region != inj]
n_cells = len(all_cells)
ipsi = all_cells.loc[all_cells.hemisphere == hemi]
ipsi = (ipsi.groupby('region').count().sort_values('region_name')[::-1]/ n_cells) * 100
ipsi = ipsi.loc[ipsi.x > threshold].x.rename(f'{mouse}_{inj}_ipsi').round(2)
contra = all_cells.loc[all_cells.hemisphere != hemi]
contra = (contra.groupby('region').count().sort_values('region_name')[::-1]/ n_cells) * 100
contra = contra.loc[contra.x > threshold].x.rename(f'{mouse}_{inj}_contra').round(2)
df = pd.concat([df, ipsi, contra], axis=1)
import pandas as pd
from pymongo import MongoClient
class KAnonymizer:
names = (
'age',
'region',
'gender',
'job',
'religion',
'language',
'marital',
'sa',
'rowno'
)
# some fields are categorical and will require special treatment
categorical = set((
'region',
'gender',
'job',
'religion',
'language',
'marital',
'sa',
))
def configure(self):
conn = MongoClient()
# database
db = conn.ppdp
# Collection holding the raw (unsanitized) records
collection = db.unsanitizeddata
df = pd.DataFrame(list(collection.find({}, {'_id':0})))
for name in self.categorical:
df[name] = df[name].astype('category')
return df
def get_spans(self, df, partition, scale=None):
"""
:param df: the dataframe for which to calculate the spans
:param partition: the partition for which to calculate the spans
:param scale: if given, the spans of each column will be divided
by the value in `scale` for that column
: returns: The spans of all columns in the partition
"""
spans = {}
for column in df.columns:
if column in self.categorical:
processed = pd.Series()
for index, value in df[column][partition].items():
processed.at[index] = self.remov_punct(str(value))
span = len(processed.unique())
else:
processed = pd.Series()
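# Stand-alone sketch of the span idea (illustrative data, not part of the class):
# for a numeric column the span is max - min; for a categorical column it is the
# number of distinct (cleaned) values.
# example = pd.DataFrame({'age': [25, 40, 31], 'gender': ['m', 'f', 'f']})
# numeric_span = example['age'].max() - example['age'].min()    # 15
# categorical_span = example['gender'].nunique()                # 2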
import itertools
import logging
import math
from datetime import datetime, timedelta, timezone
import boto3
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
import awswrangler as wr
from ._utils import ensure_data_types, get_df_list
logging.getLogger("awswrangler").setLevel(logging.DEBUG)
@pytest.mark.parametrize("partition_cols", [None, ["c2"], ["c1", "c2"]])
def test_parquet_metadata_partitions_dataset(path, partition_cols):
df = pd.DataFrame({"c0": [0, 1, 2], "c1": [3, 4, 5], "c2": [6, 7, 8]})
wr.s3.to_parquet(df=df, path=path, dataset=True, partition_cols=partition_cols)
columns_types, partitions_types = wr.s3.read_parquet_metadata(path=path, dataset=True)
partitions_types = partitions_types if partitions_types is not None else {}
assert len(columns_types) + len(partitions_types) == len(df.columns)
assert columns_types.get("c0") == "bigint"
assert (columns_types.get("c1") == "bigint") or (partitions_types.get("c1") == "string")
assert (columns_types.get("c1") == "bigint") or (partitions_types.get("c1") == "string")
@pytest.mark.parametrize("partition_cols", [None, ["c2"], ["value", "c2"]])
def test_parquet_cast_string_dataset(path, partition_cols):
df = pd.DataFrame({"id": [1, 2, 3], "value": ["foo", "boo", "bar"], "c2": [4, 5, 6], "c3": [7.0, 8.0, 9.0]})
wr.s3.to_parquet(df, path, dataset=True, partition_cols=partition_cols, dtype={"id": "string", "c3": "string"})
df2 = wr.s3.read_parquet(path, dataset=True).sort_values("id", ignore_index=True)
assert str(df2.id.dtypes) == "string"
assert str(df2.c3.dtypes) == "string"
assert df.shape == df2.shape
for col, row in tuple(itertools.product(df.columns, range(3))):
assert str(df[col].iloc[row]) == str(df2[col].iloc[row])
@pytest.mark.parametrize("use_threads", [True, False, 2])
def test_read_parquet_filter_partitions(path, use_threads):
df = pd.DataFrame({"c0": [0, 1, 2], "c1": [0, 1, 2], "c2": [0, 0, 1]})
wr.s3.to_parquet(df, path, dataset=True, partition_cols=["c1", "c2"], use_threads=use_threads)
df2 = wr.s3.read_parquet(
path, dataset=True, partition_filter=lambda x: True if x["c1"] == "0" else False, use_threads=use_threads
)
assert df2.shape == (1, 3)
assert df2.c0.iloc[0] == 0
assert df2.c1.astype(int).iloc[0] == 0
assert df2.c2.astype(int).iloc[0] == 0
df2 = wr.s3.read_parquet(
path,
dataset=True,
partition_filter=lambda x: True if x["c1"] == "1" and x["c2"] == "0" else False,
use_threads=use_threads,
)
assert df2.shape == (1, 3)
assert df2.c0.iloc[0] == 1
assert df2.c1.astype(int).iloc[0] == 1
assert df2.c2.astype(int).iloc[0] == 0
df2 = wr.s3.read_parquet(
path, dataset=True, partition_filter=lambda x: True if x["c2"] == "0" else False, use_threads=use_threads
)
assert df2.shape == (2, 3)
assert df2.c0.astype(int).sum() == 1
assert df2.c1.astype(int).sum() == 1
assert df2.c2.astype(int).sum() == 0
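# Note: partition values reach partition_filter as strings taken from the S3 key
# path, which is why the lambdas above compare against "0" and "1" rather than ints.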
def test_parquet(path):
df_file = pd.DataFrame({"id": [1, 2, 3]})
path_file = f"{path}test_parquet_file.parquet"
df_dataset = pd.DataFrame({"id": [1, 2, 3], "partition": ["A", "A", "B"]})
df_dataset["partition"] = df_dataset["partition"].astype("category")
path_dataset = f"{path}test_parquet_dataset"
with pytest.raises(wr.exceptions.InvalidArgumentCombination):
wr.s3.to_parquet(df=df_file, path=path_file, mode="append")
with pytest.raises(wr.exceptions.InvalidCompression):
wr.s3.to_parquet(df=df_file, path=path_file, compression="WRONG")
with pytest.raises(wr.exceptions.InvalidArgumentCombination):
wr.s3.to_parquet(df=df_dataset, path=path_dataset, partition_cols=["col2"])
with pytest.raises(wr.exceptions.InvalidArgumentCombination):
wr.s3.to_parquet(df=df_dataset, path=path_dataset, description="foo")
with pytest.raises(wr.exceptions.InvalidArgumentValue):
wr.s3.to_parquet(df=df_dataset, path=path_dataset, partition_cols=["col2"], dataset=True, mode="WRONG")
wr.s3.to_parquet(df=df_file, path=path_file)
assert len(wr.s3.read_parquet(path=path_file, use_threads=True, boto3_session=None).index) == 3
assert len(wr.s3.read_parquet(path=[path_file], use_threads=False, boto3_session=boto3.DEFAULT_SESSION).index) == 3
paths = wr.s3.to_parquet(df=df_dataset, path=path_dataset, dataset=True)["paths"]
with pytest.raises(wr.exceptions.InvalidArgument):
assert wr.s3.read_parquet(path=paths, dataset=True)
assert len(wr.s3.read_parquet(path=path_dataset, use_threads=True, boto3_session=boto3.DEFAULT_SESSION).index) == 3
dataset_paths = wr.s3.to_parquet(
df=df_dataset, path=path_dataset, dataset=True, partition_cols=["partition"], mode="overwrite"
)["paths"]
assert len(wr.s3.read_parquet(path=path_dataset, use_threads=True, boto3_session=None).index) == 3
assert len(wr.s3.read_parquet(path=dataset_paths, use_threads=True).index) == 3
assert len(wr.s3.read_parquet(path=path_dataset, dataset=True, use_threads=True).index) == 3
wr.s3.to_parquet(df=df_dataset, path=path_dataset, dataset=True, partition_cols=["partition"], mode="overwrite")
wr.s3.to_parquet(
df=df_dataset, path=path_dataset, dataset=True, partition_cols=["partition"], mode="overwrite_partitions"
)
def test_parquet_validate_schema(path):
df = pd.DataFrame({"id": [1, 2, 3]})
#!/usr/bin/env python3
#
# Create model outputs with P.1203 software.
#
# Copyright 2018 <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
from itu_p1203.p1203Pv import P1203Pv
from itu_p1203.p1203Pq import P1203Pq
import pandas as pd
import yaml
import argparse
import json
import numpy as np
from tqdm import tqdm
tqdm.pandas()
DB_IDS = ['TR04', 'TR06', 'VL04', 'VL13']
ROOT_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
def parse_mode3_features(pvs_id, features_mode3_path):
pvs_features = pd.read_csv(
os.path.join(
features_mode3_path,
pvs_id + '.csv')
)
return pvs_features
def calc_mode0_O22(row):
pvs_features = (int(row["coding_res"]),
int(row["display_res"]),
float(row["bitrate_kbps_segment_size"]),
int(row["framerate"]))
return P1203Pv.video_model_function_mode0(*pvs_features)
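# Illustrative call (hypothetical feature values; resolution fields assumed to be
# total pixel counts, matching the feature CSVs used below):
#   calc_mode0_O22({"coding_res": 1920 * 1080, "display_res": 1920 * 1080,
#                   "bitrate_kbps_segment_size": 4500, "framerate": 24})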
def calc_mode1_O22(row):
pvs_features = (int(row["coding_res"]),
int(row["display_res"]),
float(row["bitrate_kbps_segment_size"]),
int(row["framerate"]),
[],
float(row["iframe_ratio"]))
return P1203Pv.video_model_function_mode1(*pvs_features)
def calc_mode2_O22(row):
# check if fallback is needed
has_bitstream_data = "BS_TwoPercentQP1" in row.keys() and isinstance(row["BS_TwoPercentQP1"], str)
try:
avg_qp = eval(row["BS_TwoPercentQP1"])
except Exception as e:
has_bitstream_data = False
if has_bitstream_data:
frame_types = eval(row["types"])
frames = []
for ftyp, qp_values in zip(frame_types, avg_qp):
frames.append({
'type': ftyp,
'qpValues': [qp_values]
})
pvs_features = (
int(row["coding_res"]),
int(row["display_res"]),
int(row["framerate"]),
frames,
None,
[]
)
return P1203Pv.video_model_function_mode2(*pvs_features)
else:
# tqdm.write("Switching back to Mode 1 for PVS {}, sample index {}".format(row["pvs_id"], row["sample_index"]))
return None
def calc_mode3_O22(row):
frame_types = eval(row["types"])
avg_qp = eval(row["BS_Av_QPBB"])
frames = []
for ftyp, qp_values in zip(frame_types, avg_qp):
frames.append({
'type': ftyp,
'qpValues': [qp_values]
})
pvs_features = (
int(row["coding_res"]),
int(row["display_res"]),
float(row["framerate"]),
frames,
None,
[]
)
return P1203Pv.video_model_function_mode3(*pvs_features)
def calc_O46(O21, O22, device, stall_vec=[]):
l_buff = []
p_buff = []
if stall_vec:
for l, p in stall_vec:
l_buff.append(l)
p_buff.append(p)
pq_fun = P1203Pq(O21, O22, l_buff, p_buff, device)
return pq_fun.calculate()
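# Minimal usage sketch for calc_O46 (illustrative only: the O21/O22 scores and the
# stalling events below are made-up placeholders, not values from any test database).
# O21/O22 are per-sample audio/video quality scores and stall_vec holds
# [timestamp, duration] pairs, matching how main() assembles them from the configs.
def _example_calc_O46():
    o21 = [4.5] * 60                  # hypothetical flat audio quality, 60 samples
    o22 = [3.8] * 60                  # hypothetical flat video quality, 60 samples
    stalls = [[0, 2.0], [30.0, 1.5]]  # initial loading plus one mid-stream stall
    result = calc_O46(o21, o22, device="pc", stall_vec=stalls)
    return result["O46"], result["O23"], result["O34"], result["O35"]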
def main(args):
db_data = pd.DataFrame()
O21_path = os.path.join(ROOT_PATH, 'data', 'O21.csv')
stalling_dir_path = os.path.join(ROOT_PATH, 'data', 'test_configs')
features_mode0_path = os.path.join(ROOT_PATH, 'data', 'features', 'features_mode0.csv')
features_mode1_path = os.path.join(ROOT_PATH, 'data', 'features', 'features_mode1.csv')
features_mode2_path = os.path.join(ROOT_PATH, 'data', 'features', 'features_mode2')
features_mode3_path = os.path.join(ROOT_PATH, 'data', 'features', 'features_mode3')
# read in data
# O21
O21_data = pd.read_csv(O21_path)
# stalling
yaml_per_db = {}
for db_id in DB_IDS:
        yaml_per_db[db_id] = yaml.safe_load(
            open(os.path.join(stalling_dir_path, db_id + '-config.yaml')))
# read in from hdf-files if they exist, otherwise run pv-calc
if args.create_hdfs:
print('Calculating O22 scores for all modes ...')
# mode0 features
print('Reading mode 0 features ...')
mode0_features = pd.read_csv(features_mode0_path)
# mode1 features
print('Reading mode 1 features ...')
mode1_features = pd.read_csv(features_mode1_path)
# mode2 features
print('Reading mode 2 features (may take a while) ...')
pvss = mode1_features["pvs_id"].unique()
list_of_dataframes_for_mode2 = []
for pvs_id in tqdm(pvss):
pvs_data_all = pd.read_csv(os.path.join(features_mode2_path, pvs_id + '.csv'))
if "BS_TwoPercentQP1" in pvs_data_all.keys():
list_of_dataframes_for_mode2.append(
pvs_data_all[[
"pvs_id", "sample_index", "framerate", "types", "sizes", "quant", "coding_res", "display_res", "BS_TwoPercentQP1"
]].copy()
)
else:
# no bitstream data available
list_of_dataframes_for_mode2.append(
pvs_data_all[[
"pvs_id", "sample_index", "framerate", "types", "sizes", "coding_res", "display_res"
]].copy()
)
mode2_features = pd.concat(list_of_dataframes_for_mode2, ignore_index=True)
# mode3 features
print('Reading mode 3 features (may take a while) ...')
pvss = mode1_features["pvs_id"].unique()
list_of_dataframes_for_mode3 = []
for pvs_id in tqdm(pvss):
pvs_data_all = pd.read_csv(os.path.join(features_mode3_path, pvs_id + '.csv'))
list_of_dataframes_for_mode3.append(
pvs_data_all[[
"pvs_id", "sample_index", "framerate", "types", "quant", "coding_res", "display_res", "BS_Av_QPBB"
]].copy()
)
mode3_features = pd.concat(list_of_dataframes_for_mode3, ignore_index=True)
# calc Pv
# mode0
print('Calculating mode 0 Pv')
mode0_features['O22'] = mode0_features.progress_apply(calc_mode0_O22, axis=1)
# mode1
print('Calculating mode 1 Pv')
mode1_features['O22'] = mode1_features.progress_apply(calc_mode1_O22, axis=1)
# mode2
print('Calculating mode 2 Pv')
mode2_features['O22'] = mode2_features.progress_apply(calc_mode2_O22, axis=1)
missing_values_indices = np.where(pd.isnull(mode2_features.O22))[0]
# go through each sample index that has no value yet
print('Re-calculating mode 2 Pv missing values')
for idx in tqdm(missing_values_indices):
# get required features from mode 1, ...
pvs_id = mode2_features.iloc[idx]['pvs_id']
sample_index = mode2_features.iloc[idx]['sample_index']
row = mode1_features.loc[(mode1_features["pvs_id"] == pvs_id) & (mode1_features["sample_index"] == sample_index)]
# and calculate Mode 1 score instead
mode1_O22 = calc_mode1_O22(row)
# overwrite data in Mode 2 data frame
# https://stackoverflow.com/a/43968774/435093
mode2_features.iat[idx, mode2_features.columns.get_loc("O22")] = mode1_O22
# mode3
print('Calculating mode 3 Pv')
mode3_features['O22'] = mode3_features.progress_apply(calc_mode3_O22, axis=1)
mode0_features.to_hdf(os.path.join(ROOT_PATH, "data_original", "save.h5"), key='mode0')
mode1_features.to_hdf(os.path.join(ROOT_PATH, "data_original", "save.h5"), key='mode1')
mode2_features.to_hdf(os.path.join(ROOT_PATH, "data_original", "save.h5"), key='mode2')
mode3_features.to_hdf(os.path.join(ROOT_PATH, "data_original", "save.h5"), key='mode3')
else:
if os.path.isfile(os.path.join(ROOT_PATH, "data_original", "save.h5")):
mode0_features = pd.read_hdf(os.path.join(ROOT_PATH, "data_original", "save.h5"), key='mode0')
mode1_features = pd.read_hdf(os.path.join(ROOT_PATH, "data_original", "save.h5"), key='mode1')
mode2_features = pd.read_hdf(os.path.join(ROOT_PATH, "data_original", "save.h5"), key='mode2')
mode3_features = pd.read_hdf(os.path.join(ROOT_PATH, "data_original", "save.h5"), key='mode3')
else:
print('No h5 file found, please rerun with -c flag')
quit()
# parse buffering data from -config.yaml
stalling_per_hrc = {}
for db_id in yaml_per_db:
for hrc_id in yaml_per_db[db_id]['hrcList']:
buffts = 0
buff_events = [] # ts, dur
for (event, ts) in yaml_per_db[db_id]['hrcList'][hrc_id]["eventList"]:
if event in ['stall', 'buffering']:
buff_events.append([buffts, ts])
else:
buffts += ts
stalling_per_hrc[hrc_id] = buff_events
pvss = mode1_features["pvs_id"].unique()
per_pvs_data = {}
print('Creating O21/22-json files ...')
O22_tocsv = pd.DataFrame(columns=['pvs_id', 'mode', 'sample_index', 'O22'])
list_to_concat = []
for pvs_id in tqdm(pvss):
database_id = pvs_id.split('_')[0]
if database_id not in DB_IDS:
print("WARNING: Saved PVS {} not in required DBs".format(pvs_id))
continue
src_id = pvs_id.split('_')[1]
hrc_id = pvs_id.split('_')[2]
per_pvs_data[pvs_id] = {}
per_pvs_data[pvs_id]['O21'] = O21_data[O21_data["pvs_id"] ==
pvs_id].sort_values(by=['sample_index'])["O21"].tolist()
per_pvs_data[pvs_id]['O22'] = {}
per_pvs_data[pvs_id]['O22']['mode0'] = \
mode0_features[mode0_features["pvs_id"] == pvs_id].sort_values(by=['sample_index'])["O22"].tolist()
per_pvs_data[pvs_id]['O22']['mode1'] = \
mode1_features[mode1_features["pvs_id"] == pvs_id].sort_values(by=['sample_index'])["O22"].tolist()
per_pvs_data[pvs_id]['O22']['mode2'] = \
mode2_features[mode2_features["pvs_id"] == pvs_id].sort_values(by=['sample_index'])["O22"].tolist()
per_pvs_data[pvs_id]['O22']['mode3'] = \
mode3_features[mode3_features["pvs_id"] == pvs_id].sort_values(by=['sample_index'])["O22"].tolist()
per_pvs_data[pvs_id]['I23'] = stalling_per_hrc[hrc_id]
per_pvs_data[pvs_id]['IGen'] = {}
per_pvs_data[pvs_id]['IGen']['displaySize'] = str(int(
yaml_per_db[database_id]["displayHeight"] * 1.7777778)) + 'x' + str(yaml_per_db[database_id]["displayHeight"])
# this should be inserted below when producing the o46 scores
per_pvs_data[pvs_id]['IGen']['device'] = ''
# write .json outputs from pv (O21, O22, i23, igen)
for mode_id in ['mode0', 'mode1', 'mode2', 'mode3']:
csv_index = 0
for o22_sample in per_pvs_data[pvs_id]['O22'][mode_id]:
csv_row = {
'pvs_id': pvs_id, 'mode': mode_id[-1], 'sample_index': csv_index, 'O22': o22_sample}
csv_row_df = pd.DataFrame(csv_row, index=[0])
list_to_concat.append(csv_row_df)
csv_index += 1
os.makedirs(os.path.join(ROOT_PATH, 'data', mode_id), exist_ok=True)
json_filename = os.path.join(
ROOT_PATH,
'data',
mode_id,
'O21O22-{}.json'.format(pvs_id)
)
#- `data/mode0/O21O22-TR01_SRCxxx_HRCxxx.json`
data_to_write = per_pvs_data[pvs_id].copy()
data_to_write['O22'] = per_pvs_data[pvs_id]['O22'][mode_id]
with open(json_filename, 'w') as outfile:
json.dump(data_to_write, outfile)
print('Writing O22 CSV file ...')
O22_tocsv = pd.concat(list_to_concat, ignore_index=True)
O22_tocsv.to_csv(
os.path.join(ROOT_PATH, 'data', 'O22.csv'),
columns=['pvs_id', 'mode', 'sample_index', 'O22'],
index=False
)
# calc pq for each .json.
print('Calculating Pq-scores ...')
O46_tocsv = pd.DataFrame(columns=['pvs_id', 'mode', 'context', 'O46'])
list_to_concat = []
for pvs_id in tqdm(pvss):
pvs_data = per_pvs_data[pvs_id]
for device in ['mobile', 'pc']:
for curr_mode in pvs_data['O22']:
O46_vals = calc_O46(
pvs_data['O21'], pvs_data['O22'][curr_mode], device, pvs_data['I23'])
# O21, O22, O23, O34, O35, O46, mode
O46_output_data = {}
O46_output_data['O23'] = O46_vals['O23']
O46_output_data['O34'] = O46_vals['O34']
O46_output_data['O35'] = O46_vals['O35']
O46_output_data['O46'] = O46_vals['O46']
O46_output_data['O22'] = pvs_data['O22'][curr_mode]
O46_output_data['O21'] = pvs_data['O21']
O46_output_data['mode'] = curr_mode[-1]
# write o46-jsons
#- `data/mode0/O46-TR01_SRCxxx_HRCxxx-pc.json`
csv_row = {
'pvs_id': pvs_id,
'mode': curr_mode[-1],
'context': device,
'O46': O46_vals['O46']
}
csv_row_df = pd.DataFrame(csv_row, index=[0])
list_to_concat.append(csv_row_df)
json_filename = os.path.join(
ROOT_PATH,
'data',
curr_mode,
"046-{pvs_id}-{device}.json".format(**locals())
)
with open(json_filename, 'w') as outfile:
json.dump(O46_output_data, outfile)
print('Writing O46 CSV file ...')
O46_tocsv = | pd.concat(list_to_concat, ignore_index=True) | pandas.concat |
import pandas as pd
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from python.ftm.plot_events import get_refuel_events_from_events_csv, get_total_walking_distances_from_events_csv, \
get_parking_events_from_events_csv
from python.ftm.util import get_run_dir, get_latest_run, get_iteration_dir, get_last_iteration
def plot_iteration(iteration, row, col, y_min_value, y_max_value):
# Setup directory
working_dir = get_iteration_dir(run_dir, iteration)
print("-------------- ITERATION", iteration, "--------------------")
print("Working on path: ", working_dir)
df_events_refueling = get_refuel_events_from_events_csv(working_dir + "events.csv")
size_before_filtering = len(df_events_refueling.index)
df_events_refueling = df_events_refueling[df_events_refueling.fuel > 0]
print('Refuel_Event_Analysis: Ignored ', size_before_filtering - len(df_events_refueling.index), ' events with 0 fuel')
print("Total of ", len(df_events_refueling.index), "Refuel Events")
# Group by charger
x = pd.Series()
y = | pd.Series() | pandas.Series |
"""
Copyright 2022 HSBC Global Asset Management (Deutschland) GmbH
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pandas as pd
import pytest
import pyratings as rtg
from tests import conftest
@pytest.fixture(scope="session")
def rtg_inputs_longterm():
return pd.DataFrame(
data={
"rtg_sp": ["AAA", "AA-", "AA+", "BB-", "C", np.nan, "BBB+", "AA"],
"rtg_moody": ["Aa1", "Aa3", "Aa2", "Ba3", "Ca", np.nan, np.nan, "Aa2"],
"rtg_fitch": ["AA-", np.nan, "AA-", "B+", "C", np.nan, np.nan, "AA"],
}
)
@pytest.fixture(scope="session")
def rtg_inputs_shortterm():
return pd.DataFrame(
data={
"rtg_sp": ["A-1", "A-3", "A-1+", "D", "B", np.nan, "A-2", "A-3"],
"rtg_moody": ["P-2", "NP", "P-1", "NP", "P-3", np.nan, np.nan, "P-3"],
"rtg_fitch": ["F1", np.nan, "F1", "F3", "F3", np.nan, np.nan, "F3"],
}
)
def test_get_best_rating_longterm_with_explicit_rating_provider(rtg_inputs_longterm):
"""Test computation of best ratings on a security (line-by-line) basis."""
actual = rtg.get_best_ratings(
rtg_inputs_longterm,
rating_provider_input=["SP", "Moody", "Fitch"],
tenor="long-term",
)
expectations = pd.Series(
data=["AAA", "AA-", "AA+", "BB-", "CC", np.nan, "BBB+", "AA"], name="best_rtg"
)
pd.testing.assert_series_equal(actual, expectations)
def test_get_best_rating_longterm_with_inferring_rating_provider(rtg_inputs_longterm):
"""Test computation of best ratings on a security (line-by-line) basis."""
actual = rtg.get_best_ratings(rtg_inputs_longterm, tenor="long-term")
expectations = pd.Series(
data=["AAA", "AA-", "AA+", "BB-", "CC", np.nan, "BBB+", "AA"], name="best_rtg"
)
pd.testing.assert_series_equal(actual, expectations)
def test_get_best_rating_shortterm_with_explicit_rating_provider(rtg_inputs_shortterm):
"""Test computation of best ratings on a security (line-by-line) basis."""
actual = rtg.get_best_ratings(
rtg_inputs_shortterm,
rating_provider_input=["SP", "Moody", "Fitch"],
tenor="short-term",
)
expectations = pd.Series(
data=["A-1", "A-3", "A-1+", "A-3", "A-3", np.nan, "A-2", "A-3"], name="best_rtg"
)
pd.testing.assert_series_equal(actual, expectations)
def test_get_best_rating_shortterm_with_inferring_rating_provider(rtg_inputs_shortterm):
"""Test computation of best ratings on a security (line-by-line) basis."""
actual = rtg.get_best_ratings(rtg_inputs_shortterm, tenor="short-term")
expectations = pd.Series(
data=["A-1", "A-3", "A-1+", "A-3", "A-3", np.nan, "A-2", "A-3"], name="best_rtg"
)
pd.testing.assert_series_equal(actual, expectations)
def test_get_second_best_rating_longterm_with_explicit_rating_provider(
rtg_inputs_longterm,
):
"""Test computation of second-best ratings on a security (line-by-line) basis."""
actual = rtg.get_second_best_ratings(
rtg_inputs_longterm,
rating_provider_input=["SP", "Moody", "Fitch"],
tenor="long-term",
)
expectations = pd.Series(
data=["AA+", "AA-", "AA", "BB-", "C", np.nan, "BBB+", "AA"],
name="second_best_rtg",
)
| pd.testing.assert_series_equal(actual, expectations) | pandas.testing.assert_series_equal |
import math
import os
import sys
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import plotly.figure_factory as ff
from cell_defination import *
sys.path.insert(1, r'C:\Users\bunny\PycharmProjects\vccf_visualization')
import utils.kidney_nuclei_vessel_calculate as my_csv
def generate_one_line_df(df, key):
line_x = [None] * (len(df) * 2)
line_y = [None] * (len(df) * 2)
line_z = [None] * (len(df) * 2)
line_x[::2] = df[f"n{key}x"]
line_y[::2] = df[f"n{key}y"]
line_z[::2] = df[f"n{key}z"]
line_x[1::2] = df["nx"]
line_y[1::2] = df["ny"]
line_z[1::2] = df["nz"]
l_data = dict()
l_data["x"] = line_x
l_data["y"] = line_y
l_data["z"] = line_z
# v_color = ['red'] * len(vessel_x_list)
# v_data["color"] = v_color
l_df = | pd.DataFrame(l_data) | pandas.DataFrame |
import pandas as pd
import numpy as np
import math
from deprecated import deprecated
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error, mean_squared_log_error
import os
root_path = os.path.dirname(os.path.abspath('__file__'))
import sys
sys.path.append(root_path)
from tools.metrics_ import PPTS
from config.globalLog import logger
def dump_train_dev_test_to_csv(
path,
train_y=None,
train_pred=None,
train_nse=None,
train_mse=None,
train_nrmse=None,
train_mae=None,
train_mape=None,
train_ppts=None,
dev_y=None,
dev_pred=None,
dev_nse=None,
dev_mse=None,
dev_nrmse=None,
dev_mae=None,
dev_mape=None,
dev_ppts=None,
test_y=None,
test_pred=None,
test_nse=None,
test_mse=None,
test_nrmse=None,
test_mae=None,
test_mape=None,
test_ppts=None,
time_cost=None,
):
"""
Dump training and developing records and predictions as well as r square to excel.
Args:
path: The local disk path to dump data into.
train_y: train records with Dataframe type.
train_pred: train predictions with numpy array type.
train_nse: R square value for training records and predictions, type float.
train_nrmse: Normalized root mean square error value for training records and predictions, type float.
train_mae: Mean absolute error value for training records and predictions, type float.
train_mape: Mean absolute percentage error value for training records and predictions, type float.
train_ppts: Peak percentage threshold statistic value for training records and predictions, type float.
dev_y: developing records with Dataframe type.
dev_pred: developing predictions with numpy array type.
dev_nse: R square value for development records and predictions, type float.
dev_nrmse: Normalized root mean square error value for development records and predictions, type float.
dev_mae: Mean absolute error value for development records and predictions, type float.
dev_mape: Mean absolute percentage error value for development records and predictions, type float.
dev_ppts: Peak percentage threshold statistic value for development records and predictions, type float.
test_y: testing records with Dataframe type.
test_pred: testing predictions with numpy array type.
test_nse: R square value for testing records and predictions, type float.
test_nrmse: Normalized root mean square error value for testing records and predictions, type float.
test_mae: Mean absolute error value for testing records and predictions, type float.
test_mape: Mean absolute percentage error value for testing records and predictions, type float.
test_ppts: Peak percentage threshold statistic value for testing records and predictions, type float.
time_cost: Time cost for profiling, type float.
"""
index_train = pd.Index(np.linspace(0, train_y.size-1, train_y.size))
index_dev = pd.Index(np.linspace(0, dev_y.size-1, dev_y.size))
index_test = pd.Index(np.linspace(0, test_y.size-1, test_y.size))
# convert the train_pred numpy array into Dataframe series
train_y = pd.DataFrame(list(train_y), index=index_train, columns=['train_y'])['train_y']
train_pred = pd.DataFrame(data=train_pred, index=index_train,columns=['train_pred'])['train_pred']
train_nse = pd.DataFrame([train_nse], columns=['train_nse'])['train_nse']
train_mse = pd.DataFrame([train_mse], columns=['train_mse'])['train_mse']
train_nrmse = pd.DataFrame([train_nrmse], columns=['train_nrmse'])['train_nrmse']
train_mae = pd.DataFrame([train_mae], columns=['train_mae'])['train_mae']
train_mape = | pd.DataFrame([train_mape], columns=['train_mape']) | pandas.DataFrame |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import json
import logging
import random
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
import mct.Constants as Constants
import mct.Utilities as Utils
from mct.HypothesisTester import chi_square_bias_test
class BiasTester(object):
"""
Perform a bias check between the control and treatment dataframes.
"""
__group_control = 'group_control'
__group_treatment = 'group_treatment'
__index_group = 'index_group'
__rf_propensity_scores = 'rf_propensity_scores'
def __init__(self, config: json):
self.config = config
self.__logger = logging.getLogger("mct")
return
def check_bias(self, control_df: pd.DataFrame, treatment_df: pd.DataFrame) -> \
(pd.DataFrame, pd.DataFrame, pd.DataFrame, bool):
"""
        Compares the distribution of each invariant feature separately and flags any
        statistically significant difference that exceeds the given minimum percentage
        deviation threshold.
        :param control_df: control dataframe
        :param treatment_df: treatment dataframe
        :return: tuple of (bias_results, deviation, is_biased)
"""
self.__logger.debug('Checking for Population Bias')
invariant_features = self.config[Constants.invariant_columns]
p_value_threshold = self.config[Constants.p_value_threshold]
percentage_deviation_threshold = self.config[Constants.resample_threshold]
small_bin_percent_threshold = self.config[Constants.small_bin_percent_threshold]
bias_results, deviation, is_biased = chi_square_bias_test(control_df[invariant_features],
treatment_df[invariant_features],
groups=[Constants.control_group,
Constants.treatment_group],
group_column_name=Constants.group_column_name,
other_threshold=small_bin_percent_threshold,
p_value=0.01)
bias_results[Constants.num_of_bins] = bias_results[Constants.degree_of_freedom] + 1
bias_results[Constants.resample] = 'no'
bias_results.loc[(bias_results[Constants.percentage_deviation] > percentage_deviation_threshold)
& (bias_results[Constants.p_value_threshold] < p_value_threshold),
Constants.resample] = 'yes'
# Sort and round Bias results
bias_results = bias_results.sort_values(by=Constants.percentage_deviation, ascending=False)
bias_results.sort_values(by=[Constants.percentage_deviation, Constants.feature], ascending=False, inplace=True)
is_biased = is_biased and (bias_results[Constants.resample] == 'yes').any()
self.__logger.info("Is Data biased: {0}".format(is_biased))
# Sort and round deviations.
deviation.sort_values(
by=[Constants.feature, Constants.bin_column],
ascending=False,
inplace=True)
return bias_results, deviation, is_biased
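    # Illustrative call pattern (not part of the class; the dataframes and config are
    # placeholders): with a config whose Constants.invariant_columns lists the
    # invariant features, one plausible flow is
    #   tester = BiasTester(config)
    #   bias_results, deviation, is_biased = tester.check_bias(control_df, treatment_df)
    #   if is_biased:
    #       control_df, treatment_df = tester.normalize_bias(
    #           control_df, treatment_df,
    #           bias_results[bias_results[Constants.resample] == 'yes'])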
def normalize_bias(self, control: pd.DataFrame, treatment: pd.DataFrame, bias_results: pd.DataFrame,
random_state=None) -> (pd.DataFrame, pd.DataFrame):
"""
Normalize and correct for the major biases.
bias_results - needs to include columns to normalize, and dof
"""
self.__logger.debug("Bias Normalization: started")
Utils.add_group_columns(control, treatment)
if self.config[Constants.normalization_type] != 'rf':
message = 'Currently only supported normalization type is random forest'
self.__logger.error(message)
raise Exception(message)
if not bias_results.empty:
resample_columns = bias_results[Constants.feature]
max_categories = bias_results[Constants.num_of_bins]
data_splits = [(self.__group_control, control), (self.__group_treatment, treatment)]
feature_transforms = [('categorical', x, y) for x, y in zip(resample_columns, max_categories)]
self.__logger.info('Using RF propensity scores with caliper based matching.')
# Get data after sampling.
df_metric = self.__sample_propensity(data_splits, feature_transforms, random_state=random_state)
df_control = df_metric[df_metric[Constants.group_column_name] == Constants.control_group]
df_treatment = df_metric[df_metric[Constants.group_column_name] == Constants.treatment_group]
return df_control, df_treatment
else:
self.__logger.info("Bias Normalization skipped.")
self.__logger.debug("Bias Normalization finished. ")
# Transform the input data
def __transform(self, input_frame, features):
train = | pd.DataFrame(index=input_frame.index) | pandas.DataFrame |
import shutil
import numpy as np
import pandas as pd
import logging
import time as _time
import requests
import random
from tqdm import tqdm
from concurrent import futures
from datetime import datetime, date, time
from dateutil.relativedelta import relativedelta, FR
from opensignals import utils
logger = logging.getLogger(__name__)
AWS_BASE_URL='https://numerai-signals-public-data.s3-us-west-2.amazonaws.com'
SIGNALS_UNIVERSE=f'{AWS_BASE_URL}/latest_universe.csv'
SIGNALS_TICKER_MAP=f'{AWS_BASE_URL}/signals_ticker_map_w_bbg.csv'
SIGNALS_TARGETS=f'{AWS_BASE_URL}/signals_train_val_bbg.csv'
def get_tickers():
ticker_map = pd.read_csv(SIGNALS_TICKER_MAP)
ticker_map = ticker_map.dropna(subset=['yahoo'])
logger.info(f'Number of eligible tickers: {ticker_map.shape[0]}')
if ticker_map['yahoo'].duplicated().any():
raise Exception(
f'Found duplicated {ticker_map["yahoo"].duplicated().values().sum()}'
' yahoo tickers'
)
if ticker_map['bloomberg_ticker'].duplicated().any():
raise Exception(
f'Found duplicated {ticker_map["bloomberg_ticker"].duplicated().values().sum()}'
' bloomberg_ticker tickers'
)
return ticker_map
def get_ticker_data(db_dir):
ticker_data = pd.DataFrame({
'bloomberg_ticker' : pd.Series([], dtype='str'),
'date' : | pd.Series([], dtype='datetime64[ns]') | pandas.Series |
import pytest
import logging
import datetime
import json
import pandas as pd
from astropy.table import Table
from b_to_zooniverse import upload_decals
# logging.basicConfig(
# format='%(asctime)s %(message)s',
# level=logging.DEBUG)
@pytest.fixture
def calibration_dir(tmpdir):
return tmpdir.mkdir('calibration_dir').strpath
@pytest.fixture
def fits_dir(tmpdir):
return tmpdir.mkdir('fits_dir').strpath
@pytest.fixture
def png_dir(tmpdir):
return tmpdir.mkdir('png_dir').strpath
@pytest.fixture
def fits_loc(fits_dir):
return fits_dir + '/' + 'test_image.fits'
@pytest.fixture
def png_loc(png_dir):
return png_dir + '/' + 'test_image.png'
@pytest.fixture()
def nsa_catalog():
return Table([
{'iauname': 'gal_a',
'ra': 146.,
'dec': -1.,
'petroth50': 2.,
'petroth90': 5.},
# adversarial example identical to gal_a
{'iauname': 'gal_dr1',
'ra': 14.,
'dec': -1.,
'petroth50': 2.,
'petroth90': 5.},
{'iauname': 'gal_dr2',
'ra': 1.,
'dec': -1.,
'petroth50': 2.,
'petroth90': 5.}
])
@pytest.fixture()
def fake_metadata():
# manifest expects many columns from joint catalog
return {
'petrotheta': 4.,
'petroflux': [20., 21., 22., 23., 24., 25., 26.],
'nsa_version': '1_0_0',
'z': 0.1,
'mag': [0., 1., 2., 3., 4., 5., 6.],
'absmag': [10., 11., 12., 13., 14., 15., 16.],
'nmgy': [30., 31., 32., 33., 34., 35., 36.],
'another_column': 'sadness'}
@pytest.fixture()
def joint_catalog(fits_dir, png_dir, fake_metadata):
# saved from downloader, which adds fits_loc, png_loc and png_ready to nsa_catalog + decals bricks
gal_a = {
'iauname': 'gal_a',
'nsa_id': 0,
'fits_loc': '{}/gal_a.fits'.format(fits_dir),
'png_loc': '{}/gal_a.png'.format(png_dir),
'png_ready': True,
'ra': 146.,
'dec': -1.,
'petroth50': 2.,
'petroth90': 5.}
gal_a.update(fake_metadata)
gal_dr1 = {
'iauname': 'gal_dr1',
'nsa_id': 1,
'fits_loc': '{}/gal_b.fits'.format(fits_dir),
'png_loc': '{}/gal_b.png'.format(png_dir),
'png_ready': True,
'ra': 14.,
'dec': -1.,
'petroth50': 2.,
'petroth90': 5.}
gal_dr1.update(fake_metadata)
gal_dr2 = {
'iauname': 'gal_dr2',
'nsa_id': 2,
'fits_loc': '{}/gal_c.fits'.format(fits_dir),
'png_loc': '{}/gal_c.png'.format(png_dir),
'png_ready': True,
'ra': 1.,
'dec': -1.,
'petroth50': 2.,
'petroth90': 5.}
gal_dr2.update(fake_metadata)
return Table([gal_a, gal_dr1, gal_dr2])
@pytest.fixture()
def previous_subjects():
# loaded from GZ data dump. Metadata already corrected in setup (name = main stage).
return Table([
# DR1 entries have provided_image_id filled with iau name, nsa_id blank, dr blank
{'_id': 'ObjectId(0)',
'zooniverse_id': 'gz_dr1',
'iauname': 'gal_dr1',
'nsa_id': 1,
'dr': 'DR1',
'ra': 14.,
'dec': -1.,
'petroth50': 2.,
'petroth90': 5.
},
# DR2 entries have provided_image_id blank, nsa_id filled with NSA_[number], dr filled with 'DR2'
{'_id': 'ObjectId(1)',
'zooniverse_id': 'gz_dr2',
'iauname': 'gal_dr2',
'nsa_id': 2,
'dr': 'DR2',
'ra': 1.,
'dec': -1.,
'petroth50': 2.,
'petroth90': 5.
}
])
@pytest.fixture()
def expert_catalog():
return Table([
{
# gal a is both a bar and ring galaxy, and so should be included in the calibration set
'iauname': 'gz_a',
'ra': 146.,
'dec': -1.,
'bar': 2 ** 5,
'ring': 2 ** 3,
}
])
#
# def test_upload_decals_to_panoptes(joint_catalog, previous_subjects, expert_catalog, calibration_dir):
# # TODO mock the uploader here
# main_subjects, calibration_subjects = upload_decals_to_panoptes(
# joint_catalog, previous_subjects, expert_catalog, calibration_dir)
#
# print(main_subjects)
# print(calibration_subjects)
#
# assert len(main_subjects) == 1
# assert len(calibration_subjects) == len(main_subjects) * 2
#
# first_main_subject = main_subjects[0]
# assert first_main_subject['png_loc'][-17:] == 'png_dir/gal_a.png'
# assert first_main_subject['key_data']['ra'] == 146.0
# assert first_main_subject['key_data']['dec'] == -1.
# assert first_main_subject['key_data']['nsa_id'] == 0
# assert first_main_subject['key_data']['petroth50'] == 2.0
# assert first_main_subject['key_data']['mag_abs_r'] == 14.0
# TODO better unit tests for calibration image manifest
# wrong, should have 1 of each version not two
# assert calibration_subjects[0]['png_loc'][-29:] == 'calibration_dir/gal_a_dr2.png'
# assert calibration_subjects[0]['key_data']['selected_image'] == 'dr2_png_loc'
# assert calibration_subjects[1]['png_loc'][-32:] == 'calibration_dir/gal_a_colour.png'
@pytest.fixture()
def subject_extract():
return pd.DataFrame([
{
'subject_id': 'classified', # gal_dr2 should be removed from joint catalog - has been uploaded/classified
'workflow_id': '6122',
'metadata': json.dumps({ # read by subject loader
'ra': 1., # in joint catalog as 'gal_a'
'dec': -1,
'locations': json.dumps({'0': 'url.png'}) # expected by subject loader. Value is itself a json.
})
},
{
'subject_id': 'used_twice',
'workflow_id': '6122',
'metadata': json.dumps({
'ra': 146., # should still exclude gal_a
'dec': -1,
'locations': json.dumps({'0': 'url.png'})
})
},
{
'subject_id': 'used_twice',
'workflow_id': '9999', # duplicate subject due to being attached to another workflow
'metadata': json.dumps({
'ra': 146., # should still exclude gal_a
'dec': -1,
'locations': json.dumps({'0': 'url.png'})
})
},
{
'subject_id': 'different_workflow',
'workflow_id': '9999',
'metadata': json.dumps({
'ra': 14., # should NOT exclude gal_dr1, classified elsewhere
'dec': -1,
'locations': json.dumps({'0': 'url.png'})
})
},
{
'subject_id': 'early',
'workflow_id': '6122',
'metadata': json.dumps({
'ra': 146., # should NOT exclude gal_dr1, classified early
'dec': -1,
'locations': json.dumps({'0': 'url.png'})
})
},
])
@pytest.fixture()
def classification_extract(): # note: subject_ids, with an s, from Panoptes
return pd.DataFrame([
{
'subject_ids': 'classified',
'created_at': '2018-01-01', # should ensure gal_dr2 is removed for being classified
'workflow_id': '6122'
},
{
'subject_ids': 'used_twice',
'created_at': pd.to_datetime('2018-01-01'), # is already datetime, should not throw error
'workflow_id': '6122'
},
{
'subject_ids': 'used_twice', # should still exclude gal_dr2 even though used twice
'created_at': | pd.to_datetime('2018-01-01') | pandas.to_datetime |
import sklearn.linear_model
import sklearn.preprocessing
import sklearn.metrics
import pandas as pd
import numpy as np
import dataset_categories
import mushroom_classifier
"""
This script is used for reliable classification results; running the main method or
using the high-level functions of mushroom_classifier.py for classification produces
flawed results. The bugs will be fixed in a later version.
"""
if __name__ == "__main__":
    # import data and assign to variables
data_primary = pd.read_csv(dataset_categories.FILE_PATH_PRIMARY_EDITED, header=0, sep=';')
data_secondary = | pd.read_csv(dataset_categories.FILE_PATH_SECONDARY_NO_MISS, header=0, sep=';') | pandas.read_csv |
import warnings
import cvxpy as cp
import numpy as np
import numpy.linalg as la
import pandas as pd
import scipy.stats as st
from _solver_fast import _cd_solver
from linearmodels.iv import IV2SLS, compare
from patsy import dmatrices
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from sklearn.utils.validation import (
check_array,
check_is_fitted,
check_random_state,
check_X_y,
)
from statsmodels.api import add_constant
class Rlasso(BaseEstimator, RegressorMixin):
"""
Rigorous Lasso and Sqrt-Lasso estimator with
theoretically motivated and data-driven penalty level.
Parameters
----------
post: bool, default=True
If True, post-lasso is used to estimate betas,
meaning that features selected by rlasso are
estimated by OLS in the final model, as outlined
in [2]_.
sqrt: bool, default=False
        If True, the square-root lasso criterion is minimized
        instead of the normal lasso. See [1]_ and
notes below for details.
fit_intercept: bool, default=True
If True, an unpenalized intercept is estimated
by mean centering the data prior to estimation.
cov_type: str, default="nonrobust"
Type of covariance matrix. Right now the
supported types are: "nonrobust", "robust".
x_dependent: bool, default=False
If True, the alternative and less conservative lambda
is estimated by simulation using the conditional
distribution of the design matrix.
n_sim: int, default=5000
Number of simulations to be performed for x-dependent
lambda calculation.
random_state: int, default=None
Random seed used for simulations if `x_dependent` is
set to `True`.
lasso_psi: bool, default=False
        By default, post-lasso coefficients are used to obtain the residuals
        that update the penalty loadings in each iteration; if True, the
        lasso residuals are used instead.
prestd: bool, default=False
If True, the data is prestandardized instead of
on the fly by penalty loadings. Currently only
supports homoscedastic case.
n_corr: int, default=5
Number of most correlated variables to be used in the
for initial calculation of the residuals.
c: float, default=1.1
Slack parameter used in the lambda calculation. From
[3]_ "c needs to be greater than 1 for the regularization
event to hold asymptotically, but not too high as the
shrinkage bias is increasing in c."
gamma: float, optional=None
Regularization parameter, where the probability of
selecting the correct model is given by 1-gamma.
If not specified, the the value is set to:
0.1 / np.log(n)
max_iter: int, default=2
Maximum number of iterations to perform in the iterative
estimation procedure to obtain the Rlasso estimates.
conv_tol: float, default=1e-4
Tolerance for the convergence of the iterative estimation
procedure.
solver: str, default="cd"
Solver to be used for the iterative estimation procedure.
Alternatives are:
"cd" - coordinate descent method.
"cvxpy" - cvxpy solver.
cd_max_iter: int, default=10000
Maximum number of iterations to be perform by the coordinate
descent algorithm before stopping.
cd_tol: float, default=1e-10
Convergence tolerance for the coordinate descent algorithm.
cvxpy_opts: dict, default=None
Additional options to be passed to the cvxpy solver. See cvxpy
documentation for more details:
https://www.cvxpy.org/tutorial/advanced/index.html#solve-method-options
zero_tol: float, default=1e-4
Tolerance for the rounding of estimated coefficients to zero.
Attributes
----------
coef_: numpy.array, shape (n_features,)
Estimated coefficients.
intercept_: float
Estimated intercept.
lambd_: float
Estimated lambda/overall penalty level.
psi_: numpy.array, shape (n_features, n_features)
Estimated penalty loadings.
n_iter_: int
Number of iterations performed by the rlasso algorithm.
n_features_in_: int
Number of features in the input data.
n_samples_: int
Number of samples/observations in the input data.
feature_names_in_: str
Feature names of ``X``. Only stored if
the input data is of type ``pd.DataFrame``.
Notes
-----
Rlasso minimizes the following loss function:
.. math:: \widehat{\\beta} = \\arg \min \\frac{1}{n} \lVert y_i - x_i'\\beta \\rVert_2^2 +\\frac{\lambda}{n} \sum^p_{j=1}\psi_j|\\beta_j|
Or in the case of square-root lasso when ``sqrt=True``:
.. math:: \widehat{\\beta} = \\arg \min \\frac{1}{\sqrt{n}} \lVert y_i - x_i'\\beta \\rVert_2 + \\frac{\lambda}{n} \sum^p_{j=1}\psi_j|\\beta_j|
Where :math:`\psi_{j}` are regressor specific penalty loadings and
:math:`\lambda` is the overall penalty level. For an introduction to
the rigorous lasso algorithm to estimate the penalty loadings and
the overall penalty level see [3]_ and [4]_.
References
----------
.. [1] <NAME>., <NAME>., & <NAME>. (2011).
Square-root lasso: pivotal recovery of sparse signals via conic programming.
Biometrika, 98(4), 791-806.
.. [2] <NAME>., & <NAME>. (2013). Least squares after model selection
in high-dimensional sparse models. Bernoulli, 19(2), 521-547.
.. [3] <NAME>., <NAME>., & <NAME>. (2020). lassopack: Model
selection and prediction with regularized regression in Stata.
The Stata Journal, 20(1), 176-235.
.. [4] <NAME>., <NAME>., & <NAME>. (2016).
hdm: High-dimensional metrics. arXiv preprint arXiv:1608.00354.
Examples
--------
>>> import numpy as np
>>> from rlasso import Rlasso
>>> X = np.random.randn(100, 5)
>>> y = np.random.randn(100)
>>> rlasso = Rlasso()
>>> rlasso.fit(X, y)
"""
def __init__(
self,
*,
post=True,
sqrt=False,
fit_intercept=True,
cov_type="nonrobust",
x_dependent=False,
random_state=None,
lasso_psi=False,
prestd=False,
n_corr=5,
max_iter=2,
conv_tol=1e-4,
n_sim=5000,
c=1.1,
gamma=None,
solver="cd",
cd_max_iter=1000,
cd_tol=1e-10,
cvxpy_opts=None,
zero_tol=1e-4,
):
self.post = post
self.sqrt = sqrt
self.fit_intercept = fit_intercept
self.cov_type = cov_type
self.x_dependent = x_dependent
self.random_state = random_state
self.lasso_psi = lasso_psi
self.prestd = prestd
self.n_corr = n_corr
self.max_iter = max_iter
self.conv_tol = conv_tol
self.n_sim = n_sim
self.c = c
self.gamma = gamma
self.solver = solver
self.cd_max_iter = cd_max_iter
self.cd_tol = cd_tol
self.zero_tol = zero_tol
self.cvxpy_opts = cvxpy_opts
def _psi_calc(self, X, n, v=None):
"""Calculate the penalty loadings."""
# TODO Implement cluster robust covariance
# if prestandardized X, set loadings to ones
if self.prestd:
psi = np.ones(self.n_features_in_)
# sqrt case
elif self.sqrt:
if self.cov_type == "nonrobust":
psi = np.sqrt(np.mean(X**2, axis=0))
# heteroscedastic robust case
elif self.cov_type == "robust" and v is not None:
Xv2 = np.einsum("ij, i -> j", X**2, v**2)
psi_1 = np.sqrt(np.mean(X**2, axis=0))
psi_2 = np.sqrt(Xv2 / np.sum(v**2))
psi = np.maximum(psi_1, psi_2)
# clustered
else:
raise NotImplementedError("Cluster robust loadings not implemented")
elif self.cov_type == "nonrobust":
psi = np.sqrt(np.mean(X**2, axis=0))
elif self.cov_type == "robust" and v is not None:
Xe2 = np.einsum("ij, i -> j", X**2, v**2)
psi = np.sqrt(Xe2 / n)
else:
raise NotImplementedError("Cluster robust loadings not implemented")
        if self.nopen_idx_ is not None:
psi[self.nopen_idx_] = 0.0
return psi
def _lambd_calc(
self,
n,
p,
X,
*,
v=None,
s1=None,
psi=None,
): # sourcery skip: remove-redundant-if
"""Calculate the lambda/overall penalty level."""
# TODO Always return both lambda and lambda scaled by RMSE
# for the purpose of comparison between specifications.
# TODO: Implement cluster robust case
# empirical gamma if not provided
gamma = self.gamma or 0.1 / np.log(n)
if psi is not None:
psi = np.diag(psi)
if self.sqrt:
lf = self.c
# x-independent (same for robust and nonrobust)
if not self.x_dependent:
prob = st.norm.ppf(1 - (gamma / (2 * p)))
lambd = lf * np.sqrt(n) * prob
elif self.cov_type == "nonrobust":
Xpsi = X @ la.inv(psi)
sims = np.empty(self.n_sim)
for r in range(self.n_sim):
g = self.random_state_.normal(size=(n, 1))
sg = np.mean(g**2)
sims[r] = sg * np.max(np.abs(np.sum(Xpsi * g, axis=0)))
lambd = lf * np.quantile(sims, 1 - gamma)
elif self.cov_type == "robust":
Xpsi = X @ la.inv(psi)
sims = np.empty(self.n_sim)
for r in range(self.n_sim):
g = self.random_state_.normal(size=(n, 1))
sg = np.mean(g**2)
sims[r] = sg * np.max(np.abs(np.sum(Xpsi * v[:, None] * g, axis=0)))
lambd = lf * np.quantile(sims, 1 - gamma)
else:
raise NotImplementedError("Cluster robust penalty not implemented")
else:
lf = 2 * self.c
# homoscedasticity and x-independent case
if self.cov_type == "nonrobust" and not self.x_dependent:
assert s1 is not None
proba = st.norm.ppf(1 - (gamma / (2 * p)))
# homoscedastic/non-robust case
lambd = lf * s1 * np.sqrt(n) * proba
elif self.cov_type == "nonrobust" and self.x_dependent:
assert psi is not None
sims = np.empty(self.n_sim)
Xpsi = X @ la.inv(psi)
for r in range(self.n_sim):
g = self.random_state_.normal(size=(n, 1))
sims[r] = np.max(np.abs(np.sum(Xpsi * g, axis=0)))
lambd = lf * s1 * np.quantile(sims, 1 - gamma)
# heteroscedastic/cluster robust and x-independent case
elif self.cov_type in ("robust", "cluster") and not self.x_dependent:
proba = st.norm.ppf(1 - (gamma / (2 * p)))
lambd = lf * np.sqrt(n) * proba
# heteroscedastic/cluster robust and x-dependent case
elif self.cov_type == "robust" and self.x_dependent:
sims = np.empty(self.n_sim)
Xpsi = X @ la.inv(psi)
for r in range(self.n_sim):
g = self.random_state_.normal(size=(n, 1))
sims[r] = np.max(np.abs(np.sum(Xpsi * v[:, None] * g, axis=0)))
lambd = lf * np.quantile(sims, 1 - gamma)
# heteroscedastic/cluster robust and x-dependent case
else:
raise NotImplementedError("Cluster robust penalty not implemented")
return lambd
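    # For reference, in the simplest branch above (homoscedastic, x-independent) the
    # penalty reduces to the closed form
    #     lambda = 2 * c * s1 * sqrt(n) * Phi^{-1}(1 - gamma / (2 * p)),
    # where s1 is the current residual RMSE and Phi^{-1} the standard normal quantile;
    # the x-dependent branches replace the quantile with a simulated counterpart.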
def _cvxpy_solver(
self,
X,
y,
lambd,
psi,
n,
p,
):
"""
Solve the lasso problem using cvxpy
"""
beta = cp.Variable(p)
if self.sqrt:
loss = cp.norm2(y - X @ beta) / cp.sqrt(n)
else:
loss = cp.sum_squares(y - X @ beta) / n
reg = (lambd / n) * cp.norm1(np.diag(psi) @ beta)
objective = cp.Minimize(loss + reg)
prob = cp.Problem(objective)
prob.solve(**self.cvxpy_opts or {})
# round beta to zero if below threshold
beta = beta.value
beta[np.abs(beta) < self.zero_tol] = 0.0
return beta
def _OLS(self, X, y):
"""
Solve the OLS problem
"""
# add dim if X is 1-d
if X.ndim == 1:
X = X[:, None]
try:
return la.solve(X.T @ X, X.T @ y)
except la.LinAlgError:
warnings.warn("Singular matrix encountered. invoking lstsq solver for OLS")
return la.lstsq(X, y, rcond=None)[0]
def _post_lasso(self, beta, X, y):
"""Replace the non-zero lasso coefficients by OLS."""
nonzero_idx = np.where(beta != 0)[0]
X_sub = X[:, nonzero_idx]
post_beta = self._OLS(X_sub, y)
beta[nonzero_idx] = post_beta
return beta
def _starting_values(self, XX, Xy, lambd, psi):
"""Calculate starting values for the lasso."""
if self.sqrt:
return la.solve(XX + lambd * np.diag(psi**2), Xy)
else:
return la.solve(XX * 2 + lambd * np.diag(psi**2), Xy * 2)
def _fit(self, X, y, *, nopen_idx=None):
"""Helper function to fit the model."""
if self.max_iter < 0:
raise ValueError("`max_iter` cannot be negative")
if self.cov_type not in ("nonrobust", "robust"):
raise ValueError("cov_type must be one of 'nonrobust', 'robust'")
if self.solver not in ("cd", "cvxpy"):
raise ValueError("solver must be one of 'cd', 'cvxpy'")
if self.c < 1:
warnings.warn(
"c should be greater than 1 for the regularization"
" event to hold asymptotically"
)
if self.prestd and self.cov_type in ("robust", "cluster"):
warnings.warn(
"prestd is not implemented for robust penalty. "
"Data is assumed to be homoscedastic."
)
if nopen_idx is not None and not isinstance(nopen_idx, (list, np.ndarray)):
raise ValueError("nopen_idx must be a list or numpy array")
X, y = check_X_y(X, y, accept_sparse=False, ensure_min_samples=2)
self.nopen_idx_ = nopen_idx
p = self.n_features_in_ = X.shape[1]
n = X.shape[0]
# check random state
self.random_state_ = check_random_state(self.random_state)
# intercept and pre-standardization handling
if self.fit_intercept or self.prestd:
X_mean, y_mean = np.mean(X, axis=0), np.mean(y)
X, y = X - X_mean, y - y_mean
if self.prestd:
X_std, y_std = np.std(X, axis=0), np.std(y)
X, y = X / X_std, y / y_std
# pre-allocate arrays for coordinate descent solver
if self.solver == "cd":
# precompute XX and Xy crossprods
XX = X.T @ X
Xy = X.T @ y
# make matrices fortran contiguous
XX = np.asfortranarray(XX, dtype=np.float64)
X = np.asfortranarray(X, dtype=np.float64)
Xy = np.asfortranarray(Xy, dtype=np.float64)
y = np.asfortranarray(y, dtype=np.float64)
# sqrt used under homoscedastic is one-step estimator
if self.sqrt and self.cov_type == "nonrobust" and not self.x_dependent:
psi = self._psi_calc(X, n)
lambd = self._lambd_calc(n=n, p=p, X=X)
if self.solver == "cd":
beta_ridge = self._starting_values(XX, Xy, lambd, psi)
beta = _cd_solver(
X=X,
y=y,
XX=XX,
Xy=Xy,
lambd=lambd,
psi=psi,
starting_values=beta_ridge,
sqrt=self.sqrt,
fit_intercept=self.fit_intercept,
max_iter=self.cd_max_iter,
opt_tol=self.cd_tol,
zero_tol=self.zero_tol,
)
else:
beta = self._cvxpy_solver(
X=X,
y=y,
lambd=lambd,
psi=psi,
n=n,
p=p,
)
if self.post:
beta = self._post_lasso(beta, X, y)
# rescale beta
if self.prestd:
beta *= y_std / X_std
self.intercept_ = y_mean - X_mean @ beta if self.fit_intercept else 0.0
self.nonzero_idx_ = np.where(beta != 0)[0]
self.coef_ = beta
self.n_iter_ = 1
self.lambd_ = lambd
self.psi_ = psi
return
# calculate error based on initial
# highly correlated vars
r = np.empty(p)
for k in range(p):
r[k] = np.abs(st.pearsonr(X[:, k], y)[0])
X_top = X[:, np.argsort(r)[-self.n_corr :]]
beta0 = self._OLS(X_top, y)
v = y - X_top @ beta0
s1 = np.sqrt(np.mean(v**2))
psi = self._psi_calc(X=X, v=v, n=n)
lambd = self._lambd_calc(
n=n,
p=p,
v=v,
s1=s1,
X=X,
psi=psi,
)
# get initial estimates k=0
if self.solver == "cd":
beta_ridge = self._starting_values(XX, Xy, lambd, psi)
beta = _cd_solver(
X=X,
y=y,
XX=XX,
Xy=Xy,
lambd=lambd,
psi=psi,
starting_values=beta_ridge,
sqrt=self.sqrt,
fit_intercept=self.fit_intercept,
max_iter=self.cd_max_iter,
opt_tol=self.cd_tol,
zero_tol=self.zero_tol,
)
else:
beta = self._cvxpy_solver(
X=X,
y=y,
lambd=lambd,
psi=psi,
n=n,
p=p,
)
for k in range(self.max_iter):
s0 = s1
# post lasso handling
if not self.lasso_psi:
beta = self._post_lasso(beta, X, y)
# error refinement
v = y - X @ beta
s1 = np.sqrt(np.mean(v**2))
# if convergence not reached get new estimates of lambd and psi
psi = self._psi_calc(X=X, v=v, n=n)
lambd = self._lambd_calc(
n=n,
p=p,
v=v,
s1=s1,
X=X,
psi=psi,
)
if self.solver == "cd":
beta = _cd_solver(
X=X,
y=y,
XX=XX,
Xy=Xy,
lambd=lambd,
psi=psi,
starting_values=beta_ridge,
sqrt=self.sqrt,
fit_intercept=self.fit_intercept,
max_iter=self.cd_max_iter,
opt_tol=self.cd_tol,
zero_tol=self.zero_tol,
)
else:
beta = self._cvxpy_solver(
X=X,
y=y,
lambd=lambd,
psi=psi,
n=n,
p=p,
)
# check convergence
if np.abs(s1 - s0) < self.conv_tol:
break
# end of algorithm
if self.post and not self.lasso_psi:
beta = self._post_lasso(beta, X, y)
# rescale beta if standardized
if self.prestd:
beta *= y_std / X_std
self.intercept_ = y_mean - X_mean @ beta if self.fit_intercept else 0.0
self.nonzero_idx_ = np.where(beta != 0)[0]
self.coef_ = beta
self.n_iter_ = k + 1 if self.max_iter > 0 else 1
self.lambd_ = lambd
self.psi_ = psi
def fit(self, X, y, *, nopen_idx=None):
"""
Fit the model to the data.
        Parameters
----------
X: array-like, shape (n_samples, n_features)
Design matrix.
y: array-like, shape (n_samples,)
Target vector.
        Returns
-------
self: object
Returns self.
"""
# store feature names if dataset is pandas
if isinstance(X, pd.DataFrame):
            self.feature_names_in_ = X.columns
self._fit(X, y, nopen_idx=nopen_idx)
# sklearn estimator must return self
return self
def fit_formula(self, formula, data):
"""
        Fit the model to the data using formula language.
Parameters
----------
formula: str
Formula to fit the model. Ex: "y ~ x1 + x2 + x3"
data: Union[pandas.DataFrame, numpy.recarray, dict]
Dataset to fit the model.
Returns
-------
self: object
Returns self.
"""
y, X = dmatrices(formula, data)
self.feature_names_in_ = X.design_info.column_names
X, y = np.asarray(X), np.asarray(y)
y = y.flatten()
# check if intercept is in data
if "Intercept" in self.feature_names_in_:
if not self.fit_intercept:
raise ValueError(
(
"Intercept is in data but fit_intercept is False."
" Set fit_intercept to True to fit intercept or"
" update the formula to remove the intercept"
)
)
# drop column of ones from X
# since intercept calculated in _fit
# by partialing out
X = X[:, 1:]
self._fit(X, y)
# sklearn estimator must return self
return self
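    # Illustrative formula usage (dataframe and column names are placeholders):
    #   df = pd.DataFrame({"y": y, "x1": X[:, 0], "x2": X[:, 1]})
    #   model = Rlasso().fit_formula("y ~ x1 + x2", data=df)
    #   model.coef_, model.lambd_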
def predict(self, X):
"""
Use fitted model to predict on new data.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Design matrix.
        Returns
-------
y_pred: array-like, shape (n_samples,)
Predicted target values.
"""
# check if fitted
check_is_fitted(self)
X = check_array(X)
return self.intercept_ + X @ self.coef_
class RlassoLogit(BaseEstimator, ClassifierMixin):
def __init__(
self,
post=True,
fit_intercept=True,
c=1.1,
gamma=0.05,
zero_tol=1e-4,
solver_opts=None,
):
"""Rigorous Lasso Logistic Regression."""
self.post = post
self.fit_intercept = fit_intercept
self.c = c
self.gamma = gamma
self.zero_tol = zero_tol
self.solver_opts = solver_opts
def _criterion_function(self, X, y, beta, lambd, n, regularization=True):
"""Criterion function for the penalized Lasso Logistic Regression."""
ll = cp.sum(cp.multiply(y, X @ beta) - cp.logistic(X @ beta)) / n
if not regularization:
return -ll
reg = (lambd / n) * cp.norm1(beta)
return -(ll - reg)
def _cvxpy_solve(self, X, y, lambd, n, p):
"""Solve the problem using cvxpy."""
beta = cp.Variable(p)
obj = cp.Minimize(self._criterion_function(X, y, beta, lambd, n))
prob = cp.Problem(obj)
# solve problem and return beta
prob.solve(**self.solver_opts or {})
beta = beta.value
beta[np.abs(beta) < self.zero_tol] = 0.0
return beta
def _decision_function(self, X, beta):
"""Compute the decision function of the model."""
return 1 / (1 + np.exp(-X @ beta))
    def _lambd_calc(self, n, p, gamma=None):
        # use the gamma passed down from _fit; fall back to the constructor value
        if gamma is None:
            gamma = self.gamma
        lambd0 = (self.c / 2) * np.sqrt(n) * st.norm.ppf(1 - gamma / (2 * p))
        lambd = lambd0 / (2 * n)
        return lambd0, lambd
    def _fit(self, X, y, *, gamma=None):
        n, p = X.shape
        if gamma is None:
            gamma = self.gamma if self.gamma is not None else 0.1 / np.log(n)
        lambd0, lambd = self._lambd_calc(n, p, gamma)
beta = self._cvxpy_solve(X, y, lambd, n, p)
return {"beta": beta, "lambd0": lambd0, "lambd": lambd}
def fit(self, X, y, *, gamma=None):
"""Fit the model to the data.
        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Design matrix.
        y: array-like, shape (n_samples,)
            Target vector.
        gamma: float, optional (default: the ``gamma`` value set in the constructor)
        Returns
-------
self: object
Returns self.
"""
# check inputs
X, y = check_X_y(X, y, accept_sparse=True, ensure_2d=True)
# assert y is binary
if np.unique(y).shape[0] != 2:
raise ValueError("y must be binary")
res = self._fit(X, y, gamma=gamma)
self.coef_ = res["beta"]
self.lambd0_ = res["lambd0"]
self.lambd_ = res["lambd"]
return self
def predict(self, X):
"""Predict the class labels for X."""
# check model is fitted and inputs are correct
check_is_fitted(self, ["coef_"])
X = check_array(X)
probas = self._decision_function(X, self.coef_)
return np.where(probas > 0.5, 1, 0)
def predict_proba(self, X):
"""Predict class probabilities for X."""
# check model is fitted and inputs are correct
check_is_fitted(self)
X = check_array(X)
return self._decision_function(X, self.coef_)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X."""
return np.log(self._decision_function(X, self.coef_))
class RlassoIV:
"""
Rigorous Lasso for instrumental-variable estimation in
the presence of high-dimensional instruments and/or
controls. Uses the post-double-selection (PDS) and
post-regularization (CHS) methods for estimation, see
references below.
Parameters
----------
select_X: bool, optional (default: True)
Whether to use lasso/post-lasso for feature
selection of high-dim controls.
select_Z: bool, optional (default: True)
Whether to use lasso/post-lasso for feature
selection of high-dim instruments.
post: bool, default=True
If True, post-lasso is used to estimate betas,
meaning that features selected by rlasso are
estimated by OLS in the final model, as outlined
in [2]_.
sqrt: bool, default=False
        If True, the square-root lasso criterion is minimized
        instead of the normal lasso. See [1]_ and
notes below for details.
fit_intercept: bool, default=True
If True, an unpenalized intercept is estimated
by mean centering the data prior to estimation.
cov_type: str, default="nonrobust"
Type of covariance matrix. Right now the
supported types are: "nonrobust", "robust".
x_dependent: bool, default=False
If True, the alternative and less conservative lambda
is estimated by simulation using the conditional
distribution of the design matrix.
n_sim: int, default=5000
Number of simulations to be performed for x-dependent
lambda calculation.
random_state: int, default=None
Random seed used for simulations if `x_dependent` is
set to `True`.
lasso_psi: bool, default=False
        By default, post-lasso coefficients are used to obtain the residuals
        that update the penalty loadings in each iteration; if True, the
        lasso residuals are used instead.
prestd: bool, default=False
If True, the data is prestandardized instead of
on the fly by penalty loadings. Currently only
supports homoscedastic case.
n_corr: int, default=5
Number of most correlated variables to be used in the
for initial calculation of the residuals.
c: float, default=1.1
Slack parameter used in the lambda calculation. From
[3]_ "c needs to be greater than 1 for the regularization
event to hold asymptotically, but not too high as the
shrinkage bias is increasing in c."
gamma: float, optional=None
Regularization parameter, where the probability of
selecting the correct model is given by 1-gamma.
If not specified, the the value is set to:
0.1 / np.log(n)
max_iter: int, default=2
Maximum number of iterations to perform in the iterative
estimation procedure to obtain the Rlasso estimates.
conv_tol: float, default=1e-4
Tolerance for the convergence of the iterative estimation
procedure.
solver: str, default="cd"
Solver to be used for the iterative estimation procedure.
Alternatives are:
"cd" - coordinate descent method.
"cvxpy" - cvxpy solver.
cd_max_iter: int, default=10000
Maximum number of iterations to be perform by the coordinate
descent algorithm before stopping.
cd_tol: float, default=1e-10
Convergence tolerance for the coordinate descent algorithm.
cvxpy_opts: dict, default=None
Additional options to be passed to the cvxpy solver. See cvxpy
documentation for more details:
https://www.cvxpy.org/tutorial/advanced/index.html#solve-method-options
zero_tol: float, default=1e-4
Tolerance for the rounding of estimated coefficients to zero.
Attributes
----------
results_: dict["PDS", "CHS"]
Dictionary containing the 2-stage-least-squares estimates.
Values are `linearmodels.iv.IV2SLS` objects. See:
https://bashtage.github.io/linearmodels/iv/iv/linearmodels.iv.model.IV2SLS.html
https://bashtage.github.io/linearmodels/iv/examples/basic-examples.html
X_selected_: dict[list[str]]
List of selected controls for each stage in the estimation.
Z_selected_: list[str]
List of selected instruments.
valid_vars_: list[str]
List of variables for which standard errors and test
statistics are valid.
References
----------
.. [1] <NAME>., <NAME>., & <NAME>. (2015).
Post-selection and post-regularization inference in linear models with many controls and instruments.
American Economic Review, 105(5), 486-90.
.. [2] <NAME>., <NAME>., & <NAME>. (2014).
Inference on treatment effects after selection among high-dimensional controls.
The Review of Economic Studies, 81(2), 608-650.
.. [3] <NAME>., <NAME>., & <NAME>. (2019).
PDSLASSO: Stata module for post-selection and
post-regularization OLS or IV estimation and inference.
Notes
-----
``RlassoIV`` is used when one wants to use instruments in order to
estimate low-dimensional endogenous variables in the setting
.. math:: y_{i}=\\alpha d_{i}+x_{i}^{\prime} \\beta+\\varepsilon_{i}
.. math:: d_{i}=x_{i}^{\prime} \gamma+z_{i}^{\prime} \delta+u_{i}
    where :math:`d_{i}` is endogenous and both instruments :math:`z_i` and
    controls :math:`x_i` are possibly high-dimensional.
Examples
--------
>>> import numpy as np
>>> from rlassomodels import RlassoIV
>>> X = np.random.randn(100, 20)
>>> Z = np.random.randn(100, 15)
>>> d_endog = np.random.randn(100)
>>> y = np.random.randn(100)
>>> # Fit the model, use rlasso to select both controls and instruments
>>> rlasso_iv = RlassoIV(select_X=True, select_Z=True)
>>> rlasso_iv.fit(X, y, D_exog = None, D_endog=d_endog, Z=Z)
"""
def __init__(
self,
*,
select_X=True,
select_Z=True,
post=True,
sqrt=False,
fit_intercept=True,
cov_type="nonrobust",
x_dependent=False,
random_state=None,
lasso_psi=False,
prestd=False,
n_corr=5,
max_iter=2,
conv_tol=1e-4,
n_sim=5000,
c=1.1,
gamma=None,
solver="cd",
cd_max_iter=1000,
cd_tol=1e-10,
cvxpy_opts=None,
zero_tol=1e-4,
):
self.select_X = select_X
self.select_Z = select_Z
self.post = post
self.sqrt = sqrt
self.fit_intercept = fit_intercept
self.cov_type = cov_type
self.x_dependent = x_dependent
self.random_state = random_state
self.lasso_psi = lasso_psi
self.prestd = prestd
self.n_corr = n_corr
self.n_sim = n_sim
self.c = c
self.gamma = gamma
self.max_iter = max_iter
self.conv_tol = conv_tol
self.solver = solver
self.cd_max_iter = cd_max_iter
self.cd_tol = cd_tol
self.zero_tol = zero_tol
self.cvxpy_opts = cvxpy_opts
self.rlasso = Rlasso(
post=post,
sqrt=sqrt,
fit_intercept=fit_intercept,
cov_type=cov_type,
x_dependent=x_dependent,
random_state=random_state,
lasso_psi=lasso_psi,
prestd=prestd,
n_corr=n_corr,
n_sim=n_sim,
c=c,
gamma=gamma,
max_iter=max_iter,
conv_tol=conv_tol,
solver=solver,
cd_max_iter=cd_max_iter,
cd_tol=cd_tol,
cvxpy_opts=cvxpy_opts,
)
def _check_inputs(self, X, y, D_exog, D_endog, Z):
"""
        Check inputs before they are passed to fit. For now, data is
        converted to pd.DataFrame objects, as this simplifies keeping track
        of nonzero indices and variable names significantly.
"""
def _check_single(var, name):
if var is None:
return
if isinstance(var, pd.DataFrame):
return var
if isinstance(var, np.ndarray):
var = pd.DataFrame(var)
var.columns = [f"{name}{i}" for i in range(var.shape[1])]
return var
elif isinstance(var, pd.core.series.Series):
return (
| pd.DataFrame(var) | pandas.DataFrame |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = | tm.box_expected(expected, xbox) | pandas._testing.box_expected |
from glob import glob
from PIL import Image
import pickle as pkl
import os
import configargparse
import configparser
import torch
import numpy as np
import argparse
import sys
import matplotlib.pyplot as plt
import yaml
from munch import munchify
import json
import PIL
from parse import parse
import collections
import random
from PIL import ImageOps
def create_dir_if_doesnt_exist(dir_path):
"""
This function creates a directory if it doesn't exist.
:param dir_path: string, directory path
"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
return
def crop_and_save_single(img,crop_height,crop_width,image_save_dir,name,with_label=False):
""" crop and save single image to an equal sized sub images
:param img: numpy array of the image
:param crop_height: int, height of cropped image
:param crop_width: int, width of cropped image
:param image_save_dir: string, path to directory
:param name: string, name of image to be saved
:param with_label: bool, if image array includes a mask channel
"""
assert np.mod(img.shape[0], crop_height) == 0
assert np.mod(img.shape[1], crop_width) == 0
num_row = img.shape[0] #// crop_height
num_col = img.shape[1] #// crop_width
crop_img = np.zeros((crop_height, crop_width, 4))
for row in range(0,num_row,crop_height):
for col in range(0,num_col,crop_width):
# print("row:{}, row+crop height:{}, j: {}, row+cropwidth:{}".format(row,row+crop_height,col,col+crop_width))
crop_img = img[row:row+crop_height, col:col+crop_width, :]
# out_name = img_name[:-4] + '_' + \
out_name = name + '_' + \
str(num_col) + '_' + str(row).zfill(2) + \
'_' + str(col).zfill(2)+'.png'
# if with_label:
# label_name = "/"+str(index) + "_" + date_time + "_label"
# crop_3_ch = crop_img[:,:,:3] # if cropping a labeled image
# crop_label = crop_img[:,:,-1] # if cropping a labeled image
# PIL_crop_label = Image.fromarray(crop_label.astype(np.uint8))
# # PIL_crop_label.save(save_dir[1]+"_label_"+out_name) # if cropping a labeled image
PIL_crop = Image.fromarray(crop_img[:,:,:3].astype(np.uint8))
# if with_label:
# # return PIL_crop,PIL_crop_label
# # return PIL_crop
PIL_crop.save(image_save_dir+"/"+out_name)
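# --- Hedged usage sketch (added for illustration; not part of the original script). ---
# "field_photo.jpg", the "tiles" folder and the 256-pixel tile size are assumptions;
# the image height and width must be exact multiples of the chosen crop size, as the
# asserts in crop_and_save_single() require. np, Image and os are imported above.
def _crop_usage_example():
    img = np.array(Image.open("field_photo.jpg").convert("RGB"))  # hypothetical input image
    create_dir_if_doesnt_exist("tiles")
    crop_and_save_single(img, crop_height=256, crop_width=256,
                         image_save_dir="tiles", name="field_photo")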
def get_date_from_metadata(img):
extracted_exif = {PIL.ExifTags.TAGS[k]: v for k,v in img._getexif().items() if k in PIL.ExifTags.TAGS}
date_time = extracted_exif['DateTime']
date_time = date_time.split(" ")[0].replace(":","_")
return date_time
def t2n(x):
x = x.cpu().detach().numpy()
return x
def mat_to_csv(mat_path,save_to):
import scipy.io
import pandas as pd
mat = scipy.io.loadmat(mat_path)
mat = {k:v for k,v in mat.items() if k[0]!='_'}
data = pd.DataFrame({k: | pd.Series(v[0]) | pandas.Series |
import gzip
import itertools as IT
import logging
import os
import random
from functools import partial, wraps
from pathlib import Path
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from tqdm import tqdm
DEFAULT_CHUNK_SIZE = 8_192
def init():
print("Using DaskLike")
def from_sequence(iter_, *args, **kwargs):
return DaskLike(iter_)
def dasklike_iter(fxn):
@wraps(fxn)
def _(*args, **kwargs):
return from_sequence(fxn(*args, **kwargs))
return _
def chunk_stream(stream, chunk_size=DEFAULT_CHUNK_SIZE):
while True:
data = list(IT.islice(stream, chunk_size))
if not data:
return
yield data
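# --- Hedged example (added for clarity; not part of the original module). ---
# chunk_stream lazily slices an iterator into lists of at most chunk_size items.
# Note the argument should be an iterator (not a plain sequence), otherwise each
# islice call would restart from the beginning.
def _chunk_stream_example():
    chunks = list(chunk_stream(iter(range(5)), chunk_size=2))
    assert chunks == [[0, 1], [2, 3], [4]]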
@dasklike_iter
def concat(streams):
return IT.chain.from_iterable(streams)
@dasklike_iter
def read_text(path_glob, include_path=False, compression=None, progress=True):
if not isinstance(path_glob, (list, tuple)):
path_glob = [path_glob]
filenames = list(IT.chain.from_iterable(Path().glob(str(pg)) for pg in path_glob))
if progress:
path_glob_str = os.path.commonprefix(filenames)
filenames = tqdm(filenames, desc=f"Reading glob {path_glob_str}", leave=False)
for filename in filenames:
if filename.suffix.endswith(".gz") or compression == "gzip":
openfxn = gzip.open
else:
openfxn = open
with openfxn(filename, "r") as fd:
if progress:
fd = tqdm(fd, desc=filename.name, leave=False)
for line in fd:
if include_path:
yield (line, filename)
else:
yield line
class DaskLike:
def __init__(self, stream=None):
if isinstance(stream, DaskLike):
self.stream = stream.stream
else:
self.stream = stream
def __iter__(self):
return iter(self.stream)
@dasklike_iter
def map(self, fxn, *args, **kwargs):
return map(partial(fxn, *args, **kwargs), self.stream)
@dasklike_iter
def flatten(self):
return IT.chain.from_iterable(self.stream)
@dasklike_iter
def map_partitions(self, fxn, *args, partition_size=DEFAULT_CHUNK_SIZE, **kwargs):
fxn_partial = partial(fxn, *args, **kwargs)
for chunk in chunk_stream(self.stream, chunk_size=partition_size):
# apply the partially-applied function to each chunk (partition) rather than
# re-consuming self.stream, which chunk_stream is already iterating over
yield fxn_partial(chunk)
@dasklike_iter
def groupby(self, grouper, sort=False):
stream = self.stream
if sort:
stream = list(stream)
stream.sort(key=grouper)
return ((k, list(g)) for k, g in IT.groupby(stream, key=grouper))
@dasklike_iter
def filter(self, fxn, *args, **kwargs):
return filter(partial(fxn, *args, **kwargs), self.stream)
def take(self, N, compute=True):
take_stream, self.stream = IT.tee(self.stream)
data = IT.islice(take_stream, N)
if compute:
return list(data)
return from_sequence(data)
@dasklike_iter
def debug_counter(self, desc, every=500):
for i, item in enumerate(self.stream):
if (i + 1) % every == 0:
print(f"[{desc}] {i}")
yield item
@dasklike_iter
def debug_sampler(self, desc, proba):
for item in self.stream:
if random.random() < proba:
print(f"[{desc}] Sample:")
print(item)
yield item
def to_dataframe(
self, meta=None, columns=None, partition_size=DEFAULT_CHUNK_SIZE, progress=True
):
if meta:
columns = list(meta.keys())
stream = self.stream
if progress:
stream = tqdm(stream, desc="Creating Dataframe")
df = pd.DataFrame(columns=columns)
if meta:
df = df.astype(meta)
for chunk in chunk_stream(stream, chunk_size=partition_size):
df_chunk = | pd.DataFrame(chunk, columns=columns) | pandas.DataFrame |
import unittest
import pandas as pd
import pandas.util.testing as pdtest
import numpy as np
from tia.analysis.model import *
class TestAnalysis(unittest.TestCase):
def setUp(self):
self.closing_pxs = pd.Series(
np.arange(10, 19, dtype=float),
pd.date_range("12/5/2014", "12/17/2014", freq="B"),
)
self.dvds = pd.Series(
[1.25, 1.0],
index=[pd.to_datetime("12/8/2014"), pd.to_datetime("12/16/2014")],
)
def test_trade_split(self):
trd = Trade(1, "12/1/2014", 10.0, 10.0, -1.0)
t1, t2 = trd.split(4)
self.assertEqual(t1.qty, 4)
self.assertEqual(t1.fees, -0.4)
self.assertEqual(t1.ts, trd.ts)
self.assertEqual(t2.qty, 6)
self.assertEqual(t2.fees, -0.6)
self.assertEqual(t2.ts, trd.ts)
def test_txn_details(self):
t1 = Trade(1, "12/8/2014", 5.0, 10.0, -1.0)
t2 = Trade(2, "12/8/2014", 2.0, 15.0, -1.0)
t3 = Trade(3, "12/10/2014", -3.0, 5.0, -1.0)
t4 = Trade(4, "12/12/2014", -4.0, 20.0, -1.0)
t5 = Trade(5, "12/16/2014", -4.0, 10.0, 0)
t6 = Trade(6, "12/17/2014", 4.0, 15.0, 0)
sec = PortfolioPricer(
multiplier=2.0, closing_pxs=self.closing_pxs, dvds=self.dvds
)
# Test txn frame
port = SingleAssetPortfolio(sec, [t1, t2, t3, t4, t5, t6])
txns = port.txns.frame
index = list(range(len(port.trades)))
pdtest.assert_series_equal(
txns.txn_qty, pd.Series([5.0, 2.0, -3.0, -4.0, -4, 4], index=index)
)
pdtest.assert_series_equal(
txns.open_val,
pd.Series([-100.0, -160.0, -160.0 * 4.0 / 7.0, 0, 80, 0], index=index),
)
pdtest.assert_series_equal(
txns.txn_fees, pd.Series([-1.0, -1.0, -1.0, -1.0, 0, 0], index=index)
)
pdtest.assert_series_equal(
txns.txn_intent,
pd.Series(
[
Intent.Open,
Intent.Increase,
Intent.Decrease,
Intent.Close,
Intent.Open,
Intent.Close,
],
index=index,
),
)
pdtest.assert_series_equal(
txns.txn_action,
pd.Series(
[
Action.Buy,
Action.Buy,
Action.Sell,
Action.Sell,
Action.SellShort,
Action.Cover,
],
index=index,
),
)
# CHECK PL
pl = port.pl
# Load the dataset
import tia, os
xl = os.path.join(tia.__path__[0], "tests", "test_analysis.xlsx")
expected = pd.read_excel(xl)
expected = expected.reset_index()
# check ltd txn level
ltd = pl.ltd_txn_frame
pdtest.assert_series_equal(expected.pos.astype(float), ltd.pos)
pdtest.assert_series_equal(expected.ltd_pl, ltd.pl)
pdtest.assert_series_equal(expected.ltd_upl, ltd.upl)
pdtest.assert_series_equal(expected.ltd_rpl, ltd.rpl)
pdtest.assert_series_equal(expected.ltd_dvds, ltd.dvds)
pdtest.assert_series_equal(expected.ltd_fees.astype(float), ltd.fees)
pdtest.assert_series_equal(expected.ltd_rpl_gross, ltd.rpl_gross)
# check txn level
txnlvl = pl.txn_frame
pdtest.assert_series_equal(expected.pos.astype(float), txnlvl.pos)
pdtest.assert_series_equal(expected.dly_pl, txnlvl.pl)
| pdtest.assert_series_equal(expected.dly_upl, txnlvl.upl) | pandas.util.testing.assert_series_equal |
"""
Seed processing code
$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/like2/seeds.py,v 1.7 2018/01/27 15:37:17 burnett Exp $
"""
import os, sys, time, pickle, glob, types
import numpy as np
import pandas as pd
from astropy.io import fits
from skymaps import SkyDir, Band
from uw.utilities import keyword_options
from uw.like2 import (tools, sedfuns, maps, sources, localization, roimodel,)
from uw.like2.pipeline import (check_ts,) #oops stagedict)
#### need to fix!
from uw.like2.pub import healpix_map
def read_seedfile(seedkey, filename=None, config=None):
model_name = os.getcwd().split('/')[-1]
if model_name.startswith('month') and seedkey=='pgw':
#monthly mode, need to find and load PGW analysis with rouighly equivalent months
month=int(model_name[5:]);
filename='/nfs/farm/g/glast/g/catalog/transients/TBIN_%d_all_pgw.txt'% (month-1)
assert os.path.exists(filename), 'PGWAVE file %s not found'% filename
try:
seeds = pd.read_table(filename, sep=' ', skipinitialspace=True, index_col=1,
header=None,
names='tbin ra dec k_signif pgw_roi fgl_seed fgl_ra fgl_dec fgl_assoc'.split())
except Exception as msg:
raise Exception('Failed to read file %s: %s' % (filename, msg))
names=[]
for i,s in seeds.iterrows():
j = int(s.name[4:6]) if s.name[6]=='_' else int(s.name[4:5])
names.append('PGW_%02d_%03d_%02d' % (month, int(s.pgw_roi), j))
seeds['name'] = names
elif model_name.startswith('month') and seedkey=='PGW':
# monthly mode, new format PGwave, in a single FITS file
month=int(model_name[5:]);
assert os.path.exists(filename), 'PGWAVE file {} not found'.format( filename)
t = fits.open(filename)
df=pd.DataFrame(t[1].data)
selector = lambda month : (df.run=='1m ') & (df.TBIN=='TBIN_{:<2d}'.format(month-1))
cut = selector(month)
assert sum(cut)>0, 'No seeds found for month {}'.format(month)
print ('Found {} PGWave seeds'.format(sum(cut)))
ra = np.array(df.Ra[cut],float)
dec = np.array(df.Dec[cut],float)
prefix = 'PG{:02d} '.format(int(month))
# note making it a string type
name = np.array([prefix + n.split('_')[-1].strip() for n in 'TBIN_{}_'.format(month-1)+df.PGW_name[cut]])
seeds = pd.DataFrame([name, ra,dec], index='name ra dec'.split()).T
elif filename is None and config is not None:
# assume that config[seedkey] is the filename
if seedkey in config:
filename = config[seedkey]
elif os.path.exists('seeds_{}.csv'.format(seedkey)):
filename='seeds_{}.csv'.format(seedkey)
else:
raise Exception('seedkey {} not found in config, or filename'.format(seedkey))
if os.path.splitext(filename)[1]=='.fits':
# a standard FITS catalog
f = fits.open(os.path.expandvars(filename))
name, ra, dec = [f[1].data.field(x) for x in 'Source_Name RAJ2000 DEJ2000'.split()]
seeds = pd.DataFrame([name, np.array(ra,float),np.array(dec,float)],
index='name ra dec'.split()).T
else:
seeds = pd.read_csv(filename)
elif filename is not None:
# file is cvs
seeds = pd.read_csv(filename)
else:
# reading a TS seeds file
t = glob.glob('seeds_%s*' % seedkey)
assert len(t)==1, 'Seed file search, using key {}, failed to find one file\n\t{}'.format( seedkey,t)
seedfile=t[0]
try:
csv_format=seedfile.split('.')[-1]=='csv'
if csv_format:
seeds = pd.read_csv(seedfile)
else:
seeds = pd.read_table(seedfile)
except Exception as msg:
raise Exception('Failed to read file %s, perhaps empty: %s' %(seedfile, msg))
seeds['skydir'] = map(SkyDir, seeds.ra, seeds.dec)
seeds['hpindex'] = map( Band(12).index, seeds.skydir)
# check for duplicated names
dups = seeds.name.duplicated()
if sum(dups)>0:
print ('\tRemoving {} duplicate entries'.format(sum(dups)))
return seeds[np.logical_not(dups)]
return seeds
def select_seeds_in_roi(roi, fn='seeds/seeds_all.csv'):
""" Read seeds from csv file, return those in the given ROI
roi : int or Process instance
if the latter, look up index from roi direction. direction
"""
if type(roi)!=int:
roi = Band(12).index(roi.roi_dir)
seeds = pd.read_csv(fn, index_col=0)
seeds['skydir'] = map(SkyDir, seeds.ra, seeds.dec)
seeds.index.name = 'name'
sel = np.array(map( Band(12).index, seeds.skydir))==roi
return seeds[sel]
def add_seeds(roi, seedkey='all', config=None,
model='PowerLaw(1e-14, 2.2)',
associator=None, tsmap_dir='tsmap_fail',
tsmin=10, lqmax=20,
update_if_exists=False,
location_tolerance=0.5,
pair_tolerance=0.25,
**kwargs):
""" add "seeds" from a text file the the current ROI
roi : the ROI object
seedkey : string
Expect one of 'pgw' or 'ts' for now. Used by read_seedfile to find the list
associator :
tsmap_dir
mints : float
minimum TS to accept for addition to the model
lqmax : float
maximum localization quality for tentative source
"""
def add_seed(s):
# use column 'key' to determine the model to use
model = maps.table_info[s['key']][1]['model']
try:
src=roi.add_source(sources.PointSource(name=s.name, skydir=s['skydir'], model=model))
if src.model.name=='LogParabola':
roi.freeze('beta',src.name)
elif src.model.name=='PLSuperExpCutoff':
roi.freeze('Cutoff', src.name)
print ('%s: added at %s' % (s.name, s['skydir']))
except Exception as msg:
print ('*** fail to add source:', msg)
if update_if_exists:
src = roi.get_source(s.name)
print ('{}: updating existing source at {} '.format(s.name, s['skydir']))
else:
print ('{}: Fail to add "{}"'.format(s.name, msg))
return
# profile
prof= roi.profile(src.name, set_normalization=True)
src.ts= prof['ts'] if prof is not None else 0
# fit Norm
try:
roi.fit(s.name+'_Norm', tolerance=0., ignore_exception=False)
except Exception as msg:
print ('\tFailed to fit seed norm: \n\t{}\nTrying full fit'.format(msg))
return False
# fit both parameters
try:
roi.fit(s.name, tolerance=0., ignore_exception=False)
except Exception as msg:
print ('\tFailed to fit seed norm and index:')
return False
ts = roi.TS()
print ('\nTS = %.1f' % ts,)
if ts<tsmin:
print (' <%.1f, Fail to add.' % tsmin)
return False
else: print (' OK')
# one iteration of pivot change
iter = 2
if iter>0 and roi.repivot([src], min_ts=tsmin,select=src.name ):
iter -=1
# and a localization: remove if fails or poor
roi.localize(s.name, update=True, tolerance=1e-3)
quality = src.ellipse[5] if hasattr(src, 'ellipse') and src.ellipse is not None else None
if quality is None or quality>lqmax:
print ('\tFailed localization, quality {}, maximum allowed {}'.format(quality, lqmax))
return True
seedfile = kwargs.pop('seedfile', 'seeds/seeds_{}.csv'.format(seedkey))
seedlist = select_seeds_in_roi(roi, seedfile)
if len(seedlist)==0:
print ('no seeds in ROI')
return False
else:
print ('Found {} seeds from {} in this ROI: check positions'.format(len(seedlist),seedfile))
good = 0
for sname,s in seedlist.iterrows():
print ('='*20, sname, 'Initial TS:{:.1f}'.format(s.ts), '='*20)
if not add_seed( s):
roi.del_source(sname)
else: good +=1
return good>0
def create_seeds(keys = ['ts', 'tsp', 'hard', 'soft'], seed_folder='seeds', tsmin=10,
merge_tolerance=1.0, update=False, max_pixels=30000,):
"""Process the
"""
#keys =stagedict.stagenames[stagename]['pars']['table_keys']
modelname = os.getcwd().split('/')[-1];
if modelname.startswith('uw'):
seedroot=''
elif modelname.startswith('year'):
seedroot='y'+modelname[-2:]
elif modelname.startswith('month'):
seedroot='m'+modelname[-2:]
else:
raise Exception('Unrecognized model name, {}. '.format(modelname))
# list of prefix characters for each template
prefix = dict(ts='M', tsp='P', hard='H', soft='L')
if not os.path.exists(seed_folder):
os.mkdir(seed_folder)
table_name = 'hptables_{}_512.fits'.format('_'.join(keys))
if not (update or os.path.exists(table_name)):
print ("Checking that all ROI map pickles are present...")
ok = True;
for key in keys:
folder = '{}_table_512'.format(key)
assert os.path.exists(folder), 'folder {} not found'.format(folder)
files = sorted(glob.glob(folder+'/*.pickle'))
print (folder, )
n = files[0].find('HP12_')+5
roiset = set([int(name[n:n+4]) for name in files])
missing = sorted(list(set(range(1728)).difference(roiset)))
if len(missing)>0: ok = False
print ('{} missing: {}'.format(len(missing), missing ) if len(missing)>0 else 'OK' )
assert ok, 'One or more missing runs'
print ('Filling tables...')
healpix_map.assemble_tables(keys)
assert os.path.exists(table_name)
# generate txt files with seeds
print ('Run cluster analysis for each TS table')
seedfiles = ['{}/seeds_{}.txt'.format(seed_folder, key) for key in keys]
# make DataFrame tables from seedfiles
tables=[]
for key, seedfile in zip(keys, seedfiles):
print ('{}: ...'.format(key),)
if os.path.exists(seedfile) and not update:
print ('Seedfile {} exists: skipping make_seeds step...'.format(seedfile))
table = pd.read_table(seedfile, index_col=0)
print ('found {} seeds'.format(len(table)))
else:
rec = open(seedfile, 'w')
nseeds = check_ts.make_seeds('test', table_name, fieldname=key, rec=rec,
seedroot=seedroot+prefix[key], rcut=tsmin, minsize=1,mask=None, max_pixels=max_pixels,)
if nseeds>0:
#read back, set skydir column, add to list of tables
print ('\tWrote file {} with {} seeds'.format(seedfile, nseeds))
table = pd.read_table(seedfile, index_col=0)
table['skydir'] = map(SkyDir, table.ra, table.dec)
table['key'] = key
else:
print ('\tFailed to find seeds: file {} not processed.'.format(seedfile))
continue
tables.append(table)
if len(tables)<2:
print ('No files to merge')
return
u = merge_seed_files(tables, merge_tolerance);
print ('Result of merge with tolerance {} deg: {}/{} kept'.format(merge_tolerance,len(u), sum([len(t) for t in tables])))
outfile ='{}/seeds_all.csv'.format(seed_folder)
u.to_csv(outfile)
print ('Wrote file {} with {} seeds'.format(outfile, len(u)))
def merge_seed_files(tables, dist_deg=1.0):
"""Merge multiple seed files
tables : list of data frames
"""
dist_rad = np.radians(dist_deg)
for t in tables:
t['skydir'] = map(SkyDir, t.ra, t.dec)
def find_close(A,B):
""" helper function: make a DataFrame with A index containg
columns of the
name of the closest entry in B, and its distance
A, B : DataFrame objects each with a skydir column
"""
def mindist(a):
d = map(a.difference, B.skydir.values)
n = np.argmin(d)
return [B.index[n], B.ts[n], np.degrees(d[n])]
df = pd.DataFrame( map(mindist, A.skydir.values),
index=A.index, columns=('id_b', 'ts_b', 'distance'))
df['ts_a'] = A.ts
df['id_a'] = A.index
return df
def merge2(A,B):
"Merge two tables"
close_df = find_close(A,B).query('distance<{}'.format(dist_rad))
bdups = close_df.query('ts_b<ts_a')
bdups.index=bdups.id_b
bdups = bdups[~bdups.index.duplicated()]
adups = close_df.query('ts_b>ts_a')
A['dup'] = adups['id_b']
B['dup'] = bdups['id_a']
merged= A[ | pd.isnull(A.dup) | pandas.isnull |
import pytest
import pandas as pd
import numpy as np
from pandas import testing as pdt
from pandas import Timestamp
from datetime import datetime
from pyam import utils, META_IDX
TEST_VARS = ["foo", "foo|bar", "foo|bar|baz"]
TEST_CONCAT_SERIES = pd.Series(["foo", "bar", "baz"], index=["f", "b", "z"])
def test_pattern_match_none():
data = pd.Series(["foo", "bar"])
values = ["baz"]
obs = utils.pattern_match(data, values)
assert (obs == [False, False]).all()
def test_pattern_match_nan():
data = pd.Series(["foo", np.nan])
values = ["baz"]
obs = utils.pattern_match(data, values, has_nan=True)
assert (obs == [False, False]).all()
def test_pattern_match_one():
data = | pd.Series(["foo", "bar"]) | pandas.Series |
try:
import debug_settings
except:
print("Cannot import debug settings!")
import logging
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
import glob
import numpy as np
import os
import pandas as pd
import sys
import yaml
from copy import deepcopy
from argparse import ArgumentParser
from collections import OrderedDict
from pathlib import Path
import bark.core.commons
import bark.core
import bark.core.models.behavior
from bark.runtime.commons.parameters import ParameterServer
from bark.runtime.scenario.scenario_generation.configurable_scenario_generation import (
ConfigurableScenarioGeneration, add_config_reader_module)
from bark.runtime.viewer.matplotlib_viewer import MPViewer
from bark.runtime.viewer.video_renderer import VideoRenderer
from bark_mcts.models.behavior.hypothesis.behavior_space.behavior_space import \
BehaviorSpace
from bark_ml.behaviors.discrete_behavior import BehaviorDiscreteMacroActionsML
from bark_ml.evaluators.goal_reached import GoalReached
from bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.agent import FQFAgent
from hythe.libs.environments.gym import HyDiscreteHighway
from hythe.libs.observer.belief_observer import BeliefObserver
from load.benchmark_database import BenchmarkDatabase
from serialization.database_serializer import DatabaseSerializer
add_config_reader_module("bark_mcts.runtime.scenario.behavior_space_sampling")
is_local = True
logging.info("Running on process with ID: {}".format(os.getpid()))
def configure_args(parser=None):
if parser is None:
parser = ArgumentParser()
parser.add_argument('--mode', type=str, default="train")
parser.add_argument('--output_dir', "--od", type=str, default="results/debug/exp_7bd21e7d-3c33-4dc9-8d1e-83dbb951f234")
parser.add_argument('--episode_no', "--en", type=int, default=1000)
return parser.parse_args(sys.argv[1:])
def configure_behavior_space(params):
return BehaviorSpace(params)
def main():
print("Experiment server at :", os.getcwd())
args = configure_args()
#load exp params
exp_dir = args.output_dir
params_filename = glob.glob(os.path.join(exp_dir, "params_[!behavior]*"))
params = ParameterServer(filename=params_filename[0])
params.load(fn=params_filename[0])
params["ML"]["BaseAgent"]["SummaryPath"] = os.path.join(exp_dir, "agent/summaries")
params["ML"]["BaseAgent"]["CheckpointPath"] = os.path.join(exp_dir, "agent/checkpoints")
splits = 8
behavior_params_filename = glob.glob(os.path.join(exp_dir, "behavior_params*"))
if behavior_params_filename:
params_behavior = ParameterServer(filename=behavior_params_filename[0])
else:
params_behavior = ParameterServer(filename="configuration/params/1D_desired_gap_no_prior.json")
behavior_space = configure_behavior_space(params_behavior)
hypothesis_set, hypothesis_params = behavior_space.create_hypothesis_set_fixed_split(split=splits)
observer = BeliefObserver(params, hypothesis_set, splits=splits)
behavior = BehaviorDiscreteMacroActionsML(params_behavior)
evaluator = GoalReached(params)
viewer = MPViewer(params=params,
x_range=[-35, 35],
y_range=[-35, 35],
follow_agent_id=True)
# database creation
dir_prefix = ""
dbs = DatabaseSerializer(test_scenarios=2, test_world_steps=2,
num_serialize_scenarios=10)
dbs.process(os.path.join(dir_prefix, "configuration/database"), filter_sets="**/**/interaction_merging_light_dense_1D.json")
local_release_filename = dbs.release(version="test")
db = BenchmarkDatabase(database_root=local_release_filename)
scenario_generator, _, _ = db.get_scenario_generator(0)
video_renderer = VideoRenderer(renderer=viewer, world_step_time=0.2)
env = HyDiscreteHighway(params=params,
scenario_generation=scenario_generator,
behavior=behavior,
evaluator=evaluator,
observer=observer,
viewer=video_renderer,
render=is_local)
# non-agent evaluation mode
num_steps = 100
num_samples = params_behavior["BehaviorSpace"]["Hypothesis"]["BehaviorHypothesisIDM"]["NumSamples"]
print("Steps, samples, splits", num_steps, num_samples, splits)
step = 1
env.reset()
threshold = observer.is_enabled_threshold
discretize = observer.is_discretize
beliefs_df = | pd.DataFrame(columns=["Step", "Action", "Agent", "Beliefs", "HyNum"]) | pandas.DataFrame |
import sys, io, json, base64, datetime as dt
sys.path.append("tmp")
import matplotlib
matplotlib.use('Agg') #not sure if this include / 'Agg' is necessary
import cntk
from helpers_cntk import *
####################################
# Parameters
####################################
classifier = 'svm' #must match the option used for model training
imgPath = "uploadedImg.jpg" #"12.jpg"
resourcesDir = "tmp"
# Do not change
run_mbSize = 1
svm_boL2Normalize = True
if classifier == "svm":
cntkRefinedModelPath = pathJoin(resourcesDir, "cntk_fixed.model")
else:
cntkRefinedModelPath = pathJoin(resourcesDir, "cntk_refined.model")
workingDir = pathJoin(resourcesDir, "tmp/")
svmPath = pathJoin(resourcesDir, classifier + ".np") #only used if classifier is set to 'svm'
lutId2LabelPath = pathJoin(resourcesDir, "lutId2Label.pickle")
################
# API run() and
# init() methods
################
# API call entry point
def run(input_df):
try:
print("Python version: " + str(sys.version) + ", CNTK version: " + cntk.__version__)
startTime = dt.datetime.now()
print(str(input_df))
# convert input back to image and save to disk
base64ImgString = input_df['image base64 string'][0]
print(base64ImgString)
pil_img = base64ToPilImg(base64ImgString)
print("pil_img.size: " + str(pil_img.size))
pil_img.save(imgPath, "JPEG")
print("Save pil_img to: " + imgPath)
# Load model <---------- SHOULD BE DONE JUST ONCE
print("Classifier = " + classifier)
makeDirectory(workingDir)
if not os.path.exists(cntkRefinedModelPath):
raise Exception("Model file {} does not exist, likely because the {} classifier has not been trained yet.".format(cntkRefinedModelPath, classifier))
model = load_model(cntkRefinedModelPath)
lutId2Label = readPickle(lutId2LabelPath)
# Run DNN
printDeviceType()
node = getModelNode(classifier)
mapPath = pathJoin(workingDir, "rundnn_map.txt")
dnnOutput = runCntkModelImagePaths(model, [imgPath], mapPath, node, run_mbSize)
# Predicted labels and scores
scoresMatrix = runClassifierOnImagePaths(classifier, dnnOutput, svmPath, svm_boL2Normalize)
scores = scoresMatrix[0]
predScore = np.max(scores)
predLabel = lutId2Label[np.argmax(scores)]
print("Image predicted to be '{}' with score {}.".format(predLabel, predScore))
# Create json-encoded string of the model output
executionTimeMs = (dt.datetime.now() - startTime).microseconds / 1000
outDict = {"label": str(predLabel), "score": str(predScore), "allScores": str(scores),
"Id2Labels": str(lutId2Label), "executionTimeMs": str(executionTimeMs)}
outJsonString = json.dumps(outDict)
print("Json-encoded detections: " + outJsonString[:120] + "...")
print("DONE.")
return(str(outJsonString))
except Exception as e:
return(str(e))
# API initialization method
def init():
try:
print("Executing init() method...")
print("Python version: " + str(sys.version) + ", CNTK version: " + cntk.__version__)
except Exception as e:
print("Exception in init:")
print(str(e))
################
# Main
################
def main():
from azureml.api.schema.dataTypes import DataTypes
from azureml.api.schema.sampleDefinition import SampleDefinition
from azureml.api.realtime.services import generate_schema
import pandas
# Create random 5x5 pixels image to use as sample input
#base64ImgString = "iVBORw0KGgoAAAANSUhEUgAAAAUAAAAFCAIAAAACDbGyAAAAFElEQVR4nGP8//8/AxJgYkAFpPIB6vYDBxf2tWQAAAAASUVORK5CYII="
#pilImg = pilImread("C:/Users/pabuehle/Desktop/vienna/iris4/tiny.jpg")
pilImg = Image.fromarray((np.random.rand(5, 5, 3) * 255).astype('uint8')) #.convert('RGB')
base64ImgString = pilImgToBase64(pilImg) #random 5x5 pixels image
# Call init() and run() function
init()
df = | pandas.DataFrame(data=[[base64ImgString]], columns=['image base64 string']) | pandas.DataFrame |
from pytorch_lightning.core.step_result import TrainResult
import pandas as pd
import torch
import math
import numpy as np
from src.utils import simple_accuracy
from copy import deepcopy
from torch.optim.lr_scheduler import LambdaLR
class WeightEMA(object):
def __init__(self, model, ema_model, alpha=0.999):
self.model = model
self.ema_model = ema_model
self.ema_model.eval()
self.alpha = alpha
self.ema_has_module = hasattr(self.ema_model, 'module')
# Fix EMA. https://github.com/valencebond/FixMatch_pytorch thank you!
self.param_keys = [k for k, _ in self.ema_model.named_parameters()]
self.buffer_keys = [k for k, _ in self.ema_model.named_buffers()]
for p in self.ema_model.parameters():
p.requires_grad_(False)
def step(self):
needs_module = hasattr(self.model, 'module') and not self.ema_has_module
with torch.no_grad():
msd = self.model.state_dict()
esd = self.ema_model.state_dict()
for k in self.param_keys:
if needs_module:
j = 'module.' + k
else:
j = k
model_v = msd[j].detach()
ema_v = esd[k]
esd[k].copy_(ema_v * self.alpha + (1. - self.alpha) * model_v)
for k in self.buffer_keys:
if needs_module:
j = 'module.' + k
else:
j = k
esd[k].copy_(msd[j])
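# --- Hedged usage sketch (added for clarity; the tiny model, optimizer and data below
# are illustrative stand-ins, not the project's actual networks). WeightEMA keeps an
# exponential moving average of the student weights, ema <- alpha*ema + (1-alpha)*param,
# applied after every optimizer step, while buffers are copied over directly. ---
def _weight_ema_usage_sketch():
    import torch.nn as nn
    model = nn.Linear(4, 2)
    ema_model = deepcopy(model)            # EMA copy starts from the same weights
    ema = WeightEMA(model, ema_model, alpha=0.999)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    x, y = torch.randn(8, 4), torch.randn(8, 2)
    loss = nn.functional.mse_loss(model(x), y)
    loss.backward()
    optimizer.step()
    ema.step()                             # update the EMA copy after the student update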
class UnlabelledStatisticsLogger:
def __init__(self, level='image', save_frequency=500, artifacts_path=None, name='unlabelled'):
self.level = level
self.batch_dfs = []
self.save_frequency = save_frequency
self.artifacts_path = artifacts_path
self.logging_df = pd.DataFrame()
self.name = name
self.strategies = set()
def log_statistics(self,
u_scores: torch.tensor,
u_targets: torch.tensor,
u_pseudo_targets: torch.tensor,
u_ids: torch.tensor,
current_epoch: int,
strategy_name=None,
current_globalstep: int = None):
if self.level == 'batch':
raise NotImplementedError()
# Needs to be rewritten to consider u_scores
# certain_ul_targets = u_targets[thresholding_mask == 1.0].cpu().numpy()
# all_ul_targets = u_targets.cpu().numpy()
# result.log('certain_ul_acc', certain_ul_acc, on_epoch=False, on_step=True, sync_dist=True)
# result.log('all_ul_acc', all_ul_acc, on_epoch=False, on_step=True, sync_dist=True)
# result.log('max_probs', u_scores.mean(), on_epoch=False, on_step=True, sync_dist=True)
# result.log('n_certain', thresholding_mask.sum(), on_epoch=False, on_step=True, sync_dist=True)
elif self.level == 'image':
batch_df = pd.DataFrame(index=range(len(u_ids)))
batch_df['image_id'] = u_ids.tolist()
batch_df['score'] = u_scores.tolist()
batch_df['correctness'] = (u_pseudo_targets == u_targets).tolist()
batch_df['epoch'] = current_epoch
if current_globalstep is not None:
batch_df['datastep'] = current_globalstep
if strategy_name is not None:
batch_df['strategy'] = strategy_name
self.strategies.add(strategy_name)
self.batch_dfs.append(batch_df)
def on_epoch_end(self, current_epoch):
if self.level:
for batch_df in self.batch_dfs:
self.logging_df = self.logging_df.append(batch_df, ignore_index=True)
self.batch_dfs = []
if self.level == 'image' and current_epoch % self.save_frequency == 0:
epochs_range = self.logging_df['epoch'].min(), self.logging_df['epoch'].max()
csv_path = f'{self.artifacts_path}/{self.name}_epochs_{epochs_range[0]:05d}_{epochs_range[1]:05d}.csv'
self.logging_df.to_csv(csv_path, index=False)
self.logging_df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MaxAbsScaler
from sklearn.cluster import KMeans
#%% load dataset
vehicles = pd.read_csv('../../data/raw/vehicles.csv')
print(vehicles.head())
print(vehicles.columns)
print(vehicles.shape)
#%% choosing select columns
select_columns = ['make', 'model', 'year', 'displ', 'cylinders', 'trany', 'drive', 'VClass', 'fuelType', 'barrels08',
'city08', 'highway08', 'comb08', 'co2TailpipeGpm', 'fuelCost08']
vehicles = vehicles[select_columns][vehicles.year <= 2016].drop_duplicates().dropna()
vehicles = vehicles.sort_values(['make', 'model', 'year'])
#%% rename the columns
vehicles.columns = ['Make', 'Model', 'Year', 'Engine Displacement', 'Cylinders', 'Transmission', 'Drivetrain',
'Vehicle Class', 'Fuel Type', 'Fuel Barrels/Year', 'City MPG', 'Highway MPG', 'Combined MPG',
'CO2 Emission Grams/Mile', 'Fuel Cost/Year']
#%% aggregating to higher level categories
def unique_col_values(df):
for column in df:
print(f'{df[column].name} | {len(df[column].unique())} | {df[column].dtype}')
unique_col_values(vehicles)
#%% aggregating transmission types
AUTOMATIC = 'Automatic'
MANUAL = 'Manual'
vehicles.loc[vehicles['Transmission'].str.startswith('A'), 'Transmission Type'] = AUTOMATIC
vehicles.loc[vehicles['Transmission'].str.startswith('M'), 'Transmission Type'] = MANUAL
print(vehicles['Transmission Type'].sample(5))
#%% aggregating vehicle class
small = ['Compact Cars', 'Subcompact Cars', 'Two Seaters', 'Minicompact Cars']
midsize = ['Midsize Cars']
large = ['Large Cars']
vehicles.loc[vehicles['Vehicle Class'].isin(small), 'Vehicle Category'] = 'Small Cars'
vehicles.loc[vehicles['Vehicle Class'].isin(midsize), 'Vehicle Category'] = 'Midsize Cars'
vehicles.loc[vehicles['Vehicle Class'].isin(large), 'Vehicle Category'] = 'Large Cars'
vehicles.loc[vehicles['Vehicle Class'].str.contains('Station'), 'Vehicle Category'] = 'Station Wagons'
vehicles.loc[vehicles['Vehicle Class'].str.contains('Truck'), 'Vehicle Category'] = 'Pickup Trucks'
vehicles.loc[vehicles['Vehicle Class'].str.contains('Special Purpose'), 'Vehicle Category'] = 'Special Purpose'
vehicles.loc[vehicles['Vehicle Class'].str.contains('Sport Utility'), 'Vehicle Category'] = 'Sport Utility'
vehicles.loc[vehicles['Vehicle Class'].str.lower().str.contains('van'), 'Vehicle Category'] = 'Vans & Minivans'
print(vehicles['Vehicle Category'].sample(5))
#%% aggregating make and model
vehicles['Model Type'] = (vehicles['Make'] + ' ' + vehicles['Model'].str.split().str.get(0))
print(vehicles['Model Type'].sample(5))
#%% aggregating fuel type
print(vehicles['Fuel Type'].unique())
vehicles['Gas'] = 0
vehicles['Ethanol'] = 0
vehicles['Electric'] = 0
vehicles['Propane'] = 0
vehicles['Natural Gas'] = 0
vehicles.loc[vehicles['Fuel Type'].str.contains('Regular|Gasoline|Midgrade|Premium|Diesel'), 'Gas'] = 1
vehicles.loc[vehicles['Fuel Type'].str.contains('E85'), 'Ethanol'] = 1
vehicles.loc[vehicles['Fuel Type'].str.contains('Electricity'), 'Electric'] = 1
vehicles.loc[vehicles['Fuel Type'].str.contains('propane'), 'Propane'] = 1
vehicles.loc[vehicles['Fuel Type'].str.contains('natural|CNG'), 'Natural Gas'] = 1
vehicles.loc[vehicles['Fuel Type'].str.contains('Regular|Gasoline'), 'Gas Type'] = 'Regular'
vehicles.loc[vehicles['Fuel Type'].str.contains('Midgrade'), 'Gas Type'] = 'Midgrade'
vehicles.loc[vehicles['Fuel Type'].str.contains('Premium'), 'Gas Type'] = 'Premium'
vehicles.loc[vehicles['Fuel Type'].str.contains('Diesel'), 'Gas Type'] = 'Diesel'
vehicles.loc[vehicles['Fuel Type'].str.contains('natural|CNG'), 'Gas Type'] = 'Natural'
cols = ['Fuel Type', 'Gas Type', 'Gas', 'Ethanol', 'Electric', 'Propane', 'Natural Gas']
print(vehicles[cols].sample(5))
#%% creating categories from continuous variables
# fuel efficiency
efficiency_categories = ['Very Low Efficiency', 'Low Efficiency', 'Moderate Efficiency', 'High Efficiency',
'Very High Efficiency']
vehicles['Fuel Efficiency'] = pd.qcut(vehicles['Combined MPG'], 5, efficiency_categories)
print(vehicles['Fuel Efficiency'].sample(5))
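#%% (added) quick illustration of pd.qcut binning -- not part of the original analysis
# pd.qcut assigns each value to one of N roughly equally populated bins; the toy MPG
# values below are invented purely to show the labelling behaviour.
_toy_mpg = pd.Series([12, 17, 22, 28, 45])
print(pd.qcut(_toy_mpg, 5, labels=efficiency_categories))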
#%% engine size
engine_categories = ['Very Small Engine', 'Small Engine', 'Moderate Engine', 'Large Engine', 'Very Large Engine']
vehicles['Engine Size'] = | pd.qcut(vehicles['Engine Displacement'], 5, engine_categories) | pandas.qcut |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.html
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Testing hwm_allocation() with bookings in natural order.
import unittest
from imscommon.es.ims_esclient import ESClient
from pyspark.sql import HiveContext
from pyspark import SparkContext, SparkConf
import optimizer.util
import pandas
from pandas.testing import assert_frame_equal
import optimizer.algo.hwm
import os
import json
import warnings
class Unittest_HWM_Allocations_2(unittest.TestCase):
def setUp(self):
warnings.simplefilter("ignore", ResourceWarning)
fpath = os.path.abspath(os.path.join(os.path.dirname(__file__),".."))
with open(fpath + '/data_source/bookings_fully_overlapped.json') as bookings_source:
self.bookings = json.load(bookings_source)
with open(fpath + '/data_source/cfg.json') as cfg_source:
self.cfg = json.load(cfg_source)
today = '20180402'
self.days = optimizer.util.get_days_from_bookings(today, self.bookings)
self.sc = SparkContext.getOrCreate()
self.hive_context = HiveContext(self.sc)
self.schema = optimizer.util.get_common_pyspark_schema()
def compare_two_dfs(self, pandas_df_expected, df_to_test_rows):
df = self.hive_context.createDataFrame(df_to_test_rows, self.schema)
df_allocated = optimizer.algo.hwm.hwm_allocation(df, self.bookings, self.days)
pandas_df_allocated = df_allocated.select("*").toPandas()
print(pandas_df_expected)
print(pandas_df_allocated)
return self.assertTrue(assert_frame_equal(pandas_df_expected, pandas_df_allocated, check_dtype=False) == None)
def test_hwm_allocation_case1(self):
pandas_df_expected = pandas.DataFrame(columns=['day', 'ands', 'minus', 'amount', 'allocated'])
pandas_df_expected.loc[0] = ['20180402', ['b1', 'b3', 'b2'], [], 733, {'b1': 500, 'b3': 233}]
df_to_test_rows = [(['20180402', ['b1', 'b3', 'b2'], [], {}, 733])]
return self.compare_two_dfs(pandas_df_expected, df_to_test_rows)
def test_hwm_allocation_case2(self):
pandas_df_expected = pandas.DataFrame(columns=['day', 'ands', 'minus', 'amount', 'allocated'])
pandas_df_expected.loc[0] = ['20180402', ['b1'], ['b2', 'b3'], 6047, {'b1': 500}]
df_to_test_rows = [(['20180402', ['b1'], ['b2', 'b3'], {}, 6047])]
return self.compare_two_dfs(pandas_df_expected, df_to_test_rows)
def test_hwm_allocation_case3(self):
pandas_df_expected = pandas.DataFrame(columns=['day', 'ands', 'minus', 'amount', 'allocated'])
pandas_df_expected.loc[0] = ['20180402', ['b2'], ['b1', 'b3'], 1410, {'b2': 800}]
df_to_test_rows = [(['20180402', ['b2'], ['b1', 'b3'], {}, 1410])]
return self.compare_two_dfs(pandas_df_expected, df_to_test_rows)
def test_hwm_allocation_case4(self):
pandas_df_expected = pandas.DataFrame(columns=['day', 'ands', 'minus', 'amount', 'allocated'])
pandas_df_expected.loc[0] = ['20180402', ['b3'], ['b1', 'b2'], 12241, {'b3': 1000}]
df_to_test_rows = [(['20180402', ['b3'], ['b1', 'b2'], {}, 12241])]
return self.compare_two_dfs(pandas_df_expected, df_to_test_rows)
def test_hwm_allocation_case5(self):
pandas_df_expected = pandas.DataFrame(columns=['day', 'ands', 'minus', 'amount', 'allocated'])
pandas_df_expected.loc[0] = ['20180402', ['b1', 'b2'], ['b3'], 3575, {'b1': 500, 'b2': 800}]
df_to_test_rows = [(['20180402', ['b1', 'b2'], ['b3'], {}, 3575])]
return self.compare_two_dfs(pandas_df_expected, df_to_test_rows)
def test_hwm_allocation_case6(self):
pandas_df_expected = | pandas.DataFrame(columns=['day', 'ands', 'minus', 'amount', 'allocated']) | pandas.DataFrame |
import doctest
import os
from unittest import TestCase
import pandas as pd
import xarray as xr
from pysd.tools.benchmarking import assert_frames_close
_root = os.path.dirname(__file__)
class TestUtils(TestCase):
def test_xrsplit(self):
import pysd
array1d = xr.DataArray([0.5, 0., 1.],
{'ABC': ['A', 'B', 'C']},
['ABC'])
array2d = xr.DataArray([[0.5, -1.5],
[-1., -0.5],
[-0.75, 0.]],
{'ABC': ['A', 'B', 'C'],
'XY': ['X', 'Y']},
['ABC', 'XY'])
array3d = xr.DataArray([[[0.5, 4.], [-1.5, 3.]],
[[-1., 2.], [-0.5, 5.5]],
[[-0.75, 0.75], [0., -1.]]],
{'ABC': ['A', 'B', 'C'],
'XY': ['X', 'Y'],
'FG': ['F', 'G']},
['ABC', 'XY', 'FG'])
s1d = pysd.utils.xrsplit(array1d)
s2d = pysd.utils.xrsplit(array2d)
s3d = pysd.utils.xrsplit(array3d)
# check length
self.assertEqual(len(s1d), 3)
self.assertEqual(len(s2d), 6)
self.assertEqual(len(s3d), 12)
# check all values for 1d
self.assertIn(xr.DataArray(0.5, {'ABC': ['A']}, ['ABC']), s1d)
self.assertIn(xr.DataArray(0., {'ABC': ['B']}, ['ABC']), s1d)
self.assertIn(xr.DataArray(1., {'ABC': ['C']}, ['ABC']), s1d)
# check some values for 2d and 3d
self.assertIn(xr.DataArray(0.5,
{'ABC': ['A'], 'XY': ['X']},
['ABC', 'XY']),
s2d)
self.assertIn(xr.DataArray(-0.5,
{'ABC': ['B'], 'XY': ['Y']},
['ABC', 'XY']),
s2d)
self.assertIn(xr.DataArray(-0.5,
{'ABC': ['B'], 'XY': ['Y'], 'FG': ['F']},
['ABC', 'XY', 'FG']),
s3d)
self.assertIn(xr.DataArray(0.75,
{'ABC': ['C'], 'XY': ['X'], 'FG': ['G']},
['ABC', 'XY', 'FG']),
s3d)
def test_get_return_elements_subscirpts(self):
import pysd
self.assertEqual(
pysd.utils.get_return_elements(
["Inflow A[Entry 1,Column 1]",
"Inflow A[Entry 1,Column 2]"],
{'Inflow A': 'inflow_a'}),
(['inflow_a'],
{'Inflow A[Entry 1,Column 1]': ('inflow_a',
('Entry 1', 'Column 1')),
'Inflow A[Entry 1,Column 2]': ('inflow_a',
('Entry 1', 'Column 2'))}
)
)
def test_get_return_elements_realnames(self):
import pysd
self.assertEqual(
pysd.utils.get_return_elements(
["Inflow A", "Inflow B"],
{'Inflow A': 'inflow_a', 'Inflow B': 'inflow_b'}),
(['inflow_a', 'inflow_b'],
{'Inflow A': ('inflow_a', None),
'Inflow B': ('inflow_b', None)}
)
)
def test_get_return_elements_pysafe_names(self):
import pysd
self.assertEqual(
pysd.utils.get_return_elements(
["inflow_a", "inflow_b"],
{'Inflow A': 'inflow_a', 'Inflow B': 'inflow_b'}),
(['inflow_a', 'inflow_b'],
{'inflow_a': ('inflow_a', None),
'inflow_b': ('inflow_b', None)}
)
)
def test_get_return_elements_not_found_error(self):
""""
Test for not found element
"""
import pysd
with self.assertRaises(KeyError):
pysd.utils.get_return_elements(
["inflow_a", "inflow_b", "inflow_c"],
{'Inflow A': 'inflow_a', 'Inflow B': 'inflow_b'})
def test_make_flat_df(self):
import pysd
df = pd.DataFrame(index=[1], columns=['elem1'])
df.at[1] = [xr.DataArray([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
{'Dim1': ['A', 'B', 'C'],
'Dim2': ['D', 'E', 'F']},
dims=['Dim1', 'Dim2'])]
expected = pd.DataFrame(index=[1], data={'Elem1[B,F]': 6.})
return_addresses = {
'Elem1[B,F]': ('elem1', {'Dim1': ['B'], 'Dim2': ['F']})}
actual = pysd.utils.make_flat_df(df, return_addresses)
# check all columns are in the DataFrame
self.assertEqual(set(actual.columns), set(expected.columns))
assert_frames_close(actual, expected, rtol=1e-8, atol=1e-8)
def test_make_flat_df_nosubs(self):
import pysd
df = pd.DataFrame(index=[1], columns=['elem1', 'elem2'])
df.at[1] = [25, 13]
expected = pd.DataFrame(index=[1], columns=['Elem1', 'Elem2'])
expected.at[1] = [25, 13]
return_addresses = {'Elem1': ('elem1', {}),
'Elem2': ('elem2', {})}
actual = pysd.utils.make_flat_df(df, return_addresses)
# check all columns are in the DataFrame
self.assertEqual(set(actual.columns), set(expected.columns))
self.assertTrue(all(actual['Elem1'] == expected['Elem1']))
self.assertTrue(all(actual['Elem2'] == expected['Elem2']))
def test_make_flat_df_return_array(self):
""" There could be cases where we want to
return a whole section of an array - ie, by passing in only part of
the simulation dictionary. in this case, we can't force to float..."""
import pysd
df = pd.DataFrame(index=[1], columns=['elem1', 'elem2'])
df.at[1] = [xr.DataArray([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
{'Dim1': ['A', 'B', 'C'],
'Dim2': ['D', 'E', 'F']},
dims=['Dim1', 'Dim2']),
xr.DataArray([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
{'Dim1': ['A', 'B', 'C'],
'Dim2': ['D', 'E', 'F']},
dims=['Dim1', 'Dim2'])]
expected = pd.DataFrame(index=[1], columns=['Elem1[A, Dim2]', 'Elem2'])
expected.at[1] = [xr.DataArray([[1, 2, 3]],
{'Dim1': ['A'],
'Dim2': ['D', 'E', 'F']},
dims=['Dim1', 'Dim2']),
xr.DataArray([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
{'Dim1': ['A', 'B', 'C'],
'Dim2': ['D', 'E', 'F']},
dims=['Dim1', 'Dim2'])]
return_addresses = {
'Elem1[A, Dim2]': ('elem1', {'Dim1': ['A'],
'Dim2': ['D', 'E', 'F']}),
'Elem2': ('elem2', {})}
actual = pysd.utils.make_flat_df(df, return_addresses)
# check all columns are in the DataFrame
self.assertEqual(set(actual.columns), set(expected.columns))
# need to assert one by one as they are xarrays
self.assertTrue(
actual.loc[1, 'Elem1[A, Dim2]'].equals(
expected.loc[1, 'Elem1[A, Dim2]']))
self.assertTrue(
actual.loc[1, 'Elem2'].equals(expected.loc[1, 'Elem2']))
def test_make_flat_df_flatten(self):
import pysd
df = | pd.DataFrame(index=[1], columns=['elem1', 'elem2']) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
##################################
# Author: <NAME>
# Copyright © 2020 The Board of Trustees of the Royal Botanic Gardens, Kew
##################################
#
# # wcvp_taxo
# wcvp_taxo is a python3 script for matching and resolving scientific names against the WCVP database (https://wcvp.science.kew.org/)
#
# ## Input
# ### A. Input files
# The script requires two input tables: The WCVP database and a file with species names to match on WCVP
# 1. **WCVP database**: must be downloaded from http://sftp.kew.org/pub/data-repositories/WCVP/. It will be filtered and saved by the script in pickle format. If you wish to update the WCVP database, delete the .pkl file.
# 2. **Sample file**: This spreadsheet must be in **.csv** format and contain at least one column with the scientific names you wish to match in WCVP. By default the script will look for a column named **scientific_name**. Otherwise it will look for a column called **Species**. If the species name is spread in two columns **(Genus, Species)**, the script will recognize it automatically.
#
# ### B. Parameters
# These parameters are optional and can be accessed with python wcvp_taxo.py -h
# - **-g, --resolve_genus**: Find taxa for scientific names written in genus sp. format
# - **-s, --similar_tax_method**: Find most similar taxa for misspelled taxa. <br>
# Possible values are:
# - **similarity_genus**: Search for similar scientific name in WCVP assuming genus is correct (fast)
# - **similarity**: Search for similar scientific name in WCVP (slow)
# - **request_kewmatch**: Search for similar scientific name using kewmatch (online) (suitable for fewer than 200 queries)
# - **-d, --duplicate_action**. Action to take when multiple wcvp entries match the provided scientific_name. <br>
# Possible values are:
# - **rank**: reduce duplicates by prioritizing accepted > unplaced > synonym > homotypic_synonym taxonomic status (keep first entry).
# - **divert**: divert duplicates to _duplicates.csv
# - **divert_taxonOK**: divert duplicates to _duplicates.csv, unless all matching entries have the same taxon name in WCVP (keep first entry)
# - **divert_speciesOK**: divert duplicates to _duplicates.csv, unless all matching entries have the same species name in WCVP (keep first entry)
# - **divert_genusOK**: divert duplicates to _duplicates.csv, unless all matching entries have the same genus name in WCVP (keep first entry and rename as genus sp.)
# - **-oc, --only_changes**: Output file only contains IDs that have a different taxonomy than provided (species, genus or family if provided)
# - **-os, --simple_output**: Output file is simplified to 4 columns: ID, kew-id, Ini_sci_name, sci_name
# - **-v, --verbose**: verbose output in console
#
#
# ## Example
# ```console
# python wcvp_taxo.py wcvp_export.txt sample_file.csv -g -s similarity_genus -d divert_taxonOK
# python wcvp_taxo.py wcvp_export.txt sample_file.csv
# python wcvp_taxo.py wcvp_export.txt sample_file.csv -oc -os -s similarity --verbose -d divert
# python wcvp_taxo.py wcvp_export.txt sample_file.csv -g -s similarity -d rank --verbose
# ```
#
# ## Output
# For the example above, the script will output the following tables:
# * **sample_file_wcvp.csv**: Samples for which the scientific name is resolved.
# * **sample_file_duplicates.csv**: Samples for which the scientific name matched multiple WCVP entries.
# * **sample_file_unresolved.csv**: Samples for which the scientific name did not match any WCVP entries.
#
#
# ## Pipeline
# ### Pre-processing
# * Load wcvp database. If only the text file exists, save it as .pkl.
# * Find column containing scientific names. scientific_name or sci_name (default), Species or Genus + Species otherwise.
# * Search for column with unique IDs. First column in table will be selected. Creates column with unique IDs if it doesn't exist. Will not pick sci_name or Species as ID.
#
# ### Initial checks
# * Check if Ini_scinames are written as Genus sp.
# * Check if Ini_scinames exist in WCVP
# * Optional. Find similar names if not in WCVP
# * Check if Ini_scinames have duplicate entries
# * Proceed to matching for valid scientific names
#
# ### Matching & Resolving
# 1. Find accepted and unplaced matches.
# 2. Resolves synonyms and homotypic synonyms.
# 3. Resolve duplicates.
# 4. Output tables
#
# ## Dependencies
# pandas, tqdm<br>
# for similarity: difflib, requests, ast<br>
# numpy, os, argparse, sys
# In[1]:
import pandas as pd
from tqdm import tqdm
import numpy as np
pd.options.mode.chained_assignment = None # default='warn'
import os
import argparse
import sys
# ## Parameters
# In[2]:
parser = argparse.ArgumentParser(
description='Script used to match species names with wcvp. Requires at least the paths to the wcvp\
file and to a .csv file containing scientific names (species or genus + species)')
parser.add_argument("wcvp_path", type=str,
help="path to wcvp_export.txt, \
download from http://sftp.kew.org/pub/data-repositories/WCVP/")
parser.add_argument("df_path", type=str,
help="path to spreadsheet in .csv format. Note output will be in the same folder")
parser.add_argument("-g", "--resolve_genus",
help="Optional. find taxa for scientific names written in genus sp. format",
action="store_true", default=False)
parser.add_argument("-s",'--similar_tax_method',
help="Optional. Find most similar taxa for misspelled taxa. possibles values are: \
similarity_genus, similarity, request_kew", action="store", default=None)
parser.add_argument("-d",'--duplicate_action',
help="Optional. Action to take when multiple wcvp taxon match to a sci_name. possibles values are: \
rank, divert, divert_taxonOK, divert_speciesOK, divert_genusOK.\
\n\n rank: reduce duplicates by prioritizing accepted > unplaced > synonym > homotypic synonym \
taxonomic status. \n\n divert: flag duplicates, remove them from _wcvp.csv output and write them to _duplicates.csv",
action="store", default='rank')
parser.add_argument("-oc", "--only_changes",
help="Optional. Output file only contains IDs that have a different taxonomy than provided",
action="store_true", default=False)
parser.add_argument("-od", "--output_duplicates",
help="Optional. Output a separate file for duplicates as _duplicates.csv",
action="store_true", default=False)
parser.add_argument("-os", "--simple_output",
help="Optional. Specify which columns of the input file should be kept, in addition to ID, species name \
and WCVP columns kew-id and species name. \
e.g. --simple_output ['idSequencing','NumReads'] will produce an output with \
idSequencing,NumReads, kew-id, Ini_sci_name, sci_name",
action="store_true", default=False)
parser.add_argument("-v", "--verbose",
help="Optional. verbose output in console",
action="store_true", default=False)
args = parser.parse_args()
wcvp_path = args.wcvp_path
df_path = args.df_path
resolve_genus=args.resolve_genus
find_most_similar=args.similar_tax_method
dupl_action=args.duplicate_action
only_changes=args.only_changes
simple_output=args.simple_output
verbose=args.verbose
status_keep=['Accepted','Unplaced']
# In[3]:
# ## Jupyter Notebook
# wcvp_path='wcvp_v4_mar_2021.txt'
# df_path='../PAFTOL_DB/2021-03-19_paftol_export.csv'
# resolve_genus=True
# find_most_similar='similarity'
# dupl_action='rank'
# verbose=False
# only_changes=True
# # simple_output=False
# simple_output=['idPaftol','idSequencing','ExternalSequenceID','DataSource','Project','Taxonomical_Notes']
# status_keep=['Accepted','Unplaced']
# ## Functions
# ### Data processing functions
# In[4]:
# Load wcvp file and save as pickle for faster loading
def load_wcvp(wcvp_path):
print('Loading WCVP...',end='')
# Load pickle
if os.path.exists(wcvp_path.replace('.txt','.pkl')):
print('found .pkl...',end='')
wcvp = pd.read_pickle(wcvp_path.replace('.txt','.pkl'))
elif os.path.exists(wcvp_path):
wcvp = pd.read_table(wcvp_path,sep='|',encoding='utf-8')
print('found .txt, ',end='')
# Remove extra columns
wcvp = wcvp.drop(columns=['parent_kew_id','parent_name','parent_authors'])
print('saving to .pkl...',end='')
wcvp.to_pickle(wcvp_path.replace('.txt','.pkl'))
else:
print('could not find',wcvp_path)
sys.exit()
print(wcvp.shape[0],'entries')
return wcvp
def load_df(df_path):
print('Loading dataset...',end='')
try:
smpl_df = pd.read_csv(df_path,encoding='utf-8')
print(smpl_df.shape[0],'entries')
return smpl_df
except:
print('could not find',df_path)
sys.exit()
# In[5]:
#Define ID column
def GetIDcol(df):
#Check for columns with all unique values
col_unique=(df.nunique()==df.shape[0]).to_frame().reset_index().rename(columns={0:'unique','index':'column'})
col_unique = col_unique[col_unique.unique==True]
col_unique = col_unique[~col_unique['column'].isin(['Ini_sci_name','Ini_Genus','Ini_Species'])]
if col_unique.shape[0]>0:
print('found',col_unique.shape[0],'ID column:',end='')
colsID=list(col_unique['column'])
colID=colsID[0]
print(colID)
else:
print('No ID column, create ID from index')
colID='ID'
#Return new col with ID
return colID
# In[6]:
#Find which column contains the scientific name to match
def define_sci_name(smpl_df, verbose=False):
col_taxo=list(smpl_df.columns[smpl_df.columns.str.contains(
'family|genus|species|infraspecies|sci_name|scientific_name',case=False)])
for itaxo in col_taxo:
smpl_df = smpl_df.rename(columns = {itaxo:'Ini_' + itaxo.capitalize()})
if verbose:
print('renaming ' + itaxo + ' to Ini_' + itaxo, end=', ')
# Use sci_name if provided
if 'Ini_Sci_name' in smpl_df.columns:
print('\nScientific Name is sci_name')
smpl_df = smpl_df.rename(columns = {'Ini_Sci_name':'Ini_sci_name'})
elif 'Ini_Scientific_name' in smpl_df.columns:
print('\nScientific Name is scientific_name')
smpl_df = smpl_df.rename(columns = {'Ini_Scientific_name':'Ini_sci_name'})
else:
# Identify is scientific name is in 1 or two columns
try:
avg_word_sp=smpl_df['Ini_Species'].str.split().str.len().mean()
print('avg words in Ini_Species:',round(avg_word_sp,1))
if round(avg_word_sp)==1:
print('Scientific Name (Ini_sci_name) is Ini_Genus + Ini_Species')
smpl_df['Ini_sci_name'] = smpl_df['Ini_Genus'] + ' ' + smpl_df['Ini_Species']
elif round(avg_word_sp)>=2:
print('Scientific Name (Ini_sci_name) is Ini_Species')
smpl_df['Ini_sci_name'] = smpl_df['Ini_Species']
except:
print('ERROR: Could not identify species column')
sys.exit()
return smpl_df
# ### WCVP related functions
# In[7]:
def get_by_taxon_name(df, wcvp):
tmp_wcvp=wcvp[wcvp.taxon_name.isin(df.sci_name)]
match = pd.merge(df, tmp_wcvp, how='inner', left_on='sci_name', right_on='taxon_name')
return match
# In[8]:
def get_by_kew_id(df, wcvp):
tmp_wcvp=wcvp[wcvp.kew_id.isin(df.kew_id)]
match = pd.merge(df, tmp_wcvp, how='inner', on='kew_id')
return match
# In[9]:
#Find closely matching scientific name using difflib.get_close_matches if scientific name was not found
def find_sim(sci_name, wcvp, only_from_genus=True):
if sci_name==sci_name:
# Search for similar sci_name with same genus
smpl_genus=sci_name.split(' ')[0]
wcvp_gen=wcvp[wcvp.genus.isin([smpl_genus])]
if wcvp_gen.shape[0]>0:
sim_tax = difflib.get_close_matches(sci_name,
wcvp_gen.taxon_name.astype(str), n=1, cutoff=.9)
if len(sim_tax)>0:
return sim_tax[0]
# If didn't work, search for similar sci_name
else:
if only_from_genus==False:
sim_tax = difflib.get_close_matches(sci_name,
wcvp.taxon_name.astype(str), n=1, cutoff=.9)
if len(sim_tax)>0:
return sim_tax[0]
else:
return None
else:
return None
# print(find_sim('Combretum mussaendiflora', wcvp))
# print(find_sim('Scaveola humilis', wcvp, only_from_genus=False))
# In[10]:
#Find closely matching scientific name using kew namematching system
def kew_namematch(sci_name, verbose=False):
url = "http://namematch.science.kew.org/api/v2/powo/csv"
payload = '{\"column\": 0,\"headers\": false,\"outputAllColumns\": true,\"currentChunk\": 0,\"data\": [[\"' + sci_name + '\"]]}'
headers = {
'Content-Type': 'application/json'
}
try:
response = requests.request("POST", url, headers=headers, data = payload)
content=str(response.text).replace("b'",'').replace("'",'')
if verbose:
print(content)
stats_dict=ast.literal_eval(content)['stats']
if stats_dict['matched']>0:
try:
record_dict=ast.literal_eval(content)['records']
rec_df = pd.DataFrame(record_dict[1], index =record_dict[0]).T
return rec_df.loc[0,'Scientific Name']
except:
print('ERROR: Failed to convert to dict -',sci_name)
return None
except:
print('ERROR: No valid response -',sci_name)
return None
# print(kew_namematch('Combretum mussaendiflora',verbose=True))
# In[11]:
#Find closely matching scientific name
def get_sim(df, wcvp, find_most_similar, verbose=False):
print('\nLooking for most similar names')
df['Similar_sci_name']=np.nan
for idx, row in tqdm(df.iterrows(), total=df.shape[0]):
if find_most_similar=='similarity_genus':
df.loc[idx,'Similar_sci_name']=find_sim(row.sci_name, wcvp, only_from_genus=True)
elif find_most_similar=='similarity':
df.loc[idx,'Similar_sci_name']=find_sim(row.sci_name, wcvp, only_from_genus=False)
        elif find_most_similar in ['kewmatch', 'request_kew']:
df.loc[idx,'Similar_sci_name']=kew_namematch(row.sci_name)
if verbose:
print(idx,row.sci_name,':',df.loc[idx,'Similar_sci_name'])
df['InWCVP']=(df.Similar_sci_name.isna()==False)
df['Similar_match']=(df.Similar_sci_name.isna()==False)
# Recover accepted and synonym taxa for found similar names
df = df.drop(columns=['sci_name']).rename(columns={'Similar_sci_name':'sci_name'})
return df
# In[12]:
def get_duplicates_type(df):
df = df.sort_values('ID').reset_index().drop(columns='index')
df['Duplicate_type']='Different_taxa'
ID_ls=df.ID.unique()
for iID in ID_ls:
tmp_df=df[df.ID==iID]
# Check if all entries have the same genus
if tmp_df.genus.nunique()==1:
df.loc[tmp_df.index,'Duplicate_type']='Same_Genus'
# Check if all entries have the same species
        if len(set(tmp_df.genus + ' ' + tmp_df.species))==1:
df.loc[tmp_df.index,'Duplicate_type']='Same_Species'
# Check if all entries have the same taxon name
if tmp_df.taxon_name.nunique()==1:
df.loc[tmp_df.index,'Duplicate_type']='Same_Taxon'
return df
# ## Main
# In[13]:
if __name__ == "__main__":
print('\n\n##### wcvp_taxo v0.5 ##### \nAuthor: <NAME> \nLast update: 2021-03-25\n')
print(wcvp_path, df_path, 'g:', resolve_genus, ' s:', find_most_similar, ' d:', dupl_action,
' oc:', only_changes, ' os:', simple_output, ' v:', verbose)
#Load libraries depending on the similarity method
if find_most_similar in ['similarity_genus','similarity']:
import difflib
    if find_most_similar in ['request_kew', 'kewmatch']:
import requests
import ast
## Loading and preparing data
print('\n\nLoading and preparing data')
wcvp = load_wcvp(wcvp_path)
smpl_df = load_df(df_path)
# Find scientific names
smpl_df = define_sci_name(smpl_df)
# Select or make ID column
colID=GetIDcol(smpl_df)
if colID=='ID':
smpl_df['ID']=smpl_df.index
else:
smpl_df['ID']=smpl_df[colID]
## Initial checks
smpl_df['sci_name']=smpl_df['Ini_sci_name']
print('\n\nInitial checks')
# Check if Ini_scinames are written as Genus sp.
smpl_df['Genus_sp'] = smpl_df['sci_name'].str.split(' ',expand=True)[1].isin(['sp.','sp'])
if smpl_df.Genus_sp.sum()>0:
smpl_df.loc[smpl_df.Genus_sp,'sci_name'] = smpl_df.loc[smpl_df.Genus_sp,'sci_name'].str.split(' ',expand=True)[0]
if resolve_genus:
print('Genus sp:',(smpl_df.Genus_sp==True).sum(),'IDs with Genus sp. as sci_name')
# Check if Ini_scinames exist in WCVP
smpl_df['InWCVP']=smpl_df.sci_name.isin(wcvp.taxon_name)
print('Missing taxa:',(smpl_df.InWCVP==False).sum(),'IDs not in WCVP')
# Optional. Find similar names if not in WCVP
if find_most_similar in ['similarity_genus','similarity','request_kew']:
resolved_sim = get_sim(smpl_df[smpl_df.InWCVP==False],wcvp=wcvp,find_most_similar=find_most_similar,verbose=verbose)
smpl_df = pd.concat([smpl_df[~smpl_df.ID.isin(resolved_sim.ID)], resolved_sim])
print('find_most_similar: found',smpl_df.Similar_match.sum(),'IDs by similarity')
# Check if Ini_scinames have duplicate entries
dupl_taxon_names = wcvp[wcvp.taxon_name.isin(smpl_df.sci_name)]['taxon_name'].to_frame().groupby('taxon_name').size() .to_frame().reset_index().rename(columns={0:'count'})
dupl_taxon_names = dupl_taxon_names[dupl_taxon_names['count']>1].taxon_name
smpl_df['Duplicates']=smpl_df.sci_name.isin(dupl_taxon_names)
print('Duplicates:',(smpl_df.Duplicates==True).sum(),'IDs matching multiple entries in WCVP')
# Simpler dataframe
if smpl_df[~smpl_df.InWCVP].shape[0]>0:
print('No match for',smpl_df[~smpl_df.InWCVP].shape[0],'IDs')
smpl_dfs = smpl_df[smpl_df.InWCVP][['ID','sci_name','Duplicates']]
## get WCVP taxons
# Recover accepted and unplaced taxa
print('\n\nMatching & Resolving')
match = get_by_taxon_name(smpl_dfs[(smpl_dfs.Duplicates==False)], wcvp)
return_df = match[match.taxonomic_status.isin(status_keep)]
print('After direct matching: found match for',return_df.shape[0],'IDs')
# Resolving synonyms
synonyms = match[match.taxonomic_status.isin(['Synonym','Homotypic_Synonym'])] .rename(columns={'kew_id':'Ini_kew_id','accepted_kew_id':'kew_id','taxonomic_status':'Ini_taxonomic_status'})
cols_syn=list(smpl_dfs.columns) + ['Ini_kew_id','Ini_taxonomic_status']
return_syn = get_by_kew_id(df = synonyms[cols_syn + ['kew_id']], wcvp = wcvp)
    return_df = pd.concat([return_df,return_syn])
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
from hashlib import md5
from typing import Dict, Iterable, Optional, Set, Type
import numpy as np
import pandas as pd
from ax.core.base import Base
from ax.core.types import TFidelityTrialEvaluation, TTrialEvaluation
TPdTimestamp = pd.Timestamp
COLUMN_DATA_TYPES = {
"arm_name": str,
"metric_name": str,
"mean": np.float64,
"sem": np.float64,
"trial_index": np.int64,
"start_time": TPdTimestamp,
"end_time": TPdTimestamp,
"n": np.int64,
"frac_nonnull": np.float64,
"random_split": np.int64,
"fidelities": str, # Dictionary stored as json
}
REQUIRED_COLUMNS = {"arm_name", "metric_name", "mean"}
class Data(Base):
"""Class storing data for an experiment.
The dataframe is retrieved via the `df` property. The data can be stored
to gluster for future use by attaching it to an experiment using
`experiment.add_data()` (this requires a description to be set.)
Attributes:
df: DataFrame with underlying data, and required columns.
description: Human-readable description of data.
"""
def __init__(
self, df: Optional[pd.DataFrame] = None, description: Optional[str] = None
) -> None:
"""Init Data.
Args:
df: DataFrame with underlying data, and required columns.
description: Human-readable description of data.
"""
# Initialize with barebones DF.
if df is None:
self._df = pd.DataFrame(columns=self.required_columns())
else:
missing_columns = self.required_columns() - (
self.required_columns() & set(df.columns.tolist())
)
if len(missing_columns) > 0:
raise ValueError(
f"Dataframe must contain required columns {list(missing_columns)}."
)
extra_columns = set(df.columns.tolist()) - (
set(self.column_data_types().keys()) & set(df.columns.tolist())
)
if len(extra_columns) > 0:
raise ValueError(f"Columns {list(extra_columns)} are not supported.")
df = df.dropna(axis=0, how="all").reset_index(drop=True)
df = self._safecast_df(df=df)
# Reorder the columns for easier viewing
col_order = [c for c in self.column_data_types() if c in df.columns]
self._df = df[col_order]
self.description = description
@classmethod
def _safecast_df(cls, df: pd.DataFrame) -> pd.DataFrame:
"""Function for safely casting df to standard data types.
Needed because numpy does not support NaNs in integer arrays.
Args:
df: DataFrame to safe-cast.
Returns:
safe_df: DataFrame cast to standard dtypes.
"""
dtype = {
            # Pandas timestamp handling is weird
col: "datetime64[ns]" if coltype is TPdTimestamp else coltype
for col, coltype in cls.column_data_types().items()
if col in df.columns.values
and not (
cls.column_data_types()[col] is np.int64
and df.loc[:, col].isnull().any()
)
}
return df.astype(dtype=dtype)
@staticmethod
def required_columns() -> Set[str]:
"""Names of required columns."""
return REQUIRED_COLUMNS
@staticmethod
def column_data_types() -> Dict[str, Type]:
"""Type specification for all supported columns."""
return COLUMN_DATA_TYPES
@staticmethod
def from_multiple_data(data: Iterable["Data"]) -> "Data":
        data = list(data)  # materialize so a generator is not exhausted by the emptiness check
        if len(data) == 0:  # Return empty data if empty iterable.
            return Data()
        return Data(df=pd.concat([datum.df for datum in data], axis=0, sort=True))
@staticmethod
def from_evaluations(
evaluations: Dict[str, TTrialEvaluation],
trial_index: int,
sample_sizes: Optional[Dict[str, int]] = None,
start_time: Optional[int] = None,
end_time: Optional[int] = None,
) -> "Data":
"""
Convert dict of evaluations to Ax data object.
Args:
evaluations: Map from arm name to metric outcomes (itself a mapping
of metric names to tuples of mean and optionally a SEM).
trial_index: Trial index to which this data belongs.
sample_sizes: Number of samples collected for each arm.
start_time: Optional start time of run of the trial that produced this
data, in milliseconds.
end_time: Optional end time of run of the trial that produced this
data, in milliseconds.
Returns:
Ax Data object.
"""
records = [
{
"arm_name": name,
"metric_name": metric_name,
"mean": evaluation[metric_name][0],
"sem": evaluation[metric_name][1],
"trial_index": trial_index,
}
for name, evaluation in evaluations.items()
for metric_name in evaluation.keys()
]
if start_time is not None or end_time is not None:
for record in records:
record.update({"start_time": start_time, "end_time": end_time})
if sample_sizes:
for record in records:
record["n"] = sample_sizes[str(record["arm_name"])]
        return Data(df=pd.DataFrame(records))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Name: csv_plot_heatmap.py
# Description:
#
# Author: m.akei
# Copyright: (c) 2020 by m.na.akei
# Time-stamp: <2020-08-30 09:56:44>
# Licence:
# Copyright (c) 2021 <NAME>
#
# This software is released under the MIT License.
# http://opensource.org/licenses/mit-license.php
# ----------------------------------------------------------------------
import argparse
import textwrap
import sys
import re
import json
from pathlib import Path
import math
import numpy as np
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import plotly.express as px
import pandas as pd
from scipy.stats import moment
VERSION = 1.0
HIST_FUNC_LIST = ['count', 'sum', 'avg', 'min', 'max']
def init():
    # argparse --- parser for command-line options, arguments and sub-commands, Python 3.8.5 documentation https://docs.python.org/ja/3/library/argparse.html
arg_parser = argparse.ArgumentParser(description="plot histogram chart with counting values",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent('''
remark:
  the columns given to '--facets' and '--category' should have few unique values.
  If '--xrange' is given, values in the column are clipped to the range and plotted with the bins given by '--nbins'.
  for '--pareto_chart', only the following options are available:
  '--xrange', '--yrange', '--nbins', '--output', '--format', '--width', '--height', '--packed_html'
  '--pareto_sort_mode=axis' may be useful to estimate a threshold.
for animation column, colon ":" must be escaped by "\". ex: "Animation\:Column".
if datetime column was used as column for animation, format of datetime should be defined.
see datetime Basic date and time types Python 3.9.4 documentation https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
about '--nbin_mode', see Histogram - Wikipedia https://en.wikipedia.org/wiki/Histogram .
NOTE 's_and_s' means Shimazaki and Shinomoto's choice.
example:
csv_plot_histogram.py --nbins=50 --category="ABC004" --xrange=0.4,0.6 --output=test_plot_hist.html test_plot.csv "ABC001" "ABC002"
csv_plot_histogram.py --nbins=50 --category="ABC004" --side_hist=rug --output=test_plot_hist.html test_plot.csv "ABC001" "ABC002"
csv_plot_histogram.py --nbins=50 --category="ABC004" --side_hist=rug --log_y --xrange=0.4,0.6 --output=test_plot_hist.html test_plot.csv "ABC001" "ABC002"
csv_plot_histogram.py --nbin_mode="square-root" --output=test_plot_hist.html test_plot.csv "ABC001" "ABC002"
csv_plot_histogram.py --output=test.html --pareto_chart --nbins=100 a10.csv value
csv_plot_histogram.py --output=test.html --pareto_chart --pareto_sort_mode=axis --nbins=100 a10.csv value
csv_plot_histogram.py --output=test_hist_dt.html --datetime="%Y-%m-%d %H:%M:%S" test_bar.csv dt
'''))
arg_parser.add_argument('-v', '--version', action='version', version='%(prog)s {}'.format(VERSION))
arg_parser.add_argument("--title", dest="TITLE", help="title of chart", type=str, metavar='TEXT', default="")
arg_parser.add_argument("--nbins", dest="NBINS", help="number of bins,default=10", type=int, metavar='INT', default=10)
arg_parser.add_argument("--nbin_modes",
dest="NBIN_MODE",
help="method to evaluate number of bins. if given, '--nbins' is ignored.",
choices=["square-root", "sturges", "rice", "doane", "s_and_s", "freedman_diaconis"],
default=None)
arg_parser.add_argument("--side_hist",
dest="SIDE_HIST",
help="side histogram mode",
choices=['rug', 'box', 'violin', 'histogram'],
default=None)
arg_parser.add_argument("--facets",
dest="FACETS",
help="names of columns to make group with csv, 'row_facet,col_facet'",
type=str,
metavar='column[,column]',
default=None)
arg_parser.add_argument("--hist_func",
dest="HIST_FUNC",
help="function for histogram z-axis",
choices=HIST_FUNC_LIST,
default="count")
arg_parser.add_argument("--category", dest="CATEG", help="name of column as category", type=str, metavar='column', default=None)
arg_parser.add_argument("--category_orders",
dest="CATEG_ORDERS",
help="orders of elements in each category, with json format",
type=str,
metavar='JSON_STRING',
default=None)
arg_parser.add_argument("--animation_column",
dest="ANIMATION_COL",
help="name of column as aimation",
type=str,
metavar='column[:datetime_format]',
default=None)
arg_parser.add_argument("--datetime",
dest="XDATETIME",
help="format of x as datetime",
type=str,
metavar='DATETIME_FORMAT',
default=None)
arg_parser.add_argument("--xrange", dest="XRANGE", help="range of x", type=str, metavar='XMIN,XMAX')
arg_parser.add_argument("--yrange", dest="YRANGE", help="range of y", type=str, metavar='YMIN,YMAX')
arg_parser.add_argument("--log_x", dest="LOG_X", help="log-scaled x axis", action="store_true", default=False)
arg_parser.add_argument("--log_y", dest="LOG_Y", help="log-scaled y axis", action="store_true", default=False)
arg_parser.add_argument("--noautoscale", dest="NOAUTOSCALE", help="not autoscale x or y for facets", action="store_false")
arg_parser.add_argument("--output", dest="OUTPUT", help="path of output file", type=str, metavar="FILE")
arg_parser.add_argument("--format",
dest="FORMAT",
help="format of output, default=svg",
choices=["svg", "png", "jpg", "json", "html"],
default="svg")
arg_parser.add_argument("--packed_html",
dest="PACKED_HTML",
help="whether plotly.js is included in result html file, this is enable only for --format=html",
action="store_true")
arg_parser.add_argument("--width", dest="WIDTH", help="width of output", type=int, metavar='WIDTH', default=None)
arg_parser.add_argument("--height", dest="HEIGHT", help="height of output", type=int, metavar='HEIGHT', default=None)
arg_parser.add_argument("--pareto_chart", dest="PARETO", help="pareto chart mode", action="store_true", default=False)
arg_parser.add_argument("--pareto_sort_mode",
dest="PARETO_M",
help="sort mode for pareto mode, default=asscending count",
choices=["count", "axis"],
default="count")
arg_parser.add_argument('csv_file', metavar='CSV_FILE', help='csv files to read', nargs=1)
    arg_parser.add_argument('x_column', metavar='X_COLUMN', help='name of column as x-axis', nargs=1)
    arg_parser.add_argument('y_column', metavar='Y_COLUMN', help='name of column as weight of histogram', nargs="?")
args = arg_parser.parse_args()
return args
def make_categ_hist(df, column, sort_mode, params):
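    """Build a value-count table (index, count) for a categorical column, sorted by count when sort_mode == "count"."""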
col = column
df_series = df[col]
if sort_mode == "count":
pareto_df = df_series.value_counts().sort_values(ascending=False).reset_index()
else:
pareto_df = df_series.value_counts().reset_index()
pareto_df.sort_index(ascending=True, inplace=True)
# pareto_df["index"] = pareto_df["index"].apply(lambda x: str(x))
pareto_df["index"] = pareto_df["index"].astype("string")
return pareto_df
def make_number_hist(df, column, sort_mode, params):
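    """Build a binned count table for a numeric column via np.histogram, optionally weighted by the params["y"] column."""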
nphist_arg = {"bins": params["nbins"]}
if "y" in params:
nphist_arg.update({"weights": np.array(df[params["y"]])})
hist, bins = np.histogram(np.array(df[column]), **nphist_arg)
pareto_df = pd.DataFrame(columns=[column, "index"])
pareto_df["index"] = bins[:-1]
pareto_df[column] = hist
if sort_mode == "count":
pareto_df.sort_values(ascending=False, inplace=True, by=column)
pareto_df.reset_index(inplace=True, drop=True)
else:
pareto_df.sort_index(ascending=True, inplace=True)
# pareto_df["index"] = np.round(pareto_df["index"], 2)
pareto_df["index"] = pareto_df["index"].apply(lambda x: "{}".format(x))
return pareto_df
def plot_pareto_chart(df, column, sort_mode, params):
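    """Draw a Pareto chart: per-bin/category counts as bars plus the cumulative percentage on a secondary y-axis."""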
col = column
if df[column].dtype == float or df[column].dtype == int:
pareto_df = make_number_hist(df, column, sort_mode, params)
else:
pareto_df = make_categ_hist(df, column, sort_mode, params)
pareto_df["cumulative persentage"] = pareto_df[col].cumsum() / pareto_df[col].sum() * 100
fig = make_subplots(specs=[[{"secondary_y": True}]])
fig.add_trace(
go.Bar(x=pareto_df["index"], y=pareto_df[col], name="count"),
secondary_y=False,
)
fig.add_trace(
        go.Scatter(x=pareto_df["index"], y=pareto_df["cumulative percentage"], name="cumulative percentage", mode='markers'),
secondary_y=True,
)
fig.update_layout(xaxis=dict(type="category"))
if "titile" in params:
fig.update_layout(title_text=params["title"])
if "range_y" in params:
fig.update_layout(yaxis_range=params["range_y"])
if "log_x" in params and params["log_x"]:
fig.update_xaxes(type="log")
if "log_y" in params and params["log_y"]:
fig.update_yaxes(type="log")
fig.update_xaxes(title_text=col)
fig.update_yaxes(title_text="count", secondary_y=False)
fig.update_yaxes(title_text="cumulative persentage", secondary_y=True)
return fig, pareto_df
def evaluate_number_of_bin(ds_0, mode):
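    """Estimate the number of histogram bins using the rule named by 'mode'; all supported rules are printed for reference."""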
ds = ds_0.dropna()
npts = len(ds)
nbin_tabl = {}
nbin_tabl["square-root"] = math.ceil(math.sqrt(npts))
nbin_tabl["sturges"] = math.ceil(math.log2(npts)) + 1
nbin_tabl["rice"] = math.ceil(2 * math.pow(npts, 1 / 3))
sig = math.sqrt((6 * (npts - 2)) / ((npts + 1) * (npts + 3)))
skw = moment(ds, moment=3)
nbin_tabl["doane"] = int(1 + math.log2(npts) + math.log2(1 + abs(skw) / sig))
nbin_tabl["s_and_s"] = evaluate_number_of_bin_Shimazaki_and_Shinomoto(ds)
nbin_tabl["freedman_diaconis"] = evaluate_number_of_bin_Freedman_Diaconis(ds)
if mode not in nbin_tabl:
mes = f"??error:csv_plot_histogram:invalid mode to evaluate number of bins:{mode}"
print(mes, file=sys.stderr)
raise ValueError(mes)
print(f"%inf:csv_plot_histgoram:evaluate_number_of_bin:required mode={mode}", file=sys.stderr)
for k in nbin_tabl.keys():
print(f"\t{k}={nbin_tabl[k]}", file=sys.stderr)
nbins = int(nbin_tabl[mode])
return nbins
def evaluate_number_of_bin_Shimazaki_and_Shinomoto(ds, N_MIN=4, N_MAX=100):
# GitHub - oldmonkABA/optimal_histogram_bin_width: Method to compute equal sized Optimal Histogram Bin Width https://github.com/oldmonkABA/optimal_histogram_bin_width
# https://www.neuralengine.org/res/code/python/histsample_torii.py https://www.neuralengine.org/res/code/python/histsample_torii.py
x = ds.array
x_max = max(x)
x_min = min(x)
N0 = np.arange(N_MIN, N_MAX)
D = (x_max - x_min) / N0 # bin size vector
    Cost = np.zeros(np.size(D))
for i in range(np.size(N0)):
ki = np.histogram(x, bins=N0[i])
ki = ki[0]
k = np.mean(ki)
v = np.var(ki)
Cost[i] = (2 * k - v) / (D[i]**2)
idx = np.argmin(Cost)
cmin = Cost[idx] # minimum cost
optD = D[idx] # optiomum bin
print(f" {cmin}, {N0[idx]}, {optD}", file=sys.stderr)
return N0[idx]
def evaluate_number_of_bin_Freedman_Diaconis(ds):
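    """Freedman-Diaconis rule: bin width = 2*IQR/n**(1/3), number of bins = ceil((max-min)/width)."""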
iqr = ds.quantile(.75) - ds.quantile(.25)
npts = len(ds)
bin_width = 2 * iqr / math.pow(npts, 1 / 3)
nbin = int(np.ceil((ds.max() - ds.min()) / bin_width))
return nbin
if __name__ == "__main__":
args = init()
csv_file = args.csv_file[0]
output_format = args.FORMAT
packed_html = args.PACKED_HTML
width = args.WIDTH
height = args.HEIGHT
categ = args.CATEG
animation_col = args.ANIMATION_COL
output_file = args.OUTPUT
log_x = args.LOG_X
log_y = args.LOG_Y
no_auto_scale = args.NOAUTOSCALE
hist_func = args.HIST_FUNC
pareto_mode = args.PARETO
pareto_sort_mode = args.PARETO_M
if output_file is None:
if csv_file != "-":
output_file = Path(csv_file).stem + "_histogram_" + hist_func + "." + output_format
else:
output_file = sys.stdout.buffer
else:
output_format = Path(output_file).suffix[1:]
if csv_file == "-":
csv_file = sys.stdin
title = args.TITLE
side_hist_mode = args.SIDE_HIST
x_col_name = args.x_column[0]
y_col_name = args.y_column
x_datetime_format = args.XDATETIME
xrange_s = args.XRANGE
xrange = None
if xrange_s is not None:
xrange = re.split(r'\s*,\s*', xrange_s)
if len(xrange) < 2:
print("invalid '--xrange': {}".format(xrange_s), file=sys.stderr)
exit(1)
else:
xrange = list(map(float, xrange))
yrange_s = args.YRANGE
yrange = None
if yrange_s is not None:
yrange = re.split(r'\s*,\s*', yrange_s)
if len(yrange) < 2:
print("invalid '--yrange': {}".format(yrange_s), file=sys.stderr)
exit(1)
else:
yrange = list(map(float, yrange))
nbin = args.NBINS
nbin_mode = args.NBIN_MODE
facets = args.FACETS
facet_mode = False
if facets is not None:
facet_mode = True
cvs = re.split(r'\s*,\s*', facets)
if len(cvs) > 1:
row_facet = cvs[0]
col_facet = cvs[1]
else:
row_facet = cvs[0]
col_facet = None
categ_orders_s = args.CATEG_ORDERS
categ_orders = {}
if categ_orders_s is not None:
try:
categ_orders = json.loads(categ_orders_s)
except json.decoder.JSONDecodeError as e:
print("??Error: '--category_orders' has invalid format: {}".format(e), file=sys.stderr)
print(categ_orders_s)
sys.exit(1)
#--- processing
# 2D Histograms | Python | Plotly https://plotly.com/python/2D-Histogram/
csv_df = pd.read_csv(csv_file)
if nbin_mode is not None:
nbin = evaluate_number_of_bin(csv_df[x_col_name], nbin_mode)
print(f"%inf:csv_plot_histogram:number of bins={nbin}", file=sys.stderr)
nbin_params = {"nbins": nbin}
if y_col_name is not None:
fig_params = {"x": x_col_name, "y": y_col_name}
else:
fig_params = {"x": x_col_name}
if x_datetime_format is not None:
try:
csv_df[x_col_name] = pd.to_datetime(csv_df[x_col_name], format=x_datetime_format)
except Exception as e:
print(f"??error: invalid datetime format {x_datetime_format} for {x_col_name}", file=sys.stderr)
sys.exit(1)
fig_params["histfunc"] = hist_func
if categ is not None:
z_params = {"color": categ, "opacity": 1.0}
else:
z_params = None
side_hist_params = None
if side_hist_mode is not None:
side_hist_params = {"marginal": side_hist_mode}
else:
side_hist_params = None
if facet_mode:
facet_params = {}
if row_facet is not None and len(row_facet) > 0:
facet_params.update(dict(facet_row=row_facet))
if col_facet is not None and len(col_facet) > 0:
facet_params.update(dict(facet_col=col_facet))
else:
facet_params = None
if side_hist_params is not None:
fig_params.update(side_hist_params)
if facet_params is not None:
fig_params.update(facet_params)
fig_params.update(nbin_params)
if z_params is not None:
fig_params.update(z_params)
if title is not None and len(title) > 0:
fig_params["title"] = title
if width is not None:
fig_params["width"] = int(width)
if height is not None:
fig_params["height"] = int(height)
if xrange is not None:
fig_params["range_x"] = xrange
csv_df[x_col_name].clip(lower=xrange[0], upper=xrange[1], inplace=True)
if yrange is not None:
fig_params["range_y"] = yrange
if log_x:
fig_params["log_x"] = True
if log_y:
fig_params["log_y"] = True
if csv_df[x_col_name].dtype in [str, object]:
fig_params["category_orders"] = {x_col_name: sorted(csv_df[x_col_name].value_counts().keys())}
if animation_col is not None:
cvs = re.split(r"\s*(?<!\\):\s*", animation_col, maxsplit=1)
ani_col = cvs[0]
ani_col = re.sub(r"\\:", ":", ani_col)
fig_params["animation_frame"] = ani_col
if len(csv_df[ani_col].value_counts()) > 100:
print("??error:csv_plot_bar:too many values in column for animation:{}".format(ani_col), file=sys.stderr)
sys.exit(1)
if len(cvs) > 1:
t_format = cvs[1]
            csv_df[ani_col] = pd.to_datetime(csv_df[ani_col], format=t_format)
import logging
import traceback
import pandas as pd
import numpy as np
import seaborn as sns
from collections import defaultdict
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
import matplotlib.ticker as ticker
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.backends.backend_pdf import PdfPages
import inStrain.plotting.utilities
from inStrain.plotting.utilities import plot_genome
from inStrain.plotting.utilities import estimate_breadth
def genome_plot_from_IS(IS, plot_dir=False, **kwargs):
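    """Plot genome-wide microdiversity metrics from an inStrain profile, one PDF page per genome."""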
# Load the required data
try:
stb = IS.get('scaffold2bin')
b2s = defaultdict(list)
for s, b in stb.items():
b2s[b].append(s)
assert len(b2s.keys()) > 0
# Load the cache
covTs = kwargs.get('covT')#, IS.get('covT'))
clonTs = kwargs.get('clonT')#, IS.get('clonT'))
raw_linkage_table = kwargs.get('raw_linkage_table')#, IS.get('raw_linkage_table'))
cumulative_snv_table = kwargs.get('cumulative_snv_table')#, IS.get('cumulative_snv_table'))
scaffold2length = IS.get('scaffold2length')
rl = IS.get_read_length()
profiled_scaffolds = set(scaffold2length.keys())
except:
logging.error("Skipping plot 2 - you don't have all required information. You need to run inStrain genome_wide first")
traceback.print_exc()
return
# Make the plot
logging.info("Plotting plot 2")
name = 'genomeWide_microdiveristy_metrics.pdf'
pp = PdfPages(plot_dir + name)
for genome, scaffolds in b2s.items():
if not plot_genome(genome, IS, **kwargs):
continue
present_scaffolds = list(set(scaffolds).intersection(set(profiled_scaffolds)))
Wdb, breaks, midpoints = load_windowed_metrics(present_scaffolds,
scaffold2length,
rl,
report_midpoints=True,
covTs=covTs, clonTs=clonTs,
raw_linkage_table=raw_linkage_table,
cumulative_snv_table=cumulative_snv_table)
if len(Wdb) == 0:
logging.debug(f"{genome} could not have windowed metrics loaded")
continue
genomeWide_microdiveristy_metrics_plot(Wdb, breaks, title=genome)
fig = plt.gcf()
fig.set_size_inches(8, 5)
fig.tight_layout()
pp.savefig(fig)#, bbox_inches='tight')
#plt.show()
plt.close(fig)
# Save the figure
pp.close()
#plt.show()
plt.close('all')
def scaffold_inspection_from_IS(IS, plot_dir=False, **kwargs):
# Load the required data
try:
stb = IS.get('scaffold2bin')
b2s = defaultdict(list)
for s, b in stb.items():
b2s[b].append(s)
assert len(b2s.keys()) > 0
# Load the cache
covTs = kwargs.get('covTs', IS.get('covT'))
clonTs = kwargs.get('clonTs', IS.get('clonT'))
raw_linkage_table = kwargs.get('raw_linkage_table', IS.get('raw_linkage_table'))
cumulative_snv_table = kwargs.get('cumulative_snv_table', IS.get('cumulative_snv_table'))
scaffold2length = IS.get('scaffold2length')
rl = IS.get_read_length()
profiled_scaffolds = set(scaffold2length.keys())
except:
logging.error("Skipping plot 7 - you don't have all required information. You need to run inStrain genome_wide first")
traceback.print_exc()
return
# Make the plot
logging.info("Plotting plot 7")
name = 'ScaffoldInspection_plot.pdf'
pp = PdfPages(plot_dir + name)
for genome, scaffolds in b2s.items():
if not plot_genome(genome, IS, **kwargs):
continue
present_scaffolds = list(set(scaffolds).intersection(set(profiled_scaffolds)))
Wdb, breaks, midpoints = load_windowed_metrics(present_scaffolds,
scaffold2length,
rl,
report_midpoints=True,
covTs=covTs, clonTs=clonTs,
raw_linkage_table=raw_linkage_table,
cumulative_snv_table=cumulative_snv_table)
if len(Wdb) == 0:
logging.debug(f"{genome} could not have windowed metrics loaded")
continue
scaffold_inspection_plot(Wdb, breaks, midpoints, title=genome)
fig = plt.gcf()
fig.tight_layout()
pp.savefig(fig)#, bbox_inches='tight')
#plt.show()
plt.close(fig)
# Save the figure
pp.close()
#plt.show()
plt.close('all')
def genomeWide_microdiveristy_metrics_plot(Wdb, breaks, title=''):
'''
Make the multiple metrics plot
'''
# Get set up for multiple rows
i = len(Wdb['metric'].unique())
if i > 1:
fig, ax = plt.subplots(i, 1, sharex=True)
else:
ax = {}
ax[0] = plt.gca()
i = 0
for metric in ['linkage', 'snp_density', 'coverage', 'nucl_diversity']:
#for metric, wdb in Wdb.groupby('metric'):
if metric not in set(Wdb['metric'].tolist()):
continue
wdb = Wdb[Wdb['metric'] == metric]
med = wdb['value'].median()
# Rotate colors:
colors = ['red', 'blue', 'black']
c = 0
for mm, ddb in wdb.groupby('ANI'):
ax[i].plot(ddb['midpoint'], ddb['value'], c=colors[c], label=mm, marker='o', ms=1)#, ls='')
c += 1
ax[i].set_title("{0}".format(metric))
ax[i].grid(False)
if i == 0:
ax[i].legend(loc='upper left', title='Min read ANI (%)')
# Add breaks
for b in breaks:
ax[i].axvline(b, ls='-', c='lightgrey', zorder=-1)
i += 1
plt.xlabel('genome position')
plt.xlim(0, Wdb['midpoint'].max())
plt.suptitle(title, y=0.999)
plt.subplots_adjust(hspace=0.3)
def load_windowed_metrics(scaffolds, s2l, rLen, metrics=None, window_len=None, ANI_levels=[0, 100],
min_scaff_len=0, report_midpoints=False, covTs=False, clonTs=False,
raw_linkage_table=False, cumulative_snv_table=False):
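    """Window the coverage / nucleotide diversity / linkage / SNP density metrics across the given scaffolds and return the windowed table plus scaffold break positions (and midpoints when requested)."""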
if metrics is None:
metrics = ['coverage', 'nucl_diversity', 'linkage', 'snp_density']
if type(metrics) != type([]):
print("Metrics must be a list")
return
# Figure out the MMs needed
#rLen = IS.get_read_length()
mms = [_get_mm(None, ANI, rLen=rLen) for ANI in ANI_levels]
# Sort the scaffolds
#s2l = IS.get('scaffold2length')
scaffolds = sorted(scaffolds, key=s2l.get, reverse=True)
if min_scaff_len > 0:
scaffolds = [s for s in scaffolds if s2l[s] >= min_scaff_len]
# Figure out the window length
if window_len == None:
window_len = int(sum([s2l[s] for s in scaffolds]) / 100)
else:
window_len = int(window_len)
# Calculate the breaks
breaks = []
midpoints = {}
tally = 0
for scaffold in scaffolds:
midpoints[scaffold] = tally + int(s2l[scaffold] / 2)
tally += s2l[scaffold]
breaks.append(tally)
dbs = []
if 'coverage' in metrics:
if covTs == False:
logging.error("need covTs for coverage")
raise Exception
cdb = load_windowed_coverage_or_clonality('coverage', covTs, scaffolds, window_len, mms, ANI_levels, s2l)
cdb['metric'] = 'coverage'
dbs.append(cdb)
# if 'clonality' in metrics:
# cdb = load_windowed_coverage_or_clonality(IS, 'clonality', scaffolds, window_len, mms, ANI_levels, s2l)
# cdb['metric'] = 'clonality'
# dbs.append(cdb)
if 'nucl_diversity' in metrics:
if clonTs == False:
logging.error("need clonTs for microdiversity")
raise Exception
cdb = load_windowed_coverage_or_clonality('nucl_diversity', clonTs, scaffolds, window_len, mms, ANI_levels, s2l)
cdb['metric'] = 'nucl_diversity'
dbs.append(cdb)
if 'linkage' in metrics:
if raw_linkage_table is False:
logging.error("need raw_linkage_table for linkage")
raise Exception
cdb = load_windowed_linkage(raw_linkage_table, scaffolds, window_len, mms, ANI_levels, s2l)
cdb['metric'] = 'linkage'
dbs.append(cdb)
if 'snp_density' in metrics:
if cumulative_snv_table is False:
logging.error("need cumulative_snv_table for snp_density")
raise Exception
if len(cumulative_snv_table) > 0:
cdb = load_windowed_SNP_density(cumulative_snv_table, scaffolds, window_len, mms, ANI_levels, s2l)
cdb['metric'] = 'snp_density'
dbs.append(cdb)
if len(dbs) > 0:
        Wdb = pd.concat(dbs, sort=True)
import unittest
import pandas as pd
import numpy as np
from scipy.sparse.csr import csr_matrix
from string_grouper.string_grouper import DEFAULT_MIN_SIMILARITY, \
DEFAULT_REGEX, DEFAULT_NGRAM_SIZE, DEFAULT_N_PROCESSES, DEFAULT_IGNORE_CASE, \
StringGrouperConfig, StringGrouper, StringGrouperNotFitException, \
match_most_similar, group_similar_strings, match_strings, \
compute_pairwise_similarities
from unittest.mock import patch, Mock
def mock_symmetrize_matrix(x: csr_matrix) -> csr_matrix:
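    """Identity pass-through used to patch out matrix symmetrization / diagonal fixing in the tests below."""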
return x
class SimpleExample(object):
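    """Small fixture DataFrames/Series and expected results shared by the tests below."""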
def __init__(self):
self.customers_df = pd.DataFrame(
[
('BB016741P', 'Mega Enterprises Corporation', 'Address0', 'Tel0', 'Description0', 0.2),
('CC082744L', 'Hyper Startup Incorporated', '', 'Tel1', '', 0.5),
('AA098762D', 'Hyper Startup Inc.', 'Address2', 'Tel2', 'Description2', 0.3),
('BB099931J', 'Hyper-Startup Inc.', 'Address3', 'Tel3', 'Description3', 0.1),
('HH072982K', 'Hyper Hyper Inc.', 'Address4', '', 'Description4', 0.9),
('EE059082Q', 'Mega Enterprises Corp.', 'Address5', 'Tel5', 'Description5', 1.0)
],
columns=('Customer ID', 'Customer Name', 'Address', 'Tel', 'Description', 'weight')
)
self.customers_df2 = pd.DataFrame(
[
('BB016741P', 'Mega Enterprises Corporation', 'Address0', 'Tel0', 'Description0', 0.2),
('CC082744L', 'Hyper Startup Incorporated', '', 'Tel1', '', 0.5),
('AA098762D', 'Hyper Startup Inc.', 'Address2', 'Tel2', 'Description2', 0.3),
('BB099931J', 'Hyper-Startup Inc.', 'Address3', 'Tel3', 'Description3', 0.1),
('DD012339M', 'HyperStartup Inc.', 'Address4', 'Tel4', 'Description4', 0.1),
('HH072982K', 'Hyper Hyper Inc.', 'Address5', '', 'Description5', 0.9),
('EE059082Q', 'Mega Enterprises Corp.', 'Address6', 'Tel6', 'Description6', 1.0)
],
columns=('Customer ID', 'Customer Name', 'Address', 'Tel', 'Description', 'weight')
)
self.a_few_strings = pd.Series(['BB016741P', 'BB082744L', 'BB098762D', 'BB099931J', 'BB072982K', 'BB059082Q'])
self.one_string = pd.Series(['BB0'])
self.two_strings = pd.Series(['Hyper', 'Hyp'])
self.whatever_series_1 = pd.Series(['whatever'])
self.expected_result_with_zeroes = pd.DataFrame(
[
(1, 'Hyper Startup Incorporated', 0.08170638, 'whatever', 0),
(0, 'Mega Enterprises Corporation', 0., 'whatever', 0),
(2, 'Hyper Startup Inc.', 0., 'whatever', 0),
(3, 'Hyper-Startup Inc.', 0., 'whatever', 0),
(4, 'Hyper Hyper Inc.', 0., 'whatever', 0),
(5, 'Mega Enterprises Corp.', 0., 'whatever', 0)
],
columns=['left_index', 'left_Customer Name', 'similarity', 'right_side', 'right_index']
)
self.expected_result_centroid = pd.Series(
[
'Mega Enterprises Corporation',
'Hyper Startup Inc.',
'Hyper Startup Inc.',
'Hyper Startup Inc.',
'Hyper Hyper Inc.',
'Mega Enterprises Corporation'
],
name='group_rep_Customer Name'
)
self.expected_result_centroid_with_index_col = pd.DataFrame(
[
(0, 'Mega Enterprises Corporation'),
(2, 'Hyper Startup Inc.'),
(2, 'Hyper Startup Inc.'),
(2, 'Hyper Startup Inc.'),
(4, 'Hyper Hyper Inc.'),
(0, 'Mega Enterprises Corporation')
],
columns=['group_rep_index', 'group_rep_Customer Name']
)
self.expected_result_first = pd.Series(
[
'Mega Enterprises Corporation',
'Hyper Startup Incorporated',
'Hyper Startup Incorporated',
'Hyper Startup Incorporated',
'Hyper Hyper Inc.',
'Mega Enterprises Corporation'
],
name='group_rep_Customer Name'
)
class StringGrouperConfigTest(unittest.TestCase):
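    """Tests for the StringGrouperConfig settings object: defaults, immutability and non-default values."""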
def test_config_defaults(self):
"""Empty initialisation should set default values"""
config = StringGrouperConfig()
self.assertEqual(config.min_similarity, DEFAULT_MIN_SIMILARITY)
self.assertEqual(config.max_n_matches, None)
self.assertEqual(config.regex, DEFAULT_REGEX)
self.assertEqual(config.ngram_size, DEFAULT_NGRAM_SIZE)
self.assertEqual(config.number_of_processes, DEFAULT_N_PROCESSES)
self.assertEqual(config.ignore_case, DEFAULT_IGNORE_CASE)
def test_config_immutable(self):
"""Configurations should be immutable"""
config = StringGrouperConfig()
with self.assertRaises(Exception) as _:
config.min_similarity = 0.1
def test_config_non_default_values(self):
"""Configurations should be immutable"""
config = StringGrouperConfig(min_similarity=0.1, max_n_matches=100, number_of_processes=1)
self.assertEqual(0.1, config.min_similarity)
self.assertEqual(100, config.max_n_matches)
self.assertEqual(1, config.number_of_processes)
class StringGrouperTest(unittest.TestCase):
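    """Tests for StringGrouper and the high-level helpers match_strings, match_most_similar, group_similar_strings and compute_pairwise_similarities."""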
def test_auto_blocking_single_DataFrame(self):
"""tests whether automatic blocking yields consistent results"""
        # The mock_build_matches wrapper defined below forces an OverflowError to occur when
        # the input Series have a combined length above a given number:
        # OverflowThreshold. This in turn triggers automatic splitting
        # of the Series/matrices into smaller blocks when n_blocks = None.
sort_cols = ['right_index', 'left_index']
def fix_row_order(df):
return df.sort_values(sort_cols).reset_index(drop=True)
simple_example = SimpleExample()
        df1 = simple_example.customers_df2['Customer Name']
# first do manual blocking
sg = StringGrouper(df1, min_similarity=0.1)
pd.testing.assert_series_equal(sg.master, df1)
self.assertEqual(sg.duplicates, None)
matches = fix_row_order(sg.match_strings(df1, n_blocks=(1, 1)))
self.assertEqual(sg._config.n_blocks, (1, 1))
# Create a custom wrapper for this StringGrouper instance's
# _build_matches() method which will later be used to
# mock _build_matches().
# Note that we have to define the wrapper here because
# _build_matches() is a non-static function of StringGrouper
# and needs access to the specific StringGrouper instance sg
# created here.
def mock_build_matches(OverflowThreshold,
real_build_matches=sg._build_matches):
def wrapper(left_matrix,
right_matrix,
nnz_rows=None,
sort=True):
if (left_matrix.shape[0] + right_matrix.shape[0]) > \
OverflowThreshold:
raise OverflowError
return real_build_matches(left_matrix, right_matrix, nnz_rows, sort)
return wrapper
def do_test_with(OverflowThreshold):
nonlocal sg # allows reference to sg, as sg will be modified below
# Now let us mock sg._build_matches:
sg._build_matches = Mock(side_effect=mock_build_matches(OverflowThreshold))
sg.clear_data()
matches_auto = fix_row_order(sg.match_strings(df1, n_blocks=None))
pd.testing.assert_series_equal(sg.master, df1)
pd.testing.assert_frame_equal(matches, matches_auto)
self.assertEqual(sg._config.n_blocks, None)
# Note that _build_matches is called more than once if and only if
# a split occurred (that is, there was more than one pair of
# matrix-blocks multiplied)
if len(sg._left_Series) + len(sg._right_Series) > \
OverflowThreshold:
# Assert that split occurred:
self.assertGreater(sg._build_matches.call_count, 1)
else:
# Assert that split did not occur:
self.assertEqual(sg._build_matches.call_count, 1)
# now test auto blocking by forcing an OverflowError when the
# combined Series' lengths is greater than 10, 5, 3, 2
do_test_with(OverflowThreshold=100) # does not trigger auto blocking
do_test_with(OverflowThreshold=10)
do_test_with(OverflowThreshold=5)
do_test_with(OverflowThreshold=3)
do_test_with(OverflowThreshold=2)
def test_n_blocks_single_DataFrame(self):
"""tests whether manual blocking yields consistent results"""
sort_cols = ['right_index', 'left_index']
def fix_row_order(df):
return df.sort_values(sort_cols).reset_index(drop=True)
simple_example = SimpleExample()
        df1 = simple_example.customers_df2['Customer Name']
matches11 = fix_row_order(match_strings(df1, min_similarity=0.1))
matches12 = fix_row_order(
match_strings(df1, n_blocks=(1, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches12)
matches13 = fix_row_order(
match_strings(df1, n_blocks=(1, 3), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches13)
matches14 = fix_row_order(
match_strings(df1, n_blocks=(1, 4), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches14)
matches15 = fix_row_order(
match_strings(df1, n_blocks=(1, 5), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches15)
matches16 = fix_row_order(
match_strings(df1, n_blocks=(1, 6), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches16)
matches17 = fix_row_order(
match_strings(df1, n_blocks=(1, 7), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches17)
matches18 = fix_row_order(
match_strings(df1, n_blocks=(1, 8), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches18)
matches21 = fix_row_order(
match_strings(df1, n_blocks=(2, 1), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches21)
matches22 = fix_row_order(
match_strings(df1, n_blocks=(2, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches22)
matches32 = fix_row_order(
match_strings(df1, n_blocks=(3, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches32)
# Create a custom wrapper for this StringGrouper instance's
# _build_matches() method which will later be used to
# mock _build_matches().
# Note that we have to define the wrapper here because
# _build_matches() is a non-static function of StringGrouper
# and needs access to the specific StringGrouper instance sg
# created here.
sg = StringGrouper(df1, min_similarity=0.1)
def mock_build_matches(OverflowThreshold,
real_build_matches=sg._build_matches):
def wrapper(left_matrix,
right_matrix,
nnz_rows=None,
sort=True):
if (left_matrix.shape[0] + right_matrix.shape[0]) > \
OverflowThreshold:
raise OverflowError
return real_build_matches(left_matrix, right_matrix, nnz_rows, sort)
return wrapper
def test_overflow_error_with(OverflowThreshold, n_blocks):
nonlocal sg
sg._build_matches = Mock(side_effect=mock_build_matches(OverflowThreshold))
sg.clear_data()
max_left_block_size = (len(df1)//n_blocks[0]
+ (1 if len(df1) % n_blocks[0] > 0 else 0))
max_right_block_size = (len(df1)//n_blocks[1]
+ (1 if len(df1) % n_blocks[1] > 0 else 0))
if (max_left_block_size + max_right_block_size) > OverflowThreshold:
with self.assertRaises(Exception):
_ = sg.match_strings(df1, n_blocks=n_blocks)
else:
matches_manual = fix_row_order(sg.match_strings(df1, n_blocks=n_blocks))
pd.testing.assert_frame_equal(matches11, matches_manual)
test_overflow_error_with(OverflowThreshold=100, n_blocks=(1, 1))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(1, 1))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(2, 1))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(1, 2))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(4, 4))
def test_n_blocks_both_DataFrames(self):
"""tests whether manual blocking yields consistent results"""
sort_cols = ['right_index', 'left_index']
def fix_row_order(df):
return df.sort_values(sort_cols).reset_index(drop=True)
simple_example = SimpleExample()
df1 = simple_example.customers_df['Customer Name']
df2 = simple_example.customers_df2['Customer Name']
matches11 = fix_row_order(match_strings(df1, df2, min_similarity=0.1))
matches12 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches12)
matches13 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 3), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches13)
matches14 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 4), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches14)
matches15 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 5), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches15)
matches16 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 6), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches16)
matches17 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 7), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches17)
matches18 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 8), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches18)
matches21 = fix_row_order(
match_strings(df1, df2, n_blocks=(2, 1), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches21)
matches22 = fix_row_order(
match_strings(df1, df2, n_blocks=(2, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches22)
matches32 = fix_row_order(
match_strings(df1, df2, n_blocks=(3, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches32)
def test_n_blocks_bad_option_value(self):
"""Tests that bad option values for n_blocks are caught"""
simple_example = SimpleExample()
        df1 = simple_example.customers_df2['Customer Name']
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=2)
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(0, 2))
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(1, 2.5))
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(1, 2, 3))
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(1, ))
def test_tfidf_dtype_bad_option_value(self):
"""Tests that bad option values for n_blocks are caught"""
simple_example = SimpleExample()
        df1 = simple_example.customers_df2['Customer Name']
with self.assertRaises(Exception):
_ = match_strings(df1, tfidf_matrix_dtype=None)
with self.assertRaises(Exception):
_ = match_strings(df1, tfidf_matrix_dtype=0)
with self.assertRaises(Exception):
_ = match_strings(df1, tfidf_matrix_dtype='whatever')
def test_compute_pairwise_similarities(self):
"""tests the high-level function compute_pairwise_similarities"""
simple_example = SimpleExample()
        df1 = simple_example.customers_df['Customer Name']
df2 = simple_example.expected_result_centroid
similarities = compute_pairwise_similarities(df1, df2)
expected_result = pd.Series(
[
1.0,
0.6336195351561589,
1.0000000000000004,
1.0000000000000004,
1.0,
0.826462625999832
],
name='similarity'
)
expected_result = expected_result.astype(np.float32)
pd.testing.assert_series_equal(expected_result, similarities)
sg = StringGrouper(df1, df2)
similarities = sg.compute_pairwise_similarities(df1, df2)
pd.testing.assert_series_equal(expected_result, similarities)
def test_compute_pairwise_similarities_data_integrity(self):
"""tests that an exception is raised whenever the lengths of the two input series of the high-level function
compute_pairwise_similarities are unequal"""
simple_example = SimpleExample()
        df1 = simple_example.customers_df['Customer Name']
df2 = simple_example.expected_result_centroid
with self.assertRaises(Exception):
_ = compute_pairwise_similarities(df1, df2[:-2])
@patch('string_grouper.string_grouper.StringGrouper')
def test_group_similar_strings(self, mock_StringGouper):
"""mocks StringGrouper to test if the high-level function group_similar_strings utilizes it as expected"""
mock_StringGrouper_instance = mock_StringGouper.return_value
mock_StringGrouper_instance.fit.return_value = mock_StringGrouper_instance
mock_StringGrouper_instance.get_groups.return_value = 'whatever'
test_series_1 = None
test_series_id_1 = None
df = group_similar_strings(
test_series_1,
string_ids=test_series_id_1
)
mock_StringGrouper_instance.fit.assert_called_once()
mock_StringGrouper_instance.get_groups.assert_called_once()
self.assertEqual(df, 'whatever')
@patch('string_grouper.string_grouper.StringGrouper')
def test_match_most_similar(self, mock_StringGouper):
"""mocks StringGrouper to test if the high-level function match_most_similar utilizes it as expected"""
mock_StringGrouper_instance = mock_StringGouper.return_value
mock_StringGrouper_instance.fit.return_value = mock_StringGrouper_instance
mock_StringGrouper_instance.get_groups.return_value = 'whatever'
test_series_1 = None
test_series_2 = None
test_series_id_1 = None
test_series_id_2 = None
df = match_most_similar(
test_series_1,
test_series_2,
master_id=test_series_id_1,
duplicates_id=test_series_id_2
)
mock_StringGrouper_instance.fit.assert_called_once()
mock_StringGrouper_instance.get_groups.assert_called_once()
self.assertEqual(df, 'whatever')
@patch('string_grouper.string_grouper.StringGrouper')
def test_match_strings(self, mock_StringGouper):
"""mocks StringGrouper to test if the high-level function match_strings utilizes it as expected"""
mock_StringGrouper_instance = mock_StringGouper.return_value
mock_StringGrouper_instance.fit.return_value = mock_StringGrouper_instance
mock_StringGrouper_instance.get_matches.return_value = 'whatever'
test_series_1 = None
test_series_id_1 = None
df = match_strings(test_series_1, master_id=test_series_id_1)
mock_StringGrouper_instance.fit.assert_called_once()
mock_StringGrouper_instance.get_matches.assert_called_once()
self.assertEqual(df, 'whatever')
@patch(
'string_grouper.string_grouper.StringGrouper._symmetrize_matrix',
side_effect=mock_symmetrize_matrix
)
def test_match_list_symmetry_without_symmetrize_function(self, mock_symmetrize_matrix_param):
"""mocks StringGrouper._symmetrize_matches_list so that this test fails whenever _matches_list is
**partially** symmetric which often occurs when the kwarg max_n_matches is too small"""
simple_example = SimpleExample()
        df = simple_example.customers_df2['Customer Name']
sg = StringGrouper(df, max_n_matches=2).fit()
mock_symmetrize_matrix_param.assert_called_once()
# obtain the upper and lower triangular parts of the matrix of matches:
upper = sg._matches_list[sg._matches_list['master_side'] < sg._matches_list['dupe_side']]
lower = sg._matches_list[sg._matches_list['master_side'] > sg._matches_list['dupe_side']]
# switch the column names of lower triangular part (i.e., transpose) to convert it to upper triangular:
upper_prime = lower.rename(columns={'master_side': 'dupe_side', 'dupe_side': 'master_side'})
# obtain the intersection between upper and upper_prime:
intersection = upper_prime.merge(upper, how='inner', on=['master_side', 'dupe_side'])
# if the intersection is empty then _matches_list is completely non-symmetric (this is acceptable)
# if the intersection is not empty then at least some matches are repeated.
# To make sure all (and not just some) matches are repeated, the lengths of
# upper, upper_prime and their intersection should be identical.
self.assertFalse(intersection.empty or len(upper) == len(upper_prime) == len(intersection))
def test_match_list_symmetry_with_symmetrize_function(self):
"""This test ensures that _matches_list is symmetric"""
simple_example = SimpleExample()
        df = simple_example.customers_df2['Customer Name']
sg = StringGrouper(df, max_n_matches=2).fit()
# Obtain the upper and lower triangular parts of the matrix of matches:
upper = sg._matches_list[sg._matches_list['master_side'] < sg._matches_list['dupe_side']]
lower = sg._matches_list[sg._matches_list['master_side'] > sg._matches_list['dupe_side']]
# Switch the column names of the lower triangular part (i.e., transpose) to convert it to upper triangular:
upper_prime = lower.rename(columns={'master_side': 'dupe_side', 'dupe_side': 'master_side'})
# Obtain the intersection between upper and upper_prime:
intersection = upper_prime.merge(upper, how='inner', on=['master_side', 'dupe_side'])
# If the intersection is empty this means _matches_list is completely non-symmetric (this is acceptable)
# If the intersection is not empty this means at least some matches are repeated.
# To make sure all (and not just some) matches are repeated, the lengths of
# upper, upper_prime and their intersection should be identical.
self.assertTrue(intersection.empty or len(upper) == len(upper_prime) == len(intersection))
@patch(
'string_grouper.string_grouper.StringGrouper._fix_diagonal',
side_effect=mock_symmetrize_matrix
)
def test_match_list_diagonal_without_the_fix(self, mock_fix_diagonal):
"""test fails whenever _matches_list's number of self-joins is not equal to the number of strings"""
# This bug is difficult to reproduce -- I mostly encounter it while working with very large datasets;
# for small datasets setting max_n_matches=1 reproduces the bug
simple_example = SimpleExample()
        df = simple_example.customers_df['Customer Name']
matches = match_strings(df, max_n_matches=1)
mock_fix_diagonal.assert_called_once()
num_self_joins = len(matches[matches['left_index'] == matches['right_index']])
num_strings = len(df)
self.assertNotEqual(num_self_joins, num_strings)
def test_match_list_diagonal(self):
"""This test ensures that all self-joins are present"""
# This bug is difficult to reproduce -- I mostly encounter it while working with very large datasets;
# for small datasets setting max_n_matches=1 reproduces the bug
simple_example = SimpleExample()
df = simple_example.customers_df['Customer Name']
matches = match_strings(df, max_n_matches=1)
num_self_joins = len(matches[matches['left_index'] == matches['right_index']])
num_strings = len(df)
self.assertEqual(num_self_joins, num_strings)
def test_zero_min_similarity(self):
"""Since sparse matrices exclude zero elements, this test ensures that zero similarity matches are
returned when min_similarity <= 0. A bug related to this was first pointed out by @nbcvijanovic"""
simple_example = SimpleExample()
s_master = simple_example.customers_df['Customer Name']
s_dup = simple_example.whatever_series_1
matches = match_strings(s_master, s_dup, min_similarity=0)
pd.testing.assert_frame_equal(simple_example.expected_result_with_zeroes, matches)
def test_zero_min_similarity_small_max_n_matches(self):
"""This test ensures that a warning is issued when n_max_matches is suspected to be too small while
min_similarity <= 0 and include_zeroes is True"""
simple_example = SimpleExample()
s_master = simple_example.customers_df['Customer Name']
s_dup = simple_example.two_strings
with self.assertRaises(Exception):
_ = match_strings(s_master, s_dup, max_n_matches=1, min_similarity=0)
def test_get_non_matches_empty_case(self):
"""This test ensures that _get_non_matches() returns an empty DataFrame when all pairs of strings match"""
simple_example = SimpleExample()
s_master = simple_example.a_few_strings
s_dup = simple_example.one_string
sg = StringGrouper(s_master, s_dup, max_n_matches=len(s_master), min_similarity=0).fit()
self.assertTrue(sg._get_non_matches_list().empty)
def test_n_grams_case_unchanged(self):
"""Should return all ngrams in a string with case"""
test_series = pd.Series(pd.Series(['aaa']))
# Explicit do not ignore case
sg = StringGrouper(test_series, ignore_case=False)
expected_result = ['McD', 'cDo', 'Don', 'ona', 'nal', 'ald', 'lds']
self.assertListEqual(expected_result, sg.n_grams('McDonalds'))
def test_n_grams_ignore_case_to_lower(self):
"""Should return all case insensitive ngrams in a string"""
        test_series = pd.Series(['aaa'])
# Explicit ignore case
sg = StringGrouper(test_series, ignore_case=True)
expected_result = ['mcd', 'cdo', 'don', 'ona', 'nal', 'ald', 'lds']
self.assertListEqual(expected_result, sg.n_grams('McDonalds'))
def test_n_grams_ignore_case_to_lower_with_defaults(self):
"""Should return all case insensitive ngrams in a string"""
        test_series = pd.Series(['aaa'])
# Implicit default case (i.e. default behaviour)
sg = StringGrouper(test_series)
expected_result = ['mcd', 'cdo', 'don', 'ona', 'nal', 'ald', 'lds']
self.assertListEqual(expected_result, sg.n_grams('McDonalds'))
def test_build_matrix(self):
"""Should create a csr matrix only master"""
test_series = pd.Series(['foo', 'bar', 'baz'])
sg = StringGrouper(test_series)
master, dupe = sg._get_right_tf_idf_matrix(), sg._get_left_tf_idf_matrix()
c = csr_matrix([[0., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]])
np.testing.assert_array_equal(c.toarray(), master.toarray())
np.testing.assert_array_equal(c.toarray(), dupe.toarray())
def test_build_matrix_master_and_duplicates(self):
"""Should create a csr matrix for master and duplicates"""
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
master, dupe = sg._get_right_tf_idf_matrix(), sg._get_left_tf_idf_matrix()
master_expected = csr_matrix([[0., 0., 0., 1.],
[1., 0., 0., 0.],
[0., 1., 0., 0.]])
dupes_expected = csr_matrix([[0., 0., 0., 1.],
[1., 0., 0., 0.],
[0., 0., 1., 0.]])
np.testing.assert_array_equal(master_expected.toarray(), master.toarray())
np.testing.assert_array_equal(dupes_expected.toarray(), dupe.toarray())
def test_build_matches(self):
"""Should create the cosine similarity matrix of two series"""
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
master, dupe = sg._get_right_tf_idf_matrix(), sg._get_left_tf_idf_matrix()
expected_matches = np.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]])
np.testing.assert_array_equal(expected_matches, sg._build_matches(master, dupe)[0].toarray())
def test_build_matches_list(self):
"""Should create the cosine similarity matrix of two series"""
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
sg = sg.fit()
master = [0, 1]
dupe_side = [0, 1]
similarity = [1.0, 1.0]
expected_df = pd.DataFrame({'master_side': master, 'dupe_side': dupe_side, 'similarity': similarity})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg._matches_list)
def test_case_insensitive_build_matches_list(self):
"""Should create the cosine similarity matrix of two case insensitive series"""
test_series_1 = pd.Series(['foo', 'BAR', 'baz'])
test_series_2 = pd.Series(['FOO', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
sg = sg.fit()
master = [0, 1]
dupe_side = [0, 1]
similarity = [1.0, 1.0]
expected_df = pd.DataFrame({'master_side': master, 'dupe_side': dupe_side, 'similarity': similarity})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg._matches_list)
def test_get_matches_two_dataframes(self):
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2).fit()
left_side = ['foo', 'bar']
left_index = [0, 1]
right_side = ['foo', 'bar']
right_index = [0, 1]
similarity = [1.0, 1.0]
expected_df = pd.DataFrame({'left_index': left_index, 'left_side': left_side,
'similarity': similarity,
'right_side': right_side, 'right_index': right_index})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg.get_matches())
def test_get_matches_single(self):
test_series_1 = pd.Series(['foo', 'bar', 'baz', 'foo'])
sg = StringGrouper(test_series_1)
sg = sg.fit()
left_side = ['foo', 'foo', 'bar', 'baz', 'foo', 'foo']
right_side = ['foo', 'foo', 'bar', 'baz', 'foo', 'foo']
left_index = [0, 3, 1, 2, 0, 3]
right_index = [0, 0, 1, 2, 3, 3]
similarity = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
expected_df = pd.DataFrame({'left_index': left_index, 'left_side': left_side,
'similarity': similarity,
'right_side': right_side, 'right_index': right_index})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg.get_matches())
def test_get_matches_1_series_1_id_series(self):
test_series_1 = pd.Series(['foo', 'bar', 'baz', 'foo'])
test_series_id_1 = pd.Series(['A0', 'A1', 'A2', 'A3'])
sg = StringGrouper(test_series_1, master_id=test_series_id_1)
sg = sg.fit()
left_side = ['foo', 'foo', 'bar', 'baz', 'foo', 'foo']
left_side_id = ['A0', 'A3', 'A1', 'A2', 'A0', 'A3']
left_index = [0, 3, 1, 2, 0, 3]
right_side = ['foo', 'foo', 'bar', 'baz', 'foo', 'foo']
right_side_id = ['A0', 'A0', 'A1', 'A2', 'A3', 'A3']
right_index = [0, 0, 1, 2, 3, 3]
similarity = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
expected_df = pd.DataFrame({'left_index': left_index, 'left_side': left_side, 'left_id': left_side_id,
'similarity': similarity,
'right_id': right_side_id, 'right_side': right_side, 'right_index': right_index})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg.get_matches())
def test_get_matches_2_series_2_id_series(self):
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_id_1 = pd.Series(['A0', 'A1', 'A2'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
test_series_id_2 = pd.Series(['B0', 'B1', 'B2'])
sg = StringGrouper(test_series_1, test_series_2, duplicates_id=test_series_id_2,
master_id=test_series_id_1).fit()
left_side = ['foo', 'bar']
left_side_id = ['A0', 'A1']
left_index = [0, 1]
right_side = ['foo', 'bar']
right_side_id = ['B0', 'B1']
right_index = [0, 1]
similarity = [1.0, 1.0]
expected_df = pd.DataFrame({'left_index': left_index, 'left_side': left_side, 'left_id': left_side_id,
'similarity': similarity,
'right_id': right_side_id, 'right_side': right_side, 'right_index': right_index})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg.get_matches())
def test_get_matches_raises_exception_if_unexpected_options_given(self):
# When the input id data does not correspond with its string data:
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
bad_test_series_id_1 = pd.Series(['A0', 'A1'])
good_test_series_id_1 = pd.Series(['A0', 'A1', 'A2'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
bad_test_series_id_2 = pd.Series(['B0', 'B1'])
good_test_series_id_2 = pd.Series(['B0', 'B1', 'B2'])
with self.assertRaises(Exception):
_ = StringGrouper(test_series_1, master_id=bad_test_series_id_1)
with self.assertRaises(Exception):
_ = StringGrouper(test_series_1, duplicates=test_series_2, duplicates_id=bad_test_series_id_2,
master_id=good_test_series_id_1)
# When the input data is ok but the option combinations are invalid:
with self.assertRaises(Exception):
_ = StringGrouper(test_series_1, test_series_2, master_id=good_test_series_id_1)
with self.assertRaises(Exception):
_ = StringGrouper(test_series_1, test_series_2, duplicates_id=good_test_series_id_2)
with self.assertRaises(Exception):
_ = StringGrouper(test_series_1, duplicates_id=good_test_series_id_2)
with self.assertRaises(Exception):
_ = StringGrouper(test_series_1, master_id=good_test_series_id_1, duplicates_id=good_test_series_id_2)
with self.assertRaises(Exception):
_ = StringGrouper(test_series_1, master_id=good_test_series_id_1, ignore_index=True, replace_na=True)
# Here we force an exception by making the number of index-levels of duplicates different from master:
# and setting replace_na=True
test_series_2.index = pd.MultiIndex.from_tuples(list(zip(list('ABC'), [0, 1, 2])))
with self.assertRaises(Exception):
_ = StringGrouper(test_series_1, duplicates=test_series_2, replace_na=True)
def test_get_groups_single_df_group_rep_default(self):
"""Should return a pd.Series object with the same length as the original df. The series object will contain
a list of the grouped strings"""
simple_example = SimpleExample()
customers_df = simple_example.customers_df
pd.testing.assert_series_equal(
simple_example.expected_result_centroid,
group_similar_strings(
customers_df['Customer Name'],
min_similarity=0.6,
ignore_index=True
)
)
sg = StringGrouper(customers_df['Customer Name'])
pd.testing.assert_series_equal(
simple_example.expected_result_centroid,
sg.group_similar_strings(
customers_df['Customer Name'],
min_similarity=0.6,
ignore_index=True
)
)
def test_get_groups_single_valued_series(self):
"""This test ensures that get_groups() returns a single-valued DataFrame or Series object
since the input-series is also single-valued. This test was created in response to a bug discovered
by <NAME>"""
pd.testing.assert_frame_equal(
| pd.DataFrame([(0, "hello")], columns=['group_rep_index', 'group_rep']) | pandas.DataFrame |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
                if opname == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
        # series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
        expected = 0.0 > pd.Series([0, 1, 2])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16',
'uint8']:
s2 = Series([20, 30, 40], dtype=dtype)
expected = Series(
s1.values.astype(np.int64) * s2.astype(np.int64),
dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
result = s1 * 2
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
result = s1 * -1
expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
# invalid ops
assert_series_equal(s1 / s2.astype(float),
Series([Timedelta('2 days 22:48:00'), Timedelta(
'1 days 23:12:00'), Timedelta('NaT')]))
assert_series_equal(s1 / 2.0,
Series([Timedelta('29 days 12:00:00'), Timedelta(
'29 days 12:00:00'), Timedelta('NaT')]))
for op in ['__add__', '__sub__']:
sop = getattr(s1, op, None)
if sop is not None:
self.assertRaises(TypeError, sop, 1)
self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
for m in [1, 3, 10]:
for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:
# op
expected = s1.apply(lambda x: x / np.timedelta64(m, unit))
result = s1 / np.timedelta64(m, unit)
assert_series_equal(result, expected)
if m == 1 and unit != 'ns':
# astype
result = s1.astype("timedelta64[{0}]".format(unit))
assert_series_equal(result, expected)
# reverse op
expected = s1.apply(
lambda x: Timedelta(np.timedelta64(m, unit)) / x)
result = np.timedelta64(m, unit) / s1
# astype
s = Series(date_range('20130101', periods=3))
result = s.astype(object)
self.assertIsInstance(result.iloc[0], datetime)
self.assertTrue(result.dtype == np.object_)
result = s1.astype(object)
self.assertIsInstance(result.iloc[0], timedelta)
self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'), Timestamp(
'20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
# TODO: unused
# npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
# 'h': 60 * 60 * 1000000,
# 'm': 60 * 1000000,
# 's': 1000000,
# 'us': 1}
def timedelta64(*args):
return sum(starmap(np.timedelta64, zip(args, intervals)))
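        # e.g. timedelta64(1, 2, 0, 0, 0) == np.timedelta64(1, 'D') + np.timedelta64(2, 'h'),
        # since positional args are paired with the units in `intervals` above.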
for op, d, h, m, s, us in product([operator.add, operator.sub],
*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
try:
assert_series_equal(lhs, rhs)
except:
raise AssertionError(
"invalid comparsion [op->{0},d->{1},h->{2},m->{3},"
"s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s,
us, lhs, rhs))
def test_operators_datetimelike(self):
def run_ops(ops, get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
for op_str in ops:
op = getattr(get_ser, op_str, None)
with tm.assertRaisesRegexp(TypeError, 'operate'):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td2 = timedelta(minutes=5, seconds=4)
ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__',
'__rfloordiv__', '__rpow__']
run_ops(ops, td1, td2)
td1 + td2
td2 + td1
td1 - td2
td2 - td1
td1 / td2
td2 / td1
# ## datetime64 ###
dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')])
dt1.iloc[2] = np.nan
dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
Timestamp('20120104')])
ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__radd__', '__rmul__', '__rfloordiv__',
'__rtruediv__', '__rdiv__', '__rpow__']
run_ops(ops, dt1, dt2)
dt1 - dt2
dt2 - dt1
        # ## datetime64 with timedelta ###
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
run_ops(ops, dt1, td1)
dt1 + td1
td1 + dt1
dt1 - td1
# TODO: Decide if this ought to work.
# td1 - dt1
        # ## timedelta with datetime64 ###
ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__',
'__rdiv__', '__rpow__']
run_ops(ops, td1, dt1)
td1 + dt1
dt1 + td1
# 8260, 10763
# datetime64 with tz
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
tz = 'US/Eastern'
dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
tz=tz), name='foo')
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H'))
td2 = td1.copy()
td2.iloc[1] = np.nan
run_ops(ops, dt1, td1)
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
if not _np_version_under1p8:
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1[0] - dt1)
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td2[0] - dt2)
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1 - dt1)
self.assertRaises(TypeError, lambda: td2 - dt2)
def test_sub_single_tz(self):
# GH12290
s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')])
s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')])
result = s1 - s2
expected = Series([Timedelta('2days')])
assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta('-2days')])
assert_series_equal(result, expected)
def test_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
datetime_series = Series([NaT, Timestamp('19900315')])
nat_series_dtype_timedelta = Series(
[NaT, NaT], dtype='timedelta64[ns]')
nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]')
single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(datetime_series - NaT, nat_series_dtype_timestamp)
assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
assert_series_equal(datetime_series - single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + datetime_series
assert_series_equal(datetime_series - single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta + datetime_series,
nat_series_dtype_timestamp)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
assert_series_equal(nat_series_dtype_timestamp - NaT,
nat_series_dtype_timestamp)
assert_series_equal(-NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
with tm.assertRaises(TypeError):
timedelta_series - single_nat_dtype_datetime
# addition
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_datetime,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_datetime +
nat_series_dtype_timedelta,
nat_series_dtype_timestamp)
# multiplication
assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series * 1, timedelta_series)
assert_series_equal(1 * timedelta_series, timedelta_series)
assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(timedelta_series * nan, nat_series_dtype_timedelta)
assert_series_equal(nan * timedelta_series, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
datetime_series * 1
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1
with tm.assertRaises(TypeError):
datetime_series * 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1.0
# division
assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / nan, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1
def test_ops_datetimelike_align(self):
# GH 7500
# datetimelike ops need to align
dt = Series(date_range('2012-1-1', periods=3, freq='D'))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
assert_series_equal(result, expected)
def test_object_comparisons(self):
s = Series(['a', 'b', np.nan, 'c', 'a'])
result = s == 'a'
expected = Series([True, False, False, False, True])
assert_series_equal(result, expected)
result = s < 'a'
expected = Series([False, False, False, False, False])
assert_series_equal(result, expected)
result = s != 'a'
expected = -(s == 'a')
assert_series_equal(result, expected)
def test_comparison_tuples(self):
# GH11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self):
s = Series(bdate_range('1/1/2000', periods=10), dtype=object)
s[::2] = np.nan
# test that comparisons work
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
val = s[5]
f = getattr(operator, op)
result = f(s, val)
expected = f(s.dropna(), val).reindex(s.index)
if op == 'ne':
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
assert_series_equal(result, expected)
            # reversed comparisons (scalar on the left) are not exercised here:
# result = f(val, s)
# expected = f(val, s.dropna()).reindex(s.index)
# assert_series_equal(result, expected)
# boolean &, |, ^ should work with object arrays and propagate NAs
ops = ['and_', 'or_', 'xor']
mask = s.isnull()
for bool_op in ops:
f = getattr(operator, bool_op)
filled = s.fillna(s[0])
result = f(s < s[9], s > s[3])
expected = f(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_comparison_object_numeric_nas(self):
s = Series(np.random.randn(10), dtype=object)
shifted = s.shift(2)
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
f = getattr(operator, op)
result = f(s, shifted)
expected = f(s.astype(float), shifted.astype(float))
assert_series_equal(result, expected)
def test_comparison_invalid(self):
# GH4968
# invalid date/int comparisons
s = Series(range(5))
s2 = Series(date_range('20010101', periods=5))
for (x, y) in [(s, s2), (s2, s)]:
self.assertRaises(TypeError, lambda: x == y)
self.assertRaises(TypeError, lambda: x != y)
self.assertRaises(TypeError, lambda: x >= y)
self.assertRaises(TypeError, lambda: x > y)
self.assertRaises(TypeError, lambda: x < y)
self.assertRaises(TypeError, lambda: x <= y)
def test_more_na_comparisons(self):
for dtype in [None, object]:
left = Series(['a', np.nan, 'c'], dtype=dtype)
right = Series(['a', np.nan, 'd'], dtype=dtype)
result = left == right
expected = Series([True, False, False])
assert_series_equal(result, expected)
result = left != right
expected = Series([False, True, True])
assert_series_equal(result, expected)
result = left == np.nan
expected = Series([False, False, False])
assert_series_equal(result, expected)
result = left != np.nan
expected = Series([True, True, True])
assert_series_equal(result, expected)
def test_nat_comparisons(self):
data = [([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]),
([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')],
[pd.NaT, pd.NaT, pd.Timedelta('3 days')]),
([pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')],
[pd.NaT, pd.NaT, pd.Period('2011-03', freq='M')])]
# add lhs / rhs switched data
data = data + [(r, l) for l, r in data]
for l, r in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
# Series, Index
for right in [Series(r, dtype=dtype), Index(r, dtype=dtype)]:
expected = Series([False, False, True])
assert_series_equal(left == right, expected)
expected = Series([True, True, False])
assert_series_equal(left != right, expected)
expected = Series([False, False, False])
assert_series_equal(left < right, expected)
expected = Series([False, False, False])
assert_series_equal(left > right, expected)
expected = Series([False, False, True])
assert_series_equal(left >= right, expected)
expected = Series([False, False, True])
assert_series_equal(left <= right, expected)
def test_nat_comparisons_scalar(self):
data = [[pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')],
[pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')]]
for l in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
expected = Series([False, False, False])
assert_series_equal(left == pd.NaT, expected)
assert_series_equal(pd.NaT == left, expected)
expected = Series([True, True, True])
assert_series_equal(left != pd.NaT, expected)
assert_series_equal(pd.NaT != left, expected)
expected = Series([False, False, False])
assert_series_equal(left < pd.NaT, expected)
assert_series_equal(pd.NaT > left, expected)
assert_series_equal(left <= pd.NaT, expected)
assert_series_equal(pd.NaT >= left, expected)
assert_series_equal(left > pd.NaT, expected)
assert_series_equal(pd.NaT < left, expected)
assert_series_equal(left >= pd.NaT, expected)
                assert_series_equal(pd.NaT <= left, expected)
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import Pipeline
from sklearn.externals import joblib
from nltk import WordNetLemmatizer
from nltk.corpus import stopwords as sw
from nltk.corpus import wordnet as wn
import nltk
import string
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
from numpy import array
import numpy as np
import os.path
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import KFold, cross_val_predict
from sklearn.naive_bayes import ComplementNB
stopwords = set(sw.words('english'))
stopwords_pt =set(sw.words('portuguese'))
stopwords_sp =set(sw.words('spanish'))
punct = set(string.punctuation)
lemmatizer = WordNetLemmatizer()
filename_sample_sheets = {
'Phenom': 'phenom_samples.xlsx',
'Legacy 500': 'legacy500_samples.xlsx',
'Legacy 600': 'legacy600_samples.xlsx',
'Lineage': 'lineage_samples.xlsx'
}
filename_accuracy_sheets = {
'Phenom': 'phenom_accuracies.xlsx',
'Legacy 500': 'legacy500_accuracies.xlsx',
'Legacy 600': 'legacy600_accuracies.xlsx',
'Lineage': 'lineage_accuracies.xlsx'
}
paths_pickles = {
'Phenom': '../../../../models/failcode/phenom_failcode_clf.pkl',
'Legacy 500': '../../../../models/failcode/legacy500_failcode_clf.pkl',
'Legacy 600': '../../../../models/failcode/legacy600_failcode_clf.pkl',
'Lineage': '../../../../models/failcode/lineage_failcode_clf.pkl'
}
def create_dict():
for f, t in zip(dictionary['ERRADO'], dictionary['CERTO']):
my_dict[f] = t
def replace_words(word):
if my_dict.get(word):
str = my_dict.get(word)
else:
str = word
return str
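# For example, with my_dict = {'teh': 'the'}: replace_words('teh') -> 'the', while words
# that are not in the dictionary pass through unchanged: replace_words('ok') -> 'ok'.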
def inverse_transform(X):
return [" ".join(doc) for doc in X]
def transform(X):
return [
list(tokenize(doc)) for doc in X
]
def listarDocumentos(document):
return document
def tokenize(document):
# Break the document into sentences
for sent in nltk.sent_tokenize(document):
# Break the sentence into part of speech tagged tokens
for token, tag in nltk.pos_tag(nltk.wordpunct_tokenize(sent)):
# Remove spaces before and after in string value
token = token.strip()
token = token.lower()
token = replace_words(token)
if token in stopwords:
continue
if token in stopwords_pt:
continue
if token in stopwords_sp:
continue
if all(char in punct for char in token):
continue
lemma = lemmatize(token, tag)
yield lemma
def lemmatize(token, tag):
tag = {
'N': wn.NOUN,
'V': wn.VERB,
'R': wn.ADV,
'J': wn.ADJ
}.get(tag[0], wn.NOUN)
return WordNetLemmatizer().lemmatize(token, tag)
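# Example (assuming the NLTK wordnet data is installed):
#   lemmatize('running', 'VBG') -> 'run'   (Penn 'V...' tags map to wordnet VERB)
#   lemmatize('cars', 'NNS')    -> 'car'   (Penn 'N...' tags, and unknown tags, map to NOUN)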
def do_preprocessing(df):
# data_preprocessing = df['PROBLEM DESCRIPTION']
data_preprocessing = transform(df)
data_preprocessing = inverse_transform(data_preprocessing)
data_preprocessing = array(data_preprocessing)
data_preprocessing = pd.Series(data_preprocessing)
return data_preprocessing
def main():
global dictionary, my_dict, df_samples, predicted_data, df_test
global dict_accuracies, dict_accuracies_excel, df_accuracy
# Create the dictionary
    xls_dict = pd.ExcelFile('../dictionary.xls')
#!/usr/bin/env python3
#
# This makes a dataframe containing a temporal average of navg last slices
# ========================================================================
#
# Imports
#
# ========================================================================
import os
import re
import glob
import argparse
import numpy as np
import pandas as pd
import utilities
# ========================================================================
#
# Function definitions
#
# ========================================================================
# ========================================================================
#
# Main
#
# ========================================================================
if __name__ == "__main__":
# Parse arguments
parser = argparse.ArgumentParser(description="Average slices over time")
parser.add_argument(
"-f", "--folder", help="Folder where files are stored", type=str, required=True
)
parser.add_argument(
"-n", "--navg", help="Number of time steps to average over", type=int, default=1
)
args = parser.parse_args()
# Setup
fdir = os.path.abspath(args.folder)
oname = os.path.join(fdir, "avg_slice.csv")
prefix = "output"
suffix = ".csv"
# Get time steps, keep only last navg steps
pattern = prefix + "*" + suffix
fnames = sorted(glob.glob(os.path.join(fdir, pattern)))
times = []
for fname in fnames:
times.append(int(re.findall(r"\d+", fname)[-1]))
times = np.unique(sorted(times))[-args.navg :]
# Loop over each time step and get the dataframe
lst = []
for time in times:
pattern = prefix + "*" + str(time) + suffix
fnames = sorted(glob.glob(os.path.join(fdir, pattern)))
df = utilities.get_merged_csv(fnames)
lst.append(df)
df["time"] = time
    df = pd.concat(lst, ignore_index=True)
import sparse
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer
from utils.clustering import *
from utils.plots import *
from lightgbm import LGBMClassifier
def retransform(arr: np.array, df: pd.DataFrame) -> pd.DataFrame:
"""Helper for scikit learn preprocessing."""
return pd.DataFrame(arr, index=df.index, columns=df.columns)
def impute(X, strategy, imputer=None):
"""Missing value imputer."""
if imputer:
X = retransform(imputer.transform(X), X)
else:
imputer = SimpleImputer(strategy=strategy)
X = retransform(imputer.fit_transform(X), X)
return X, imputer
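# Illustrative sketch (not part of the original module): impute() is meant to be fitted on
# the training split and the returned imputer reused on the test split, so both splits share
# the same fill values. The toy frames below are assumptions for demonstration only.
def _example_impute_usage():
    X_tr = pd.DataFrame({"a": [1.0, np.nan, 3.0], "b": [np.nan, 2.0, 2.0]})
    X_te = pd.DataFrame({"a": [np.nan, 5.0], "b": [1.0, np.nan]})
    X_tr_filled, imputer = impute(X_tr, strategy="mean")             # fit + transform
    X_te_filled, _ = impute(X_te, strategy="mean", imputer=imputer)  # transform with fitted imputer
    return X_tr_filled, X_te_filled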
def cwfs(K, partitions,
df, X_tr, y_tr,
tr, cv = None,
plot_importance = False):
"""Cluster Weighted Feature Selection."""
sub_features = None
plt = None
ns = None
if K != 'n_clusters':
## Determine the cluster importance using MDA.
p = partitions.reset_index()
p["index"] = ["feature_" + str(s) for s in p["index"].copy()]
cs = p.groupby("cluster")["index"].apply(list)
clusters = cs.to_dict()
# Determine cluster importance
imp_c = mda_clustered(
LogisticRegression(max_iter=1000),
df.iloc[tr],
X_tr,
y_tr,
clusters,
n_splits=5,
cv = cv
)
if plot_importance:
plt = plot_cluster_importance(imp_c)
# Determine how many features to pick from each cluster
props = imp_c["mean"] / imp_c["mean"].sum()
ns = np.round(props * K).astype(int)
# Select important features and create reduced datasets
sub_features = select_k_features(partitions, ns)
else:
# Select only the top feature w.r.t. importance from each cluster
sub_features = (partitions.sort_values("importance", ascending=False)
.groupby("cluster")
.head(1)['feature'])
return sub_features, ns, (plt, imp_c)
def scfm(part_tr, X_tr_norm, X_te_norm, y_tr, agg, plot=False, verbose=False, seed=None):
"""Sub Cluster Feature Merging."""
# Compute subclusters
X_tr_sc = {}
X_te_sc = {}
n_cl = np.unique(part_tr["cluster"])
for c in ( tqdm(n_cl) if verbose else n_cl ):
## Get features in cluster.
f_in_c = part_tr.query(f"cluster == {c}")["feature"].values
## Clean corr matrix
C_tr_star_clean = clean_matrix_(X_tr_norm[f_in_c])
## Compute subclusters.
sub_part_tr, _ = runN_Louvain(C_tr_star_clean, N=100, verbose=False, random_state=seed)
## Get original feature names back
sub_part_tr["feature"] = part_tr.query(f"cluster == {c}")["feature"].values
for sc in np.unique(sub_part_tr["cluster"]):
f_sc = sub_part_tr.query(f"cluster == {sc}")["feature"].values
X_tr_sc[f"cl{c}_subcl{sc}"] = agg(X_tr_norm[f_sc], axis=1)
X_te_sc[f"cl{c}_subcl{sc}"] = agg(X_te_norm[f_sc], axis=1)
# Convert dict to pandas df
X_tr_sc = pd.DataFrame(X_tr_sc)
    X_te_sc = pd.DataFrame(X_te_sc)
from enum import IntEnum
import os
import pandas as pd
from pathlib import Path
import time
class outcome(IntEnum):
UNCHANGED = 0
IMPROVED = 1
DETERIORATED = -1
class attribute:
geneName = str
variantNumber = int
drugName = str
outcome = str
relation = bool
sideEffect = bool
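    # _type()/_name() below enumerate the declared fields above (skipping "_" names) so
    # they can serve as column types / column names when reading the report files.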
def _type(self):
return [_type for name, _type in (vars(self.__class__).items()) if not name.startswith("_")]
def _name(self):
return [name for name, _type in (vars(self.__class__).items()) if not name.startswith("_")]
# def __init__(self):
# self.geneName = str
class fileReader:
def __init__(self, input_dir, suffix=None):
self.files = []
self.input_dir = input_dir
self.suffix = suffix
self.buffs = {}
self.__loadfiles()
self.data = None
def __loadfiles(self):
with os.scandir(self.input_dir) as it:
for entry in it:
if entry.is_file() and (os.path.splitext(entry.name)[1] == self.suffix or self.suffix == None):
self.files.append(entry)
def readfiles(self):
# self.loadfiles()
for f in self.files:
with open(f, "r") as ifile:
self.buffs[f.name] = ifile.readlines()
return self.buffs
def getFileNames(self):
return [f.name for f in self.files]
def getFilePathes(self):
return [f.path for f in self.files]
class database:
def __init__(self, input_dir):
freader = fileReader(input_dir, '.txt')
buffs = freader.readfiles()
col_names = attribute()._name()
self.data = []
for f in freader.getFilePathes():
self.data.append(pd.read_csv(f, sep='\t', names=col_names))
        self.data = pd.concat(self.data, ignore_index=True)
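# Illustrative sketch (not in the original file; the directory name is a placeholder):
# `database` reads every tab-separated .txt file in the directory into one DataFrame
# whose columns follow the `attribute` field names.
def _example_database_usage(input_dir="path/to/txt_reports"):
    db = database(input_dir)
    return db.data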
import pandas as pd
import numpy as np
# from pandas.core.tools.datetimes import normalize_date
from pandas._libs import tslib
from backend.robinhood_api import RobinhoodAPI
class RobinhoodData:
"""
Wrapper to download orders and dividends from Robinhood accounts
Downloads two dataframes and saves to datafile

    Parameters
    ----------
    datafile : location of h5 datafile
"""
def __init__(self, datafile):
self.datafile = datafile
def _login(self, user, password):
self.client = RobinhoodAPI()
# try import the module with passwords
try:
_temp = __import__('auth')
self.client.login(_temp.local_user, _temp.local_password)
except:
self.client.login(username=user, password=password)
return self
# private method for getting all orders
def _fetch_json_by_url(self, url):
return self.client.session.get(url).json()
# deleting sensitive or redundant fields
def _delete_sensitive_fields(self, df):
for col in ['account', 'url', 'id', 'instrument']:
if col in df:
del df[col]
return df
# download orders and fields requiring RB client
def _download_orders(self):
print("Downloading orders from Robinhood")
orders = []
past_orders = self.client.order_history()
orders.extend(past_orders['results'])
while past_orders['next']:
next_url = past_orders['next']
past_orders = self._fetch_json_by_url(next_url)
orders.extend(past_orders['results'])
df = pd.DataFrame(orders)
df['symbol'] = df['instrument'].apply(
self.client.get_symbol_by_instrument)
df.sort_values(by='created_at', inplace=True)
df.reset_index(inplace=True, drop=True)
df_ord = self._delete_sensitive_fields(df)
return df_ord
# download dividends and fields requiring RB client
def _download_dividends(self):
print("Downloading dividends from Robinhood")
dividends = self.client.dividends()
dividends = [x for x in dividends['results']]
        df = pd.DataFrame(dividends)
import asyncio
import sys
import random as rand
import os
from .integration_test_utils import setup_teardown_test, _generate_table_name, V3ioHeaders, V3ioError
from storey import build_flow, CSVSource, CSVTarget, SyncEmitSource, Reduce, Map, FlatMap, AsyncEmitSource, ParquetTarget, ParquetSource, \
DataframeSource, ReduceToDataFrame
import pandas as pd
import aiohttp
import pytest
import v3io
import uuid
import datetime
@pytest.fixture()
def v3io_create_csv():
# Setup
file_path = _generate_table_name('bigdata/csv_test')
asyncio.run(_write_test_csv(file_path))
# Test runs
yield file_path
# Teardown
asyncio.run(_delete_file(file_path))
@pytest.fixture()
def v3io_teardown_file():
# Setup
file_path = _generate_table_name('bigdata/csv_test')
# Test runs
yield file_path
# Teardown
asyncio.run(_delete_file(file_path))
async def _write_test_csv(file_path):
connector = aiohttp.TCPConnector()
v3io_access = V3ioHeaders()
client_session = aiohttp.ClientSession(connector=connector)
try:
data = "n,n*10\n0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
await client_session.put(f'{v3io_access._webapi_url}/{file_path}', data=data,
headers=v3io_access._get_put_file_headers, ssl=False)
finally:
await client_session.close()
async def _delete_file(path):
connector = aiohttp.TCPConnector()
v3io_access = V3ioHeaders()
client_session = aiohttp.ClientSession(connector=connector)
try:
response = await client_session.delete(f'{v3io_access._webapi_url}/{path}',
headers=v3io_access._get_put_file_headers, ssl=False)
if response.status >= 300 and response.status != 404 and response.status != 409:
body = await response.text()
raise V3ioError(f'Failed to delete item at {path}. Response status code was {response.status}: {body}')
finally:
await client_session.close()
def test_csv_reader_from_v3io(v3io_create_csv):
controller = build_flow([
CSVSource(f'v3io:///{v3io_create_csv}', header=True),
FlatMap(lambda x: x),
Map(lambda x: int(x)),
Reduce(0, lambda acc, x: acc + x),
]).run()
termination_result = controller.await_termination()
assert termination_result == 495
def test_csv_reader_from_v3io_error_on_file_not_found():
controller = build_flow([
CSVSource('v3io:///bigdatra/tests/idontexist.csv', header=True),
]).run()
try:
controller.await_termination()
assert False
except FileNotFoundError:
pass
async def async_test_write_csv_to_v3io(v3io_teardown_csv):
controller = build_flow([
AsyncEmitSource(),
CSVTarget(f'v3io:///{v3io_teardown_csv}', columns=['n', 'n*10'], header=True)
]).run()
for i in range(10):
await controller.emit([i, 10 * i])
await controller.terminate()
await controller.await_termination()
v3io_access = V3ioHeaders()
v3io_client = v3io.aio.dataplane.Client(endpoint=v3io_access._webapi_url, access_key=v3io_access._access_key)
try:
container, path = v3io_teardown_csv.split('/', 1)
result = await v3io_client.object.get(container, path)
finally:
await v3io_client.close()
expected = "n,n*10\n0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
assert result.body.decode("utf-8") == expected
def test_write_csv_to_v3io(v3io_teardown_file):
asyncio.run(async_test_write_csv_to_v3io(v3io_teardown_file))
def test_write_csv_with_dict_to_v3io(v3io_teardown_file):
file_path = f'v3io:///{v3io_teardown_file}'
controller = build_flow([
SyncEmitSource(),
CSVTarget(file_path, columns=['n', 'n*10'], header=True)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i})
controller.terminate()
controller.await_termination()
v3io_access = V3ioHeaders()
v3io_client = v3io.dataplane.Client(endpoint=v3io_access._webapi_url, access_key=v3io_access._access_key)
try:
container, path = v3io_teardown_file.split('/', 1)
result = v3io_client.object.get(container, path)
finally:
v3io_client.close()
expected = "n,n*10\n0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
assert result.body.decode("utf-8") == expected
def test_write_csv_infer_columns_without_header_to_v3io(v3io_teardown_file):
file_path = f'v3io:///{v3io_teardown_file}'
controller = build_flow([
SyncEmitSource(),
CSVTarget(file_path)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i})
controller.terminate()
controller.await_termination()
v3io_access = V3ioHeaders()
v3io_client = v3io.dataplane.Client(endpoint=v3io_access._webapi_url, access_key=v3io_access._access_key)
try:
container, path = v3io_teardown_file.split('/', 1)
result = v3io_client.object.get(container, path)
finally:
v3io_client.close()
expected = "0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
assert result.body.decode("utf-8") == expected
def test_write_csv_from_lists_with_metadata_and_column_pruning_to_v3io(v3io_teardown_file):
file_path = f'v3io:///{v3io_teardown_file}'
controller = build_flow([
SyncEmitSource(),
CSVTarget(file_path, columns=['event_key=$key', 'n*10'], header=True)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i}, key=f'key{i}')
controller.terminate()
controller.await_termination()
v3io_access = V3ioHeaders()
v3io_client = v3io.dataplane.Client(endpoint=v3io_access._webapi_url, access_key=v3io_access._access_key)
try:
container, path = v3io_teardown_file.split('/', 1)
result = v3io_client.object.get(container, path)
finally:
v3io_client.close()
expected = "event_key,n*10\nkey0,0\nkey1,10\nkey2,20\nkey3,30\nkey4,40\nkey5,50\nkey6,60\nkey7,70\nkey8,80\nkey9,90\n"
assert result.body.decode("utf-8") == expected
def test_write_to_parquet_to_v3io(setup_teardown_test):
out_dir = f'v3io:///{setup_teardown_test}'
columns = ['my_int', 'my_string']
controller = build_flow([
SyncEmitSource(),
ParquetTarget(out_dir, partition_cols='my_int', columns=columns, max_events=1)
]).run()
expected = []
for i in range(10):
controller.emit([i, f'this is {i}'])
expected.append([i, f'this is {i}'])
expected_in_pyarrow1 = pd.DataFrame(expected, columns=columns)
expected_in_pyarrow3 = expected_in_pyarrow1.copy()
expected_in_pyarrow1['my_int'] = expected_in_pyarrow1['my_int'].astype('int32')
expected_in_pyarrow3['my_int'] = expected_in_pyarrow3['my_int'].astype('category')
controller.terminate()
controller.await_termination()
read_back_df = pd.read_parquet(out_dir, columns=columns)
assert read_back_df.equals(expected_in_pyarrow1) or read_back_df.equals(expected_in_pyarrow3)
def test_write_to_parquet_to_v3io_single_file_on_termination(setup_teardown_test):
out_file = f'v3io:///{setup_teardown_test}/out.parquet'
columns = ['my_int', 'my_string']
controller = build_flow([
SyncEmitSource(),
ParquetTarget(out_file, columns=columns)
]).run()
expected = []
for i in range(10):
controller.emit([i, f'this is {i}'])
expected.append([i, f'this is {i}'])
expected = pd.DataFrame(expected, columns=columns, dtype='int64')
controller.terminate()
controller.await_termination()
read_back_df = pd.read_parquet(out_file, columns=columns)
assert read_back_df.equals(expected), f"{read_back_df}\n!=\n{expected}"
# ML-775
def test_write_to_parquet_key_hash_partitioning(setup_teardown_test):
out_dir = f'v3io:///{setup_teardown_test}/test_write_to_parquet_default_partitioning{uuid.uuid4().hex}/'
controller = build_flow([
SyncEmitSource(key_field=1),
ParquetTarget(out_dir, columns=['my_int', 'my_string'], partition_cols=[('$key', 4)])
]).run()
expected = []
expected_buckets = [3, 0, 1, 3, 0, 3, 1, 1, 1, 2]
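    # expected_buckets are the 4-way hash buckets assigned to key0..key9 by the
    # ('$key', 4) hash partitioning configured above.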
for i in range(10):
controller.emit([i, f'this is {i}'])
expected.append([i, f'this is {i}', expected_buckets[i]])
expected = pd.DataFrame(expected, columns=['my_int', 'my_string', 'hash4_key'])
controller.terminate()
controller.await_termination()
read_back_df = pd.read_parquet(out_dir)
read_back_df['hash4_key'] = read_back_df['hash4_key'].astype('int64')
read_back_df.sort_values('my_int', inplace=True)
read_back_df.reset_index(inplace=True, drop=True)
assert read_back_df.equals(expected), f"{read_back_df}\n!=\n{expected}"
# ML-701
def test_write_to_parquet_to_v3io_force_string_to_timestamp(setup_teardown_test):
out_file = f'v3io:///{setup_teardown_test}/out.parquet'
columns = ['time']
controller = build_flow([
SyncEmitSource(),
ParquetTarget(out_file, columns=[('time', 'datetime')])
]).run()
expected = []
for i in range(10):
t = '2021-03-02T19:45:00'
controller.emit([t])
expected.append([datetime.datetime.fromisoformat(t)])
expected = pd.DataFrame(expected, columns=columns)
controller.terminate()
controller.await_termination()
read_back_df = pd.read_parquet(out_file, columns=columns)
assert read_back_df.equals(expected)
def test_write_to_parquet_to_v3io_with_indices(setup_teardown_test):
out_file = f'v3io:///{setup_teardown_test}/test_write_to_parquet_with_indices{uuid.uuid4().hex}.parquet'
controller = build_flow([
SyncEmitSource(),
ParquetTarget(out_file, index_cols='event_key=$key', columns=['my_int', 'my_string'])
]).run()
expected = []
for i in range(10):
controller.emit([i, f'this is {i}'], key=f'key{i}')
expected.append([f'key{i}', i, f'this is {i}'])
columns = ['event_key', 'my_int', 'my_string']
expected = pd.DataFrame(expected, columns=columns, dtype='int64')
expected.set_index(['event_key'], inplace=True)
controller.terminate()
controller.await_termination()
read_back_df = | pd.read_parquet(out_file, columns=columns) | pandas.read_parquet |
from typing import List
import pandas as pd
from matplotlib import pyplot as plt
import mundi
import sidekick as sk
from mundi import Region
from pydemic.utils import fmt, pc
from pydemic_ui import st
from pydemic_ui.app import SimpleApp
from pydemic_ui.apps.sitrep import abstract, cases_or_deaths, cases_plot
from pydemic_ui.i18n import _, __
APPS = {
"abstract": abstract,
"cases_or_deaths": cases_or_deaths,
"cases_plot": cases_plot,
}
class ReportCasesTableApp(SimpleApp):
"""
A simple table-centric situation report.
It shows the number of cases and deaths in the last 14 and 28 days for each
sub-region in the selected query.
"""
message = __(
"""
The highest increase in the reported case rate in the last 14 days was observed in
the {self.max_cases_region} region, followed by the {self.other_cases_regions} regions.
The highest increase in the mortality rate in the last 14 days was observed in the
{self.max_deaths_region}, followed by the {self.other_deaths_regions} regions.
"""
)
mundi_query = {"country_code": "BR", "type": "state"}
@sk.lazy
def regions(self) -> List[Region]:
"""
Return a list of regions
"""
return regions(**self.mundi_query)
@sk.lazy
def tables(self):
"""
Compute tables by state and by region.
"""
def get_data(rs):
data = [*map(info, rs)]
index = [r.id for r in rs]
data = pd.DataFrame(data, index=index)
data.columns = pd.MultiIndex.from_tuples(data.columns)
dtypes = {col: float for col in data.dtypes.keys()}
dtypes["", "name"] = str
return data.astype(dtypes)
parent_ids = sorted({r.parent_id for r in self.regions})
parents = [*map(mundi.region, parent_ids)]
return get_data(parents), get_data(self.regions)
def _max_region(self, col):
data, _ = self.tables
names = data[col].sort_values()
return mundi.region(names.index[-1]).name
def _other_regions(self, col):
data, _ = self.tables
names = data.sort_values(col).mundi["name"]
*other, last = names.iloc[1:]
other = ", ".join(other)
if other:
return _(" and ").join([other, last])
return last
@sk.lazy
@sk.ignore_error(AttributeError, handler=str)
def max_cases_region(self):
return self._max_region(("14 days", "cases"))
@sk.lazy
@sk.ignore_error(AttributeError, handler=str)
def max_deaths_region(self):
return self._max_region(("14 days", "deaths"))
@sk.lazy
@sk.ignore_error(AttributeError, handler=str)
def other_cases_regions(self):
return self._other_regions(("14 days", "cases"))
@sk.lazy
@sk.ignore_error(AttributeError, handler=str)
def other_deaths_regions(self):
return self._other_regions(("14 days", "deaths"))
def ask(self):
...
def show(self):
by_region, by_state = self.tables
self.st.markdown(self.message.format(self=self))
self.st.subheader(_("Epidemic situation, by region"))
self.st.markdown(_("Cases and deaths by 100k people."))
self.show_table(by_region)
self.st.subheader(_("Epidemic situation, by state"))
self.st.markdown(_("Cases and deaths by 100k people."))
self.show_table(by_state)
def show_table(self, data):
self.st.write(
data.style_dataframe.format(fmt)
.format({("increase", x): pc for x in ["cases", "deaths"]})
.highlight_max(axis=0, color="red")
.highlight_min(axis=0, color="green")
)
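# Usage sketch (assumes a pydemic_ui/Streamlit rendering context; mundi data is
# fetched lazily the first time `tables` is accessed):
#
#   app = ReportCasesTableApp()
#   app.ask()   # no-op here, kept for the SimpleApp interface
#   app.show()  # renders the summary paragraph plus the region and state tables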
def groupby_parent(data, column="parent_id"):
parents_col = pd.DataFrame({column: parents(data.index)})
new = | pd.concat([data, parents_col], axis=1) | pandas.concat |
# coding: utf-8
# In[1]:
import pandas as pd
tweets = | pd.read_csv("tweets.csv") | pandas.read_csv |
import sys
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
'''
Load messages and categories files from a csv to a dataframe, merge them togheter and return the dataframe
Attributes:
messages_filepath = fullpath including filename
categories_filepath = fullpath including filename
'''
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
df = messages.merge(categories, how='left', on='id')
return df
def clean_data(df):
'''
Clean and transform the dataframe to expand the categories into individual columns
Attributes:
df = merged dataframe
'''
# create a dataframe of the 36 individual category columns
categories = df['categories'].str.split(';', expand=True)
names = list(categories.iloc[0,:].str[:-2])
categories.columns = names
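# each cell looks like '<category>-<0 or 1>'; keep only the trailing digit and
# convert it to a number below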
categories = categories.apply(lambda x: x.str[-1], axis=1)
categories = categories.apply(pd.to_numeric, axis=1)
categories['related'].replace(2, 1, inplace=True) #replace 2 by 1
# drop the original categories column from `df`
df.drop(columns=['categories'], inplace=True)
# concatenate the original dataframe with the new `categories` dataframe
cleaned_df = pd.concat([df, categories], axis=1)
return cleaned_df
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from numba import njit
import vectorbt as vbt
from tests.utils import record_arrays_close
from vectorbt.generic.enums import range_dt, drawdown_dt
from vectorbt.portfolio.enums import order_dt, trade_dt, log_dt
day_dt = np.timedelta64(86400000000000)
example_dt = np.dtype([
('id', np.int64),
('col', np.int64),
('idx', np.int64),
('some_field1', np.float64),
('some_field2', np.float64)
], align=True)
records_arr = np.asarray([
(0, 0, 0, 10, 21),
(1, 0, 1, 11, 20),
(2, 0, 2, 12, 19),
(3, 1, 0, 13, 18),
(4, 1, 1, 14, 17),
(5, 1, 2, 13, 18),
(6, 2, 0, 12, 19),
(7, 2, 1, 11, 20),
(8, 2, 2, 10, 21)
], dtype=example_dt)
records_nosort_arr = np.concatenate((
records_arr[0::3],
records_arr[1::3],
records_arr[2::3]
))
group_by = pd.Index(['g1', 'g1', 'g2', 'g2'])
wrapper = vbt.ArrayWrapper(
index=['x', 'y', 'z'],
columns=['a', 'b', 'c', 'd'],
ndim=2,
freq='1 days'
)
wrapper_grouped = wrapper.replace(group_by=group_by)
records = vbt.records.Records(wrapper, records_arr)
records_grouped = vbt.records.Records(wrapper_grouped, records_arr)
records_nosort = records.replace(records_arr=records_nosort_arr)
records_nosort_grouped = vbt.records.Records(wrapper_grouped, records_nosort_arr)
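# Fixture layout (a sketch of the data above): columns 'a', 'b' and 'c' each hold
# three records at rows 'x', 'y', 'z'; column 'd' holds none. With `group_by`,
# 'a' and 'b' form group 'g1' (6 records) and 'c' and 'd' form 'g2' (3 records).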
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# col_mapper.py ############# #
class TestColumnMapper:
def test_col_arr(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
records.col_mapper.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_get_col_arr(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_arr(),
records.col_mapper.col_arr
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
)
def test_col_range(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_range,
np.array([
[0, 3]
])
)
np.testing.assert_array_equal(
records.col_mapper.col_range,
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
def test_get_col_range(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_range(),
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_range(),
np.array([[0, 6]])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_range(),
np.array([[0, 6], [6, 9]])
)
def test_col_map(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[0],
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[1],
np.array([3])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[1],
np.array([3, 3, 3, 0])
)
def test_get_col_map(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[0],
records.col_mapper.col_map[0]
)
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[1],
records.col_mapper.col_map[1]
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[1],
np.array([6])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[1],
np.array([6, 3])
)
def test_is_sorted(self):
assert records.col_mapper.is_sorted()
assert not records_nosort.col_mapper.is_sorted()
# ############# mapped_array.py ############# #
mapped_array = records.map_field('some_field1')
mapped_array_grouped = records_grouped.map_field('some_field1')
mapped_array_nosort = records_nosort.map_field('some_field1')
mapped_array_nosort_grouped = records_nosort_grouped.map_field('some_field1')
mapping = {x: 'test_' + str(x) for x in pd.unique(mapped_array.values)}
mp_mapped_array = mapped_array.replace(mapping=mapping)
mp_mapped_array_grouped = mapped_array_grouped.replace(mapping=mapping)
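# `mapping` labels each unique mapped value (10.0..14.0) as 'test_<value>'; the
# mapped arrays built with it are used below to exercise value_counts() and
# stats() with value mappings.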
class TestMappedArray:
def test_config(self, tmp_path):
assert vbt.MappedArray.loads(mapped_array.dumps()) == mapped_array
mapped_array.save(tmp_path / 'mapped_array')
assert vbt.MappedArray.load(tmp_path / 'mapped_array') == mapped_array
def test_mapped_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
mapped_array.values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
def test_id_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.id_arr,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
def test_col_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
mapped_array.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_idx_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].idx_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.idx_arr,
np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
)
def test_is_sorted(self):
assert mapped_array.is_sorted()
assert mapped_array.is_sorted(incl_id=True)
assert not mapped_array_nosort.is_sorted()
assert not mapped_array_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert mapped_array.sort().is_sorted()
assert mapped_array.sort().is_sorted(incl_id=True)
assert mapped_array.sort(incl_id=True).is_sorted(incl_id=True)
assert mapped_array_nosort.sort().is_sorted()
assert mapped_array_nosort.sort().is_sorted(incl_id=True)
assert mapped_array_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = mapped_array['a'].values >= mapped_array['a'].values.mean()
np.testing.assert_array_equal(
mapped_array['a'].apply_mask(mask_a).id_arr,
np.array([1, 2])
)
mask = mapped_array.values >= mapped_array.values.mean()
filtered = mapped_array.apply_mask(mask)
np.testing.assert_array_equal(
filtered.id_arr,
np.array([2, 3, 4, 5, 6])
)
np.testing.assert_array_equal(filtered.col_arr, mapped_array.col_arr[mask])
np.testing.assert_array_equal(filtered.idx_arr, mapped_array.idx_arr[mask])
assert mapped_array_grouped.apply_mask(mask).wrapper == mapped_array_grouped.wrapper
assert mapped_array_grouped.apply_mask(mask, group_by=False).wrapper.grouper.group_by is None
def test_map_to_mask(self):
@njit
def every_2_nb(inout, idxs, col, mapped_arr):
inout[idxs[::2]] = True
np.testing.assert_array_equal(
mapped_array.map_to_mask(every_2_nb),
np.array([True, False, True, True, False, True, True, False, True])
)
def test_top_n_mask(self):
np.testing.assert_array_equal(
mapped_array.top_n_mask(1),
np.array([False, False, True, False, True, False, True, False, False])
)
def test_bottom_n_mask(self):
np.testing.assert_array_equal(
mapped_array.bottom_n_mask(1),
np.array([True, False, False, True, False, False, False, False, True])
)
def test_top_n(self):
np.testing.assert_array_equal(
mapped_array.top_n(1).id_arr,
np.array([2, 4, 6])
)
def test_bottom_n(self):
np.testing.assert_array_equal(
mapped_array.bottom_n(1).id_arr,
np.array([0, 3, 8])
)
def test_to_pd(self):
target = pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
index=wrapper.index,
columns=wrapper.columns
)
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(),
target['a']
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(),
target
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0.),
target.fillna(0.)
)
mapped_array2 = vbt.MappedArray(
wrapper,
records_arr['some_field1'].tolist() + [1],
records_arr['col'].tolist() + [2],
idx_arr=records_arr['idx'].tolist() + [2]
)
with pytest.raises(Exception):
_ = mapped_array2.to_pd()
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(ignore_index=True),
pd.Series(np.array([10., 11., 12.]), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0, ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., 0.],
[11., 14., 11., 0.],
[12., 13., 10., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 12.],
[11., 11.],
[12., 10.],
[13., np.nan],
[14., np.nan],
[13., np.nan],
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_apply(self):
@njit
def cumsum_apply_nb(idxs, col, a):
return np.cumsum(a)
np.testing.assert_array_equal(
mapped_array['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
mapped_array.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert mapped_array_grouped.apply(cumsum_apply_nb).wrapper == \
mapped_array.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert mapped_array.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_reduce(self):
@njit
def mean_reduce_nb(col, a):
return np.mean(a)
assert mapped_array['a'].reduce(mean_reduce_nb) == 11.
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0.),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0., wrap_kwargs=dict(dtype=np.int_)),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, wrap_kwargs=dict(to_timedelta=True)),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(mean_reduce_nb),
pd.Series([12.166666666666666, 11.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
assert mapped_array_grouped['g1'].reduce(mean_reduce_nb) == 12.166666666666666
pd.testing.assert_series_equal(
mapped_array_grouped[['g1']].reduce(mean_reduce_nb),
pd.Series([12.166666666666666], index=pd.Index(['g1'], dtype='object')).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
mapped_array_grouped.reduce(mean_reduce_nb, group_by=False)
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, group_by=group_by),
mapped_array_grouped.reduce(mean_reduce_nb)
)
def test_reduce_to_idx(self):
@njit
def argmin_reduce_nb(col, a):
return np.argmin(a)
assert mapped_array['a'].reduce(argmin_reduce_nb, returns_idx=True) == 'x'
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True),
pd.Series(np.array(['x', 'x', 'z', np.nan], dtype=object), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 0, 2, -1], dtype=int), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 2], dtype=int), index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
def test_reduce_to_array(self):
@njit
def min_max_reduce_nb(col, a):
return np.array([np.min(a), np.max(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(min_max_reduce_nb, returns_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([10., 12.], index=pd.Index(['min', 'max'], dtype='object'), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
index=pd.Index(['min', 'max'], dtype='object'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, fill_value=0.),
pd.DataFrame(
np.array([
[10., 13., 10., 0.],
[12., 14., 12., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(to_timedelta=True)),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
columns=wrapper.columns
) * day_dt
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame(
np.array([
[10., 10.],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True, group_by=False)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, group_by=group_by),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g1'].reduce(min_max_reduce_nb, returns_array=True),
pd.Series([10., 14.], name='g1')
)
pd.testing.assert_frame_equal(
mapped_array_grouped[['g1']].reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame([[10.], [14.]], columns=pd.Index(['g1'], dtype='object'))
)
def test_reduce_to_idx_array(self):
@njit
def idxmin_idxmax_reduce_nb(col, a):
return np.array([np.argmin(a), np.argmax(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['min', 'max'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.DataFrame(
{
'a': ['x', 'z'],
'b': ['x', 'y'],
'c': ['z', 'x'],
'd': [np.nan, np.nan]
},
index=pd.Index(['min', 'max'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 0, 2, -1],
[2, 1, 0, -1]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 2],
[1, 0]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_nth(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth(0),
pd.Series(np.array([10., 13., 12., np.nan]), index=wrapper.columns).rename('nth')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth(-1),
pd.Series(np.array([12., 13., 10., np.nan]), index=wrapper.columns).rename('nth')
)
with pytest.raises(Exception):
_ = mapped_array.nth(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth(0),
pd.Series(np.array([10., 12.]), index=pd.Index(['g1', 'g2'], dtype='object')).rename('nth')
)
def test_nth_index(self):
assert mapped_array['a'].nth_index(0) == 'x'
pd.testing.assert_series_equal(
mapped_array.nth_index(0),
pd.Series(
np.array(['x', 'x', 'x', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
assert mapped_array['a'].nth_index(-1) == 'z'
pd.testing.assert_series_equal(
mapped_array.nth_index(-1),
pd.Series(
np.array(['z', 'z', 'z', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
with pytest.raises(Exception):
_ = mapped_array.nth_index(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth_index(0),
pd.Series(
np.array(['x', 'x'], dtype='object'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('nth_index')
)
def test_min(self):
assert mapped_array['a'].min() == mapped_array['a'].to_pd().min()
pd.testing.assert_series_equal(
mapped_array.min(),
mapped_array.to_pd().min().rename('min')
)
pd.testing.assert_series_equal(
mapped_array_grouped.min(),
pd.Series([10., 10.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('min')
)
def test_max(self):
assert mapped_array['a'].max() == mapped_array['a'].to_pd().max()
pd.testing.assert_series_equal(
mapped_array.max(),
mapped_array.to_pd().max().rename('max')
)
pd.testing.assert_series_equal(
mapped_array_grouped.max(),
pd.Series([14., 12.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('max')
)
def test_mean(self):
assert mapped_array['a'].mean() == mapped_array['a'].to_pd().mean()
pd.testing.assert_series_equal(
mapped_array.mean(),
mapped_array.to_pd().mean().rename('mean')
)
pd.testing.assert_series_equal(
mapped_array_grouped.mean(),
pd.Series([12.166667, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('mean')
)
def test_median(self):
assert mapped_array['a'].median() == mapped_array['a'].to_pd().median()
pd.testing.assert_series_equal(
mapped_array.median(),
mapped_array.to_pd().median().rename('median')
)
pd.testing.assert_series_equal(
mapped_array_grouped.median(),
pd.Series([12.5, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('median')
)
def test_std(self):
assert mapped_array['a'].std() == mapped_array['a'].to_pd().std()
pd.testing.assert_series_equal(
mapped_array.std(),
mapped_array.to_pd().std().rename('std')
)
pd.testing.assert_series_equal(
mapped_array.std(ddof=0),
mapped_array.to_pd().std(ddof=0).rename('std')
)
pd.testing.assert_series_equal(
mapped_array_grouped.std(),
pd.Series([1.4719601443879746, 1.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('std')
)
def test_sum(self):
assert mapped_array['a'].sum() == mapped_array['a'].to_pd().sum()
pd.testing.assert_series_equal(
mapped_array.sum(),
mapped_array.to_pd().sum().rename('sum')
)
pd.testing.assert_series_equal(
mapped_array_grouped.sum(),
pd.Series([73.0, 33.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('sum')
)
def test_count(self):
assert mapped_array['a'].count() == mapped_array['a'].to_pd().count()
pd.testing.assert_series_equal(
mapped_array.count(),
mapped_array.to_pd().count().rename('count')
)
pd.testing.assert_series_equal(
mapped_array_grouped.count(),
pd.Series([6, 3], index=pd.Index(['g1', 'g2'], dtype='object')).rename('count')
)
def test_idxmin(self):
assert mapped_array['a'].idxmin() == mapped_array['a'].to_pd().idxmin()
pd.testing.assert_series_equal(
mapped_array.idxmin(),
mapped_array.to_pd().idxmin().rename('idxmin')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmin(),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmin')
)
def test_idxmax(self):
assert mapped_array['a'].idxmax() == mapped_array['a'].to_pd().idxmax()
pd.testing.assert_series_equal(
mapped_array.idxmax(),
mapped_array.to_pd().idxmax().rename('idxmax')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmax(),
pd.Series(
np.array(['y', 'x'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmax')
)
def test_describe(self):
pd.testing.assert_series_equal(
mapped_array['a'].describe(),
mapped_array['a'].to_pd().describe()
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=None),
mapped_array.to_pd().describe(percentiles=None)
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=[]),
mapped_array.to_pd().describe(percentiles=[])
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=np.arange(0, 1, 0.1)),
mapped_array.to_pd().describe(percentiles=np.arange(0, 1, 0.1))
)
pd.testing.assert_frame_equal(
mapped_array_grouped.describe(),
pd.DataFrame(
np.array([
[6., 3.],
[12.16666667, 11.],
[1.47196014, 1.],
[10., 10.],
[11.25, 10.5],
[12.5, 11.],
[13., 11.5],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object'),
index=mapped_array.describe().index
)
)
def test_value_counts(self):
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(),
pd.Series(
np.array([1, 1, 1]),
index=pd.Float64Index([10.0, 11.0, 12.0], dtype='float64'),
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(mapping=mapping),
pd.Series(
np.array([1, 1, 1]),
index=pd.Index(['test_10.0', 'test_11.0', 'test_12.0'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.value_counts(),
pd.DataFrame(
np.array([
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 0, 1, 0],
[0, 2, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.value_counts(),
pd.DataFrame(
np.array([
[1, 1],
[1, 1],
[1, 1],
[2, 0],
[1, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
mapped_array2 = mapped_array.replace(mapped_arr=[4, 4, 3, 2, np.nan, 4, 3, 2, 1])
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=False),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[1, 0, 1, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 3.0, 2.0, 1.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([1.0, 2.0, 3.0, 4.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, ascending=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0]
]),
index=pd.Float64Index([1.0, np.nan, 2.0, 3.0, 4.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True),
pd.DataFrame(
np.array([
[0.2222222222222222, 0.1111111111111111, 0.0, 0.0],
[0.0, 0.1111111111111111, 0.1111111111111111, 0.0],
[0.1111111111111111, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.1111111111111111, 0.0, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True, dropna=True),
pd.DataFrame(
np.array([
[0.25, 0.125, 0.0, 0.0],
[0.0, 0.125, 0.125, 0.0],
[0.125, 0.0, 0.125, 0.0],
[0.0, 0.0, 0.125, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0], dtype='float64'),
columns=wrapper.columns
)
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
ma = mapped_array_nosort
ma_grouped = mapped_array_nosort_grouped
else:
ma = mapped_array
ma_grouped = mapped_array_grouped
np.testing.assert_array_equal(
ma['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
ma['a'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
np.testing.assert_array_equal(
ma['b'].id_arr,
np.array([3, 4, 5])
)
np.testing.assert_array_equal(
ma['b'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'a']].id_arr,
np.array([0, 1, 2, 0, 1, 2])
)
np.testing.assert_array_equal(
ma[['a', 'a']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'b']].id_arr,
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
ma[['a', 'b']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = ma.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped['g1'].wrapper.ndim == 2
assert ma_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert ma_grouped['g2'].wrapper.ndim == 2
assert ma_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped[['g1']].wrapper.ndim == 2
assert ma_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert ma_grouped[['g1', 'g2']].wrapper.ndim == 2
assert ma_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_magic(self):
a = vbt.MappedArray(
wrapper,
records_arr['some_field1'],
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
a_inv = vbt.MappedArray(
wrapper,
records_arr['some_field1'][::-1],
records_arr['col'][::-1],
id_arr=records_arr['id'][::-1],
idx_arr=records_arr['idx'][::-1]
)
b = records_arr['some_field2']
a_bool = vbt.MappedArray(
wrapper,
records_arr['some_field1'] > np.mean(records_arr['some_field1']),
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
b_bool = records_arr['some_field2'] > np.mean(records_arr['some_field2'])
assert a ** a == a ** 2
with pytest.raises(Exception):
_ = a * a_inv
# binary ops
# comparison ops
np.testing.assert_array_equal((a == b).values, a.values == b)
np.testing.assert_array_equal((a != b).values, a.values != b)
np.testing.assert_array_equal((a < b).values, a.values < b)
np.testing.assert_array_equal((a > b).values, a.values > b)
np.testing.assert_array_equal((a <= b).values, a.values <= b)
np.testing.assert_array_equal((a >= b).values, a.values >= b)
# arithmetic ops
np.testing.assert_array_equal((a + b).values, a.values + b)
np.testing.assert_array_equal((a - b).values, a.values - b)
np.testing.assert_array_equal((a * b).values, a.values * b)
np.testing.assert_array_equal((a ** b).values, a.values ** b)
np.testing.assert_array_equal((a % b).values, a.values % b)
np.testing.assert_array_equal((a // b).values, a.values // b)
np.testing.assert_array_equal((a / b).values, a.values / b)
# __r*__ is only called if the left object does not have an __*__ method
np.testing.assert_array_equal((10 + a).values, 10 + a.values)
np.testing.assert_array_equal((10 - a).values, 10 - a.values)
np.testing.assert_array_equal((10 * a).values, 10 * a.values)
np.testing.assert_array_equal((10 ** a).values, 10 ** a.values)
np.testing.assert_array_equal((10 % a).values, 10 % a.values)
np.testing.assert_array_equal((10 // a).values, 10 // a.values)
np.testing.assert_array_equal((10 / a).values, 10 / a.values)
# mask ops
np.testing.assert_array_equal((a_bool & b_bool).values, a_bool.values & b_bool)
np.testing.assert_array_equal((a_bool | b_bool).values, a_bool.values | b_bool)
np.testing.assert_array_equal((a_bool ^ b_bool).values, a_bool.values ^ b_bool)
np.testing.assert_array_equal((True & a_bool).values, True & a_bool.values)
np.testing.assert_array_equal((True | a_bool).values, True | a_bool.values)
np.testing.assert_array_equal((True ^ a_bool).values, True ^ a_bool.values)
# unary ops
np.testing.assert_array_equal((-a).values, -a.values)
np.testing.assert_array_equal((+a).values, +a.values)
np.testing.assert_array_equal((abs(-a)).values, abs((-a.values)))
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Mean', 'Std', 'Min', 'Median', 'Max', 'Min Index', 'Max Index'
], dtype='object')
pd.testing.assert_series_equal(
mapped_array.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
2.25, 11.777777777777779, 0.859116756396542, 11.0, 11.666666666666666, 12.666666666666666
],
index=stats_index[:-2],
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
3, 11.0, 1.0, 10.0, 11.0, 12.0, 'x', 'z'
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
6, 12.166666666666666, 1.4719601443879746, 10.0, 12.5, 14.0, 'x', 'y'
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
def test_stats_mapping(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Value Counts: test_10.0',
'Value Counts: test_11.0', 'Value Counts: test_12.0',
'Value Counts: test_13.0', 'Value Counts: test_14.0'
], dtype='object')
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
2.25, 0.5, 0.5, 0.5, 0.5, 0.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='a'),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
3, 1, 1, 1, 0, 0
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
6, 1, 1, 1, 2, 1
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
mapped_array.stats(settings=dict(mapping=mapping))
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mp_mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 9)
pd.testing.assert_index_equal(stats_df.index, mp_mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# base.py ############# #
class TestRecords:
def test_config(self, tmp_path):
assert vbt.Records.loads(records['a'].dumps()) == records['a']
assert vbt.Records.loads(records.dumps()) == records
records.save(tmp_path / 'records')
assert vbt.Records.load(tmp_path / 'records') == records
def test_records(self):
pd.testing.assert_frame_equal(
records.records,
pd.DataFrame.from_records(records_arr)
)
def test_recarray(self):
np.testing.assert_array_equal(records['a'].recarray.some_field1, records['a'].values['some_field1'])
np.testing.assert_array_equal(records.recarray.some_field1, records.values['some_field1'])
def test_records_readable(self):
pd.testing.assert_frame_equal(
records.records_readable,
pd.DataFrame([
[0, 'a', 'x', 10.0, 21.0], [1, 'a', 'y', 11.0, 20.0], [2, 'a', 'z', 12.0, 19.0],
[3, 'b', 'x', 13.0, 18.0], [4, 'b', 'y', 14.0, 17.0], [5, 'b', 'z', 13.0, 18.0],
[6, 'c', 'x', 12.0, 19.0], [7, 'c', 'y', 11.0, 20.0], [8, 'c', 'z', 10.0, 21.0]
], columns=pd.Index(['Id', 'Column', 'Timestamp', 'some_field1', 'some_field2'], dtype='object'))
)
def test_is_sorted(self):
assert records.is_sorted()
assert records.is_sorted(incl_id=True)
assert not records_nosort.is_sorted()
assert not records_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert records.sort().is_sorted()
assert records.sort().is_sorted(incl_id=True)
assert records.sort(incl_id=True).is_sorted(incl_id=True)
assert records_nosort.sort().is_sorted()
assert records_nosort.sort().is_sorted(incl_id=True)
assert records_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = records['a'].values['some_field1'] >= records['a'].values['some_field1'].mean()
record_arrays_close(
records['a'].apply_mask(mask_a).values,
np.array([
(1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
mask = records.values['some_field1'] >= records.values['some_field1'].mean()
filtered = records.apply_mask(mask)
record_arrays_close(
filtered.values,
np.array([
(2, 0, 2, 12., 19.), (3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.),
(5, 1, 2, 13., 18.), (6, 2, 0, 12., 19.)
], dtype=example_dt)
)
assert records_grouped.apply_mask(mask).wrapper == records_grouped.wrapper
def test_map_field(self):
np.testing.assert_array_equal(
records['a'].map_field('some_field1').values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
records.map_field('some_field1').values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
assert records_grouped.map_field('some_field1').wrapper == \
records.map_field('some_field1', group_by=group_by).wrapper
assert records_grouped.map_field('some_field1', group_by=False).wrapper.grouper.group_by is None
def test_map(self):
@njit
def map_func_nb(record):
return record['some_field1'] + record['some_field2']
np.testing.assert_array_equal(
records['a'].map(map_func_nb).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map(map_func_nb).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map(map_func_nb).wrapper == \
records.map(map_func_nb, group_by=group_by).wrapper
assert records_grouped.map(map_func_nb, group_by=False).wrapper.grouper.group_by is None
def test_map_array(self):
arr = records_arr['some_field1'] + records_arr['some_field2']
np.testing.assert_array_equal(
records['a'].map_array(arr[:3]).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map_array(arr).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map_array(arr).wrapper == \
records.map_array(arr, group_by=group_by).wrapper
assert records_grouped.map_array(arr, group_by=False).wrapper.grouper.group_by is None
def test_apply(self):
@njit
def cumsum_apply_nb(records):
return np.cumsum(records['some_field1'])
np.testing.assert_array_equal(
records['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
records.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert records_grouped.apply(cumsum_apply_nb).wrapper == \
records.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert records_grouped.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_count(self):
assert records['a'].count() == 3
pd.testing.assert_series_equal(
records.count(),
pd.Series(
np.array([3, 3, 3, 0]),
index=wrapper.columns
).rename('count')
)
assert records_grouped['g1'].count() == 6
pd.testing.assert_series_equal(
records_grouped.count(),
pd.Series(
np.array([6, 3]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('count')
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
r = records_nosort
r_grouped = records_nosort_grouped
else:
r = records
r_grouped = records_grouped
record_arrays_close(
r['a'].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
pd.testing.assert_index_equal(
r['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
record_arrays_close(
r[['a', 'a']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(0, 1, 0, 10., 21.), (1, 1, 1, 11., 20.), (2, 1, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
record_arrays_close(
r[['a', 'b']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.), (5, 1, 2, 13., 18.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = r.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped['g1'].wrapper.ndim == 2
assert r_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert r_grouped['g2'].wrapper.ndim == 2
assert r_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped[['g1']].wrapper.ndim == 2
assert r_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert r_grouped[['g1', 'g2']].wrapper.ndim == 2
assert r_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_filtering(self):
filtered_records = vbt.Records(wrapper, records_arr[[0, -1]])
record_arrays_close(
filtered_records.values,
np.array([(0, 0, 0, 10., 21.), (8, 2, 2, 10., 21.)], dtype=example_dt)
)
# a
record_arrays_close(
filtered_records['a'].values,
np.array([(0, 0, 0, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['a'].map_field('some_field1').id_arr,
np.array([0])
)
assert filtered_records['a'].map_field('some_field1').min() == 10.
assert filtered_records['a'].count() == 1.
# b
record_arrays_close(
filtered_records['b'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['b'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['b'].map_field('some_field1').min())
assert filtered_records['b'].count() == 0.
# c
record_arrays_close(
filtered_records['c'].values,
np.array([(8, 0, 2, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['c'].map_field('some_field1').id_arr,
np.array([8])
)
assert filtered_records['c'].map_field('some_field1').min() == 10.
assert filtered_records['c'].count() == 1.
# d
record_arrays_close(
filtered_records['d'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['d'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['d'].map_field('some_field1').min())
assert filtered_records['d'].count() == 0.
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count'
], dtype='object')
pd.testing.assert_series_equal(
records.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 2.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
records.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 3
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
records.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 6
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c')
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records.stats(column='g2', group_by=group_by)
)
stats_df = records.stats(agg_func=None)
assert stats_df.shape == (4, 4)
pd.testing.assert_index_equal(stats_df.index, records.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# ranges.py ############# #
ts = pd.DataFrame({
'a': [1, -1, 3, -1, 5, -1],
'b': [-1, -1, -1, 4, 5, 6],
'c': [1, 2, 3, -1, -1, -1],
'd': [-1, -1, -1, -1, -1, -1]
}, index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6)
])
ranges = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days'))
ranges_grouped = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days', group_by=group_by))
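# Ranges.from_ts treats -1 as "no value": each run of non -1 values becomes one
# range record. E.g. column 'a' yields three one-day closed ranges, 'b' one range
# still open at the end of the index, and 'd' (all -1) yields no records at all.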
class TestRanges:
def test_mapped_fields(self):
for name in range_dt.names:
np.testing.assert_array_equal(
getattr(ranges, name).values,
ranges.values[name]
)
def test_from_ts(self):
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 1, 1), (1, 0, 2, 3, 1), (2, 0, 4, 5, 1), (3, 1, 3, 5, 0), (4, 2, 0, 3, 1)
], dtype=range_dt)
)
assert ranges.wrapper.freq == day_dt
pd.testing.assert_index_equal(
ranges_grouped.wrapper.grouper.group_by,
group_by
)
def test_records_readable(self):
records_readable = ranges.records_readable
np.testing.assert_array_equal(
records_readable['Range Id'].values,
np.array([
0, 1, 2, 3, 4
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'b', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Start Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-01T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['End Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-06T00:00:00.000000000',
'2020-01-04T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Closed', 'Closed', 'Closed', 'Open', 'Closed'
])
)
def test_to_mask(self):
pd.testing.assert_series_equal(
ranges['a'].to_mask(),
ts['a'] != -1
)
pd.testing.assert_frame_equal(
ranges.to_mask(),
ts != -1
)
pd.testing.assert_frame_equal(
ranges_grouped.to_mask(),
pd.DataFrame(
[
[True, True],
[False, True],
[True, True],
[True, False],
[True, False],
[True, False]
],
index=ts.index,
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_duration(self):
np.testing.assert_array_equal(
ranges['a'].duration.values,
np.array([1, 1, 1])
)
np.testing.assert_array_equal(
ranges.duration.values,
np.array([1, 1, 1, 3, 3])
)
def test_avg_duration(self):
assert ranges['a'].avg_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
ranges.avg_duration(),
pd.Series(
np.array([86400000000000, 259200000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('avg_duration')
)
pd.testing.assert_series_equal(
ranges_grouped.avg_duration(),
pd.Series(
np.array([129600000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_duration')
)
def test_max_duration(self):
assert ranges['a'].max_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
ranges.max_duration(),
pd.Series(
np.array([86400000000000, 259200000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('max_duration')
)
pd.testing.assert_series_equal(
ranges_grouped.max_duration(),
pd.Series(
np.array([259200000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_duration')
)
def test_coverage(self):
assert ranges['a'].coverage() == 0.5
pd.testing.assert_series_equal(
ranges.coverage(),
pd.Series(
np.array([0.5, 0.5, 0.5, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.coverage(),
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage()
)
pd.testing.assert_series_equal(
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage(overlapping=True),
pd.Series(
np.array([1.0, 1.0, 1.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.coverage(normalize=False),
pd.Series(
np.array([3.0, 3.0, 3.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage(overlapping=True, normalize=False),
pd.Series(
np.array([3.0, 3.0, 3.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges_grouped.coverage(),
pd.Series(
np.array([0.4166666666666667, 0.25]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges_grouped.coverage(),
ranges_grouped.replace(records_arr=np.repeat(ranges_grouped.values, 2)).coverage()
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Coverage', 'Overlap Coverage',
'Total Records', 'Duration: Min', 'Duration: Median', 'Duration: Max',
'Duration: Mean', 'Duration: Std'
], dtype='object')
pd.testing.assert_series_equal(
ranges.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'), 1.25, pd.Timedelta('2 days 08:00:00'),
pd.Timedelta('2 days 08:00:00'), pd.Timedelta('2 days 08:00:00'),
pd.Timedelta('2 days 08:00:00'), pd.Timedelta('0 days 00:00:00')
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
ranges.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'), 3, pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('0 days 00:00:00')
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
ranges.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('5 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), 4, pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('1 days 12:00:00'), pd.Timedelta('1 days 00:00:00')
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
ranges['c'].stats(),
ranges.stats(column='c')
)
pd.testing.assert_series_equal(
ranges['c'].stats(),
ranges.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
ranges_grouped['g2'].stats(),
ranges_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
ranges_grouped['g2'].stats(),
ranges.stats(column='g2', group_by=group_by)
)
stats_df = ranges.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, ranges.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# drawdowns.py ############# #
ts2 = pd.DataFrame({
'a': [2, 1, 3, 1, 4, 1],
'b': [1, 2, 1, 3, 1, 4],
'c': [1, 2, 3, 2, 1, 2],
'd': [1, 2, 3, 4, 5, 6]
}, index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6)
])
drawdowns = vbt.Drawdowns.from_ts(ts2, wrapper_kwargs=dict(freq='1 days'))
drawdowns_grouped = vbt.Drawdowns.from_ts(ts2, wrapper_kwargs=dict(freq='1 days', group_by=group_by))
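# Drawdowns.from_ts records one drawdown per peak -> valley -> recovery episode.
# E.g. column 'a' peaks at 2, dips to 1 and recovers to 3 (a -50% drawdown marked
# 'Recovered'), while the final dips in 'a' and 'c' remain 'Active' at the end.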
class TestDrawdowns:
def test_mapped_fields(self):
for name in drawdown_dt.names:
np.testing.assert_array_equal(
getattr(drawdowns, name).values,
drawdowns.values[name]
)
def test_ts(self):
pd.testing.assert_frame_equal(
drawdowns.ts,
ts2
)
pd.testing.assert_series_equal(
drawdowns['a'].ts,
ts2['a']
)
pd.testing.assert_frame_equal(
drawdowns_grouped['g1'].ts,
ts2[['a', 'b']]
)
assert drawdowns.replace(ts=None)['a'].ts is None
def test_from_ts(self):
record_arrays_close(
drawdowns.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1),
(2, 0, 4, 5, 5, 5, 4.0, 1.0, 1.0, 0), (3, 1, 1, 2, 2, 3, 2.0, 1.0, 3.0, 1),
(4, 1, 3, 4, 4, 5, 3.0, 1.0, 4.0, 1), (5, 2, 2, 3, 4, 5, 3.0, 1.0, 2.0, 0)
], dtype=drawdown_dt)
)
assert drawdowns.wrapper.freq == day_dt
pd.testing.assert_index_equal(
drawdowns_grouped.wrapper.grouper.group_by,
group_by
)
def test_records_readable(self):
records_readable = drawdowns.records_readable
np.testing.assert_array_equal(
records_readable['Drawdown Id'].values,
np.array([
0, 1, 2, 3, 4, 5
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'b', 'b', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Peak Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-02T00:00:00.000000000',
'2020-01-04T00:00:00.000000000', '2020-01-03T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Start Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-04T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Valley Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-05T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['End Timestamp'].values,
np.array([
'2020-01-03T00:00:00.000000000', '2020-01-05T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-06T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Peak Value'].values,
np.array([
2., 3., 4., 2., 3., 3.
])
)
np.testing.assert_array_equal(
records_readable['Valley Value'].values,
np.array([
1., 1., 1., 1., 1., 1.
])
)
np.testing.assert_array_equal(
records_readable['End Value'].values,
np.array([
3., 4., 1., 3., 4., 2.
])
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Recovered', 'Recovered', 'Active', 'Recovered', 'Recovered', 'Active'
])
)
def test_drawdown(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].drawdown.values,
np.array([-0.5, -0.66666667, -0.75])
)
np.testing.assert_array_almost_equal(
drawdowns.drawdown.values,
np.array([-0.5, -0.66666667, -0.75, -0.5, -0.66666667, -0.66666667])
)
pd.testing.assert_frame_equal(
drawdowns.drawdown.to_pd(),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[-0.5, np.nan, np.nan, np.nan],
[np.nan, -0.5, np.nan, np.nan],
[-0.66666669, np.nan, np.nan, np.nan],
[-0.75, -0.66666669, -0.66666669, np.nan]
]),
index=ts2.index,
columns=ts2.columns
)
)
def test_avg_drawdown(self):
assert drawdowns['a'].avg_drawdown() == -0.6388888888888888
pd.testing.assert_series_equal(
drawdowns.avg_drawdown(),
pd.Series(
np.array([-0.63888889, -0.58333333, -0.66666667, np.nan]),
index=wrapper.columns
).rename('avg_drawdown')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_drawdown(),
pd.Series(
np.array([-0.6166666666666666, -0.6666666666666666]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_drawdown')
)
def test_max_drawdown(self):
assert drawdowns['a'].max_drawdown() == -0.75
pd.testing.assert_series_equal(
drawdowns.max_drawdown(),
pd.Series(
np.array([-0.75, -0.66666667, -0.66666667, np.nan]),
index=wrapper.columns
).rename('max_drawdown')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_drawdown(),
pd.Series(
np.array([-0.75, -0.6666666666666666]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_drawdown')
)
def test_recovery_return(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_return.values,
np.array([2., 3., 0.])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_return.values,
np.array([2., 3., 0., 2., 3., 1.])
)
pd.testing.assert_frame_equal(
drawdowns.recovery_return.to_pd(),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[2.0, np.nan, np.nan, np.nan],
[np.nan, 2.0, np.nan, np.nan],
[3.0, np.nan, np.nan, np.nan],
[0.0, 3.0, 1.0, np.nan]
]),
index=ts2.index,
columns=ts2.columns
)
)
def test_avg_recovery_return(self):
assert drawdowns['a'].avg_recovery_return() == 1.6666666666666667
pd.testing.assert_series_equal(
drawdowns.avg_recovery_return(),
pd.Series(
np.array([1.6666666666666667, 2.5, 1.0, np.nan]),
index=wrapper.columns
).rename('avg_recovery_return')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_recovery_return(),
pd.Series(
np.array([2.0, 1.0]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_recovery_return')
)
def test_max_recovery_return(self):
assert drawdowns['a'].max_recovery_return() == 3.0
pd.testing.assert_series_equal(
drawdowns.max_recovery_return(),
pd.Series(
np.array([3.0, 3.0, 1.0, np.nan]),
index=wrapper.columns
).rename('max_recovery_return')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_recovery_return(),
pd.Series(
np.array([3.0, 1.0]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_recovery_return')
)
def test_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].duration.values,
np.array([1, 1, 1])
)
np.testing.assert_array_almost_equal(
drawdowns.duration.values,
np.array([1, 1, 1, 1, 1, 3])
)
def test_avg_duration(self):
assert drawdowns['a'].avg_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.avg_duration(),
pd.Series(
np.array([86400000000000, 86400000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('avg_duration')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_duration(),
pd.Series(
np.array([86400000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_duration')
)
def test_max_duration(self):
assert drawdowns['a'].max_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.max_duration(),
pd.Series(
np.array([86400000000000, 86400000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('max_duration')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_duration(),
pd.Series(
np.array([86400000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_duration')
)
def test_coverage(self):
assert drawdowns['a'].coverage() == 0.5
pd.testing.assert_series_equal(
drawdowns.coverage(),
pd.Series(
np.array([0.5, 0.3333333333333333, 0.5, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
drawdowns_grouped.coverage(),
pd.Series(
np.array([0.4166666666666667, 0.25]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('coverage')
)
def test_decline_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].decline_duration.values,
np.array([1., 1., 1.])
)
np.testing.assert_array_almost_equal(
drawdowns.decline_duration.values,
np.array([1., 1., 1., 1., 1., 2.])
)
def test_recovery_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_duration.values,
np.array([1, 1, 0])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_duration.values,
np.array([1, 1, 0, 1, 1, 1])
)
def test_recovery_duration_ratio(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_duration_ratio.values,
np.array([1., 1., 0.])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_duration_ratio.values,
np.array([1., 1., 0., 1., 1., 0.5])
)
def test_active_records(self):
assert isinstance(drawdowns.active, vbt.Drawdowns)
assert drawdowns.active.wrapper == drawdowns.wrapper
record_arrays_close(
drawdowns['a'].active.values,
np.array([
(2, 0, 4, 5, 5, 5, 4., 1., 1., 0)
], dtype=drawdown_dt)
)
record_arrays_close(
drawdowns['a'].active.values,
drawdowns.active['a'].values
)
record_arrays_close(
drawdowns.active.values,
np.array([
(2, 0, 4, 5, 5, 5, 4.0, 1.0, 1.0, 0), (5, 2, 2, 3, 4, 5, 3.0, 1.0, 2.0, 0)
], dtype=drawdown_dt)
)
def test_recovered_records(self):
assert isinstance(drawdowns.recovered, vbt.Drawdowns)
assert drawdowns.recovered.wrapper == drawdowns.wrapper
record_arrays_close(
drawdowns['a'].recovered.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1)
], dtype=drawdown_dt)
)
record_arrays_close(
drawdowns['a'].recovered.values,
drawdowns.recovered['a'].values
)
record_arrays_close(
drawdowns.recovered.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1),
(3, 1, 1, 2, 2, 3, 2.0, 1.0, 3.0, 1), (4, 1, 3, 4, 4, 5, 3.0, 1.0, 4.0, 1)
], dtype=drawdown_dt)
)
def test_active_drawdown(self):
assert drawdowns['a'].active_drawdown() == -0.75
pd.testing.assert_series_equal(
drawdowns.active_drawdown(),
pd.Series(
np.array([-0.75, np.nan, -0.3333333333333333, np.nan]),
index=wrapper.columns
).rename('active_drawdown')
)
with pytest.raises(Exception):
drawdowns_grouped.active_drawdown()
def test_active_duration(self):
assert drawdowns['a'].active_duration() == np.timedelta64(86400000000000)
pd.testing.assert_series_equal(
drawdowns.active_duration(),
pd.Series(
np.array([86400000000000, 'NaT', 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('active_duration')
)
with pytest.raises(Exception):
drawdowns_grouped.active_duration()
def test_active_recovery(self):
assert drawdowns['a'].active_recovery() == 0.
pd.testing.assert_series_equal(
drawdowns.active_recovery(),
pd.Series(
np.array([0., np.nan, 0.5, np.nan]),
index=wrapper.columns
).rename('active_recovery')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery()
def test_active_recovery_return(self):
assert drawdowns['a'].active_recovery_return() == 0.
pd.testing.assert_series_equal(
drawdowns.active_recovery_return(),
pd.Series(
np.array([0., np.nan, 1., np.nan]),
index=wrapper.columns
).rename('active_recovery_return')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery_return()
def test_active_recovery_duration(self):
assert drawdowns['a'].active_recovery_duration() == pd.Timedelta('0 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.active_recovery_duration(),
pd.Series(
np.array([0, 'NaT', 86400000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('active_recovery_duration')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery_duration()
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Coverage [%]', 'Total Records',
'Total Recovered Drawdowns', 'Total Active Drawdowns',
'Active Drawdown [%]', 'Active Duration', 'Active Recovery [%]',
'Active Recovery Return [%]', 'Active Recovery Duration',
'Max Drawdown [%]', 'Avg Drawdown [%]', 'Max Drawdown Duration',
'Avg Drawdown Duration', 'Max Recovery Return [%]',
'Avg Recovery Return [%]', 'Max Recovery Duration',
'Avg Recovery Duration', 'Avg Recovery Duration Ratio'
], dtype='object')
pd.testing.assert_series_equal(
drawdowns.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 44.444444444444436, 1.5, 1.0, 0.5,
54.166666666666664, pd.Timedelta('2 days 00:00:00'), 25.0, 50.0,
pd.Timedelta('0 days 12:00:00'), 66.66666666666666, 58.33333333333333,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 300.0, 250.0,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
drawdowns.stats(settings=dict(incl_active=True)),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 44.444444444444436, 1.5, 1.0, 0.5,
54.166666666666664, pd.Timedelta('2 days 00:00:00'), 25.0, 50.0,
pd.Timedelta('0 days 12:00:00'), 69.44444444444444, 62.962962962962955,
pd.Timedelta('1 days 16:00:00'), pd.Timedelta('1 days 16:00:00'), 300.0, 250.0,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
drawdowns.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 50.0, 3, 2, 1, 75.0, pd.Timedelta('1 days 00:00:00'),
0.0, 0.0, pd.Timedelta('0 days 00:00:00'), 66.66666666666666, 58.33333333333333,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 300.0, 250.0,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
drawdowns.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 41.66666666666667, 5, 4, 1, 66.66666666666666,
58.33333333333333, pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'),
300.0, 250.0, pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=pd.Index([
'Start', 'End', 'Period', 'Coverage [%]', 'Total Records',
'Total Recovered Drawdowns', 'Total Active Drawdowns',
'Max Drawdown [%]', 'Avg Drawdown [%]', 'Max Drawdown Duration',
'Avg Drawdown Duration', 'Max Recovery Return [%]',
'Avg Recovery Return [%]', 'Max Recovery Duration',
'Avg Recovery Duration', 'Avg Recovery Duration Ratio'
], dtype='object'),
name='g1'
)
)
pd.testing.assert_series_equal(
drawdowns['c'].stats(),
drawdowns.stats(column='c')
)
pd.testing.assert_series_equal(
drawdowns['c'].stats(),
drawdowns.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
drawdowns_grouped['g2'].stats(),
drawdowns_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
drawdowns_grouped['g2'].stats(),
drawdowns.stats(column='g2', group_by=group_by)
)
stats_df = drawdowns.stats(agg_func=None)
assert stats_df.shape == (4, 21)
pd.testing.assert_index_equal(stats_df.index, drawdowns.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# orders.py ############# #
close = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6),
datetime(2020, 1, 7),
datetime(2020, 1, 8)
]).vbt.tile(4, keys=['a', 'b', 'c', 'd'])
size = np.full(close.shape, np.nan, dtype=np.float_)
size[:, 0] = [1, 0.1, -1, -0.1, np.nan, 1, -1, 2]
size[:, 1] = [-1, -0.1, 1, 0.1, np.nan, -1, 1, -2]
size[:, 2] = [1, 0.1, -1, -0.1, np.nan, 1, -2, 2]
orders = vbt.Portfolio.from_orders(close, size, fees=0.01, freq='1 days').orders
orders_grouped = orders.regroup(group_by)
class TestOrders:
def test_mapped_fields(self):
for name in order_dt.names:
np.testing.assert_array_equal(
getattr(orders, name).values,
orders.values[name]
)
def test_close(self):
pd.testing.assert_frame_equal(
orders.close,
close
)
pd.testing.assert_series_equal(
orders['a'].close,
close['a']
)
pd.testing.assert_frame_equal(
orders_grouped['g1'].close,
close[['a', 'b']]
)
assert orders.replace(close=None)['a'].close is None
def test_records_readable(self):
records_readable = orders.records_readable
np.testing.assert_array_equal(
records_readable['Order Id'].values,
np.array([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20
])
)
np.testing.assert_array_equal(
records_readable['Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-02T00:00:00.000000000',
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-07T00:00:00.000000000',
'2020-01-08T00:00:00.000000000', '2020-01-01T00:00:00.000000000',
'2020-01-02T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-04T00:00:00.000000000', '2020-01-06T00:00:00.000000000',
'2020-01-07T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-01T00:00:00.000000000', '2020-01-02T00:00:00.000000000',
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-07T00:00:00.000000000',
'2020-01-08T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'b', 'b',
'b', 'c', 'c', 'c', 'c', 'c', 'c', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Size'].values,
np.array([
1.0, 0.1, 1.0, 0.1, 1.0, 1.0, 2.0, 1.0, 0.1, 1.0, 0.1, 1.0, 1.0,
2.0, 1.0, 0.1, 1.0, 0.1, 1.0, 2.0, 2.0
])
)
np.testing.assert_array_equal(
records_readable['Price'].values,
np.array([
1.0, 2.0, 3.0, 4.0, 6.0, 7.0, 8.0, 1.0, 2.0, 3.0, 4.0, 6.0, 7.0,
8.0, 1.0, 2.0, 3.0, 4.0, 6.0, 7.0, 8.0
])
)
np.testing.assert_array_equal(
records_readable['Fees'].values,
np.array([
0.01, 0.002, 0.03, 0.004, 0.06, 0.07, 0.16, 0.01, 0.002, 0.03,
0.004, 0.06, 0.07, 0.16, 0.01, 0.002, 0.03, 0.004, 0.06, 0.14,
0.16
])
)
np.testing.assert_array_equal(
records_readable['Side'].values,
np.array([
'Buy', 'Buy', 'Sell', 'Sell', 'Buy', 'Sell', 'Buy', 'Sell', 'Sell',
'Buy', 'Buy', 'Sell', 'Buy', 'Sell', 'Buy', 'Buy', 'Sell', 'Sell',
'Buy', 'Sell', 'Buy'
])
)
def test_buy_records(self):
assert isinstance(orders.buy, vbt.Orders)
assert orders.buy.wrapper == orders.wrapper
record_arrays_close(
orders['a'].buy.values,
np.array([
(0, 0, 0, 1., 1., 0.01, 0), (1, 0, 1, 0.1, 2., 0.002, 0),
(4, 0, 5, 1., 6., 0.06, 0), (6, 0, 7, 2., 8., 0.16, 0)
], dtype=order_dt)
)
record_arrays_close(
orders['a'].buy.values,
orders.buy['a'].values
)
record_arrays_close(
orders.buy.values,
np.array([
(0, 0, 0, 1., 1., 0.01, 0), (1, 0, 1, 0.1, 2., 0.002, 0),
(4, 0, 5, 1., 6., 0.06, 0), (6, 0, 7, 2., 8., 0.16, 0),
(9, 1, 2, 1., 3., 0.03, 0), (10, 1, 3, 0.1, 4., 0.004, 0),
(12, 1, 6, 1., 7., 0.07, 0), (14, 2, 0, 1., 1., 0.01, 0),
(15, 2, 1, 0.1, 2., 0.002, 0), (18, 2, 5, 1., 6., 0.06, 0),
(20, 2, 7, 2., 8., 0.16, 0)
], dtype=order_dt)
)
def test_sell_records(self):
assert isinstance(orders.sell, vbt.Orders)
assert orders.sell.wrapper == orders.wrapper
record_arrays_close(
orders['a'].sell.values,
np.array([
(2, 0, 2, 1., 3., 0.03, 1), (3, 0, 3, 0.1, 4., 0.004, 1),
(5, 0, 6, 1., 7., 0.07, 1)
], dtype=order_dt)
)
record_arrays_close(
orders['a'].sell.values,
orders.sell['a'].values
)
record_arrays_close(
orders.sell.values,
np.array([
(2, 0, 2, 1., 3., 0.03, 1), (3, 0, 3, 0.1, 4., 0.004, 1),
(5, 0, 6, 1., 7., 0.07, 1), (7, 1, 0, 1., 1., 0.01, 1),
(8, 1, 1, 0.1, 2., 0.002, 1), (11, 1, 5, 1., 6., 0.06, 1),
(13, 1, 7, 2., 8., 0.16, 1), (16, 2, 2, 1., 3., 0.03, 1),
(17, 2, 3, 0.1, 4., 0.004, 1), (19, 2, 6, 2., 7., 0.14, 1)
], dtype=order_dt)
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Total Records', 'Total Buy Orders', 'Total Sell Orders',
'Min Size', 'Max Size', 'Avg Size', 'Avg Buy Size', 'Avg Sell Size',
'Avg Buy Price', 'Avg Sell Price', 'Total Fees', 'Min Fees', 'Max Fees',
'Avg Fees', 'Avg Buy Fees', 'Avg Sell Fees'
], dtype='object')
pd.testing.assert_series_equal(
orders.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), 5.25, 2.75, 2.5, 0.10000000000000002, 2.0,
0.9333333333333335, 0.9166666666666666, 0.9194444444444446, 4.388888888888889,
4.527777777777779, 0.26949999999999996, 0.002, 0.16, 0.051333333333333335,
0.050222222222222224, 0.050222222222222224
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
orders.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), 7, 4, 3, 0.1, 2.0, 0.8857142857142858,
1.025, 0.7000000000000001, 4.25, 4.666666666666667, 0.33599999999999997,
0.002, 0.16, 0.047999999999999994, 0.057999999999999996, 0.03466666666666667
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
orders.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), 14, 7, 7, 0.1, 2.0, 0.8857142857142858,
0.8857142857142856, 0.8857142857142858, 4.428571428571429, 4.428571428571429,
0.672, 0.002, 0.16, 0.048, 0.048, 0.047999999999999994
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
orders['c'].stats(),
orders.stats(column='c')
)
pd.testing.assert_series_equal(
orders['c'].stats(),
orders.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
orders_grouped['g2'].stats(),
orders_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
orders_grouped['g2'].stats(),
orders.stats(column='g2', group_by=group_by)
)
stats_df = orders.stats(agg_func=None)
assert stats_df.shape == (4, 19)
pd.testing.assert_index_equal(stats_df.index, orders.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# trades.py ############# #
exit_trades = vbt.ExitTrades.from_orders(orders)
exit_trades_grouped = vbt.ExitTrades.from_orders(orders_grouped)
class TestExitTrades:
def test_mapped_fields(self):
for name in trade_dt.names:
if name == 'return':
np.testing.assert_array_equal(
getattr(exit_trades, 'returns').values,
exit_trades.values[name]
)
else:
np.testing.assert_array_equal(
getattr(exit_trades, name).values,
exit_trades.values[name]
)
def test_close(self):
pd.testing.assert_frame_equal(
exit_trades.close,
close
)
pd.testing.assert_series_equal(
exit_trades['a'].close,
close['a']
)
pd.testing.assert_frame_equal(
exit_trades_grouped['g1'].close,
close[['a', 'b']]
)
assert exit_trades.replace(close=None)['a'].close is None
def test_records_arr(self):
record_arrays_close(
exit_trades.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1),
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2),
(4, 1, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, -1.95, -1.7875, 1, 1, 3),
(5, 1, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, -0.296, -2.71333333, 1, 1, 3),
(6, 1, 1., 5, 6., 0.06, 6, 7., 0.07, -1.13, -0.18833333, 1, 1, 4),
(7, 1, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 1, 0, 5),
(8, 2, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 6),
(9, 2, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 6),
(10, 2, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 7),
(11, 2, 1., 6, 7., 0.07, 7, 8., 0.08, -1.15, -0.16428571, 1, 1, 8),
(12, 2, 1., 7, 8., 0.08, 7, 8., 0., -0.08, -0.01, 0, 0, 9)
], dtype=trade_dt)
)
reversed_col_orders = orders.replace(records_arr=np.concatenate((
orders.values[orders.values['col'] == 2],
orders.values[orders.values['col'] == 1],
orders.values[orders.values['col'] == 0]
)))
record_arrays_close(
vbt.ExitTrades.from_orders(reversed_col_orders).values,
exit_trades.values
)
def test_records_readable(self):
records_readable = exit_trades.records_readable
np.testing.assert_array_equal(
records_readable['Exit Trade Id'].values,
np.array([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'c', 'c', 'c', 'c', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Size'].values,
np.array([
1.0, 0.10000000000000009, 1.0, 2.0, 1.0, 0.10000000000000009, 1.0,
2.0, 1.0, 0.10000000000000009, 1.0, 1.0, 1.0
])
)
np.testing.assert_array_equal(
records_readable['Entry Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-01T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-01T00:00:00.000000000', '2020-01-01T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-01T00:00:00.000000000', '2020-01-01T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-07T00:00:00.000000000',
'2020-01-08T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Avg Entry Price'].values,
np.array([
1.0909090909090908, 1.0909090909090908, 6.0, 8.0,
1.0909090909090908, 1.0909090909090908, 6.0, 8.0,
1.0909090909090908, 1.0909090909090908, 6.0, 7.0, 8.0
])
)
np.testing.assert_array_equal(
records_readable['Entry Fees'].values,
np.array([
0.010909090909090908, 0.0010909090909090918, 0.06, 0.16,
0.010909090909090908, 0.0010909090909090918, 0.06, 0.16,
0.010909090909090908, 0.0010909090909090918, 0.06, 0.07, 0.08
])
)
np.testing.assert_array_equal(
records_readable['Exit Timestamp'].values,
np.array([
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-07T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-07T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-07T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-08T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Avg Exit Price'].values,
np.array([
3.0, 4.0, 7.0, 8.0, 3.0, 4.0, 7.0, 8.0, 3.0, 4.0, 7.0, 8.0, 8.0
])
)
np.testing.assert_array_equal(
records_readable['Exit Fees'].values,
np.array([
0.03, 0.004, 0.07, 0.0, 0.03, 0.004, 0.07, 0.0, 0.03, 0.004, 0.07, 0.08, 0.0
])
)
np.testing.assert_array_equal(
records_readable['PnL'].values,
np.array([
1.8681818181818182, 0.2858181818181821, 0.8699999999999999, -0.16,
-1.9500000000000002, -0.29600000000000026, -1.1300000000000001,
-0.16, 1.8681818181818182, 0.2858181818181821, 0.8699999999999999,
-1.1500000000000001, -0.08
])
)
np.testing.assert_array_equal(
records_readable['Return'].values,
np.array([
1.7125000000000001, 2.62, 0.145, -0.01, -1.7875000000000003,
-2.7133333333333334, -0.18833333333333335, -0.01,
1.7125000000000001, 2.62, 0.145, -0.1642857142857143, -0.01
])
)
np.testing.assert_array_equal(
records_readable['Direction'].values,
np.array([
'Long', 'Long', 'Long', 'Long', 'Short', 'Short', 'Short',
'Short', 'Long', 'Long', 'Long', 'Short', 'Long'
])
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Closed', 'Closed', 'Closed', 'Open', 'Closed', 'Closed', 'Closed',
'Open', 'Closed', 'Closed', 'Closed', 'Closed', 'Open'
])
)
np.testing.assert_array_equal(
records_readable['Position Id'].values,
np.array([
0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9
])
)
def test_duration(self):
np.testing.assert_array_almost_equal(
exit_trades['a'].duration.values,
np.array([2, 3, 1, 1])
)
np.testing.assert_array_almost_equal(
exit_trades.duration.values,
np.array([2, 3, 1, 1, 2, 3, 1, 1, 2, 3, 1, 1, 1])
)
def test_winning_records(self):
assert isinstance(exit_trades.winning, vbt.ExitTrades)
assert exit_trades.winning.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].winning.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1)
], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].winning.values,
exit_trades.winning['a'].values
)
record_arrays_close(
exit_trades.winning.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1),
(8, 2, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 6),
(9, 2, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 6),
(10, 2, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 7)
], dtype=trade_dt)
)
def test_losing_records(self):
assert isinstance(exit_trades.losing, vbt.ExitTrades)
assert exit_trades.losing.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].losing.values,
np.array([
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2)
], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].losing.values,
exit_trades.losing['a'].values
)
record_arrays_close(
exit_trades.losing.values,
np.array([
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2),
(4, 1, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, -1.95, -1.7875, 1, 1, 3),
(5, 1, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, -0.296, -2.71333333, 1, 1, 3),
(6, 1, 1., 5, 6., 0.06, 6, 7., 0.07, -1.13, -0.18833333, 1, 1, 4),
(7, 1, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 1, 0, 5),
(11, 2, 1., 6, 7., 0.07, 7, 8., 0.08, -1.15, -0.16428571, 1, 1, 8),
(12, 2, 1., 7, 8., 0.08, 7, 8., 0., -0.08, -0.01, 0, 0, 9)
], dtype=trade_dt)
)
def test_win_rate(self):
assert exit_trades['a'].win_rate() == 0.75
pd.testing.assert_series_equal(
exit_trades.win_rate(),
pd.Series(
np.array([0.75, 0., 0.6, np.nan]),
index=close.columns
).rename('win_rate')
)
pd.testing.assert_series_equal(
exit_trades_grouped.win_rate(),
pd.Series(
np.array([0.375, 0.6]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('win_rate')
)
def test_winning_streak(self):
np.testing.assert_array_almost_equal(
exit_trades['a'].winning_streak.values,
np.array([1, 2, 3, 0])
)
np.testing.assert_array_almost_equal(
exit_trades.winning_streak.values,
np.array([1, 2, 3, 0, 0, 0, 0, 0, 1, 2, 3, 0, 0])
)
def test_losing_streak(self):
np.testing.assert_array_almost_equal(
exit_trades['a'].losing_streak.values,
np.array([0, 0, 0, 1])
)
np.testing.assert_array_almost_equal(
exit_trades.losing_streak.values,
np.array([0, 0, 0, 1, 1, 2, 3, 4, 0, 0, 0, 1, 2])
)
def test_profit_factor(self):
assert exit_trades['a'].profit_factor() == 18.9
pd.testing.assert_series_equal(
exit_trades.profit_factor(),
pd.Series(
np.array([18.9, 0., 2.45853659, np.nan]),
index=ts2.columns
).rename('profit_factor')
)
pd.testing.assert_series_equal(
exit_trades_grouped.profit_factor(),
pd.Series(
np.array([0.81818182, 2.45853659]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('profit_factor')
)
def test_expectancy(self):
assert exit_trades['a'].expectancy() == 0.716
pd.testing.assert_series_equal(
exit_trades.expectancy(),
pd.Series(
np.array([0.716, -0.884, 0.3588, np.nan]),
index=ts2.columns
).rename('expectancy')
)
pd.testing.assert_series_equal(
exit_trades_grouped.expectancy(),
pd.Series(
np.array([-0.084, 0.3588]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('expectancy')
)
def test_sqn(self):
assert exit_trades['a'].sqn() == 1.634155521947584
pd.testing.assert_series_equal(
exit_trades.sqn(),
pd.Series(
np.array([1.63415552, -2.13007307, 0.71660403, np.nan]),
index=ts2.columns
).rename('sqn')
)
pd.testing.assert_series_equal(
exit_trades_grouped.sqn(),
pd.Series(
np.array([-0.20404671, 0.71660403]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('sqn')
)
def test_long_records(self):
assert isinstance(exit_trades.long, vbt.ExitTrades)
assert exit_trades.long.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].long.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1),
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2)
], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].long.values,
exit_trades.long['a'].values
)
record_arrays_close(
exit_trades.long.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1),
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2),
(8, 2, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 6),
(9, 2, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 6),
(10, 2, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 7),
(12, 2, 1., 7, 8., 0.08, 7, 8., 0., -0.08, -0.01, 0, 0, 9)
], dtype=trade_dt)
)
def test_short_records(self):
assert isinstance(exit_trades.short, vbt.ExitTrades)
assert exit_trades.short.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].short.values,
np.array([], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].short.values,
exit_trades.short['a'].values
)
record_arrays_close(
exit_trades.short.values,
np.array([
(4, 1, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, -1.95, -1.7875, 1, 1, 3),
(5, 1, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, -0.296, -2.71333333, 1, 1, 3),
(6, 1, 1., 5, 6., 0.06, 6, 7., 0.07, -1.13, -0.18833333, 1, 1, 4),
(7, 1, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 1, 0, 5),
(11, 2, 1., 6, 7., 0.07, 7, 8., 0.08, -1.15, -0.16428571, 1, 1, 8)
], dtype=trade_dt)
)
def test_open_records(self):
assert isinstance(exit_trades.open, vbt.ExitTrades)
assert exit_trades.open.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].open.values,
np.array([
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2)
], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].open.values,
exit_trades.open['a'].values
)
record_arrays_close(
exit_trades.open.values,
np.array([
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2),
(7, 1, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 1, 0, 5),
(12, 2, 1., 7, 8., 0.08, 7, 8., 0., -0.08, -0.01, 0, 0, 9)
], dtype=trade_dt)
)
def test_closed_records(self):
assert isinstance(exit_trades.closed, vbt.ExitTrades)
assert exit_trades.closed.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].closed.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1)
], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].closed.values,
exit_trades.closed['a'].values
)
record_arrays_close(
exit_trades.closed.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1),
(4, 1, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, -1.95, -1.7875, 1, 1, 3),
(5, 1, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, -0.296, -2.71333333, 1, 1, 3),
(6, 1, 1., 5, 6., 0.06, 6, 7., 0.07, -1.13, -0.18833333, 1, 1, 4),
(8, 2, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 6),
(9, 2, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 6),
(10, 2, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 7),
(11, 2, 1., 6, 7., 0.07, 7, 8., 0.08, -1.15, -0.16428571, 1, 1, 8)
], dtype=trade_dt)
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'First Trade Start', 'Last Trade End',
'Coverage', 'Overlap Coverage', 'Total Records', 'Total Long Trades',
'Total Short Trades', 'Total Closed Trades', 'Total Open Trades',
'Open Trade PnL', 'Win Rate [%]', 'Max Win Streak', 'Max Loss Streak',
'Best Trade [%]', 'Worst Trade [%]', 'Avg Winning Trade [%]',
'Avg Losing Trade [%]', 'Avg Winning Trade Duration',
'Avg Losing Trade Duration', 'Profit Factor', 'Expectancy', 'SQN'
], dtype='object')
pd.testing.assert_series_equal(
exit_trades.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-08 00:00:00'), pd.Timedelta('5 days 08:00:00'),
pd.Timedelta('2 days 00:00:00'), 3.25, 2.0, 1.25, 2.5, 0.75, -0.1,
58.333333333333336, 2.0, 1.3333333333333333, 168.38888888888889,
-91.08730158730158, 149.25, -86.3670634920635, pd.Timedelta('2 days 00:00:00'),
pd.Timedelta('1 days 12:00:00'), np.inf, 0.11705555555555548, 0.18931590012681135
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
exit_trades.stats(settings=dict(incl_open=True)),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-08 00:00:00'), pd.Timedelta('5 days 08:00:00'),
pd.Timedelta('2 days 00:00:00'), 3.25, 2.0, 1.25, 2.5, 0.75, -0.1,
58.333333333333336, 2.0, 2.3333333333333335, 174.33333333333334,
-96.25396825396825, 149.25, -42.39781746031746, pd.Timedelta('2 days 00:00:00'),
pd.Timedelta('1 days 06:00:00'), 7.11951219512195, 0.06359999999999993, 0.07356215977397455
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
exit_trades.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-08 00:00:00'), pd.Timedelta('5 days 00:00:00'),
pd.Timedelta('2 days 00:00:00'), 4, 4, 0, 3, 1, -0.16, 100.0, 3, 0,
262.0, 14.499999999999998, 149.25, np.nan, pd.Timedelta('2 days 00:00:00'),
pd.NaT, np.inf, 1.008, 2.181955050824476
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
exit_trades.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-08 00:00:00'), pd.Timedelta('5 days 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 8, 4, 4, 6, 2, -0.32, 50.0, 3, 3, 262.0,
-271.3333333333333, 149.25, -156.30555555555557, pd.Timedelta('2 days 00:00:00'),
pd.Timedelta('2 days 00:00:00'), 0.895734597156398, -0.058666666666666756, -0.10439051512510047
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_index_equal(
exit_trades.stats(tags='trades').index,
pd.Index([
'First Trade Start', 'Last Trade End', 'Total Long Trades',
'Total Short Trades', 'Total Closed Trades', 'Total Open Trades',
'Open Trade PnL', 'Win Rate [%]', 'Max Win Streak', 'Max Loss Streak',
'Best Trade [%]', 'Worst Trade [%]', 'Avg Winning Trade [%]',
'Avg Losing Trade [%]', 'Avg Winning Trade Duration',
'Avg Losing Trade Duration', 'Profit Factor', 'Expectancy', 'SQN'
], dtype='object')
)
pd.testing.assert_series_equal(
exit_trades['c'].stats(),
exit_trades.stats(column='c')
)
pd.testing.assert_series_equal(
exit_trades['c'].stats(),
exit_trades.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
exit_trades_grouped['g2'].stats(),
exit_trades_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
exit_trades_grouped['g2'].stats(),
exit_trades.stats(column='g2', group_by=group_by)
)
stats_df = exit_trades.stats(agg_func=None)
assert stats_df.shape == (4, 25)
pd.testing.assert_index_equal(stats_df.index, exit_trades.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
entry_trades = vbt.EntryTrades.from_orders(orders)
entry_trades_grouped = vbt.EntryTrades.from_orders(orders_grouped)
class TestEntryTrades:
def test_records_arr(self):
record_arrays_close(
entry_trades.values,
np.array([
(0, 0, 1.0, 0, 1.0, 0.01, 3, 3.0909090909090904, 0.03090909090909091, 2.05, 2.05, 0, 1, 0),
(1, 0, 0.1, 1, 2.0, 0.002, 3, 3.0909090909090904, 0.003090909090909091,
0.10399999999999998, 0.5199999999999999, 0, 1, 0),
(2, 0, 1.0, 5, 6.0, 0.06, 6, 7.0, 0.07, 0.8699999999999999, 0.145, 0, 1, 1),
(3, 0, 2.0, 7, 8.0, 0.16, 7, 8.0, 0.0, -0.16, -0.01, 0, 0, 2),
(4, 1, 1.0, 0, 1.0, 0.01, 3, 3.0909090909090904, 0.03090909090909091,
-2.131818181818181, -2.131818181818181, 1, 1, 3),
(5, 1, 0.1, 1, 2.0, 0.002, 3, 3.0909090909090904, 0.003090909090909091,
-0.11418181818181816, -0.5709090909090908, 1, 1, 3),
(6, 1, 1.0, 5, 6.0, 0.06, 6, 7.0, 0.07, -1.1300000000000001, -0.18833333333333335, 1, 1, 4),
(7, 1, 2.0, 7, 8.0, 0.16, 7, 8.0, 0.0, -0.16, -0.01, 1, 0, 5),
(8, 2, 1.0, 0, 1.0, 0.01, 3, 3.0909090909090904, 0.03090909090909091, 2.05, 2.05, 0, 1, 6),
(9, 2, 0.1, 1, 2.0, 0.002, 3, 3.0909090909090904, 0.003090909090909091,
0.10399999999999998, 0.5199999999999999, 0, 1, 6),
(10, 2, 1.0, 5, 6.0, 0.06, 6, 7.0, 0.07, 0.8699999999999999, 0.145, 0, 1, 7),
(11, 2, 1.0, 6, 7.0, 0.07, 7, 8.0, 0.08, -1.1500000000000001, -0.1642857142857143, 1, 1, 8),
(12, 2, 1.0, 7, 8.0, 0.08, 7, 8.0, 0.0, -0.08, -0.01, 0, 0, 9)
], dtype=trade_dt)
)
reversed_col_orders = orders.replace(records_arr=np.concatenate((
orders.values[orders.values['col'] == 2],
orders.values[orders.values['col'] == 1],
orders.values[orders.values['col'] == 0]
)))
record_arrays_close(
vbt.EntryTrades.from_orders(reversed_col_orders).values,
entry_trades.values
)
def test_records_readable(self):
records_readable = entry_trades.records_readable
np.testing.assert_array_equal(
records_readable['Entry Trade Id'].values,
np.array([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12
])
)
np.testing.assert_array_equal(
records_readable['Position Id'].values,
np.array([
0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9
])
)
positions = vbt.Positions.from_trades(exit_trades)
positions_grouped = vbt.Positions.from_trades(exit_trades_grouped)
class TestPositions:
def test_records_arr(self):
record_arrays_close(
positions.values,
np.array([
(0, 0, 1.1, 0, 1.09090909, 0.012, 3, 3.09090909, 0.034, 2.154, 1.795, 0, 1, 0),
(1, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1),
(2, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2),
(3, 1, 1.1, 0, 1.09090909, 0.012, 3, 3.09090909, 0.034, -2.246, -1.87166667, 1, 1, 3),
(4, 1, 1., 5, 6., 0.06, 6, 7., 0.07, -1.13, -0.18833333, 1, 1, 4),
(5, 1, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 1, 0, 5),
(6, 2, 1.1, 0, 1.09090909, 0.012, 3, 3.09090909, 0.034, 2.154, 1.795, 0, 1, 6),
(7, 2, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 7),
(8, 2, 1., 6, 7., 0.07, 7, 8., 0.08, -1.15, -0.16428571, 1, 1, 8),
(9, 2, 1., 7, 8., 0.08, 7, 8., 0., -0.08, -0.01, 0, 0, 9)
], dtype=trade_dt)
)
reversed_col_trades = exit_trades.replace(records_arr=np.concatenate((
exit_trades.values[exit_trades.values['col'] == 2],
exit_trades.values[exit_trades.values['col'] == 1],
exit_trades.values[exit_trades.values['col'] == 0]
)))
record_arrays_close(
vbt.Positions.from_trades(reversed_col_trades).values,
positions.values
)
def test_records_readable(self):
records_readable = positions.records_readable
np.testing.assert_array_equal(
records_readable['Position Id'].values,
np.array([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9
])
)
assert 'Parent Id' not in records_readable.columns
# ############# logs.py ############# #
logs = vbt.Portfolio.from_orders(close, size, fees=0.01, log=True, freq='1 days').logs
logs_grouped = logs.regroup(group_by)
class TestLogs:
def test_mapped_fields(self):
for name in log_dt.names:
np.testing.assert_array_equal(
getattr(logs, name).values,
logs.values[name]
)
def test_records_readable(self):
records_readable = logs.records_readable
np.testing.assert_array_equal(
records_readable['Log Id'].values,
np.array([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
28, 29, 30, 31
])
)
np.testing.assert_array_equal(
records_readable['Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-02T00:00:00.000000000',
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-06T00:00:00.000000000',
'2020-01-07T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-01T00:00:00.000000000', '2020-01-02T00:00:00.000000000',
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-06T00:00:00.000000000',
'2020-01-07T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-01T00:00:00.000000000', '2020-01-02T00:00:00.000000000',
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-06T00:00:00.000000000',
'2020-01-07T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-01T00:00:00.000000000', '2020-01-02T00:00:00.000000000',
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-06T00:00:00.000000000',
'2020-01-07T00:00:00.000000000', '2020-01-08T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'c', 'c', 'c', 'c', 'c',
'c', 'c', 'c', 'd', 'd', 'd', 'd', 'd', 'd', 'd', 'd'
])
)
np.testing.assert_array_equal(
records_readable['Group'].values,
np.array([
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
])
)
np.testing.assert_array_equal(
records_readable['Cash'].values,
np.array([
100.0, 98.99, 98.788, 101.758, 102.154, 102.154, 96.094, 103.024, 100.0, 100.99, 101.18799999999999,
98.15799999999999, 97.75399999999999, 97.75399999999999, 103.69399999999999, 96.624, 100.0, 98.99,
98.788, 101.758, 102.154, 102.154, 96.094, 109.954, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0,
100.0
])
)
np.testing.assert_array_equal(
records_readable['Position'].values,
np.array([
0.0, 1.0, 1.1, 0.10000000000000009, 0.0, 0.0, 1.0, 0.0, 0.0, -1.0, -1.1, -0.10000000000000009, 0.0, 0.0,
-1.0, 0.0, 0.0, 1.0, 1.1, 0.10000000000000009, 0.0, 0.0, 1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0
])
)
np.testing.assert_array_equal(
records_readable['Debt'].values,
np.array([
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.2, 0.10909090909090913, 0.0, 0.0, 6.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 7.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
])
)
np.testing.assert_array_equal(
records_readable['Free Cash'].values,
np.array([
100.0, 98.99, 98.788, 101.758, 102.154, 102.154, 96.094, 103.024, 100.0, 98.99, 98.788,
97.93981818181818, 97.754, 97.754, 91.694, 96.624, 100.0, 98.99, 98.788, 101.758, 102.154, 102.154,
96.094, 95.954, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0
])
)
np.testing.assert_array_equal(
records_readable['Val Price'].values,
np.array([
1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 1.0, 2.0, 3.0, 4.0, 5.0,
6.0, 7.0, 8.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0
])
)
np.testing.assert_array_equal(
records_readable['Value'].values,
np.array([
100.0, 100.99, 102.088, 102.158, 102.154, 102.154, 103.094, 103.024, 100.0, 98.99, 97.88799999999999,
97.75799999999998, 97.75399999999999, 97.75399999999999, 96.69399999999999, 96.624, 100.0, 100.99,
102.088, 102.158, 102.154, 102.154, 103.094, 101.954, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0,
100.0
])
)
np.testing.assert_array_equal(
records_readable['Request Size'].values,
np.array([
1.0, 0.1, -1.0, -0.1, np.nan, 1.0, -1.0, 2.0, -1.0, -0.1, 1.0, 0.1, np.nan, -1.0, 1.0, -2.0, 1.0, 0.1,
-1.0, -0.1, np.nan, 1.0, -2.0, 2.0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
])
)
np.testing.assert_array_equal(
records_readable['Request Price'].values,
np.array([
1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 1.0, 2.0, 3.0, 4.0, 5.0,
6.0, 7.0, 8.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0
])
)
np.testing.assert_array_equal(
records_readable['Request Size Type'].values,
np.array([
'Amount', 'Amount', 'Amount', 'Amount', 'Amount', 'Amount', 'Amount', 'Amount', 'Amount', 'Amount',
'Amount', 'Amount', 'Amount', 'Amount', 'Amount', 'Amount', 'Amount', 'Amount', 'Amount', 'Amount',
'Amount', 'Amount', 'Amount', 'Amount', 'Amount', 'Amount', 'Amount', 'Amount', 'Amount', 'Amount',
'Amount', 'Amount'
])
)
np.testing.assert_array_equal(
records_readable['Request Direction'].values,
np.array([
'Both', 'Both', 'Both', 'Both', 'Both', 'Both', 'Both', 'Both', 'Both', 'Both', 'Both', 'Both',
'Both', 'Both', 'Both', 'Both', 'Both', 'Both', 'Both', 'Both', 'Both', 'Both', 'Both', 'Both',
'Both', 'Both', 'Both', 'Both', 'Both', 'Both', 'Both', 'Both'
])
)
np.testing.assert_array_equal(
records_readable['Request Fees'].values,
np.array([
0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01,
0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01
])
)
np.testing.assert_array_equal(
records_readable['Request Fixed Fees'].values,
np.array([
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
])
)
np.testing.assert_array_equal(
records_readable['Request Slippage'].values,
np.array([
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
])
)
np.testing.assert_array_equal(
records_readable['Request Min Size'].values,
np.array([
1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08,
1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08,
1e-08, 1e-08
])
)
np.testing.assert_array_equal(
records_readable['Request Max Size'].values,
np.array([
np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf,
np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf,
np.inf, np.inf, np.inf, np.inf, np.inf, np.inf
])
)
np.testing.assert_array_equal(
records_readable['Request Size Granularity'].values,
np.array([
np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
])
)
np.testing.assert_array_equal(
records_readable['Request Rejection Prob'].values,
np.array([
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
])
)
np.testing.assert_array_equal(
records_readable['Request Lock Cash'].values,
np.array([
False, False, False, False, False, False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False, False, False, False, False, False,
False, False
])
)
np.testing.assert_array_equal(
records_readable['Request Allow Partial'].values,
np.array([
True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True, True, True, True, True, True, True
])
)
np.testing.assert_array_equal(
records_readable['Request Raise Rejection'].values,
np.array([
False, False, False, False, False, False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False, False, False, False, False, False,
False, False
])
)
np.testing.assert_array_equal(
records_readable['Request Log'].values,
np.array([
True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True, True, True, True, True, True, True
])
)
np.testing.assert_array_equal(
records_readable['New Cash'].values,
np.array([
98.99, 98.788, 101.758, 102.154, 102.154, 96.094, 103.024, 86.864, 100.99, 101.18799999999999,
98.15799999999999, 97.75399999999999, 97.75399999999999, 103.69399999999999, 96.624, 112.464, 98.99,
98.788, 101.758, 102.154, 102.154, 96.094, 109.954, 93.794, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0,
100.0, 100.0
])
)
np.testing.assert_array_equal(
records_readable['New Position'].values,
np.array([
1.0, 1.1, 0.10000000000000009, 0.0, 0.0, 1.0, 0.0, 2.0, -1.0, -1.1, -0.10000000000000009, 0.0, 0.0,
-1.0, 0.0, -2.0, 1.0, 1.1, 0.10000000000000009, 0.0, 0.0, 1.0, -1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0
])
)
np.testing.assert_array_equal(
records_readable['New Debt'].values,
np.array([
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.2, 0.10909090909090913, 0.0, 0.0, 6.0, 0.0, 16.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 7.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
])
)
np.testing.assert_array_equal(
records_readable['New Free Cash'].values,
np.array([
98.99, 98.788, 101.758, 102.154, 102.154, 96.094, 103.024, 86.864, 98.99, 98.788, 97.93981818181818,
97.754, 97.754, 91.694, 96.624, 80.464, 98.99, 98.788, 101.758, 102.154, 102.154, 96.094, 95.954,
93.794, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0
])
)
np.testing.assert_array_equal(
records_readable['New Val Price'].values,
np.array([
1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 1.0, 2.0, 3.0, 4.0, 5.0,
6.0, 7.0, 8.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0
])
)
np.testing.assert_array_equal(
records_readable['New Value'].values,
np.array([
100.0, 100.99, 102.088, 102.158, 102.154, 102.154, 103.094, 103.024, 100.0, 98.99, 97.88799999999999,
97.75799999999998, 97.75399999999999, 97.75399999999999, 96.69399999999999, 96.624, 100.0, 100.99,
102.088, 102.158, 102.154, 102.154, 103.094, 101.954, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0,
100.0
])
)
np.testing.assert_array_equal(
records_readable['Result Size'].values,
np.array([
1.0, 0.1, 1.0, 0.1, np.nan, 1.0, 1.0, 2.0, 1.0, 0.1, 1.0, 0.1, np.nan, 1.0, 1.0, 2.0, 1.0, 0.1, 1.0,
0.1, np.nan, 1.0, 2.0, 2.0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
])
)
np.testing.assert_array_equal(
records_readable['Result Price'].values,
np.array([
1.0, 2.0, 3.0, 4.0, np.nan, 6.0, 7.0, 8.0, 1.0, 2.0, 3.0, 4.0, np.nan, 6.0, 7.0, 8.0, 1.0, 2.0, 3.0,
4.0, np.nan, 6.0, 7.0, 8.0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
])
)
np.testing.assert_array_equal(
records_readable['Result Fees'].values,
np.array([
0.01, 0.002, 0.03, 0.004, np.nan, 0.06, 0.07, 0.16, 0.01, 0.002, 0.03, 0.004, np.nan, 0.06, 0.07, 0.16,
0.01, 0.002, 0.03, 0.004, np.nan, 0.06, 0.14, 0.16, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan
])
)
np.testing.assert_array_equal(
records_readable['Result Side'].values,
np.array([
'Buy', 'Buy', 'Sell', 'Sell', None, 'Buy', 'Sell', 'Buy', 'Sell', 'Sell', 'Buy', 'Buy', None, 'Sell',
'Buy', 'Sell', 'Buy', 'Buy', 'Sell', 'Sell', None, 'Buy', 'Sell', 'Buy', None, None, None, None, None,
None, None, None
])
)
np.testing.assert_array_equal(
records_readable['Result Status'].values,
np.array([
'Filled', 'Filled', 'Filled', 'Filled', 'Ignored', 'Filled', 'Filled', 'Filled', 'Filled', 'Filled',
'Filled', 'Filled', 'Ignored', 'Filled', 'Filled', 'Filled', 'Filled', 'Filled', 'Filled', 'Filled',
'Ignored', 'Filled', 'Filled', 'Filled', 'Ignored', 'Ignored', 'Ignored', 'Ignored', 'Ignored',
'Ignored', 'Ignored', 'Ignored'
])
)
np.testing.assert_array_equal(
records_readable['Result Status Info'].values,
np.array([
None, None, None, None, 'SizeNaN', None, None, None, None, None, None, None, 'SizeNaN', None, None,
None, None, None, None, None, 'SizeNaN', None, None, None, 'SizeNaN', 'SizeNaN', 'SizeNaN', 'SizeNaN',
'SizeNaN', 'SizeNaN', 'SizeNaN', 'SizeNaN'
])
)
np.testing.assert_array_equal(
records_readable['Order Id'].values,
np.array([
0, 1, 2, 3, -1, 4, 5, 6, 7, 8, 9, 10, -1, 11, 12, 13, 14, 15, 16, 17, -1, 18, 19, 20, -1, -1, -1, -1,
-1, -1, -1, -1
])
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Total Records', 'Status Counts: None',
'Status Counts: Filled', 'Status Counts: Ignored',
'Status Counts: Rejected', 'Status Info Counts: None',
'Status Info Counts: SizeNaN'
], dtype='object')
pd.testing.assert_series_equal(
logs.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), 8.0, 0.0, 5.25, 2.75, 0.0, 5.25, 2.75
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
logs.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), 8, 0, 7, 1, 0, 7, 1
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
logs.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
                pd.Timedelta('8 days 00:00:00'), 16, 0, 14, 2, 0, 14, 2
            ],
                index=stats_index,
                name='g1'
            )
        )
"""
Prepare training and testing datasets as CSV dictionaries 2.0 (Further modification required for GBM)
Created on 04/26/2019
@author: RH
"""
import os
import pandas as pd
import sklearn.utils as sku
import numpy as np
import re
# get all full paths of images
def image_ids_in(root_dir, ignore=['.DS_Store','dict.csv', 'all.csv']):
ids = []
for id in os.listdir(root_dir):
if id in ignore:
print('Skipping ID:', id)
else:
ids.append(id)
return ids
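# Hedged usage sketch (illustration only): the tile root below is an assumption,
# not a path taken from this script.
def _example_image_ids():
    # Collect per-patient tile folder names, skipping the metadata files listed in `ignore`.
    return image_ids_in('../tiles/')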
# Get intersection of 2 lists
def intersection(lst1, lst2):
lst3 = [value for value in lst1 if value in lst2]
return lst3
# pair tiles of 20x, 10x, 5x of the same area
def paired_tile_ids_in(slide, label, root_dir, sldnum):
dira = os.path.isdir(root_dir + 'level0')
dirb = os.path.isdir(root_dir + 'level1')
dirc = os.path.isdir(root_dir + 'level2')
if dira and dirb and dirc:
        fac = 500  # pixel-to-grid scaling factor used to align tile coordinates across zoom levels
ids = []
for level in range(3):
dirr = root_dir + 'level{}'.format(str(level))
for id in os.listdir(dirr):
if '_{}.png'.format(str(sldnum)) in id:
x = int(float(id.split('x-', 1)[1].split('-', 1)[0]) / fac)
y = int(float(re.split('_', id.split('y-', 1)[1])[0]) / fac)
try:
dup = int(re.split('.p', re.split('_', id.split('y-', 1)[1])[1])[0])
except IndexError:
dup = np.nan
ids.append([slide, label, level, dirr + '/' + id, x, y, dup])
ids = pd.DataFrame(ids, columns=['slide', 'label', 'level', 'path', 'x', 'y', 'dup'])
idsa = ids.loc[ids['level'] == 0]
idsa = idsa.drop(columns=['level'])
idsa = idsa.rename(index=str, columns={"path": "L0path"})
idsb = ids.loc[ids['level'] == 1]
idsb = idsb.drop(columns=['slide', 'label', 'level'])
idsb = idsb.rename(index=str, columns={"path": "L1path"})
idsc = ids.loc[ids['level'] == 2]
idsc = idsc.drop(columns=['slide', 'label', 'level'])
idsc = idsc.rename(index=str, columns={"path": "L2path"})
idsa = pd.merge(idsa, idsb, on=['x', 'y', 'dup'], how='left', validate="many_to_many")
idsa['x'] = idsa['x'] - (idsa['x'] % 2)
idsa['y'] = idsa['y'] - (idsa['y'] % 2)
idsa = pd.merge(idsa, idsc, on=['x', 'y', 'dup'], how='left', validate="many_to_many")
idsa = idsa.drop(columns=['x', 'y', 'dup'])
idsa = idsa.dropna()
idsa = sku.shuffle(idsa)
else:
idsa = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path'])
return idsa
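# Hedged usage sketch (illustration only): the slide id, label, tile path and slide
# number below are hypothetical values, not taken from this script.
def _example_paired_tiles():
    paired = paired_tile_ids_in('C3L-00001', 1, '../tiles/C3L-00001/', '21')
    # Each row links one 20x tile (L0path) to its enclosing 10x (L1path) and 5x (L2path)
    # tile via the bucketed x/y grid positions computed above.
    return paired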
# Get all svs images with their labels as one file; level is the tile resolution level
def big_image_sum(pmd, path='../tiles/', dict_file='../tcia_pathology_slides.tsv',
ref_file='../gbm_all_subtype_collections.2019-10-13.tsv'):
refdict = {'low': 0, 'high': 1, False: 0, True: 1, 'normal': 0, 'short': 1, 'long': 2}
dct = pd.read_csv(dict_file, sep='\t', header=0)
# dct = dct.loc[dct['used_in_proteome'] == True]
ref = pd.read_csv(ref_file, sep='\t', header=0)
ref = ref.dropna(subset=[pmd])
ref[pmd] = ref[pmd].replace(refdict)
big_images = []
if pmd == 'telomere':
normalimg = intersection(ref.loc[ref[pmd] == 0]['case'].tolist(), dct['case_id'].tolist())
normalsld = dct[dct['case_id'].isin(normalimg)]['slide_id'].tolist()
shortimg = intersection(ref.loc[ref[pmd] == 1]['case'].tolist(), dct['case_id'].tolist())
shortsld = dct[dct['case_id'].isin(shortimg)]['slide_id'].tolist()
longimg = intersection(ref.loc[ref[pmd] == 2]['case'].tolist(), dct['case_id'].tolist())
longsld = dct[dct['case_id'].isin(longimg)]['slide_id'].tolist()
for i in normalsld:
sldnum = i.split('-')[-1]
pctnum = i[:-3]
big_images.append([pctnum, 0, path + "{}/".format(pctnum), sldnum])
for i in shortsld:
sldnum = i.split('-')[-1]
pctnum = i[:-3]
big_images.append([pctnum, 1, path + "{}/".format(pctnum), sldnum])
for i in longsld:
sldnum = i.split('-')[-1]
pctnum = i[:-3]
big_images.append([pctnum, 2, path + "{}/".format(pctnum), sldnum])
else:
negimg = intersection(ref.loc[ref[pmd] == 0]['case'].tolist(), dct['case_id'].tolist())
negsld = dct[dct['case_id'].isin(negimg)]['slide_id'].tolist()
posimg = intersection(ref.loc[ref[pmd] == 1]['case'].tolist(), dct['case_id'].tolist())
possld = dct[dct['case_id'].isin(posimg)]['slide_id'].tolist()
for i in negsld:
sldnum = i.split('-')[-1]
pctnum = i[:-3]
big_images.append([pctnum, 0, path + "{}/".format(pctnum), sldnum])
for i in possld:
sldnum = i.split('-')[-1]
pctnum = i[:-3]
big_images.append([pctnum, 1, path + "{}/".format(pctnum), sldnum])
datapd = pd.DataFrame(big_images, columns=['slide', 'label', 'path', 'sldnum'])
return datapd
# separate into training and testing; each class uses the same separation ratio on big images
# test and train csv files contain tiles' path.
def set_sep(alll, path, cls, cut=0.3, batchsize=24):
trlist = []
telist = []
valist = []
CPTAC = alll
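    # split at the slide level so that tiles from one slide never appear in more than one of
    # train/validation/test; per class, the first cut/2 of the shuffled slides go to validation,
    # the next cut/2 to test, and the remainder to training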
for i in range(cls):
subset = CPTAC.loc[CPTAC['label'] == i]
unq = list(subset.slide.unique())
np.random.shuffle(unq)
validation = unq[:int(len(unq) * cut / 2)]
valist.append(subset[subset['slide'].isin(validation)])
test = unq[int(len(unq) * cut / 2):int(len(unq) * cut)]
telist.append(subset[subset['slide'].isin(test)])
train = unq[int(len(unq) * cut):]
trlist.append(subset[subset['slide'].isin(train)])
test = pd.concat(telist)
train = pd.concat(trlist)
validation = pd.concat(valist)
test_tiles = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path'])
train_tiles = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path'])
validation_tiles = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path'])
for idx, row in test.iterrows():
tile_ids = paired_tile_ids_in(row['slide'], row['label'], row['path'], row['sldnum'])
test_tiles = pd.concat([test_tiles, tile_ids])
for idx, row in train.iterrows():
tile_ids = paired_tile_ids_in(row['slide'], row['label'], row['path'], row['sldnum'])
        train_tiles = pd.concat([train_tiles, tile_ids])
from __future__ import print_function, division
#from nilmtk.stats import intersect_many_fast
import matplotlib.pyplot as plt
import pandas as pd
from datetime import timedelta
import matplotlib.dates as mdates
from copy import deepcopy
import numpy as np
# NILMTK imports
from nilmtk.consts import SECS_PER_DAY
from nilmtk.timeframe import TimeFrame, convert_none_to_nat
class TimeFrameGroup():
""" A collection of nilmtk.TimeFrame objects.
The timeframegroup is used to store TimeFrames of a certain
type (eg. good sections) for a whole load profile together.
It then allows intersection functionality between multiple
load profiles to eg. find the good timeframes in all
the TimeFrameGroups.
The TimeFrameGroup has been rewritten using pandas DataFrames
    because the previous implementation was far too slow
Attributes:
----------
    _df: pd.DataFrame
        DataFrame with the columns ['section_start', 'section_end']
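
    Example (illustrative; the timestamps below are arbitrary)::

        a = TimeFrameGroup([TimeFrame(pd.Timestamp('2020-01-01'), pd.Timestamp('2020-01-03'))])
        b = TimeFrameGroup([TimeFrame(pd.Timestamp('2020-01-02'), pd.Timestamp('2020-01-05'))])
        a.intersection(b).get_timeframe()  # section from 2020-01-02 to 2020-01-03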
"""
def __init__(self, timeframes=None, starts_and_ends = None):
if isinstance(timeframes, TimeFrameGroup):
self._df = timeframes._df.copy()
        elif isinstance(timeframes, pd.core.indexes.datetimes.DatetimeIndex):
            self._df = timeframes
        elif isinstance(timeframes, pd.DataFrame):
            self._df = timeframes.copy()
        elif starts_and_ends is not None:
            self._df = pd.DataFrame({'section_start': starts_and_ends['starts'], 'section_end': starts_and_ends['ends']})
        elif timeframes is not None:
            self._df = pd.DataFrame([(frame.start, frame.end) for frame in timeframes], columns=['section_start', 'section_end'])
else:
self._df = pd.DataFrame(columns = ['section_start', 'section_end'])
def plot(self, ax=None, y=0, height=1, gap=0.05, color='b', **plot_kwargs):
if ax is None:
ax = plt.gca()
ax.xaxis.axis_date()
height -= gap * 2
for _, row in self._df.iterrows():
length = (row['section_end'] - row['section_start']).total_seconds() / SECS_PER_DAY
bottom_left_corner = (mdates.date2num(row['section_start']), y + gap)
rect = plt.Rectangle(bottom_left_corner, length, height,
color=color, **plot_kwargs)
ax.add_patch(rect)
ax.autoscale_view()
return ax
def plot_simple(self, ax=None, gap=0.05, **plot_kwargs):
for _, row in self._df.iterrows():
length = (row['section_end'] - row['section_start']).total_seconds() / SECS_PER_DAY
bottom_left_corner = (mdates.date2num(row['section_start']), 0)
rect = plt.Rectangle(bottom_left_corner, length, 1,
color='b', **plot_kwargs)
ax.add_patch(rect)
return ax
def plot_deltahistogram(self, bins = 10):
(self._df['section_end'] - self._df['section_start']).apply(lambda e: e.total_seconds()).hist(bins=bins)
def get_timeframe(self):
''' Returns the timeframe from start of first section to end of last section.
Returns:
timeframe: outer timeframe of this TimeFrameGroup
'''
if self._df.empty:
return TimeFrame(start = None, end = None)
idx = self._df.index
return TimeFrame(start = self._df.loc[idx[0], 'section_start'], end = self._df.loc[idx[-1], 'section_end'])
def union(self, other):
'''
self.good_sections(): |######----#####-----######-#|
other.good_sections(): |---##---####----##-----###-#|
        union():               |######--#######-##--######-#|
'''
assert isinstance(other, (TimeFrameGroup, list))
return TimeFrameGroup.union_many([self, other])
    @staticmethod
    def union_many(groups):
'''
        Function to do a fast union over many TimeFrameGroups
        Parameters
        ----------
groups: [nilmtk.TimeFrameGroup]
The group of timeframegroups to calculate the union for.
'''
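        # sweep-line over all section boundaries: +1 at every section start, -1 at every end;
        # the running sum is > 0 wherever at least one group is active, and the 0/1 transitions
        # of that indicator give the starts and ends of the merged (union) sections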
all_events = pd.Series()
for group in groups:
all_events = all_events.append(pd.Series(1, index=pd.DatetimeIndex(group._df['section_start'])))
all_events = all_events.append(pd.Series(-1, index=pd.DatetimeIndex(group._df['section_end'])))
all_events.sort_index(inplace=True)
any_active = (all_events.cumsum()>0).astype(int)
switches = (any_active - any_active.shift(1).fillna(0))
starts = all_events[switches == 1].index
ends = all_events[switches == -1].index
result = pd.DataFrame({'section_start': starts, 'section_end':ends})
return TimeFrameGroup(result)
def diff(self, other):
'''
Difference between this and the other TimeFrameGroup.
self.good_sections(): |######----#####-----######-#|
other.good_sections(): |---##---####----##-----###-#|
diff(): |###--#------###-----###-----|
'''
assert isinstance(other, (TimeFrameGroup, list))
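        # same sweep-line idea as in union_many, but the other group's sections enter with
        # inverted signs, so the running sum is > 0 only where self is active and other is not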
all_events = pd.Series()
all_events = all_events.append(pd.Series(1, index=pd.DatetimeIndex(self._df['section_start'])))
all_events = all_events.append(pd.Series(-1, index=pd.DatetimeIndex(self._df['section_end'])))
all_events = all_events.append(pd.Series(-1, index=pd.DatetimeIndex(other._df['section_start'])))
all_events = all_events.append(pd.Series(+1, index=pd.DatetimeIndex(other._df['section_end'])))
all_events.sort_index(inplace=True)
all_active = (all_events.cumsum()>0)
starts = all_events.index[all_active]
ends = all_active.shift(1)
        if len(ends) > 0:
            ends.iloc[0] = False
ends = all_events[ends].index
result = pd.DataFrame({'section_start': starts, 'section_end':ends})
return TimeFrameGroup(result)
def intersection(self, other):
"""Returns a new TimeFrameGroup of self masked by other.
Illustrated example:
self.good_sections(): |######----#####-----######-#|
other.good_sections(): |---##---####----##-----###-#|
intersection(): |---##-----##-----------###-#|
"""
        # This blew up at the assertion below when computing Accuracy as an error metric.
assert isinstance(other, (TimeFrameGroup, list))
return TimeFrameGroup.intersect_many([self, other])
    @staticmethod
    def intersect_many(groups):
'''
        Function to do a fast intersection between many TimeFrameGroups
        Parameters
        ----------
groups: [nilmtk.TimeFrameGroup]
The group of timeframegroups to calculate the intersection for.
'''
if any(map(lambda grp: len(grp._df) == 0, groups)):
return TimeFrameGroup()
        all_events = pd.Series()
import sys
from time import time, sleep
import pandas as pd
import psutil
import shutil
import glob
import os
try:
import _pickle as pickle
except:
import pickle
def print_progress_bar(count, total, start=0):
bar_len = 60
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
if percents == 100:
if start == 0:
msg = '[%s] %d%%\n' % (bar, percents)
else:
elapsed = time() - start
if elapsed > 3600:
elapsed /= 3600
msg = '[%s] %d%% total: %0.2f hours\n' % (bar, percents, elapsed)
elif elapsed > 60:
elapsed /= 60
msg = '[%s] %d%% total: %0.2f minutes\n' % (bar, percents, elapsed)
else:
msg = '[%s] %d%% total: %0.2f seconds\n' % (bar, percents, elapsed)
elif start == 0 or percents == 0:
msg = '[%s] %d%%\r' % (bar, percents)
else:
elapsed = time() - start
eta = elapsed / (percents/100) - elapsed
if eta > 3600:
eta /= 3600
msg = '[%s] %d%% eta: %0.2f hours \r' % (bar, percents, eta)
elif eta > 60:
eta /= 60
msg = '[%s] %d%% eta: %0.2f minutes \r' % (bar, percents, eta)
else:
msg = '[%s] %d%% eta: %0.2f seconds \r' % (bar, percents, eta)
sys.stdout.write(msg[:-1].ljust(89)+msg[-1])
sys.stdout.flush()
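# typical call pattern (illustrative): pass the loop start time so an ETA can be printed, e.g.
#   start = time()
#   for i in range(total):
#       ...  # do some work
#       print_progress_bar(i + 1, total, start)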
def set_low_process_priority():
p = psutil.Process(os.getpid())
if sys.platform == "win32":
p.nice(psutil.BELOW_NORMAL_PRIORITY_CLASS)
else:
p.nice(5)
def set_high_process_priority():
p = psutil.Process(os.getpid())
if sys.platform == "win32":
p.nice(psutil.ABOVE_NORMAL_PRIORITY_CLASS)
else:
p.nice(-5)
def getPlayerName(player):
return ".".join(os.path.basename(player).split(".")[:-1])
def saveWTL(config, p1, p2, w, t, l):
if w > 0 or t > 0 or l > 0:
data = {
"player1": p1,
"player2": p2,
"wins": w,
"ties": t,
"losses": l}
pickle.dump(data, open(config.data.performance_location+"staged_"+str(time())+".pickle","wb"))
sleep(0.05)
def mergeStagedWTL(config):
#only run if not already merging in another process
if os.path.exists(config.data.performance_location+"win_matrix_temp.csv"):
return None
else:
win_matrix_file = open(config.data.performance_location+"win_matrix_temp.csv", "w+")
merged_data = []
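        # fold every staged result into a single record per (player1, player2) pair; if a staged
        # file lists the two players in the opposite order, its wins and losses are swapped
        # before being accumulated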
files = glob.glob(config.data.performance_location+"staged_*.pickle")
for file in files:
try:
data = pickle.load(open(file, "rb"))
found = False
for i in range(len(merged_data)):
if merged_data[i]['player1'] == data['player1'] and merged_data[i]['player2'] == data['player2']:
found = True
merged_data[i]['wins'] += data['wins']
merged_data[i]['ties'] += data['ties']
merged_data[i]['losses'] += data['losses']
break
elif merged_data[i]['player1'] == data['player2'] and merged_data[i]['player2'] == data['player1']:
found = True
merged_data[i]['wins'] += data['losses']
merged_data[i]['ties'] += data['ties']
merged_data[i]['losses'] += data['wins']
break
if not found:
merged_data.append(data)
os.remove(file)
except Exception as e:
continue
if os.path.exists(config.data.performance_location+"win_matrix.csv"):
df = pd.read_csv(config.data.performance_location+"win_matrix.csv", index_col=0)
else:
            df = pd.DataFrame()
import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import numpy as np
import math
from numpy import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day
from qteasy.utilfuncs import next_market_trade_day, unify, mask_to_signal, list_or_slice, labels_to_dict
from qteasy.utilfuncs import weekday_name, prev_market_trade_day, is_number_like, list_truncate, input_to_list
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator, TimingDMA, TimingMACD, TimingCDL, TimingTRIX
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic, stock_company
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, stddev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax
from qteasy.tafuncs import minmaxindex, mult, sub, sum
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.history import stack_dataframes, dataframe_to_hp, HistoryPanel
from qteasy.database import DataSource
from qteasy.strategy import Strategy, SimpleTiming, RollingTiming, SimpleSelecting, FactoralSelecting
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
from qteasy.blender import _exp_to_token, blender_parser, signal_blend
class TestCost(unittest.TestCase):
def setUp(self):
self.amounts = np.array([10000., 20000., 10000.])
self.op = np.array([0., 1., -0.33333333])
self.amounts_to_sell = np.array([0., 0., -3333.3333])
self.cash_to_spend = np.array([0., 20000., 0.])
self.prices = np.array([10., 20., 10.])
self.r = qt.Cost(0.0)
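        # fixtures: three assets priced at 10/20/10, a full buy signal on the second asset and a
        # one-third sell signal on the third; each test below overrides the rate/fee/slippage
        # fields of self.r that it needs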
def test_rate_creation(self):
"""测试对象生成"""
print('testing rates objects\n')
        self.assertIsInstance(self.r, qt.Cost, 'Type should be Cost')
self.assertEqual(self.r.buy_fix, 0)
self.assertEqual(self.r.sell_fix, 0)
def test_rate_operations(self):
"""测试交易费率对象"""
self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong')
self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect')
self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect')
self.assertEqual(self.r['buy_min'], 5., 'Item got is incorrect')
self.assertEqual(self.r['sell_min'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect')
self.assertEqual(np.allclose(self.r.calculate(self.amounts),
[0.003, 0.003, 0.003]),
True,
'fee calculation wrong')
def test_rate_fee(self):
"""测试买卖交易费率"""
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 0.
self.r.sell_min = 0.
self.r.slipage = 0.
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1.))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
def test_min_fee(self):
"""测试最低交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 300
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33030)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 32700)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
def test_rate_with_min(self):
"""测试最低交易费用对其他交易费率参数的影响"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 333
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)
def test_fixed_fee(self):
"""测试固定交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 200
self.r.sell_fix = 150
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 0))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')
print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
f'result incorrect, {test_fixed_fee_result[0]} does not equal to [0,0,-3400]')
self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
def test_slipage(self):
"""测试交易滑点"""
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.slipage = 1E-9
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
print('\nselling result with fixed rate = 0.001 and slipage = 1E-10:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
f'{test_fixed_fee_result[0]} does not equal to [0, 0, -10000]')
self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
msg=f'{test_fixed_fee_result[1]} does not equal to 99890.')
self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
msg=f'{test_fixed_fee_result[2]} does not equal to -36.666663.')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 60.21814121353513, msg='result incorrect')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect')
class TestSpace(unittest.TestCase):
def test_creation(self):
"""
test if creation of space object is fine
"""
# first group of inputs, output Space with two discr axis from [0,10]
print('testing space objects\n')
# pars_list = [[(0, 10), (0, 10)],
# [[0, 10], [0, 10]]]
#
# types_list = ['discr',
# ['discr', 'discr']]
#
# input_pars = itertools.product(pars_list, types_list)
# for p in input_pars:
# # print(p)
# s = qt.Space(*p)
# b = s.boes
# t = s.types
# # print(s, t)
# self.assertIsInstance(s, qt.Space)
# self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
# self.assertEqual(t, ['discr', 'discr'], 'types incorrect')
#
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = ['foo, bar',
['foo', 'bar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['enum', 'enum'], 'types incorrect')
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = [['discr', 'foobar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['discr', 'enum'], 'types incorrect')
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types='conti, enum')
self.assertEqual(s.types, ['conti', 'enum'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 2))
self.assertEqual(s.shape, (np.inf, 2))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(1, 2), (2, 3), (3, 4)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['discr', 'discr', 'discr'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (2, 2, 2))
self.assertEqual(s.shape, (2, 2, 2))
self.assertEqual(s.count, 8)
self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)])
pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum'])
self.assertEqual(s.dim, 1)
self.assertEqual(s.size, (3,))
self.assertEqual(s.shape, (3,))
self.assertEqual(s.count, 3)
pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5))
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
def test_extract(self):
"""
:return:
"""
pars_list = [(0, 10), (0, 10)]
types_list = ['discr', 'discr']
s = Space(pars=pars_list, par_types=types_list)
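        # 'interval' extraction samples every 3rd value on each discrete axis, i.e. {0, 3, 6, 9}
        # per axis, giving the 4 x 4 = 16 grid points checked below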
extracted_int, count = s.extract(3, 'interval')
extracted_int_list = list(extracted_int)
print('extracted int\n', extracted_int_list)
self.assertEqual(count, 16, 'extraction count wrong!')
self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
extracted_rand, count = s.extract(10, 'rand')
extracted_rand_list = list(extracted_rand)
self.assertEqual(count, 10, 'extraction count wrong!')
print('extracted rand\n', extracted_rand_list)
for point in list(extracted_rand_list):
self.assertEqual(len(point), 2)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
extracted_int2, count = s.extract(3, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list2 = list(extracted_int2)
self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
print('extracted int list 2\n', extracted_int_list2)
self.assertIsInstance(extracted_int_list2[0][0], float)
self.assertIsInstance(extracted_int_list2[0][1], (int, int64))
extracted_rand2, count = s.extract(10, 'rand')
self.assertEqual(count, 10, 'extraction count wrong!')
extracted_rand_list2 = list(extracted_rand2)
print('extracted rand list 2:\n', extracted_rand_list2)
for point in extracted_rand_list2:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], float)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], (int, int64))
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), ('a', 'b')]
s = Space(pars=pars_list, par_types='enum, enum')
extracted_int3, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list3 = list(extracted_int3)
self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')],
'space extraction wrong!')
print('extracted int list 3\n', extracted_int_list3)
self.assertIsInstance(extracted_int_list3[0][0], float)
self.assertIsInstance(extracted_int_list3[0][1], str)
extracted_rand3, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list3 = list(extracted_rand3)
print('extracted rand list 3:\n', extracted_rand_list3)
for point in extracted_rand_list3:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (float, int))
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], str)
self.assertIn(point[1], ['a', 'b'])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))]
s = Space(pars=pars_list, par_types='enum')
extracted_int4, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list4 = list(extracted_int4)
it = zip(extracted_int_list4, [(0, 10), (1, 'c'), (0, 'b'), (1, 14)])
for item, item2 in it:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 4\n', extracted_int_list4)
self.assertIsInstance(extracted_int_list4[0], tuple)
extracted_rand4, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list4 = list(extracted_rand4)
print('extracted rand list 4:\n', extracted_rand_list4)
for point in extracted_rand_list4:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (int, str))
self.assertIn(point[0], [0, 1, 'a'])
self.assertIsInstance(point[1], (int, str))
self.assertIn(point[1], [10, 14, 'b', 'c'])
self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)]
s = Space(pars=pars_list, par_types='enum, discr')
extracted_int5, count = s.extract(1, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list5 = list(extracted_int5)
for item, item2 in extracted_int_list5:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 5\n', extracted_int_list5)
self.assertIsInstance(extracted_int_list5[0], tuple)
extracted_rand5, count = s.extract(5, 'rand')
self.assertEqual(count, 5, 'extraction count wrong!')
extracted_rand_list5 = list(extracted_rand5)
print('extracted rand list 5:\n', extracted_rand_list5)
for point in extracted_rand_list5:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], tuple)
print(f'type of point[1] is {type(point[1])}')
self.assertIsInstance(point[1], (int, np.int64))
self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
print(f'test incremental extraction')
pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]
s = Space(pars_list)
ext, count = s.extract(64, 'interval')
self.assertEqual(count, 4096)
points = list(ext)
        # all points have been extracted; build subspaces around ten of them
        # and check that each subspace is a Space contained in s, has the expected dimensions,
        # and that extracting with interval 32 yields a point count in the expected range
for point in points[1000:1010]:
subspace = s.from_point(point, 64)
self.assertIsInstance(subspace, Space)
self.assertTrue(subspace in s)
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
ext, count = subspace.extract(32)
points = list(ext)
self.assertGreaterEqual(count, 512)
self.assertLessEqual(count, 4096)
print(f'\n---------------------------------'
f'\nthe space created around point <{point}> is'
f'\n{subspace.boes}'
f'\nand extracted {count} points, the first 5 are:'
f'\n{points[:5]}')
def test_axis_extract(self):
# test axis object with conti type
axis = Axis((0., 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'conti')
self.assertEqual(axis.axis_boe, (0., 5.))
self.assertEqual(axis.count, np.inf)
self.assertEqual(axis.size, 5.0)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.]))
self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(0 <= item <= 5) for item in extracted]))
# test axis object with discrete type
axis = Axis((1, 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'discr')
self.assertEqual(axis.axis_boe, (1, 5))
self.assertEqual(axis.count, 5)
self.assertEqual(axis.size, 5)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5]))
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted]))
# test axis object with enumerate type
axis = Axis((1, 5, 7, 10, 'A', 'F'))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'enum')
self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 'A', 'F'))
self.assertEqual(axis.count, 6)
self.assertEqual(axis.size, 6)
self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F'])
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted]))
def test_from_point(self):
"""测试从一个点生成一个space"""
        # create a space, pick a point in it and a distance, then build a sub-space around that point
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10., 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
print('create subspace from a point in space')
p = (3, 3)
distance = 2
subspace = s.from_point(p, distance)
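        # from_point spans p +/- distance on every axis (clipped to the parent space),
        # here (3 - 2, 3 + 2) = (1, 5) on both axes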
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'discr'])
self.assertEqual(subspace.dim, 2)
self.assertEqual(subspace.size, (4.0, 5))
self.assertEqual(subspace.shape, (np.inf, 5))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(1, 5), (1, 5)])
print('create subspace from a 6 dimensional discrete space')
s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 65345616)
self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.count, 65345616)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace from a 6 dimensional continuous space')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 48000000)
self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace with different distances on each dimension')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = [10, 5, 5, 10, 10, 5]
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 6000000)
self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)])
class TestCashPlan(unittest.TestCase):
def setUp(self):
self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
self.cp1.info()
self.cp2 = qt.CashPlan(['20100501'], 10000)
self.cp2.info()
self.cp3 = qt.CashPlan(pd.date_range(start='2019-01-01',
freq='Y',
periods=12),
[i * 1000 + 10000 for i in range(12)],
0.035)
self.cp3.info()
def test_creation(self):
self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong')
# test __repr__()
print(self.cp1)
print(self.cp2)
print(self.cp3)
# test __str__()
self.cp1.info()
self.cp2.info()
self.cp3.info()
        # test assertion errors
self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000])
self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000)
def test_properties(self):
self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong')
self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01'))
self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01'))
self.assertEqual(self.cp1.investment_count, 2)
self.assertEqual(self.cp1.period, 730)
self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')])
self.assertEqual(self.cp1.ir, 0.1)
self.assertAlmostEqual(self.cp1.closing_value, 34200)
self.assertAlmostEqual(self.cp2.closing_value, 10000)
self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685)
self.assertIsInstance(self.cp1.plan, pd.DataFrame)
self.assertIsInstance(self.cp2.plan, pd.DataFrame)
self.assertIsInstance(self.cp3.plan, pd.DataFrame)
def test_operation(self):
cp_self_add = self.cp1 + self.cp1
cp_add = self.cp1 + self.cp2
cp_add_int = self.cp1 + 10000
cp_mul_int = self.cp1 * 2
cp_mul_float = self.cp2 * 1.5
cp_mul_time = 3 * self.cp2
cp_mul_time2 = 2 * self.cp1
cp_mul_time3 = 2 * self.cp3
cp_mul_float2 = 2. * self.cp3
self.assertIsInstance(cp_self_add, qt.CashPlan)
self.assertEqual(cp_self_add.amounts, [40000, 20000])
self.assertEqual(cp_add.amounts, [20000, 10000, 10000])
self.assertEqual(cp_add_int.amounts, [30000, 20000])
self.assertEqual(cp_mul_int.amounts, [40000, 20000])
self.assertEqual(cp_mul_float.amounts, [15000])
self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')])
self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000])
self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'),
Timestamp('2011-05-01'),
Timestamp('2012-04-30')])
self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000])
self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'),
Timestamp('2012-01-01'),
Timestamp('2014-01-01'),
Timestamp('2016-01-01')])
self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31'),
Timestamp('2031-12-29'),
Timestamp('2032-12-29'),
Timestamp('2033-12-29'),
Timestamp('2034-12-29'),
Timestamp('2035-12-29'),
Timestamp('2036-12-29'),
Timestamp('2037-12-29'),
Timestamp('2038-12-29'),
Timestamp('2039-12-29'),
Timestamp('2040-12-29'),
Timestamp('2041-12-29'),
Timestamp('2042-12-29')])
self.assertEqual(cp_mul_float2.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31')])
self.assertEqual(cp_mul_float2.amounts, [20000.0,
22000.0,
24000.0,
26000.0,
28000.0,
30000.0,
32000.0,
34000.0,
36000.0,
38000.0,
40000.0,
42000.0])
class TestPool(unittest.TestCase):
def setUp(self):
self.p = ResultPool(5)
self.items = ['first', 'second', (1, 2, 3), 'this', 24]
self.perfs = [1, 2, 3, 4, 5]
self.additional_result1 = ('abc', 12)
self.additional_result2 = ([1, 2], -1)
self.additional_result3 = (12, 5)
def test_create(self):
self.assertIsInstance(self.p, ResultPool)
def test_operation(self):
self.p.in_pool(self.additional_result1[0], self.additional_result1[1])
self.p.cut()
self.assertEqual(self.p.item_count, 1)
self.assertEqual(self.p.items, ['abc'])
for item, perf in zip(self.items, self.perfs):
self.p.in_pool(item, perf)
self.assertEqual(self.p.item_count, 6)
self.assertEqual(self.p.items, ['abc', 'first', 'second', (1, 2, 3), 'this', 24])
self.p.cut()
self.assertEqual(self.p.items, ['second', (1, 2, 3), 'this', 24, 'abc'])
self.assertEqual(self.p.perfs, [2, 3, 4, 5, 12])
self.p.in_pool(self.additional_result2[0], self.additional_result2[1])
self.p.in_pool(self.additional_result3[0], self.additional_result3[1])
self.assertEqual(self.p.item_count, 7)
self.p.cut(keep_largest=False)
self.assertEqual(self.p.items, [[1, 2], 'second', (1, 2, 3), 'this', 24])
self.assertEqual(self.p.perfs, [-1, 2, 3, 4, 5])
class TestCoreSubFuncs(unittest.TestCase):
"""Test all functions in core.py"""
def setUp(self):
pass
def test_input_to_list(self):
print('Testing input_to_list() function')
input_str = 'first'
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 3), ['first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 4), ['first', 'first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 2, None), ['first', 'first'])
input_list = ['first', 'second']
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 3), ['first', 'second', None])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 4, 'padder'), ['first', 'second', 'padder', 'padder'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 1), ['first', 'second'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, -5), ['first', 'second'])
def test_point_in_space(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
p2 = (-1, 3, 10)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
self.assertFalse(p2 in sp)
print(f'point {p2} is not in space {sp}')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)], 'conti, conti, enum')
p1 = (5.5, 3.2, 8)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
def test_space_in_space(self):
print('test if a space is in another space')
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
sp2 = Space([(0., 10.), (0., 10.), (0., 10.)])
self.assertTrue(sp2 in sp)
self.assertTrue(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is in space {sp2}\n'
f'they are equal to each other\n')
sp2 = Space([(0, 5.), (2, 7.), (3., 9.)])
self.assertTrue(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'{sp2} is a sub space of {sp}\n')
sp2 = Space([(0, 5), (2, 7), (3., 9)])
self.assertFalse(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)])
self.assertFalse(sp in sp2)
self.assertFalse(sp2 in sp)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
def test_space_around_centre(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
ssp = space_around_centre(space=sp, centre=p1, radius=1.2)
print(ssp.boes)
print('\ntest multiple diameters:')
self.assertEqual(ssp.boes, [(4.3, 6.7), (2.0, 4.4), (5.8, 8.2)])
ssp = space_around_centre(space=sp, centre=p1, radius=[1, 2, 1])
print(ssp.boes)
self.assertEqual(ssp.boes, [(4.5, 6.5), (1.2000000000000002, 5.2), (6.0, 8.0)])
print('\ntest points on edge:')
p2 = (5.5, 3.2, 10)
ssp = space_around_centre(space=sp, centre=p1, radius=3.9)
print(ssp.boes)
self.assertEqual(ssp.boes, [(1.6, 9.4), (0.0, 7.1), (3.1, 10.0)])
print('\ntest enum spaces')
sp = Space([(0, 100), range(40, 3, -2)], 'discr, enum')
p1 = [34, 12]
ssp = space_around_centre(space=sp, centre=p1, radius=5, ignore_enums=False)
self.assertEqual(ssp.boes, [(29, 39), (22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(ssp.boes)
print('\ntest enum space and ignore enum axis')
ssp = space_around_centre(space=sp, centre=p1, radius=5)
self.assertEqual(ssp.boes, [(29, 39),
(40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(sp.boes)
def test_get_stock_pool(self):
print(f'start test building stock pool function\n')
share_basics = stock_basic(fields='ts_code,symbol,name,area,industry,market,list_date,exchange')
print(f'\nselect all stocks by area')
stock_pool = qt.get_stock_pool(area='上海')
print(f'{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "上海"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].eq('上海').all())
print(f'\nselect all stocks by multiple areas')
stock_pool = qt.get_stock_pool(area='贵州,北京,天津')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are in list of ["贵州", "北京", "天津"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['贵州',
'北京',
'天津']).all())
print(f'\nselect all stocks by area and industry')
stock_pool = qt.get_stock_pool(area='四川', industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "四川", and industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['四川']).all())
print(f'\nselect all stocks by industry')
stock_pool = qt.get_stock_pool(industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stocks industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
print(f'\nselect all stocks by market')
stock_pool = qt.get_stock_pool(market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock market is "主板"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
print(f'\nselect all stocks by market and list date')
stock_pool = qt.get_stock_pool(date='2000-01-01', market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock market is "主板", and list date after "2000-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('2000-01-01').all())
print(f'\nselect all stocks by list date')
stock_pool = qt.get_stock_pool(date='1997-01-01')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all list date after "1997-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1997-01-01').all())
print(f'\nselect all stocks by exchange')
stock_pool = qt.get_stock_pool(exchange='SSE')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['exchange'].eq('SSE').all())
print(f'\nselect all stocks by industry, area and list date')
industry_list = ['银行', '全国地产', '互联网', '环境保护', '区域地产',
'酒店餐饮', '运输设备', '综合类', '建筑工程', '玻璃',
'家用电器', '文教休闲', '其他商业', '元器件', 'IT设备',
'其他建材', '汽车服务', '火力发电', '医药商业', '汽车配件',
'广告包装', '轻工机械', '新型电力', '多元金融', '饲料']
area_list = ['深圳', '北京', '吉林', '江苏', '辽宁', '广东',
'安徽', '四川', '浙江', '湖南', '河北', '新疆',
'山东', '河南', '山西', '江西', '青海', '湖北',
'内蒙', '海南', '重庆', '陕西', '福建', '广西',
'上海']
stock_pool = qt.get_stock_pool(date='19980101',
industry=industry_list,
area=area_list)
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1998-01-01').all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(industry_list).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(area_list).all())
self.assertRaises(KeyError, qt.get_stock_pool, industry=25)
self.assertRaises(KeyError, qt.get_stock_pool, share_name='000300.SH')
self.assertRaises(KeyError, qt.get_stock_pool, markets='SSE')
class TestEvaluations(unittest.TestCase):
"""Test all evaluation functions in core.py"""
    # The reference results below were calculated manually in an Excel file
def setUp(self):
"""用np.random生成测试用数据,使用cumsum()模拟股票走势"""
self.test_data1 = pd.DataFrame([5.34892759, 5.65768696, 5.79227076, 5.56266871, 5.88189632,
6.24795001, 5.92755558, 6.38748165, 6.31331899, 5.86001665,
5.61048472, 5.30696736, 5.40406792, 5.03180571, 5.37886353,
5.78608307, 6.26540339, 6.59348026, 6.90943801, 6.70911677,
6.33015954, 6.06697417, 5.9752499, 6.45786408, 6.95273763,
6.7691991, 6.70355481, 6.28048969, 6.61344541, 6.24620003,
6.47409983, 6.4522311, 6.8773094, 6.99727832, 6.59262674,
6.59014938, 6.63758237, 6.38331869, 6.09902105, 6.35390109,
6.51993567, 6.87244592, 6.83963485, 7.08797815, 6.88003144,
6.83657323, 6.97819483, 7.01600276, 7.12554256, 7.58941523,
7.61014457, 7.21224091, 7.48174399, 7.66490854, 7.51371968,
7.11586198, 6.97147399, 6.67453301, 6.2042138, 6.33967015,
6.22187938, 5.98426993, 6.37096079, 6.55897161, 6.26422645,
6.69363762, 7.12668015, 6.83232926, 7.30524081, 7.4262041,
7.54031383, 7.17545919, 7.20659257, 7.44886016, 7.37094393,
6.88011022, 7.08142491, 6.74992833, 6.5967097, 6.21336693,
6.35565105, 6.82347596, 6.44773408, 6.84538053, 6.47966466,
6.09699528, 5.63927014, 6.01081024, 6.20585303, 6.60528206,
7.01594726, 7.03684251, 6.76574977, 7.08740846, 6.65336462,
7.07126686, 6.80058956, 6.79241977, 6.47843472, 6.39245474],
columns=['value'])
self.test_data2 = pd.DataFrame([5.09276527, 4.83828592, 4.6000911, 4.63170487, 4.63566451,
4.50546921, 4.96390044, 4.64557907, 4.25787855, 3.76585551,
3.38826334, 3.76243422, 4.06365426, 3.87084726, 3.91400935,
4.13438822, 4.27064542, 4.56776104, 5.03800296, 5.31070529,
5.39902276, 5.21186286, 5.05683114, 4.68842046, 5.11895168,
5.27151571, 5.72294993, 6.09961056, 6.26569635, 6.48806151,
6.16058885, 6.2582459, 6.38934791, 6.57831057, 6.19508831,
5.70155153, 5.20435735, 5.36538825, 5.40450056, 5.2227697,
5.37828693, 5.53058991, 6.02996797, 5.76802181, 5.66166713,
6.07988994, 5.61794367, 5.63218151, 6.10728013, 6.0324168,
6.27164431, 6.27551239, 6.52329665, 7.00470007, 7.34163113,
7.33699083, 7.67661334, 8.09395749, 7.68086668, 7.58341161,
7.46219819, 7.58671899, 7.19348298, 7.40088323, 7.47562005,
7.93342043, 8.2286081, 8.3521632, 8.43590025, 8.34977395,
8.57563095, 8.81586328, 9.08738649, 9.01542031, 8.8653815,
9.21763111, 9.04233017, 8.59533999, 8.47590075, 8.70857222,
8.78890756, 8.92697606, 9.35743773, 9.68280866, 10.15622021,
10.55908549, 10.6337894, 10.55197128, 10.65435176, 10.54611045,
10.19432562, 10.48320884, 10.36176768, 10.03186854, 10.23656092,
10.0062843, 10.13669686, 10.30758958, 9.87904176, 10.05126375],
columns=['value'])
self.test_data3 = pd.DataFrame([5.02851874, 5.20700348, 5.02410709, 5.49836387, 5.06834371,
5.10956737, 5.15314979, 5.02256472, 5.09746382, 5.23909247,
4.93410336, 4.96316186, 5.40026682, 5.7353255, 5.53438319,
5.79092139, 5.67528173, 5.89840855, 5.75379463, 6.10855386,
5.77322365, 5.84538021, 5.6103973, 5.7518655, 5.49729695,
5.13610628, 5.30524121, 5.68093462, 5.73251319, 6.04420783,
6.26929843, 6.59610234, 6.09872345, 6.25475121, 6.72927396,
6.91395783, 7.00693283, 7.36217783, 7.71516676, 7.67580263,
7.62477511, 7.73600568, 7.53457914, 7.46170277, 7.83658014,
8.11481319, 8.03705544, 7.64948845, 7.52043731, 7.67247943,
7.46511982, 7.43541798, 7.58856517, 7.9392717, 8.25406287,
7.77031632, 8.03223447, 7.86799055, 7.57630999, 7.33230519,
7.22378732, 6.85972264, 7.17548456, 7.5387846, 7.2392632,
6.8455644, 6.59557185, 6.6496796, 6.73685623, 7.18598015,
7.13619128, 6.88060157, 7.1399681, 7.30308077, 6.94942434,
7.0247815, 7.37567798, 7.50080197, 7.59719284, 7.14520561,
7.29913484, 7.79551341, 8.15497781, 8.40456095, 8.86516528,
8.53042688, 8.94268762, 8.52048006, 8.80036284, 8.91602364,
9.19953385, 8.70828953, 8.24613093, 8.18770453, 7.79548389,
7.68627967, 7.23205036, 6.98302636, 7.06515819, 6.95068113],
columns=['value'])
self.test_data4 = pd.DataFrame([4.97926539, 5.44016005, 5.45122915, 5.74485615, 5.45600553,
5.44858945, 5.2435413, 5.47315161, 5.58464303, 5.36179749,
5.38236326, 5.29614981, 5.76523508, 5.75102892, 6.15316618,
6.03852528, 6.01442228, 5.70510182, 5.22748133, 5.46762379,
5.78926267, 5.8221362, 5.61236849, 5.30615725, 5.24200611,
5.41042642, 5.59940342, 5.28306781, 4.99451932, 5.08799266,
5.38865647, 5.58229139, 5.33492845, 5.48206276, 5.09721379,
5.39190493, 5.29965087, 5.0374415, 5.50798022, 5.43107577,
5.22759507, 4.991809, 5.43153084, 5.39966868, 5.59916352,
5.66412137, 6.00611838, 5.63564902, 5.66723484, 5.29863863,
4.91115153, 5.3749929, 5.75082334, 6.08308148, 6.58091182,
6.77848803, 7.19588758, 7.64862286, 7.99818347, 7.91824794,
8.30341071, 8.45984973, 7.98700002, 8.18924931, 8.60755649,
8.66233396, 8.91018407, 9.0782739, 9.33515448, 8.95870245,
8.98426422, 8.50340317, 8.64916085, 8.93592407, 8.63145745,
8.65322862, 8.39543204, 8.37969997, 8.23394504, 8.04062872,
7.91259763, 7.57252171, 7.72670114, 7.74486117, 8.06908188,
7.99166889, 7.92155906, 8.39956136, 8.80181323, 8.47464091,
8.06557064, 7.87145573, 8.0237959, 8.39481998, 8.68525692,
8.81185461, 8.98632237, 9.0989835, 8.89787405, 8.86508591],
columns=['value'])
self.test_data5 = pd.DataFrame([4.50258923, 4.35142568, 4.07459514, 3.87791297, 3.73715985,
3.98455684, 4.07587908, 4.00042472, 4.28276612, 4.01362051,
4.13713565, 4.49312372, 4.48633159, 4.4641207, 4.13444605,
3.79107217, 4.22941629, 4.56548511, 4.92472163, 5.27723158,
5.67409193, 6.00176917, 5.88889928, 5.55256103, 5.39308314,
5.2610492, 5.30738908, 5.22222408, 4.90332238, 4.57499908,
4.96097146, 4.81531011, 4.39115442, 4.63200662, 5.04588813,
4.67866025, 5.01705123, 4.83562258, 4.60381702, 4.66187576,
4.41292828, 4.86604507, 4.42280124, 4.07517294, 4.16317319,
4.10316596, 4.42913598, 4.06609666, 3.96725913, 4.15965746,
4.12379564, 4.04054068, 3.84342851, 3.45902867, 3.17649855,
3.09773586, 3.5502119, 3.66396995, 3.66306483, 3.29131401,
2.79558533, 2.88319542, 3.03671098, 3.44645857, 3.88167161,
3.57961874, 3.60180276, 3.96702102, 4.05429995, 4.40056979,
4.05653231, 3.59600456, 3.60792477, 4.09989922, 3.73503663,
4.01892626, 3.94597242, 3.81466605, 3.71417992, 3.93767156,
4.42806557, 4.06988106, 4.03713636, 4.34408673, 4.79810156,
5.18115011, 4.89798406, 5.3960077, 5.72504875, 5.61894017,
5.1958197, 4.85275896, 5.17550207, 4.71548987, 4.62408567,
4.55488535, 4.36532649, 4.26031979, 4.25225607, 4.58627048],
columns=['value'])
self.test_data6 = pd.DataFrame([5.08639513, 5.05761083, 4.76160923, 4.62166504, 4.62923183,
4.25070173, 4.13447513, 3.90890013, 3.76687608, 3.43342482,
3.67648224, 3.6274775, 3.9385404, 4.39771627, 4.03199346,
3.93265288, 3.50059789, 3.3851961, 3.29743973, 3.2544872,
2.93692949, 2.70893003, 2.55461976, 2.20922332, 2.29054475,
2.2144714, 2.03726827, 2.39007617, 2.29866155, 2.40607111,
2.40440444, 2.79374649, 2.66541922, 2.27018079, 2.08505127,
2.55478864, 2.22415625, 2.58517923, 2.58802256, 2.94870959,
2.69301739, 2.19991535, 2.69473146, 2.64704637, 2.62753542,
2.14240825, 2.38565154, 1.94592117, 2.32243877, 2.69337246,
2.51283854, 2.62484451, 2.15559054, 2.35410875, 2.31219177,
1.96018265, 2.34711266, 2.58083322, 2.40290041, 2.20439791,
2.31472425, 2.16228248, 2.16439749, 2.20080737, 1.73293206,
1.9264407, 2.25089861, 2.69269101, 2.59296687, 2.1420998,
1.67819153, 1.98419023, 2.14479494, 1.89055376, 1.96720648,
1.9916694, 2.37227761, 2.14446036, 2.34573903, 1.86162546,
2.1410721, 2.39204939, 2.52529064, 2.47079939, 2.9299031,
3.09452923, 2.93276708, 3.21731309, 3.06248964, 2.90413406,
2.67844632, 2.45621213, 2.41463398, 2.7373913, 3.14917045,
3.4033949, 3.82283446, 4.02285451, 3.7619638, 4.10346795],
columns=['value'])
self.test_data7 = pd.DataFrame([4.75233583, 4.47668283, 4.55894263, 4.61765848, 4.622892,
4.58941116, 4.32535872, 3.88112797, 3.47237806, 3.50898953,
3.82530406, 3.6718017, 3.78918195, 4.1800752, 4.01818557,
4.40822582, 4.65474654, 4.89287256, 4.40879274, 4.65505126,
4.36876403, 4.58418934, 4.75687172, 4.3689799, 4.16126498,
4.0203982, 3.77148242, 3.38198096, 3.07261764, 2.9014741,
2.5049543, 2.756105, 2.28779058, 2.16986991, 1.8415962,
1.83319008, 2.20898291, 2.00128981, 1.75747025, 1.26676663,
1.40316876, 1.11126484, 1.60376367, 1.22523829, 1.58816681,
1.49705679, 1.80244138, 1.55128293, 1.35339409, 1.50985759,
1.0808451, 1.05892796, 1.43414812, 1.43039101, 1.73631655,
1.43940867, 1.82864425, 1.71088265, 2.12015154, 2.45417128,
2.84777618, 2.7925612, 2.90975121, 3.25920745, 3.13801182,
3.52733677, 3.65468491, 3.69395211, 3.49862035, 3.24786017,
3.64463138, 4.00331929, 3.62509565, 3.78013949, 3.4174012,
3.76312271, 3.62054004, 3.67206716, 3.60596058, 3.38636199,
3.42580676, 3.32921095, 3.02976759, 3.28258676, 3.45760838,
3.24917528, 2.94618304, 2.86980011, 2.63191259, 2.39566759,
2.53159917, 2.96273967, 3.25626185, 2.97425402, 3.16412191,
3.58280763, 3.23257727, 3.62353556, 3.12806399, 2.92532313],
columns=['value'])
        # Build a test series of 500 data points to test the evaluation process when there are more than 250 data points
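        # Both self.long_data and the companion self.long_bench below hold 500 points each;
        # the expected rolling results in the tests start with 250 NaN values, which is
        # consistent with a 250-period rolling window (inferred from the data, not documented)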
self.long_data = pd.DataFrame([9.879, 9.916, 10.109, 10.214, 10.361, 10.768, 10.594, 10.288,
10.082, 9.994, 10.125, 10.126, 10.384, 10.734, 10.4, 10.87,
11.338, 11.061, 11.415, 11.724, 12.077, 12.196, 12.064, 12.423,
12.19, 11.729, 11.677, 11.448, 11.485, 10.989, 11.242, 11.239,
11.113, 11.075, 11.471, 11.745, 11.754, 11.782, 12.079, 11.97,
12.178, 11.95, 12.438, 12.612, 12.804, 12.952, 12.612, 12.867,
12.832, 12.832, 13.015, 13.315, 13.249, 12.904, 12.776, 12.64,
12.543, 12.287, 12.225, 11.844, 11.985, 11.945, 11.542, 11.871,
12.245, 12.228, 12.362, 11.899, 11.962, 12.374, 12.816, 12.649,
12.252, 12.579, 12.3, 11.988, 12.177, 12.312, 12.744, 12.599,
12.524, 12.82, 12.67, 12.876, 12.986, 13.271, 13.606, 13.82,
14.161, 13.833, 13.831, 14.137, 13.705, 13.414, 13.037, 12.759,
12.642, 12.948, 13.297, 13.483, 13.836, 14.179, 13.709, 13.655,
13.198, 13.508, 13.953, 14.387, 14.043, 13.987, 13.561, 13.391,
12.923, 12.555, 12.503, 12.292, 11.877, 12.34, 12.141, 11.687,
11.992, 12.458, 12.131, 11.75, 11.739, 11.263, 11.762, 11.976,
11.578, 11.854, 12.136, 12.422, 12.311, 12.56, 12.879, 12.861,
12.973, 13.235, 13.53, 13.531, 13.137, 13.166, 13.31, 13.103,
13.007, 12.643, 12.69, 12.216, 12.385, 12.046, 12.321, 11.9,
11.772, 11.816, 11.871, 11.59, 11.518, 11.94, 11.803, 11.924,
12.183, 12.136, 12.361, 12.406, 11.932, 11.684, 11.292, 11.388,
11.874, 12.184, 12.002, 12.16, 11.741, 11.26, 11.123, 11.534,
11.777, 11.407, 11.275, 11.679, 11.62, 11.218, 11.235, 11.352,
11.366, 11.061, 10.661, 10.582, 10.899, 11.352, 11.792, 11.475,
11.263, 11.538, 11.183, 10.936, 11.399, 11.171, 11.214, 10.89,
10.728, 11.191, 11.646, 11.62, 11.195, 11.178, 11.18, 10.956,
11.205, 10.87, 11.098, 10.639, 10.487, 10.507, 10.92, 10.558,
10.119, 9.882, 9.573, 9.515, 9.845, 9.852, 9.495, 9.726,
10.116, 10.452, 10.77, 11.225, 10.92, 10.824, 11.096, 11.542,
11.06, 10.568, 10.585, 10.884, 10.401, 10.068, 9.964, 10.285,
10.239, 10.036, 10.417, 10.132, 9.839, 9.556, 9.084, 9.239,
9.304, 9.067, 8.587, 8.471, 8.007, 8.321, 8.55, 9.008,
9.138, 9.088, 9.434, 9.156, 9.65, 9.431, 9.654, 10.079,
10.411, 10.865, 10.51, 10.205, 10.519, 10.367, 10.855, 10.642,
10.298, 10.622, 10.173, 9.792, 9.995, 9.904, 9.771, 9.597,
9.506, 9.212, 9.688, 10.032, 9.723, 9.839, 9.918, 10.332,
10.236, 9.989, 10.192, 10.685, 10.908, 11.275, 11.72, 12.158,
12.045, 12.244, 12.333, 12.246, 12.552, 12.958, 13.11, 13.53,
13.123, 13.138, 13.57, 13.389, 13.511, 13.759, 13.698, 13.744,
13.467, 13.795, 13.665, 13.377, 13.423, 13.772, 13.295, 13.073,
12.718, 12.388, 12.399, 12.185, 11.941, 11.818, 11.465, 11.811,
12.163, 11.86, 11.935, 11.809, 12.145, 12.624, 12.768, 12.321,
12.277, 11.889, 12.11, 12.606, 12.943, 12.945, 13.112, 13.199,
13.664, 14.051, 14.189, 14.339, 14.611, 14.656, 15.112, 15.086,
15.263, 15.021, 15.346, 15.572, 15.607, 15.983, 16.151, 16.215,
16.096, 16.089, 16.32, 16.59, 16.657, 16.752, 16.583, 16.743,
16.373, 16.662, 16.243, 16.163, 16.491, 16.958, 16.977, 17.225,
17.637, 17.344, 17.684, 17.892, 18.036, 18.182, 17.803, 17.588,
17.101, 17.538, 17.124, 16.787, 17.167, 17.138, 16.955, 17.148,
17.135, 17.635, 17.718, 17.675, 17.622, 17.358, 17.754, 17.729,
17.576, 17.772, 18.239, 18.441, 18.729, 18.319, 18.608, 18.493,
18.069, 18.122, 18.314, 18.423, 18.709, 18.548, 18.384, 18.391,
17.988, 17.986, 17.653, 17.249, 17.298, 17.06, 17.36, 17.108,
17.348, 17.596, 17.46, 17.635, 17.275, 17.291, 16.933, 17.337,
17.231, 17.146, 17.148, 16.751, 16.891, 17.038, 16.735, 16.64,
16.231, 15.957, 15.977, 16.077, 16.054, 15.797, 15.67, 15.911,
16.077, 16.17, 15.722, 15.258, 14.877, 15.138, 15., 14.811,
14.698, 14.407, 14.583, 14.704, 15.153, 15.436, 15.634, 15.453,
15.877, 15.696, 15.563, 15.927, 16.255, 16.696, 16.266, 16.698,
16.365, 16.493, 16.973, 16.71, 16.327, 16.605, 16.486, 16.846,
16.935, 17.21, 17.389, 17.546, 17.773, 17.641, 17.485, 17.794,
17.354, 16.904, 16.675, 16.43, 16.898, 16.819, 16.921, 17.201,
17.617, 17.368, 17.864, 17.484],
columns=['value'])
self.long_bench = pd.DataFrame([9.7, 10.179, 10.321, 9.855, 9.936, 10.096, 10.331, 10.662,
10.59, 11.031, 11.154, 10.945, 10.625, 10.233, 10.284, 10.252,
10.221, 10.352, 10.444, 10.773, 10.904, 11.104, 10.797, 10.55,
10.943, 11.352, 11.641, 11.983, 11.696, 12.138, 12.365, 12.379,
11.969, 12.454, 12.947, 13.119, 13.013, 12.763, 12.632, 13.034,
12.681, 12.561, 12.938, 12.867, 13.202, 13.132, 13.539, 13.91,
13.456, 13.692, 13.771, 13.904, 14.069, 13.728, 13.97, 14.228,
13.84, 14.041, 13.963, 13.689, 13.543, 13.858, 14.118, 13.987,
13.611, 14.028, 14.229, 14.41, 14.74, 15.03, 14.915, 15.207,
15.354, 15.665, 15.877, 15.682, 15.625, 15.175, 15.105, 14.893,
14.86, 15.097, 15.178, 15.293, 15.238, 15., 15.283, 14.994,
14.907, 14.664, 14.888, 15.297, 15.313, 15.368, 14.956, 14.802,
14.506, 14.257, 14.619, 15.019, 15.049, 14.625, 14.894, 14.978,
15.434, 15.578, 16.038, 16.107, 16.277, 16.365, 16.204, 16.465,
16.401, 16.895, 17.057, 16.621, 16.225, 16.075, 15.863, 16.292,
16.551, 16.724, 16.817, 16.81, 17.192, 16.86, 16.745, 16.707,
16.552, 16.133, 16.301, 16.08, 15.81, 15.75, 15.909, 16.127,
16.457, 16.204, 16.329, 16.748, 16.624, 17.011, 16.548, 16.831,
16.653, 16.791, 16.57, 16.778, 16.928, 16.932, 17.22, 16.876,
17.301, 17.422, 17.689, 17.316, 17.547, 17.534, 17.409, 17.669,
17.416, 17.859, 17.477, 17.307, 17.245, 17.352, 17.851, 17.412,
17.144, 17.138, 17.085, 16.926, 16.674, 16.854, 17.064, 16.95,
16.609, 16.957, 16.498, 16.552, 16.175, 15.858, 15.697, 15.781,
15.583, 15.36, 15.558, 16.046, 15.968, 15.905, 16.358, 16.783,
17.048, 16.762, 17.224, 17.363, 17.246, 16.79, 16.608, 16.423,
15.991, 15.527, 15.147, 14.759, 14.792, 15.206, 15.148, 15.046,
15.429, 14.999, 15.407, 15.124, 14.72, 14.713, 15.022, 15.092,
14.982, 15.001, 14.734, 14.713, 14.841, 14.562, 15.005, 15.483,
15.472, 15.277, 15.503, 15.116, 15.12, 15.442, 15.476, 15.789,
15.36, 15.764, 16.218, 16.493, 16.642, 17.088, 16.816, 16.645,
16.336, 16.511, 16.2, 15.994, 15.86, 15.929, 16.316, 16.416,
16.746, 17.173, 17.531, 17.627, 17.407, 17.49, 17.768, 17.509,
17.795, 18.147, 18.63, 18.945, 19.021, 19.518, 19.6, 19.744,
19.63, 19.32, 18.933, 19.297, 19.598, 19.446, 19.236, 19.198,
19.144, 19.159, 19.065, 19.032, 18.586, 18.272, 18.119, 18.3,
17.894, 17.744, 17.5, 17.083, 17.092, 16.864, 16.453, 16.31,
16.681, 16.342, 16.447, 16.715, 17.068, 17.067, 16.822, 16.673,
16.675, 16.592, 16.686, 16.397, 15.902, 15.597, 15.357, 15.162,
15.348, 15.603, 15.283, 15.257, 15.082, 14.621, 14.366, 14.039,
13.957, 14.141, 13.854, 14.243, 14.414, 14.033, 13.93, 14.104,
14.461, 14.249, 14.053, 14.165, 14.035, 14.408, 14.501, 14.019,
14.265, 14.67, 14.797, 14.42, 14.681, 15.16, 14.715, 14.292,
14.411, 14.656, 15.094, 15.366, 15.055, 15.198, 14.762, 14.294,
13.854, 13.811, 13.549, 13.927, 13.897, 13.421, 13.037, 13.32,
13.721, 13.511, 13.999, 13.529, 13.418, 13.881, 14.326, 14.362,
13.987, 14.015, 13.599, 13.343, 13.307, 13.689, 13.851, 13.404,
13.577, 13.395, 13.619, 13.195, 12.904, 12.553, 12.294, 12.649,
12.425, 11.967, 12.062, 11.71, 11.645, 12.058, 12.136, 11.749,
11.953, 12.401, 12.044, 11.901, 11.631, 11.396, 11.036, 11.244,
10.864, 11.207, 11.135, 11.39, 11.723, 12.084, 11.8, 11.471,
11.33, 11.504, 11.295, 11.3, 10.901, 10.494, 10.825, 11.054,
10.866, 10.713, 10.875, 10.846, 10.947, 11.422, 11.158, 10.94,
10.521, 10.36, 10.411, 10.792, 10.472, 10.305, 10.525, 10.853,
10.556, 10.72, 10.54, 10.583, 10.299, 10.061, 10.004, 9.903,
9.796, 9.472, 9.246, 9.54, 9.456, 9.177, 9.484, 9.557,
9.493, 9.968, 9.536, 9.39, 8.922, 8.423, 8.518, 8.686,
8.771, 9.098, 9.281, 8.858, 9.027, 8.553, 8.784, 8.996,
9.379, 9.846, 9.855, 9.502, 9.608, 9.761, 9.409, 9.4,
9.332, 9.34, 9.284, 8.844, 8.722, 8.376, 8.775, 8.293,
8.144, 8.63, 8.831, 8.957, 9.18, 9.601, 9.695, 10.018,
9.841, 9.743, 9.292, 8.85, 9.316, 9.288, 9.519, 9.738,
9.289, 9.785, 9.804, 10.06, 10.188, 10.095, 9.739, 9.881,
9.7, 9.991, 10.391, 10.002],
columns=['value'])
def test_performance_stats(self):
"""test the function performance_statistics()
"""
pass
def test_fv(self):
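        # eval_fv() is expected to return the final value of the 'value' column (each number
        # below equals the last element of the corresponding test series) and -np.inf for an
        # empty DataFrame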
print(f'test with test data and empty DataFrame')
self.assertAlmostEqual(eval_fv(self.test_data1), 6.39245474)
self.assertAlmostEqual(eval_fv(self.test_data2), 10.05126375)
self.assertAlmostEqual(eval_fv(self.test_data3), 6.95068113)
self.assertAlmostEqual(eval_fv(self.test_data4), 8.86508591)
self.assertAlmostEqual(eval_fv(self.test_data5), 4.58627048)
self.assertAlmostEqual(eval_fv(self.test_data6), 4.10346795)
self.assertAlmostEqual(eval_fv(self.test_data7), 2.92532313)
self.assertAlmostEqual(eval_fv(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
def test_max_drawdown(self):
print(f'test with test data and empty DataFrame')
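        # Each eval_max_drawdown() result below is indexed as a 4-element sequence:
        # [0] max drawdown ratio, [1] peak position, [2] valley position,
        # [3] recovery position (NaN when the series does not recover within the test data)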
self.assertAlmostEqual(eval_max_drawdown(self.test_data1)[0], 0.264274308)
self.assertEqual(eval_max_drawdown(self.test_data1)[1], 53)
self.assertEqual(eval_max_drawdown(self.test_data1)[2], 86)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data1)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data2)[0], 0.334690849)
self.assertEqual(eval_max_drawdown(self.test_data2)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data2)[2], 10)
self.assertEqual(eval_max_drawdown(self.test_data2)[3], 19)
self.assertAlmostEqual(eval_max_drawdown(self.test_data3)[0], 0.244452899)
self.assertEqual(eval_max_drawdown(self.test_data3)[1], 90)
self.assertEqual(eval_max_drawdown(self.test_data3)[2], 99)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data3)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data4)[0], 0.201849684)
self.assertEqual(eval_max_drawdown(self.test_data4)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4)[2], 50)
self.assertEqual(eval_max_drawdown(self.test_data4)[3], 54)
self.assertAlmostEqual(eval_max_drawdown(self.test_data5)[0], 0.534206456)
self.assertEqual(eval_max_drawdown(self.test_data5)[1], 21)
self.assertEqual(eval_max_drawdown(self.test_data5)[2], 60)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data5)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data6)[0], 0.670062689)
self.assertEqual(eval_max_drawdown(self.test_data6)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data6)[2], 70)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data6)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data7)[0], 0.783577449)
self.assertEqual(eval_max_drawdown(self.test_data7)[1], 17)
self.assertEqual(eval_max_drawdown(self.test_data7)[2], 51)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data7)[3]))
self.assertEqual(eval_max_drawdown(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
# test max drawdown == 0:
        # TODO: investigate how division by zero changes the result
self.assertAlmostEqual(eval_max_drawdown(self.test_data4 - 5)[0], 1.0770474121951792)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[2], 50)
def test_info_ratio(self):
reference = self.test_data1
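        # The information ratio is conventionally the mean active return over the reference
        # divided by the tracking error (std of the return differences); the expected values
        # below were computed externally (see the Excel note above), and the exact formula
        # used by eval_info_ratio() is assumed rather than re-derived here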
self.assertAlmostEqual(eval_info_ratio(self.test_data2, reference, 'value'), 0.075553316)
self.assertAlmostEqual(eval_info_ratio(self.test_data3, reference, 'value'), 0.018949457)
self.assertAlmostEqual(eval_info_ratio(self.test_data4, reference, 'value'), 0.056328143)
self.assertAlmostEqual(eval_info_ratio(self.test_data5, reference, 'value'), -0.004270068)
self.assertAlmostEqual(eval_info_ratio(self.test_data6, reference, 'value'), 0.009198027)
self.assertAlmostEqual(eval_info_ratio(self.test_data7, reference, 'value'), -0.000890283)
def test_volatility(self):
self.assertAlmostEqual(eval_volatility(self.test_data1), 0.748646166)
self.assertAlmostEqual(eval_volatility(self.test_data2), 0.75527442)
self.assertAlmostEqual(eval_volatility(self.test_data3), 0.654188853)
self.assertAlmostEqual(eval_volatility(self.test_data4), 0.688375814)
self.assertAlmostEqual(eval_volatility(self.test_data5), 1.089989522)
self.assertAlmostEqual(eval_volatility(self.test_data6), 1.775419308)
self.assertAlmostEqual(eval_volatility(self.test_data7), 1.962758406)
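        # The logarithm=False variants below produce slightly different values, presumably
        # because simple instead of log returns are used (an assumption based on the
        # parameter name, not on documentation)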
self.assertAlmostEqual(eval_volatility(self.test_data1, logarithm=False), 0.750993311)
self.assertAlmostEqual(eval_volatility(self.test_data2, logarithm=False), 0.75571473)
self.assertAlmostEqual(eval_volatility(self.test_data3, logarithm=False), 0.655331424)
self.assertAlmostEqual(eval_volatility(self.test_data4, logarithm=False), 0.692683021)
self.assertAlmostEqual(eval_volatility(self.test_data5, logarithm=False), 1.09602969)
self.assertAlmostEqual(eval_volatility(self.test_data6, logarithm=False), 1.774789504)
self.assertAlmostEqual(eval_volatility(self.test_data7, logarithm=False), 2.003329156)
self.assertEqual(eval_volatility(pd.DataFrame()), -np.inf)
self.assertRaises(AssertionError, eval_volatility, [1, 2, 3])
        # Test volatility calculation on the long data series
expected_volatility = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
0.39955371, 0.39974258, 0.40309866, 0.40486593, 0.4055514,
0.40710639, 0.40708157, 0.40609006, 0.4073625, 0.40835305,
0.41155304, 0.41218193, 0.41207489, 0.41300276, 0.41308415,
0.41292392, 0.41207645, 0.41238397, 0.41229291, 0.41164056,
0.41316317, 0.41348842, 0.41462249, 0.41474574, 0.41652625,
0.41649176, 0.41701556, 0.4166593, 0.41684221, 0.41491689,
0.41435209, 0.41549087, 0.41849338, 0.41998049, 0.41959106,
0.41907311, 0.41916103, 0.42120773, 0.42052391, 0.42111225,
0.42124589, 0.42356445, 0.42214672, 0.42324022, 0.42476639,
0.42621689, 0.42549439, 0.42533678, 0.42539414, 0.42545038,
0.42593637, 0.42652095, 0.42665489, 0.42699563, 0.42798159,
0.42784512, 0.42898006, 0.42868781, 0.42874188, 0.42789631,
0.4277768, 0.42776827, 0.42685216, 0.42660989, 0.42563155,
0.42618281, 0.42606281, 0.42505222, 0.42653242, 0.42555378,
0.42500842, 0.42561939, 0.42442059, 0.42395414, 0.42384356,
0.42319135, 0.42397497, 0.42488579, 0.42449729, 0.42508766,
0.42509878, 0.42456616, 0.42535577, 0.42681884, 0.42688552,
0.42779918, 0.42706058, 0.42792887, 0.42762114, 0.42894045,
0.42977398, 0.42919859, 0.42829041, 0.42780946, 0.42825318,
0.42858952, 0.42858315, 0.42805601, 0.42764751, 0.42744107,
0.42775518, 0.42707283, 0.4258592, 0.42615335, 0.42526286,
0.4248906, 0.42368986, 0.4232565, 0.42265079, 0.42263954,
0.42153046, 0.42132051, 0.41995353, 0.41916605, 0.41914271,
0.41876945, 0.41740175, 0.41583884, 0.41614026, 0.41457908,
0.41472411, 0.41310876, 0.41261041, 0.41212369, 0.41211677,
0.4100645, 0.40852504, 0.40860297, 0.40745338, 0.40698661,
0.40644546, 0.40591375, 0.40640744, 0.40620663, 0.40656649,
0.40727154, 0.40797605, 0.40807137, 0.40808913, 0.40809676,
0.40711767, 0.40724628, 0.40713077, 0.40772698, 0.40765157,
0.40658297, 0.4065991, 0.405011, 0.40537645, 0.40432626,
0.40390177, 0.40237701, 0.40291623, 0.40301797, 0.40324145,
0.40312864, 0.40328316, 0.40190955, 0.40246506, 0.40237663,
0.40198407, 0.401969, 0.40185623, 0.40198313, 0.40005643,
0.39940743, 0.39850438, 0.39845398, 0.39695093, 0.39697295,
0.39663201, 0.39675444, 0.39538699, 0.39331959, 0.39326074,
0.39193287, 0.39157266, 0.39021327, 0.39062591, 0.38917591,
0.38976991, 0.38864187, 0.38872158, 0.38868096, 0.38868377,
0.38842057, 0.38654784, 0.38649517, 0.38600464, 0.38408115,
0.38323049, 0.38260215, 0.38207663, 0.38142669, 0.38003262,
0.37969367, 0.37768092, 0.37732108, 0.37741991, 0.37617779,
0.37698504, 0.37606784, 0.37499276, 0.37533731, 0.37350437,
0.37375172, 0.37385382, 0.37384003, 0.37338938, 0.37212288,
0.37273075, 0.370559, 0.37038506, 0.37062153, 0.36964661,
0.36818564, 0.3656634, 0.36539259, 0.36428672, 0.36502487,
0.3647148, 0.36551435, 0.36409919, 0.36348181, 0.36254383,
0.36166601, 0.36142665, 0.35954942, 0.35846915, 0.35886759,
0.35813867, 0.35642888, 0.35375231, 0.35061783, 0.35078463,
0.34995508, 0.34688918, 0.34548257, 0.34633158, 0.34622833,
0.34652111, 0.34622774, 0.34540951, 0.34418809, 0.34276593,
0.34160916, 0.33811193, 0.33822709, 0.3391685, 0.33883381])
test_volatility = eval_volatility(self.long_data)
test_volatility_roll = self.long_data['volatility'].values
self.assertAlmostEqual(test_volatility, np.nanmean(expected_volatility))
self.assertTrue(np.allclose(expected_volatility, test_volatility_roll, equal_nan=True))
def test_sharp(self):
self.assertAlmostEqual(eval_sharp(self.test_data1, 5, 0), 0.06135557)
self.assertAlmostEqual(eval_sharp(self.test_data2, 5, 0), 0.167858667)
self.assertAlmostEqual(eval_sharp(self.test_data3, 5, 0), 0.09950547)
self.assertAlmostEqual(eval_sharp(self.test_data4, 5, 0), 0.154928241)
self.assertAlmostEqual(eval_sharp(self.test_data5, 5, 0.002), 0.007868673)
self.assertAlmostEqual(eval_sharp(self.test_data6, 5, 0.002), 0.018306537)
self.assertAlmostEqual(eval_sharp(self.test_data7, 5, 0.002), 0.006259971)
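        # The third positional argument (0 or 0.002) in the calls above looks like a
        # risk-free rate; the Sharpe ratio is conventionally (mean return - risk-free rate)
        # divided by volatility, though eval_sharp()'s exact annualization is not restated here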
        # Test Sharpe ratio calculation on the long data series
expected_sharp = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.02346815, -0.02618783, -0.03763912, -0.03296276, -0.03085698,
-0.02851101, -0.02375842, -0.02016746, -0.01107885, -0.01426613,
-0.00787204, -0.01135784, -0.01164232, -0.01003481, -0.00022512,
-0.00046792, -0.01209378, -0.01278892, -0.01298135, -0.01938214,
-0.01671044, -0.02120509, -0.0244281, -0.02416067, -0.02763238,
-0.027579, -0.02372774, -0.02215294, -0.02467094, -0.02091266,
-0.02590194, -0.03049876, -0.02077131, -0.01483653, -0.02488144,
-0.02671638, -0.02561547, -0.01957986, -0.02479803, -0.02703162,
-0.02658087, -0.01641755, -0.01946472, -0.01647757, -0.01280889,
-0.00893643, -0.00643275, -0.00698457, -0.00549962, -0.00654677,
-0.00494757, -0.0035633, -0.00109037, 0.00750654, 0.00451208,
0.00625502, 0.01221367, 0.01326454, 0.01535037, 0.02269538,
0.02028715, 0.02127712, 0.02333264, 0.02273159, 0.01670643,
0.01376513, 0.01265342, 0.02211647, 0.01612449, 0.00856706,
-0.00077147, -0.00268848, 0.00210993, -0.00443934, -0.00411912,
-0.0018756, -0.00867461, -0.00581601, -0.00660835, -0.00861137,
-0.00678614, -0.01188408, -0.00589617, -0.00244323, -0.00201891,
-0.01042846, -0.01471016, -0.02167034, -0.02258554, -0.01306809,
-0.00909086, -0.01233746, -0.00595166, -0.00184208, 0.00750497,
0.01481886, 0.01761972, 0.01562886, 0.01446414, 0.01285826,
0.01357719, 0.00967613, 0.01636272, 0.01458437, 0.02280183,
0.02151903, 0.01700276, 0.01597368, 0.02114336, 0.02233297,
0.02585631, 0.02768459, 0.03519235, 0.04204535, 0.04328161,
0.04672855, 0.05046191, 0.04619848, 0.04525853, 0.05381529,
0.04598861, 0.03947394, 0.04665006, 0.05586077, 0.05617728,
0.06495018, 0.06205172, 0.05665466, 0.06500615, 0.0632062,
0.06084328, 0.05851466, 0.05659229, 0.05159347, 0.0432977,
0.0474047, 0.04231723, 0.03613176, 0.03618391, 0.03591012,
0.03885674, 0.0402686, 0.03846423, 0.04534014, 0.04721458,
0.05130912, 0.05026281, 0.05394312, 0.05529349, 0.05949243,
0.05463304, 0.06195165, 0.06767606, 0.06880985, 0.07048996,
0.07078815, 0.07420767, 0.06773439, 0.0658441, 0.06470875,
0.06302349, 0.06456876, 0.06411282, 0.06216669, 0.067094,
0.07055075, 0.07254976, 0.07119253, 0.06173308, 0.05393352,
0.05681246, 0.05250643, 0.06099845, 0.0655544, 0.06977334,
0.06636514, 0.06177949, 0.06869908, 0.06719767, 0.06178738,
0.05915714, 0.06882277, 0.06756821, 0.06507994, 0.06489791,
0.06553941, 0.073123, 0.07576757, 0.06805446, 0.06063571,
0.05033801, 0.05206971, 0.05540306, 0.05249118, 0.05755587,
0.0586174, 0.05051288, 0.0564852, 0.05757284, 0.06358355,
0.06130082, 0.04925482, 0.03834472, 0.04163981, 0.04648316,
0.04457858, 0.04324626, 0.04328791, 0.04156207, 0.04818652,
0.04972634, 0.06024123, 0.06489556, 0.06255485, 0.06069815,
0.06466389, 0.07081163, 0.07895358, 0.0881782, 0.09374151,
0.08336506, 0.08764795, 0.09080174, 0.08808926, 0.08641158,
0.07811943, 0.06885318, 0.06479503, 0.06851185, 0.07382819,
0.07047903, 0.06658251, 0.07638379, 0.08667974, 0.08867918,
0.08245323, 0.08961866, 0.09905298, 0.0961908, 0.08562706,
0.0839014, 0.0849072, 0.08338395, 0.08783487, 0.09463609,
0.10332336, 0.11806497, 0.11220297, 0.11589097, 0.11678405])
test_sharp = eval_sharp(self.long_data, 5, 0.00035)
self.assertAlmostEqual(np.nanmean(expected_sharp), test_sharp)
self.assertTrue(np.allclose(self.long_data['sharp'].values, expected_sharp, equal_nan=True))
def test_beta(self):
reference = self.test_data1
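        # Beta is conventionally cov(asset returns, reference returns) / var(reference
        # returns); the negative expected values below indicate the test series move against
        # test_data1 (the formula actually used by eval_beta() is assumed, not verified here)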
self.assertAlmostEqual(eval_beta(self.test_data2, reference, 'value'), -0.017148939)
self.assertAlmostEqual(eval_beta(self.test_data3, reference, 'value'), -0.042204233)
self.assertAlmostEqual(eval_beta(self.test_data4, reference, 'value'), -0.15652986)
self.assertAlmostEqual(eval_beta(self.test_data5, reference, 'value'), -0.049195532)
self.assertAlmostEqual(eval_beta(self.test_data6, reference, 'value'), -0.026995082)
self.assertAlmostEqual(eval_beta(self.test_data7, reference, 'value'), -0.01147809)
self.assertRaises(TypeError, eval_beta, [1, 2, 3], reference, 'value')
self.assertRaises(TypeError, eval_beta, self.test_data3, [1, 2, 3], 'value')
self.assertRaises(KeyError, eval_beta, self.test_data3, reference, 'not_found_value')
        # Test beta calculation on the long data series
expected_beta = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.04988841, -0.05127618, -0.04692104, -0.04272652, -0.04080598,
-0.0493347, -0.0460858, -0.0416761, -0.03691527, -0.03724924,
-0.03678865, -0.03987324, -0.03488321, -0.02567672, -0.02690303,
-0.03010128, -0.02437967, -0.02571932, -0.02455681, -0.02839811,
-0.03358653, -0.03396697, -0.03466321, -0.03050966, -0.0247583,
-0.01629325, -0.01880895, -0.01480403, -0.01348783, -0.00544294,
-0.00648176, -0.00467036, -0.01135331, -0.0156841, -0.02340763,
-0.02615705, -0.02730771, -0.02906174, -0.02860664, -0.02412914,
-0.02066416, -0.01744816, -0.02185133, -0.02145285, -0.02681765,
-0.02827694, -0.02394581, -0.02744096, -0.02778825, -0.02703065,
-0.03160023, -0.03615371, -0.03681072, -0.04265126, -0.04344738,
-0.04232421, -0.04705272, -0.04533344, -0.04605934, -0.05272737,
-0.05156463, -0.05134196, -0.04730733, -0.04425352, -0.03869831,
-0.04159571, -0.04223998, -0.04346747, -0.04229844, -0.04740093,
-0.04992507, -0.04621232, -0.04477644, -0.0486915, -0.04598224,
-0.04943463, -0.05006391, -0.05362256, -0.04994067, -0.05464769,
-0.05443275, -0.05513493, -0.05173594, -0.04500994, -0.04662891,
-0.03903505, -0.0419592, -0.04307773, -0.03925718, -0.03711574,
-0.03992631, -0.0433058, -0.04533641, -0.0461183, -0.05600344,
-0.05758377, -0.05959874, -0.05605942, -0.06002859, -0.06253002,
-0.06747014, -0.06427915, -0.05931947, -0.05769974, -0.04791515,
-0.05175088, -0.05748039, -0.05385232, -0.05072975, -0.05052637,
-0.05125567, -0.05005785, -0.05325104, -0.04977727, -0.04947867,
-0.05148544, -0.05739156, -0.05742069, -0.06047279, -0.0558414,
-0.06086126, -0.06265151, -0.06411129, -0.06828052, -0.06781762,
-0.07083409, -0.07211207, -0.06799162, -0.06913295, -0.06775162,
-0.0696265, -0.06678248, -0.06867502, -0.06581961, -0.07055823,
-0.06448184, -0.06097973, -0.05795587, -0.0618383, -0.06130145,
-0.06050652, -0.05936661, -0.05749424, -0.0499, -0.05050495,
-0.04962687, -0.05033439, -0.05070116, -0.05422009, -0.05369759,
-0.05548943, -0.05907353, -0.05933035, -0.05927918, -0.06227663,
-0.06011455, -0.05650432, -0.05828134, -0.05620949, -0.05715323,
-0.05482478, -0.05387113, -0.05095559, -0.05377999, -0.05334267,
-0.05220438, -0.04001521, -0.03892434, -0.03660782, -0.04282708,
-0.04324623, -0.04127048, -0.04227559, -0.04275226, -0.04347049,
-0.04125853, -0.03806295, -0.0330632, -0.03155531, -0.03277152,
-0.03304518, -0.03878731, -0.03830672, -0.03727434, -0.0370571,
-0.04509224, -0.04207632, -0.04116198, -0.04545179, -0.04584584,
-0.05287341, -0.05417433, -0.05175836, -0.05005509, -0.04268674,
-0.03442321, -0.03457309, -0.03613426, -0.03524391, -0.03629479,
-0.04361312, -0.02626705, -0.02406115, -0.03046384, -0.03181044,
-0.03375164, -0.03661673, -0.04520779, -0.04926951, -0.05726738,
-0.0584486, -0.06220608, -0.06800563, -0.06797431, -0.07562211,
-0.07481996, -0.07731229, -0.08413381, -0.09031826, -0.09691925,
-0.11018071, -0.11952675, -0.10826026, -0.11173895, -0.10756359,
-0.10775916, -0.11664559, -0.10505051, -0.10606547, -0.09855355,
-0.10004159, -0.10857084, -0.12209301, -0.11605758, -0.11105113,
-0.1155195, -0.11569505, -0.10513348, -0.09611072, -0.10719791,
-0.10843965, -0.11025856, -0.10247839, -0.10554044, -0.10927647,
-0.10645088, -0.09982498, -0.10542734, -0.09631372, -0.08229695])
test_beta_mean = eval_beta(self.long_data, self.long_bench, 'value')
test_beta_roll = self.long_data['beta'].values
self.assertAlmostEqual(test_beta_mean, np.nanmean(expected_beta))
self.assertTrue(np.allclose(test_beta_roll, expected_beta, equal_nan=True))
def test_alpha(self):
reference = self.test_data1
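        # Alpha is conventionally the return earned in excess of a CAPM-style expected return
        # built from beta and the reference; eval_alpha() is also given a numeric argument (5)
        # and a final parameter (0.5 or 0.92) that is omitted in the long-data call further
        # below, so it appears to have a default value (observations from the calls here,
        # not documentation)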
self.assertAlmostEqual(eval_alpha(self.test_data2, 5, reference, 'value', 0.5), 11.63072977)
self.assertAlmostEqual(eval_alpha(self.test_data3, 5, reference, 'value', 0.5), 1.886590071)
self.assertAlmostEqual(eval_alpha(self.test_data4, 5, reference, 'value', 0.5), 6.827021872)
self.assertAlmostEqual(eval_alpha(self.test_data5, 5, reference, 'value', 0.92), -1.192265168)
self.assertAlmostEqual(eval_alpha(self.test_data6, 5, reference, 'value', 0.92), -1.437142359)
self.assertAlmostEqual(eval_alpha(self.test_data7, 5, reference, 'value', 0.92), -1.781311545)
        # Test alpha calculation on the long data series
expected_alpha = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.09418119, -0.11188463, -0.17938358, -0.15588172, -0.1462678,
-0.13089586, -0.10780125, -0.09102891, -0.03987585, -0.06075686,
-0.02459503, -0.04104284, -0.0444565, -0.04074585, 0.02191275,
0.02255955, -0.05583375, -0.05875539, -0.06055551, -0.09648245,
-0.07913737, -0.10627829, -0.12320965, -0.12368335, -0.1506743,
-0.15768033, -0.13638829, -0.13065298, -0.14537834, -0.127428,
-0.15504529, -0.18184636, -0.12652146, -0.09190138, -0.14847221,
-0.15840648, -0.1525789, -0.11859418, -0.14700954, -0.16295761,
-0.16051645, -0.10364859, -0.11961134, -0.10258267, -0.08090148,
-0.05727746, -0.0429945, -0.04672356, -0.03581408, -0.0439215,
-0.03429495, -0.0260362, -0.01075022, 0.04931808, 0.02779388,
0.03984083, 0.08311951, 0.08995566, 0.10522428, 0.16159058,
0.14238174, 0.14759783, 0.16257712, 0.158908, 0.11302115,
0.0909566, 0.08272888, 0.15261884, 0.10546376, 0.04990313,
-0.01284111, -0.02720704, 0.00454725, -0.03965491, -0.03818265,
-0.02186992, -0.06574751, -0.04846454, -0.05204211, -0.06316498,
-0.05095099, -0.08502656, -0.04681162, -0.02362027, -0.02205091,
-0.07706374, -0.10371841, -0.14434688, -0.14797935, -0.09055402,
-0.06739549, -0.08824959, -0.04855888, -0.02291244, 0.04027138,
0.09370505, 0.11472939, 0.10243593, 0.0921445, 0.07662648,
0.07946651, 0.05450718, 0.10497677, 0.09068334, 0.15462924,
0.14231034, 0.10544952, 0.09980256, 0.14035223, 0.14942974,
0.17624102, 0.19035477, 0.2500807, 0.30724652, 0.31768915,
0.35007521, 0.38412975, 0.34356521, 0.33614463, 0.41206165,
0.33999177, 0.28045963, 0.34076789, 0.42220356, 0.42314636,
0.50790423, 0.47713348, 0.42520169, 0.50488411, 0.48705211,
0.46252601, 0.44325578, 0.42640573, 0.37986783, 0.30652822,
0.34503393, 0.2999069, 0.24928617, 0.24730218, 0.24326897,
0.26657905, 0.27861168, 0.26392824, 0.32552649, 0.34177792,
0.37837011, 0.37025267, 0.4030612, 0.41339361, 0.45076809,
0.40383354, 0.47093422, 0.52505036, 0.53614256, 0.5500943,
0.55319293, 0.59021451, 0.52358459, 0.50605947, 0.49359168,
0.47895956, 0.49320243, 0.4908336, 0.47310767, 0.51821564,
0.55105932, 0.57291504, 0.5599809, 0.46868842, 0.39620087,
0.42086934, 0.38317217, 0.45934108, 0.50048866, 0.53941991,
0.50676751, 0.46500915, 0.52993663, 0.51668366, 0.46405428,
0.44100603, 0.52726147, 0.51565458, 0.49186248, 0.49001081,
0.49367648, 0.56422294, 0.58882785, 0.51334664, 0.44386256,
0.35056709, 0.36490029, 0.39205071, 0.3677061, 0.41134736,
0.42315067, 0.35356394, 0.40324562, 0.41340007, 0.46503322,
0.44355762, 0.34854314, 0.26412842, 0.28633753, 0.32335224,
0.30761141, 0.29709569, 0.29570487, 0.28000063, 0.32802547,
0.33967726, 0.42511212, 0.46252357, 0.44244974, 0.42152907,
0.45436727, 0.50482359, 0.57339198, 0.6573356, 0.70912003,
0.60328917, 0.6395092, 0.67015805, 0.64241557, 0.62779142,
0.55028063, 0.46448736, 0.43709245, 0.46777983, 0.51789439,
0.48594916, 0.4456216, 0.52008189, 0.60548684, 0.62792473,
0.56645031, 0.62766439, 0.71829315, 0.69481356, 0.59550329,
0.58133754, 0.59014148, 0.58026655, 0.61719273, 0.67373203,
0.75573056, 0.89501633, 0.8347253, 0.87964685, 0.89015835])
test_alpha_mean = eval_alpha(self.long_data, 100, self.long_bench, 'value')
test_alpha_roll = self.long_data['alpha'].values
self.assertAlmostEqual(test_alpha_mean, np.nanmean(expected_alpha))
self.assertTrue(np.allclose(test_alpha_roll, expected_alpha, equal_nan=True))
def test_calmar(self):
"""test evaluate function eval_calmar()"""
pass
def test_benchmark(self):
reference = self.test_data1
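        # eval_benchmark() evaluates the reference series itself, which is why every call
        # below yields the same (total return, annualized-style return) pair regardless of
        # which test series is passed in (inferred from the identical expected values)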
tr, yr = eval_benchmark(self.test_data2, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data3, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data4, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data5, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data6, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data7, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
def test_evaluate(self):
pass
class TestLoop(unittest.TestCase):
"""通过一个假设但精心设计的例子来测试loop_step以及loop方法的正确性"""
def setUp(self):
        # Carefully designed simulated share names, trading dates, and share prices
self.shares = ['share1', 'share2', 'share3', 'share4', 'share5', 'share6', 'share7']
self.dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08',
'2016/09/09', '2016/09/12', '2016/09/13', '2016/09/14', '2016/09/15',
'2016/09/16', '2016/09/19', '2016/09/20', '2016/09/21', '2016/09/22',
'2016/09/23', '2016/09/26', '2016/09/27', '2016/09/28', '2016/09/29',
'2016/09/30', '2016/10/10', '2016/10/11', '2016/10/12', '2016/10/13',
'2016/10/14', '2016/10/17', '2016/10/18', '2016/10/19', '2016/10/20',
'2016/10/21', '2016/10/23', '2016/10/24', '2016/10/25', '2016/10/26',
'2016/10/27', '2016/10/29', '2016/10/30', '2016/10/31', '2016/11/01',
'2016/11/02', '2016/11/05', '2016/11/06', '2016/11/07', '2016/11/08',
'2016/11/09', '2016/11/12', '2016/11/13', '2016/11/14', '2016/11/15',
'2016/11/16', '2016/11/19', '2016/11/20', '2016/11/21', '2016/11/22']
self.dates = [pd.Timestamp(date_text) for date_text in self.dates]
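        # Price matrix: one row per trading date above (100 rows), one column per share (7 columns)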
self.prices = np.array([[5.35, 5.09, 5.03, 4.98, 4.50, 5.09, 4.75],
[5.66, 4.84, 5.21, 5.44, 4.35, 5.06, 4.48],
[5.79, 4.60, 5.02, 5.45, 4.07, 4.76, 4.56],
[5.56, 4.63, 5.50, 5.74, 3.88, 4.62, 4.62],
[5.88, 4.64, 5.07, 5.46, 3.74, 4.63, 4.62],
[6.25, 4.51, 5.11, 5.45, 3.98, 4.25, 4.59],
[5.93, 4.96, 5.15, 5.24, 4.08, 4.13, 4.33],
[6.39, 4.65, 5.02, 5.47, 4.00, 3.91, 3.88],
[6.31, 4.26, 5.10, 5.58, 4.28, 3.77, 3.47],
[5.86, 3.77, 5.24, 5.36, 4.01, 3.43, 3.51],
[5.61, 3.39, 4.93, 5.38, 4.14, 3.68, 3.83],
[5.31, 3.76, 4.96, 5.30, 4.49, 3.63, 3.67],
[5.40, 4.06, 5.40, 5.77, 4.49, 3.94, 3.79],
[5.03, 3.87, 5.74, 5.75, 4.46, 4.40, 4.18],
[5.38, 3.91, 5.53, 6.15, 4.13, 4.03, 4.02],
[5.79, 4.13, 5.79, 6.04, 3.79, 3.93, 4.41],
[6.27, 4.27, 5.68, 6.01, 4.23, 3.50, 4.65],
[6.59, 4.57, 5.90, 5.71, 4.57, 3.39, 4.89],
[6.91, 5.04, 5.75, 5.23, 4.92, 3.30, 4.41],
[6.71, 5.31, 6.11, 5.47, 5.28, 3.25, 4.66],
[6.33, 5.40, 5.77, 5.79, 5.67, 2.94, 4.37],
[6.07, 5.21, 5.85, 5.82, 6.00, 2.71, 4.58],
[5.98, 5.06, 5.61, 5.61, 5.89, 2.55, 4.76],
[6.46, 4.69, 5.75, 5.31, 5.55, 2.21, 4.37],
[6.95, 5.12, 5.50, 5.24, 5.39, 2.29, 4.16],
[6.77, 5.27, 5.14, 5.41, 5.26, 2.21, 4.02],
[6.70, 5.72, 5.31, 5.60, 5.31, 2.04, 3.77],
[6.28, 6.10, 5.68, 5.28, 5.22, 2.39, 3.38],
[6.61, 6.27, 5.73, 4.99, 4.90, 2.30, 3.07],
[6.25, 6.49, 6.04, 5.09, 4.57, 2.41, 2.90],
[6.47, 6.16, 6.27, 5.39, 4.96, 2.40, 2.50],
[6.45, 6.26, 6.60, 5.58, 4.82, 2.79, 2.76],
[6.88, 6.39, 6.10, 5.33, 4.39, 2.67, 2.29],
[7.00, 6.58, 6.25, 5.48, 4.63, 2.27, 2.17],
[6.59, 6.20, 6.73, 5.10, 5.05, 2.09, 1.84],
[6.59, 5.70, 6.91, 5.39, 4.68, 2.55, 1.83],
[6.64, 5.20, 7.01, 5.30, 5.02, 2.22, 2.21],
[6.38, 5.37, 7.36, 5.04, 4.84, 2.59, 2.00],
[6.10, 5.40, 7.72, 5.51, 4.60, 2.59, 1.76],
[6.35, 5.22, 7.68, 5.43, 4.66, 2.95, 1.27],
[6.52, 5.38, 7.62, 5.23, 4.41, 2.69, 1.40],
[6.87, 5.53, 7.74, 4.99, 4.87, 2.20, 1.11],
[6.84, 6.03, 7.53, 5.43, 4.42, 2.69, 1.60],
[7.09, 5.77, 7.46, 5.40, 4.08, 2.65, 1.23],
[6.88, 5.66, 7.84, 5.60, 4.16, 2.63, 1.59],
[6.84, 6.08, 8.11, 5.66, 4.10, 2.14, 1.50],
[6.98, 5.62, 8.04, 6.01, 4.43, 2.39, 1.80],
[7.02, 5.63, 7.65, 5.64, 4.07, 1.95, 1.55],
[7.13, 6.11, 7.52, 5.67, 3.97, 2.32, 1.35],
[7.59, 6.03, 7.67, 5.30, 4.16, 2.69, 1.51],
[7.61, 6.27, 7.47, 4.91, 4.12, 2.51, 1.08],
[7.21, 6.28, 7.44, 5.37, 4.04, 2.62, 1.06],
[7.48, 6.52, 7.59, 5.75, 3.84, 2.16, 1.43],
[7.66, 7.00, 7.94, 6.08, 3.46, 2.35, 1.43],
[7.51, 7.34, 8.25, 6.58, 3.18, 2.31, 1.74],
[7.12, 7.34, 7.77, 6.78, 3.10, 1.96, 1.44],
[6.97, 7.68, 8.03, 7.20, 3.55, 2.35, 1.83],
[6.67, 8.09, 7.87, 7.65, 3.66, 2.58, 1.71],
[6.20, 7.68, 7.58, 8.00, 3.66, 2.40, 2.12],
[6.34, 7.58, 7.33, 7.92, 3.29, 2.20, 2.45],
[6.22, 7.46, 7.22, 8.30, 2.80, 2.31, 2.85],
[5.98, 7.59, 6.86, 8.46, 2.88, 2.16, 2.79],
[6.37, 7.19, 7.18, 7.99, 3.04, 2.16, 2.91],
[6.56, 7.40, 7.54, 8.19, 3.45, 2.20, 3.26],
[6.26, 7.48, 7.24, 8.61, 3.88, 1.73, 3.14],
[6.69, 7.93, 6.85, 8.66, 3.58, 1.93, 3.53],
[7.13, 8.23, 6.60, 8.91, 3.60, 2.25, 3.65],
[6.83, 8.35, 6.65, 9.08, 3.97, 2.69, 3.69],
[7.31, 8.44, 6.74, 9.34, 4.05, 2.59, 3.50],
[7.43, 8.35, 7.19, 8.96, 4.40, 2.14, 3.25],
[7.54, 8.58, 7.14, 8.98, 4.06, 1.68, 3.64],
[7.18, 8.82, 6.88, 8.50, 3.60, 1.98, 4.00],
[7.21, 9.09, 7.14, 8.65, 3.61, 2.14, 3.63],
[7.45, 9.02, 7.30, 8.94, 4.10, 1.89, 3.78],
[7.37, 8.87, 6.95, 8.63, 3.74, 1.97, 3.42],
[6.88, 9.22, 7.02, 8.65, 4.02, 1.99, 3.76],
[7.08, 9.04, 7.38, 8.40, 3.95, 2.37, 3.62],
[6.75, 8.60, 7.50, 8.38, 3.81, 2.14, 3.67],
[6.60, 8.48, 7.60, 8.23, 3.71, 2.35, 3.61],
[6.21, 8.71, 7.15, 8.04, 3.94, 1.86, 3.39],
[6.36, 8.79, 7.30, 7.91, 4.43, 2.14, 3.43],
[6.82, 8.93, 7.80, 7.57, 4.07, 2.39, 3.33],
[6.45, 9.36, 8.15, 7.73, 4.04, 2.53, 3.03],
[6.85, 9.68, 8.40, 7.74, 4.34, 2.47, 3.28],
[6.48, 10.16, 8.87, 8.07, 4.80, 2.93, 3.46],
[6.10, 10.56, 8.53, 7.99, 5.18, 3.09, 3.25],
[5.64, 10.63, 8.94, 7.92, 4.90, 2.93, 2.95],
[6.01, 10.55, 8.52, 8.40, 5.40, 3.22, 2.87],
[6.21, 10.65, 8.80, 8.80, 5.73, 3.06, 2.63],
[6.61, 10.55, 8.92, 8.47, 5.62, 2.90, 2.40],
[7.02, 10.19, 9.20, 8.07, 5.20, 2.68, 2.53],
[7.04, 10.48, 8.71, 7.87, 4.85, 2.46, 2.96],
[6.77, 10.36, 8.25, 8.02, 5.18, 2.41, 3.26],
[7.09, 10.03, 8.19, 8.39, 4.72, 2.74, 2.97],
[6.65, 10.24, 7.80, 8.69, 4.62, 3.15, 3.16],
[7.07, 10.01, 7.69, 8.81, 4.55, 3.40, 3.58],
[6.80, 10.14, 7.23, 8.99, 4.37, 3.82, 3.23],
[6.79, 10.31, 6.98, 9.10, 4.26, 4.02, 3.62],
[6.48, 9.88, 7.07, 8.90, 4.25, 3.76, 3.13],
[6.39, 10.05, 6.95, 8.87, 4.59, 4.10, 2.93]])
        # Carefully designed simulated PT (position target) holding-position signals:
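        # Each row holds the target portfolio weights of the 7 shares on the corresponding
        # date; rows sum to at most 1.0, with the remainder presumably held as cash
        # (the cash interpretation is an assumption)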
self.pt_signals = np.array([[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.000, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.250, 0.150, 0.000, 0.300, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.370, 0.193, 0.120, 0.072, 0.072, 0.072, 0.096],
[0.000, 0.222, 0.138, 0.222, 0.083, 0.222, 0.111],
[0.000, 0.222, 0.138, 0.222, 0.083, 0.222, 0.111],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.047, 0.380, 0.238, 0.000, 0.142, 0.000, 0.190],
[0.047, 0.380, 0.238, 0.000, 0.142, 0.000, 0.190],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.045, 0.454, 0.227, 0.000, 0.000, 0.000, 0.272],
[0.045, 0.454, 0.227, 0.000, 0.000, 0.000, 0.272],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300]])
# Carefully designed simulated PS (proportional) trading signals, closely mirroring the simulated PT signals
self.ps_signals = np.array([[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.100, 0.150],
[0.200, 0.200, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.100, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, -0.750, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.333, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, -0.500, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, -1.000],
[0.000, 0.000, 0.000, 0.000, 0.200, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.500, 0.000, 0.000, 0.150, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.200, 0.000, -1.000, 0.200, 0.000],
[0.500, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.200, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, -0.500, 0.200],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.200, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.150, 0.000, 0.000],
[-1.000, 0.000, 0.000, 0.250, 0.000, 0.250, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.250, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, -1.000, 0.000, -1.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.800, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.100, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, -1.000, 0.000, 0.100],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, -1.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-1.000, 0.000, 0.150, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000]])
# Carefully designed simulated VS (volume) share trading signals, similar in pattern to the simulated PS signals
self.vs_signals = np.array([[000, 000, 000, 000, 500, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 300, 300],
[400, 400, 000, 000, 000, 000, 000],
[000, 000, 250, 000, 000, 000, 000],
[000, 000, 000, 000, -400, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, -200, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, -300],
[000, 000, 000, 000, 500, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 000, 300, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 400, 000, -300, 600, 000],
[500, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[600, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, -400, 600],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 500, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 300, 000, 000],
[-500, 000, 000, 500, 000, 200, 000],
[000, 000, 000, 000, 000, 000, 000],
[500, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, -700, 000, -600, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-400, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 300, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, -600, 000, 300],
[000, 000, 000, 000, 000, 000, 000],
[000, -300, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 700, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000]])
# Carefully designed simulated multi-price trading signals: 50 trading days of operations on three shares
self.multi_shares = ['000010', '000030', '000039']
self.multi_dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08']
self.multi_dates = [pd.Timestamp(date_text) for date_text in self.multi_dates]
# The trades are executed against three price series: open, high, and close
self.multi_prices_open = np.array([[10.02, 9.88, 7.26],
[10.00, 9.88, 7.00],
[9.98, 9.89, 6.88],
[9.97, 9.75, 6.91],
[9.99, 9.74, np.nan],
[10.01, 9.80, 6.81],
[10.04, 9.62, 6.63],
[10.06, 9.65, 6.45],
[10.06, 9.58, 6.16],
[10.11, 9.67, 6.24],
[10.11, 9.81, 5.96],
[10.07, 9.80, 5.97],
[10.06, 10.00, 5.96],
[10.09, 9.95, 6.20],
[10.03, 10.10, 6.35],
[10.02, 10.06, 6.11],
[10.06, 10.14, 6.37],
[10.08, 9.90, 5.58],
[9.99, 10.20, 5.65],
[10.00, 10.29, 5.65],
[10.03, 9.86, 5.19],
[10.02, 9.48, 5.42],
[10.06, 10.01, 6.30],
[10.03, 10.24, 6.15],
[9.97, 10.26, 6.05],
[9.94, 10.24, 5.89],
[9.83, 10.12, 5.22],
[9.78, 10.65, 5.20],
[9.77, 10.64, 5.07],
[9.91, 10.56, 6.04],
[9.92, 10.42, 6.12],
[9.97, 10.43, 5.85],
[9.91, 10.29, 5.67],
[9.90, 10.30, 6.02],
[9.88, 10.44, 6.04],
[9.91, 10.60, 7.07],
[9.63, 10.67, 7.64],
[9.64, 10.46, 7.99],
[9.57, 10.39, 7.59],
[9.55, 10.90, 8.73],
[9.58, 11.01, 8.72],
[9.61, 11.01, 8.97],
[9.62, np.nan, 8.58],
[9.55, np.nan, 8.71],
[9.57, 10.82, 8.77],
[9.61, 11.02, 8.40],
[9.63, 10.96, 7.95],
[9.64, 11.55, 7.76],
[9.61, 11.74, 8.25],
[9.56, 11.80, 7.51]])
self.multi_prices_high = np.array([[10.07, 9.91, 7.41],
[10.00, 10.04, 7.31],
[10.00, 9.93, 7.14],
[10.00, 10.04, 7.00],
[10.03, 9.84, np.nan],
[10.03, 9.88, 6.82],
[10.04, 9.99, 6.96],
[10.09, 9.70, 6.85],
[10.10, 9.67, 6.50],
[10.14, 9.71, 6.34],
[10.11, 9.85, 6.04],
[10.10, 9.90, 6.02],
[10.09, 10.00, 6.12],
[10.09, 10.20, 6.38],
[10.10, 10.11, 6.43],
[10.05, 10.18, 6.46],
[10.07, 10.21, 6.43],
[10.09, 10.26, 6.27],
[10.10, 10.38, 5.77],
[10.00, 10.47, 6.01],
[10.04, 10.42, 5.67],
[10.04, 10.07, 5.67],
[10.06, 10.24, 6.35],
[10.09, 10.27, 6.32],
[10.05, 10.38, 6.43],
[9.97, 10.43, 6.36],
[9.96, 10.39, 5.79],
[9.86, 10.65, 5.47],
[9.77, 10.84, 5.65],
[9.92, 10.65, 6.04],
[9.94, 10.73, 6.14],
[9.97, 10.63, 6.23],
[9.97, 10.51, 5.83],
[9.92, 10.35, 6.25],
[9.92, 10.46, 6.27],
[9.92, 10.63, 7.12],
[9.93, 10.74, 7.82],
[9.64, 10.76, 8.14],
[9.58, 10.54, 8.27],
[9.60, 11.02, 8.92],
[9.58, 11.12, 8.76],
[9.62, 11.17, 9.15],
[9.62, np.nan, 8.90],
[9.64, np.nan, 9.01],
[9.59, 10.92, 9.16],
[9.62, 11.15, 9.00],
[9.63, 11.11, 8.27],
[9.70, 11.55, 7.99],
[9.66, 11.95, 8.33],
[9.64, 11.93, 8.25]])
self.multi_prices_close = np.array([[10.04, 9.68, 6.64],
[10.00, 9.87, 7.26],
[10.00, 9.86, 7.03],
[9.99, 9.87, 6.87],
[9.97, 9.79, np.nan],
[9.99, 9.82, 6.64],
[10.03, 9.80, 6.85],
[10.03, 9.66, 6.70],
[10.06, 9.62, 6.39],
[10.06, 9.58, 6.22],
[10.11, 9.69, 5.92],
[10.09, 9.78, 5.91],
[10.07, 9.75, 6.11],
[10.06, 9.96, 5.91],
[10.09, 9.90, 6.23],
[10.03, 10.04, 6.28],
[10.03, 10.06, 6.28],
[10.06, 10.08, 6.27],
[10.08, 10.24, 5.70],
[10.00, 10.24, 5.56],
[9.99, 10.24, 5.67],
[10.03, 9.86, 5.16],
[10.03, 10.13, 5.69],
[10.06, 10.12, 6.32],
[10.03, 10.10, 6.14],
[9.97, 10.25, 6.25],
[9.94, 10.24, 5.79],
[9.83, 10.22, 5.26],
[9.77, 10.75, 5.05],
[9.84, 10.64, 5.45],
[9.91, 10.56, 6.06],
[9.93, 10.60, 6.21],
[9.96, 10.42, 5.69],
[9.91, 10.25, 5.46],
[9.91, 10.24, 6.02],
[9.88, 10.49, 6.69],
[9.91, 10.57, 7.43],
[9.64, 10.63, 7.72],
[9.56, 10.48, 8.16],
[9.57, 10.37, 7.83],
[9.55, 10.96, 8.70],
[9.57, 11.02, 8.71],
[9.61, np.nan, 8.88],
[9.61, np.nan, 8.54],
[9.55, 10.88, 8.87],
[9.57, 10.87, 8.87],
[9.63, 11.01, 8.18],
[9.64, 11.01, 7.80],
[9.65, 11.58, 7.97],
[9.62, 11.80, 8.25]])
# The trading signals come in three groups, applied to the open, high, and close prices respectively
# The key point is how the share delivery period is handled: when it is non-zero, delivery settles in units of trading days
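# (Added note, assumption: a delivery period of N is taken to mean that shares bought on day T
# only become available for sale N trading days later, and cash from a sale becomes spendable
# only after its own cash delivery period; the expected-result arrays further below differ
# between the *_sb00 / *_bs00 / *_sb20 / *_bs21 variants only because of these settlement
# delays and the sell-before-buy vs. buy-before-sell ordering.)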
self.multi_signals = []
# The first group of multi_signals is the open-price signal
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.000, 0.000],
[0.000, -0.500, 0.000],
[0.000, -0.500, 0.000],
[0.000, 0.000, 0.000],
[0.150, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.300, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.300],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.350, 0.250],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.100, 0.000, 0.350],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.050, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
# The second group of signals is the high-price signal
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.150, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, -0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.200],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
# The third group of signals is the close-price signal
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-0.500, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, -0.800],
[0.000, 0.000, 0.000],
[0.000, -1.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-0.750, 0.000, 0.000],
[0.000, 0.000, -0.850],
[0.000, 0.000, 0.000],
[0.000, -0.700, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, -1.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-1.000, 0.000, 0.000],
[0.000, -1.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
# The backtest also needs three groups of prices: open, high, and close
self.multi_histories = []
# The first price history is the open prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_open,
columns=self.multi_shares,
index=self.multi_dates
)
)
# The second price history is the high prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_high,
columns=self.multi_shares,
index=self.multi_dates
)
)
# The third price history is the close prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_close,
columns=self.multi_shares,
index=self.multi_dates
)
)
# Set up the backtest parameters
self.cash = qt.CashPlan(['2016/07/01', '2016/08/12', '2016/09/23'], [10000, 10000, 10000])
self.rate = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=0,
sell_min=0,
slipage=0)
self.rate2 = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=10,
sell_min=5,
slipage=0)
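# (Added note, assumption about qt.Cost semantics: buy_rate/sell_rate are taken to be
# proportional fee rates, buy_fix/sell_fix fixed fees, and buy_min/sell_min per-trade
# minimum fees, so self.rate models zero-cost trading while self.rate2 only enforces
# minimum fees of 10 per buy and 5 per sell.)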
self.pt_signal_hp = dataframe_to_hp(
pd.DataFrame(self.pt_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.ps_signal_hp = dataframe_to_hp(
pd.DataFrame(self.ps_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.vs_signal_hp = dataframe_to_hp(
pd.DataFrame(self.vs_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.multi_signal_hp = stack_dataframes(
self.multi_signals,
stack_along='htypes',
htypes='open, high, close'
)
self.history_list = dataframe_to_hp(
pd.DataFrame(self.prices, index=self.dates, columns=self.shares),
htypes='close'
)
self.multi_history_list = stack_dataframes(
self.multi_histories,
stack_along='htypes',
htypes='open, high, close'
)
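# (Added note, assumption: dataframe_to_hp is taken to wrap a single DataFrame into a
# HistoryPanel with one htype, while stack_dataframes stacks several DataFrames along
# the htypes axis, so the multi-signal and multi-history panels carry matching
# 'open', 'high', and 'close' layers.)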
# Expected backtest results for the simulated PT signals
# PT signals, sell before buy, delivery period 0
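# (Added note, assumption: each row of the expected-result arrays below appears to list the
# holdings of the seven shares, followed by cash, fee, and total asset value for one trading day.)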
self.pt_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 6035.8333, 0.0000, 9761.1111],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9674.8209],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9712.5872],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9910.7240],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9919.3782],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9793.0692],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9513.8217],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9123.5935],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9000.5995],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9053.4865],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9248.7142],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9161.1372],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9197.3369],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9504.6981],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9875.2461],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10241.5400],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10449.2398],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10628.3269],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10500.7893],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 0.0000, 5233.1396, 0.0000, 10449.2776],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10338.2857],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10194.3474],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10471.0008],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10411.2629],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10670.0618],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10652.4799],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10526.1488],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10458.6614],
[101.4983, 417.9188, 821.7315, 288.6672, 0.0000, 2576.1284, 0.0000, 4487.0722, 0.0000, 20609.0270],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21979.4972],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21880.9628],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21630.0454],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 20968.0007],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21729.9339],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21107.6400],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21561.1745],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21553.0916],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22316.9366],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22084.2862],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21777.3543],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22756.8225],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22843.4697],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22762.1766],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22257.0973],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 23136.5259],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 21813.7852],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22395.3204],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 23717.6858],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22715.4263],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 22498.3254],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 23341.1733],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24162.3941],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24847.1508],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 23515.9755],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24555.8997],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24390.6372],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24073.3309],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24394.6500],
[2076.3314, 903.0334, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 3487.5655, 0.0000, 34904.8150],
[0.0000, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 4608.8037, 0.0000, 34198.4475],
[0.0000, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 4608.8037, 0.0000, 33753.0190],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 34953.8178],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 33230.2498],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 35026.7819],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 36976.2649],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 38673.8147],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 38717.3429],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 36659.0854],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 35877.9607],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36874.4840],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37010.2695],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 38062.3510],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36471.1357],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37534.9927],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37520.2569],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36747.7952],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36387.9409],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 35925.9715],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36950.7028],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 37383.2463],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 37761.2724],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 39548.2653],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41435.1291],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41651.6261],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41131.9920],
[644.7274, 1657.3981, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 0.0000, 0.0000, 41286.4702],
[644.7274, 1657.3981, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 0.0000, 0.0000, 40978.7259],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 40334.5453],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 41387.9172],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42492.6707],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42953.7188],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42005.1092],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42017.9106],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 43750.2824],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 41766.8679],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 42959.1150],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 41337.9320],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 40290.3688]])
# PT signals, buy before sell, delivery period 0
self.pt_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 6035.8333, 0.0000, 9761.1111],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9674.8209],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9712.5872],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9910.7240],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9919.3782],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9793.0692],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9513.8217],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9123.5935],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9000.5995],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9053.4865],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9248.7142],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9161.1372],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9197.3369],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9504.6981],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9875.2461],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10241.5400],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10449.2398],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10628.3269],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10500.7893],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 0.0000, 5233.1396, 0.0000, 10449.2776],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10338.2857],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10194.3474],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10471.0008],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10411.2629],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10670.0618],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10652.4799],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10526.1488],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10458.6614],
[101.4983, 417.9188, 821.7315, 288.6672, 0.0000, 2576.1284, 0.0000, 4487.0722, 0.0000, 20609.0270],
[797.1684, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 2703.5808, 0.0000, 21979.4972],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21700.7241],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21446.6630],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 20795.3593],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21557.2924],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 20933.6887],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21392.5581],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21390.2918],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22147.7562],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21910.9053],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21594.2980],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22575.4380],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22655.8312],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22578.4365],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22073.2661],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22955.2367],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 21628.1647],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22203.4237],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 23516.2598],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 22505.8428],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 22199.1042],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23027.9302],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23848.5806],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24540.8871],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23205.6838],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24267.6685],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24115.3796],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23814.3667],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24133.6611],
[2061.6837, 896.6628, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 3285.8830, 0.0000, 34658.5742],
[0.0000, 896.6628, 507.6643, 466.6033, 0.0000, 1523.7106, 1467.7407, 12328.8684, 0.0000, 33950.7917],
[0.0000, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 4380.3797, 0.0000, 33711.4045],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 34922.0959],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 33237.1081],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 35031.8071],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 36976.3376],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 38658.5245],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 38712.2854],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 36655.3125],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 35904.3692],
[644.1423, 902.2617, 514.8253, 0.0000, 15.5990, 0.0000, 1467.7407, 14821.9004, 0.0000, 36873.9080],
[644.1423, 902.2617, 514.8253, 0.0000, 1220.8683, 0.0000, 1467.7407, 10470.8781, 0.0000, 36727.7895],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37719.9840],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36138.1277],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37204.0760],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37173.1201],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36398.2298],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36034.2178],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 35583.6399],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36599.2645],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 37013.3408],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 37367.7449],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 39143.8273],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 41007.3074],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 41225.4657],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 40685.9525],
[644.1423, 1646.4805, 1033.4242, 0.0000, 0.0000, 0.0000, 1467.7407, 6592.6891, 0.0000, 40851.5435],
[644.1423, 1646.4805, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 0.0000, 0.0000, 41082.1210],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 40385.0135],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 41455.1513],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42670.6769],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 43213.7233],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42205.2480],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42273.9386],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 44100.0777],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42059.7208],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 43344.9653],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 41621.0324],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 40528.0648]])
# PT signals, sell before buy, delivery period of 2 days for shares and 0 days for cash, so proceeds from sales can immediately fund further purchases
self.pt_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 321.089, 6035.833, 0.000, 9761.111],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9674.821],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9712.587],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9910.724],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9919.378],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9793.069],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9513.822],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9123.593],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9000.600],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9053.487],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9248.714],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9161.137],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9197.337],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9504.698],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9875.246],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10241.540],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10449.240],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10628.327],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10500.789],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 0.000, 5233.140, 0.000, 10449.278],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10338.286],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10194.347],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10471.001],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10411.263],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10670.062],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10652.480],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10526.149],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10458.661],
[101.498, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 4487.072, 0.000, 20609.027],
[797.168, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 0.000, 0.000, 21979.497],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21584.441],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21309.576],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 20664.323],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21445.597],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 20806.458],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21288.441],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21294.365],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 22058.784],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21805.540],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21456.333],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22459.720],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22611.602],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22470.912],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21932.634],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22425.864],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21460.103],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22376.968],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23604.295],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 22704.826],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 22286.293],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23204.755],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24089.017],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24768.185],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23265.196],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24350.540],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24112.706],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23709.076],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24093.545],
[2060.275, 896.050, 504.579, 288.667, 0.000, 763.410, 1577.904, 2835.944, 0.000, 34634.888],
[578.327, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 732.036, 0.000, 33912.261],
[0.000, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 4415.981, 0.000, 33711.951],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 34951.433],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 33224.596],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 35065.209],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 37018.699],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 38706.035],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 38724.569],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 36647.268],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 35928.930],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36967.229],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37056.598],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 38129.862],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36489.333],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37599.602],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37566.823],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36799.280],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36431.196],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 35940.942],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36973.050],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 37393.292],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 37711.276],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 39515.991],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41404.440],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41573.523],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41011.613],
[644.683, 1606.361, 1074.629, 0.000, 0.000, 0.000, 3896.406, 0.000, 0.000, 41160.181],
[644.683, 1606.361, 1074.629, 0.000, 0.000, 0.000, 3896.406, 0.000, 0.000, 40815.512],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 40145.531],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41217.281],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 42379.061],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 42879.589],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41891.452],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41929.003],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 43718.052],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41685.916],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 42930.410],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 41242.589],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 40168.084]])
# PT signals, buy before sell, delivery period of 2 days for shares and 1 day for cash
self.pt_res_bs21 = np.array([
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 321.089, 6035.833, 0.000, 9761.111],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9674.821],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9712.587],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9910.724],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9919.378],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9793.069],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9513.822],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9123.593],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9000.600],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9053.487],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9248.714],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9161.137],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9197.337],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9504.698],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9875.246],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10241.540],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10449.240],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10628.327],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10500.789],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 0.000, 5233.140, 0.000, 10449.278],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10338.286],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10194.347],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10471.001],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10411.263],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10670.062],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10652.480],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10526.149],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10458.661],
[101.498, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 4487.072, 0.000, 20609.027],
[797.168, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 0.000, 0.000, 21979.497],
[797.168, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 2475.037, 0.000, 21584.441],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21266.406],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 20623.683],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21404.957],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 20765.509],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21248.748],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21256.041],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 22018.958],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21764.725],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21413.241],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22417.021],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22567.685],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22427.699],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21889.359],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22381.938],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21416.358],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22332.786],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 0.000, 2386.698, 0.000, 23557.595],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 23336.992],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 22907.742],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24059.201],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24941.902],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25817.514],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24127.939],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25459.688],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25147.370],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25005.842],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 1086.639, 2752.004, 0.000, 25598.700],
[2138.154, 929.921, 503.586, 288.667, 0.000, 761.900, 1086.639, 4818.835, 0.000, 35944.098],
[661.356, 929.921, 503.586, 553.843, 0.000, 1954.237, 1086.639, 8831.252, 0.000, 35237.243],
[0.000, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 9460.955, 0.000, 35154.442],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 36166.632],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 34293.883],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 35976.901],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 37848.552],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 39512.574],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 39538.024],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 37652.984],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 36687.909],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37749.277],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37865.518],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38481.190],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37425.087],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38051.341],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38065.478],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37429.495],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37154.479],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 36692.717],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 37327.055],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 37937.630],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 38298.645],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 39689.369],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 40992.397],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 41092.265],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 40733.622],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 3726.579, 0.000, 0.000, 40708.515],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 3726.579, 0.000, 0.000, 40485.321],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 39768.059],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 40519.595],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 41590.937],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 42354.983],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 41175.149],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 41037.902],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 42706.213],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 40539.205],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 41608.692],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 39992.148],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 39134.828]])
# Simulated backtest results for PS signals
# PS signal, sell first then buy, delivery period of 0
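# NOTE (assumed layout, inferred from the data rather than documented here): each row of
# the expected-result matrices appears to hold the per-period positions of the seven
# traded shares (columns 0-6), followed by available cash (column 7), a column that stays
# at zero throughout (column 8, presumably accumulated trading fees), and total portfolio
# value (column 9).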
self.ps_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 5059.7222, 0.0000, 9761.1111],
[346.9824, 416.6787, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 1201.2775, 0.0000, 9646.1118],
[346.9824, 416.6787, 191.0372, 0.0000, 555.5556, 205.0654, 321.0892, 232.7189, 0.0000, 9685.5858],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9813.2184],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9803.1288],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9608.0198],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9311.5727],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8883.6246],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8751.3900],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8794.1811],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9136.5704],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9209.3588],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9093.8294],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9387.5537],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9585.9589],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 9928.7771],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10060.3806],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10281.0021],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10095.5613],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 0.0000, 4506.3926, 0.0000, 10029.9571],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9875.6133],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9614.9463],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9824.1722],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9732.5743],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9968.3391],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 10056.1579],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9921.4925],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9894.1621],
[115.7186, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 6179.7742, 0.0000, 20067.9370],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21133.5080],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20988.8485],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20596.7429],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 19910.7730],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20776.7070],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20051.7969],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20725.3884],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20828.8795],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21647.1811],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21310.1687],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20852.0993],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21912.3952],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21937.8282],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21962.4576],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21389.4018],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22027.4535],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 20939.9992],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21250.0636],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22282.7812],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21407.0658],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21160.2373],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21826.7682],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22744.9403],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23466.1185],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22017.8821],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23191.4662],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23099.0822],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22684.7671],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22842.1346],
[1073.8232, 416.6787, 735.6442, 269.8496, 1785.2055, 938.6967, 1339.2073, 5001.4246, 0.0000, 33323.8359],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 32820.2901],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 32891.2308],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 34776.5296],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 33909.0325],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 34560.1906],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 36080.4552],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 38618.4454],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 38497.9230],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 37110.0991],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 35455.2467],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35646.1860],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35472.3020],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36636.4694],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35191.7035],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36344.2242],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36221.6005],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35943.5708],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35708.2608],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35589.0286],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36661.0285],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 36310.5909],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 36466.7637],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 37784.4918],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 39587.6766],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 40064.0191],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 39521.6439],
[0.0000, 823.2923, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 17142.1018, 0.0000, 39932.2761],
[0.0000, 823.2923, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 17142.1018, 0.0000, 39565.2475],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 38943.1632],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39504.1184],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40317.8004],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40798.5768],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39962.5711],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40194.4793],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 41260.4003],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39966.3024],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 40847.3160],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 39654.5445],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 38914.8151]])
# PS signal, buy first then sell, delivery period of 0
self.ps_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 5059.7222, 0.0000, 9761.1111],
[346.9824, 416.6787, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 1201.2775, 0.0000, 9646.1118],
[346.9824, 416.6787, 191.0372, 0.0000, 555.5556, 205.0654, 321.0892, 232.7189, 0.0000, 9685.5858],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9813.2184],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9803.1288],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9608.0198],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9311.5727],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8883.6246],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8751.3900],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8794.1811],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9136.5704],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9209.3588],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9093.8294],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9387.5537],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9585.9589],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 9928.7771],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10060.3806],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10281.0021],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10095.5613],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 0.0000, 4506.3926, 0.0000, 10029.9571],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9875.6133],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9614.9463],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9824.1722],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9732.5743],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9968.3391],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 10056.1579],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9921.4925],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9894.1621],
[115.7186, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 6179.7742, 0.0000, 20067.9370],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21133.5080],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20988.8485],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20596.7429],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 19910.7730],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20776.7070],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20051.7969],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20725.3884],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20828.8795],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21647.1811],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21310.1687],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20852.0993],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21912.3952],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21937.8282],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21962.4576],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21389.4018],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21625.6913],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 20873.0389],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21450.9447],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 22269.3892],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21969.5329],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21752.6924],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22000.6088],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23072.5655],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23487.5201],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22441.0460],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23201.2700],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23400.9485],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22306.2008],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21989.5913],
[1073.8232, 737.0632, 735.6442, 269.8496, 1708.7766, 938.6967, 0.0000, 5215.4255, 0.0000, 31897.1636],
[0.0000, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 6421.4626, 0.0000, 31509.5059],
[0.0000, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 6421.4626, 0.0000, 31451.7888],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32773.4592],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32287.0318],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32698.1938],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 34031.5183],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 35537.8336],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 36212.6487],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 36007.5294],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 34691.3797],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 33904.8810],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34341.6098],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 35479.9505],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34418.4455],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34726.7182],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34935.0407],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34136.7505],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 33804.1575],
[195.7763, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 14025.8697, 0.0000, 33653.8970],
[195.7763, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 14025.8697, 0.0000, 34689.8757],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 34635.7841],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 35253.2755],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 36388.1051],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 37987.4204],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 38762.2103],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 38574.0544],
[195.7763, 1124.9219, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 15879.4935, 0.0000, 39101.9156],
[195.7763, 1124.9219, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 15879.4935, 0.0000, 39132.5587],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 38873.2941],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39336.6594],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39565.9568],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39583.4317],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39206.8350],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39092.6551],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39666.1834],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 38798.0749],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 39143.5561],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 38617.8779],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 38156.1701]])
# PS signal, sell first then buy, delivery period of 2 days (stock) / 1 day (cash)
self.ps_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 205.065, 321.089, 5059.722, 0.000, 9761.111],
[346.982, 416.679, 0.000, 0.000, 555.556, 205.065, 321.089, 1201.278, 0.000, 9646.112],
[346.982, 416.679, 191.037, 0.000, 555.556, 205.065, 321.089, 232.719, 0.000, 9685.586],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9813.218],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9803.129],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9608.020],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9311.573],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8883.625],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8751.390],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8794.181],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9136.570],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9209.359],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9093.829],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9387.554],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9585.959],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 9928.777],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10060.381],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10281.002],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10095.561],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 0.000, 4506.393, 0.000, 10029.957],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9875.613],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9614.946],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9824.172],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9732.574],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9968.339],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 10056.158],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9921.492],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9894.162],
[115.719, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 6179.774, 0.000, 20067.937],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21133.508],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20988.848],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20596.743],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 19910.773],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20776.707],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20051.797],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20725.388],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20828.880],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21647.181],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21310.169],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20852.099],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21912.395],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21937.828],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21962.458],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21389.402],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22027.453],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 20939.999],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21250.064],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22282.781],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21407.066],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21160.237],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21826.768],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22744.940],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23466.118],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22017.882],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23191.466],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23099.082],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22684.767],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22842.135],
[1073.823, 416.679, 735.644, 269.850, 1785.205, 938.697, 1339.207, 5001.425, 0.000, 33323.836],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 32820.290],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 32891.231],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 34776.530],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 33909.032],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 34560.191],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 36080.455],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 38618.445],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 38497.923],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 37110.099],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 35455.247],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35646.186],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35472.302],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36636.469],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35191.704],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36344.224],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36221.601],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35943.571],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35708.261],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35589.029],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36661.029],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 36310.591],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 36466.764],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 37784.492],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 39587.677],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 40064.019],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 39521.644],
[0.000, 823.292, 735.644, 0.000, 0.000, 0.000, 2730.576, 17142.102, 0.000, 39932.276],
[0.000, 823.292, 735.644, 0.000, 0.000, 0.000, 2730.576, 17142.102, 0.000, 39565.248],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 38943.163],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39504.118],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40317.800],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40798.577],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39962.571],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40194.479],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 41260.400],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39966.302],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 40847.316],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 39654.544],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 38914.815]])
# PS signal, buy first then sell, delivery period of 2 days (stock) / 1 day (cash)
self.ps_res_bs21 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 208.333, 326.206, 5020.833, 0.000, 9761.111],
[351.119, 421.646, 0.000, 0.000, 555.556, 208.333, 326.206, 1116.389, 0.000, 9645.961],
[351.119, 421.646, 190.256, 0.000, 555.556, 208.333, 326.206, 151.793, 0.000, 9686.841],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9813.932],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9803.000],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9605.334],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9304.001],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8870.741],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8738.282],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8780.664],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9126.199],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9199.746],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9083.518],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9380.932],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9581.266],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 9927.154],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10059.283],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10281.669],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10093.263],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 0.000, 4453.525, 0.000, 10026.289],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9870.523],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9606.437],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9818.691],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9726.556],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9964.547],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 10053.449],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9917.440],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9889.495],
[117.098, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 6189.948, 0.000, 20064.523],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21124.484],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20827.077],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20396.124],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 19856.445],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20714.156],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 19971.485],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20733.948],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20938.903],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21660.772],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21265.298],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20684.378],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21754.770],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21775.215],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21801.488],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21235.427],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21466.714],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 20717.431],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21294.450],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 22100.247],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21802.552],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21593.608],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21840.028],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22907.725],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23325.945],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22291.942],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23053.050],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23260.084],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22176.244],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21859.297],
[1055.763, 740.051, 729.561, 272.237, 1706.748, 932.896, 0.000, 5221.105, 0.000, 31769.617],
[0.000, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 6313.462, 0.000, 31389.961],
[0.000, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 6313.462, 0.000, 31327.498],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32647.140],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32170.095],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32577.742],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 33905.444],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 35414.492],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 36082.120],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 35872.293],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 34558.132],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 33778.138],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34213.578],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 35345.791],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34288.014],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34604.406],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34806.850],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34012.232],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 33681.345],
[192.484, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 13958.345, 0.000, 33540.463],
[192.484, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 13958.345, 0.000, 34574.280],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 34516.781],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 35134.412],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 36266.530],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 37864.376],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 38642.633],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 38454.227],
[192.484, 1127.221, 729.561, 0.000, 0.000, 0.000, 1339.869, 15871.934, 0.000, 38982.227],
[192.484, 1127.221, 729.561, 0.000, 0.000, 0.000, 1339.869, 15871.934, 0.000, 39016.154],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38759.803],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39217.182],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39439.690],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39454.081],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39083.341],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38968.694],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39532.030],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38675.507],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 39013.741],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 38497.668],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 38042.410]])
# Simulated backtest results for VS signals
# VS signal, sell first then buy, delivery period of 0
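# NOTE (assumed): the VS result matrices below appear to share the column layout of the
# PS results above; the round trade quantities (e.g. 400.0000, 500.0000) are consistent
# with VS signals being expressed as absolute share volumes rather than position ratios.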
self.vs_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750.0000, 0.0000, 9925.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 4954.0000, 0.0000, 9785.0000],
[400.0000, 400.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 878.0000, 0.0000, 9666.0000],
[400.0000, 400.0000, 173.1755, 0.0000, 500.0000, 300.0000, 300.0000, 0.0000, 0.0000, 9731.0000],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9830.9270],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9785.8540],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9614.3412],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9303.1953],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8834.4398],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8712.7554],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8717.9507],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9079.1479],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9166.0276],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9023.6607],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9291.6864],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9411.6371],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9706.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9822.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9986.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9805.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 0.0000, 4993.7357, 0.0000, 9704.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9567.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9209.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9407.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9329.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9545.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9652.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9414.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9367.7357],
[0.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 9319.7357, 0.0000, 19556.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20094.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19849.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19802.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19487.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19749.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19392.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19671.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19756.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20111.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19867.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19775.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20314.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20310.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20253.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20044.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20495.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 19798.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20103.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20864.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20425.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20137.8405],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20711.3567],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21470.3891],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21902.9538],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20962.9538],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21833.5184],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21941.8169],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21278.5184],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21224.4700],
[1100.0000, 710.4842, 400.0000, 300.0000, 600.0000, 500.0000, 600.0000, 9160.0000, 0.0000, 31225.2119],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488.0000, 0.0000, 30894.5748],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488.0000, 0.0000, 30764.3811],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 31815.5828],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 31615.4215],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 32486.1394],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 33591.2847],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34056.5428],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34756.4863],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34445.5428],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34433.9541],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 33870.4703],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 34014.3010],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 34680.5671],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 33890.9945],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 34004.6640],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 34127.7768],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 33421.1638],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 33120.9057],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830.0000, 0.0000, 32613.3171],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830.0000, 0.0000, 33168.1558],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 33504.6236],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 33652.1318],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 34680.4867],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 35557.5191],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 35669.7128],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 35211.4466],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530.0000, 0.0000, 35550.6079],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530.0000, 0.0000, 35711.6563],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35682.6079],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35880.8336],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36249.8740],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36071.6159],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35846.1562],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35773.3578],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36274.9465],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35739.3094],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 36135.0917],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 35286.5835],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 35081.3658]])
# VS signal, buy first then sell, delivery period of 0
self.vs_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750, 0.0000, 10000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750, 0.0000, 9925],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 4954, 0.0000, 9785],
[400.0000, 400.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 878, 0.0000, 9666],
[400.0000, 400.0000, 173.1755, 0.0000, 500.0000, 300.0000, 300.0000, 0, 0.0000, 9731],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9830.927022],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9785.854043],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9614.341223],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9303.195266],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8834.439842],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8712.755424],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8717.95069],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9079.147929],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9166.027613],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9023.66075],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9291.686391],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9411.637081],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9706.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9822.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9986.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9805.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 0.0000, 4993.7357, 0.0000, 9704.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9567.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9209.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9407.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9329.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9545.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9652.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9414.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9367.7357],
[0.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 9319.7357, 0.0000, 19556.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20094.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19849.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19802.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19487.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19749.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19392.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19671.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19756.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20111.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19867.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19775.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20314.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20310.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20253.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20044.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20495.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 19798.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20103.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20864.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20425.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20137.84054],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20711.35674],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21470.38914],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21902.95375],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20962.95375],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21833.51837],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21941.81688],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21278.51837],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21224.46995],
[1100.0000, 710.4842, 400.0000, 300.0000, 600.0000, 500.0000, 600.0000, 9160, 0.0000, 31225.21185],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488, 0.0000, 30894.57479],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488, 0.0000, 30764.38113],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 31815.5828],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 31615.42154],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 32486.13941],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 33591.28466],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34056.54276],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34756.48633],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34445.54276],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34433.95412],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33870.47032],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34014.30104],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34680.56715],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33890.99452],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34004.66398],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34127.77683],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33421.1638],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33120.9057],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830, 0.0000, 32613.31706],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830, 0.0000, 33168.15579],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 33504.62357],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 33652.13176],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 34680.4867],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35557.51909],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35669.71276],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35211.44665],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530, 0.0000, 35550.60792],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530, 0.0000, 35711.65633],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35682.60792],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35880.83362],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36249.87403],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36071.61593],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35846.15615],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35773.35783],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36274.94647],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35739.30941],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 36135.09172],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 35286.58353],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 35081.36584]])
# VS signals, sell first then buy, delivery period = 2 days (stock), 1 day (cash)
self.vs_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 9925.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 300.000, 300.000, 4954.000, 0.000, 9785.000],
[400.000, 400.000, 0.000, 0.000, 500.000, 300.000, 300.000, 878.000, 0.000, 9666.000],
[400.000, 400.000, 173.176, 0.000, 500.000, 300.000, 300.000, 0.000, 0.000, 9731.000],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9830.927],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9785.854],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9614.341],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9303.195],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8834.440],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8712.755],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8717.951],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9079.148],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9166.028],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9023.661],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9291.686],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9411.637],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9706.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9822.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9986.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9805.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 0.000, 4993.736, 0.000, 9704.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9567.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9209.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9407.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9329.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9545.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9652.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9414.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9367.736],
[0.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 9319.736, 0.000, 19556.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20094.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19849.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19802.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19487.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19749.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19392.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19671.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19756.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20111.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19867.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19775.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20314.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20310.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20253.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20044.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20495.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 19798.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20103.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20864.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20425.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20137.841],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20711.357],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21470.389],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21902.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20962.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21833.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21941.817],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21278.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21224.470],
[1100.000, 710.484, 400.000, 300.000, 600.000, 500.000, 600.000, 9160.000, 0.000, 31225.212],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30894.575],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30764.381],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31815.583],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31615.422],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 32486.139],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 33591.285],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34056.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34756.486],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34445.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34433.954],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33870.470],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34014.301],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34680.567],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33890.995],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34004.664],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34127.777],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33421.164],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33120.906],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 32613.317],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 33168.156],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33504.624],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33652.132],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 34680.487],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35557.519],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35669.713],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35211.447],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35550.608],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35711.656],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35682.608],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35880.834],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36249.874],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36071.616],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35846.156],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35773.358],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36274.946],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35739.309],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 36135.092],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35286.584],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35081.366]])
# VS signals, buy first then sell, delivery period = 2 days (stock), 1 day (cash)
self.vs_res_bs21 = np.array(
[[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 9925.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 300.000, 300.000, 4954.000, 0.000, 9785.000],
[400.000, 400.000, 0.000, 0.000, 500.000, 300.000, 300.000, 878.000, 0.000, 9666.000],
[400.000, 400.000, 173.176, 0.000, 500.000, 300.000, 300.000, 0.000, 0.000, 9731.000],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9830.927],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9785.854],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9614.341],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9303.195],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8834.440],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8712.755],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8717.951],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9079.148],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9166.028],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9023.661],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9291.686],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9411.637],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9706.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9822.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9986.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9805.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 0.000, 4993.736, 0.000, 9704.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9567.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9209.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9407.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9329.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9545.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9652.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9414.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9367.736],
[0.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 9319.736, 0.000, 19556.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20094.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19849.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19802.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19487.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19749.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19392.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19671.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19756.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20111.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19867.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19775.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20314.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20310.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20253.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20044.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20495.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 19798.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20103.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20864.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20425.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20137.841],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20711.357],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21470.389],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21902.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20962.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21833.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21941.817],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21278.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21224.470],
[1100.000, 710.484, 400.000, 300.000, 600.000, 500.000, 600.000, 9160.000, 0.000, 31225.212],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30894.575],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30764.381],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31815.583],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31615.422],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 32486.139],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 33591.285],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34056.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34756.486],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34445.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34433.954],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33870.470],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34014.301],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34680.567],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33890.995],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34004.664],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34127.777],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33421.164],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33120.906],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 32613.317],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 33168.156],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33504.624],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33652.132],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 34680.487],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35557.519],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35669.713],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35211.447],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35550.608],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35711.656],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35682.608],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35880.834],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36249.874],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36071.616],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35846.156],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35773.358],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36274.946],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35739.309],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 36135.092],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35286.584],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35081.366]])
# Multi-signal processing results, sell first then buy, re-using sale proceeds for purchases, delivery period = 2 days (stock), 0 days (cash)
self.multi_res = np.array(
[[0.0000, 357.2545, 0.0000, 6506.9627, 0.0000, 9965.1867],
[0.0000, 357.2545, 0.0000, 6506.9627, 0.0000, 10033.0650],
[0.0000, 178.6273, 0.0000, 8273.5864, 0.0000, 10034.8513],
[0.0000, 178.6273, 0.0000, 8273.5864, 0.0000, 10036.6376],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10019.3404],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10027.7062],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10030.1477],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10005.1399],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10002.5054],
[150.3516, 489.4532, 0.0000, 3765.8877, 0.0000, 9967.3860],
[75.1758, 391.5625, 0.0000, 5490.1377, 0.0000, 10044.4059],
[75.1758, 391.5625, 0.0000, 5490.1377, 0.0000, 10078.1430],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10138.2709],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10050.4768],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10300.0711],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10392.6970],
[75.1758, 391.5625, 169.2705, 4644.3773, 0.0000, 10400.5282],
[75.1758, 391.5625, 169.2705, 4644.3773, 0.0000, 10408.9220],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10376.5914],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10346.8794],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10364.7474],
[75.1758, 381.1856, 645.5014, 2459.1665, 0.0000, 10302.4570],
[18.7939, 381.1856, 645.5014, 3024.6764, 0.0000, 10747.4929],
[18.7939, 381.1856, 96.8252, 6492.3097, 0.0000, 11150.9107],
[18.7939, 381.1856, 96.8252, 6492.3097, 0.0000, 11125.2946],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11191.9956],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11145.7486],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11090.0768],
[132.5972, 114.3557, 864.3802, 4223.9548, 0.0000, 11113.8733],
[132.5972, 114.3557, 864.3802, 4223.9548, 0.0000, 11456.3281],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21983.7333],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 22120.6165],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21654.5327],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21429.6550],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21912.5643],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 22516.3100],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23169.0777],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23390.8080],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23743.3742],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 23210.7311],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 24290.4375],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 24335.3279],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 18317.3553],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 18023.4660],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24390.0527],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24389.6421],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24483.5953],
[0.0000, 559.9112, 0.0000, 18321.5674, 0.0000, 24486.1895],
[0.0000, 0.0000, 0.0000, 24805.3389, 0.0000, 24805.3389],
[0.0000, 0.0000, 0.0000, 24805.3389, 0.0000, 24805.3389]])
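# Layout of the expected-result arrays above, inferred from how the tests below
# index them: each row is one simulated trading day; columns 0-6 hold the seven
# asset positions and column 7 is the cash balance, while column 8 appears to be
# the accumulated fee (all zeros here) and column 9 the total portfolio value.
# The multi_res array follows the same row-per-day convention with fewer assets.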
def test_loop_step_pt_sb00(self):
""" test loop step PT-signal, sell first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.pt_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[2][7],
own_amounts=self.pt_res_sb00[2][0:7],
available_cash=self.pt_res_sb00[2][7],
available_amounts=self.pt_res_sb00[2][0:7],
op=self.pt_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[2][7] + c_g + c_s
amounts = self.pt_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[30][7],
own_amounts=self.pt_res_sb00[30][0:7],
available_cash=self.pt_res_sb00[30][7],
available_amounts=self.pt_res_sb00[30][0:7],
op=self.pt_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[30][7] + c_g + c_s
amounts = self.pt_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[59][7] + 10000,
own_amounts=self.pt_res_sb00[59][0:7],
available_cash=self.pt_res_sb00[59][7] + 10000,
available_amounts=self.pt_res_sb00[59][0:7],
op=self.pt_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.pt_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[95][7],
own_amounts=self.pt_res_sb00[95][0:7],
available_cash=self.pt_res_sb00[95][7],
available_amounts=self.pt_res_sb00[95][0:7],
op=self.pt_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[96][7] + c_g + c_s
amounts = self.pt_res_sb00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[97][0:7]))
def test_loop_step_pt_bs00(self):
""" test loop step PT-signal, buy first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.pt_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[2][7],
own_amounts=self.pt_res_bs00[2][0:7],
available_cash=self.pt_res_bs00[2][7],
available_amounts=self.pt_res_bs00[2][0:7],
op=self.pt_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[2][7] + c_g + c_s
amounts = self.pt_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[30][7],
own_amounts=self.pt_res_bs00[30][0:7],
available_cash=self.pt_res_bs00[30][7],
available_amounts=self.pt_res_bs00[30][0:7],
op=self.pt_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[30][7] + c_g + c_s
amounts = self.pt_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[59][7] + 10000,
own_amounts=self.pt_res_bs00[59][0:7],
available_cash=self.pt_res_bs00[59][7] + 10000,
available_amounts=self.pt_res_bs00[59][0:7],
op=self.pt_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.pt_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[95][7],
own_amounts=self.pt_res_bs00[95][0:7],
available_cash=self.pt_res_bs00[95][7],
available_amounts=self.pt_res_bs00[95][0:7],
op=self.pt_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[96][7] + c_g + c_s
amounts = self.pt_res_bs00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[97][0:7]))
def test_loop_step_ps_sb00(self):
""" test loop step PS-signal, sell first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.ps_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[2][7],
own_amounts=self.ps_res_sb00[2][0:7],
available_cash=self.ps_res_sb00[2][7],
available_amounts=self.ps_res_sb00[2][0:7],
op=self.ps_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[2][7] + c_g + c_s
amounts = self.ps_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[30][7],
own_amounts=self.ps_res_sb00[30][0:7],
available_cash=self.ps_res_sb00[30][7],
available_amounts=self.ps_res_sb00[30][0:7],
op=self.ps_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[30][7] + c_g + c_s
amounts = self.ps_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[59][7] + 10000,
own_amounts=self.ps_res_sb00[59][0:7],
available_cash=self.ps_res_sb00[59][7] + 10000,
available_amounts=self.ps_res_sb00[59][0:7],
op=self.ps_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.ps_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[95][7],
own_amounts=self.ps_res_sb00[95][0:7],
available_cash=self.ps_res_sb00[95][7],
available_amounts=self.ps_res_sb00[95][0:7],
op=self.ps_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[96][7] + c_g + c_s
amounts = self.ps_res_sb00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[97][0:7]))
def test_loop_step_ps_bs00(self):
""" test loop step PS-signal, buy first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.ps_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[2][7],
own_amounts=self.ps_res_bs00[2][0:7],
available_cash=self.ps_res_bs00[2][7],
available_amounts=self.ps_res_bs00[2][0:7],
op=self.ps_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[2][7] + c_g + c_s
amounts = self.ps_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[30][7],
own_amounts=self.ps_res_bs00[30][0:7],
available_cash=self.ps_res_bs00[30][7],
available_amounts=self.ps_res_bs00[30][0:7],
op=self.ps_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[30][7] + c_g + c_s
amounts = self.ps_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[59][7] + 10000,
own_amounts=self.ps_res_bs00[59][0:7],
available_cash=self.ps_res_bs00[59][7] + 10000,
available_amounts=self.ps_res_bs00[59][0:7],
op=self.ps_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.ps_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[95][7],
own_amounts=self.ps_res_bs00[95][0:7],
available_cash=self.ps_res_bs00[95][7],
available_amounts=self.ps_res_bs00[95][0:7],
op=self.ps_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[96][7] + c_g + c_s
amounts = self.ps_res_bs00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[97][0:7]))
def test_loop_step_vs_sb00(self):
"""test loop step of Volume Signal type of signals"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.vs_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7750)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 500., 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[2][7],
own_amounts=self.vs_res_sb00[2][0:7],
available_cash=self.vs_res_sb00[2][7],
available_amounts=self.vs_res_sb00[2][0:7],
op=self.vs_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[2][7] + c_g + c_s
amounts = self.vs_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[30][7],
own_amounts=self.vs_res_sb00[30][0:7],
available_cash=self.vs_res_sb00[30][7],
available_amounts=self.vs_res_sb00[30][0:7],
op=self.vs_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[30][7] + c_g + c_s
amounts = self.vs_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[59][7] + 10000,
own_amounts=self.vs_res_sb00[59][0:7],
available_cash=self.vs_res_sb00[59][7] + 10000,
available_amounts=self.vs_res_sb00[59][0:7],
op=self.vs_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.vs_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[95][7],
own_amounts=self.vs_res_sb00[95][0:7],
available_cash=self.vs_res_sb00[95][7],
available_amounts=self.vs_res_sb00[95][0:7],
op=self.vs_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[96][7] + c_g + c_s
amounts = self.vs_res_sb00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[97][0:7]))
def test_loop_step_vs_bs00(self):
"""test loop step of Volume Signal type of signals"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.vs_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7750)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 500., 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[2][7],
own_amounts=self.vs_res_bs00[2][0:7],
available_cash=self.vs_res_bs00[2][7],
available_amounts=self.vs_res_bs00[2][0:7],
op=self.vs_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[2][7] + c_g + c_s
amounts = self.vs_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[30][7],
own_amounts=self.vs_res_bs00[30][0:7],
available_cash=self.vs_res_bs00[30][7],
available_amounts=self.vs_res_bs00[30][0:7],
op=self.vs_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[30][7] + c_g + c_s
amounts = self.vs_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[59][7] + 10000,
own_amounts=self.vs_res_bs00[59][0:7],
available_cash=self.vs_res_bs00[59][7] + 10000,
available_amounts=self.vs_res_bs00[59][0:7],
op=self.vs_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.vs_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[95][7],
own_amounts=self.vs_res_bs00[95][0:7],
available_cash=self.vs_res_bs00[95][7],
available_amounts=self.vs_res_bs00[95][0:7],
op=self.vs_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[96][7] + c_g + c_s
amounts = self.vs_res_bs00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[97][0:7]))
def test_loop_pt(self):
""" Test looping of PT proportion target signals, with
stock delivery delay = 0 days
cash delivery delay = 0 day
buy-sell sequence = sell first
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 0 days \n'
'cash delivery delay = 0 day \n'
'buy-sell sequence = sell first')
res = apply_loop(op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
# print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.pt_res_bs00, 2))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=0,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
# print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_pt_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.pt_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.pt_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_pt_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
        print('Test looping of PT proportion target signals, with:\n'
              'stock delivery delay = 2 days \n'
              'cash delivery delay = 0 day \n'
              'maximize cash usage = True \n'
              '(applicable here because the cash delivery period is 0)')
res = apply_loop(
op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.pt_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.pt_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps(self):
""" Test looping of PS Proportion Signal type of signals
"""
res = apply_loop(op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.ps_res_bs00, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
        print('Test looping of PS proportion signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.ps_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.ps_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
        print('Test looping of PS proportion signals, with:\n'
              'stock delivery delay = 2 days \n'
              'cash delivery delay = 0 day \n'
              'maximize cash usage = True \n'
              '(applicable here because the cash delivery period is 0)')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.ps_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.ps_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs(self):
""" Test looping of VS Volume Signal type of signals
"""
res = apply_loop(op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.vs_res_bs00, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
        print('Test looping of VS volume signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.vs_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.vs_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
        print('Test looping of VS volume signals, with:\n'
              'stock delivery delay = 2 days \n'
              'cash delivery delay = 0 day \n'
              'maximize cash usage = True \n'
              '(applicable here because the cash delivery period is 0)')
res = apply_loop(
op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.vs_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.vs_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_multiple_signal(self):
""" Test looping of PS Proportion Signal type of signals
"""
res = apply_loop(op_type=1,
op_list=self.multi_signal_hp,
history_list=self.multi_history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
max_cash_usage=True,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.multi_res[i]))
print()
self.assertTrue(np.allclose(res, self.multi_res, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=1,
op_list=self.multi_signal_hp,
history_list=self.multi_history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=0,
stock_delivery_period=2,
max_cash_usage=False,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
class TestStrategy(unittest.TestCase):
""" test all properties and methods of strategy base class"""
def setUp(self) -> None:
pass
class TestLSStrategy(RollingTiming):
"""用于test测试的简单多空蒙板生成策略。基于RollingTiming滚动择时方法生成
该策略有两个参数,N与Price
N用于计算OHLC价格平均值的N日简单移动平均,判断,当移动平均值大于等于Price时,状态为看多,否则为看空
"""
def __init__(self):
super().__init__(stg_name='test_LS',
stg_text='test long/short strategy',
par_count=2,
par_types='discr, conti',
par_bounds_or_enums=([1, 5], [2, 10]),
data_types='close, open, high, low',
data_freq='d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
n, price = params
h = hist_data.T
avg = (h[0] + h[1] + h[2] + h[3]) / 4
ma = sma(avg, n)
if ma[-1] < price:
return 0
else:
return 1
class TestSelStrategy(SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='high, low, close',
data_freq='d',
sample_freq='10d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
avg = np.nanmean(hist_data, axis=(1, 2))
dif = (hist_data[:, :, 2] - np.roll(hist_data[:, :, 2], 1, 1))
dif_no_nan = np.array([arr[~np.isnan(arr)][-1] for arr in dif])
difper = dif_no_nan / avg
large2 = difper.argsort()[1:]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSelStrategyDiffTime(SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
# TODO: This strategy is not working, find out why and improve
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='close, low, open',
data_freq='d',
sample_freq='w',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
avg = hist_data.mean(axis=1).squeeze()
difper = (hist_data[:, :, 0] - np.roll(hist_data[:, :, 0], 1))[:, -1] / avg
large2 = difper.argsort()[0:2]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSigStrategy(SimpleTiming):
"""用于Test测试的简单信号生成策略,基于SimpleTiming策略生成
策略有三个参数,第一个参数为ratio,另外两个参数为price1以及price2
ratio是k线形状比例的阈值,定义为abs((C-O)/(H-L))。当这个比值小于ratio阈值时,判断该K线为十字交叉(其实还有丁字等多种情形,但这里做了
简化处理。
信号生成的规则如下:
1,当某个K线出现十字交叉,且昨收与今收之差大于price1时,买入信号
2,当某个K线出现十字交叉,且昨收与今收之差小于price2时,卖出信号
"""
def __init__(self):
super().__init__(stg_name='test_SIG',
stg_text='test signal creation strategy',
par_count=3,
par_types='conti, conti, conti',
par_bounds_or_enums=([2, 10], [0, 3], [0, 3]),
data_types='close, open, high, low',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
r, price1, price2 = params
h = hist_data.T
ratio = np.abs((h[0] - h[1]) / (h[3] - h[2]))
diff = h[0] - np.roll(h[0], 1)
sig = np.where((ratio < r) & (diff > price1),
1,
np.where((ratio < r) & (diff < price2), -1, 0))
return sig
class MyStg(qt.RollingTiming):
"""自定义双均线择时策略策略"""
def __init__(self):
"""这个均线择时策略只有三个参数:
- SMA 慢速均线,所选择的股票
- FMA 快速均线
- M 边界值
策略的其他说明
"""
"""
必须初始化的关键策略参数清单:
"""
super().__init__(
pars=(20, 100, 0.01),
par_count=3,
par_types=['discr', 'discr', 'conti'],
par_bounds_or_enums=[(10, 250), (10, 250), (0.0, 0.5)],
stg_name='CUSTOM ROLLING TIMING STRATEGY',
stg_text='Customized Rolling Timing Strategy for Testing',
data_types='close',
window_length=100,
)
print(f'=====================\n====================\n'
f'custom strategy initialized, \npars: {self.pars}\npar_count:{self.par_count}\npar_types:'
f'{self.par_types}\n'
f'{self.info()}')
    # The concrete strategy logic is implemented in the strategy's _realize() method.
    # That method always takes two arguments: hist_price, the history data of the given
    # portfolio, and params, the concrete strategy parameters.
def _realize(self, hist_price, params):
"""策略的具体实现代码:
s:短均线计算日期;l:长均线计算日期;m:均线边界宽度;hesitate:均线跨越类型"""
f, s, m = params
        # Temporary workaround: slice the incoming data inside the strategy implementation; the data
        # should eventually be sliced outside of it so the input already matches the data_types parameter
h = hist_price.T
        # compute the current values of the fast and slow moving averages
s_ma = qt.sma(h[0], s)[-1]
f_ma = qt.sma(h[0], f)[-1]
        # compute the stop band around the slow MA; while the fast MA stays inside the band,
        # positions are closed and no buy/sell signal is issued
s_ma_u = s_ma * (1 + m)
s_ma_l = s_ma * (1 - m)
        # generate long/short/empty flags depending on where the fast MA sits relative to the band
        if f_ma > s_ma_u:  # fast MA above the band: hold a long position
return 1
        elif s_ma_l < f_ma < s_ma_u:  # fast MA inside the band: stay flat
return 0
        else:  # f_ma < s_ma_l, fast MA below the band: hold a short position
return -1
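# A hedged usage sketch for the custom strategy above (added for illustration; the call pattern
# mirrors the tests below rather than any library documentation):
#     op = qt.Operator(strategies=[MyStg()])
#     op.set_parameter(stg_id=0, pars=(20, 100, 0.01))   # pars must respect par_bounds_or_enums
#     # the operator can then prepare data and create signals like any built-in timing strategy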
class TestOperator(unittest.TestCase):
"""全面测试Operator对象的所有功能。包括:
1, Strategy 参数的设置
2, 历史数据的获取与分配提取
3, 策略优化参数的批量设置和优化空间的获取
4, 策略输出值的正确性验证
5, 策略结果的混合结果确认
"""
def setUp(self):
"""prepare data for Operator test"""
print('start testing HistoryPanel object\n')
# build up test data: a 4-type, 3-share, 50-day matrix of prices that contains nan values in some days
# for some share_pool
# for share1:
data_rows = 50
share1_close = [10.04, 10, 10, 9.99, 9.97, 9.99, 10.03, 10.03, 10.06, 10.06, 10.11,
10.09, 10.07, 10.06, 10.09, 10.03, 10.03, 10.06, 10.08, 10, 9.99,
10.03, 10.03, 10.06, 10.03, 9.97, 9.94, 9.83, 9.77, 9.84, 9.91, 9.93,
9.96, 9.91, 9.91, 9.88, 9.91, 9.64, 9.56, 9.57, 9.55, 9.57, 9.61, 9.61,
9.55, 9.57, 9.63, 9.64, 9.65, 9.62]
share1_open = [10.02, 10, 9.98, 9.97, 9.99, 10.01, 10.04, 10.06, 10.06, 10.11,
10.11, 10.07, 10.06, 10.09, 10.03, 10.02, 10.06, 10.08, 9.99, 10,
10.03, 10.02, 10.06, 10.03, 9.97, 9.94, 9.83, 9.78, 9.77, 9.91, 9.92,
9.97, 9.91, 9.9, 9.88, 9.91, 9.63, 9.64, 9.57, 9.55, 9.58, 9.61, 9.62,
9.55, 9.57, 9.61, 9.63, 9.64, 9.61, 9.56]
share1_high = [10.07, 10, 10, 10, 10.03, 10.03, 10.04, 10.09, 10.1, 10.14, 10.11, 10.1,
10.09, 10.09, 10.1, 10.05, 10.07, 10.09, 10.1, 10, 10.04, 10.04, 10.06,
10.09, 10.05, 9.97, 9.96, 9.86, 9.77, 9.92, 9.94, 9.97, 9.97, 9.92, 9.92,
9.92, 9.93, 9.64, 9.58, 9.6, 9.58, 9.62, 9.62, 9.64, 9.59, 9.62, 9.63,
9.7, 9.66, 9.64]
share1_low = [9.99, 10, 9.97, 9.97, 9.97, 9.98, 9.99, 10.03, 10.03, 10.04, 10.11, 10.07,
10.05, 10.03, 10.03, 10.01, 9.99, 10.03, 9.95, 10, 9.95, 10, 10.01, 9.99,
9.96, 9.89, 9.83, 9.77, 9.77, 9.8, 9.9, 9.91, 9.89, 9.89, 9.87, 9.85, 9.6,
9.64, 9.53, 9.55, 9.54, 9.55, 9.58, 9.54, 9.53, 9.53, 9.63, 9.64, 9.59, 9.56]
# for share2:
share2_close = [9.68, 9.87, 9.86, 9.87, 9.79, 9.82, 9.8, 9.66, 9.62, 9.58, 9.69, 9.78, 9.75,
9.96, 9.9, 10.04, 10.06, 10.08, 10.24, 10.24, 10.24, 9.86, 10.13, 10.12,
10.1, 10.25, 10.24, 10.22, 10.75, 10.64, 10.56, 10.6, 10.42, 10.25, 10.24,
10.49, 10.57, 10.63, 10.48, 10.37, 10.96, 11.02, np.nan, np.nan, 10.88, 10.87, 11.01,
11.01, 11.58, 11.8]
share2_open = [9.88, 9.88, 9.89, 9.75, 9.74, 9.8, 9.62, 9.65, 9.58, 9.67, 9.81, 9.8, 10,
9.95, 10.1, 10.06, 10.14, 9.9, 10.2, 10.29, 9.86, 9.48, 10.01, 10.24, 10.26,
10.24, 10.12, 10.65, 10.64, 10.56, 10.42, 10.43, 10.29, 10.3, 10.44, 10.6,
10.67, 10.46, 10.39, 10.9, 11.01, 11.01, np.nan, np.nan, 10.82, 11.02, 10.96,
11.55, 11.74, 11.8]
share2_high = [9.91, 10.04, 9.93, 10.04, 9.84, 9.88, 9.99, 9.7, 9.67, 9.71, 9.85, 9.9, 10,
10.2, 10.11, 10.18, 10.21, 10.26, 10.38, 10.47, 10.42, 10.07, 10.24, 10.27,
10.38, 10.43, 10.39, 10.65, 10.84, 10.65, 10.73, 10.63, 10.51, 10.35, 10.46,
10.63, 10.74, 10.76, 10.54, 11.02, 11.12, 11.17, np.nan, np.nan, 10.92, 11.15,
11.11, 11.55, 11.95, 11.93]
share2_low = [9.63, 9.84, 9.81, 9.74, 9.67, 9.72, 9.57, 9.54, 9.51, 9.47, 9.68, 9.63, 9.75,
9.65, 9.9, 9.93, 10.03, 9.8, 10.14, 10.09, 9.78, 9.21, 9.11, 9.68, 10.05,
10.12, 9.89, 9.89, 10.59, 10.43, 10.34, 10.32, 10.21, 10.2, 10.18, 10.36,
10.51, 10.41, 10.32, 10.37, 10.87, 10.95, np.nan, np.nan, 10.65, 10.71, 10.75,
10.91, 11.31, 11.58]
# for share3:
share3_close = [6.64, 7.26, 7.03, 6.87, np.nan, 6.64, 6.85, 6.7, 6.39, 6.22, 5.92, 5.91, 6.11,
5.91, 6.23, 6.28, 6.28, 6.27, np.nan, 5.56, 5.67, 5.16, 5.69, 6.32, 6.14, 6.25,
5.79, 5.26, 5.05, 5.45, 6.06, 6.21, 5.69, 5.46, 6.02, 6.69, 7.43, 7.72, 8.16,
7.83, 8.7, 8.71, 8.88, 8.54, 8.87, 8.87, 8.18, 7.8, 7.97, 8.25]
share3_open = [7.26, 7, 6.88, 6.91, np.nan, 6.81, 6.63, 6.45, 6.16, 6.24, 5.96, 5.97, 5.96,
6.2, 6.35, 6.11, 6.37, 5.58, np.nan, 5.65, 5.19, 5.42, 6.3, 6.15, 6.05, 5.89,
5.22, 5.2, 5.07, 6.04, 6.12, 5.85, 5.67, 6.02, 6.04, 7.07, 7.64, 7.99, 7.59,
8.73, 8.72, 8.97, 8.58, 8.71, 8.77, 8.4, 7.95, 7.76, 8.25, 7.51]
share3_high = [7.41, 7.31, 7.14, 7, np.nan, 6.82, 6.96, 6.85, 6.5, 6.34, 6.04, 6.02, 6.12, 6.38,
6.43, 6.46, 6.43, 6.27, np.nan, 6.01, 5.67, 5.67, 6.35, 6.32, 6.43, 6.36, 5.79,
5.47, 5.65, 6.04, 6.14, 6.23, 5.83, 6.25, 6.27, 7.12, 7.82, 8.14, 8.27, 8.92,
8.76, 9.15, 8.9, 9.01, 9.16, 9, 8.27, 7.99, 8.33, 8.25]
share3_low = [6.53, 6.87, 6.83, 6.7, np.nan, 6.63, 6.57, 6.41, 6.15, 6.07, 5.89, 5.82, 5.73, 5.81,
6.1, 6.06, 6.16, 5.57, np.nan, 5.51, 5.19, 5.12, 5.69, 6.01, 5.97, 5.86, 5.18, 5.19,
4.96, 5.45, 5.84, 5.85, 5.28, 5.42, 6.02, 6.69, 7.28, 7.64, 7.25, 7.83, 8.41, 8.66,
8.53, 8.54, 8.73, 8.27, 7.95, 7.67, 7.8, 7.51]
# for sel_finance test
shares_eps = np.array([[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, 0.2, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[0.1, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.3, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.15, np.nan, np.nan],
[np.nan, 0.1, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.2, np.nan, np.nan],
[np.nan, 0.5, np.nan],
[0.4, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[0.9, np.nan, np.nan],
[np.nan, np.nan, 0.1]])
self.date_indices = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14', '2016-07-15', '2016-07-18',
'2016-07-19', '2016-07-20', '2016-07-21', '2016-07-22',
'2016-07-25', '2016-07-26', '2016-07-27', '2016-07-28',
'2016-07-29', '2016-08-01', '2016-08-02', '2016-08-03',
'2016-08-04', '2016-08-05', '2016-08-08', '2016-08-09',
'2016-08-10', '2016-08-11', '2016-08-12', '2016-08-15',
'2016-08-16', '2016-08-17', '2016-08-18', '2016-08-19',
'2016-08-22', '2016-08-23', '2016-08-24', '2016-08-25',
'2016-08-26', '2016-08-29', '2016-08-30', '2016-08-31',
'2016-09-01', '2016-09-02', '2016-09-05', '2016-09-06',
'2016-09-07', '2016-09-08']
self.shares = ['000010', '000030', '000039']
self.types = ['close', 'open', 'high', 'low']
self.sel_finance_tyeps = ['eps']
self.test_data_3D = np.zeros((3, data_rows, 4))
self.test_data_2D = np.zeros((data_rows, 3))
self.test_data_2D2 = np.zeros((data_rows, 4))
self.test_data_sel_finance = np.empty((3, data_rows, 1))
# Build up 3D data
self.test_data_3D[0, :, 0] = share1_close
self.test_data_3D[0, :, 1] = share1_open
self.test_data_3D[0, :, 2] = share1_high
self.test_data_3D[0, :, 3] = share1_low
self.test_data_3D[1, :, 0] = share2_close
self.test_data_3D[1, :, 1] = share2_open
self.test_data_3D[1, :, 2] = share2_high
self.test_data_3D[1, :, 3] = share2_low
self.test_data_3D[2, :, 0] = share3_close
self.test_data_3D[2, :, 1] = share3_open
self.test_data_3D[2, :, 2] = share3_high
self.test_data_3D[2, :, 3] = share3_low
self.test_data_sel_finance[:, :, 0] = shares_eps.T
self.hp1 = qt.HistoryPanel(values=self.test_data_3D,
levels=self.shares,
columns=self.types,
rows=self.date_indices)
print(f'in test Operator, history panel is created for timing test')
self.hp1.info()
self.hp2 = qt.HistoryPanel(values=self.test_data_sel_finance,
levels=self.shares,
columns=self.sel_finance_tyeps,
rows=self.date_indices)
print(f'in test_Operator, history panel is created for selection finance test:')
self.hp2.info()
self.op = qt.Operator(strategies='dma', signal_type='PS')
self.op2 = qt.Operator(strategies='dma, macd, trix')
def test_init(self):
""" test initialization of Operator class"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.signal_type, 'pt')
self.assertIsInstance(op.strategies, list)
self.assertEqual(len(op.strategies), 0)
op = qt.Operator('dma')
self.assertIsInstance(op, qt.Operator)
self.assertIsInstance(op.strategies, list)
self.assertIsInstance(op.strategies[0], TimingDMA)
op = qt.Operator('dma, macd')
self.assertIsInstance(op, qt.Operator)
op = qt.Operator(['dma', 'macd'])
self.assertIsInstance(op, qt.Operator)
def test_repr(self):
""" test basic representation of Opeartor class"""
op = qt.Operator()
self.assertEqual(op.__repr__(), 'Operator()')
op = qt.Operator('macd, dma, trix, random, avg_low')
self.assertEqual(op.__repr__(), 'Operator(macd, dma, trix, random, avg_low)')
self.assertEqual(op['dma'].__repr__(), 'Q-TIMING(DMA)')
self.assertEqual(op['macd'].__repr__(), 'R-TIMING(MACD)')
self.assertEqual(op['trix'].__repr__(), 'R-TIMING(TRIX)')
self.assertEqual(op['random'].__repr__(), 'SELECT(RANDOM)')
self.assertEqual(op['avg_low'].__repr__(), 'FACTOR(AVG LOW)')
def test_info(self):
"""Test information output of Operator"""
print(f'test printing information of operator object')
self.op.info()
def test_get_strategy_by_id(self):
""" test get_strategy_by_id()"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
self.assertEqual(op.strategy_ids, ['macd', 'dma', 'trix'])
self.assertIs(op.get_strategy_by_id('macd'), op.strategies[0])
self.assertIs(op.get_strategy_by_id(1), op.strategies[1])
self.assertIs(op.get_strategy_by_id('trix'), op.strategies[2])
def test_get_items(self):
""" test method __getitem__(), it should be the same as geting strategies by id"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
self.assertEqual(op.strategy_ids, ['macd', 'dma', 'trix'])
self.assertIs(op['macd'], op.strategies[0])
self.assertIs(op['trix'], op.strategies[2])
self.assertIs(op[1], op.strategies[1])
self.assertIs(op[3], op.strategies[2])
def test_get_strategies_by_price_type(self):
""" test get_strategies_by_price_type"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategies_by_price_type('close')
stg_open = op.get_strategies_by_price_type('open')
stg_high = op.get_strategies_by_price_type('high')
self.assertIsInstance(stg_close, list)
self.assertIsInstance(stg_open, list)
self.assertIsInstance(stg_high, list)
self.assertEqual(stg_close, [op.strategies[1]])
self.assertEqual(stg_open, [op.strategies[0], op.strategies[2]])
self.assertEqual(stg_high, [])
stg_wrong = op.get_strategies_by_price_type(123)
self.assertIsInstance(stg_wrong, list)
self.assertEqual(stg_wrong, [])
def test_get_strategy_count_by_price_type(self):
""" test get_strategy_count_by_price_type"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategy_count_by_price_type('close')
stg_open = op.get_strategy_count_by_price_type('open')
stg_high = op.get_strategy_count_by_price_type('high')
self.assertIsInstance(stg_close, int)
self.assertIsInstance(stg_open, int)
self.assertIsInstance(stg_high, int)
self.assertEqual(stg_close, 1)
self.assertEqual(stg_open, 2)
self.assertEqual(stg_high, 0)
stg_wrong = op.get_strategy_count_by_price_type(123)
self.assertIsInstance(stg_wrong, int)
self.assertEqual(stg_wrong, 0)
def test_get_strategy_names_by_price_type(self):
""" test get_strategy_names_by_price_type"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategy_names_by_price_type('close')
stg_open = op.get_strategy_names_by_price_type('open')
stg_high = op.get_strategy_names_by_price_type('high')
self.assertIsInstance(stg_close, list)
self.assertIsInstance(stg_open, list)
self.assertIsInstance(stg_high, list)
self.assertEqual(stg_close, ['DMA'])
self.assertEqual(stg_open, ['MACD', 'TRIX'])
self.assertEqual(stg_high, [])
stg_wrong = op.get_strategy_names_by_price_type(123)
self.assertIsInstance(stg_wrong, list)
self.assertEqual(stg_wrong, [])
def test_get_strategy_id_by_price_type(self):
""" test get_strategy_IDs_by_price_type"""
print('-----Test get strategy IDs by price type------\n')
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategy_id_by_price_type('close')
stg_open = op.get_strategy_id_by_price_type('open')
stg_high = op.get_strategy_id_by_price_type('high')
self.assertIsInstance(stg_close, list)
self.assertIsInstance(stg_open, list)
self.assertIsInstance(stg_high, list)
self.assertEqual(stg_close, ['dma'])
self.assertEqual(stg_open, ['macd', 'trix'])
self.assertEqual(stg_high, [])
op.add_strategies('dma, macd')
op.set_parameter('dma_1', price_type='open')
op.set_parameter('macd', price_type='open')
op.set_parameter('macd_1', price_type='high')
op.set_parameter('trix', price_type='close')
print(f'Operator strategy id:\n'
f'{op.strategies} on memory pos:\n'
f'{[id(stg) for stg in op.strategies]}')
stg_close = op.get_strategy_id_by_price_type('close')
stg_open = op.get_strategy_id_by_price_type('open')
stg_high = op.get_strategy_id_by_price_type('high')
stg_all = op.get_strategy_id_by_price_type()
print(f'All IDs of strategies:\n'
f'{stg_all}\n'
f'All price types of strategies:\n'
f'{[stg.price_type for stg in op.strategies]}')
self.assertEqual(stg_close, ['dma', 'trix'])
self.assertEqual(stg_open, ['macd', 'dma_1'])
self.assertEqual(stg_high, ['macd_1'])
stg_wrong = op.get_strategy_id_by_price_type(123)
self.assertIsInstance(stg_wrong, list)
self.assertEqual(stg_wrong, [])
def test_property_strategies(self):
""" test property strategies"""
print(f'created a new simple Operator with only one strategy: DMA')
op = qt.Operator('dma')
strategies = op.strategies
self.assertIsInstance(strategies, list)
op.info()
print(f'created the second simple Operator with three strategies')
self.assertIsInstance(strategies[0], TimingDMA)
op = qt.Operator('dma, macd, cdl')
strategies = op.strategies
op.info()
self.assertIsInstance(strategies, list)
self.assertIsInstance(strategies[0], TimingDMA)
self.assertIsInstance(strategies[1], TimingMACD)
self.assertIsInstance(strategies[2], TimingCDL)
def test_property_strategy_count(self):
""" test Property strategy_count, and the method get_strategy_count_by_price_type()"""
self.assertEqual(self.op.strategy_count, 1)
self.assertEqual(self.op2.strategy_count, 3)
self.assertEqual(self.op.get_strategy_count_by_price_type(), 1)
self.assertEqual(self.op2.get_strategy_count_by_price_type(), 3)
self.assertEqual(self.op.get_strategy_count_by_price_type('close'), 1)
self.assertEqual(self.op.get_strategy_count_by_price_type('high'), 0)
self.assertEqual(self.op2.get_strategy_count_by_price_type('close'), 3)
self.assertEqual(self.op2.get_strategy_count_by_price_type('open'), 0)
def test_property_strategy_names(self):
""" test property strategy_ids"""
op = qt.Operator('dma')
self.assertIsInstance(op.strategy_ids, list)
names = op.strategy_ids[0]
print(f'names are {names}')
self.assertEqual(names, 'dma')
op = qt.Operator('dma, macd, trix, cdl')
self.assertIsInstance(op.strategy_ids, list)
self.assertEqual(op.strategy_ids[0], 'dma')
self.assertEqual(op.strategy_ids[1], 'macd')
self.assertEqual(op.strategy_ids[2], 'trix')
self.assertEqual(op.strategy_ids[3], 'cdl')
op = qt.Operator('dma, macd, trix, dma, dma')
self.assertIsInstance(op.strategy_ids, list)
self.assertEqual(op.strategy_ids[0], 'dma')
self.assertEqual(op.strategy_ids[1], 'macd')
self.assertEqual(op.strategy_ids[2], 'trix')
self.assertEqual(op.strategy_ids[3], 'dma_1')
self.assertEqual(op.strategy_ids[4], 'dma_2')
def test_property_strategy_blenders(self):
""" test property strategy blenders including property setter,
and test the method get_blender()"""
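        # Added note (inferred from the assertions in these tests, not from library docs): a blender
        # string such as '1+2' is parsed into an operator-first token list ['+', '2', '1'], one list
        # per price type, and '1+2*3' becomes ['+', '*', '3', '2', '1'], i.e. the '*' binds tighter
        # than the '+'.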
print(f'------- Test property strategy blenders ---------')
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'pt')
# test adding blender to empty operator
op.strategy_blenders = '1 + 2'
op.signal_type = 'proportion signal'
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'ps')
op.add_strategy('dma')
op.strategy_blenders = '1+2'
self.assertEqual(op.strategy_blenders, {'close': ['+', '2', '1']})
op.clear_strategies()
self.assertEqual(op.strategy_blenders, {})
op.add_strategies('dma, trix, macd, dma')
op.set_parameter('dma', price_type='open')
op.set_parameter('trix', price_type='high')
op.set_blender('open', '1+2')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '2', '1'])
self.assertEqual(blender_close, None)
self.assertEqual(blender_high, None)
op.set_blender('open', '1+2+3')
op.set_blender('abc', '1+2+3')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
blender_abc = op.get_blender('abc')
self.assertEqual(op.strategy_blenders, {'open': ['+', '3', '+', '2', '1']})
self.assertEqual(blender_open, ['+', '3', '+', '2', '1'])
self.assertEqual(blender_close, None)
self.assertEqual(blender_high, None)
self.assertEqual(blender_abc, None)
op.set_blender('open', 123)
blender_open = op.get_blender('open')
self.assertEqual(blender_open, [])
op.set_blender(None, '1+1')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(op.bt_price_types, ['close', 'high', 'open'])
self.assertEqual(op.get_blender(), {'close': ['+', '1', '1'],
'open': ['+', '1', '1'],
'high': ['+', '1', '1']})
self.assertEqual(blender_open, ['+', '1', '1'])
self.assertEqual(blender_close, ['+', '1', '1'])
self.assertEqual(blender_high, ['+', '1', '1'])
op.set_blender(None, ['1+1', '3+4'])
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '3'])
self.assertEqual(blender_close, ['+', '1', '1'])
self.assertEqual(blender_high, ['+', '4', '3'])
self.assertEqual(op.view_blender('open'), '3+4')
self.assertEqual(op.view_blender('close'), '1+1')
self.assertEqual(op.view_blender('high'), '3+4')
op.strategy_blenders = (['1+2', '2*3', '1+4'])
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '1'])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
self.assertEqual(op.view_blender('open'), '1+4')
self.assertEqual(op.view_blender('close'), '1+2')
self.assertEqual(op.view_blender('high'), '2*3')
# test error inputs:
# wrong type of price_type
self.assertRaises(TypeError, op.set_blender, 1, '1+3')
# price_type not found, no change is made
op.set_blender('volume', '1+3')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '1'])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
# price_type not valid, no change is made
op.set_blender('closee', '1+2')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '1'])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
# wrong type of blender, set to empty list
op.set_blender('open', 55)
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, [])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
# wrong type of blender, set to empty list
op.set_blender('close', ['1+2'])
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, [])
self.assertEqual(blender_close, [])
self.assertEqual(blender_high, ['*', '3', '2'])
# can't parse blender, set to empty list
op.set_blender('high', 'a+bc')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, [])
self.assertEqual(blender_close, [])
self.assertEqual(blender_high, [])
def test_property_singal_type(self):
""" test property signal_type"""
op = qt.Operator()
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.signal_type, 'pt')
op = qt.Operator(signal_type='ps')
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator(signal_type='PS')
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator(signal_type='proportion signal')
self.assertEqual(op.signal_type, 'ps')
print(f'"pt" will be the default type if wrong value is given')
op = qt.Operator(signal_type='wrong value')
self.assertEqual(op.signal_type, 'pt')
print(f'test signal_type.setter')
op.signal_type = 'ps'
self.assertEqual(op.signal_type, 'ps')
print(f'test error raising')
self.assertRaises(TypeError, setattr, op, 'signal_type', 123)
self.assertRaises(ValueError, setattr, op, 'signal_type', 'wrong value')
def test_property_op_data_types(self):
""" test property op_data_types"""
op = qt.Operator()
self.assertIsInstance(op.op_data_types, list)
self.assertEqual(op.op_data_types, [])
op = qt.Operator('macd, dma, trix')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
op = qt.Operator('macd, cdl')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
self.assertEqual(dt[1], 'high')
self.assertEqual(dt[2], 'low')
self.assertEqual(dt[3], 'open')
self.assertEqual(dt, ['close', 'high', 'low', 'open'])
op.add_strategy('dma')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
self.assertEqual(dt[1], 'high')
self.assertEqual(dt[2], 'low')
self.assertEqual(dt[3], 'open')
self.assertEqual(dt, ['close', 'high', 'low', 'open'])
def test_property_op_data_type_count(self):
""" test property op_data_type_count"""
op = qt.Operator()
self.assertIsInstance(op.op_data_type_count, int)
self.assertEqual(op.op_data_type_count, 0)
op = qt.Operator('macd, dma, trix')
dtn = op.op_data_type_count
self.assertEqual(dtn, 1)
op = qt.Operator('macd, cdl')
dtn = op.op_data_type_count
self.assertEqual(dtn, 4)
op.add_strategy('dma')
dtn = op.op_data_type_count
self.assertEqual(dtn, 4)
def test_property_op_data_freq(self):
""" test property op_data_freq"""
op = qt.Operator()
self.assertIsInstance(op.op_data_freq, str)
self.assertEqual(len(op.op_data_freq), 0)
self.assertEqual(op.op_data_freq, '')
op = qt.Operator('macd, dma, trix')
dtf = op.op_data_freq
self.assertIsInstance(dtf, str)
self.assertEqual(dtf[0], 'd')
op.set_parameter('macd', data_freq='m')
dtf = op.op_data_freq
self.assertIsInstance(dtf, list)
self.assertEqual(len(dtf), 2)
self.assertEqual(dtf[0], 'd')
self.assertEqual(dtf[1], 'm')
def test_property_bt_price_types(self):
""" test property bt_price_types"""
        print('------test property bt_price_types-------')
op = qt.Operator()
self.assertIsInstance(op.bt_price_types, list)
self.assertEqual(len(op.bt_price_types), 0)
self.assertEqual(op.bt_price_types, [])
op = qt.Operator('macd, dma, trix')
btp = op.bt_price_types
self.assertIsInstance(btp, list)
self.assertEqual(btp[0], 'close')
op.set_parameter('macd', price_type='open')
btp = op.bt_price_types
btpc = op.bt_price_type_count
print(f'price_types are \n{btp}')
self.assertIsInstance(btp, list)
self.assertEqual(len(btp), 2)
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
op.add_strategies(['dma', 'macd'])
op.set_parameter('dma_1', price_type='high')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'high')
self.assertEqual(btp[2], 'open')
self.assertEqual(btpc, 3)
op.remove_strategy('dma_1')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
op.remove_strategy('macd_1')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
def test_property_op_data_type_list(self):
""" test property op_data_type_list"""
op = qt.Operator()
self.assertIsInstance(op.op_data_type_list, list)
self.assertEqual(len(op.op_data_type_list), 0)
self.assertEqual(op.op_data_type_list, [])
op = qt.Operator('macd, dma, trix, cdl')
ohd = op.op_data_type_list
print(f'ohd is {ohd}')
self.assertIsInstance(ohd, list)
self.assertEqual(ohd[0], ['close'])
op.set_parameter('macd', data_types='open, close')
ohd = op.op_data_type_list
print(f'ohd is {ohd}')
self.assertIsInstance(ohd, list)
self.assertEqual(len(ohd), 4)
self.assertEqual(ohd[0], ['open', 'close'])
self.assertEqual(ohd[1], ['close'])
self.assertEqual(ohd[2], ['close'])
self.assertEqual(ohd[3], ['open', 'high', 'low', 'close'])
def test_property_op_history_data(self):
""" Test this important function to get operation history data that shall be used in
signal generation
these data are stored in list of nd-arrays, each ndarray represents the data
that is needed for each and every strategy
"""
print(f'------- Test getting operation history data ---------')
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.op_history_data, {})
self.assertEqual(op.signal_type, 'pt')
def test_property_opt_space_par(self):
""" test property opt_space_par"""
print(f'-----test property opt_space_par--------:\n')
op = qt.Operator()
self.assertIsInstance(op.opt_space_par, tuple)
self.assertIsInstance(op.opt_space_par[0], list)
self.assertIsInstance(op.opt_space_par[1], list)
self.assertEqual(len(op.opt_space_par), 2)
self.assertEqual(op.opt_space_par, ([], []))
op = qt.Operator('macd, dma, trix, cdl')
osp = op.opt_space_par
print(f'before setting opt_tags opt_space_par is empty:\n'
f'osp is {osp}\n')
self.assertIsInstance(osp, tuple)
self.assertEqual(osp[0], [])
self.assertEqual(osp[1], [])
op.set_parameter('macd', opt_tag=1)
op.set_parameter('dma', opt_tag=1)
osp = op.opt_space_par
print(f'after setting opt_tags opt_space_par is not empty:\n'
f'osp is {osp}\n')
self.assertIsInstance(osp, tuple)
self.assertEqual(len(osp), 2)
self.assertIsInstance(osp[0], list)
self.assertIsInstance(osp[1], list)
self.assertEqual(len(osp[0]), 6)
self.assertEqual(len(osp[1]), 6)
self.assertEqual(osp[0], [(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
self.assertEqual(osp[1], ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
def test_property_opt_types(self):
""" test property opt_tags"""
print(f'-----test property opt_tags--------:\n')
op = qt.Operator()
self.assertIsInstance(op.opt_tags, list)
self.assertEqual(len(op.opt_tags), 0)
self.assertEqual(op.opt_tags, [])
op = qt.Operator('macd, dma, trix, cdl')
otp = op.opt_tags
        print(f'before setting opt_tags, all opt_tags are 0:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, list)
self.assertEqual(otp, [0, 0, 0, 0])
op.set_parameter('macd', opt_tag=1)
op.set_parameter('dma', opt_tag=1)
otp = op.opt_tags
        print(f'after setting opt_tags, the opt_tags list reflects the new values:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, list)
self.assertEqual(len(otp), 4)
self.assertEqual(otp, [1, 1, 0, 0])
def test_property_max_window_length(self):
""" test property max_window_length"""
print(f'-----test property max window length--------:\n')
op = qt.Operator()
self.assertIsInstance(op.max_window_length, int)
self.assertEqual(op.max_window_length, 0)
op = qt.Operator('macd, dma, trix, cdl')
mwl = op.max_window_length
print(f'before setting window_length the value is 270:\n'
f'mwl is {mwl}\n')
self.assertIsInstance(mwl, int)
self.assertEqual(mwl, 270)
op.set_parameter('macd', window_length=300)
op.set_parameter('dma', window_length=350)
mwl = op.max_window_length
print(f'after setting window_length the value is new set value:\n'
f'mwl is {mwl}\n')
self.assertIsInstance(mwl, int)
self.assertEqual(mwl, 350)
def test_property_bt_price_type_count(self):
""" test property bt_price_type_count"""
print(f'-----test property bt_price_type_count--------:\n')
op = qt.Operator()
self.assertIsInstance(op.bt_price_type_count, int)
self.assertEqual(op.bt_price_type_count, 0)
op = qt.Operator('macd, dma, trix, cdl')
otp = op.bt_price_type_count
print(f'before setting price_type the price count is 1:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, int)
self.assertEqual(otp, 1)
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='open')
otp = op.bt_price_type_count
print(f'after setting price_type the price type count is 2:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, int)
self.assertEqual(otp, 2)
def test_property_set(self):
""" test all property setters:
setting following properties:
- strategy_blenders
- signal_type
other properties can not be set"""
print(f'------- Test setting properties ---------')
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'pt')
op.strategy_blenders = '1 + 2'
op.signal_type = 'proportion signal'
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator('macd, dma, trix, cdl')
        # TODO: modify set_parameter() so that the usage below works:
# a_to_sell.set_parameter('dma, cdl', price_type='open')
op.set_parameter('dma', price_type='open')
op.set_parameter('cdl', price_type='open')
sb = op.strategy_blenders
st = op.signal_type
self.assertIsInstance(sb, dict)
print(f'before setting: strategy_blenders={sb}')
self.assertEqual(sb, {})
op.strategy_blenders = '1+2 * 3'
sb = op.strategy_blenders
print(f'after setting strategy_blender={sb}')
self.assertEqual(sb, {'close': ['+', '*', '3', '2', '1'],
'open': ['+', '*', '3', '2', '1']})
op.strategy_blenders = ['1+2', '3-4']
sb = op.strategy_blenders
print(f'after setting strategy_blender={sb}')
self.assertEqual(sb, {'close': ['+', '2', '1'],
'open': ['-', '4', '3']})
def test_operator_ready(self):
"""test the method ready of Operator"""
op = qt.Operator()
print(f'operator is ready? "{op.ready}"')
def test_operator_add_strategy(self):
"""test adding strategies to Operator"""
op = qt.Operator('dma, all, urgent')
self.assertIsInstance(op, qt.Operator)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[1], qt.SelectingAll)
self.assertIsInstance(op.strategies[2], qt.RiconUrgent)
self.assertIsInstance(op[0], qt.TimingDMA)
self.assertIsInstance(op[1], qt.SelectingAll)
self.assertIsInstance(op[2], qt.RiconUrgent)
self.assertIsInstance(op['dma'], qt.TimingDMA)
self.assertIsInstance(op['all'], qt.SelectingAll)
self.assertIsInstance(op['urgent'], qt.RiconUrgent)
self.assertEqual(op.strategy_count, 3)
print(f'test adding strategies into existing op')
print('test adding strategy by string')
op.add_strategy('macd')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[3], qt.TimingMACD)
self.assertEqual(op.strategy_count, 4)
op.add_strategy('random')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[4], qt.SelectingRandom)
self.assertEqual(op.strategy_count, 5)
test_ls = TestLSStrategy()
op.add_strategy(test_ls)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[5], TestLSStrategy)
self.assertEqual(op.strategy_count, 6)
print(f'Test different instance of objects are added to operator')
op.add_strategy('dma')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[6], qt.TimingDMA)
self.assertIsNot(op.strategies[0], op.strategies[6])
def test_operator_add_strategies(self):
""" etst adding multiple strategies to Operator"""
op = qt.Operator('dma, all, urgent')
self.assertEqual(op.strategy_count, 3)
print('test adding multiple strategies -- adding strategy by list of strings')
op.add_strategies(['dma', 'macd'])
self.assertEqual(op.strategy_count, 5)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[3], qt.TimingDMA)
self.assertIsInstance(op.strategies[4], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by comma separated strings')
op.add_strategies('dma, macd')
self.assertEqual(op.strategy_count, 7)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[5], qt.TimingDMA)
self.assertIsInstance(op.strategies[6], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by list of strategies')
op.add_strategies([qt.TimingDMA(), qt.TimingMACD()])
self.assertEqual(op.strategy_count, 9)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[7], qt.TimingDMA)
self.assertIsInstance(op.strategies[8], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by list of strategy and str')
op.add_strategies(['DMA', qt.TimingMACD()])
self.assertEqual(op.strategy_count, 11)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[9], qt.TimingDMA)
self.assertIsInstance(op.strategies[10], qt.TimingMACD)
self.assertIsNot(op.strategies[0], op.strategies[9])
self.assertIs(type(op.strategies[0]), type(op.strategies[9]))
print('test adding fault data')
self.assertRaises(AssertionError, op.add_strategies, 123)
self.assertRaises(AssertionError, op.add_strategies, None)
def test_opeartor_remove_strategy(self):
""" test method remove strategy"""
op = qt.Operator('dma, all, urgent')
op.add_strategies(['dma', 'macd'])
op.add_strategies(['DMA', TestLSStrategy()])
self.assertEqual(op.strategy_count, 7)
print('test removing strategies from Operator')
op.remove_strategy('dma')
self.assertEqual(op.strategy_count, 6)
self.assertEqual(op.strategy_ids, ['all', 'urgent', 'dma_1', 'macd', 'dma_2', 'custom'])
self.assertEqual(op.strategies[0], op['all'])
self.assertEqual(op.strategies[1], op['urgent'])
self.assertEqual(op.strategies[2], op['dma_1'])
self.assertEqual(op.strategies[3], op['macd'])
self.assertEqual(op.strategies[4], op['dma_2'])
self.assertEqual(op.strategies[5], op['custom'])
op.remove_strategy('dma_1')
self.assertEqual(op.strategy_count, 5)
self.assertEqual(op.strategy_ids, ['all', 'urgent', 'macd', 'dma_2', 'custom'])
self.assertEqual(op.strategies[0], op['all'])
self.assertEqual(op.strategies[1], op['urgent'])
self.assertEqual(op.strategies[2], op['macd'])
self.assertEqual(op.strategies[3], op['dma_2'])
self.assertEqual(op.strategies[4], op['custom'])
def test_opeartor_clear_strategies(self):
""" test operator clear strategies"""
op = qt.Operator('dma, all, urgent')
op.add_strategies(['dma', 'macd'])
op.add_strategies(['DMA', TestLSStrategy()])
self.assertEqual(op.strategy_count, 7)
print('test removing strategies from Operator')
op.clear_strategies()
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op.add_strategy('dma', pars=(12, 123, 25))
self.assertEqual(op.strategy_count, 1)
self.assertEqual(op.strategy_ids, ['dma'])
self.assertEqual(type(op.strategies[0]), TimingDMA)
self.assertEqual(op.strategies[0].pars, (12, 123, 25))
op.clear_strategies()
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
def test_operator_prepare_data(self):
"""test processes that related to prepare data"""
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sig = TestSigStrategy()
self.op = qt.Operator(strategies=[test_ls, test_sel, test_sig])
too_early_cash = qt.CashPlan(dates='2016-01-01', amounts=10000)
early_cash = qt.CashPlan(dates='2016-07-01', amounts=10000)
on_spot_cash = qt.CashPlan(dates='2016-07-08', amounts=10000)
no_trade_cash = qt.CashPlan(dates='2016-07-08, 2016-07-30, 2016-08-11, 2016-09-03',
amounts=[10000, 10000, 10000, 10000])
        # calling prepare_data before all strategies' parameters are set raises an AssertionError
self.assertRaises(AssertionError,
self.op.prepare_data,
hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
late_cash = qt.CashPlan(dates='2016-12-31', amounts=10000)
multi_cash = qt.CashPlan(dates='2016-07-08, 2016-08-08', amounts=[10000, 10000])
self.op.set_parameter(stg_id='custom',
pars={'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.assertEqual(self.op.strategies[0].pars, {'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.op.set_parameter(stg_id='custom_1',
pars=())
self.assertEqual(self.op.strategies[1].pars, ()),
self.op.set_parameter(stg_id='custom_2',
pars=(0.2, 0.02, -0.02))
self.assertEqual(self.op.strategies[2].pars, (0.2, 0.02, -0.02)),
self.op.prepare_data(hist_data=self.hp1,
cash_plan=on_spot_cash)
self.assertIsInstance(self.op._op_history_data, dict)
self.assertEqual(len(self.op._op_history_data), 3)
# test if automatic strategy blenders are set
self.assertEqual(self.op.strategy_blenders,
{'close': ['+', '2', '+', '1', '0']})
tim_hist_data = self.op._op_history_data['custom']
sel_hist_data = self.op._op_history_data['custom_1']
ric_hist_data = self.op._op_history_data['custom_2']
print(f'in test_prepare_data in TestOperator:')
print('selecting history data:\n', sel_hist_data)
print('originally passed data in correct sequence:\n', self.test_data_3D[:, 3:, [2, 3, 0]])
print('difference is \n', sel_hist_data - self.test_data_3D[:, :, [2, 3, 0]])
self.assertTrue(np.allclose(sel_hist_data, self.test_data_3D[:, :, [2, 3, 0]], equal_nan=True))
self.assertTrue(np.allclose(tim_hist_data, self.test_data_3D, equal_nan=True))
self.assertTrue(np.allclose(ric_hist_data, self.test_data_3D[:, 3:, :], equal_nan=True))
# raises Value Error if empty history panel is given
empty_hp = qt.HistoryPanel()
correct_hp = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 4)),
columns=self.types,
levels=self.shares,
rows=self.date_indices)
too_many_shares = qt.HistoryPanel(values=np.random.randint(10, size=(5, 50, 4)))
too_many_types = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 5)))
# raises Error when history panel is empty
self.assertRaises(ValueError,
self.op.prepare_data,
empty_hp,
on_spot_cash)
# raises Error when first investment date is too early
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
early_cash)
# raises Error when last investment date is too late
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
late_cash)
# raises Error when some of the investment dates are on no-trade-days
self.assertRaises(ValueError,
self.op.prepare_data,
correct_hp,
no_trade_cash)
# raises Error when number of shares in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_shares,
on_spot_cash)
# raises Error when the cash investment date is too early
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
too_early_cash)
# raises Error when number of d_types in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_types,
on_spot_cash)
# test the effect of data type sequence in strategy definition
def test_operator_generate(self):
""" Test signal generation process of operator objects
:return:
"""
# use the custom strategies defined in this test module to create three trading strategies
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sel2 = TestSelStrategyDiffTime()
test_sig = TestSigStrategy()
print('--Test PT type signal generation--')
# test PT-type signal generation:
# create an Operator object whose signal type is PT (proportional target signals)
# the Operator contains two strategies, an LS-Strategy and a Sel-Strategy, representing timing and selection
# each strategy generates its own PT signals, which are then blended into a single output signal
self.op = qt.Operator(strategies=[test_ls, test_sel])
self.op.set_parameter(stg_id='custom',
pars={'000010': (5, 10.),
'000030': (5, 10.),
'000039': (5, 6.)})
self.op.set_parameter(stg_id=1,
pars=())
# self.a_to_sell.set_blender(blender='0+1+2')
self.op.prepare_data(hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
print('--test operator information in normal mode--')
self.op.info()
self.assertEqual(self.op.strategy_blenders,
{'close': ['+', '1', '0']})
self.op.set_blender(None, '0*1')
self.assertEqual(self.op.strategy_blenders,
{'close': ['*', '1', '0']})
print('--test operation signal created in Proportional Target (PT) Mode--')
op_list = self.op.create_signal(hist_data=self.hp1)
self.assertTrue(isinstance(op_list, HistoryPanel))
backtest_price_types = op_list.htypes
self.assertEqual(backtest_price_types[0], 'close')
self.assertEqual(op_list.shape, (3, 45, 1))
reduced_op_list = op_list.values.squeeze().T
print(f'op_list created, it is a 3 share/45 days/1 htype array, to make comparison happen, \n'
f'it will be squeezed to a 2-d array to compare on share-wise:\n'
f'{reduced_op_list}')
target_op_values = np.array([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0]])
self.assertTrue(np.allclose(target_op_values, reduced_op_list, equal_nan=True))
print('--Test separate signal generation for two different price types--')
# test generation of two sets of PT-type signals:
# add two more strategies to the Operator; they are of the same types as before but use different
# parameters, and their backtest price type is "open"
# the Operator should then generate two sets of trading signals, one for the "close" price type and one for "open"
# two brand-new strategy objects must be created here, otherwise duplicated object references would
# appear in the Operator's strategies list and cause errors
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
self.op.add_strategies([test_ls, test_sel])
self.op.set_parameter(stg_id='custom_2',
price_type='open')
self.op.set_parameter(stg_id='custom_3',
price_type='open')
self.assertEqual(self.op['custom'].price_type, 'close')
self.assertEqual(self.op['custom_2'].price_type, 'open')
self.op.set_parameter(stg_id='custom_2',
pars={'000010': (5, 10.),
'000030': (5, 10.),
'000039': (5, 6.)})
self.op.set_parameter(stg_id='custom_3',
pars=())
self.op.set_blender(blender='0 or 1', price_type='open')
self.op.prepare_data(hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
print('--test how operator information is printed out--')
self.op.info()
self.assertEqual(self.op.strategy_blenders,
{'close': ['*', '1', '0'],
'open': ['or', '1', '0']})
print('--test operation signal created in Proportional Target (PT) Mode--')
op_list = self.op.create_signal(hist_data=self.hp1)
self.assertTrue(isinstance(op_list, HistoryPanel))
signal_close = op_list['close'].squeeze().T
signal_open = op_list['open'].squeeze().T
self.assertEqual(signal_close.shape, (45, 3))
self.assertEqual(signal_open.shape, (45, 3))
target_op_close = np.array([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0]])
target_op_open = np.array([[0.5, 0.5, 1.0],
[0.5, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 0.0],
[1.0, 1.0, 0.0],
[1.0, 1.0, 0.0],
[1.0, 0.5, 0.0],
[1.0, 0.5, 0.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.5, 1.0, 0.0],
[0.5, 1.0, 0.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0]])
signal_pairs = [[list(sig1), list(sig2), sig1 == sig2]
for sig1, sig2
in zip(list(target_op_close), list(signal_close))]
print(f'signals side by side:\n'
f'{signal_pairs}')
self.assertTrue(np.allclose(target_op_close, signal_close, equal_nan=True))
signal_pairs = [[list(sig1), list(sig2), sig1 == sig2]
for sig1, sig2
in zip(list(target_op_open), list(signal_open))]
print(f'signals side by side:\n'
f'{signal_pairs}')
self.assertTrue(np.allclose(target_op_open, signal_open, equal_nan=True))
print('--Test separate signal generation for two different price types--')
# more test cases to be added
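# Illustrative recap of the two-blender setup tested above (assumption: the
# textual blender is parsed into RPN per price type and applied to the stacked
# strategy outputs):
#     op.set_blender(None, '0*1')                           # 'close' signals: product of strategies 0 and 1
#     op.set_blender(blender='0 or 1', price_type='open')   # 'open' signals: logical OR
#     op.create_signal(hist_data=hp)                        # -> HistoryPanel with one htype per price type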
def test_stg_parameter_setting(self):
""" test setting parameters of strategies
test the method set_parameters
:return:
"""
op = qt.Operator(strategies='dma, all, urgent')
print(op.strategies, '\n', [qt.TimingDMA, qt.SelectingAll, qt.RiconUrgent])
print(f'info of Timing strategy in new op: \n{op.strategies[0].info()}')
# TODO: allow set_parameters to a list of strategies or str-listed strategies
# TODO: allow set_parameters to all strategies of specific bt price type
print(f'Set up strategy parameters by strategy id')
op.set_parameter('dma',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
op.set_parameter('all',
window_length=20)
op.set_parameter('all', price_type='high')
print(f'Can also set up strategy parameters by strategy index')
op.set_parameter(2, price_type='open')
op.set_parameter(2,
opt_tag=1,
pars=(9, -0.09),
window_length=10)
self.assertEqual(op.strategies[0].pars, (5, 10, 5))
self.assertEqual(op.strategies[0].par_boes, ((5, 10), (5, 15), (10, 15)))
self.assertEqual(op.strategies[2].pars, (9, -0.09))
self.assertEqual(op.op_data_freq, 'd')
self.assertEqual(op.op_data_types, ['close', 'high', 'open'])
self.assertEqual(op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (1, 40), (-0.5, 0.5)],
['discr', 'discr', 'discr', 'discr', 'conti']))
self.assertEqual(op.max_window_length, 20)
print(f'KeyError will be raised if wrong strategy id is given')
self.assertRaises(KeyError, op.set_parameter, stg_id='t-1', pars=(1, 2))
self.assertRaises(KeyError, op.set_parameter, stg_id='wrong_input', pars=(1, 2))
print(f'ValueError will be raised if the given parameter values are not valid')
self.assertRaises(ValueError, op.set_parameter, stg_id=0, pars=('wrong input', 'wrong input'))
# test blenders of different price types
# test setting blenders to different price types
# TODO: to allow operands like "and", "or", "not", "xor"
# a_to_sell.set_blender('close', '0 and 1 or 2')
# self.assertEqual(a_to_sell.get_blender('close'), 'str-1.2')
self.assertEqual(op.bt_price_types, ['close', 'high', 'open'])
op.set_blender('open', '0 & 1 | 2')
self.assertEqual(op.get_blender('open'), ['|', '2', '&', '1', '0'])
op.set_blender('high', '(0|1) & 2')
self.assertEqual(op.get_blender('high'), ['&', '2', '|', '1', '0'])
op.set_blender('close', '0 & 1 | 2')
self.assertEqual(op.get_blender(), {'close': ['|', '2', '&', '1', '0'],
'high': ['&', '2', '|', '1', '0'],
'open': ['|', '2', '&', '1', '0']})
self.assertEqual(op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (1, 40), (-0.5, 0.5)],
['discr', 'discr', 'discr', 'discr', 'conti']))
self.assertEqual(op.opt_tags, [1, 0, 1])
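# Behaviour illustrated above (a sketch inferred from the assertions):
# set_parameter() accepts either a strategy id string or an integer index, and
# opt_space_par concatenates the par_boes / par_types of every strategy whose
# opt_tag is non-zero into a single optimization space, in strategy order;
# set_blender() parses the infix expression of each price type into an RPN list.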
def test_signal_blend(self):
self.assertEqual(blender_parser('0 & 1'), ['&', '1', '0'])
self.assertEqual(blender_parser('0 or 1'), ['or', '1', '0'])
self.assertEqual(blender_parser('0 & 1 | 2'), ['|', '2', '&', '1', '0'])
blender = blender_parser('0 & 1 | 2')
self.assertEqual(signal_blend([1, 1, 1], blender), 1)
self.assertEqual(signal_blend([1, 0, 1], blender), 1)
self.assertEqual(signal_blend([1, 1, 0], blender), 1)
self.assertEqual(signal_blend([0, 1, 1], blender), 1)
self.assertEqual(signal_blend([0, 0, 1], blender), 1)
self.assertEqual(signal_blend([1, 0, 0], blender), 0)
self.assertEqual(signal_blend([0, 1, 0], blender), 0)
self.assertEqual(signal_blend([0, 0, 0], blender), 0)
# parse: '0 & ( 1 | 2 )'
self.assertEqual(blender_parser('0 & ( 1 | 2 )'), ['&', '|', '2', '1', '0'])
blender = blender_parser('0 & ( 1 | 2 )')
self.assertEqual(signal_blend([1, 1, 1], blender), 1)
self.assertEqual(signal_blend([1, 0, 1], blender), 1)
self.assertEqual(signal_blend([1, 1, 0], blender), 1)
self.assertEqual(signal_blend([0, 1, 1], blender), 0)
self.assertEqual(signal_blend([0, 0, 1], blender), 0)
self.assertEqual(signal_blend([1, 0, 0], blender), 0)
self.assertEqual(signal_blend([0, 1, 0], blender), 0)
self.assertEqual(signal_blend([0, 0, 0], blender), 0)
# parse: '(1-2)/3 + 0'
self.assertEqual(blender_parser('(1-2)/3 + 0'), ['+', '0', '/', '3', '-', '2', '1'])
blender = blender_parser('(1-2)/3 + 0')
self.assertEqual(signal_blend([5, 9, 1, 4], blender), 7)
# parse: '(0*1/2*(3+4))+5*(6+7)-8'
self.assertEqual(blender_parser('(0*1/2*(3+4))+5*(6+7)-8'), ['-', '8', '+', '*', '+', '7', '6', '5', '*',
'+', '4', '3', '/', '2', '*', '1', '0'])
blender = blender_parser('(0*1/2*(3+4))+5*(6+7)-8')
self.assertEqual(signal_blend([1, 1, 1, 1, 1, 1, 1, 1, 1], blender), 3)
self.assertEqual(signal_blend([2, 1, 4, 3, 5, 5, 2, 2, 10], blender), 14)
# parse: '0/max(2,1,3 + 5)+4'
self.assertEqual(blender_parser('0/max(2,1,3 + 5)+4'), ['+', '4', '/', 'max(3)', '+', '5', '3', '1', '2', '0'])
blender = blender_parser('0/max(2,1,3 + 5)+4')
self.assertEqual(signal_blend([8.0, 4, 3, 5.0, 0.125, 5], blender), 0.925)
self.assertEqual(signal_blend([2, 1, 4, 3, 5, 5, 2, 2, 10], blender), 5.25)
print('speed test')
import time
st = time.time()
blender = blender_parser('0+max(1,2,(3+4)*5, max(6, (7+8)*9), 10-11) * (12+13)')
res = []
for i in range(10000):
res = signal_blend([1, 1, 2, 3, 4, 5, 3, 4, 5, 6, 7, 8, 2, 3], blender)
et = time.time()
print(f'total time for RPN processing: {et - st}, got result: {res}')
blender = blender_parser("0 + 1 * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 7)
blender = blender_parser("(0 + 1) * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 9)
blender = blender_parser("(0+1) * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 9)
blender = blender_parser("(0 + 1) * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 9)
# TODO: expressions like -(1+2) cannot be handled yet
# self.a_to_sell.set_blender('selecting', "-(0 + 1) * 2")
# self.assertEqual(self.a_to_sell.signal_blend([1, 2, 3]), -9)
blender = blender_parser("(0-1)/2 + 3")
print(f'RPN of notation: "(0-1)/2 + 3" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([1, 2, 3, 0.0], blender), -0.33333333)
blender = blender_parser("0 + 1 / 2")
print(f'RPN of notation: "0 + 1 / 2" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([1, math.pi, 4], blender), 1.78539816)
blender = blender_parser("(0 + 1) / 2")
print(f'RPN of notation: "(0 + 1) / 2" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(signal_blend([1, 2, 3], blender), 1)
blender = blender_parser("(0 + 1 * 2) / 3")
print(f'RPN of notation: "(0 + 1 * 2) / 3" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([3, math.e, 10, 10], blender), 3.0182818284590454)
blender = blender_parser("0 / 1 * 2")
print(f'RPN of notation: "0 / 1 * 2" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(signal_blend([1, 3, 6], blender), 2)
blender = blender_parser("(0 - 1 + 2) * 4")
print(f'RPN of notation: "(0 - 1 + 2) * 4" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([1, 1, -1, np.nan, math.pi], blender), -3.141592653589793)
blender = blender_parser("0 * 1")
print(f'RPN of notation: "0 * 1" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([math.pi, math.e], blender), 8.539734222673566)
blender = blender_parser('abs(3-sqrt(2) / cos(1))')
print(f'RPN of notation: "abs(3-sqrt(2) / cos(1))" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['abs(1)', '-', '/', 'cos(1)', '1', 'sqrt(1)', '2', '3'])
blender = blender_parser('0/max(2,1,3 + 5)+4')
print(f'RPN of notation: "0/max(2,1,3 + 5)+4" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['+', '4', '/', 'max(3)', '+', '5', '3', '1', '2', '0'])
blender = blender_parser('1 + sum(1,2,3+3, sum(1, 2) + 3) *5')
print(f'RPN of notation: "1 + sum(1,2,3+3, sum(1, 2) + 3) *5" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['+', '*', '5', 'sum(4)', '+', '3', 'sum(2)', '2', '1',
'+', '3', '3', '2', '1', '1'])
blender = blender_parser('1+sum(1,2,(3+5)*4, sum(3, (4+5)*6), 7-8) * (2+3)')
print(f'RPN of notation: "1+sum(1,2,(3+5)*4, sum(3, (4+5)*6), 7-8) * (2+3)" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['+', '*', '+', '3', '2', 'sum(5)', '-', '8', '7',
'sum(2)', '*', '6', '+', '5', '4', '3', '*', '4',
'+', '5', '3', '2', '1', '1'])
# TODO: ndarray type of signals to be tested:
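# Worked example of the RPN convention used by blender_parser / signal_blend
# (reconstructed from the assertions above, for illustration only):
#     blender_parser('0 & 1 | 2')  ->  ['|', '2', '&', '1', '0']
# The list is consumed from the end, i.e. read right-to-left it is the postfix
# expression  0 1 & 2 | ; with signals [1, 0, 1] it evaluates as
#     (s0 & s1) = (1 & 0) = 0,  then  (0 | s2) = (0 | 1) = 1,
# matching signal_blend([1, 0, 1], blender) == 1 asserted earlier.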
def test_set_opt_par(self):
""" test setting opt pars in batch"""
print(f'--------- Testing setting Opt Pars: set_opt_par -------')
op = qt.Operator('dma, random, crossline')
op.set_parameter('dma',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
self.assertEqual(op.strategies[0].pars, (5, 10, 5))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (35, 120, 10, 'buy'))
self.assertEqual(op.opt_tags, [1, 0, 0])
op.set_opt_par((5, 12, 9))
self.assertEqual(op.strategies[0].pars, (5, 12, 9))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (35, 120, 10, 'buy'))
op.set_parameter('crossline',
pars=(5, 10, 5, 'sell'),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15), ('buy', 'sell', 'none')),
window_length=10,
data_types=['close', 'open', 'high'])
self.assertEqual(op.opt_tags, [1, 0, 1])
op.set_opt_par((5, 12, 9, 8, 26, 9, 'buy'))
self.assertEqual(op.strategies[0].pars, (5, 12, 9))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy'))
op.set_opt_par((9, 200, 155, 8, 26, 9, 'buy', 5, 12, 9))
self.assertEqual(op.strategies[0].pars, (9, 200, 155))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy'))
# test set_opt_par when opt_tag is set to be 2 (enumerate type of parameters)
op.set_parameter('crossline',
pars=(5, 10, 5, 'sell'),
opt_tag=2,
par_boes=((5, 10), (5, 15), (10, 15), ('buy', 'sell', 'none')),
window_length=10,
data_types=['close', 'open', 'high'])
self.assertEqual(op.opt_tags, [1, 0, 2])
self.assertEqual(op.strategies[0].pars, (9, 200, 155))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (5, 10, 5, 'sell'))
op.set_opt_par((5, 12, 9, (8, 26, 9, 'buy')))
self.assertEqual(op.strategies[0].pars, (5, 12, 9))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy'))
# Test Errors
# Not enough values for parameter
op.set_parameter('crossline', opt_tag=1)
self.assertRaises(ValueError, op.set_opt_par, (5, 12, 9, 8))
# wrong type of input
self.assertRaises(AssertionError, op.set_opt_par, [5, 12, 9, 7, 15, 12, 'sell'])
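# Behaviour illustrated above (a sketch inferred from the assertions):
# set_opt_par() distributes the flat tuple over strategies with non-zero
# opt_tag, in strategy order; opt_tag == 1 consumes one element per parameter,
# while opt_tag == 2 (enumerated parameters) consumes a single element that is
# itself the complete parameter tuple, e.g. (5, 12, 9, (8, 26, 9, 'buy'));
# extra trailing values appear to be ignored.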
def test_stg_attribute_get_and_set(self):
self.stg = qt.TimingCrossline()
self.stg_type = 'R-TIMING'
self.stg_name = "CROSSLINE"
self.stg_text = 'Moving average crossline strategy, determine long/short position according to the cross ' \
'point' \
' of long and short term moving average prices '
self.pars = (35, 120, 10, 'buy')
self.par_boes = [(10, 250), (10, 250), (1, 100), ('buy', 'sell', 'none')]
self.par_count = 4
self.par_types = ['discr', 'discr', 'conti', 'enum']
self.opt_tag = 0
self.data_types = ['close']
self.data_freq = 'd'
self.sample_freq = 'd'
self.window_length = 270
self.assertEqual(self.stg.stg_type, self.stg_type)
self.assertEqual(self.stg.stg_name, self.stg_name)
self.assertEqual(self.stg.stg_text, self.stg_text)
self.assertEqual(self.stg.pars, self.pars)
self.assertEqual(self.stg.par_types, self.par_types)
self.assertEqual(self.stg.par_boes, self.par_boes)
self.assertEqual(self.stg.par_count, self.par_count)
self.assertEqual(self.stg.opt_tag, self.opt_tag)
self.assertEqual(self.stg.data_freq, self.data_freq)
self.assertEqual(self.stg.sample_freq, self.sample_freq)
self.assertEqual(self.stg.data_types, self.data_types)
self.assertEqual(self.stg.window_length, self.window_length)
self.stg.stg_name = 'NEW NAME'
self.stg.stg_text = 'NEW TEXT'
self.assertEqual(self.stg.stg_name, 'NEW NAME')
self.assertEqual(self.stg.stg_text, 'NEW TEXT')
self.stg.pars = (1, 2, 3, 4)
self.assertEqual(self.stg.pars, (1, 2, 3, 4))
self.stg.par_count = 3
self.assertEqual(self.stg.par_count, 3)
self.stg.par_boes = [(1, 10), (1, 10), (1, 10), (1, 10)]
self.assertEqual(self.stg.par_boes, [(1, 10), (1, 10), (1, 10), (1, 10)])
self.stg.par_types = ['conti', 'conti', 'discr', 'enum']
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'enum'])
self.stg.par_types = 'conti, conti, discr, conti'
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'conti'])
self.stg.data_types = 'close, open'
self.assertEqual(self.stg.data_types, ['close', 'open'])
self.stg.data_types = ['close', 'high', 'low']
self.assertEqual(self.stg.data_types, ['close', 'high', 'low'])
self.stg.data_freq = 'w'
self.assertEqual(self.stg.data_freq, 'w')
self.stg.window_length = 300
self.assertEqual(self.stg.window_length, 300)
def test_rolling_timing(self):
stg = TestLSStrategy()
stg_pars = {'000100': (5, 10),
'000200': (5, 10),
'000300': (5, 6)}
stg.set_pars(stg_pars)
history_data = self.hp1.values
output = stg.generate(hist_data=history_data)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
lsmask = np.array([[0., 0., 1.],
[0., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 0.],
[1., 1., 0.],
[1., 1., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.]])
# TODO: Issue to be solved: the np.nan values are converted to 0 in the lsmask, which may have unexpected consequences
# TODO: the handling of nan values needs to be properly resolved
self.assertEqual(output.shape, lsmask.shape)
self.assertTrue(np.allclose(output, lsmask, equal_nan=True))
def test_sel_timing(self):
stg = TestSelStrategy()
stg_pars = ()
stg.set_pars(stg_pars)
history_data = self.hp1['high, low, close', :, :]
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
def test_simple_timing(self):
stg = TestSigStrategy()
stg_pars = (0.2, 0.02, -0.02)
stg.set_pars(stg_pars)
history_data = self.hp1['close, open, high, low', :, 3:50]
output = stg.generate(hist_data=history_data, shares=self.shares, dates=self.date_indices)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
sigmatrix = np.array([[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0]])
side_by_side_array = np.array([[i, out_line, sig_line]
for
i, out_line, sig_line
in zip(range(len(output)), output, sigmatrix)])
print(f'output and signal matrix lined up side by side is \n'
f'{side_by_side_array}')
self.assertEqual(sigmatrix.shape, output.shape)
self.assertTrue(np.allclose(output, sigmatrix))
def test_sel_finance(self):
"""Test selecting_finance strategy, test all built-in strategy parameters"""
stg = SelectingFinanceIndicator()
stg_pars = (False, 'even', 'greater', 0, 0, 0.67)
stg.set_pars(stg_pars)
stg.window_length = 5
stg.data_freq = 'd'
stg.sample_freq = '10d'
stg.sort_ascending = False
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg._poq = 0.67
history_data = self.hp2.values
print(f'Start to test financial selection parameter {stg_pars}')
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get minimum factor
stg_pars = (True, 'even', 'less', 1, 1, 0.67)
stg.sort_ascending = True
stg.condition = 'less'
stg.lbound = 1
stg.ubound = 1
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor in linear weight
stg_pars = (False, 'linear', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'linear'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor with proportion weighting
stg_pars = (False, 'proportion', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'proportion'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
# test single factor, get max factor with even weighting, threshold 0.2
stg_pars = (False, 'even', 'greater', 0.2, 0.2, 0.67)
stg.sort_ascending = False
stg.weighting = 'even'
stg.condition = 'greater'
stg.lbound = 0.2
stg.ubound = 0.2
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
def test_tokenizer(self):
self.assertListEqual(_exp_to_token('1+1'),
['1', '+', '1'])
print(_exp_to_token('1+1'))
self.assertListEqual(_exp_to_token('1 & 1'),
['1', '&', '1'])
print(_exp_to_token('1&1'))
self.assertListEqual(_exp_to_token('1 and 1'),
['1', 'and', '1'])
print(_exp_to_token('1 and 1'))
self.assertListEqual(_exp_to_token('1 or 1'),
['1', 'or', '1'])
print(_exp_to_token('1 or 1'))
self.assertListEqual(_exp_to_token('(1 - 1 + -1) * pi'),
['(', '1', '-', '1', '+', '-1', ')', '*', 'pi'])
print(_exp_to_token('(1 - 1 + -1) * pi'))
self.assertListEqual(_exp_to_token('abs(5-sqrt(2) / cos(pi))'),
['abs(', '5', '-', 'sqrt(', '2', ')', '/', 'cos(', 'pi', ')', ')'])
print(_exp_to_token('abs(5-sqrt(2) / cos(pi))'))
self.assertListEqual(_exp_to_token('sin(pi) + 2.14'),
['sin(', 'pi', ')', '+', '2.14'])
print(_exp_to_token('sin(pi) + 2.14'))
self.assertListEqual(_exp_to_token('(1-2)/3.0 + 0.0000'),
['(', '1', '-', '2', ')', '/', '3.0', '+', '0.0000'])
print(_exp_to_token('(1-2)/3.0 + 0.0000'))
self.assertListEqual(_exp_to_token('-(1. + .2) * max(1, 3, 5)'),
['-', '(', '1.', '+', '.2', ')', '*', 'max(', '1', ',', '3', ',', '5', ')'])
print(_exp_to_token('-(1. + .2) * max(1, 3, 5)'))
self.assertListEqual(_exp_to_token('(x + e * 10) / 10'),
['(', 'x', '+', 'e', '*', '10', ')', '/', '10'])
print(_exp_to_token('(x + e * 10) / 10'))
self.assertListEqual(_exp_to_token('8.2/((-.1+abs3(3,4,5))*0.12)'),
['8.2', '/', '(', '(', '-.1', '+', 'abs3(', '3', ',', '4', ',', '5', ')', ')', '*', '0.12',
')'])
print(_exp_to_token('8.2/((-.1+abs3(3,4,5))*0.12)'))
self.assertListEqual(_exp_to_token('8.2/abs3(3,4,25.34 + 5)*0.12'),
['8.2', '/', 'abs3(', '3', ',', '4', ',', '25.34', '+', '5', ')', '*', '0.12'])
print(_exp_to_token('8.2/abs3(3,4,25.34 + 5)*0.12'))
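# Tokenizer behaviour illustrated above (a sketch based on the assertions):
# _exp_to_token() splits a blender expression into numbers, operators, function
# openers such as 'max(' or 'sqrt(', commas and parentheses; a unary minus is
# attached to a following numeric literal (e.g. '-1', '-.1') but stays a
# separate token in front of a parenthesis, and word operators like 'and' /
# 'or' are kept as single tokens.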
class TestLog(unittest.TestCase):
def test_init(self):
pass
class TestConfig(unittest.TestCase):
"""测试Config对象以及QT_CONFIG变量的设置和获取值"""
def test_init(self):
pass
def test_invest(self):
pass
def test_pars_string_to_type(self):
_parse_string_kwargs('000300', 'asset_pool', _valid_qt_kwargs())
class TestHistoryPanel(unittest.TestCase):
def setUp(self):
print('start testing HistoryPanel object\n')
self.data = np.random.randint(10, size=(5, 10, 4))
self.index = pd.date_range(start='20200101', freq='d', periods=10)
self.index2 = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14']
self.index3 = '2016-07-01, 2016-07-04, 2016-07-05, 2016-07-06, 2016-07-07, ' \
'2016-07-08, 2016-07-11, 2016-07-12, 2016-07-13, 2016-07-14'
self.shares = '000100,000101,000102,000103,000104'
self.htypes = 'close,open,high,low'
self.data2 = np.random.randint(10, size=(10, 5))
self.data3 = np.random.randint(10, size=(10, 4))
self.data4 = np.random.randint(10, size=(10))
self.hp = qt.HistoryPanel(values=self.data, levels=self.shares, columns=self.htypes, rows=self.index)
self.hp2 = qt.HistoryPanel(values=self.data2, levels=self.shares, columns='close', rows=self.index)
self.hp3 = qt.HistoryPanel(values=self.data3, levels='000100', columns=self.htypes, rows=self.index2)
self.hp4 = qt.HistoryPanel(values=self.data4, levels='000100', columns='close', rows=self.index3)
self.hp5 = qt.HistoryPanel(values=self.data)
self.hp6 = qt.HistoryPanel(values=self.data, levels=self.shares, rows=self.index3)
def test_properties(self):
""" test all properties of HistoryPanel
"""
self.assertFalse(self.hp.is_empty)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.shape, (5, 10, 4))
self.assertSequenceEqual(self.hp.htypes, ['close', 'open', 'high', 'low'])
self.assertSequenceEqual(self.hp.shares, ['000100', '000101', '000102', '000103', '000104'])
self.assertSequenceEqual(list(self.hp.hdates), list(self.index))
self.assertDictEqual(self.hp.columns, {'close': 0, 'open': 1, 'high': 2, 'low': 3})
self.assertDictEqual(self.hp.levels, {'000100': 0, '000101': 1, '000102': 2, '000103': 3, '000104': 4})
row_dict = {Timestamp('2020-01-01 00:00:00', freq='D'): 0,
Timestamp('2020-01-02 00:00:00', freq='D'): 1,
Timestamp('2020-01-03 00:00:00', freq='D'): 2,
Timestamp('2020-01-04 00:00:00', freq='D'): 3,
Timestamp('2020-01-05 00:00:00', freq='D'): 4,
Timestamp('2020-01-06 00:00:00', freq='D'): 5,
Timestamp('2020-01-07 00:00:00', freq='D'): 6,
Timestamp('2020-01-08 00:00:00', freq='D'): 7,
Timestamp('2020-01-09 00:00:00', freq='D'): 8,
Timestamp('2020-01-10 00:00:00', freq='D'): 9}
self.assertDictEqual(self.hp.rows, row_dict)
def test_len(self):
""" test the function len(HistoryPanel)
:return:
"""
self.assertEqual(len(self.hp), 10)
def test_empty_history_panel(self):
"""测试空HP或者特殊HP如维度标签为纯数字的HP"""
test_hp = qt.HistoryPanel(self.data)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
self.assertEqual(test_hp.level_count, 5)
self.assertEqual(test_hp.row_count, 10)
self.assertEqual(test_hp.column_count, 4)
self.assertEqual(test_hp.shares, list(range(5)))
self.assertEqual(test_hp.hdates, list(pd.date_range(start='20200730', periods=10, freq='d')))
self.assertEqual(test_hp.htypes, list(range(4)))
self.assertTrue(np.allclose(test_hp.values, self.data))
print(f'shares: {test_hp.shares}\nhtypes: {test_hp.htypes}')
print(test_hp)
# HistoryPanel should be empty if no value is given
empty_hp = qt.HistoryPanel()
self.assertTrue(empty_hp.is_empty)
self.assertIsInstance(empty_hp, qt.HistoryPanel)
self.assertEqual(empty_hp.shape[0], 0)
self.assertEqual(empty_hp.shape[1], 0)
self.assertEqual(empty_hp.shape[2], 0)
self.assertEqual(empty_hp.level_count, 0)
self.assertEqual(empty_hp.row_count, 0)
self.assertEqual(empty_hp.column_count, 0)
# HistoryPanel should also be empty if an empty value array (e.g. np.empty((5, 0, 4))) is given
empty_hp = qt.HistoryPanel(np.empty((5, 0, 4)), levels=self.shares, columns=self.htypes)
self.assertTrue(empty_hp.is_empty)
self.assertIsInstance(empty_hp, qt.HistoryPanel)
self.assertEqual(empty_hp.shape[0], 0)
self.assertEqual(empty_hp.shape[1], 0)
self.assertEqual(empty_hp.shape[2], 0)
self.assertEqual(empty_hp.level_count, 0)
self.assertEqual(empty_hp.row_count, 0)
self.assertEqual(empty_hp.column_count, 0)
def test_create_history_panel(self):
""" test the creation of a HistoryPanel object by passing all data explicitly
"""
self.assertIsInstance(self.hp, qt.HistoryPanel)
self.assertEqual(self.hp.shape[0], 5)
self.assertEqual(self.hp.shape[1], 10)
self.assertEqual(self.hp.shape[2], 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(list(self.hp.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp2, qt.HistoryPanel)
self.assertEqual(self.hp2.shape[0], 5)
self.assertEqual(self.hp2.shape[1], 10)
self.assertEqual(self.hp2.shape[2], 1)
self.assertEqual(self.hp2.level_count, 5)
self.assertEqual(self.hp2.row_count, 10)
self.assertEqual(self.hp2.column_count, 1)
self.assertEqual(list(self.hp2.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp2.columns.keys()), ['close'])
self.assertEqual(list(self.hp2.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp3, qt.HistoryPanel)
self.assertEqual(self.hp3.shape[0], 1)
self.assertEqual(self.hp3.shape[1], 10)
self.assertEqual(self.hp3.shape[2], 4)
self.assertEqual(self.hp3.level_count, 1)
self.assertEqual(self.hp3.row_count, 10)
self.assertEqual(self.hp3.column_count, 4)
self.assertEqual(list(self.hp3.levels.keys()), ['000100'])
self.assertEqual(list(self.hp3.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp3.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.assertIsInstance(self.hp4, qt.HistoryPanel)
self.assertEqual(self.hp4.shape[0], 1)
self.assertEqual(self.hp4.shape[1], 10)
self.assertEqual(self.hp4.shape[2], 1)
self.assertEqual(self.hp4.level_count, 1)
self.assertEqual(self.hp4.row_count, 10)
self.assertEqual(self.hp4.column_count, 1)
self.assertEqual(list(self.hp4.levels.keys()), ['000100'])
self.assertEqual(list(self.hp4.columns.keys()), ['close'])
self.assertEqual(list(self.hp4.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.hp5.info()
self.assertIsInstance(self.hp5, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp5.values, self.data))
self.assertEqual(self.hp5.shape[0], 5)
self.assertEqual(self.hp5.shape[1], 10)
self.assertEqual(self.hp5.shape[2], 4)
self.assertEqual(self.hp5.level_count, 5)
self.assertEqual(self.hp5.row_count, 10)
self.assertEqual(self.hp5.column_count, 4)
self.assertEqual(list(self.hp5.levels.keys()), [0, 1, 2, 3, 4])
self.assertEqual(list(self.hp5.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp5.rows.keys())[0], pd.Timestamp('2020-07-30'))
self.hp6.info()
self.assertIsInstance(self.hp6, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp6.values, self.data))
self.assertEqual(self.hp6.shape[0], 5)
self.assertEqual(self.hp6.shape[1], 10)
self.assertEqual(self.hp6.shape[2], 4)
self.assertEqual(self.hp6.level_count, 5)
self.assertEqual(self.hp6.row_count, 10)
self.assertEqual(self.hp6.column_count, 4)
self.assertEqual(list(self.hp6.levels.keys()), ['000100', '000101', '000102', '000103', '000104'])
self.assertEqual(list(self.hp6.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp6.rows.keys())[0], pd.Timestamp('2016-07-01'))
print('test creating HistoryPanel with very limited data')
print('test creating HistoryPanel with 2D data')
temp_data = np.random.randint(10, size=(7, 3)).astype('float')
temp_hp = qt.HistoryPanel(temp_data)
# Error testing during HistoryPanel creating
# shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data,
levels=self.shares, columns='close', rows=self.index)
# values is not an np.ndarray
self.assertRaises(TypeError,
qt.HistoryPanel,
list(self.data))
# dimension/shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data2,
levels='000100', columns=self.htypes, rows=self.index)
# value dimension over 3
self.assertRaises(AssertionError,
qt.HistoryPanel,
np.random.randint(10, size=(5, 10, 4, 2)))
# label values are not valid
self.assertRaises(ValueError,
qt.HistoryPanel,
self.data2,
levels=self.shares, columns='close',
rows='a,b,c,d,e,f,g,h,i,j')
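# Axis convention exercised throughout these tests: a HistoryPanel is a 3-D
# block of shape (levels, rows, columns) == (shares, hdates, htypes).
# A minimal construction sketch, assuming the same keyword names as used above:
#     hp = qt.HistoryPanel(values=np.random.rand(5, 10, 4),
#                          levels='000100,000101,000102,000103,000104',
#                          rows=pd.date_range('20200101', periods=10, freq='d'),
#                          columns='close,open,high,low')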
def test_history_panel_slicing(self):
"""测试HistoryPanel的各种切片方法
包括通过标签名称切片,通过数字切片,通过逗号分隔的标签名称切片,通过冒号分隔的标签名称切片等切片方式"""
self.assertTrue(np.allclose(self.hp['close'], self.data[:, :, 0:1]))
self.assertTrue(np.allclose(self.hp['close,open'], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp[['close', 'open']], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp['close:high'], self.data[:, :, 0:3]))
self.assertTrue(np.allclose(self.hp['close,high'], self.data[:, :, [0, 2]]))
self.assertTrue(np.allclose(self.hp[:, '000100'], self.data[0:1, :, ]))
self.assertTrue(np.allclose(self.hp[:, '000100,000101'], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, ['000100', '000101']], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, '000100:000102'], self.data[0:3, :]))
self.assertTrue(np.allclose(self.hp[:, '000100,000102'], self.data[[0, 2], :]))
self.assertTrue(np.allclose(self.hp['close,open', '000100,000102'], self.data[[0, 2], :, 0:2]))
print('start testing HistoryPanel')
data = np.random.randint(10, size=(10, 5))
# index = pd.date_range(start='20200101', freq='d', periods=10)
shares = '000100,000101,000102,000103,000104'
dtypes = 'close'
df = pd.DataFrame(data)
print('=========================\nTesting HistoryPanel creation from DataFrame')
hp = qt.dataframe_to_hp(df=df, shares=shares, htypes=dtypes)
hp.info()
hp = qt.dataframe_to_hp(df=df, shares='000100', htypes='close, open, high, low, middle', column_type='htypes')
hp.info()
print('=========================\nTesting HistoryPanel creation from initialization')
data = np.random.randint(10, size=(5, 10, 4)).astype('float')
index = pd.date_range(start='20200101', freq='d', periods=10)
dtypes = 'close, open, high,low'
data[0, [5, 6, 9], [0, 1, 3]] = np.nan
data[1:4, [4, 7, 6, 2], [1, 1, 3, 0]] = np.nan
data[4:5, [2, 9, 1, 2], [0, 3, 2, 1]] = np.nan
hp = qt.HistoryPanel(data, levels=shares, columns=dtypes, rows=index)
hp.info()
print('==========================\noutput all historical data of the close htype\n')
self.assertTrue(np.allclose(hp['close', :, :], data[:, :, 0:1], equal_nan=True))
print(f'==========================\noutput all historical data of the close and open htypes\n')
self.assertTrue(np.allclose(hp[[0, 1], :, :], data[:, :, 0:2], equal_nan=True))
print(f'==========================\noutput historical data of all htypes for the first share\n')
self.assertTrue(np.allclose(hp[:, [0], :], data[0:1, :, :], equal_nan=True))
print('==========================\noutput all historical data of htypes 0, 1 and 2 for all shares\n')
self.assertTrue(np.allclose(hp[[0, 1, 2]], data[:, :, 0:3], equal_nan=True))
print('==========================\noutput all historical data of the close and high htypes\n')
self.assertTrue(np.allclose(hp[['close', 'high']], data[:, :, [0, 2]], equal_nan=True))
print('==========================\noutput all historical data of htypes 0 and 1\n')
self.assertTrue(np.allclose(hp[[0, 1]], data[:, :, 0:2], equal_nan=True))
print('==========================\noutput all historical data of the close and high htypes\n')
self.assertTrue(np.allclose(hp['close,high'], data[:, :, [0, 2]], equal_nan=True))
print('==========================\noutput all historical data of the three htypes from close through high\n')
self.assertTrue(np.allclose(hp['close:high'], data[:, :, 0:3], equal_nan=True))
print('==========================\noutput all historical data of shares 0, 1 and 3\n')
self.assertTrue(np.allclose(hp[:, [0, 1, 3]], data[[0, 1, 3], :, :], equal_nan=True))
print('==========================\noutput all historical data of shares 000100 and 000102\n')
self.assertTrue(np.allclose(hp[:, ['000100', '000102']], data[[0, 2], :, :], equal_nan=True))
print('==========================\noutput historical data of shares 0, 1 and 2\n', hp[:, 0: 3])
self.assertTrue(np.allclose(hp[:, 0: 3], data[0:3, :, :], equal_nan=True))
print('==========================\noutput all historical data of shares 000100 and 000102\n')
self.assertTrue(np.allclose(hp[:, '000100, 000102'], data[[0, 2], :, :], equal_nan=True))
print('==========================\noutput historical data of days 0-7 for all shares\n')
self.assertTrue(np.allclose(hp[:, :, 0:8], data[:, 0:8, :], equal_nan=True))
print('==========================\noutput historical data of days 0-7 for share 000100\n')
self.assertTrue(np.allclose(hp[:, '000100', 0:8], data[0, 0:8, :], equal_nan=True))
print('==========================\nstart testing multi-axis slicing of HistoryPanel object')
print('==========================\noutput close and open data of shares 000100 and 000102\n',
hp['close,open', ['000100', '000102']])
print('==========================\noutput close and open data of shares 000100 and 000102, using comma-separated label strings\n',
hp['close,open', '000100, 000102'])
print(f'historyPanel: hp:\n{hp}')
print(f'data is:\n{data}')
hp.htypes = 'open,high,low,close'
hp.info()
hp.shares = ['000300', '600227', '600222', '000123', '000129']
hp.info()
def test_segment(self):
"""测试历史数据片段的获取"""
test_hp = qt.HistoryPanel(self.data,
levels=self.shares,
columns=self.htypes,
rows=self.index2)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
print(f'Test segment with None parameters')
seg1 = test_hp.segment()
seg2 = test_hp.segment('20150202')
seg3 = test_hp.segment(end_date='20201010')
self.assertIsInstance(seg1, qt.HistoryPanel)
self.assertIsInstance(seg2, qt.HistoryPanel)
self.assertIsInstance(seg3, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp.values
))
self.assertTrue(np.allclose(
seg2.values, test_hp.values
))
self.assertTrue(np.allclose(
seg3.values, test_hp.values
))
# check that htypes and shares should be same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
self.assertEqual(seg2.htypes, test_hp.htypes)
self.assertEqual(seg2.shares, test_hp.shares)
self.assertEqual(seg3.htypes, test_hp.htypes)
self.assertEqual(seg3.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
self.assertEqual(seg2.hdates, test_hp.hdates)
self.assertEqual(seg3.hdates, test_hp.hdates)
print(f'Test segment with proper dates')
seg1 = test_hp.segment()
seg2 = test_hp.segment('20160704')
seg3 = test_hp.segment(start_date='2016-07-05',
end_date='20160708')
self.assertIsInstance(seg1, qt.HistoryPanel)
self.assertIsInstance(seg2, qt.HistoryPanel)
self.assertIsInstance(seg3, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp[:, :, :]
))
self.assertTrue(np.allclose(
seg2.values, test_hp[:, :, 1:10]
))
self.assertTrue(np.allclose(
seg3.values, test_hp[:, :, 2:6]
))
# check that htypes and shares should be same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
self.assertEqual(seg2.htypes, test_hp.htypes)
self.assertEqual(seg2.shares, test_hp.shares)
self.assertEqual(seg3.htypes, test_hp.htypes)
self.assertEqual(seg3.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
self.assertEqual(seg2.hdates, test_hp.hdates[1:10])
self.assertEqual(seg3.hdates, test_hp.hdates[2:6])
print(f'Test segment with non-existing but in range dates')
seg1 = test_hp.segment()
seg2 = test_hp.segment('20160703')
seg3 = test_hp.segment(start_date='2016-07-03',
end_date='20160710')
self.assertIsInstance(seg1, qt.HistoryPanel)
self.assertIsInstance(seg2, qt.HistoryPanel)
self.assertIsInstance(seg3, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp[:, :, :]
))
self.assertTrue(np.allclose(
seg2.values, test_hp[:, :, 1:10]
))
self.assertTrue(np.allclose(
seg3.values, test_hp[:, :, 1:6]
))
# check that htypes and shares should be same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
self.assertEqual(seg2.htypes, test_hp.htypes)
self.assertEqual(seg2.shares, test_hp.shares)
self.assertEqual(seg3.htypes, test_hp.htypes)
self.assertEqual(seg3.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
self.assertEqual(seg2.hdates, test_hp.hdates[1:10])
self.assertEqual(seg3.hdates, test_hp.hdates[1:6])
print(f'Test segment with out-of-range dates')
seg1 = test_hp.segment(start_date='2016-05-03',
end_date='20160910')
self.assertIsInstance(seg1, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp[:, :, :]
))
# check that htypes and shares should be same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
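# Observed segment() semantics (a summary of the assertions above, not a spec):
# omitted bounds default to the full hdate range, out-of-range dates are
# clipped to it, a start date that is not in hdates snaps forward to the next
# available date and an end date snaps backward to the previous one; shares
# and htypes are always preserved.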
def test_slice(self):
"""测试历史数据切片的获取"""
test_hp = qt.HistoryPanel(self.data,
levels=self.shares,
columns=self.htypes,
rows=self.index2)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
print(f'Test slice with shares')
share = '000101'
slc = test_hp.slice(shares=share)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, ['000101'])
self.assertEqual(slc.htypes, test_hp.htypes)
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp[:, '000101']))
share = '000101, 000103'
slc = test_hp.slice(shares=share)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, ['000101', '000103'])
self.assertEqual(slc.htypes, test_hp.htypes)
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp[:, '000101, 000103']))
print(f'Test slice with htypes')
htype = 'open'
slc = test_hp.slice(htypes=htype)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, test_hp.shares)
self.assertEqual(slc.htypes, ['open'])
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp['open']))
htype = 'open, close'
slc = test_hp.slice(htypes=htype)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, test_hp.shares)
self.assertEqual(slc.htypes, ['open', 'close'])
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp['open, close']))
# test that slicing with "open, close" is NOT equal to slicing with "close, open"
self.assertFalse(np.allclose(slc.values, test_hp['close, open']))
print(f'Test slicing with both htypes and shares')
share = '000103, 000101'
htype = 'high, low, close'
slc = test_hp.slice(shares=share, htypes=htype)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, ['000103', '000101'])
self.assertEqual(slc.htypes, ['high', 'low', 'close'])
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp['high, low, close', '000103, 000101']))
print(f'Test Error cases')
# duplicated input
htype = 'open, close, open'
self.assertRaises(AssertionError, test_hp.slice, htypes=htype)
def test_relabel(self):
new_shares_list = ['000001', '000002', '000003', '000004', '000005']
new_shares_str = '000001, 000002, 000003, 000004, 000005'
new_htypes_list = ['close', 'volume', 'value', 'exchange']
new_htypes_str = 'close, volume, value, exchange'
temp_hp = self.hp.copy()
temp_hp.re_label(shares=new_shares_list)
print(temp_hp.info())
print(temp_hp.htypes)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.htypes, temp_hp.htypes)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.shares, new_shares_list)
temp_hp = self.hp.copy()
temp_hp.re_label(shares=new_shares_str)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.htypes, temp_hp.htypes)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.shares, new_shares_list)
temp_hp = self.hp.copy()
temp_hp.re_label(htypes=new_htypes_list)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.shares, temp_hp.shares)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.htypes, new_htypes_list)
temp_hp = self.hp.copy()
temp_hp.re_label(htypes=new_htypes_str)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.shares, temp_hp.shares)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.htypes, new_htypes_list)
print(f'test errors raising')
temp_hp = self.hp.copy()
self.assertRaises(AssertionError, temp_hp.re_label, htypes=new_shares_str)
self.assertRaises(TypeError, temp_hp.re_label, htypes=123)
self.assertRaises(AssertionError, temp_hp.re_label, htypes='wrong input!')
def test_csv_to_hp(self):
pass
def test_hdf_to_hp(self):
pass
def test_hp_join(self):
# TODO: this test needs strengthening: verify with concrete examples that hp_join produces correct results,
# TODO: especially for different shares, htypes and hdates, and whether they are
# TODO: combined correctly when given in different orders
print(f'join two simple HistoryPanels with same shares')
temp_hp = self.hp.join(self.hp2, same_shares=True)
self.assertIsInstance(temp_hp, qt.HistoryPanel)
def test_df_to_hp(self):
print(f'test converting DataFrame to HistoryPanel')
data = np.random.randint(10, size=(10, 5))
df1 = pd.DataFrame(data)
df2 = pd.DataFrame(data, columns=str_to_list(self.shares))
df3 = pd.DataFrame(data[:, 0:4])
df4 = pd.DataFrame(data[:, 0:4], columns=str_to_list(self.htypes))
hp = qt.dataframe_to_hp(df1, htypes='close')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, [0, 1, 2, 3, 4])
self.assertEqual(hp.htypes, ['close'])
self.assertEqual(hp.hdates, [pd.Timestamp('1970-01-01 00:00:00'),
pd.Timestamp('1970-01-01 00:00:00.000000001'),
pd.Timestamp('1970-01-01 00:00:00.000000002'),
pd.Timestamp('1970-01-01 00:00:00.000000003'),
pd.Timestamp('1970-01-01 00:00:00.000000004'),
pd.Timestamp('1970-01-01 00:00:00.000000005'),
pd.Timestamp('1970-01-01 00:00:00.000000006'),
pd.Timestamp('1970-01-01 00:00:00.000000007'),
pd.Timestamp('1970-01-01 00:00:00.000000008'),
pd.Timestamp('1970-01-01 00:00:00.000000009')])
hp = qt.dataframe_to_hp(df2, shares=self.shares, htypes='close')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, str_to_list(self.shares))
self.assertEqual(hp.htypes, ['close'])
hp = qt.dataframe_to_hp(df3, shares='000100', column_type='htypes')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, ['000100'])
self.assertEqual(hp.htypes, [0, 1, 2, 3])
hp = qt.dataframe_to_hp(df4, shares='000100', htypes=self.htypes, column_type='htypes')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, ['000100'])
self.assertEqual(hp.htypes, str_to_list(self.htypes))
hp.info()
self.assertRaises(KeyError, qt.dataframe_to_hp, df1)
def test_to_dataframe(self):
""" 测试HistoryPanel对象的to_dataframe方法
"""
print(f'START TEST == test_to_dataframe')
print(f'test converting test hp to dataframe with share == "000102":')
df_test = self.hp.to_dataframe(share='000102')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.htypes), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp[:, '000102'], values))
print(f'test DataFrame conversion with share == "000100"')
df_test = self.hp.to_dataframe(share='000100')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.htypes), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp[:, '000100'], values))
print(f'test DataFrame conversion error: type incorrect')
self.assertRaises(AssertionError, self.hp.to_dataframe, share=3.0)
print(f'test DataFrame error raising with share not found error')
self.assertRaises(KeyError, self.hp.to_dataframe, share='000300')
print(f'test DataFrame conversion with htype == "close"')
df_test = self.hp.to_dataframe(htype='close')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp['close'].T, values))
print(f'test DataFrame conversion with htype == "high"')
df_test = self.hp.to_dataframe(htype='high')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp['high'].T, values))
print(f'test DataFrame conversion with htype == "high" and dropna')
v = self.hp.values.astype('float')
v[:, 3, :] = np.nan
v[:, 4, :] = np.inf
test_hp = qt.HistoryPanel(v, levels=self.shares, columns=self.htypes, rows=self.index)
df_test = test_hp.to_dataframe(htype='high', dropna=True)
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates[:3]) + list(self.hp.hdates[4:]), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
target_values = test_hp['high'].T
target_values = target_values[np.where(~np.isnan(target_values))].reshape(9, 5)
self.assertTrue(np.allclose(target_values, values))
print(f'test DataFrame conversion with htype == "high", dropna and treat infs as na')
v = self.hp.values.astype('float')
v[:, 3, :] = np.nan
v[:, 4, :] = np.inf
test_hp = qt.HistoryPanel(v, levels=self.shares, columns=self.htypes, rows=self.index)
df_test = test_hp.to_dataframe(htype='high', dropna=True, inf_as_na=True)
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates[:3]) + list(self.hp.hdates[5:]), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
target_values = test_hp['high'].T
target_values = target_values[np.where(~np.isnan(target_values) & ~np.isinf(target_values))].reshape(8, 5)
self.assertTrue(np.allclose(target_values, values))
print(f'test DataFrame conversion error: type incorrect')
self.assertRaises(AssertionError, self.hp.to_dataframe, htype=pd.DataFrame())
print(f'test DataFrame error raising with share not found error')
self.assertRaises(KeyError, self.hp.to_dataframe, htype='non_type')
print(f'Raises KeyError when both or neither parameter is given')
self.assertRaises(KeyError, self.hp.to_dataframe)
self.assertRaises(KeyError, self.hp.to_dataframe, share='000100', htype='close')
def test_to_df_dict(self):
"""测试HistoryPanel公有方法to_df_dict"""
print('test convert history panel slice by share')
df_dict = self.hp.to_df_dict('share')
self.assertEqual(self.hp.shares, list(df_dict.keys()))
df_dict = self.hp.to_df_dict()
self.assertEqual(self.hp.shares, list(df_dict.keys()))
print('test convert history panel slice by htype')
df_dict = self.hp.to_df_dict('htype')
self.assertEqual(self.hp.htypes, list(df_dict.keys()))
print('test raise assertion error')
self.assertRaises(AssertionError, self.hp.to_df_dict, by='random text')
self.assertRaises(AssertionError, self.hp.to_df_dict, by=3)
print('test empty hp')
df_dict = qt.HistoryPanel().to_df_dict('share')
self.assertEqual(df_dict, {})
def test_stack_dataframes(self):
print('test stack dataframes in a list')
df1 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [2, 3, 4, 5], 'c': [3, 4, 5, 6]})
df1.index = ['20200101', '20200102', '20200103', '20200104']
df2 = pd.DataFrame({'b': [4, 3, 2, 1], 'd': [1, 1, 1, 1], 'c': [6, 5, 4, 3]})
df2.index = ['20200101', '20200102', '20200104', '20200105']
df3 = pd.DataFrame({'a': [6, 6, 6, 6], 'd': [4, 4, 4, 4], 'b': [2, 4, 6, 8]})
df3.index = ['20200101', '20200102', '20200103', '20200106']
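# (added note) values1 below is the expected 3-D block laid out as (share, hdate, htype):
# each share's block has one row per hdate (20200101-20200106) and one column per htype
# (a, b, c, d); rows of NaN mark dates missing from that share's source DataFrame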
values1 = np.array([[[1., 2., 3., np.nan],
[2., 3., 4., np.nan],
[3., 4., 5., np.nan],
[4., 5., 6., np.nan],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan]],
[[np.nan, 4., 6., 1.],
[np.nan, 3., 5., 1.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 2., 4., 1.],
[np.nan, 1., 3., 1.],
[np.nan, np.nan, np.nan, np.nan]],
[[6., 2., np.nan, 4.],
[6., 4., np.nan, 4.],
[6., 6., np.nan, 4.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[6., 8., np.nan, 4.]]])
values2 = np.array([[[1., np.nan, 6.],
[2., np.nan, 6.],
[3., np.nan, 6.],
[4., np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 6.]],
[[2., 4., 2.],
[3., 3., 4.],
[4., np.nan, 6.],
[5., 2., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 8.]],
[[3., 6., np.nan],
[4., 5., np.nan],
[5., np.nan, np.nan],
[6., 4., np.nan],
[np.nan, 3., np.nan],
[np.nan, np.nan, np.nan]],
[[np.nan, 1., 4.],
[np.nan, 1., 4.],
[np.nan, np.nan, 4.],
[np.nan, 1., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 4.]]])
print(df1.rename(index=pd.to_datetime))
print(df2.rename(index=pd.to_datetime))
print(df3.rename(index=pd.to_datetime))
hp1 = stack_dataframes([df1, df2, df3], stack_along='shares',
shares=['000100', '000200', '000300'])
hp2 = stack_dataframes([df1, df2, df3], stack_along='shares',
shares='000100, 000300, 000200')
print('hp1 is:\n', hp1)
print('hp2 is:\n', hp2)
self.assertEqual(hp1.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp1.shares, ['000100', '000200', '000300'])
self.assertTrue(np.allclose(hp1.values, values1, equal_nan=True))
self.assertEqual(hp2.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp2.shares, ['000100', '000300', '000200'])
self.assertTrue(np.allclose(hp2.values, values1, equal_nan=True))
hp3 = stack_dataframes([df1, df2, df3], stack_along='htypes',
htypes=['close', 'high', 'low'])
hp4 = stack_dataframes([df1, df2, df3], stack_along='htypes',
htypes='open, close, high')
print('hp3 is:\n', hp3.values)
print('hp4 is:\n', hp4.values)
self.assertEqual(hp3.htypes, ['close', 'high', 'low'])
self.assertEqual(hp3.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp3.values, values2, equal_nan=True))
self.assertEqual(hp4.htypes, ['open', 'close', 'high'])
self.assertEqual(hp4.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp4.values, values2, equal_nan=True))
print('test stack dataframes in a dict')
df1 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [2, 3, 4, 5], 'c': [3, 4, 5, 6]})
df1.index = ['20200101', '20200102', '20200103', '20200104']
df2 = pd.DataFrame({'b': [4, 3, 2, 1], 'd': [1, 1, 1, 1], 'c': [6, 5, 4, 3]})
df2.index = ['20200101', '20200102', '20200104', '20200105']
df3 = pd.DataFrame({'a': [6, 6, 6, 6], 'd': [4, 4, 4, 4], 'b': [2, 4, 6, 8]})
df3.index = ['20200101', '20200102', '20200103', '20200106']
values1 = np.array([[[1., 2., 3., np.nan],
[2., 3., 4., np.nan],
[3., 4., 5., np.nan],
[4., 5., 6., np.nan],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan]],
[[np.nan, 4., 6., 1.],
[np.nan, 3., 5., 1.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 2., 4., 1.],
[np.nan, 1., 3., 1.],
[np.nan, np.nan, np.nan, np.nan]],
[[6., 2., np.nan, 4.],
[6., 4., np.nan, 4.],
[6., 6., np.nan, 4.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[6., 8., np.nan, 4.]]])
values2 = np.array([[[1., np.nan, 6.],
[2., np.nan, 6.],
[3., np.nan, 6.],
[4., np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 6.]],
[[2., 4., 2.],
[3., 3., 4.],
[4., np.nan, 6.],
[5., 2., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 8.]],
[[3., 6., np.nan],
[4., 5., np.nan],
[5., np.nan, np.nan],
[6., 4., np.nan],
[np.nan, 3., np.nan],
[np.nan, np.nan, np.nan]],
[[np.nan, 1., 4.],
[np.nan, 1., 4.],
[np.nan, np.nan, 4.],
[np.nan, 1., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 4.]]])
print(df1.rename(index=pd.to_datetime))
print(df2.rename(index=pd.to_datetime))
print(df3.rename(index=pd.to_datetime))
hp1 = stack_dataframes(dfs={'000001.SZ': df1, '000002.SZ': df2, '000003.SZ': df3},
stack_along='shares')
hp2 = stack_dataframes(dfs={'000001.SZ': df1, '000002.SZ': df2, '000003.SZ': df3},
stack_along='shares',
shares='000100, 000300, 000200')
print('hp1 is:\n', hp1)
print('hp2 is:\n', hp2)
self.assertEqual(hp1.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp1.shares, ['000001.SZ', '000002.SZ', '000003.SZ'])
self.assertTrue(np.allclose(hp1.values, values1, equal_nan=True))
self.assertEqual(hp2.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp2.shares, ['000100', '000300', '000200'])
self.assertTrue(np.allclose(hp2.values, values1, equal_nan=True))
hp3 = stack_dataframes(dfs={'close': df1, 'high': df2, 'low': df3},
stack_along='htypes')
hp4 = stack_dataframes(dfs={'close': df1, 'low': df2, 'high': df3},
stack_along='htypes',
htypes='open, close, high')
print('hp3 is:\n', hp3.values)
print('hp4 is:\n', hp4.values)
self.assertEqual(hp3.htypes, ['close', 'high', 'low'])
self.assertEqual(hp3.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp3.values, values2, equal_nan=True))
self.assertEqual(hp4.htypes, ['open', 'close', 'high'])
self.assertEqual(hp4.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp4.values, values2, equal_nan=True))
def test_to_csv(self):
pass
def test_to_hdf(self):
pass
def test_fill_na(self):
"""测试填充无效值"""
print(self.hp)
new_values = self.hp.values.astype(float)
new_values[[0, 1, 3, 2], [1, 3, 0, 2], [1, 3, 2, 2]] = np.nan
print(new_values)
temp_hp = qt.HistoryPanel(values=new_values, levels=self.hp.levels, rows=self.hp.rows, columns=self.hp.columns)
self.assertTrue(np.allclose(temp_hp.values[[0, 1, 3, 2], [1, 3, 0, 2], [1, 3, 2, 2]], np.nan, equal_nan=True))
temp_hp.fillna(2.3)
filled_values = new_values.copy()
filled_values[[0, 1, 3, 2], [1, 3, 0, 2], [1, 3, 2, 2]] = 2.3
self.assertTrue(np.allclose(temp_hp.values,
filled_values, equal_nan=True))
def test_fill_inf(self):
"""测试填充无限值"""
def test_get_history_panel(self):
# TODO: implement this test case
# test get only one line of data
pass
def test_get_price_type_raw_data(self):
shares = '000039.SZ, 600748.SH, 000040.SZ'
start = '20200101'
end = '20200131'
htypes = 'open, high, low, close'
target_price_000039 = [[9.45, 9.49, 9.12, 9.17],
[9.46, 9.56, 9.4, 9.5],
[9.7, 9.76, 9.5, 9.51],
[9.7, 9.75, 9.7, 9.72],
[9.73, 9.77, 9.7, 9.73],
[9.83, 9.85, 9.71, 9.72],
[9.85, 9.85, 9.75, 9.79],
[9.96, 9.96, 9.83, 9.86],
[9.87, 9.94, 9.77, 9.93],
[9.82, 9.9, 9.76, 9.87],
[9.8, 9.85, 9.77, 9.82],
[9.84, 9.86, 9.71, 9.72],
[9.83, 9.93, 9.81, 9.86],
[9.7, 9.87, 9.7, 9.82],
[9.83, 9.86, 9.69, 9.79],
[9.8, 9.94, 9.8, 9.86]]
target_price_600748 = [[5.68, 5.68, 5.32, 5.37],
[5.62, 5.68, 5.46, 5.65],
[5.72, 5.72, 5.61, 5.62],
[5.76, 5.77, 5.6, 5.73],
[5.78, 5.84, 5.73, 5.75],
[5.89, 5.91, 5.76, 5.77],
[6.03, 6.04, 5.87, 5.89],
[5.94, 6.07, 5.94, 6.02],
[5.96, 5.98, 5.88, 5.97],
[6.04, 6.06, 5.95, 5.96],
[5.98, 6.04, 5.96, 6.03],
[6.1, 6.11, 5.89, 5.94],
[6.02, 6.12, 6., 6.1],
[5.96, 6.05, 5.88, 6.01],
[6.03, 6.03, 5.95, 5.99],
[6.02, 6.12, 5.99, 5.99]]
target_price_000040 = [[3.63, 3.83, 3.63, 3.65],
[3.99, 4.07, 3.97, 4.03],
[4.1, 4.11, 3.93, 3.95],
[4.12, 4.13, 4.06, 4.11],
[4.13, 4.19, 4.07, 4.13],
[4.27, 4.28, 4.11, 4.12],
[4.37, 4.38, 4.25, 4.29],
[4.34, 4.5, 4.32, 4.41],
[4.28, 4.35, 4.2, 4.34],
[4.41, 4.43, 4.29, 4.31],
[4.42, 4.45, 4.36, 4.41],
[4.51, 4.56, 4.33, 4.35],
[4.35, 4.55, 4.31, 4.55],
[4.3, 4.41, 4.22, 4.36],
[4.27, 4.44, 4.23, 4.34],
[4.23, 4.27, 4.18, 4.25]]
print(f'test get price type raw data with single thread')
df_list = get_price_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, freq='d')
self.assertIsInstance(df_list, dict)
self.assertEqual(len(df_list), 3)
self.assertTrue(np.allclose(df_list['000039.SZ'].values, np.array(target_price_000039)))
self.assertTrue(np.allclose(df_list['600748.SH'].values, np.array(target_price_600748)))
self.assertTrue(np.allclose(df_list['000040.SZ'].values, np.array(target_price_000040)))
print(f'in get price type raw data, got DataFrames: \n"000039.SZ":\n'
f'{df_list["000039.SZ"]}\n"600748.SH":\n'
f'{df_list["600748.SH"]}\n"000040.SZ":\n{df_list["000040.SZ"]}')
print(f'test get price type raw data with multi threads')
df_list = get_price_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, freq='d', parallel=10)
self.assertIsInstance(df_list, dict)
self.assertEqual(len(df_list), 3)
self.assertTrue(np.allclose(df_list['000039.SZ'].values, np.array(target_price_000039)))
self.assertTrue(np.allclose(df_list['600748.SH'].values, np.array(target_price_600748)))
self.assertTrue(np.allclose(df_list['000040.SZ'].values, np.array(target_price_000040)))
print(f'in get price type raw data, got DataFrames: \n"000039.SZ":\n'
f'{df_list["000039.SZ"]}\n"600748.SH":\n'
f'{df_list["600748.SH"]}\n"000040.SZ":\n{df_list["000040.SZ"]}')
def test_get_financial_report_type_raw_data(self):
shares = '000039.SZ, 600748.SH, 000040.SZ'
start = '20160101'
end = '20201231'
htypes = 'eps,basic_eps,diluted_eps,total_revenue,revenue,total_share,' \
'cap_rese,undistr_porfit,surplus_rese,net_profit'
target_eps_000039 = [[1.41],
[0.1398],
[-0.0841],
[-0.1929],
[0.37],
[0.1357],
[0.1618],
[0.1191],
[1.11],
[0.759],
[0.3061],
[0.1409],
[0.81],
[0.4187],
[0.2554],
[0.1624],
[0.14],
[-0.0898],
[-0.1444],
[0.1291]]
target_eps_600748 = [[0.41],
[0.22],
[0.22],
[0.09],
[0.42],
[0.23],
[0.22],
[0.09],
[0.36],
[0.16],
[0.15],
[0.07],
[0.47],
[0.19],
[0.12],
[0.07],
[0.32],
[0.22],
[0.14],
[0.07]]
target_eps_000040 = [[-0.6866],
[-0.134],
[-0.189],
[-0.036],
[-0.6435],
[0.05],
[0.062],
[0.0125],
[0.8282],
[1.05],
[0.985],
[0.811],
[0.41],
[0.242],
[0.113],
[0.027],
[0.19],
[0.17],
[0.17],
[0.064]]
target_basic_eps_000039 = [[1.3980000e-01, 1.3980000e-01, 6.3591954e+10, 6.3591954e+10],
[-8.4100000e-02, -8.4100000e-02, 3.9431807e+10, 3.9431807e+10],
[-1.9290000e-01, -1.9290000e-01, 1.5852177e+10, 1.5852177e+10],
[3.7000000e-01, 3.7000000e-01, 8.5815341e+10, 8.5815341e+10],
[1.3570000e-01, 1.3430000e-01, 6.1660271e+10, 6.1660271e+10],
[1.6180000e-01, 1.6040000e-01, 4.2717729e+10, 4.2717729e+10],
[1.1910000e-01, 1.1900000e-01, 1.9099547e+10, 1.9099547e+10],
[1.1100000e+00, 1.1000000e+00, 9.3497622e+10, 9.3497622e+10],
[7.5900000e-01, 7.5610000e-01, 6.6906147e+10, 6.6906147e+10],
[3.0610000e-01, 3.0380000e-01, 4.3560398e+10, 4.3560398e+10],
[1.4090000e-01, 1.4050000e-01, 1.9253639e+10, 1.9253639e+10],
[8.1000000e-01, 8.1000000e-01, 7.6299930e+10, 7.6299930e+10],
[4.1870000e-01, 4.1710000e-01, 5.3962706e+10, 5.3962706e+10],
[2.5540000e-01, 2.5440000e-01, 3.3387152e+10, 3.3387152e+10],
[1.6240000e-01, 1.6200000e-01, 1.4675987e+10, 1.4675987e+10],
[1.4000000e-01, 1.4000000e-01, 5.1111652e+10, 5.1111652e+10],
[-8.9800000e-02, -8.9800000e-02, 3.4982614e+10, 3.4982614e+10],
[-1.4440000e-01, -1.4440000e-01, 2.3542843e+10, 2.3542843e+10],
[1.2910000e-01, 1.2860000e-01, 1.0412416e+10, 1.0412416e+10],
[7.2000000e-01, 7.1000000e-01, 5.8685804e+10, 5.8685804e+10]]
target_basic_eps_600748 = [[2.20000000e-01, 2.20000000e-01, 5.29423397e+09, 5.29423397e+09],
[2.20000000e-01, 2.20000000e-01, 4.49275653e+09, 4.49275653e+09],
[9.00000000e-02, 9.00000000e-02, 1.59067065e+09, 1.59067065e+09],
[4.20000000e-01, 4.20000000e-01, 8.86555586e+09, 8.86555586e+09],
[2.30000000e-01, 2.30000000e-01, 5.44850143e+09, 5.44850143e+09],
[2.20000000e-01, 2.20000000e-01, 4.34978927e+09, 4.34978927e+09],
[9.00000000e-02, 9.00000000e-02, 1.73793793e+09, 1.73793793e+09],
[3.60000000e-01, 3.60000000e-01, 8.66375241e+09, 8.66375241e+09],
[1.60000000e-01, 1.60000000e-01, 4.72875116e+09, 4.72875116e+09],
[1.50000000e-01, 1.50000000e-01, 3.76879016e+09, 3.76879016e+09],
[7.00000000e-02, 7.00000000e-02, 1.31785454e+09, 1.31785454e+09],
[4.70000000e-01, 4.70000000e-01, 7.23391685e+09, 7.23391685e+09],
[1.90000000e-01, 1.90000000e-01, 3.76072215e+09, 3.76072215e+09],
[1.20000000e-01, 1.20000000e-01, 2.35845364e+09, 2.35845364e+09],
[7.00000000e-02, 7.00000000e-02, 1.03831865e+09, 1.03831865e+09],
[3.20000000e-01, 3.20000000e-01, 6.48880919e+09, 6.48880919e+09],
[2.20000000e-01, 2.20000000e-01, 3.72209142e+09, 3.72209142e+09],
[1.40000000e-01, 1.40000000e-01, 2.22563924e+09, 2.22563924e+09],
[7.00000000e-02, 7.00000000e-02, 8.96647052e+08, 8.96647052e+08],
[4.80000000e-01, 4.80000000e-01, 6.61917508e+09, 6.61917508e+09]]
target_basic_eps_000040 = [[-1.34000000e-01, -1.34000000e-01, 2.50438755e+09, 2.50438755e+09],
[-1.89000000e-01, -1.89000000e-01, 1.32692347e+09, 1.32692347e+09],
[-3.60000000e-02, -3.60000000e-02, 5.59073338e+08, 5.59073338e+08],
[-6.43700000e-01, -6.43700000e-01, 6.80576162e+09, 6.80576162e+09],
[5.00000000e-02, 5.00000000e-02, 6.38891620e+09, 6.38891620e+09],
[6.20000000e-02, 6.20000000e-02, 5.23267082e+09, 5.23267082e+09],
[1.25000000e-02, 1.25000000e-02, 2.22420874e+09, 2.22420874e+09],
[8.30000000e-01, 8.30000000e-01, 8.67628947e+09, 8.67628947e+09],
[1.05000000e+00, 1.05000000e+00, 5.29431716e+09, 5.29431716e+09],
[9.85000000e-01, 9.85000000e-01, 3.56822382e+09, 3.56822382e+09],
[8.11000000e-01, 8.11000000e-01, 1.06613439e+09, 1.06613439e+09],
[4.10000000e-01, 4.10000000e-01, 8.13102532e+09, 8.13102532e+09],
[2.42000000e-01, 2.42000000e-01, 5.17971521e+09, 5.17971521e+09],
[1.13000000e-01, 1.13000000e-01, 3.21704120e+09, 3.21704120e+09],
[2.70000000e-02, 2.70000000e-02, 8.41966738e+08, 8.24272235e+08],
[1.90000000e-01, 1.90000000e-01, 3.77350171e+09, 3.77350171e+09],
[1.70000000e-01, 1.70000000e-01, 2.38643892e+09, 2.38643892e+09],
[1.70000000e-01, 1.70000000e-01, 1.29127117e+09, 1.29127117e+09],
[6.40000000e-02, 6.40000000e-02, 6.03256858e+08, 6.03256858e+08],
[1.30000000e-01, 1.30000000e-01, 1.66572918e+09, 1.66572918e+09]]
target_total_share_000039 = [[3.5950140e+09, 4.8005360e+09, 2.1573660e+10, 3.5823430e+09],
[3.5860750e+09, 4.8402300e+09, 2.0750827e+10, 3.5823430e+09],
[3.5860750e+09, 4.9053550e+09, 2.0791307e+10, 3.5823430e+09],
[3.5845040e+09, 4.8813110e+09, 2.1482857e+10, 3.5823430e+09],
[3.5831490e+09, 4.9764250e+09, 2.0926816e+10, 3.2825850e+09],
[3.5825310e+09, 4.8501270e+09, 2.1020418e+10, 3.2825850e+09],
[2.9851110e+09, 5.4241420e+09, 2.2438350e+10, 3.2825850e+09],
[2.9849890e+09, 4.1284000e+09, 2.2082769e+10, 3.2825850e+09],
[2.9849610e+09, 4.0838010e+09, 2.1045994e+10, 3.2815350e+09],
[2.9849560e+09, 4.2491510e+09, 1.9694345e+10, 3.2815350e+09],
[2.9846970e+09, 4.2351600e+09, 2.0016361e+10, 3.2815350e+09],
[2.9828890e+09, 4.2096630e+09, 1.9734494e+10, 3.2815350e+09],
[2.9813960e+09, 3.4564240e+09, 1.8562738e+10, 3.2793790e+09],
[2.9803530e+09, 3.0759650e+09, 1.8076208e+10, 3.2793790e+09],
[2.9792680e+09, 3.1376690e+09, 1.7994776e+10, 3.2793790e+09],
[2.9785770e+09, 3.1265850e+09, 1.7495053e+10, 3.2793790e+09],
[2.9783640e+09, 3.1343850e+09, 1.6740840e+10, 3.2035780e+09],
[2.9783590e+09, 3.1273880e+09, 1.6578389e+10, 3.2035780e+09],
[2.9782780e+09, 3.1169280e+09, 1.8047639e+10, 3.2035780e+09],
[2.9778200e+09, 3.1818630e+09, 1.7663145e+10, 3.2035780e+09]]
target_total_share_600748 = [[1.84456289e+09, 2.60058426e+09, 5.72443733e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.72096899e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.65738237e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.50257806e+09, 4.58026529e+08],
[1.84456289e+09, 2.59868164e+09, 5.16741523e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 5.14677280e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 4.94955591e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 4.79001451e+09, 4.44998882e+08],
[1.84456289e+09, 3.11401684e+09, 4.46326988e+09, 4.01064256e+08],
[1.84456289e+09, 3.11596723e+09, 4.45419136e+09, 4.01064256e+08],
[1.84456289e+09, 3.11596723e+09, 4.39652948e+09, 4.01064256e+08],
[1.84456289e+09, 3.18007783e+09, 4.26608403e+09, 4.01064256e+08],
[1.84456289e+09, 3.10935622e+09, 3.78417688e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.65806574e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.62063090e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.50063915e+09, 3.65651701e+08],
[1.41889453e+09, 3.55940850e+09, 3.22272993e+09, 3.62124939e+08],
[1.41889453e+09, 3.56129650e+09, 3.11477476e+09, 3.62124939e+08],
[1.41889453e+09, 3.59632888e+09, 3.06836903e+09, 3.62124939e+08],
[1.08337087e+09, 3.37400726e+07, 3.00918704e+09, 3.62124939e+08]]
target_total_share_000040 = [[1.48687387e+09, 1.06757900e+10, 8.31900755e+08, 2.16091994e+08],
[1.48687387e+09, 1.06757900e+10, 7.50177302e+08, 2.16091994e+08],
[1.48687387e+09, 1.06757899e+10, 9.90255974e+08, 2.16123282e+08],
[1.48687387e+09, 1.06757899e+10, 1.03109866e+09, 2.16091994e+08],
[1.48687387e+09, 1.06757910e+10, 2.07704745e+09, 2.16123282e+08],
[1.48687387e+09, 1.06757910e+10, 2.09608665e+09, 2.16123282e+08],
[1.48687387e+09, 1.06803833e+10, 2.13354083e+09, 2.16123282e+08],
[1.48687387e+09, 1.06804090e+10, 2.11489364e+09, 2.16123282e+08],
[1.33717327e+09, 8.87361727e+09, 2.42939924e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 2.34220254e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 2.16390368e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 1.07961915e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 8.58866066e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 6.87024393e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 5.71554565e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 5.54241222e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361726e+09, 5.10059576e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361726e+09, 4.59351639e+08, 1.88489589e+08],
[4.69593364e+08, 2.78355875e+08, 4.13430814e+08, 1.88489589e+08],
[4.69593364e+08, 2.74235459e+08, 3.83557678e+08, 1.88489589e+08]]
target_net_profit_000039 = [[np.nan],
[2.422180e+08],
[np.nan],
[2.510113e+09],
[np.nan],
[1.102220e+09],
[np.nan],
[4.068455e+09],
[np.nan],
[1.315957e+09],
[np.nan],
[3.158415e+09],
[np.nan],
[1.066509e+09],
[np.nan],
[7.349830e+08],
[np.nan],
[-5.411600e+08],
[np.nan],
[2.271961e+09]]
target_net_profit_600748 = [[np.nan],
[4.54341757e+08],
[np.nan],
[9.14476670e+08],
[np.nan],
[5.25360283e+08],
[np.nan],
[9.24502415e+08],
[np.nan],
[4.66560302e+08],
[np.nan],
[9.15265285e+08],
[np.nan],
[2.14639674e+08],
[np.nan],
[7.45093049e+08],
[np.nan],
[2.10967312e+08],
[np.nan],
[6.04572711e+08]]
target_net_profit_000040 = [[np.nan],
[-2.82458846e+08],
[np.nan],
[-9.57130872e+08],
[np.nan],
[9.22114527e+07],
[np.nan],
[1.12643819e+09],
[np.nan],
[1.31715269e+09],
[np.nan],
[5.39940093e+08],
[np.nan],
[1.51440838e+08],
[np.nan],
[1.75339071e+08],
[np.nan],
[8.04740415e+07],
[np.nan],
[6.20445815e+07]]
print('test get financial data, in multi thread mode')
df_list = get_financial_report_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, parallel=4)
self.assertIsInstance(df_list, tuple)
self.assertEqual(len(df_list), 4)
self.assertEqual(len(df_list[0]), 3)
self.assertEqual(len(df_list[1]), 3)
self.assertEqual(len(df_list[2]), 3)
self.assertEqual(len(df_list[3]), 3)
# confirm that all returned data are of the correct type (DataFrame)
self.assertTrue(all(isinstance(item, pd.DataFrame) for subdict in df_list for item in subdict.values()))
# check whether any of the returned DataFrames are empty
print(all(item.empty for subdict in df_list for item in subdict.values()))
# verify that each group of data is correct and consistently ordered; skip the check when the data is empty
if df_list[0]['000039.SZ'].empty:
print(f'income data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000039.SZ'].values, target_basic_eps_000039))
if df_list[0]['600748.SH'].empty:
print(f'income data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['600748.SH'].values, target_basic_eps_600748))
if df_list[0]['000040.SZ'].empty:
print(f'income data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000040.SZ'].values, target_basic_eps_000040))
if df_list[1]['000039.SZ'].empty:
print(f'indicator data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000039.SZ'].values, target_eps_000039))
if df_list[1]['600748.SH'].empty:
print(f'indicator data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['600748.SH'].values, target_eps_600748))
if df_list[1]['000040.SZ'].empty:
print(f'indicator data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000040.SZ'].values, target_eps_000040))
if df_list[2]['000039.SZ'].empty:
print(f'balance data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000039.SZ'].values, target_total_share_000039))
if df_list[2]['600748.SH'].empty:
print(f'balance data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['600748.SH'].values, target_total_share_600748))
if df_list[2]['000040.SZ'].empty:
print(f'balance data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000040.SZ'].values, target_total_share_000040))
if df_list[3]['000039.SZ'].empty:
print(f'cash flow data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000039.SZ'].values, target_net_profit_000039, equal_nan=True))
if df_list[3]['600748.SH'].empty:
print(f'cash flow data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['600748.SH'].values, target_net_profit_600748, equal_nan=True))
if df_list[3]['000040.SZ'].empty:
print(f'cash flow data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000040.SZ'].values, target_net_profit_000040, equal_nan=True))
print('test get financial data, in single thread mode')
df_list = get_financial_report_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, parallel=0)
self.assertIsInstance(df_list, tuple)
self.assertEqual(len(df_list), 4)
self.assertEqual(len(df_list[0]), 3)
self.assertEqual(len(df_list[1]), 3)
self.assertEqual(len(df_list[2]), 3)
self.assertEqual(len(df_list[3]), 3)
# confirm that all returned data are of the correct type (DataFrame)
self.assertTrue(all(isinstance(item, pd.DataFrame) for subdict in df_list for item in subdict.values()))
# check for empty data - empty DataFrames may be returned due to network issues
self.assertFalse(all(item.empty for subdict in df_list for item in subdict.values()))
# verify that each group of data is correct and consistently ordered; skip the check when the data is empty
if df_list[0]['000039.SZ'].empty:
print(f'income data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000039.SZ'].values, target_basic_eps_000039))
if df_list[0]['600748.SH'].empty:
print(f'income data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['600748.SH'].values, target_basic_eps_600748))
if df_list[0]['000040.SZ'].empty:
print(f'income data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000040.SZ'].values, target_basic_eps_000040))
if df_list[1]['000039.SZ'].empty:
print(f'indicator data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000039.SZ'].values, target_eps_000039))
if df_list[1]['600748.SH'].empty:
print(f'indicator data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['600748.SH'].values, target_eps_600748))
if df_list[1]['000040.SZ'].empty:
print(f'indicator data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000040.SZ'].values, target_eps_000040))
if df_list[2]['000039.SZ'].empty:
print(f'balance data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000039.SZ'].values, target_total_share_000039))
if df_list[2]['600748.SH'].empty:
print(f'balance data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['600748.SH'].values, target_total_share_600748))
if df_list[2]['000040.SZ'].empty:
print(f'balance data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000040.SZ'].values, target_total_share_000040))
if df_list[3]['000039.SZ'].empty:
print(f'cash flow data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000039.SZ'].values, target_net_profit_000039, equal_nan=True))
if df_list[3]['600748.SH'].empty:
print(f'cash flow data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['600748.SH'].values, target_net_profit_600748, equal_nan=True))
if df_list[3]['000040.SZ'].empty:
print(f'cash flow data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000040.SZ'].values, target_net_profit_000040, equal_nan=True))
def test_get_composite_type_raw_data(self):
pass
class TestUtilityFuncs(unittest.TestCase):
def setUp(self):
pass
def test_time_string_format(self):
print('Testing qt.time_str_format() function:')
t = 3.14
self.assertEqual(time_str_format(t), '3s 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '3s ')
self.assertEqual(time_str_format(t, short_form=True), '3"140')
self.assertEqual(time_str_format(t, estimation=True, short_form=True), '3"')
t = 300.14
self.assertEqual(time_str_format(t), '5min 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '5min ')
self.assertEqual(time_str_format(t, short_form=True), "5'140")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "5'")
t = 7435.0014
self.assertEqual(time_str_format(t), '2hrs 3min 55s 1.4ms')
self.assertEqual(time_str_format(t, estimation=True), '2hrs ')
self.assertEqual(time_str_format(t, short_form=True), "2H3'55\"001")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "2H")
t = 88425.0509
self.assertEqual(time_str_format(t), '1days 33min 45s 50.9ms')
self.assertEqual(time_str_format(t, estimation=True), '1days ')
self.assertEqual(time_str_format(t, short_form=True), "1D33'45\"051")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "1D")
def test_str_to_list(self):
self.assertEqual(str_to_list('a,b,c,d,e'), ['a', 'b', 'c', 'd', 'e'])
self.assertEqual(str_to_list('a, b, c '), ['a', 'b', 'c'])
self.assertEqual(str_to_list('a, b: c', sep_char=':'), ['a,b', 'c'])
self.assertEqual(str_to_list('abc'), ['abc'])
self.assertEqual(str_to_list(''), [])
self.assertRaises(AssertionError, str_to_list, 123)
def test_list_or_slice(self):
str_dict = {'close': 0, 'open': 1, 'high': 2, 'low': 3}
self.assertEqual(list_or_slice(slice(1, 2, 1), str_dict), slice(1, 2, 1))
self.assertEqual(list_or_slice('open', str_dict), [1])
self.assertEqual(list(list_or_slice('close, high, low', str_dict)), [0, 2, 3])
self.assertEqual(list(list_or_slice('close:high', str_dict)), [0, 1, 2])
self.assertEqual(list(list_or_slice(['open'], str_dict)), [1])
self.assertEqual(list(list_or_slice(['open', 'high'], str_dict)), [1, 2])
self.assertEqual(list(list_or_slice(0, str_dict)), [0])
self.assertEqual(list(list_or_slice([0, 2], str_dict)), [0, 2])
self.assertEqual(list(list_or_slice([True, False, True, False], str_dict)), [0, 2])
def test_labels_to_dict(self):
target_list = [0, 1, 10, 100]
target_dict = {'close': 0, 'open': 1, 'high': 2, 'low': 3}
target_dict2 = {'close': 0, 'open': 2, 'high': 1, 'low': 3}
self.assertEqual(labels_to_dict('close, open, high, low', target_list), target_dict)
self.assertEqual(labels_to_dict(['close', 'open', 'high', 'low'], target_list), target_dict)
self.assertEqual(labels_to_dict('close, high, open, low', target_list), target_dict2)
self.assertEqual(labels_to_dict(['close', 'high', 'open', 'low'], target_list), target_dict2)
def test_input_to_list(self):
""" test util function input_to_list()"""
self.assertEqual(input_to_list(5, 3), [5, 5, 5])
self.assertEqual(input_to_list(5, 3, 0), [5, 5, 5])
self.assertEqual(input_to_list([5], 3, 0), [5, 0, 0])
self.assertEqual(input_to_list([5, 4], 3, 0), [5, 4, 0])
def test_regulate_date_format(self):
self.assertEqual(regulate_date_format('2019/11/06'), '20191106')
self.assertEqual(regulate_date_format('2019-11-06'), '20191106')
self.assertEqual(regulate_date_format('20191106'), '20191106')
self.assertEqual(regulate_date_format('191106'), '20061119')
self.assertEqual(regulate_date_format('830522'), '19830522')
self.assertEqual(regulate_date_format(datetime.datetime(2010, 3, 15)), '20100315')
self.assertEqual(regulate_date_format(pd.Timestamp('2010.03.15')), '20100315')
self.assertRaises(ValueError, regulate_date_format, 'abc')
self.assertRaises(ValueError, regulate_date_format, '2019/13/43')
def test_list_to_str_format(self):
self.assertEqual(list_to_str_format(['close', 'open', 'high', 'low']),
'close,open,high,low')
self.assertEqual(list_to_str_format(['letters', ' ', '123 4', 123, ' kk l']),
'letters,,1234,kkl')
self.assertEqual(list_to_str_format('a string input'),
'a,string,input')
self.assertEqual(list_to_str_format('already,a,good,string'),
'already,a,good,string')
self.assertRaises(AssertionError, list_to_str_format, 123)
def test_is_trade_day(self):
"""test if the funcion maybe_trade_day() and is_market_trade_day() works properly
"""
date_trade = '20210401'
date_holiday = '20210102'
date_weekend = '20210424'
date_seems_trade_day = '20210217'
date_too_early = '19890601'
date_too_late = '20230105'
date_christmas = '20201225'
self.assertTrue(maybe_trade_day(date_trade))
self.assertFalse(maybe_trade_day(date_holiday))
self.assertFalse(maybe_trade_day(date_weekend))
self.assertTrue(maybe_trade_day(date_seems_trade_day))
self.assertTrue(maybe_trade_day(date_too_early))
self.assertTrue(maybe_trade_day(date_too_late))
self.assertTrue(maybe_trade_day(date_christmas))
self.assertTrue(is_market_trade_day(date_trade))
self.assertFalse(is_market_trade_day(date_holiday))
self.assertFalse(is_market_trade_day(date_weekend))
self.assertFalse(is_market_trade_day(date_seems_trade_day))
self.assertFalse(is_market_trade_day(date_too_early))
self.assertFalse(is_market_trade_day(date_too_late))
self.assertTrue(is_market_trade_day(date_christmas))
self.assertFalse(is_market_trade_day(date_christmas, exchange='XHKG'))
date_trade = pd.to_datetime('20210401')
date_holiday = pd.to_datetime('20210102')
date_weekend = pd.to_datetime('20210424')
self.assertTrue(maybe_trade_day(date_trade))
self.assertFalse(maybe_trade_day(date_holiday))
self.assertFalse(maybe_trade_day(date_weekend))
def test_weekday_name(self):
""" test util func weekday_name()"""
self.assertEqual(weekday_name(0), 'Monday')
self.assertEqual(weekday_name(1), 'Tuesday')
self.assertEqual(weekday_name(2), 'Wednesday')
self.assertEqual(weekday_name(3), 'Thursday')
self.assertEqual(weekday_name(4), 'Friday')
self.assertEqual(weekday_name(5), 'Saturday')
self.assertEqual(weekday_name(6), 'Sunday')
def test_list_truncate(self):
""" test util func list_truncate()"""
l = [1,2,3,4,5]
ls = list_truncate(l, 2)
self.assertEqual(ls[0], [1, 2])
self.assertEqual(ls[1], [3, 4])
self.assertEqual(ls[2], [5])
self.assertRaises(AssertionError, list_truncate, l, 0)
self.assertRaises(AssertionError, list_truncate, 12, 0)
self.assertRaises(AssertionError, list_truncate, 0, l)
def test_maybe_trade_day(self):
""" test util function maybe_trade_day()"""
self.assertTrue(maybe_trade_day('20220104'))
self.assertTrue(maybe_trade_day('2021-12-31'))
self.assertTrue(maybe_trade_day(pd.to_datetime('2020/03/06')))
self.assertFalse(maybe_trade_day('2020-01-01'))
self.assertFalse(maybe_trade_day('2020/10/06'))
self.assertRaises(TypeError, maybe_trade_day, 'aaa')
def test_prev_trade_day(self):
"""test the function prev_trade_day()
"""
date_trade = '20210401'
date_holiday = '20210102'
prev_holiday = pd.to_datetime(date_holiday) - pd.Timedelta(2, 'd')
date_weekend = '20210424'
prev_weekend = pd.to_datetime(date_weekend) - pd.Timedelta(1, 'd')
date_seems_trade_day = '20210217'
prev_seems_trade_day = '20210217'
date_too_early = '19890601'
date_too_late = '20230105'
date_christmas = '20201225'
self.assertEqual(pd.to_datetime(prev_trade_day(date_trade)),
pd.to_datetime(date_trade))
self.assertEqual(pd.to_datetime(prev_trade_day(date_holiday)),
pd.to_datetime(prev_holiday))
self.assertEqual(pd.to_datetime(prev_trade_day(date_weekend)),
pd.to_datetime(prev_weekend))
self.assertEqual(pd.to_datetime(prev_trade_day(date_seems_trade_day)),
pd.to_datetime(prev_seems_trade_day))
self.assertEqual(pd.to_datetime(prev_trade_day(date_too_early)),
pd.to_datetime(date_too_early))
self.assertEqual(pd.to_datetime(prev_trade_day(date_too_late)),
pd.to_datetime(date_too_late))
self.assertEqual(pd.to_datetime(prev_trade_day(date_christmas)),
pd.to_datetime(date_christmas))
def test_next_trade_day(self):
""" test the function next_trade_day()
"""
date_trade = '20210401'
date_holiday = '20210102'
next_holiday = pd.to_datetime(date_holiday) + pd.Timedelta(2, 'd')
date_weekend = '20210424'
next_weekend = pd.to_datetime(date_weekend) + pd.Timedelta(2, 'd')
date_seems_trade_day = '20210217'
next_seems_trade_day = '20210217'
date_too_early = '19890601'
date_too_late = '20230105'
date_christmas = '20201225'
self.assertEqual(pd.to_datetime(next_trade_day(date_trade)),
pd.to_datetime(date_trade))
self.assertEqual(pd.to_datetime(next_trade_day(date_holiday)),
pd.to_datetime(next_holiday))
self.assertEqual(pd.to_datetime(next_trade_day(date_weekend)),
pd.to_datetime(next_weekend))
self.assertEqual(pd.to_datetime(next_trade_day(date_seems_trade_day)),
pd.to_datetime(next_seems_trade_day))
self.assertEqual(pd.to_datetime(next_trade_day(date_too_early)),
pd.to_datetime(date_too_early))
self.assertEqual(pd.to_datetime(next_trade_day(date_too_late)),
pd.to_datetime(date_too_late))
self.assertEqual(pd.to_datetime(next_trade_day(date_christmas)),
pd.to_datetime(date_christmas))
def test_prev_market_trade_day(self):
""" test the function prev_market_trade_day()
"""
date_trade = '20210401'
date_holiday = '20210102'
prev_holiday = pd.to_datetime(date_holiday) - pd.Timedelta(2, 'd')
date_weekend = '20210424'
prev_weekend = pd.to_datetime(date_weekend) - pd.Timedelta(1, 'd')
date_seems_trade_day = '20210217'
prev_seems_trade_day = pd.to_datetime(date_seems_trade_day) - pd.Timedelta(7, 'd')
date_too_early = '19890601'
date_too_late = '20230105'
date_christmas = '20201225'
prev_christmas_xhkg = '20201224'
self.assertEqual(pd.to_datetime(prev_market_trade_day(date_trade)),
pd.to_datetime(date_trade))
self.assertEqual(pd.to_datetime(prev_market_trade_day(date_holiday)),
| pd.to_datetime(prev_holiday) | pandas.to_datetime |
#!/usr/bin/env python3
import arbor
import pandas, seaborn
import matplotlib.pyplot as plt
# Construct chains of cells linked with gap junctions,
# Chains are connected by synapses.
# An event generator is attached to the first cell in the network.
#
# c --gj-- c --gj-- c --gj-- c --gj-- c
# |
# syn
# |
# c --gj-- c --gj-- c --gj-- c --gj-- c
#
# The individual cells consist of a soma and one dendrite
def make_cable_cell(gid):
# Build a segment tree
tree = arbor.segment_tree()
# Soma with radius 5 μm and length 2 * radius = 10 μm, (tag = 1)
s = tree.append(arbor.mnpos, arbor.mpoint(-10, 0, 0, 5), arbor.mpoint(0, 0, 0, 5), tag=1)
# Single dendrite with radius 2 μm and length 40 μm, (tag = 2)
b = tree.append(s, arbor.mpoint(0, 0, 0, 2), arbor.mpoint(40, 0, 0, 2), tag=2)
# Label dictionary for cell components
labels = arbor.label_dict()
labels['soma'] = '(tag 1)'
labels['dend'] = '(tag 2)'
# Mark location for synapse site at midpoint of dendrite (branch 0 = soma + dendrite)
labels['synapse_site'] = '(location 0 0.6)'
# Gap junction site at connection point of soma and dendrite
labels['gj_site'] = '(location 0 0.2)'
# Label root of the tree
labels['root'] = '(root)'
# Paint dynamics onto the cell, hh on soma and passive properties on dendrite
decor = arbor.decor()
decor.paint('"soma"', arbor.density("hh"))
decor.paint('"dend"', arbor.density("pas"))
# Attach one synapse and gap junction each on their labeled sites
decor.place('"synapse_site"', arbor.synapse('expsyn'), 'syn')
decor.place('"gj_site"', arbor.junction('gj'), 'gj')
# Attach spike detector to cell root
decor.place('"root"', arbor.spike_detector(-10), 'detector')
cell = arbor.cable_cell(tree, labels, decor)
return cell
# Create a recipe that generates connected chains of cells
class chain_recipe(arbor.recipe):
def __init__(self, ncells_per_chain, nchains):
arbor.recipe.__init__(self)
self.nchains = nchains
self.ncells_per_chain = ncells_per_chain
self.props = arbor.neuron_cable_properties()
self.cat = arbor.default_catalogue()
self.props.register(self.cat)
def num_cells(self):
return self.ncells_per_chain * self.nchains
def cell_description(self, gid):
return make_cable_cell(gid)
def cell_kind(self, gid):
return arbor.cell_kind.cable
# Create synapse connection between last cell of one chain and first cell of following chain
def connections_on(self, gid):
if (gid == 0) or (gid % self.ncells_per_chain > 0):
return []
else:
src = gid-1
w = 0.05
d = 10
return [arbor.connection((src,'detector'), 'syn', w, d)]
# Create gap junction connections between a cell within a chain and its neighbor(s)
def gap_junctions_on(self, gid):
conns = []
chain_begin = int(gid/self.ncells_per_chain) * self.ncells_per_chain
chain_end = chain_begin + self.ncells_per_chain
next_cell = gid + 1
prev_cell = gid - 1
if next_cell < chain_end:
conns.append(arbor.gap_junction_connection((gid+1, 'gj'), 'gj', 0.015))
if prev_cell >= chain_begin:
conns.append(arbor.gap_junction_connection((gid-1, 'gj'), 'gj', 0.015))
return conns
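# Added worked example (not in the original script): with ncells_per_chain = 5,
# gid 7 lies in the middle of the second chain (gids 5-9), so gap_junctions_on(7)
# returns gap junctions to gids 6 and 8; gid 5 (chain start) connects only to gid 6,
# and gid 9 (chain end) only to gid 8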
# Event generator at first cell
def event_generators(self, gid):
if gid==0:
sched = arbor.explicit_schedule([1])
weight = 0.1
return [arbor.event_generator('syn', weight, sched)]
return []
# Place a probe at the root of each cell
def probes(self, gid):
return [arbor.cable_probe_membrane_voltage('"root"')]
def global_properties(self, kind):
return self.props
# Number of cells per chain
ncells_per_chain = 5
# Number of chains
nchains = 3
# Total number of cells
ncells = nchains * ncells_per_chain
#Instantiate recipe
recipe = chain_recipe(ncells_per_chain, nchains)
# Create a default execution context, domain decomposition and simulation
context = arbor.context()
decomp = arbor.partition_load_balance(recipe, context)
sim = arbor.simulation(recipe, decomp, context)
# Set spike generators to record
sim.record(arbor.spike_recording.all)
# Sampler
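# (added note) each handle records the membrane-voltage probe declared in probes()
# (probe id 0 on cell gid), sampled on a regular 0.1 ms schedule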
handles = [sim.sample((gid, 0), arbor.regular_schedule(0.1)) for gid in range(ncells)]
# Run simulation for 100 ms
sim.run(100)
print('Simulation finished')
# Print spike times
print('spikes:')
for sp in sim.spikes():
print(' ', sp)
# Plot the results
print("Plotting results ...")
df_list = []
for gid in range(ncells):
samples, meta = sim.samples(handles[gid])[0]
df_list.append(pandas.DataFrame({'t/ms': samples[:, 0], 'U/mV': samples[:, 1], 'Cell': f"cell {gid}"}))
df = | pandas.concat(df_list,ignore_index=True) | pandas.concat |
import math
import numpy as np
import pandas as pd
import seaborn as sns
import scipy.stats as ss
import matplotlib.pyplot as plt
from collections import Counter
def convert(data, to):
converted = None
if to == 'array':
if isinstance(data, np.ndarray):
converted = data
elif isinstance(data, pd.Series):
converted = data.values
elif isinstance(data, list):
converted = np.array(data)
elif isinstance(data, pd.DataFrame):
converted = data.values
elif to == 'list':
if isinstance(data, list):
converted = data
elif isinstance(data, pd.Series):
converted = data.values.tolist()
elif isinstance(data, np.ndarray):
converted = data.tolist()
elif to == 'dataframe':
if isinstance(data, pd.DataFrame):
converted = data
elif isinstance(data, np.ndarray):
converted = pd.DataFrame(data)
else:
raise ValueError("Unknown data conversion: {}".format(to))
if converted is None:
raise TypeError('cannot handle data conversion of type: {} to {}'.format(type(data),to))
else:
return converted
def conditional_entropy(x, y):
"""
Calculates the conditional entropy of x given y: S(x|y)
Wikipedia: https://en.wikipedia.org/wiki/Conditional_entropy
:param x: list / NumPy ndarray / Pandas Series
A sequence of measurements
:param y: list / NumPy ndarray / Pandas Series
A sequence of measurements
:return: float
"""
# entropy of x given y
y_counter = Counter(y)
xy_counter = Counter(list(zip(x,y)))
total_occurrences = sum(y_counter.values())
entropy = 0.0
for xy in xy_counter.keys():
p_xy = xy_counter[xy] / total_occurrences
p_y = y_counter[xy[1]] / total_occurrences
entropy += p_xy * math.log(p_y/p_xy)
return entropy
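# Hedged usage sketch (added for illustration, not part of the original module); the
# helper below is hypothetical and uses made-up data to show what conditional_entropy
# computes: S(x|y) drops to 0 when y fully determines x.
def _demo_conditional_entropy():
    x = ['a', 'a', 'b', 'b', 'c', 'c']
    y_uninformative = [1, 2, 1, 2, 1, 2]  # knowing y tells us nothing about x
    y_informative = [1, 1, 2, 2, 3, 3]    # knowing y pins down x exactly
    # returns roughly (log(3), 0.0)
    return conditional_entropy(x, y_uninformative), conditional_entropy(x, y_informative)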
def cramers_v(x, y):
"""
Calculates Cramer's V statistic for categorical-categorical association.
Uses correction from Bergsma and Wicher, Journal of the Korean Statistical Society 42 (2013): 323-328.
This is a symmetric coefficient: V(x,y) = V(y,x)
Original function taken from: https://stackoverflow.com/a/46498792/5863503
Wikipedia: https://en.wikipedia.org/wiki/Cram%C3%A9r%27s_V
:param x: list / NumPy ndarray / Pandas Series
A sequence of categorical measurements
:param y: list / NumPy ndarray / Pandas Series
A sequence of categorical measurements
:return: float
in the range of [0,1]
"""
confusion_matrix = pd.crosstab(x,y)
chi2 = ss.chi2_contingency(confusion_matrix)[0]
n = confusion_matrix.sum().sum()
phi2 = chi2/n
r,k = confusion_matrix.shape
phi2corr = max(0, phi2-((k-1)*(r-1))/(n-1))
rcorr = r-((r-1)**2)/(n-1)
kcorr = k-((k-1)**2)/(n-1)
return np.sqrt(phi2corr/min((kcorr-1),(rcorr-1)))
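# Hedged usage sketch (added for illustration, not part of the original module); the
# helper and its data are made up, and simply demonstrate that Cramer's V is symmetric.
def _demo_cramers_v():
    x = ['cat'] * 5 + ['dog'] * 5
    y = ['yes'] * 5 + ['no'] * 5
    # perfectly aligned labels, so both calls return the same value
    return cramers_v(x, y), cramers_v(y, x)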
def theils_u(x, y):
"""
Calculates Theil's U statistic (Uncertainty coefficient) for categorical-categorical association.
This is the uncertainty of x given y: its value is in the range [0,1], where 0 means y provides no information about
x, and 1 means y provides full information about x.
This is an asymmetric coefficient: U(x,y) != U(y,x)
Wikipedia: https://en.wikipedia.org/wiki/Uncertainty_coefficient
:param x: list / NumPy ndarray / Pandas Series
A sequence of categorical measurements
:param y: list / NumPy ndarray / Pandas Series
A sequence of categorical measurements
:return: float
in the range of [0,1]
"""
s_xy = conditional_entropy(x,y)
x_counter = Counter(x)
total_occurrences = sum(x_counter.values())
p_x = list(map(lambda n: n/total_occurrences, x_counter.values()))
s_x = ss.entropy(p_x)
if s_x == 0:
return 1
else:
return (s_x - s_xy) / s_x
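# Hedged usage sketch (added for illustration, not part of the original module); the
# helper uses made-up data to show the asymmetry: y fully determines x here, so
# U(x|y) == 1, while U(y|x) < 1 because x only narrows y down to a few candidates.
def _demo_theils_u():
    x = ['a', 'a', 'a', 'b', 'b', 'b']
    y = [1, 1, 2, 3, 3, 4]
    return theils_u(x, y), theils_u(y, x)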
def correlation_ratio(categories, measurements):
"""
Calculates the Correlation Ratio (sometimes marked by the greek letter Eta) for categorical-continuous association.
Answers the question - given a continuous value of a measurement, is it possible to know which category is it
associated with?
Value is in the range [0,1], where 0 means a category cannot be determined by a continuous measurement, and 1 means
a category can be determined with absolute certainty.
Wikipedia: https://en.wikipedia.org/wiki/Correlation_ratio
:param categories: list / NumPy ndarray / Pandas Series
A sequence of categorical measurements
:param measurements: list / NumPy ndarray / Pandas Series
A sequence of continuous measurements
:return: float
in the range of [0,1]
"""
categories = convert(categories, 'array')
measurements = convert(measurements, 'array')
fcat, _ = pd.factorize(categories)
cat_num = np.max(fcat)+1
y_avg_array = np.zeros(cat_num)
n_array = np.zeros(cat_num)
for i in range(0,cat_num):
cat_measures = measurements[np.argwhere(fcat == i).flatten()]
n_array[i] = len(cat_measures)
y_avg_array[i] = np.average(cat_measures)
y_total_avg = np.sum(np.multiply(y_avg_array,n_array))/np.sum(n_array)
numerator = np.sum(np.multiply(n_array,np.power(np.subtract(y_avg_array,y_total_avg),2)))
denominator = np.sum(np.power(np.subtract(measurements,y_total_avg),2))
if numerator == 0:
eta = 0.0
else:
eta = np.sqrt(numerator/denominator)
return eta
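# Hedged usage sketch (added for illustration, not part of the original module); the
# measurements below are invented so that each category clusters tightly around its
# own mean, which pushes the correlation ratio close to its upper bound of 1.
def _demo_correlation_ratio():
    categories = ['low', 'low', 'low', 'high', 'high', 'high']
    measurements = [1.0, 1.1, 0.9, 10.0, 10.2, 9.8]
    return correlation_ratio(categories, measurements)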
def associations(dataset, nominal_columns=None, mark_columns=False, theil_u=False, plot=True,
return_results = False, **kwargs):
"""
Calculate the correlation/strength-of-association of features in data-set with both categorical (nominal) and
continuous features using:
- Pearson's R for continuous-continuous cases
- Correlation Ratio for categorical-continuous cases
- Cramer's V or Theil's U for categorical-categorical cases
:param dataset: NumPy ndarray / Pandas DataFrame
The data-set for which the features' correlation is computed
:param nominal_columns: string / list / NumPy ndarray
Names of columns of the data-set which hold categorical values. Can also be the string 'all' to state that all
columns are categorical, or None (default) to state none are categorical
:param mark_columns: Boolean (default: False)
if True, output's columns' names will have a suffix of '(nom)' or '(con)' based on their type (nominal or
continuous), as provided by nominal_columns
:param theil_u: Boolean (default: False)
In the case of categorical-categorical features, use Theil's U instead of Cramer's V
:param plot: Boolean (default: True)
If True, plot a heat-map of the correlation matrix
:param return_results: Boolean (default: False)
If True, the function will return a Pandas DataFrame of the computed associations
:param kwargs:
Arguments to be passed to used function and methods
:return: Pandas DataFrame
A DataFrame of the correlation/strength-of-association between all features
"""
dataset = convert(dataset, 'dataframe')
columns = dataset.columns
if nominal_columns is None:
nominal_columns = list()
elif nominal_columns == 'all':
nominal_columns = columns
corr = pd.DataFrame(index=columns, columns=columns)
for i in range(0,len(columns)):
for j in range(i,len(columns)):
if i == j:
corr[columns[i]][columns[j]] = 1.0
else:
if columns[i] in nominal_columns:
if columns[j] in nominal_columns:
if theil_u:
corr[columns[j]][columns[i]] = theils_u(dataset[columns[i]],dataset[columns[j]])
corr[columns[i]][columns[j]] = theils_u(dataset[columns[j]],dataset[columns[i]])
else:
cell = cramers_v(dataset[columns[i]],dataset[columns[j]])
corr[columns[i]][columns[j]] = cell
corr[columns[j]][columns[i]] = cell
else:
cell = correlation_ratio(dataset[columns[i]], dataset[columns[j]])
corr[columns[i]][columns[j]] = cell
corr[columns[j]][columns[i]] = cell
else:
if columns[j] in nominal_columns:
cell = correlation_ratio(dataset[columns[j]], dataset[columns[i]])
corr[columns[i]][columns[j]] = cell
corr[columns[j]][columns[i]] = cell
else:
cell, _ = ss.pearsonr(dataset[columns[i]], dataset[columns[j]])
corr[columns[i]][columns[j]] = cell
corr[columns[j]][columns[i]] = cell
corr.fillna(value=np.nan, inplace=True)
if mark_columns:
marked_columns = ['{} (nom)'.format(col) if col in nominal_columns else '{} (con)'.format(col) for col in columns]
corr.columns = marked_columns
corr.index = marked_columns
if plot:
plt.figure(figsize=kwargs.get('figsize',None))
sns.heatmap(corr, annot=kwargs.get('annot',True), fmt=kwargs.get('fmt','.2f'))
plt.show()
if return_results:
return corr
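# Hedged usage sketch (added for illustration, not part of the original module); the
# tiny mixed-type frame below is invented: 'fruit' is nominal, 'weight' is continuous,
# and plot=False / return_results=True fetches the association matrix without a heat-map.
def _demo_associations():
    df = pd.DataFrame({'fruit': ['apple', 'apple', 'pear', 'pear'],
                       'weight': [150.0, 155.0, 180.0, 178.0]})
    return associations(df, nominal_columns=['fruit'], plot=False, return_results=True)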
def numerical_encoding(dataset, nominal_columns='all', drop_single_label=False, drop_fact_dict=True):
"""
Encoding a data-set with mixed data (numerical and categorical) to a numerical-only data-set,
using the following logic:
- categorical with only a single value will be marked as zero (or dropped, if requested)
- categorical with two values will be replaced with the result of Pandas `factorize`
- categorical with more than two values will be replaced with the result of Pandas `get_dummies`
- numerical columns will not be modified
:param dataset: NumPy ndarray / Pandas DataFrame
The data-set to encode
:param nominal_columns: sequence / string
A sequence of the nominal (categorical) columns in the dataset. If string, must be 'all' to state that
all columns are nominal. If None, nothing happens. Default: 'all'
:param drop_single_label: Boolean (default: False)
If True, nominal columns with only a single value will be dropped.
:param drop_fact_dict: Boolean (default: True)
If True, the return value will be the encoded DataFrame alone. If False, it will be a tuple of
the DataFrame and the dictionary of the binary factorization (originating from pd.factorize)
:return: DataFrame or (DataFrame, dict)
If drop_fact_dict is True, returns the encoded DataFrame. else, returns a tuple of the encoded DataFrame and
dictionary, where each key is a two-value column, and the value is the original labels, as supplied by
Pandas `factorize`. Will be empty if no two-value columns are present in the data-set
"""
dataset = convert(dataset, 'dataframe')
if nominal_columns is None:
return dataset
elif nominal_columns == 'all':
nominal_columns = dataset.columns
converted_dataset = pd.DataFrame()
binary_columns_dict = dict()
for col in dataset.columns:
if col not in nominal_columns:
converted_dataset.loc[:,col] = dataset[col]
else:
unique_values = pd.unique(dataset[col])
if len(unique_values) == 1 and not drop_single_label:
converted_dataset.loc[:,col] = 0
elif len(unique_values) == 2:
converted_dataset.loc[:,col], binary_columns_dict[col] = pd.factorize(dataset[col])
else:
dummies = | pd.get_dummies(dataset[col],prefix=col) | pandas.get_dummies |
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
msg = r"'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=-1)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# with invalid chunksize value:
msg = r"'chunksize' must be an integer >=1"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=1.3)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=0)
def test_read_chunksize_and_nrows(self):
# gh-15755
# With nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=2, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# chunksize > nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# with changing "size":
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(reader.get_chunk(size=2), df.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), df.iloc[2:5])
with pytest.raises(StopIteration):
reader.get_chunk(size=3)
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
assert len(piece) == 2
def test_read_chunksize_generated_index(self):
# GH 12185
reader = self.read_csv(StringIO(self.data1), chunksize=2)
df = self.read_csv(StringIO(self.data1))
tm.assert_frame_equal(pd.concat(reader), df)
reader = self.read_csv(StringIO(self.data1), chunksize=2, index_col=0)
df = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(pd.concat(reader), df)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# See gh-6607
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
assert isinstance(treader, TextFileReader)
# gh-3967: stopping iteration when chunksize is specified
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
assert len(result) == 3
tm.assert_frame_equal(pd.concat(result), expected)
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# test bad parameter (skipfooter)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skipfooter=1)
pytest.raises(ValueError, reader.read, 3)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_blank_df(self):
# GH 14545
data = """a,b
"""
df = self.read_csv(StringIO(data), header=[0])
expected = DataFrame(columns=['a', 'b'])
tm.assert_frame_equal(df, expected)
round_trip = self.read_csv(StringIO(
expected.to_csv(index=False)), header=[0])
tm.assert_frame_equal(round_trip, expected)
data_multiline = """a,b
c,d
"""
df2 = self.read_csv(StringIO(data_multiline), header=[0, 1])
cols = MultiIndex.from_tuples([('a', 'c'), ('b', 'd')])
expected2 = DataFrame(columns=cols)
tm.assert_frame_equal(df2, expected2)
round_trip = self.read_csv(StringIO(
expected2.to_csv(index=False)), header=[0, 1])
tm.assert_frame_equal(round_trip, expected2)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
        df = self.read_table(StringIO(data))
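        # The source is truncated here. A plausible completion (an assumption, based on the
        # upstream pandas test of the same name) would pass sep=' ' and then check:
        # assert df.index.name is None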
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 14 15:17:16 2018
@author: trevor
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.stats as st
import time
from sklearn import linear_model
from pylab import mpl
from scipy.optimize import fsolve
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from matplotlib.font_manager import _rebuild
_rebuild()
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
import backtracking
cbond_price = pd.read_csv('cbond_price.csv',encoding='GBK')
stock_price = pd.read_csv('stock_price.csv', encoding='GBK')
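# Hedged sketch (not from the source): a typical next step is to align the convertible bond
# and underlying stock price series on a shared date column before backtesting. The column
# name 'date' is an assumption about the CSV layout.
# merged = pd.merge(cbond_price, stock_price, on='date', suffixes=('_cbond', '_stock'))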
import os
import gc
import time
import imghdr
from io import BytesIO
from typing import List, Optional
from datetime import datetime
import requests
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm # if you don't use IPython Kernel like jupyter, you should change "tqdm.notebook" to "tqdm"
from cairosvg import svg2png
from PIL import Image
import cv2
def get_opensea_api_key():
return os.getenv('OPENSEA_API_KEY')
def get_comet_ml_key():
return os.getenv('COMET_ML_KEY')
def is_image(url) -> bool:
"""
Determine if it is an image of png or jpeg.
Parameters
----------
url : str
Target url.
Returns
-------
    True or False: True if the url content is a png or jpeg image, otherwise False.
"""
img = requests.get(url).content
img_type = imghdr.what(None, h=img)
if img_type in ['png', 'jpeg']:
return True
else:
return False
def is_svg(url) -> bool:
"""
Determine if it is an image of svg.
Parameters
----------
url : str
Target url.
Returns
-------
    True or False: True if the url content is an svg image, otherwise False.
"""
if url.endswith(".svg"):
return True
else:
return False
def save_png(url, file_name) -> None:
"""
Save an image of png or jpeg as a png file.
Parameters
----------
url : str
Target url.
file_name : str
The file path of a saved png file.
Returns
-------
None
"""
img = requests.get(url).content
img = Image.open(BytesIO(img)).convert("RGBA")
img = cv2.cvtColor(np.array(img), cv2.COLOR_RGBA2BGRA)
cv2.imwrite(file_name, img, [int(cv2.IMWRITE_PNG_COMPRESSION), 3])
def save_svg(url, file_name) -> None:
"""
    Convert an svg image to png and save it as a png file. Animated svg content cannot be saved.
Parameters
----------
url : str
Target url.
file_name : str
The file path of a saved png file.
Returns
-------
None
"""
img = requests.get(url).content
img = svg2png(bytestring=img)
img = Image.open(BytesIO(img)).convert("RGBA")
img = cv2.cvtColor(np.array(img), cv2.COLOR_RGBA2BGRA)
cv2.imwrite(file_name, img)
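# Hedged usage sketch for the helpers above (the URL and output path are assumptions):
# url = "https://example.com/token_image.png"
# if is_image(url):
#     save_png(url, "images/0.png")
# elif is_svg(url):
#     save_svg(url, "images/0.png")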
def get_random_data(dir_name: str, num_loop: Optional[int] = None,
start_id: int = 0, is_test: bool = False) -> pd.DataFrame:
"""
    Get data on NFTs registered on OpenSea by using the OpenSea API.
    You can fetch a large amount of data randomly. If you want to change which data is acquired,
    you should consult the OpenSea API reference and adjust 'params' and 'get_features'.
    Also, a delay of one request per minute is set to reduce the server load.
    Please change it according to your preference (within the bounds of common sense).
    If the fetched content is not png, jpeg, or svg (still image), it is not saved
    (but the process continues).
Parameter
---------
dir_name : str
Directory path to save images.
num_loop : int
        Number of loops. The number of items fetched is 'num_loop' * 50.
    is_test : bool
        Fetch a small dataset (num_loop=5) regardless of the value of num_loop if you set is_test=True.
Returns
-------
df : pd.DataFrame
        The DataFrame of NFT data, including all image ids, etc.
See Also
--------
get_features : list of str
There are hundreds of columns of original data, but the number of data to be
acquired is limited. Please change according to your preference if column names
that you need are not included.
params : dict of requests parameters
        Like get_features, please change according to your preference if you want to change
        which data is fetched.
"""
DATAPATH = dir_name
df = pd.DataFrame()
img_id = start_id
url = "https://api.opensea.io/api/v1/assets"
if is_test or num_loop is None:
num_loop = 5
print("This function execute on test mode(automatically changes to num_loop=5).")
for idx in tqdm(range(num_loop)):
try:
params = {"limit": "50",
"order_by": "sale_date",
"order_direction": "desc",
"offset": str(50*idx)}
response = requests.get(url, params=params)
data = response.json()
assets_df = pd.json_normalize(data['assets'])
for feature in assets_df.columns.values:
if feature not in df.columns.values:
df[feature] = None
for feature in df.columns.values:
if feature not in assets_df.columns.values:
assets_df[feature] = None
for i in range(assets_df.shape[0]):
img_url = assets_df.iloc[i]['image_url']
img_url.replace(" ", "")
if is_image(img_url):
file_name = os.path.join(DATAPATH, f"{img_id}.png")
save_png(img_url, file_name)
df = df.append(assets_df.iloc[i])
img_id += 1
elif is_svg(img_url):
file_name = os.path.join(DATAPATH, f"{img_id}.png")
save_svg(img_url, file_name)
df = df.append(assets_df.iloc[i])
img_id += 1
else:
continue
gc.collect() # Just in case, free the memory so that the process does not stop
time.sleep(60)
except:
gc.collect()
time.sleep(60)
continue
df = df.reset_index(drop=True)
df['image_id'] = (df.index.values.astype(int)+start_id).astype(str)
df['image_id'] = df['image_id'].apply(lambda x: x + '.png')
return df
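# Hedged usage sketch (directory name, loop count and output path are assumptions):
# random_df = get_random_data(dir_name="images", num_loop=2, start_id=0, is_test=True)
# random_df.to_csv("random_nft_metadata.csv", index=False)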
def get_collection_data(dir_name: str, target_collections: Optional[List[str]] = None,
is_test: bool = False) -> pd.DataFrame:
"""
    Get data on NFTs registered on OpenSea by using the OpenSea API.
    You can fetch a large amount of data from the collections you prefer. If you want to change
    which data is acquired, you should consult the OpenSea API reference and adjust 'params' and 'get_features'.
    Also, a delay of one request per minute is set to reduce the server load.
    Please change it according to your preference (within the bounds of common sense).
    If the fetched content is not png, jpeg, or svg (still image), it is not saved
    (but the process continues).
Parameter
---------
dir_name : str
Directory path to save images.
target_collections : list of str
The list of collection names you prefer.
        This variable can be set to None, but then you must set is_test=True.
    is_test : bool
        Fetch a small dataset regardless of the value of target_collections if you set is_test=True.
Returns
-------
df : pd.DataFrame
        The DataFrame of NFT data, including all image ids, etc.
See Also
--------
get_features : list of str
There are hundreds of columns of original data, but the number of data to be
acquired is limited. Please change according to your preference if column names
that you need are not included.
params : dict of requests parameters
        Like get_features, please change according to your preference if you want to change
        which data is fetched.
"""
DATAPATH = dir_name
e_count = 0
e_collection = []
if is_test:
print("This function execute on test mode.")
print("Automatically set target_collections:\n['cryptopunks', 'boredapeyachtclub', 'doodles-official']")
target_collections = ['cryptopunks', 'boredapeyachtclub', 'doodles-official']
df = pd.DataFrame()
img_id = 0
url = "https://api.opensea.io/api/v1/assets"
for collection in target_collections:
for idx in tqdm(range(10), ascii=True, desc=collection):
try:
params = {
"offset": str(50*idx),
"order_by": "sale_date",
"order_direction": "desc",
"limit": "50",
"collection": collection
}
response = requests.get(url, params=params)
data = response.json()
assets_df = pd.json_normalize(data['assets'])
for feature in assets_df.columns.values:
if feature not in df.columns.values:
df[feature] = None
for feature in df.columns.values:
if feature not in assets_df.columns.values:
assets_df[feature] = None
for i in range(assets_df.shape[0]):
img_url = assets_df.iloc[i]['image_url']
img_url.replace(" ", "")
if is_image(img_url):
file_name = os.path.join(DATAPATH, f"{img_id}.png")
save_png(img_url, file_name)
df = df.append(assets_df.iloc[i])
img_id += 1
elif is_svg(img_url):
file_name = os.path.join(DATAPATH, f"{img_id}.png")
save_svg(img_url, file_name)
df = df.append(assets_df.iloc[i])
img_id += 1
else:
continue
gc.collect() # Just in case, free the memory so that the process does not stop
time.sleep(60)
except:
e_count += 1
e_collection.append(collection)
gc.collect()
time.sleep(60)
continue
print(f"error count: {e_count}")
print(f"error collection: {list(set(e_collection))}")
df = df.reset_index(drop=True)
df['image_id'] = df.index.values.astype(str)
df['image_id'] = df['image_id'].apply(lambda x: x + '.png')
return df
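# Hedged usage sketch (the collections mirror the built-in test mode; the output path is an assumption):
# collection_df = get_collection_data(dir_name="images", is_test=True)
# collection_df.to_csv("collection_nft_metadata.csv", index=False)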
def get_data(asset_contract_address: str, token_id: str):
"""
Get the asset data.
Parameters
----------
asset_contract_address : str
The string of asset contract address.
token_id : str
The string of token id.
Returns
-------
    asset_df : pd.DataFrame
The dataframe of asset data.
"""
if type(token_id) != str:
token_id = str(token_id)
url = f"https://api.opensea.io/api/v1/asset/{asset_contract_address}/{token_id}/"
response = requests.request("GET", url)
data = response.json()
asset_df = pd.json_normalize(data)
return asset_df
def get_events_data(dir_name: str, num_loop: Optional[int] = 5,
start_id: int = 0, is_test: bool = False,
start_date: List[int] =[2020, 1, 1, 0, 0, 0], total_date: int = 700) -> pd.DataFrame:
"""
    Get event data for NFTs registered on OpenSea by using the OpenSea API.
    You can fetch a large amount of data randomly. If you want to change which data is acquired,
    you should consult the OpenSea API reference and adjust 'params' and 'get_features'.
    Also, a delay of one request per minute is set to reduce the server load.
    Please change it according to your preference (within the bounds of common sense).
    If the fetched content is not png, jpeg, or svg (still image), it is not saved
    (but the process continues).
Parameter
---------
dir_name : str
Directory path to save images.
num_loop : int
        Number of loops. The number of items fetched is 'num_loop' * 50.
    is_test : bool
        Fetch a small dataset (num_loop=5) regardless of the value of num_loop if you set is_test=True.
Returns
-------
df : pd.DataFrame
        The DataFrame of NFT data, including all image ids, etc.
See Also
--------
get_features : list of str
There are hundreds of columns of original data, but the number of data to be
acquired is limited. Please change according to your preference if column names
that you need are not included.
params : dict of requests parameters
        Like get_features, please change according to your preference if you want to change
        which data is fetched.
"""
DATAPATH = dir_name
df = pd.DataFrame()
img_id = start_id
before_date = int(datetime(*start_date).timestamp())
headers = {"Accept": "application/json",
"X-API-KEY": get_opensea_api_key()}
if is_test:
num_loop = 3
total_date = 3
print("This function execute on test mode(automatically changes to total_date=3 and num_loop=3).")
for _ in tqdm(range(total_date)):
for idx in range(num_loop):
url = f"https://api.opensea.io/api/v1/events?event_type=offer_entered&only_opensea=false&occurred_before={before_date}"
try:
params = {"limit": "50",
"offset": str(1000*idx)}
response = requests.get(url, params=params, headers=headers)
data = response.json()
assets_df = pd.json_normalize(data['asset_events'])
for feature in assets_df.columns.values:
if feature not in df.columns.values:
df[feature] = None
for feature in df.columns.values:
if feature not in assets_df.columns.values:
assets_df[feature] = None
for i in range(assets_df.shape[0]):
img_url = assets_df.iloc[i]['asset.image_url']
img_url.replace(" ", "")
if is_image(img_url):
file_name = os.path.join(DATAPATH, f"{img_id}.png")
save_png(img_url, file_name)
df = df.append(assets_df.iloc[i])
img_id += 1
elif is_svg(img_url):
file_name = os.path.join(DATAPATH, f"{img_id}.png")
save_svg(img_url, file_name)
df = df.append(assets_df.iloc[i])
img_id += 1
else:
continue
gc.collect() # Just in case, free the memory so that the process does not stop
time.sleep(30)
except:
gc.collect()
time.sleep(30)
continue
before_date += 86400
df = df.reset_index(drop=True)
df['image_id'] = (df.index.values.astype(int)+start_id).astype(str)
df['image_id'] = df['image_id'].apply(lambda x: x + '.png')
return df
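# Hedged usage sketch (directory, start date and output path are assumptions):
# events_df = get_events_data(dir_name="images", start_date=[2021, 1, 1, 0, 0, 0], is_test=True)
# events_df.to_csv("nft_offer_events.csv", index=False)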
def concat_past_data(df: pd.DataFrame, num_past=10):
"""
    Get the NFT event data and concatenate successful sale prices to df.
    Parameters
---------
df : pd.DataFrame
Dataframe of collection data.
num_past : int
        Maximum number of past prices to keep.
"""
for i in range(num_past):
df[f'past_price{i}'] = 0
address_list = df['asset_contract.address'].values
token_id_list = df['token_id'].values
for idx, url_li in tqdm(enumerate(zip(address_list, token_id_list))):
url = f"https://api.opensea.io/api/v1/events?asset_contract_address={url_li[0]}&token_id={url_li[1]}&only_opensea=false&offset=0&limit=50"
headers = {"Accept": "application/json",
"X-API-KEY": get_opensea_api_key()}
response = requests.request("GET", url, headers=headers)
data = response.json()
past_df = pd.json_normalize(data['asset_events'])
price_list = past_df.query("event_type == 'successful'")['total_price'].values
for i in range(min(num_past, len(price_list))):
df.loc[idx, f'past_price{i}'] = price_list[i]
time.sleep(30)
return df
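# Hedged usage sketch (assumes `collection_df` was produced by get_collection_data above):
# collection_df = concat_past_data(collection_df, num_past=10)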
def get_past_data(df: pd.DataFrame, dir_name: str):
"""
    Get the NFT event data as a new dataframe. This function can fetch up to 100 past events per asset.
    Parameters
---------
df : pd.DataFrame
Dataframe of the collection data.
dir_name : str
        The name of the directory where you want to save the data.
"""
address_list = df['asset_contract.address'].values
token_id_list = df['token_id'].values
    before_date_list = pd.to_datetime(df['last_sale.created_date'])
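    # Hedged sketch of the truncated remainder (an assumption, not the original code):
    # fetch up to 100 past events per asset, mirroring the endpoint usage in concat_past_data.
    past_events = pd.DataFrame()
    headers = {"Accept": "application/json", "X-API-KEY": get_opensea_api_key()}
    for address, token_id, before_date in tqdm(zip(address_list, token_id_list, before_date_list)):
        url = (f"https://api.opensea.io/api/v1/events?asset_contract_address={address}"
               f"&token_id={token_id}&only_opensea=false&offset=0&limit=100"
               f"&occurred_before={int(before_date.timestamp())}")
        response = requests.request("GET", url, headers=headers)
        past_events = past_events.append(pd.json_normalize(response.json()['asset_events']))
        time.sleep(30)
    past_events = past_events.reset_index(drop=True)
    past_events.to_csv(os.path.join(dir_name, "past_events.csv"), index=False)
    return past_events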
import pandas
import string
import math
import csv
import os
import re
from unicodedata import normalize
import unicodedata
def corrigir_nomes(nome):
nome = nome.replace('Á', 'A').replace('É', 'E').replace('Í', 'I').replace('Ó', 'O').replace('Ú', 'U').replace('Ç', 'C')
return nome
def localize_floats(row):
return [
str(el).replace('.', ',') if isinstance(el, float) else el
for el in row
]
def primeiro_algarismo(txt):
txt = txt[0]
return txt
def remover_ponto_virgula_zero(txt):
txt = str(txt).replace(",", "")
txt = str(txt).replace(".", "")
txt = str(txt).replace(" ", "")
txt = str(txt).split("e")
try:
txt = txt[0]
except:
txt = txt
#txt = str(txt).rsplit("0")
while str(txt).endswith("0"):
txt = txt[:-1]
return txt
def remover_acentos(txt):
return normalize('NFKD', txt).encode('ASCII', 'ignore').decode('utf-8')
def remover_acentos_lista(txt):
return [
normalize('NFKD', el).encode('ASCII', 'ignore').decode('ASCII') for el in txt
]
def carrega_dados(diretorio, cidade):
files = os.listdir(diretorio)
files = sorted(files)
primeiro = True
print(cidade)
for file in files:
if file.split("_")[0] == cidade:
print("entrei ->", file)
df = pandas.read_csv(diretorio + file, sep = ',', encoding='utf-8')
df2 = df.loc[df['NOME_MUNICIPIO'] == cidade]
if primeiro:
df1 = df2
primeiro = False
else:
                df1 = pandas.concat([df1, df2], ignore_index=True)
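    # Likely continuation (the source is truncated here): return the concatenated frame.
    return df1

# Hedged usage sketch (the directory and city name are assumptions):
# dados = carrega_dados("dados/", "SAO PAULO")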
import base64
import io
import textwrap
import dash
import dash_core_components as dcc
import dash_html_components as html
import gunicorn
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
import flask
import pandas as pd
import urllib.parse
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
import numpy as np
import math
import scipy.stats
import dash_table
from dash_table.Format import Format, Scheme
from colour import Color
import dash_bootstrap_components as dbc
# from waitress import serve
external_stylesheets = [dbc.themes.BOOTSTRAP, 'https://codepen.io/chriddyp/pen/bWLwgP.css',
"https://codepen.io/sutharson/pen/dyYzEGZ.css",
"https://fonts.googleapis.com/css2?family=Raleway&display=swap",
"https://codepen.io/chriddyp/pen/brPBPO.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
# "external_url": "https://codepen.io/chriddyp/pen/brPBPO.css"
# https://raw.githubusercontent.com/aaml-analytics/pca-explorer/master/LoadingStatusStyleSheet.css
styles = {
'pre': {
'border': 'thin lightgrey solid',
'overflowX': 'scroll'
}
}
tabs_styles = {'height': '40px', 'font-family': 'Raleway', 'fontSize': 14}
tab_style = {
'borderBottom': '1px solid #d6d6d6',
'padding': '6px',
'Weight': 'bold'
}
tab_selected_style = {
'borderTop': '3px solid #333333',
'borderBottom': '1px solid #d6d6d6 ',
'backgroundColor': '#f6f6f6',
'color': '#333333',
# 'fontColor': '#004a4a',
'fontWeight': 'bold',
'padding': '6px'
}
# APP ABOUT DESCRIPTION
MOF_tool_about = textwrap.wrap(' These tools aim to provide a reproducible and consistent data visualisation platform '
'where experimental and computational researchers can use big data and statistical '
'analysis to find the best materials for specific applications. Principal Component '
'Analysis (PCA) is a dimension reduction technique that can be used to reduce a large '
'set of observable variables to a smaller set of latent variables that still contain '
'most of the information in the large set (feature extraction). This is done by '
'transforming a number of (possibly) correlated variables into some number of orthogonal '
'(uncorrelated) variables called principal components to find the directions of maximal '
'variance. PCA can be used to ease data visualisation by having fewer dimensions to plot '
'or be used as a pre-processing step before using another Machine Learning (ML)'
' algorithm for regression '
'and classification tasks. PCA can be used to improve an ML algorithm performance, '
'reduce overfitting and reduce noise in data.',
width=50)
Scree_plot_about = textwrap.wrap(' The Principal Component Analysis Visualisation Tools run PCA for the user and '
                                 'populate a Scree plot. This plot allows the user to determine if PCA is suitable '
                                 'for '
                                 'their dataset and whether they can accept an X% drop in explained variance to '
'have fewer dimensions.', width=50)
Feature_correlation_filter = textwrap.wrap("Feature correlation heatmaps provide users with feature analysis and "
"feature principal component analysis. This tool will allow users to see the"
" correlation between variables and the"
" covariances/correlations between original variables and the "
"principal components (loadings)."
, width=50)
plots_analysis = textwrap.wrap('Users can keep all variables as features or drop certain variables to produce a '
'Biplot, cos2 plot and contribution plot. The score plot is used to look for clusters, '
'trends, and outliers in the first two principal components. The loading plot is used to'
' visually interpret the first two principal components. The biplot overlays the score '
'plot and the loading plot on the same graph. The squared cosine (cos2) plot shows '
'the importance of a component for a given observation i.e. measures '
'how much a variable is represented in a component. The contribution plot contains the '
'contributions (%) of the variables to the principal components', width=50, )
data_table_download = textwrap.wrap("The user's inputs from the 'Plots' tab will provide the output of the data tables."
" The user can download the scores, eigenvalues, explained variance, "
"cumulative explained variance, loadings, "
"cos2 and contributions from the populated data tables. "
"Note: Wait for user inputs to be"
" computed (faded tab app will return to the original colour) before downloading the"
" data tables. ", width=50)
MOF_GH = textwrap.wrap(" to explore AAML's sample data and read more on"
" AAML's Principal Component Analysis Visualisation Tool Manual, FAQ's & Troubleshooting"
" on GitHub... ", width=50)
####################
# APP LAYOUT #
####################
fig = go.Figure()
fig1 = go.Figure()
app.layout = html.Div([
html.Div([
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/UOC.png',
height='35', width='140', style={'display': 'inline-block', 'padding-left': '1%'}),
html.Img(src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/A2ML-logo.png',
height='50', width='125', style={'float': 'right', 'display': 'inline-block', 'padding-right': '2%'}),
html.H1("Principal Component Analysis Visualisation Tools",
style={'display': 'inline-block', 'padding-left': '11%', 'text-align': 'center', 'fontSize': 36,
'color': 'white', 'font-family': 'Raleway'}),
html.H1("...", style={'fontColor': '#3c3c3c', 'fontSize': 6})
], style={'backgroundColor': '#333333'}),
html.Div([html.A('Refresh', href='/')], style={}),
html.Div([
html.H2("Upload Data", style={'fontSize': 24, 'font-family': 'Raleway', 'color': '#333333'}, ),
html.H3("Upload .txt, .csv or .xls files to starting exploring data...", style={'fontSize': 16,
'font-family': 'Raleway'}),
dcc.Store(id='csv-data', storage_type='session', data=None),
html.Div([dcc.Upload(
id='data-table-upload',
children=html.Div([html.Button('Upload File')],
style={'height': "60px", 'borderWidth': '1px',
'borderRadius': '5px',
'textAlign': 'center',
}),
multiple=False
),
html.Div(id='output-data-upload'),
]), ], style={'display': 'inline-block', 'padding-left': '1%', }),
html.Div([dcc.Tabs([
dcc.Tab(label='About', style=tab_style, selected_style=tab_selected_style,
children=[html.Div([html.H2(" What are AAML's Principal Component Analysis Visualisation Tools?",
style={'fontSize': 18, 'font-family': 'Raleway', 'font-weight': 'bold'
}),
html.Div([' '.join(MOF_tool_about)]
, style={'font-family': 'Raleway'}),
html.H2(["Scree Plot"],
style={'fontSize': 18,
'font-family': 'Raleway', 'font-weight': 'bold'}),
html.Div([' '.join(Scree_plot_about)], style={'font-family': 'Raleway'}),
html.H2(["Feature Correlation"], style={'fontSize': 18,
'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(Feature_correlation_filter)], style={'font-family': 'Raleway', }),
html.H2(["Plots"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(plots_analysis)], style={'font-family': 'Raleway'}),
html.H2(["Data tables"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(data_table_download)], style={'font-family': 'Raleway'}),
# ADD LINK
html.Div([html.Plaintext(
[' Click ', html.A('here ',
href='https://github.com/aaml-analytics/pca-explorer')],
style={'display': 'inline-block',
'fontSize': 14, 'font-family': 'Raleway'}),
html.Div([' '.join(MOF_GH)], style={'display': 'inline-block',
'fontSize': 14,
'font-family': 'Raleway'}),
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof'
'-explorer/master/github.png',
height='40', width='40',
style={'display': 'inline-block', 'float': "right"
})
]
, style={'display': 'inline-block'})
], style={'backgroundColor': '#ffffff', 'padding-left': '1%'}
)]),
dcc.Tab(label='Scree Plot', style=tab_style, selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='PC-Eigen-plot')
],
style={'display': 'inline-block',
'width': '49%'}),
html.Div([dcc.Graph(id='PC-Var-plot')
], style={'display': 'inline-block', 'float': 'right',
'width': '49%'}),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:", dcc.RadioItems(
id='outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '49%', 'padding-left': '1%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-scree',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Label(["You should attempt to use at least..."
, html.Div(id='var-output-container-filter')])
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["As a rule of thumb for the Scree Plot"
" Eigenvalues, the point where the slope of the curve "
"is clearly "
"leveling off (the elbow), indicates the number of "
"components that "
"should be retained as significant."])
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Feature correlation', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([html.Div([dcc.Graph(id='PC-feature-heatmap')
], style={'width': '47%',
'display': 'inline-block',
'float': 'right'}),
html.Div([dcc.Graph(id='feature-heatmap')
], style={'width': '51%',
'display': 'inline-block',
'float': 'left'}),
html.Div([html.Label(["Loading colour bar range:"
, html.Div(
id='color-range-container')])
], style={
'fontSize': 12,
'float': 'right',
'width': '100%',
'padding-left': '85%'}
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='PC-feature-outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label(
["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-heatmap',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([html.Label(["Select color scale:",
dcc.RadioItems(
id='colorscale',
options=[{'label': i, 'value': i}
for i in
['Viridis', 'Plasma']],
value='Plasma'
)]),
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("There are usually two ways multicollinearity, "
"which is when there are a number of variables "
"that are highly correlated, is dealt with:"),
html.P("1) Use PCA to obtain a set of orthogonal ("
"not correlated) variables to analyse."),
html.P("2) Use correlation of determination (R²) to "
"determine which variables are highly "
"correlated and use only 1 in analysis. "
"Cut off for highly correlated variables "
"is ~0.7."),
html.P(
"In any case, it depends on the machine learning algorithm you may apply later. For correlation robust algorithms,"
" such as Random Forest, correlation of features will not be a concern. For non-correlation robust algorithms such as Linear Discriminant Analysis, "
"all high correlation variables should be removed.")
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["Note: Data has been standardised (scale)"])
], style={'padding-left': '1%'})
])
]),
dcc.Tab(label='Plots', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([html.P("Selecting Features")], style={'padding-left': '1%',
'font-weight': 'bold'}),
html.Div([
html.P("Input here affects all plots, datatables and downloadable data output"),
html.Label([
"Would you like to analyse all variables or choose custom variables to "
"analyse:",
dcc.RadioItems(
id='all-custom-choice',
options=[{'label': 'All',
'value': 'All'},
{'label': 'Custom',
'value': 'Custom'}],
value='All'
)])
], style={'padding-left': '1%'}),
html.Div([
html.P("For custom variables input variables you would not like as features in your PCA:"),
html.Label(
[
"Note: Only input numerical variables (non-numerical variables have already "
"been removed from your dataframe)",
dcc.Dropdown(id='feature-input',
multi=True,
)])
], style={'padding': 10, 'padding-left': '1%'}),
]), dcc.Tabs(id='sub-tabs1', style=tabs_styles,
children=[
dcc.Tab(label='Biplot (Scores + loadings)', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='biplot', figure=fig)
], style={'height': '100%', 'width': '75%',
'padding-left': '20%'},
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-biplot',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-biplot',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([
html.Label([
"Graph Update to show either loadings (Loading Plot) or "
"scores and loadings (Biplot):",
dcc.RadioItems(
id='customvar-graph-update',
options=[{'label': 'Biplot',
'value': 'Biplot'},
{'label': 'Loadings',
'value': 'Loadings'}],
value='Biplot')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix. PCA is an unsupervised machine learning technique - it only "
"looks at the input features and does not take "
"into account the output or the target"
" (response) variable.")],
style={'padding-left': '1%'}),
html.Div([
html.P("For variables you have dropped..."),
html.Label([
"Would you like to introduce a first target variable"
" into your data visualisation?"
" (Graph type must be Biplot): "
"",
dcc.RadioItems(
id='radio-target-item',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select first target variable for color scale of scores: ",
dcc.Dropdown(
id='color-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Would you like to introduce a second target variable"
" into your data visualisation??"
" (Graph type must be Biplot):",
dcc.RadioItems(
id='radio-target-item-second',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select second target variable for size scale of scores:",
dcc.Dropdown(
id='size-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([html.Label(["Size range:"
, html.Div(
id='size-second-target-container')])
], style={'display': 'inline-block',
'float': 'right',
'padding-right': '5%'}
),
html.Div([
html.Br(),
html.P(
"A loading plot shows how "
"strongly each characteristic (variable)"
" influences a principal component. The angles between the vectors"
" tell us how characteristics correlate with one another: "),
html.P("1) When two vectors are close, forming a small angle, the two "
"variables they represent are positively correlated. "),
html.P(
"2) If they meet each other at 90°, they are not likely to be correlated. "),
html.P(
"3) When they diverge and form a large angle (close to 180°), they are negative correlated."),
html.P(
"The Score Plot involves the projection of the data onto the PCs in two dimensions."
"The plot contains the original data but in the rotated (PC) coordinate system"),
html.P(
"A biplot merges a score plot and loading plot together.")
], style={'padding-left': '1%'}
),
]),
dcc.Tab(label='Cos2', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='cos2-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-cos2',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'padding-left': '1%',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-cos2',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The squared cosine shows the importance of a "
"component for a given observation i.e. "
"measures "
" how much a variable is represented in a "
"component")
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Contribution', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='contrib-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-contrib',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
], style={'padding-left': '1%'})
], style={'display': 'inline-block',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-contrib',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The contribution plot contains the "
"contributions (in percentage) of the "
"variables to the principal components")
], style={'padding-left': '1%'}),
])
])
]),
dcc.Tab(label='Data tables', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([
html.Label(
["Note: Input in 'Plots' tab will provide output of data tables and the"
" downloadable PCA data"])
], style={'font-weight': 'bold', 'padding-left': '1%'}),
html.Div([html.A(
'Download PCA Data (scores for each principal component)',
id='download-link',
href="",
target="_blank"
)], style={'padding-left': '1%'}),
html.Div([html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(id="eigenA-outlier",
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])], style={'padding-left': '1%',
'display': 'inline-block', 'width': '49%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-data-table',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Div([
html.Label(["Correlation between Features"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-correlation',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-correlation-container'),
]),
html.Div([html.A(
'Download Feature Correlation data',
id='download-link-correlation',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Eigen Analysis of the correlation matrix"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-eigenA',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-eigenA-container'),
]),
html.Div([html.A(
'Download Eigen Analysis data',
id='download-link-eigenA',
href="",
download='Eigen_Analysis_data.csv',
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Loadings (Feature and PC correlation) from PCA"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-loadings',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-loadings-container'),
]),
html.Div([html.A(
'Download Loadings data',
id='download-link-loadings',
download='Loadings_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Cos2 from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-cos2',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-cos2-container'),
]),
html.Div([html.A(
'Download Cos2 data',
id='download-link-cos2',
download='Cos2_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Contributions from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-contrib',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-contrib-container'),
]),
html.Div([html.A(
'Download Contributions data',
id='download-link-contrib',
download='Contributions_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
])])
])
], style={'font-family': 'Raleway'})])
# READ FILE
def parse_contents(contents, filename):
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
try:
if 'csv' in filename:
# Assume that the user uploaded a CSV file
df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
            df = df.fillna(0)
        elif 'xls' in filename:
            # Assume that the user uploaded an excel file
            df = pd.read_excel(io.BytesIO(decoded))
            df = df.fillna(0)
        elif 'txt' in filename or 'tsv' in filename:
            df = pd.read_csv(io.StringIO(decoded.decode('utf-8')), delimiter=r'\s+')
            df = df.fillna(0)
except Exception as e:
print(e)
return html.Div([
'There was an error processing this file.'
])
return df
@app.callback(Output('csv-data', 'data'),
[Input('data-table-upload', 'contents')],
[State('data-table-upload', 'filename')])
def parse_uploaded_file(contents, filename):
if not filename:
return dash.no_update
df = parse_contents(contents, filename)
    df = df.fillna(0)
return df.to_json(date_format='iso', orient='split')
@app.callback(Output('PC-Var-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_graph_stat(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
data = Var_dff
elif outlier == 'Yes' and matrix_type == 'Correlation':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
data = Var_dff_outlier
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
data = Var_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
# combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
data = Var_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Cumulative Proportion of Explained Variance'],
mode='lines', line=dict(color='Red')))
return {'data': traces,
'layout': go.Layout(title='<b>Cumulative Scree Plot Proportion of Explained Variance</b>',
titlefont=dict(family='Helvetica', size=16),
xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True
}, yaxis={'title': 'Cumulative Explained Variance',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True,
'range': [0, 100]},
hovermode='closest', font=dict(family="Helvetica"), template="simple_white")
}
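# The callback below reports how many principal components are needed to reach roughly
# 70% cumulative explained variance. It interpolates the PC index at the 70% mark with
# np.interp(70, cumulative_percent, pc_numbers) and rounds up with math.ceil so the
# reported count always meets the threshold.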
@app.callback(
Output('var-output-container-filter', 'children'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_output(outlier, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int)
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_covar)
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
# combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
@app.callback(Output('PC-Eigen-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')]
)
def update_graph_stat(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == "Correlation":
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
eigenvalues = pca.explained_variance_
Eigen_df = pd.DataFrame(data=eigenvalues, columns=['Eigenvalues'])
Eigen_dff = pd.concat([PC_df, Eigen_df], axis=1)
data = Eigen_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier = pca_outlier.explained_variance_
Eigen_df_outlier = pd.DataFrame(data=eigenvalues_outlier, columns=['Eigenvalues'])
Eigen_dff_outlier = pd.concat([PC_df_outlier, Eigen_df_outlier], axis=1)
data = Eigen_dff_outlier
elif outlier == 'No' and matrix_type == "Covariance":
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
eigenvalues_covar = pca_covar.explained_variance_
Eigen_df_covar = pd.DataFrame(data=eigenvalues_covar, columns=['Eigenvalues'])
Eigen_dff_covar = pd.concat([PC_df_covar, Eigen_df_covar], axis=1)
data = Eigen_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
# combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier_covar = pca_outlier_covar.explained_variance_
Eigen_df_outlier_covar = pd.DataFrame(data=eigenvalues_outlier_covar, columns=['Eigenvalues'])
Eigen_dff_outlier_covar = pd.concat([PC_df_outlier_covar, Eigen_df_outlier_covar], axis=1)
data = Eigen_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Eigenvalues'], mode='lines'))
return {'data': traces,
'layout': go.Layout(title='<b>Scree Plot Eigenvalues</b>', xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True},
titlefont=dict(family='Helvetica', size=16),
yaxis={'title': 'Eigenvalues', 'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True}, hovermode='closest',
font=dict(family="Helvetica"), template="simple_white", )
}
def round_up(n, decimals=0):
multiplier = 10 ** decimals
return math.ceil(n * multiplier) / multiplier
def round_down(n, decimals=0):
multiplier = 10 ** decimals
return math.floor(n * multiplier) / multiplier
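# The heatmap callback below visualises the loading matrix. Loadings are computed
# throughout this app as components_.T * sqrt(explained_variance_); for
# correlation-matrix PCA each entry is the Pearson correlation between a feature and a
# principal component. A minimal sketch of that step (illustrative names only):
#
#     loadings = pca.components_.T * np.sqrt(pca.explained_variance_)
#     loading_df = pd.DataFrame(loadings, index=feature_names,
#                               columns=['PC' + str(i + 1) for i in range(loadings.shape[1])])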
@app.callback([Output('PC-feature-heatmap', 'figure'),
Output('color-range-container', 'children')],
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input("matrix-type-heatmap", "value"),
Input('csv-data', 'data')]
)
def update_graph_stat(outlier, colorscale, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
# INCLUDING OUTLIERS
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
# explained variance of the two principal components
# print(pca.explained_variance_ratio_)
# Explained variance tells us how much information (variance) can be attributed to each of the principal components
# loading of each feature in principal components
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
# OUTLIERS REMOVED
z_scores_hm = scipy.stats.zscore(dff)
abs_z_scores_hm = np.abs(z_scores_hm)
filtered_entries_hm = (abs_z_scores_hm < 3).all(axis=1)
outlier_dff_hm = dff[filtered_entries_hm]
features1_outlier_hm = outlier_dff_hm.columns
features_outlier2 = list(features1_outlier_hm)
outlier_names1_hm = df[filtered_entries_hm]
outlier_names_hm = outlier_names1_hm.iloc[:, 0]
x_outlier_hm = outlier_dff_hm.loc[:, features_outlier2].values
# Separating out the target (if any)
# Standardizing the features
x_outlier_hm = StandardScaler().fit_transform(x_outlier_hm)
pca_outlier_hm = PCA(n_components=len(features_outlier2))
principalComponents_outlier_hm = pca_outlier_hm.fit_transform(x_outlier_hm)
principalDf_outlier_hm = pd.DataFrame(data=principalComponents_outlier_hm
, columns=['PC' + str(i + 1) for i in range(len(features_outlier2))])
# combining principal components and target
finalDf_outlier_hm = pd.concat([outlier_names_hm, principalDf_outlier_hm], axis=1)
dfff_outlier_hm = finalDf_outlier_hm
# calculating loading
loading_outlier_hm = pca_outlier_hm.components_.T * np.sqrt(pca_outlier_hm.explained_variance_)
loading_df_outlier_hm = pd.DataFrame(data=loading_outlier_hm[0:, 0:], index=features_outlier2,
columns=['PC' + str(i + 1) for i in range(loading_outlier_hm.shape[1])])
loading_dff_outlier_hm = loading_df_outlier_hm.T
# COVAR MATRIX
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
loading_dff_covar = loading_df_covar.T
# COVAR MATRIX OUTLIERS REMOVED
if outlier == 'No' and matrix_type == "Correlation":
data = loading_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_dff_outlier_hm
elif outlier == 'No' and matrix_type == "Covariance":
data = loading_dff_covar
elif outlier == "Yes" and matrix_type == "Covariance":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
# combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
loading_dff_outlier_covar = loading_df_outlier_covar.T
data = loading_dff_outlier_covar
size_range = [round_up(data.values.min(), 2), round_down(data.values.max(), 2)]
traces.append(go.Heatmap(
    z=data, x=list(data.columns), y=list(data.index),
    colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
    # each cell is the loading (a correlation, for correlation-matrix PCA) between a feature (x) and a principal component (y)
    colorbar={"title": "Loading"}
))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>PC and Feature Correlation Analysis</b>'),
xaxis=dict(title_text='Features', title_standoff=50),
titlefont=dict(family='Helvetica', size=16),
hovermode='closest', margin={'b': 110, 't': 50, 'l': 75},
font=dict(family="Helvetica", size=11),
annotations=[
dict(x=-0.16, y=0.5, showarrow=False, text="Principal Components",
xref='paper', yref='paper', textangle=-90,
font=dict(size=12))]
),
}, '{}'.format(size_range)
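# The feature-correlation heatmap below squares the Pearson correlation matrix
# element-wise (dff.corr() ** 2), so each cell is the coefficient of determination (R²)
# between a pair of features, optionally after dropping rows that contain any |z| >= 3 value.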
@app.callback(Output('feature-heatmap', 'figure'),
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input('csv-data', 'data')])
def update_graph_stat(outlier, colorscale, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
if outlier == 'No':
features1 = dff.columns
features = list(features1)
# correlation coefficient and coefficient of determination
correlation_dff = dff.corr(method='pearson', )
r2_dff = correlation_dff * correlation_dff
data = r2_dff
feat = features
elif outlier == 'Yes':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
# correlation coefficient and coefficient of determination
correlation_dff_outlier = outlier_dff.corr(method='pearson', )
r2_dff_outlier = correlation_dff_outlier * correlation_dff_outlier
data = r2_dff_outlier
feat = features_outlier
traces.append(go.Heatmap(
z=data, x=feat, y=feat, colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
# each cell shows the squared Pearson correlation (R²) between a pair of features
colorbar={"title": "R²", 'tickvals': [0, 0.2, 0.4, 0.6, 0.8, 1]}))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>Feature Correlation Analysis</b>', y=0.97, x=0.6),
xaxis={},
titlefont=dict(family='Helvetica', size=16),
yaxis={},
hovermode='closest', margin={'b': 110, 't': 50, 'l': 180, 'r': 50},
font=dict(family="Helvetica", size=11)),
}
@app.callback(Output('feature-input', 'options'),
[Input('all-custom-choice', 'value'),
Input('csv-data', 'data')])
def activate_input(all_custom, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
options = []
elif all_custom == 'Custom':
options = [{'label': i, 'value': i} for i in dff.columns]
return options
@app.callback(Output('color-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')], )
def populate_color_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
@app.callback(Output('size-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item-second', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')])
def populate_size_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
# resume covar matrix...
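# The biplot callback below overlays two layers: (1) a scatter of observation scores in
# the PC1/PC2 plane and (2) one loading vector per feature, drawn as a line from the
# origin to that feature's (PC1, PC2) loading. The segments are built by concatenating
# each loading row with a matching all-zero row that shares the same 'line_group' label,
# which is why the zero_* DataFrames appear repeatedly below.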
@app.callback(Output('biplot', 'figure'),
[
Input('outlier-value-biplot', 'value'),
Input('feature-input', 'value'),
Input('customvar-graph-update', 'value'),
Input('color-scale-scores', 'value'),
Input('radio-target-item', 'value'),
Input('size-scale-scores', 'value'),
Input('radio-target-item-second', 'value'),
Input('all-custom-choice', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_graph_custom(outlier, input, graph_update, color, target, size, target2, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
features1 = dff.columns
features = list(features1)
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
# x_scale = rescale(x_scale, new_min=0, new_max=1)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
dfff_scale = finalDf_scale.fillna(0)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_dff = pd.concat([zero_scale_df, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# x_outlier_scale = MinMaxScaler().fit_transform(x_outlier_scale)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_outlier_scale = rescale(x_outlier_scale, new_min=0, new_max=1)
# uses covariance matrix
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
dfff_outlier_scale = finalDf_outlier_scale.fillna(0)
# calculating loading
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_df, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
dfff_scale_covar = finalDf_scale_covar.fillna(0)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_df_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, line_group_scale_df_covar], axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
# COVARIANCE MATRIX OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
dfff_outlier_scale_covar = finalDf_outlier_scale_covar.fillna(0)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_df_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_dff_covar = pd.concat([zero_outlier_scale_df_covar, line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_outlier_scale_covar
trace2_all = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, showscale=False, size=12,
line=dict(width=0.5, color='DarkSlateGrey'),
),
)
####################################################################################################
# INCLUDE THIS
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_covar
variance = Var_outlier_scale_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], line=dict(color="#4f4f4f"),
name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text',
textposition='bottom right', textfont=dict(size=12)
)
lists[counter] = trace1_all
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2_all)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == 'Custom':
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# x_scale_input = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input = rescale(x_scale_input, new_min=0, new_max=1)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_df, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_dff = pd.concat([zero_scale_input_df, line_group_scale_input_df], axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# x_scale_input_outlier = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input_outlier)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input_outlier = rescale(x_scale_input_outlier, new_min=0, new_max=1)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat([loading_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_df_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, line_group_scale_input_df_covar], axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
# COVARIANCE MATRIX OUTLIERS
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat([loading_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff_covar = pd.concat([zero_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale_input
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_scale_input_outlier
variance = Var_scale_input_outlier
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_input_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_scale_input_outlier_covar
variance = Var_scale_input_outlier_covar
trace2 = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
marker_color=dat[color] if target == 'Yes' else None,
marker_size=dat[size] if target2 == 'Yes' else 12,
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, colorscale='Plasma',
sizeref=max(dat[size]) / (15 ** 2) if target2 == 'Yes' else None,
sizemode='area',
showscale=True if target == 'Yes' else False,
line=dict(width=0.5, color='DarkSlateGrey'),
colorbar=dict(title=dict(text=color if target == 'Yes' else None,
font=dict(family='Helvetica'),
side='right'), ypad=0),
),
)
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_scale_input_outlier_line_graph
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_scale_input_outlier_line_graph_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'],
line=dict(color="#666666" if target == 'Yes' else '#4f4f4f'), name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
)
lists[counter] = trace1
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
@app.callback(
Output('size-second-target-container', 'children'),
[Input('size-scale-scores', 'value'),
Input('outlier-value-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_output(size, outlier, data):
if not data:
return dash.no_update
if size is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
z_scores_dff_size = scipy.stats.zscore(dff)
abs_z_scores_dff_size = np.abs(z_scores_dff_size)
filtered_entries_dff_size = (abs_z_scores_dff_size < 3).all(axis=1)
dff_target_outlier_size = dff[filtered_entries_dff_size]
if outlier == 'Yes':
size_range = [round(dff_target_outlier_size[size].min(), 2), round(dff_target_outlier_size[size].max(), 2)]
elif outlier == 'No':
size_range = [round(dff[size].min(), 2), round(dff[size].max(), 2)]
return '{}'.format(size_range)
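# --- Illustrative sketch only (not used by the callback below; the helper name is
# --- hypothetical). cos2 measures how well a feature is represented by the first two
# --- principal components: the squared loading on PC1 plus the squared loading on PC2.
def _cos2_first_two_pcs(loading_df):
    """loading_df: DataFrame with 'PC1' and 'PC2' loading columns, one row per feature."""
    return loading_df["PC1"] ** 2 + loading_df["PC2"] ** 2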
@app.callback(Output('cos2-plot', 'figure'),
[
Input('outlier-value-cos2', 'value'),
Input('feature-input', 'value'),
Input('all-custom-choice', 'value'),
Input("matrix-type-cos2", "value"),
Input('csv-data', 'data')
])
def update_cos2_plot(outlier, input, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
features1 = dff.columns
features = list(features1)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df['cos2'] = (loading_scale_df["PC1"] ** 2) + (loading_scale_df["PC2"] ** 2)
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_df_color = pd.DataFrame(data=loading_scale_df.iloc[:, 2], columns=['cos2'])
zero_scale_dff = pd.concat([zero_scale_df, zero_scale_df_color, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# x_outlier_scale = MinMaxScaler().fit_transform(x_outlier_scale)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
#
# x_outlier_scale = rescale(x_outlier_scale, new_min=0, new_max=1)
# uses covariance matrix
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
# calculating loading
# calculating loading vector plot
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df["cos2"] = (loading_outlier_scale_df["PC1"] ** 2) + (
loading_outlier_scale_df["PC2"] ** 2)
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_df, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_df_color = pd.DataFrame(data=loading_outlier_scale_df.iloc[:, 2], columns=['cos2'])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, zero_outlier_scale_df_color, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
loading_scale_line_graph_sort = loading_scale_line_graph.sort_values(by='cos2')
loading_outlier_scale_line_graph_sort = loading_outlier_scale_line_graph.sort_values(by='cos2')
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df_covar['cos2'] = (loading_scale_df_covar["PC1"] ** 2) + (loading_scale_df_covar["PC2"] ** 2)
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_df_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_df_color_covar = pd.DataFrame(data=loading_scale_df_covar.iloc[:, 2], columns=['cos2'])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, zero_scale_df_color_covar, line_group_scale_df_covar],
axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
loading_scale_line_graph_sort_covar = loading_scale_line_graph_covar.sort_values(by='cos2')
# COVARIANCE MATRIX WITH OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
        # calculating loading vector plot
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df_covar["cos2"] = (loading_outlier_scale_df_covar["PC1"] ** 2) + (
loading_outlier_scale_df_covar["PC2"] ** 2)
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_df_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_df_color_covar = pd.DataFrame(data=loading_outlier_scale_df_covar.iloc[:, 2],
columns=['cos2'])
zero_outlier_scale_dff_covar = pd.concat([zero_outlier_scale_df_covar, zero_outlier_scale_df_color_covar,
line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
loading_outlier_scale_line_graph_sort_covar = loading_outlier_scale_line_graph_covar.sort_values(by='cos2')
# scaling data
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph_sort
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph_sort
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_sort_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_sort_covar
variance = Var_outlier_scale_covar
N = len(data['cos2'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter = 0
counter_color = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], mode='lines+text',
name=i, line=dict(color=colorscale[counter_color]),
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
textposition='bottom right', textfont=dict(size=12)
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers',
hoverinfo='skip',
marker=dict(showscale=True, opacity=0,
color=[data["cos2"].min(), data["cos2"].max()],
colorscale=colorscale,
colorbar=dict(title=dict(text="Cos2",
side='right'), ypad=0)
), )
lists[counter] = trace1_all
counter = counter + 1
counter_color = counter_color + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)), mirror=True,
ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)), mirror=True,
ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == "Custom":
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# # x_scale_input = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
#
# x_scale_input = rescale(x_scale_input, new_min=0, new_max=1)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df["cos2"] = (loading_scale_input_df["PC1"] ** 2) + (loading_scale_input_df["PC2"] ** 2)
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_df, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_df_color = pd.DataFrame(data=loading_scale_input_df.iloc[:, 2], columns=['cos2'])
zero_scale_input_dff = pd.concat([zero_scale_input_df, zero_scale_input_df_color, line_group_scale_input_df],
axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# # x_scale_input_outlier = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input_outlier)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input_outlier = rescale(x_scale_input_outlier, new_min=0, new_max=1)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df["cos2"] = (loading_scale_input_outlier_df["PC1"] ** 2) + \
(loading_scale_input_outlier_df["PC2"] ** 2)
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat([loading_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color = pd.DataFrame(data=loading_scale_input_outlier_df.iloc[:, 2],
columns=['cos2'])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, zero_scale_input_outlier_df_color,
line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
loading_scale_input_line_graph_sort = loading_scale_input_line_graph.sort_values(by='cos2')
loading_scale_input_outlier_line_graph_sort = loading_scale_input_outlier_line_graph.sort_values(by='cos2')
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df_covar["cos2"] = (loading_scale_input_df_covar["PC1"] ** 2) + (
loading_scale_input_df_covar["PC2"] ** 2)
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_df_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_df_color_covar = pd.DataFrame(data=loading_scale_input_df_covar.iloc[:, 2], columns=['cos2'])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, zero_scale_input_df_color_covar,
line_group_scale_input_df_covar],
axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
loading_scale_input_line_graph_sort_covar = loading_scale_input_line_graph_covar.sort_values(by='cos2')
# COVARIANCE MATRIX WITH OUTLIERS
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df_covar["cos2"] = (loading_scale_input_outlier_df_covar["PC1"] ** 2) + \
(loading_scale_input_outlier_df_covar["PC2"] ** 2)
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat([loading_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color_covar = pd.DataFrame(data=loading_scale_input_outlier_df_covar.iloc[:, 2],
columns=['cos2'])
zero_scale_input_outlier_dff_covar = pd.concat([zero_scale_input_outlier_df_covar,
zero_scale_input_outlier_df_color_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
loading_scale_input_outlier_line_graph_sort_covar = loading_scale_input_outlier_line_graph_covar.sort_values(
by='cos2')
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph_sort
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
variance = Var_scale_input_outlier
data = loading_scale_input_outlier_line_graph_sort
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_sort_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
variance = Var_scale_input_outlier_covar
data = loading_scale_input_outlier_line_graph_sort_covar
N = len(data['cos2'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter_color = 0
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'], name=i, line=dict(color=colorscale[counter_color]),
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers', hoverinfo='skip',
marker=dict(showscale=True, color=[data["cos2"].min(), data["cos2"].max()],
colorscale=colorscale, opacity=0,
colorbar=dict(title=dict(text="Cos2",
side='right'), ypad=0)
), )
lists[counter] = trace1
counter_color = counter_color + 1
counter = counter + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)),
mirror=True, ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)),
mirror=True, ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
@app.callback(Output('contrib-plot', 'figure'),
[
Input('outlier-value-contrib', 'value'),
Input('feature-input', 'value'),
Input('all-custom-choice', 'value'),
Input("matrix-type-contrib", "value"),
Input('csv-data', 'data')
])
def update_cos2_plot(outlier, input, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
features1 = dff.columns
features = list(features1)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df["PC1_cos2"] = loading_scale_df["PC1"] ** 2
loading_scale_df["PC2_cos2"] = loading_scale_df["PC2"] ** 2
loading_scale_df["PC1_contrib"] = \
(loading_scale_df["PC1_cos2"] * 100) / (loading_scale_df["PC1_cos2"].sum(axis=0))
loading_scale_df["PC2_contrib"] = \
(loading_scale_df["PC2_cos2"] * 100) / (loading_scale_df["PC2_cos2"].sum(axis=0))
loading_scale_df["contrib"] = loading_scale_df["PC1_contrib"] + loading_scale_df["PC2_contrib"]
        # after you've got the sum of contrib (colorscale), put it together with PC1 and PC2 in a separate df
loading_scale_dataf = pd.concat([loading_scale_df.iloc[:, 0:2], loading_scale_df.iloc[:, 6]], axis=1)
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_dataf, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_df_color = pd.DataFrame(data=loading_scale_dataf.iloc[:, 2], columns=['contrib'])
zero_scale_dff = pd.concat([zero_scale_df, zero_scale_df_color, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df["PC1_cos2"] = loading_outlier_scale_df["PC1"] ** 2
loading_outlier_scale_df["PC2_cos2"] = loading_outlier_scale_df["PC2"] ** 2
loading_outlier_scale_df["PC1_contrib"] = \
(loading_outlier_scale_df["PC1_cos2"] * 100) / (loading_outlier_scale_df["PC1_cos2"].sum(axis=0))
loading_outlier_scale_df["PC2_contrib"] = \
(loading_outlier_scale_df["PC2_cos2"] * 100) / (loading_outlier_scale_df["PC2_cos2"].sum(axis=0))
loading_outlier_scale_df["contrib"] = loading_outlier_scale_df["PC1_contrib"] + loading_outlier_scale_df[
"PC2_contrib"]
        # after you've got the sum of contrib (colorscale), put it together with PC1 and PC2 in a separate df
loading_outlier_scale_dataf = pd.concat(
[loading_outlier_scale_df.iloc[:, 0:2], loading_outlier_scale_df.iloc[:, 6]], axis=1)
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_dataf, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_df_color = pd.DataFrame(data=loading_outlier_scale_dataf.iloc[:, 2], columns=['contrib'])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, zero_outlier_scale_df_color, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
loading_scale_line_graph_sort = loading_scale_line_graph.sort_values(by='contrib')
loading_outlier_scale_line_graph_sort = loading_outlier_scale_line_graph.sort_values(by='contrib')
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df_covar["PC1_cos2"] = loading_scale_df_covar["PC1"] ** 2
loading_scale_df_covar["PC2_cos2"] = loading_scale_df_covar["PC2"] ** 2
loading_scale_df_covar["PC1_contrib"] = \
(loading_scale_df_covar["PC1_cos2"] * 100) / (loading_scale_df_covar["PC1_cos2"].sum(axis=0))
loading_scale_df_covar["PC2_contrib"] = \
(loading_scale_df_covar["PC2_cos2"] * 100) / (loading_scale_df_covar["PC2_cos2"].sum(axis=0))
loading_scale_df_covar["contrib"] = loading_scale_df_covar["PC1_contrib"] + loading_scale_df_covar[
"PC2_contrib"]
loading_scale_dataf_covar = pd.concat([loading_scale_df_covar.iloc[:, 0:2], loading_scale_df_covar.iloc[:, 6]],
axis=1)
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_dataf_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_df_color_covar = pd.DataFrame(data=loading_scale_dataf_covar.iloc[:, 2], columns=['contrib'])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, zero_scale_df_color_covar, line_group_scale_df_covar],
axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
loading_scale_line_graph_sort_covar = loading_scale_line_graph_covar.sort_values(by='contrib')
# COVARIANCE MATRIX OUTLIERS REMOVED
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
        finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
import sys
#sys.path.append("..")
import numpy as np
import theano
import theano.tensor as T
import theano.tensor.signal.conv as C
from epimodel import EpidemiologicalParameters, preprocess_data
np.random.seed(123456)
import argparse
import copy
import datetime
import itertools
import pickle
import re
from datetime import timedelta
import matplotlib.dates as mdates
import pandas as pd
import pymc3 as pm
from sklearn import preprocessing
US = True
MOBI = "include" # "exclude"
RADER_JOINT = False
GATHERINGS = 3
# MASKING = True # Now always true
# smoothing happens at data init time if at all
assert MOBI in ["exclude", "only", "include"]
mob_cols = [
"avg_mobility_no_parks_no_residential",
"residential_percent_change_from_baseline",
]
# Ds = pd.date_range('2020-05-01', '2020-09-01', freq='D')
# Adding 20 days to account for death delay
Ds = pd.date_range("2020-05-01", "2020-09-21", freq="D")
def fill_missing_days(df):
df = df.set_index(["date", "country"])
df = df.unstack(fill_value=-1).asfreq("D", fill_value=-1).stack().reset_index()
df = df.replace(-1, np.nan)
return interpolate_wearing_fwd_bwd(df)
def interpolate_wearing_fwd_bwd(df):
regions = df.country.unique()
cs = []
for r in regions:
c = df[df.country == r]
c = c.set_index("date")
c = c.interpolate(method="time", limit_direction="both").reset_index()
cs.append(c)
return pd.concat(cs)
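# fill_missing_days pivots to a (date x country) grid with a -1 placeholder, re-indexes to a
# daily frequency, stacks back and turns the placeholders into NaN so that
# interpolate_wearing_fwd_bwd can time-interpolate each country's series in both directions.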
# moving average
def smooth(x, window_len=7):
l = window_len
s = np.r_[x[l - 1 : 0 : -1], x, x[-2 : -l - 1 : -1]]
w = np.ones(window_len, "d")
return np.convolve(w / w.sum(), s, mode="valid")
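# smooth() reflects the signal at both ends before applying the length-7 box filter, so the
# 'valid' convolution returns len(x) + window_len - 1 points; smooth_rader trims the surplus
# with [:-win + 1] to recover the original length.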
def smooth_rader(df, win=7):
for r in df.label.unique():
s = df[df.label == r]
s["percent_mc"] = smooth(s["percent_mc"], window_len=win)[: -win + 1]
df[df.label == r] = s
return df
def joint_shop_work(df, THRESHOLD=2):
return (df.likely_wear_mask_grocery_shopping <= THRESHOLD) & (
df.likely_wear_mask_workplace <= THRESHOLD
)
def mean_shop_work(df, THRESHOLD=2):
venues = ["likely_wear_mask_grocery_shopping", "likely_wear_mask_workplace"]
df["percent_mc"] = df[venues].mean(axis=1)
return df["percent_mc"] <= THRESHOLD
def load_and_clean_rader_raw(THRESHOLD=2, SMOOTH_RADER=True): # or less
DATA_IN = "data/raw/"
directory = DATA_IN + "rader/sm_cny_data_1_21_21.csv"
us = pd.read_csv(directory)
masks = [
"likely_wear_mask_exercising_outside",
"likely_wear_mask_grocery_shopping",
"likely_wear_mask_visit_family_friends",
"likely_wear_mask_workplace",
]
# weights = ["weight_daily_national_13plus", "weight_state_weekly"]
us = us[["response_date", "state"] + masks] # + weights
codes = pd.read_excel(DATA_IN + "rader/cny_sm_codebook_2_5_21.xls")
num2name = codes[codes["column"] == "state"][["value", "label"]]
us = pd.merge(us, num2name, left_on="state", right_on="value").drop(
["value", "state"], axis=1
)
us["response_date"] = pd.to_datetime(us["response_date"])
if RADER_JOINT:
us["percent_mc"] = joint_shop_work(us, THRESHOLD)
else:
us["percent_mc"] = mean_shop_work(us, THRESHOLD)
us = (
us[["response_date", "label", "percent_mc"]]
.groupby(["response_date", "label"])
.mean()
.reset_index()
)
if SMOOTH_RADER:
us = smooth_rader(us)
return us
def load_and_clean_rader(THRESHOLD=2, SMOOTH_RADER=True): # or less
DATA_IN = "data/raw/"
directory = DATA_IN + "rader/rader_us_wearing_aggregated_mean_shop_and_work.csv"
us = pd.read_csv(directory)
us["response_date"] = pd.to_datetime(us["response_date"])
return us
def add_dummy_wearing_us(us, backfill=True):
rader_start = us.date.iloc[0] - timedelta(days=1)
fill_days = pd.date_range(Ds[0], rader_start, freq="D")
Rs = us.country.unique()
if backfill:
for s in Rs:
df = pd.DataFrame(columns=["date", "country", "percent_mc"])
df.date = fill_days
df.country = s
fill = us.set_index(["country", "date"]).loc[s].percent_mc.iloc[0]
df.percent_mc = fill
us = pd.concat([df, us])
# totally random dummy
else:
for s in us.country.unique():
df = pd.DataFrame(columns=["date", "country", "percent_mc"])
df.date = fill_days
df.country = s
df.percent_mc = np.random.random(len(df))
us = pd.concat([df, us])
us = us.sort_values(["date", "country"])
return us
def load_and_clean_wearing():
wearing = pd.read_csv(
"data/raw/umd/umd_national_wearing.csv",
parse_dates=["survey_date"],
infer_datetime_format=True,
).drop_duplicates()
wearing = wearing[(wearing.survey_date >= Ds[0]) & (wearing.survey_date <= Ds[-1])]
cols = ["country", "survey_date", "percent_mc"]
wearing = wearing[cols]
cols = ["country", "date", "percent_mc"]
wearing.columns = cols
# Append US
us_wearing = load_and_clean_rader()
us_wearing.columns = ["date", "country", "percent_mc"]
us_wearing = us_wearing[cols]
us_wearing = us_wearing.replace("Georgia", "Georgia-US")
us_wearing = us_wearing.replace("District of Columbia (DC)", "District of Columbia")
# Add dummy wearing back to 1st May
us_wearing = add_dummy_wearing_us(us_wearing, backfill=True)
    wearing = pd.concat([wearing, us_wearing])
import pandas
import argparse
import ast
# Load arguments from the command line
parser = argparse.ArgumentParser()
parser.add_argument("--tp", type=str, help="The filename or path to the true positive csv", default='')
parser.add_argument("--tn", type=str, help="The filename or path to the true positive csv", default='')
parser.add_argument("-t", "--target", type=bool, help="True if you want to only convert the target", default=False)
args = parser.parse_args()
# Loads the source and target map csvs
source_map_df = pandas.read_csv('data/source_map.csv', converters={'cuis':ast.literal_eval})
target_map_df = pandas.read_csv('data/target_map.csv', converters={'cuis':ast.literal_eval})
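# converters={'cuis': ast.literal_eval} applies literal_eval to every value in the 'cuis'
# column while reading, so entries saved as stringified Python literals (presumably lists of
# CUIs) come back as Python objects rather than plain strings.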
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from pandas.util._decorators import doc
from ..util import save_docx_table, get_top_substrs
from .dataset import _file_docs, _shared_docs
_plot_docs = _file_docs.copy()
_plot_docs['scope'] = _shared_docs['scope']
_plot_docs['subjects'] = _shared_docs['subjects']
_plot_docs['decode_values'] = '''decode_values : bool, optional
When handling categorical variables
that have been encoded through a BPt
dataset method, e.g., :func:`Dataset.ordinalize`,
    you may use either
    the original categorical values before encoding
    with decode_values = True, or the current
    internal values with decode_values = False.
::
default = True
'''
def _show_values(axs, count=True, space=0):
'''code from https://www.statology.org/seaborn-barplot-show-values/'''
def _single(ax):
longest = 0
for p in ax.patches:
_x = p.get_x() + p.get_width() + float(space)
_y = p.get_y() + p.get_height() - (p.get_height()*0.5)
if count:
value = f'{int(p.get_width())}'
else:
value = f'{p.get_width():.2f}'
# Keep track of longest label
if len(value) > longest:
longest = len(value)
ax.text(_x, _y, value, ha="left")
# Scale x lim a bit to compensate for adding extra text
# Specifically by longest value
scale = 1 + (longest / 100)
ax.set_xlim([scale*x for x in ax.get_xlim()])
if isinstance(axs, np.ndarray):
for ax in axs:
_single(ax)
else:
_single(axs)
def nan_info(self, scope='all'):
# Get data based on passed scope
cols = self.get_cols(scope)
data = self[cols]
# Check for data files
self._data_file_fail_check(cols)
# Get NaN counts
na_counts = data.isna().sum().sort_values(ascending=False)
if na_counts.sum() > 0:
print('Loaded NaN Info:')
print('There are:', na_counts.sum(), 'total missing values')
u_counts, c_counts = np.unique(na_counts, return_counts=True)
u_counts, c_counts = u_counts[1:], c_counts[1:]
inds = c_counts.argsort()
u_counts = u_counts[inds[::-1]]
c_counts = c_counts[inds[::-1]]
for u, c in zip(u_counts, c_counts):
if c > 1:
keys = list(na_counts[na_counts == u].index)
substrs = get_top_substrs(keys)
print(c, ' columns found with ', u, ' missing values',
' (column name overlap: ', substrs, ')', sep='')
print()
def _cont_info(self, cont_cols, subjs, measures, decimals, **extra_args):
if len(cont_cols) == 0:
        return pd.DataFrame()
import h5py
import numpy as np
import pandas as pd
import os
from multiprocessing import cpu_count, Pool
from alcokit.util import fs_dict, is_audio_file
from alcokit.hdf.api import Database
from alcokit.score import Score
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# TODO : add handling of sparse matrices ?
def default_extract_func(abs_path):
from alcokit.fft import FFT
fft = abs(FFT.stft(abs_path))
score = Score.from_recurrence_matrix(fft)
return dict(fft=({}, fft.T), score=({}, score))
def sizeof_fmt(num, suffix='b'):
"""
straight from https://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
"""
for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def _empty_info(features_names):
tuples = [("directory", ""), ("name", ""),
*[t for feat in features_names for t in [(feat, "dtype"), (feat, "shape"), (feat, "size")]
if feat != "score"]
]
idx = pd.MultiIndex.from_tuples(tuples)
return pd.DataFrame([], columns=idx)
def split_path(path):
parts = path.split("/")
prefix, file_name = "/".join(parts[:-1]), parts[-1]
return prefix, file_name
def file_to_db(abs_path, extract_func=default_extract_func, mode="w"):
"""
if mode == "r+" this will either:
- raise an Exception if the feature already exists
- concatenate data along the "feature_axis", assuming that each feature correspond to the same file
or file collections.
If you want to concatenate dbs along the "file_axis" consider using `concatenate_dbs(..)`
@param abs_path:
@param extract_func:
@param mode:
@return:
"""
logger.info("making db for %s" % abs_path)
tmp_db = ".".join(abs_path.split(".")[:-1] + ["h5"])
rv = extract_func(abs_path)
info = _empty_info(rv.keys())
info.loc[0, [("directory", ""), ("name", "")]] = split_path(abs_path)
with h5py.File(tmp_db, mode) as f:
for name, (attrs, data) in rv.items():
if issubclass(type(data), np.ndarray):
ds = f.create_dataset(name=name, shape=data.shape, data=data)
ds.attrs.update(attrs)
info.loc[0, name] = ds.dtype, ds.shape, sizeof_fmt(data.nbytes)
elif issubclass(type(data), pd.DataFrame):
pd.DataFrame(data).to_hdf(tmp_db, name, "r+")
f.flush()
if "info" in f.keys():
prior = pd.read_hdf(tmp_db, "info", "r")
info = pd.concat((prior, info.iloc[:, 2:]), axis=1)
info.to_hdf(tmp_db, "info", "r+")
f.close()
return tmp_db
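# Rough usage sketch (hypothetical path; assumes the default extractor and an audio file
# readable by alcokit):
#   db_path = file_to_db("/data/audio/track01.wav")
#   # -> writes /data/audio/track01.h5 with an "fft" dataset, a "score" table, and an "info"
#   #    table describing the dtype/shape/size of each stored feature.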
def make_db_for_each_file(root_directory,
extract_func=default_extract_func,
extension_filter=is_audio_file,
n_cores=cpu_count()):
root_name, tree = fs_dict(root_directory, extension_filter)
args = [(os.path.join(dir, file), extract_func)
for dir, files in tree.items() for file in files]
with Pool(n_cores) as p:
tmp_dbs = p.starmap(file_to_db, args)
return tmp_dbs
def collect_infos(tmp_dbs):
infos = []
for db in tmp_dbs:
infos += [Database(db).info]
return pd.concat(infos, ignore_index=True)
def collect_scores(tmp_dbs):
scores = []
offset = 0
for db in tmp_dbs:
scr = Database(db).score
scr.loc[:, ("start", "stop")] = scr.loc[:, ("start", "stop")].values + offset
scr.loc[:, "name"] = ".".join(db.split(".")[:-1])
scores += [scr]
offset = scr.last_stop
return pd.DataFrame(pd.concat(scores, ignore_index=True))
def zip_prev_next(iterable):
return zip(iterable[:-1], iterable[1:])
def ds_definitions_from_infos(infos):
tb = infos.iloc[:, 2:].T
paths = ["/".join(parts) for parts in infos.iloc[:, :2].values]
# change the paths' extensions
paths = [".".join(path.split(".")[:-1]) + ".h5" for path in paths]
features = set(tb.index.get_level_values(0))
ds_definitions = {}
for f in features:
dtype = tb.loc[(f, "dtype"), :].unique().item()
shapes = tb.loc[(f, "shape"), :].values
dims = shapes[0][1:]
assert all(shp[1:] == dims for shp in
shapes[1:]), "all features should have the same dimensions but for the first axis"
layout = Score.from_duration([s[0] for s in shapes])
ds_shape = (layout.last_stop, *dims)
layout.index = paths
ds_definitions[f] = {"shape": ds_shape, "dtype": dtype, "layout": layout}
return ds_definitions
def create_datasets_from_defs(target, defs, mode="w"):
with h5py.File(target, mode) as f:
for name, params in defs.items():
f.create_dataset(name, shape=params["shape"], dtype=params["dtype"])
layout = params["layout"]
layout.reset_index(drop=False, inplace=True)
layout = layout.rename(columns={"index": "name"})
pd.DataFrame(layout).to_hdf(target, "layouts/" + name, "r+")
f.close()
return
def make_integration_args(target):
args = []
with h5py.File(target, "r") as f:
for feature in f["layouts"].keys():
            df = Score(pd.read_hdf(target, "layouts/" + feature))
# Copyright 2021 The CGLB Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import reduce
from operator import iconcat
from pathlib import Path
from typing import List, Optional, Union
import glob
import click
import json
import pandas as pd
import numpy as np
from cglb_experiments.plotting import Plotter, TablePrinter, XAxis
from cglb_experiments.utils import short_names
@click.group()
def main():
pass
output_format_type = click.Choice(["standard", "latex", "markdown", "html", "excel"])
@main.command()
@click.option("-f", "--output-format", type=output_format_type, default="standard")
@click.option("-o", "--output", type=click.Path())
@click.option("--paths-file/--no-paths-file", default=False)
@click.argument("files", nargs=-1, type=click.Path(resolve_path=True, dir_okay=False, exists=True))
def results_table(files: List[str], output_format: str, paths_file, output: str):
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
pd.set_option("precision", 3)
models = dict()
dataset_names = set()
if paths_file:
# File with a list of paths to files with results
file = Path(files[0]).expanduser().resolve()
files = file.read_text().split()
for file in files:
f = Path(file)
file_text = Path(f).expanduser().read_text()
content = json.loads(file_text)
_content_id = content.pop("id")
dataset, name = short_names(f)
dataset_names.add(dataset)
column = pd.DataFrame(content.values(), index=content.keys(), columns=[dataset])
columns = models.get(name, column)
if columns is not column:
            models[name] = pd.concat([columns, column], axis=1)
import os
import json
import pandas as pd
import zipfile
from werkzeug.utils import secure_filename
import shutil
import time
from random import randint
from datetime import timedelta
import tempfile
import sys
from elasticsearch import Elasticsearch
##
##
# dataframes
from dataframes import dataframe
# functions
from grading_checks import naming, usage, documentation_logging, error_handling
from soft_checks import activity_stats, project_folder_structure, project_structure, template_check, selector_check
from flask import Flask, request, render_template, redirect, url_for, session, jsonify, Response, send_file, make_response
from flask_session import Session
from flask_socketio import SocketIO, emit
from flask_mysqldb import MySQL
import MySQLdb.cursors
from passlib.hash import sha256_crypt
import pdfkit
# es = Elasticsearch(['https://098b8510b627461cb0e77d37d10c4511.us-east-1.aws.found.io:9243'],
# http_auth=('elastic', '<PASSWORD>'))
application = app = Flask(__name__, static_folder='./static/dist', template_folder="./static")
# dont save cache in web browser (updating results image correctly)
app.config["CACHE_TYPE"] = "null"
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
app.config['UPLOAD_PATH'] = '/file/'
app.config['ALLOWED_EXTENSIONS'] = set(['zip'])
app.config['SESSION_TYPE'] = 'filesystem'
app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(hours=1)
app.config['SECRET_KEY'] = randint(0,99999999999999999999)
app.config['MYSQL_HOST'] = 'us-sql01.mysql.database.azure.com'
app.config['MYSQL_PORT'] = 3306
app.config['MYSQL_USER'] = 'us-evalsql01@us-sql01'
app.config['MYSQL_PASSWORD'] = '<PASSWORD>'
app.config['MYSQL_DB'] = 'sleipnir'
app.config['APP_ADMIN_RIGHT'] = 'admin'
mysql = MySQL(app)
# Check Configuration section for more details
Session(app)
socketio = SocketIO(app, async_mode="eventlet")
thread = None
######
gexf = ''
df_annotation = []
main_location = ""
dict_score = {}
df_invokeWf = []
pepper = 'zxf98g7yq3whretgih'
######
@app.route("/")
def login():
with app.app_context():
folderPathList = [os.getcwd().replace("\\", "/") + app.config['UPLOAD_PATH'] + path for path in
os.listdir(os.getcwd() + app.config['UPLOAD_PATH'])]
filteredFolderPathList = [path for path in folderPathList if time.time() - os.path.getmtime(path) > 900]
for folder in filteredFolderPathList:
shutil.rmtree(folder, True)
sessionPathList = [os.getcwd().replace("\\", "/") + '/flask_session/' + path for path in
os.listdir(os.getcwd() + '/flask_session/')]
filteredSessionPathList = [path for path in sessionPathList if time.time() - os.path.getmtime(path) > 900]
for ses in filteredSessionPathList:
print(ses)
os.remove(ses)
if session.get("loggedin"):
return redirect(url_for('upload'))
else:
return render_template('login.html')
@app.route("/login", methods=['POST'])
def validate_user():
with app.app_context():
requestData = json.loads(str(request.data, encoding="utf-8"))
tenant = requestData['tenant']
username = requestData['username']
password = requestData['password']
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute("SELECT id FROM tenants WHERE tenant_name = %s", (tenant,))
tenant_record = cursor.fetchone()
if tenant_record:
tenant_id = tenant_record["id"]
cursor.execute('SELECT id, password FROM users WHERE username = %s AND tenant_id = %s', (username, tenant_id,))
user_record = cursor.fetchone()
if user_record:
user_id = user_record["id"]
storedHashedPassword = user_record["password"]
if sha256_crypt.verify(password+pepper, storedHashedPassword):
resp = jsonify({"result": render_template('fileUpload.html',
username=username,
user_id=user_id)})
session['loggedin'] = True
session['id'] = user_id
session['username'] = username
cursor.close()
return make_response(resp, 200)
else:
cursor.close()
resp = jsonify({"message": "Wrong password"})
return make_response(resp, 400)
else:
cursor.close()
resp = jsonify({"message": "User not exists"})
return make_response(resp, 400)
else:
cursor.close()
resp = jsonify({"message": "Tenant not exists"})
return make_response(resp, 400)
@app.route('/logout')
def logout():
with app.app_context():
session.pop('loggedin', None)
session.pop('id', None)
session.pop('username', None)
return redirect(url_for('login'))
@app.route("/admin", methods=['GET'])
def admin():
with app.app_context():
if session.get('loggedin'):
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute("SELECT * FROM users_rights WHERE user_id = %s "
"AND right_id IN (SELECT id FROM rights WHERE `right` = %s);",
(int(session.get('id')), app.config['APP_ADMIN_RIGHT'],))
if cursor.fetchone():
cursor.close()
return render_template('adminPanel.html',
username=session.get('username'),
user_id=session.get('id'))
else:
cursor.close()
return "Not Authorized"
else:
return "Please login first"
@app.route('/upload')
def upload():
with app.app_context():
folderPathList = [os.getcwd().replace("\\", "/") + app.config['UPLOAD_PATH'] + path for path in os.listdir(os.getcwd() + app.config['UPLOAD_PATH'])]
filteredFolderPathList = [path for path in folderPathList if time.time() - os.path.getmtime(path) > 900]
for folder in filteredFolderPathList:
shutil.rmtree(folder, True)
if session.get('loggedin'):
return render_template('fileUpload.html',
username=session.get('username'),
user_id=session.get('id'))
else:
return "Please login first"
def background_thread():
while True:
socketio.emit('message', {'alive': "Alive"})
socketio.sleep(60)
@socketio.on('connect')
def connect():
global thread
if thread is None:
thread = socketio.start_background_task(target=background_thread)
@app.route("/processing", methods=["POST"])
def processing():
requestData=json.loads(str(request.data, encoding="utf-8"))
# Get related info from Project.json (name and description)
folderPath = session.get('folderPath')
fileLocationStr = session.get('fileLocationStr')
files = session.get('files')
df_json = documentation_logging.grade_project_json_name_desc(folderPath)
df_json_exp = pd.DataFrame(df_json.subfiles.tolist(), index=df_json['index']).stack().reset_index()
df_json_exp.columns = ['projectId', 'fileIndex', 'filePath']
lst_name = []
df_json['projectName'] = df_json.apply(lambda x: x['projectDetail']['projectName'], axis=1)
for name in list(df_json['projectName']):
if name not in lst_name:
lst_name.append(name)
else:
count = 2
dup_name = name + '_' + str(count)
while dup_name in lst_name:
count += 1
dup_name = name + '_' + str(count)
lst_name.append(dup_name)
df_json['projectName'] = lst_name
df_json_exp = pd.merge(df_json_exp, df_json.loc[:, ["mainFolder", 'projectName']].reset_index(), how="left",
left_on="projectId", right_on="index")
df_json_exp.drop(columns=['fileIndex', "index"], inplace=True)
if requestData["setting"]["jsonLog"]:
project_detail = list(df_json.copy().reset_index().loc[:, ['index', 'projectDetail', 'projectName']]
.T.to_dict().values())
json_name_score = df_json.namingScore.sum() / len(df_json.namingScore)
json_description_score = df_json.descriptionScore.sum() / len(df_json.descriptionScore)
else:
json_name_score = "[Not evaluated]"
json_description_score = "[Not evaluated]"
project_detail = ['Not evaluated']
# scans all project files and populates dataframes with relevant info
socketio.emit('progress', {'data': 'Processing Files ...'})
socketio.sleep(0.1)
lst_sub_df = [dataframe.populate_dataframe(files[i], df_json) for i in range(len(files))]
df_variable = pd.merge(pd.concat([x[0] for x in lst_sub_df], ignore_index=True).drop_duplicates(inplace=False),
df_json_exp, how="left", on="filePath")
df_argument = pd.merge(pd.concat([x[1] for x in lst_sub_df], ignore_index=True).drop_duplicates(inplace=False),
df_json_exp, how="left", on="filePath")
df_catches = pd.merge(pd.concat([x[2] for x in lst_sub_df], ignore_index=True).drop_duplicates(inplace=False),
df_json_exp, how="left", on="filePath")
df_activity = pd.merge(pd.concat([x[3] for x in lst_sub_df], ignore_index=True).drop_duplicates(inplace=False),
df_json_exp, how="left", on="filePath")
df_annotation = pd.concat([x[4] for x in lst_sub_df], ignore_index=True).drop_duplicates(inplace=False)
    df_selector = pd.merge(pd.concat([x[5] for x in lst_sub_df], ignore_index=True).drop_duplicates(inplace=False),
                           df_json_exp, how="left", on="filePath")
import requests, re, json, csv
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
confirmed_CSV_URL = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv'
deaths_CSV_URL = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Deaths.csv'
recovered_CSV_URL = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Recovered.csv'
confirmed_total_data = []
deaths_total_data = []
recovered_total_data = []
with requests.Session() as s:
download = s.get(confirmed_CSV_URL)
decoded_content = download.content.decode('utf-8')
cr = csv.reader(decoded_content.splitlines(), delimiter=',')
my_list = list(cr)
for row in my_list:
confirmed_total_data.append(row)
with requests.Session() as s:
download = s.get(deaths_CSV_URL)
decoded_content = download.content.decode('utf-8')
cr = csv.reader(decoded_content.splitlines(), delimiter=',')
my_list = list(cr)
for row in my_list:
deaths_total_data.append(row)
with requests.Session() as s:
download = s.get(recovered_CSV_URL)
decoded_content = download.content.decode('utf-8')
cr = csv.reader(decoded_content.splitlines(), delimiter=',')
my_list = list(cr)
for row in my_list:
recovered_total_data.append(row)
# confirmed_total_data[0]
confirmed_df = pd.DataFrame(confirmed_total_data[1:], columns=confirmed_total_data[0])
deaths_df = pd.DataFrame(deaths_total_data, columns=deaths_total_data[0])
recovered_total_data = pd.DataFrame(recovered_total_data, columns=recovered_total_data[0])
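# Note: confirmed_df skips the CSV header row ([1:]), while deaths_df and the recovered frame
# keep it as row 0; the deaths pipeline below compensates by dropping its first row before
# summing.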
confirmed_cols = confirmed_df.columns[confirmed_df.columns.str.endswith('20')]
confirmed_result = pd.DataFrame()
confirmed_result = confirmed_df.filter(confirmed_cols, axis=1)
confirmed_result = confirmed_result.astype(int)
confirmed_result.loc['Total Confimred'] = confirmed_result.sum()
confirmed_result = confirmed_result.tail(1)
confirmed_result.columns = range(confirmed_result.shape[1])
confirmed_result = list(confirmed_result.iloc[0])
print(confirmed_result)
death_cols = deaths_df.columns[deaths_df.columns.str.endswith('20')]
deaths_result = pd.DataFrame()
deaths_result = deaths_df.filter(death_cols, axis=1)
deaths_result = deaths_result.drop(deaths_result.index[0])
deaths_result = deaths_result.astype(int)
deaths_result.loc['Total Deaths'] = deaths_result.sum()
deaths_result = deaths_result.tail(1)
deaths_result.columns = range(deaths_result.shape[1])
deaths_result = list(deaths_result.iloc[0])
print(deaths_result)
recover_cols = recovered_total_data.columns[recovered_total_data.columns.str.endswith('20')]
recover_result = pd.DataFrame()
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import time
import warnings
warnings.filterwarnings('ignore')
import pandas as pd, numpy as np
import math, json, gc, random, os, sys
import torch
import logging
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
from sklearn.model_selection import train_test_split
from catalyst.dl import SupervisedRunner
from catalyst.contrib.dl.callbacks import WandbLogger
from contextlib import contextmanager
from catalyst.dl.callbacks import AccuracyCallback, F1ScoreCallback, OptimizerCallback
#from pytorch_memlab import profile, MemReporter
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# In[2]:
def set_seed(seed: int):
random.seed(seed)
np.random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed) # type: ignore
torch.backends.cudnn.deterministic = True # type: ignore
torch.backends.cudnn.benchmark = True # type: ignore
# In[3]:
set_seed(2020)
# In[4]:
test = pd.read_json('/kaggle/input/stanford-covid-vaccine/test.json', lines=True)
samplesub= pd.read_csv('/kaggle/input/stanford-covid-vaccine/sample_submission.csv')
# In[5]:
bpp_max=[]
bpp_mean =[]
id = test.id.values
for i in id:
probability = np.load('../input/stanford-covid-vaccine'+'/bpps/%s.npy'%i)
bpp_max.append(probability.max(-1).tolist())
bpp_mean.append(probability.mean(-1).tolist())
test['bpp_max']=bpp_max
test['bpp_mean']=bpp_mean
# In[6]:
test_public=test[test['seq_length']==107]
test_private=test[test['seq_length']==130]
# In[7]:
test_public_x=test_public.loc[:,['id','sequence','structure','predicted_loop_type','bpp_max','bpp_mean']]
test_private_x=test_private.loc[:,['id','sequence','structure','predicted_loop_type','bpp_max','bpp_mean']]
#The private data does not fit on the GPU (CUDA) in one pass, so split it into two halves.
test_private_x1,test_private_x2=train_test_split(test_private_x,test_size=0.5)
# In[8]:
token2int = {x:i for i, x in enumerate('().<KEY>')}
def preprocess_inputs_public(df, cols=['sequence', 'structure', 'predicted_loop_type']):
base_fea= np.transpose(
np.array(
df[cols]
.applymap(lambda seq: [token2int[x] for x in seq])
.values
.tolist()
),
(0, 2, 1)
)
bpps_max_fea = np.array(test_public_x['bpp_max'].to_list())[:,:,np.newaxis]
bpps_mean_fea = np.array(test_public_x['bpp_mean'].to_list())[:,:,np.newaxis]
return np.concatenate([base_fea,bpps_max_fea,bpps_mean_fea], 2)
def preprocess_inputs_private1(df, cols=['sequence', 'structure', 'predicted_loop_type']):
base_fea= np.transpose(
np.array(
df[cols]
.applymap(lambda seq: [token2int[x] for x in seq])
.values
.tolist()
),
(0, 2, 1)
)
bpps_max_fea = np.array(test_private_x1['bpp_max'].to_list())[:,:,np.newaxis]
bpps_mean_fea = np.array(test_private_x1['bpp_mean'].to_list())[:,:,np.newaxis]
return np.concatenate([base_fea,bpps_max_fea,bpps_mean_fea], 2)
def preprocess_inputs_private2(df, cols=['sequence', 'structure', 'predicted_loop_type']):
base_fea= np.transpose(
np.array(
df[cols]
.applymap(lambda seq: [token2int[x] for x in seq])
.values
.tolist()
),
(0, 2, 1)
)
bpps_max_fea = np.array(test_private_x2['bpp_max'].to_list())[:,:,np.newaxis]
bpps_mean_fea = np.array(test_private_x2['bpp_mean'].to_list())[:,:,np.newaxis]
return np.concatenate([base_fea,bpps_max_fea,bpps_mean_fea], 2)
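# Each preprocess_inputs_* call returns an array of shape (n_samples, seq_len, 5): three
# integer-encoded channels (sequence, structure, predicted_loop_type) plus the bpp_max and
# bpp_mean tracks. Note the bpp features are read from the module-level test_*_x frames rather
# than the df argument, so each function only works with its matching dataframe.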
# In[9]:
test_public_inputs = torch.from_numpy(preprocess_inputs_public(test_public_x)).to(device).float()
test_private_inputs1 = torch.from_numpy(preprocess_inputs_private1(test_private_x1)).to(device).float()
test_private_inputs2 = torch.from_numpy(preprocess_inputs_private2(test_private_x2)).to(device).float()
# In[10]:
#print('train inputs:{}\nval inputs:{}\ntrain labels:{}\nval labels:{}'.format(train_inputs.shape,val_inputs.shape,train_outputs.shape,val_outputs.shape))
# In[11]:
class LSTM_model(nn.Module):
def __init__(
self, seq_len=107, pred_len=68, dropout=0.5, embed_dim=100, hidden_dim=1024, hidden_layers=2
):
super(LSTM_model, self).__init__()
self.pred_len = pred_len
self.embeding = nn.Embedding(num_embeddings=len(token2int), embedding_dim=embed_dim)
self.lstm = nn.LSTM(
input_size=embed_dim * 3+2,
hidden_size=hidden_dim,
num_layers=hidden_layers,
dropout=dropout,
bidirectional=True,
batch_first=True,
)
self.linear = nn.Linear(hidden_dim * 2, 5)
def forward(self, seqs):
embed = self.embeding(seqs[:,:,0:3].long())
reshaped = torch.reshape(embed, (-1, embed.shape[1], embed.shape[2] * embed.shape[3]))
reshaped= torch.cat((reshaped,seqs[:,:,3:5]),2)
output, hidden = self.lstm(reshaped)
truncated = output[:, : self.pred_len, :]
out = self.linear(truncated)
return out
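# The models consume the (batch, seq_len, 5) tensors built above: channels 0-2 are embedded
# separately and flattened to embed_dim * 3 features, then the two raw bpp channels are
# concatenated on, matching the embed_dim * 3 + 2 input size of the recurrent layer.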
# In[12]:
class GRU_model(nn.Module):
def __init__(
self, seq_len=107, pred_len=68, dropout=0.5, embed_dim=100, hidden_dim=1024, hidden_layers=2
):
super(GRU_model, self).__init__()
self.pred_len = pred_len
self.embeding = nn.Embedding(num_embeddings=len(token2int), embedding_dim=embed_dim)
self.gru = nn.GRU(
input_size=embed_dim * 3+2,
hidden_size=hidden_dim,
num_layers=hidden_layers,
dropout=dropout,
bidirectional=True,
batch_first=True,
)
self.linear = nn.Linear(hidden_dim * 2, 5)
def forward(self, seqs):
embed = self.embeding(seqs[:,:,0:3].long())
reshaped = torch.reshape(embed, (-1, embed.shape[1], embed.shape[2] * embed.shape[3]))
reshaped= torch.cat((reshaped,seqs[:,:,3:5]),2)
output, hidden = self.gru(reshaped)
truncated = output[:, : self.pred_len, :]
out = self.linear(truncated)
return out
# In[13]:
LSTM_weights_path='../input/weight11/LSTM_ver20.pth'
def get_LSTM_model(seq_len=107, pred_len=68):
model = LSTM_model(seq_len=seq_len, pred_len=pred_len)
checkpoint = torch.load(LSTM_weights_path)
model.load_state_dict(checkpoint["model_state_dict"])
device = torch.device("cuda")
model.to(device)
model.eval()
return model
# In[14]:
GRU_weights_path='../input/weight11/GRU_ver8'
def get_GRU_model(seq_len=107, pred_len=68):
model = GRU_model(seq_len=seq_len, pred_len=pred_len)
checkpoint = torch.load(GRU_weights_path)
model.load_state_dict(checkpoint["model_state_dict"])
device = torch.device("cuda")
model.to(device)
model.eval()
return model
# In[15]:
with torch.no_grad():
model =get_LSTM_model()
prediction=model(test_public_inputs)
result_public_LSTM=prediction.to('cpu').detach().numpy().copy()
del prediction
with torch.no_grad():
model =get_LSTM_model(seq_len=130, pred_len=91)
prediction=model(test_private_inputs1)
result_private1_LSTM=prediction.to('cpu').detach().numpy().copy()
del prediction
with torch.no_grad():
model =get_LSTM_model(seq_len=130, pred_len=91)
prediction=model(test_private_inputs2)
result_private2_LSTM=prediction.to('cpu').detach().numpy().copy()
del prediction
# In[16]:
with torch.no_grad():
model =get_GRU_model()
prediction=model(test_public_inputs)
result_public_GRU=prediction.to('cpu').detach().numpy().copy()
del prediction
with torch.no_grad():
model =get_GRU_model(seq_len=130, pred_len=91)
prediction=model(test_private_inputs1)
result_private1_GRU=prediction.to('cpu').detach().numpy().copy()
del prediction
with torch.no_grad():
model =get_GRU_model(seq_len=130, pred_len=91)
prediction=model(test_private_inputs2)
result_private2_GRU=prediction.to('cpu').detach().numpy().copy()
del prediction
# In[17]:
df0 = pd.DataFrame(index=range(39), columns=['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',])
df0=df0.fillna(0)
# In[18]:
test_public_id=test_public['id']
idlist_public=test_public_id.values.tolist()
# In[19]:
test_private_id1=test_private_x1['id']
idlist_private1=test_private_id1.values.tolist()
idlist_private1[-5:]
# In[20]:
test_private_id2=test_private_x2['id']
idlist_private2=test_private_id2.values.tolist()
idlist_private2[:5]
# In[21]:
#Forcibly sort the predictions back into the sample-submission order (via a merge on id_seqpos).
testindex=samplesub.loc[:,['id_seqpos']]
testindex=testindex.reset_index()
# In[22]:
df1 = pd.DataFrame(result_public_LSTM[0])
df1.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df1.insert(0, 'id_seqpos', 0)
df1=pd.concat([df1,df0])
id=idlist_public[0]
for i in range(len(df1)):
df1.iloc[i,0]=id+'_{}'.format(i)
for j in range (len(result_public_LSTM)-1):
id = idlist_public[j+1]
df2 = pd.DataFrame(result_public_LSTM[j+1])
df2.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df2.insert(0, 'id_seqpos', 0)
df2=pd.concat([df2,df0])
for i in range(len(df2)):
df2.iloc[i,0]=id+'_{}'.format(i)
df1=pd.concat([df1,df2])
public_dataframe=df1
df1 = pd.DataFrame(result_private1_LSTM[0])
df1.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df1.insert(0, 'id_seqpos', 0)
df1=pd.concat([df1,df0])
id=idlist_private1[0]
for i in range(len(df1)):
df1.iloc[i,0]=id+'_{}'.format(i)
for j in range (len(result_private1_LSTM)-1):
id = idlist_private1[j+1]
df2 = pd.DataFrame(result_private1_LSTM[j+1])
df2.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df2.insert(0, 'id_seqpos', 0)
df2=pd.concat([df2,df0])
for i in range(len(df2)):
df2.iloc[i,0]=id+'_{}'.format(i)
df1=pd.concat([df1,df2])
private_dataframe1=df1
df1 = pd.DataFrame(result_private2_LSTM[0])
df1.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df1.insert(0, 'id_seqpos', 0)
df1=pd.concat([df1,df0])
id=idlist_private2[0]
for i in range(len(df1)):
df1.iloc[i,0]=id+'_{}'.format(i)
for j in range (len(result_private2_LSTM)-1):
id = idlist_private2[j+1]
df2 = pd.DataFrame(result_private2_LSTM[j+1])
df2.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df2.insert(0, 'id_seqpos', 0)
df2=pd.concat([df2,df0])
for i in range(len(df2)):
df2.iloc[i,0]=id+'_{}'.format(i)
df1=pd.concat([df1,df2])
private_dataframe2=df1
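# Illustrative helper (not in the original notebook): the frame-building loop
# above is repeated for every model/split and could be captured once.
# `pad_df` corresponds to df0, the 39 zero rows appended to each sequence.
def build_submission_frame(results, id_list, pad_df):
    target_cols = ['reactivity', 'deg_Mg_pH10', 'deg_pH10', 'deg_Mg_50C', 'deg_50C']
    frames = []
    for pred, seq_id in zip(results, id_list):
        part = pd.DataFrame(pred, columns=target_cols)
        part.insert(0, 'id_seqpos', 0)
        part = pd.concat([part, pad_df])
        part['id_seqpos'] = [f'{seq_id}_{i}' for i in range(len(part))]
        frames.append(part)
    return pd.concat(frames)
# e.g. private_dataframe2 = build_submission_frame(result_private2_LSTM, idlist_private2, df0)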
# In[23]:
merged_dataframe=pd.concat([public_dataframe,private_dataframe1,private_dataframe2])
pre_submission_LSTM=pd.merge(testindex,merged_dataframe)
# In[24]:
pre_submission_LSTM
# In[25]:
df1 = pd.DataFrame(result_public_GRU[0])
df1.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df1.insert(0, 'id_seqpos', 0)
df1=pd.concat([df1,df0])
id=idlist_public[0]
for i in range(len(df1)):
df1.iloc[i,0]=id+'_{}'.format(i)
for j in range (len(result_public_GRU)-1):
id = idlist_public[j+1]
df2 = pd.DataFrame(result_public_GRU[j+1])
df2.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df2.insert(0, 'id_seqpos', 0)
df2=pd.concat([df2,df0])
for i in range(len(df2)):
df2.iloc[i,0]=id+'_{}'.format(i)
df1=pd.concat([df1,df2])
public_dataframe=df1
df1 = pd.DataFrame(result_private1_GRU[0])
df1.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df1.insert(0, 'id_seqpos', 0)
df1=pd.concat([df1,df0])
id=idlist_private1[0]
for i in range(len(df1)):
df1.iloc[i,0]=id+'_{}'.format(i)
for j in range (len(result_private1_GRU)-1):
id = idlist_private1[j+1]
df2 = pd.DataFrame(result_private1_GRU[j+1])
df2.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df2.insert(0, 'id_seqpos', 0)
df2=pd.concat([df2,df0])
for i in range(len(df2)):
df2.iloc[i,0]=id+'_{}'.format(i)
df1=pd.concat([df1,df2])
private_dataframe1=df1
df1 = pd.DataFrame(result_private2_GRU[0])
df1.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df1.insert(0, 'id_seqpos', 0)
df1=pd.concat([df1,df0])
id=idlist_private2[0]
for i in range(len(df1)):
df1.iloc[i,0]=id+'_{}'.format(i)
for j in range (len(result_private2_GRU)-1):
id = idlist_private2[j+1]
df2 = pd.DataFrame(result_private2_GRU[j+1])
df2.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df2.insert(0, 'id_seqpos', 0)
df2=pd.concat([df2,df0])
for i in range(len(df2)):
df2.iloc[i,0]=id+'_{}'.format(i)
df1=pd.concat([df1,df2])
private_dataframe2=df1
# In[26]:
merged_dataframe=pd.concat([public_dataframe,private_dataframe1,private_dataframe2])
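# Illustrative next step (not part of the source): merge the GRU frames into
# submission order as was done for the LSTM, then blend the two models by a
# simple average. The blend weights and the output file name are assumptions.
pre_submission_GRU = pd.merge(testindex, merged_dataframe)
target_cols = ['reactivity', 'deg_Mg_pH10', 'deg_pH10', 'deg_Mg_50C', 'deg_50C']
blend = pre_submission_LSTM.copy()
blend[target_cols] = (pre_submission_LSTM[target_cols].values
                      + pre_submission_GRU[target_cols].values) / 2
# blend[['id_seqpos'] + target_cols].to_csv('submission.csv', index=False)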
# -*- coding: utf-8 -*-
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
import random
import matplotlib
from collections import OrderedDict
import seaborn as sns
import matplotlib.gridspec as gridspec
from matplotlib.font_manager import FontProperties
plt.rcdefaults()
plt.rc('xtick.major', size = 0, width=0)
plt.rc('ytick.major', size = 0, width=0)
data_dir = r'/home/greydon/Documents/GitHub/afids-clinical/data/input_fid_native'
data_mni_dir = r'/home/greydon/Documents/GitHub/afids-clinical/data/input_fid_MNI_linear_combined'
#data_dir = r'C:\Users\greydon\Documents\github\afids_parkinsons\input\input_fid'
show_only = True
sub_ignore = [146]
fid_dic = {1: 'AC',
2: 'PC',
3: 'ICS',
4: 'PMJ',
5: 'SIPF',
6: 'RSLMS',
7: 'LSLMS',
8: 'RILMS',
9: 'LILMS',
10: 'CUL',
11: 'IMS',
12: 'RMB',
13: 'LMB',
14: 'PG',
15: 'RLVAC',
16: 'LLVAC',
17: 'RLVPC',
18: 'LLVPC',
19: 'GENU',
20: 'SPLE',
21: 'RALTH',
22: 'LALTH',
23: 'RSAMTH',
24: 'LSAMTH',
25: 'RIAMTH',
26: 'LIAMTH',
27: 'RIGO',
28: 'LIGO',
29: 'RVOH',
30: 'LVOH',
31: 'ROSF',
32: 'LOSF'
}
fid_desc = {1: 'AC',
2: 'PC',
3: 'Infracollicular Sulcus',
4: 'PMJ',
5: 'Superior IPF',
6: 'Right Superior LMS',
7: 'Left Superior LMS',
8: 'Right Inferior LMS',
9: 'Left Inferior LMS',
10: 'Culmen',
11: 'Intermammillary Sulcus',
12: 'Right Mammilary Body',
13: 'Left Mammilary Body',
14: 'Pineal Gland',
15: 'Right LV at AC',
16: 'Left LV at AC',
17: 'Right LV at PC',
18: 'Left LV at PC',
19: 'Genu of CC',
20: 'Splenium of CC',
21: 'Right AL Temporal Horn',
22: 'Left AL Tempral Horn',
23: 'R. Sup. AM Temporal Horn',
24: 'L. Sup. AM Temporal Horn',
25: 'R Inf. AM Temp Horn',
26: 'L Inf. AM Temp Horn',
27: 'Right IG Origin',
28: 'Left IG Origin',
29: 'R Ventral Occipital Horn',
30: 'L Ventral Occipital Horn',
31: 'R Olfactory Fundus',
32: 'L Olfactory Fundus'
}
def plot_fiducials(data_plot, expert_mean, data_dir,analysis=2, showOnly=False):
random.seed(1)
color = ["#"+''.join([random.choice('0123456789ABCDEF') for j in range(6)]) for i in range(len(raters))]
min_val = -2.5
max_val = 2.5
major_ticks = np.linspace(min_val,max_val, 7)
fig = plt.figure(figsize=(18,8))
handles = {}
data_cnt = 1
for ifid in range(4):
for jfid in range(8):
ax = plt.subplot2grid((4,8),(ifid,jfid), projection='3d')
tempData = data_plot[data_plot['fid'].isin([data_cnt])]
rater_labels = tempData['rater'].values
if analysis == 1:
plot_title = 'Distance from the average of expert raters'
file_name = 'distance_from_expert_mean'
tempData = tempData.loc[:,'x':'z'].values - expert_mean.loc[expert_mean['fid'].isin([data_cnt]),'x':'z'].values
elif analysis == 2:
plot_title = 'Distance from the average of all raters'
file_name = 'distance_from_all_raters_mean'
tempData = tempData.loc[:,'x':'z'].values - tempData.loc[:,'x':'z'].mean().values
elif analysis == 3:
plot_title = 'Distance from average MCP'
file_name = 'distance_from_avg_mcp'
tempData = tempData.loc[:,'x':'z'].values - tempData.loc[:,'x':'z'].mean().values
nov_cnt = 1
exp_cnt = 1
rater_labels_final_tmp = {}
for i in range(len(rater_labels)): #plot each point + it's index as text above
if rater_labels[i] in ('AT','RC','MJ'):
rate_label = f"Novice 0{nov_cnt}"
nov_cnt += 1
else:
rate_label = f"Expert 0{exp_cnt}"
exp_cnt += 1
rater_labels_final_tmp[rate_label] = rater_labels[i]
rater_labels_final = {}
for irate in sorted(list(rater_labels_final_tmp)):
rater_labels_final[irate]=rater_labels_final_tmp[irate]
print(rater_labels_final)
for irate in list(rater_labels_final):
rater_idx = [i for i,x in enumerate(rater_labels) if x == rater_labels_final[irate]][0]
l1 = ax.scatter(tempData[rater_idx,0], tempData[rater_idx,1], tempData[rater_idx,2], marker='o', c=color[rater_idx],edgecolors='black', s=50, label=irate)
handles[irate] = l1
ax.plot((min_val,min_val), (min_val,min_val), (min_val-0.1,max_val+0.1), 'black', linewidth=1.0)
ax.set_xlim([min_val,max_val])
ax.set_ylim([min_val,max_val])
ax.set_zlim([min_val,max_val])
ax.set_xlabel('x',labelpad=-15, fontweight='bold', fontsize=14)
ax.set_ylabel('y',labelpad=-15, fontweight='bold', fontsize=14)
ax.set_zlabel('z',labelpad=-15, fontweight='bold', fontsize=14)
ax.get_xaxis().set_ticklabels([])
ax.get_yaxis().set_ticklabels([])
ax.zaxis.set_ticklabels([])
ax.zaxis.set_major_locator(matplotlib.ticker.NullLocator())
ax.set_xticks(major_ticks)
ax.set_yticks(major_ticks)
ax.set_zticks(major_ticks)
ax.grid(which='major', alpha=0.5)
ax.xaxis.pane.set_edgecolor('black')
ax.yaxis.pane.set_edgecolor('black')
ax.zaxis.pane.set_edgecolor('black')
ax.xaxis.pane.set_alpha(1)
ax.yaxis.pane.set_alpha(1)
ax.zaxis.pane.set_alpha(1)
ax.xaxis.pane.fill = False
ax.yaxis.pane.fill = False
ax.zaxis.pane.fill = False
ax.view_init(elev=25, azim=44)
ax.set_title(str(data_cnt) + ': ' + fid_dic[data_cnt], pad=2, fontweight='bold', fontsize=16)
data_cnt += 1
fig.subplots_adjust(hspace=0.15, wspace=0.05, top=0.90, bottom=0.06, left=0.02,right=0.9)
plt.legend(handles=handles.values(), fontsize=12, bbox_to_anchor=[2.1, 2.7], handletextpad=0.05)
fig.suptitle(plot_title, y = 0.98, fontsize=22, fontweight='bold')
if not showOnly:
output_dir = os.path.join(data_dir,'plots')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
plt.savefig(os.path.join(output_dir, f"{file_name}.svg"),transparent=True)
plt.savefig(os.path.join(output_dir, f"{file_name}.png"),transparent=True,dpi=450)
plt.savefig(os.path.join(output_dir, f"{file_name}_white.png"),transparent=False,dpi=450)
plt.close()
#%%
data_dir_out = r'/home/greydon/Documents'
sub_ignore = [146]
raters = [x for x in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, x)) and 'mean' not in x]
rater_final = pd.DataFrame({})
iter_cnt = 0
for irater in raters:
patient_files = os.listdir(os.path.join(data_dir, irater))
for isub in patient_files:
sub_num = int(''.join([s for s in isub if s.isdigit()]))
fileN = os.path.join(data_dir, irater,isub, os.listdir(os.path.join(data_dir, irater,isub))[0])
data_table = pd.read_csv(fileN, skiprows=3, header=None)
data_table['rater'] = np.repeat(irater,data_table.shape[0])
data_table['subject'] = np.repeat(sub_num,data_table.shape[0])
rater_final = pd.concat([rater_final, data_table], axis = 0, ignore_index=True)
rater_final.rename(columns={0:'node_id', 1:'x', 2:'y', 3:'z', 4:'ow', 5:'ox',
6:'oy', 7:'oz', 8:'vis', 9:'sel', 10:'lock',
11:'fid', 12:'description', 13:'associatedNodeID'}, inplace=True)
Sub = pd.DataFrame({})
size = []
for r in raters:
sub_temp = np.unique(rater_final[rater_final['rater']==r]['subject'])
if sub_ignore:
sub_temp = [x for x in sub_temp if x not in sub_ignore]
data_table = pd.DataFrame({'rater': np.repeat(r,len(sub_temp)), 'subject':sub_temp})
Sub = pd.concat([Sub, data_table], axis = 0, ignore_index=True)
size.append((r,len(sub_temp)))
full_subs = set(Sub[Sub['rater']==size[0][0]]['subject'].values)
size = sorted(size, key=lambda tup: tup[1], reverse=True)
Sub_Comp = list(set(Sub[Sub['rater']==size[0][0]]['subject'].values) &
set(Sub[Sub['rater']==size[1][0]]['subject'].values))
for irate in range(2,len(raters)):
Sub_Comp = list(set(Sub_Comp) & set(Sub[Sub['rater']==size[irate][0]]['subject'].values))
Sub_Comp = sorted(Sub_Comp)
#set(full_subs).difference(Sub[Sub['rater']==size[4][0]]['subject'].values)
Data_comp = rater_final[rater_final['subject'].isin(Sub_Comp)]
Data_comp = Data_comp.sort_values(['rater','subject', 'fid'], ascending=[True, True,True])
Tot_Data = np.zeros((32,5,len(Sub_Comp),len(raters)))
for irate in range(len(raters)):
for isub in range(len(Sub_Comp)):
Tot_Data[:,:,isub,irate] = Data_comp[(Data_comp['rater']==raters[irate]) & (Data_comp['subject']==Sub_Comp[isub])].sort_values(['fid']).loc[:,['fid','x','y','z','subject']]
Tot_mean = np.mean(Tot_Data,3)
N = Tot_mean[:,:,:,np.newaxis]
Tot_diff = Tot_Data - np.tile(N,[1,1,1,len(raters)])
Tot_eudiff = np.sqrt(Tot_diff[:,1,:,:]**2 + Tot_diff[:,2,:,:]**2 + Tot_diff[:,3,:,:]**2)
Rater_AFLE = np.mean(Tot_eudiff,2).T
Rater_AFLE_mean = np.mean(Tot_eudiff,1)
Rater_AFLE_SD = np.std(Tot_eudiff,1)
Total_AFLE_mean = np.mean(Rater_AFLE_mean,1)
Total_AFLE_SD = np.std(Tot_eudiff,1)
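# Quick illustrative check (not from the source): with NumPy broadcasting the
# np.tile above is not strictly needed, and the per-rater AFLE is the Euclidean
# norm of the deviation from the rater mean taken over the x,y,z rows
# (columns 1:4 of the [fid, x, y, z, subject] axis).
_demo = np.random.rand(32, 5, 3, 4)              # fiducials x columns x subjects x raters
_demo_mean = _demo.mean(axis=3, keepdims=True)   # mean across raters
assert np.allclose(_demo - np.tile(_demo_mean, [1, 1, 1, 4]), _demo - _demo_mean)
_demo_afle = np.sqrt(((_demo - _demo_mean)[:, 1:4, :, :] ** 2).sum(axis=1))  # shape (32, 3, 4)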
mcp_point = pd.DataFrame({})
for r in raters:
for s in Sub_Comp:
ac = Data_comp.loc[(Data_comp['rater']==r) & (Data_comp['subject']==s) & (Data_comp['fid']==1),'x':'z'].values[0]
pc = Data_comp.loc[(Data_comp['rater']==r) & (Data_comp['subject']==s)& (Data_comp['fid']==2),'x':'z'].values[0]
mcp = (ac + pc)/2
data_table = pd.DataFrame({'rater': r, 'subject': s, 'x': mcp[0], 'y': mcp[1],'z': mcp[2]}, index=[0] )
mcp_point = pd.concat([mcp_point, data_table], axis = 0, ignore_index=True)
mcp_point_mean = mcp_point.groupby(['subject'])[['x','y','z']].mean()
data_from_mcp = pd.DataFrame({})
for r in raters:
for s in Sub_Comp:
for f in range(1,33):
point = Data_comp.loc[(Data_comp['rater']==r) & (Data_comp['subject']==s) & (Data_comp['fid']==f),'x':'z'].values[0]
di = point - mcp_point_mean.loc[s,:].values
euclidean = np.sqrt(di[0]**2 + di[1]**2 + di[2]**2)
data_table = pd.DataFrame({'rater': r, 'subject': s, 'fid': f, 'distance': euclidean,'x': di[0], 'y': di[1],'z': di[2]}, index=[0] )
data_from_mcp = pd.concat([data_from_mcp, data_table], axis = 0, ignore_index=True)
data_from_mcp_avg = data_from_mcp.groupby(['rater','fid'])[['x','y','z']].mean().reset_index()
#%%
sub_ignore = [146]
rater_mni_final = pd.DataFrame({})
iter_cnt = 0
for irater in raters:
patient_files = os.listdir(os.path.join(data_mni_dir, irater))
for isub in patient_files:
sub_num = int(''.join([s for s in isub if s.isdigit()]))
fileN = os.path.join(data_mni_dir, irater,isub, [x for x in os.listdir(os.path.join(data_mni_dir, irater,isub)) if x.endswith('_nlin.fcsv')][0])
data_table = pd.read_csv(fileN, skiprows=3, header=None)
data_table['rater'] = np.repeat(irater,data_table.shape[0])
data_table['subject'] = np.repeat(sub_num,data_table.shape[0])
rater_mni_final = pd.concat([rater_mni_final, data_table], axis = 0, ignore_index=True)
rater_mni_lin_final = pd.DataFrame({})
iter_cnt = 0
for irater in raters:
patient_files = os.listdir(os.path.join(data_mni_dir, irater))
for isub in patient_files:
sub_num = int(''.join([s for s in isub if s.isdigit()]))
fileN = os.path.join(data_mni_dir, irater,isub, [x for x in os.listdir(os.path.join(data_mni_dir, irater,isub)) if x.endswith('_lin.fcsv')][0])
data_table = pd.read_csv(fileN, skiprows=3, header=None)
data_table['rater'] = np.repeat(irater,data_table.shape[0])
data_table['subject'] = np.repeat(sub_num,data_table.shape[0])
rater_mni_lin_final = pd.concat([rater_mni_lin_final, data_table], axis = 0, ignore_index=True)
rater_mni_final.rename(columns={0:'node_id', 1:'x', 2:'y', 3:'z', 4:'ow', 5:'ox',
6:'oy', 7:'oz', 8:'vis', 9:'sel', 10:'lock',
11:'fid', 12:'description', 13:'associatedNodeID'}, inplace=True)
rater_mni_lin_final.rename(columns={0:'node_id', 1:'x', 2:'y', 3:'z', 4:'ow', 5:'ox',
6:'oy', 7:'oz', 8:'vis', 9:'sel', 10:'lock',
11:'fid', 12:'description', 13:'associatedNodeID'}, inplace=True)
Sub = pd.DataFrame({})
size = []
for r in raters:
sub_temp = np.unique(rater_mni_final[rater_mni_final['rater']==r]['subject'])
if sub_ignore:
sub_temp = [x for x in sub_temp if x not in sub_ignore]
data_table = pd.DataFrame({'rater': np.repeat(r,len(sub_temp)), 'subject':sub_temp})
Sub = pd.concat([Sub, data_table], axis = 0, ignore_index=True)
size.append((r,len(sub_temp)))
full_subs = set(Sub[Sub['rater']==size[0][0]]['subject'].values)
size = sorted(size, key=lambda tup: tup[1], reverse=True)
Sub_Comp = list(set(Sub[Sub['rater']==size[0][0]]['subject'].values) &
set(Sub[Sub['rater']==size[1][0]]['subject'].values))
for irate in range(2,len(raters)):
Sub_Comp = list(set(Sub_Comp) & set(Sub[Sub['rater']==size[irate][0]]['subject'].values))
Sub_Comp = sorted(Sub_Comp)
Data_mni_comp = rater_mni_final[rater_mni_final['subject'].isin(Sub_Comp)]
Data_mni_comp = Data_mni_comp.sort_values(['rater','subject', 'fid'], ascending=[True, True,True])
Data_mni_lin_comp = rater_mni_lin_final[rater_mni_lin_final['subject'].isin(Sub_Comp)]
Data_mni_lin_comp = Data_mni_lin_comp.sort_values(['rater','subject', 'fid'], ascending=[True, True,True])
Tot_Data = np.zeros((32,5,len(Sub_Comp),len(raters)))
Tot_Data_lin = np.zeros((32,5,len(Sub_Comp),len(raters)))
for irate in range(len(raters)):
for isub in range(len(Sub_Comp)):
Tot_Data[:,:,isub,irate] = Data_mni_comp[(Data_mni_comp['rater']==raters[irate]) & (Data_mni_comp['subject']==Sub_Comp[isub])].sort_values(['fid']).loc[:,['fid','x','y','z','subject']]
Tot_Data_lin[:,:,isub,irate] = Data_mni_lin_comp[(Data_mni_lin_comp['rater']==raters[irate]) & (Data_mni_lin_comp['subject']==Sub_Comp[isub])].sort_values(['fid']).loc[:,['fid','x','y','z','subject']]
MNI152NLin2009cAsym_standard = pd.read_csv('/home/greydon/Documents/GitHub/afids-clinical/data/fid_standards/MNI152NLin2009bAsym_rater_standard/MNI152NLin2009bAsym_desc-raterstandard_afids.fcsv', skiprows=2)[['label','x','y','z']].to_numpy()
N = MNI152NLin2009cAsym_standard[:,:,np.newaxis, np.newaxis]
MNI_Diff = Tot_Data[:,:4,:,:] - np.tile(N,[1,1,len(Sub_Comp),len(raters)])
MNI_AFLE = np.sqrt(MNI_Diff[:,1,:,:]**2 + MNI_Diff[:,2,:,:]**2 + MNI_Diff[:,3,:,:]**2)
MNI_Diff_lin = Tot_Data_lin[:,:4,:,:] - np.tile(N,[1,1,len(Sub_Comp),len(raters)])
MNI_AFLE_lin = np.sqrt(MNI_Diff_lin[:,1,:,:]**2 + MNI_Diff_lin[:,2,:,:]**2 + MNI_Diff_lin[:,3,:,:]**2)
MNI_AFLE_rater = np.mean(MNI_AFLE,2).T
MNI_AFLE_scan = np.mean(MNI_AFLE,1)
MNI_AFLE_total = np.mean(MNI_AFLE_rater,0)
MNI_AFLE_std = np.std(MNI_AFLE_rater,0)
np.mean(MNI_AFLE_total)
np.std(MNI_AFLE_total)
MNI_AFLE_lin_rater = np.mean(MNI_AFLE_lin,2).T
MNI_AFLE_lin_scan = np.mean(MNI_AFLE_lin,1)
MNI_AFLE_lin_total = np.mean(MNI_AFLE_lin_rater,0)
MNI_AFLE_lin_std = np.std(MNI_AFLE_lin_rater,0)
np.mean(MNI_AFLE_lin_total)
np.std(MNI_AFLE_lin_total)
df=pd.DataFrame(np.c_[
[N + P for N,P in zip([f'{x:.2f}' for x in MNI_AFLE_lin_total ],[f' ({x:.2f})' for x in MNI_AFLE_lin_std])]+[f'{np.mean(MNI_AFLE_lin_total):.2f} ({np.std(MNI_AFLE_lin_total):.2f})'],
[N + P for N,P in zip([f'{x:.2f}' for x in MNI_AFLE_total ],[f' ({x:.2f})' for x in MNI_AFLE_std])]+[f'{np.mean(MNI_AFLE_total):.2f} ({np.std(MNI_AFLE_total):.2f})']
])
print(df.to_csv(index=None, header=None))
#%%
goldStandard = "MA"
rater = 1
gold_stand_data = Data_comp[Data_comp['rater'].isin([goldStandard])].reset_index()
single_rater_data = Data_comp[Data_comp['rater'].isin([raters[rater]])].reset_index()
Coor_Diff = gold_stand_data.loc[:,'x':'z'].values - (single_rater_data.loc[:,'x':'z'].values)
rater_error = np.sqrt(Coor_Diff[:,0]**2 + Coor_Diff[:,1]**2 + Coor_Diff[:,2]**2)
single_rater_data['rater_error'] = rater_error
error_idx = single_rater_data['rater_error'] > 5.0
check_data = pd.DataFrame({'subject': single_rater_data.loc[error_idx,'subject'].values,
'fid': single_rater_data.loc[error_idx,'fid'].values,
'x': single_rater_data.loc[error_idx,'x'].values,
'y': single_rater_data.loc[error_idx,'y'].values,
'z': single_rater_data.loc[error_idx,'z'].values,
'x_diff': Coor_Diff[error_idx,0],
'y_diff': Coor_Diff[error_idx,1],
'z_diff': Coor_Diff[error_idx,2]})
check_data = check_data.sort_values(['subject', 'fid'], ascending=[True, True])
#%%
GS_raters = ["GG", "MA"]
NGS_raters = [x for x in raters if x not in GS_raters]
GS_mean = Data_comp[Data_comp['rater'].isin(GS_raters)].groupby(['subject','fid'])[['x','y','z']].mean().reset_index()
NGS_mean = Data_comp[Data_comp['rater'].isin(NGS_raters)].groupby(['subject','fid'])[['x','y','z']].mean().reset_index()
GS_Diff = GS_mean.loc[:,'x':'z'].values - NGS_mean.loc[:,'x':'z'].values
GS_error_rate = np.sqrt(GS_Diff[:,0]**2 + GS_Diff[:,1]**2 + GS_Diff[:,2]**2)
GS_Diff_mean = pd.DataFrame(np.c_[GS_Diff, GS_mean['subject'].values, GS_mean['fid'].values]).groupby([4])[[0,1,2]].mean()
GS_total_mean = GS_mean.groupby(['fid'])[['x','y','z']].mean().reset_index()
rater_mean = Data_comp.groupby(['rater','fid'])[['x','y','z']].mean().reset_index()
#%%
plot_fiducials(rater_mean, GS_total_mean, data_dir_out, 1, False)
plot_fiducials(rater_mean, GS_total_mean, data_dir_out, 2, False)
plot_fiducials(data_from_mcp_avg, None, data_dir_out, 3, False)
#%%
comparisons = [("GG", 'MA'),("GG", 'AT'),("GG", 'RC'),("GG", 'MJ'),("MA", 'AT'),
("MA", 'RC'),("MA", 'MJ'),("AT", 'RC'),("AT", 'MJ'),("RC", 'MJ')]
max_val = 8.0
fig, axes = plt.subplots(4, 2)
plot_cnt = 0
for irow in range(4):
for icol in range(2):
rater_1 = comparisons[plot_cnt][0]
rater_2 = comparisons[plot_cnt][1]
rater_1_data = Data_comp[Data_comp['rater'].isin([rater_1])].reset_index()
rater_2_data = Data_comp[Data_comp['rater'].isin([rater_2])].reset_index()
rater_coor_Diff = rater_1_data.loc[:,'x':'z'].values.astype(float) - rater_2_data.loc[:,'x':'z'].values.astype(float)
rater_coor_error = pd.DataFrame(np.sqrt(rater_coor_Diff[:,0]**2 + rater_coor_Diff[:,1]**2 + rater_coor_Diff[:,2]**2))
rater_coor_error.rename(columns={0:'error'}, inplace=True)
fid_names = [fid_dic[x] for x in np.unique(rater_1_data['fid'].values.astype(int))]
rater_coor_error['name']= fid_names*int(len(rater_coor_Diff)/len(fid_names))
rater_coor_error['fid']= list(np.unique(rater_1_data['fid'].values.astype(int)))*int(len(rater_coor_Diff)/len(fid_names))
        rater_coor_error_plot = pd.DataFrame({})
# ---
# jupyter:
# jupytext:
# formats: jupyter_scripts//ipynb,ifis_tools//py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.3'
# jupytext_version: 1.0.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # database_tools:
#
# Set of tools to connect to the data base, put and get data from them.
import psycopg2
from psycopg2 import sql
import pandas as pd
from datetime import datetime
from climata.usgs import InstantValueIO, DailyValueIO
import numpy as np
from ifis_tools import auxiliar as aux
data_usr = None
data_pass = None
data_host = "s-iihr51.iihr.uiowa.edu"
data_base = "research_environment"
data_port = "5435"
# +
def DataBaseConnect(user = "iihr_student", password = "<PASSWORD>", host = data_host,
port = "5435", database = "research_environment"):
    '''Connect to the database that has stored the usgs information'''
con = psycopg2.connect(user = user,
password = password,
host = host,
port = port,
database = database)
return con
def SQL_getSubLinks(linkid):
'''returns the list of links that belong to a certain link.'''
con = DataBaseConnect(user='nicolas',password='<PASSWORD>',database='rt_precipitation')
query = 'SELECT nodeX.link_id AS link_id FROM students.env_master_km AS nodeX, students.env_master_km AS parentX WHERE (nodeX.left BETWEEN parentX.left AND parentX.right) AND parentX.link_id = '+str(linkid)
Data = pd.read_sql(query, con)
Data = Data.values.T[0]
Data.sort()
con.close()
return Data
def SQL_read_USGS_Streamflow(usgs_id, date1, date2, schema = 'pers_nico',
table = 'data_usgs', time_name = 'unix_time', data_name = 'val', usgs_name = 'usgs_id'):
'''Read streamflow data from IIHR database "research_environment"
and returns it as a pandas.DataFrame element.
Parameters:
- usgs_id: code of the usgs.
- date1: initial date of the query.
- date2: final date of the query.
Optional:
- schema: where to obtain data in the databse.
- table: master table with the usgs data.
- time_name: the name of the column that has the time.
- data_name: the name of the column that has the data.
- usgs_name: the name of the column that has the id of the usgs stations.
Returns:
- pandas.DataFrame containing the streamflow data.'''
#make the connection
con = DataBaseConnect(user = 'nicolas', password = '<PASSWORD>')
#Work with dates and usgs id
date1 = str(aux.__datetime2unix__(date1))
date2 = str(aux.__datetime2unix__(date2))
if type(usgs_id) is not str:
usgs_id = str(usgs_id)
#make the querty
query = sql.SQL("SELECT "+time_name+", "+data_name+" FROM "+schema+"."+table+" WHERE "+time_name+" BETWEEN "+date1+" and "+date2+" AND "+usgs_name+"='"+usgs_id+"'")
#Make the consult.
Data = pd.read_sql(query, con, index_col='unix_time',parse_dates={'unix_time':{'unit':'s'}})
con.close()
return Data
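# Illustrative usage sketch (the gauge id and date range are hypothetical and a
# live connection to the research database is required); kept inside a function
# so that importing this module stays side-effect free. 'val' is the default
# data column name used by the query above.
def _example_streamflow_query():
    q = SQL_read_USGS_Streamflow('05454500',
                                 datetime(2008, 5, 1),
                                 datetime(2008, 8, 1))
    return q['val'].resample('1D').mean()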
def WEB_Get_USGS(usgs_code, date1, date2, variable = '00060'):
'''Get USGS data from the web using the climdata interface
    Parameters (still needs to be tested):
- usgs_code: the code of the station to obtain.
- date1: initial date.
- date2: final date.
- variable:
- 00060 for streamflow.
- 00065 for height'''
#Get the data form the web
data = InstantValueIO(
start_date = pd.Timestamp(date1),
end_date = pd.Timestamp(date2),
station = usgs_code,
        parameter = variable)  # honour the requested parameter code instead of hard-coding streamflow
try:
#Convert the data into a pandas series
for series in data:
flow = [r[0] for r in series.data]
dates = [r[1] for r in series.data]
#Obtain the series of pandas
Q = pd.Series(flow, pd.to_datetime(dates, utc=True)) * 0.02832
Index = [d.replace(tzinfo = None) for d in Q.index]
Q.index = Index
except:
#Convert the data into a pandas series
for series in data:
flow = [r[1] for r in series.data]
dates = [r[0] for r in series.data]
#Obtain the series of pandas
Q = pd.Series(flow, pd.to_datetime(dates, utc=True)) * 0.02832
Index = [d.replace(tzinfo = None) for d in Q.index]
Q.index = Index
return Q
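# Illustrative usage sketch (station code and dates are hypothetical; requires
# network access to the USGS web service).
def _example_web_query():
    return WEB_Get_USGS('05454500', '2008-05-01', '2008-08-01', variable='00060')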
#SQL Query to obtain the data from per_felipe.pois_adv_geom
def SQL_USGS_at_IFIS():
'''Return the list of the usgs stations in the IFIS system and the linkID where they
belong.'''
#make the connection
con = DataBaseConnect(user = 'nicolas', password = '<PASSWORD>')
#Query for the stations
query = sql.SQL("SELECT foreign_id,link_id FROM pers_felipe.pois_adv_geom where type in (2,3) and foreign_id like '0%' AND link_id < 620000")
#make the consult
cur = con.cursor()
cur.execute(query)
L = cur.fetchall()
cur.close()
con.close()
#Obtains a dictionary in which stations are the key
DicUSGSinIFIS = {}
for l in L:
DicUSGSinIFIS.update({l[0]:l[1]})
return DicUSGSinIFIS
def SQL_USGS_at_MATC():
'''Return the list of stations that are in the databse pers_nico (matc).'''
#make the connection
con = DataBaseConnect(user = 'nicolas', password = '<PASSWORD>')
#Make the query
query = sql.SQL("SELECT DISTINCT(usgs_id) FROM pers_nico.data_usgs_2008")
cur = con.cursor()
cur.execute(query)
L = cur.fetchall()
cur.close()
con.close()
return [l[0] for l in L]
def SQL_Get_linkArea(linkID, upArea = True):
'''Obtains the up area for a link ID'''
#The query and the obtentions
con = DataBaseConnect('nicolas','10A28Gir0',database='restore_res_env_92')
cur = con.cursor()
if upArea:
q = sql.SQL("SELECT up_area FROM public.env_master_km where link_id="+str(linkID))
else:
q = sql.SQL("SELECT area FROM public.env_master_km where link_id="+str(linkID))
cur.execute(q)
A = cur.fetchall()
cur.close()
con.close()
return A[0][0]
def SQL_Get_Coordinates(linkID):
con = DataBaseConnect(user='nicolas',password='<PASSWORD>')
cur = con.cursor()
LatLng = {}
query = sql.SQL('SELECT lat, lng FROM pers_felipe.pois_adv_geom where link_id = '+str(linkID))
cur.execute(query)
Coord = cur.fetchall()
con.close()
return float(Coord[0][0]),float(Coord[0][1])
def SQL_Read_MeanRainfall(link_id, date1, date2, schema = 'pers_nico',
table = 's4mrain', time_name = 'unix_time', data_name = 'rain', linkid_name = 'link_id'):
'''DEPRECATED Read streamflow data from IIHR database "research_environment"
and returns it as a pandas.DataFrame element.
Parameters:
- usgs_id: code of the usgs.
- date1: initial date of the query.
- date2: final date of the query.
Optional:
- schema: where to obtain data in the databse.
- table: master table with the usgs data.
- time_name: the name of the column that has the time.
- data_name: the name of the column that has the data.
- usgs_name: the name of the column that has the id of the usgs stations.
Returns:
- pandas.DataFrame containing the streamflow data.'''
#make the connection
con = DataBaseConnect(user = 'nicolas', password = '<PASSWORD>')
#Work with dates and usgs id
date1 = str(aux.__datetime2unix__(date1))
date2 = str(aux.__datetime2unix__(date2))
if type(link_id) is not str:
link_id = str(link_id)
#make the querty
query = sql.SQL("SELECT "+time_name+", "+data_name+" FROM "+schema+"."+table+" WHERE "+time_name+" BETWEEN "+date1+" and "+date2+" AND "+linkid_name+"='"+link_id+"'")
#Make the consult.
Data = pd.read_sql(query, con, index_col='unix_time',parse_dates={'unix_time':{'unit':'s'}})
con.close()
#Organize rainfall
Data = Data.sort_index()
    Dates = pd.date_range(Data.index[0], Data.index[-1], freq='1h')
"""Integration tests for the HyperTransformer."""
import re
from copy import deepcopy
from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
from rdt import HyperTransformer
from rdt.errors import Error, NotFittedError
from rdt.transformers import (
DEFAULT_TRANSFORMERS, BaseTransformer, BinaryEncoder, FloatFormatter, FrequencyEncoder,
OneHotEncoder, UnixTimestampEncoder, get_default_transformer, get_default_transformers)
class DummyTransformerNumerical(BaseTransformer):
INPUT_SDTYPE = 'categorical'
OUTPUT_SDTYPES = {
'value': 'float'
}
def _fit(self, data):
pass
def _transform(self, data):
return data.astype(float)
def _reverse_transform(self, data):
return data.astype(str)
class DummyTransformerNotMLReady(BaseTransformer):
INPUT_SDTYPE = 'datetime'
OUTPUT_SDTYPES = {
'value': 'categorical',
}
def _fit(self, data):
pass
def _transform(self, data):
# Stringify input data
return data.astype(str)
def _reverse_transform(self, data):
return data.astype('datetime64')
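# Note: because this dummy transformer outputs a non ML-ready 'categorical' sdtype,
# the HyperTransformer is expected to chain a second transformer onto its output;
# test_hypertransformer_field_transformers below relies on exactly that behaviour.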
TEST_DATA_INDEX = [4, 6, 3, 8, 'a', 1.0, 2.0, 3.0]
def get_input_data():
datetimes = pd.to_datetime([
'2010-02-01',
'2010-02-01',
'2010-01-01',
'2010-01-01',
'2010-01-01',
'2010-02-01',
'2010-01-01',
'2010-01-01',
])
data = pd.DataFrame({
'integer': [1, 2, 1, 3, 1, 4, 2, 3],
'float': [0.1, 0.2, 0.1, 0.2, 0.1, 0.4, 0.2, 0.3],
'categorical': ['a', 'a', 'b', 'b', 'a', 'b', 'a', 'a'],
'bool': [False, False, False, True, False, False, True, False],
'datetime': datetimes,
'names': ['Jon', 'Arya', 'Arya', 'Jon', 'Jon', 'Sansa', 'Jon', 'Jon'],
}, index=TEST_DATA_INDEX)
return data
def get_transformed_data():
datetimes = [
1.264982e+18,
1.264982e+18,
1.262304e+18,
1.262304e+18,
1.262304e+18,
1.264982e+18,
1.262304e+18,
1.262304e+18
]
return pd.DataFrame({
'integer.value': [1, 2, 1, 3, 1, 4, 2, 3],
'float.value': [0.1, 0.2, 0.1, 0.2, 0.1, 0.4, 0.2, 0.3],
'categorical.value': [0.3125, 0.3125, .8125, 0.8125, 0.3125, 0.8125, 0.3125, 0.3125],
'bool.value': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0],
'datetime.value': datetimes,
'names.value': [0.3125, 0.75, 0.75, 0.3125, 0.3125, 0.9375, 0.3125, 0.3125]
}, index=TEST_DATA_INDEX)
def get_reversed_data():
data = get_input_data()
data['bool'] = data['bool'].astype('object')
return data
DETERMINISTIC_DEFAULT_TRANSFORMERS = deepcopy(DEFAULT_TRANSFORMERS)
DETERMINISTIC_DEFAULT_TRANSFORMERS['categorical'] = FrequencyEncoder
@patch('rdt.transformers.DEFAULT_TRANSFORMERS', DETERMINISTIC_DEFAULT_TRANSFORMERS)
def test_hypertransformer_default_inputs():
"""Test the HyperTransformer with default parameters.
This tests that if default parameters are provided to the HyperTransformer,
the ``default_transformers`` method will be used to determine which
transformers to use for each field.
Setup:
- Patch the ``DEFAULT_TRANSFORMERS`` to use the ``FrequencyEncoder``
for categorical sdtypes, so that the output is predictable.
Input:
- A dataframe with every sdtype.
        - A fixed random seed to guarantee the same values are null.
Expected behavior:
- The transformed data should contain all the ML ready data.
- The reverse transformed data should be the same as the input.
"""
# Setup
datetimes = pd.to_datetime([
np.nan,
'2010-02-01',
'2010-01-01',
'2010-01-01',
'2010-01-01',
'2010-02-01',
'2010-01-01',
'2010-01-01',
])
data = pd.DataFrame({
'integer': [1, 2, 1, 3, 1, 4, 2, 3],
'float': [0.1, 0.2, 0.1, np.nan, 0.1, 0.4, np.nan, 0.3],
'categorical': ['a', 'a', np.nan, 'b', 'a', 'b', 'a', 'a'],
'bool': [False, np.nan, False, True, False, np.nan, True, False],
'datetime': datetimes,
'names': ['Jon', 'Arya', 'Arya', 'Jon', 'Jon', 'Sansa', 'Jon', 'Jon'],
}, index=TEST_DATA_INDEX)
# Run
ht = HyperTransformer()
ht.detect_initial_config(data)
ht.fit(data)
transformed = ht.transform(data)
reverse_transformed = ht.reverse_transform(transformed)
# Assert
expected_datetimes = [
1.263069e+18,
1.264982e+18,
1.262304e+18,
1.262304e+18,
1.262304e+18,
1.264982e+18,
1.262304e+18,
1.262304e+18
]
expected_transformed = pd.DataFrame({
'integer.value': [1, 2, 1, 3, 1, 4, 2, 3],
'float.value': [0.1, 0.2, 0.1, 0.2, 0.1, 0.4, 0.2, 0.3],
'categorical.value': [0.3125, 0.3125, 0.9375, 0.75, 0.3125, 0.75, 0.3125, 0.3125],
'bool.value': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0],
'datetime.value': expected_datetimes,
'names.value': [0.3125, 0.75, 0.75, 0.3125, 0.3125, 0.9375, 0.3125, 0.3125]
}, index=TEST_DATA_INDEX)
pd.testing.assert_frame_equal(transformed, expected_transformed)
reversed_datetimes = pd.to_datetime([
'2010-01-09 20:34:17.142857216',
'2010-02-01',
'2010-01-01',
'2010-01-01',
'2010-01-01',
'2010-02-01',
'2010-01-01',
'2010-01-01',
])
expected_reversed = pd.DataFrame({
'integer': [1, 2, 1, 3, 1, 4, 2, 3],
'float': [0.1, 0.2, 0.1, 0.20000000000000004, 0.1, 0.4, 0.20000000000000004, 0.3],
'categorical': ['a', 'a', np.nan, 'b', 'a', 'b', 'a', 'a'],
'bool': [False, False, False, True, False, False, True, False],
'datetime': reversed_datetimes,
'names': ['Jon', 'Arya', 'Arya', 'Jon', 'Jon', 'Sansa', 'Jon', 'Jon'],
}, index=TEST_DATA_INDEX)
for row in range(reverse_transformed.shape[0]):
for column in range(reverse_transformed.shape[1]):
expected = expected_reversed.iloc[row, column]
actual = reverse_transformed.iloc[row, column]
assert pd.isna(actual) or expected == actual
assert isinstance(ht._transformers_tree['integer']['transformer'], FloatFormatter)
assert ht._transformers_tree['integer']['outputs'] == ['integer.value']
assert isinstance(ht._transformers_tree['float']['transformer'], FloatFormatter)
assert ht._transformers_tree['float']['outputs'] == ['float.value']
assert isinstance(ht._transformers_tree['categorical']['transformer'], FrequencyEncoder)
assert ht._transformers_tree['categorical']['outputs'] == ['categorical.value']
assert isinstance(ht._transformers_tree['bool']['transformer'], BinaryEncoder)
assert ht._transformers_tree['bool']['outputs'] == ['bool.value']
assert isinstance(ht._transformers_tree['datetime']['transformer'], UnixTimestampEncoder)
assert ht._transformers_tree['datetime']['outputs'] == ['datetime.value']
assert isinstance(ht._transformers_tree['names']['transformer'], FrequencyEncoder)
assert ht._transformers_tree['names']['outputs'] == ['names.value']
get_default_transformers.cache_clear()
get_default_transformer.cache_clear()
def test_hypertransformer_field_transformers():
"""Test the HyperTransformer with ``field_transformers`` provided.
This tests that the transformers specified in the ``field_transformers``
argument are used. Any output of a transformer that is not ML ready (not
in the ``_transform_output_sdtypes`` list) should be recursively transformed
till it is.
Setup:
- The datetime column is set to use a dummy transformer that stringifies
the input. That output is then set to use the categorical transformer.
Input:
- A dict mapping each field to a transformer.
- A dataframe with every sdtype.
Expected behavior:
- The transformed data should contain all the ML ready data.
- The reverse transformed data should be the same as the input.
"""
# Setup
config = {
'sdtypes': {
'integer': 'numerical',
'float': 'numerical',
'categorical': 'categorical',
'bool': 'boolean',
'datetime': 'datetime',
'names': 'categorical'
},
'transformers': {
'integer': FloatFormatter(missing_value_replacement='mean'),
'float': FloatFormatter(missing_value_replacement='mean'),
'categorical': FrequencyEncoder,
'bool': BinaryEncoder(missing_value_replacement='mode'),
'datetime': DummyTransformerNotMLReady,
'names': FrequencyEncoder
}
}
data = get_input_data()
# Run
ht = HyperTransformer()
ht.detect_initial_config(data)
ht.set_config(config)
ht.fit(data)
transformed = ht.transform(data)
reverse_transformed = ht.reverse_transform(transformed)
# Assert
expected_transformed = get_transformed_data()
rename = {'datetime.value': 'datetime.value.value'}
expected_transformed = expected_transformed.rename(columns=rename)
transformed_datetimes = [0.8125, 0.8125, 0.3125, 0.3125, 0.3125, 0.8125, 0.3125, 0.3125]
expected_transformed['datetime.value.value'] = transformed_datetimes
pd.testing.assert_frame_equal(transformed, expected_transformed)
expected_reversed = get_reversed_data()
pd.testing.assert_frame_equal(expected_reversed, reverse_transformed)
def test_single_category():
"""Test that categorical variables with a single value are supported."""
# Setup
ht = HyperTransformer()
data = pd.DataFrame({
'a': ['a', 'a', 'a']
})
# Run
ht.detect_initial_config(data)
ht.update_transformers(column_name_to_transformer={
'a': OneHotEncoder()
})
ht.fit(data)
transformed = ht.transform(data)
reverse = ht.reverse_transform(transformed)
# Assert
pd.testing.assert_frame_equal(data, reverse)
def test_dtype_category():
"""Test that categorical variables of dtype category are supported."""
# Setup
data = pd.DataFrame({'a': ['a', 'b', 'c']}, dtype='category')
# Run
ht = HyperTransformer()
ht.detect_initial_config(data)
ht.fit(data)
transformed = ht.transform(data)
reverse = ht.reverse_transform(transformed)
# Assert
pd.testing.assert_frame_equal(reverse, data)
def test_multiple_fits():
"""HyperTransformer should be able to be used multiple times.
Fitting, transforming and reverse transforming should produce the same results when
called on the same data multiple times.
"""
# Setup
data = get_input_data()
ht = HyperTransformer()
# Run
ht.detect_initial_config(data)
ht.fit(data)
transformed1 = ht.transform(data)
reversed1 = ht.reverse_transform(transformed1)
ht.detect_initial_config(data)
ht.fit(data)
transformed2 = ht.transform(data)
reversed2 = ht.reverse_transform(transformed2)
# Assert
pd.testing.assert_frame_equal(transformed1, transformed2)
pd.testing.assert_frame_equal(reversed1, reversed2)
def test_multiple_fits_different_data():
"""HyperTransformer should be able to be used multiple times regardless of the data.
Fitting, transforming and reverse transforming should work when called on different data.
"""
# Setup
data = pd.DataFrame({'col1': [1, 2, 3], 'col2': [1.0, 0.0, 0.0]})
new_data = pd.DataFrame({'col2': [1, 2, 3], 'col1': [1.0, 0.0, 0.0]})
ht = HyperTransformer()
# Run
ht.detect_initial_config(data)
ht.fit(data)
ht.detect_initial_config(new_data)
ht.fit(new_data)
transformed1 = ht.transform(new_data)
transformed2 = ht.transform(new_data)
reverse1 = ht.reverse_transform(transformed1)
reverse2 = ht.reverse_transform(transformed2)
# Assert
expected_transformed = pd.DataFrame({'col2.value': [1, 2, 3], 'col1.value': [1.0, 0.0, 0.0]})
pd.testing.assert_frame_equal(transformed1, expected_transformed)
pd.testing.assert_frame_equal(transformed2, expected_transformed)
pd.testing.assert_frame_equal(reverse1, new_data)
pd.testing.assert_frame_equal(reverse2, new_data)
def test_multiple_fits_different_columns():
"""HyperTransformer should be able to be used multiple times regardless of the data.
Fitting, transforming and reverse transforming should work when called on different data.
"""
# Setup
data = pd.DataFrame({'col1': [1, 2, 3], 'col2': [1.0, 0.0, 0.0]})
new_data = pd.DataFrame({'col3': [1, 2, 3], 'col4': [1.0, 0.0, 0.0]})
ht = HyperTransformer()
# Run
ht.detect_initial_config(data)
ht.fit(data)
ht.detect_initial_config(new_data)
ht.fit(new_data)
transformed1 = ht.transform(new_data)
transformed2 = ht.transform(new_data)
reverse1 = ht.reverse_transform(transformed1)
reverse2 = ht.reverse_transform(transformed2)
# Assert
expected_transformed = pd.DataFrame({'col3.value': [1, 2, 3], 'col4.value': [1.0, 0.0, 0.0]})
pd.testing.assert_frame_equal(transformed1, expected_transformed)
pd.testing.assert_frame_equal(transformed2, expected_transformed)
pd.testing.assert_frame_equal(reverse1, new_data)
    pd.testing.assert_frame_equal(reverse2, new_data)
import pandas as pd
import logging
import numpy as np
import collections
import configparser
import shutil
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.dates as mdates
import requests
import io
from astropy.io import fits
from astropy.time import Time
from pathlib import Path
from matplotlib.backends.backend_pdf import PdfPages
import sphere
import sphere.utils as utils
import sphere.toolbox as toolbox
_log = logging.getLogger(__name__)
# WFS wavelength
wave_wfs = 500e-9
class Reduction(object):
'''
SPHERE/SPARTA dataset reduction class
The analysis and plotting code of this class was originally
developed by <NAME> (ESO/IPAG) and based on SAXO tools
from Jean-<NAME> (ONERA). See:
https://github.com/jmilou/sparta
for the code from <NAME>.
'''
##################################################
# Class variables
##################################################
# specify for each recipe which other recipes need to have been executed before
recipe_requirements = collections.OrderedDict([
('sort_files', []),
('sph_sparta_dtts', ['sort_files']),
('sph_sparta_wfs_parameters', ['sort_files']),
('sph_sparta_atmospheric_parameters', ['sort_files']),
('sph_query_databases', ['sort_files']),
('sph_sparta_plot', ['sort_files', 'sph_sparta_dtts', 'sph_sparta_wfs_parameters', 'sph_sparta_atmospheric_parameters']),
('sph_sparta_clean', [])
])
##################################################
# Constructor
##################################################
def __new__(cls, path, log_level='info', sphere_handler=None):
'''
Custom instantiation for the class
The customized instantiation enables to check that the
provided path is a valid reduction path. If not, None will be
returned for the reduction being created. Otherwise, an
instance is created and returned at the end.
Parameters
----------
path : str
Path to the directory containing the dataset
level : {'debug', 'info', 'warning', 'error', 'critical'}
The log level of the handler
sphere_handler : log handler
Higher-level SPHERE.Dataset log handler
'''
#
# make sure we are dealing with a proper reduction directory
#
# init path
path = Path(path).expanduser().resolve()
# zeroth-order reduction validation
raw = path / 'raw'
if not raw.exists():
_log.error('No raw/ subdirectory. {0} is not a valid reduction path'.format(path))
return None
else:
reduction = super(Reduction, cls).__new__(cls)
#
# basic init
#
# init path
reduction._path = utils.ReductionPath(path)
# instrument and mode
reduction._instrument = 'SPARTA'
#
# logging
#
logger = logging.getLogger(str(path))
logger.setLevel(log_level.upper())
if logger.hasHandlers():
for hdlr in logger.handlers:
logger.removeHandler(hdlr)
handler = logging.FileHandler(reduction._path.products / 'reduction.log', mode='w', encoding='utf-8')
formatter = logging.Formatter('%(asctime)s\t%(levelname)8s\t%(message)s')
formatter.default_msec_format = '%s.%03d'
handler.setFormatter(formatter)
logger.addHandler(handler)
if sphere_handler:
logger.addHandler(sphere_handler)
reduction._logger = logger
reduction._logger.info('Creating SPARTA reduction at path {}'.format(path))
#
# configuration
#
reduction._logger.debug('> read default configuration')
configfile = f'{Path(sphere.__file__).parent}/instruments/{reduction._instrument}.ini'
config = configparser.ConfigParser()
reduction._logger.debug('Read configuration')
config.read(configfile)
# reduction parameters
reduction._config = dict(config.items('reduction'))
for key, value in reduction._config.items():
try:
val = eval(value)
except NameError:
val = value
reduction._config[key] = val
#
# reduction and recipe status
#
reduction._status = sphere.INIT
reduction._recipes_status = collections.OrderedDict()
for recipe in reduction.recipe_requirements.keys():
reduction._update_recipe_status(recipe, sphere.NOTSET)
# reload any existing data frames
reduction._read_info()
reduction._logger.warning('#########################################################')
reduction._logger.warning('# WARNING! #')
reduction._logger.warning('# Support for SPARTA files is preliminary. The current #')
reduction._logger.warning('# format of product files may change in future versions #')
reduction._logger.warning('# of the pipeline until an appropriate format is found. #')
reduction._logger.warning('# Please do not blindly rely on the current format. #')
reduction._logger.warning('#########################################################')
#
# return instance
#
return reduction
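        # Illustrative usage (the path below is hypothetical):
        #     red = Reduction('/data/SPARTA/2019-07-01/', log_level='info')
        #     red.config, red.status    # reduction parameters and overall state
        # Recipes are expected to be run in an order compatible with
        # recipe_requirements (sort_files first, sph_sparta_plot near the end).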
##################################################
# Representation
##################################################
def __repr__(self):
return '<Reduction, instrument={}, path={}, log={}>'.format(self._instrument, self._path, self.loglevel)
    def __format__(self, format_spec):
        return self.__repr__()
##################################################
# Properties
##################################################
@property
def loglevel(self):
return logging.getLevelName(self._logger.level)
@loglevel.setter
def loglevel(self, level):
self._logger.setLevel(level.upper())
@property
def instrument(self):
return self._instrument
@property
def path(self):
return self._path
@property
def files_info(self):
return self._files_info
@property
def dtts_info(self):
return self._dtts_info
@property
def visloop_info(self):
return self._visloop_info
@property
def irloop_info(self):
return self._irloop_info
@property
def atmospheric_info(self):
return self._atmos_info
@property
def recipe_status(self):
return self._recipes_status
@property
def config(self):
return self._config
@property
def status(self):
return self._status
##################################################
# Private methods
##################################################
def _read_info(self):
'''
Read the files, calibs and frames information from disk
files_info : dataframe
The data frame with all the information on files
This function is not supposed to be called directly by the user.
'''
self._logger.info('Read existing reduction information')
# path
path = self.path
# files info
fname = path.preproc / 'files.csv'
if fname.exists():
self._logger.debug('> read files.csv')
files_info = pd.read_csv(fname, index_col=0)
# convert times
files_info['DATE-OBS'] = pd.to_datetime(files_info['DATE-OBS'], utc=False)
files_info['DATE'] = pd.to_datetime(files_info['DATE'], utc=False)
# update recipe execution
self._update_recipe_status('sort_files', sphere.SUCCESS)
else:
files_info = None
# DTTS info
fname = path.products / 'dtts_frames.csv'
if fname.exists():
self._logger.debug('> read dtts_frames.csv')
dtts_info = pd.read_csv(fname, index_col=0)
# convert times
dtts_info['DATE-OBS'] = pd.to_datetime(dtts_info['DATE-OBS'], utc=False)
dtts_info['DATE'] = pd.to_datetime(dtts_info['DATE'], utc=False)
dtts_info['TIME'] = pd.to_datetime(dtts_info['TIME'], utc=False)
# update recipe execution
self._update_recipe_status('sph_sparta_dtts', sphere.SUCCESS)
else:
dtts_info = None
# VisLoop info
fname = path.products / 'visloop_info.csv'
visloop = False
if fname.exists():
self._logger.debug('> read visloop_info.csv')
visloop_info = pd.read_csv(fname, index_col=0)
# convert times
visloop_info['DATE-OBS'] = pd.to_datetime(visloop_info['DATE-OBS'], utc=False)
visloop_info['DATE'] = pd.to_datetime(visloop_info['DATE'], utc=False)
visloop_info['TIME'] = pd.to_datetime(visloop_info['TIME'], utc=False)
visloop = True
else:
visloop_info = None
# IRLoop info
fname = path.products / 'irloop_info.csv'
irloop = False
if fname.exists():
self._logger.debug('> read irloop_info.csv')
irloop_info = pd.read_csv(fname, index_col=0)
# convert times
irloop_info['DATE-OBS'] = pd.to_datetime(irloop_info['DATE-OBS'], utc=False)
irloop_info['DATE'] = pd.to_datetime(irloop_info['DATE'], utc=False)
irloop_info['TIME'] = pd.to_datetime(irloop_info['TIME'], utc=False)
irloop = True
else:
irloop_info = None
# update recipe execution
if visloop and irloop:
self._update_recipe_status('sph_sparta_wfs_parameters', sphere.SUCCESS)
else:
self._update_recipe_status('sph_sparta_wfs_parameters', sphere.NOTSET)
# Atmospheric info
fname = path.products / 'atmospheric_info.csv'
if fname.exists():
self._logger.debug('> read atmospheric_info.csv')
atmos_info = pd.read_csv(fname, index_col=0)
# convert times
atmos_info['DATE-OBS'] = pd.to_datetime(atmos_info['DATE-OBS'], utc=False)
atmos_info['DATE'] = pd.to_datetime(atmos_info['DATE'], utc=False)
            atmos_info['TIME'] = pd.to_datetime(atmos_info['TIME'], utc=False)
from datetime import date
from google.oauth2 import service_account
from googleapiclient.discovery import build
import numpy as np
from repo_issues_dc import IssueReport as IR
import pandas as pd
import pathlib
credentials = service_account.Credentials.from_service_account_file(
str(pathlib.Path("auth/issue-report-generation-ff9748b57ae2.json"))
)
#Ignore unless working within an IDE
# credentials = service_account.Credentials.from_service_account_file(
# str(pathlib.Path("../auth/issue-report-generation-ff9748b57ae2.json"))
# )
scopes = credentials.with_scopes(
[
"https://www.googleapis.com/auth/spreadsheets",
"https://www.googleapis.com/auth/drive",
]
)
def create_sheet(title, repo_name, repo_owner, data):
sheets_service = build("sheets", "v4", credentials=credentials)
sheets = sheets_service.spreadsheets()
# create_body = {"properties": {"title": f"{title} {date.today()}"},
# "sheets": list(map(lambda d: {"properties": {"title": d.get("title")}}, data))}
create_body = {"properties": {"title": f"{title}: {repo_owner}/{repo_name}"},
"sheets": list(map(lambda d: {"properties": {"title": d.get("title")}}, data))}
res = sheets.create(body=create_body).execute()
spreadsheet_id = res.get("spreadsheetId")
def df_to_sheet(df):
df_columns = [np.array(df.columns)]
df_values = df.values.tolist()
df_to_sheet = np.concatenate((df_columns, df_values)).tolist()
return df_to_sheet
update_body = {
"valueInputOption": "RAW",
"data": list(map(lambda d: {"range": d.get("title"), "values": df_to_sheet(d.get("df"))}, data))
}
sheets.values().batchUpdate(spreadsheetId=spreadsheet_id, body=update_body).execute()
return res
def share_spreadsheet(spreadsheet_id, options, notify=False):
drive_service = build("drive", "v3", credentials=credentials)
res = (
drive_service.permissions()
.create(
fileId=spreadsheet_id,
body=options,
sendNotificationEmail=notify,
)
.execute()
)
return res
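# Illustrative usage sketch (the sheet title, repo fields and e-mail address are
# hypothetical); wrapped in a function so nothing runs on import.
def _example_report_upload():
    df = pd.DataFrame({"issue": ["broken build"], "state": ["open"]})
    res = create_sheet("Issue Report", "my-repo", "my-org",
                       [{"title": "issues", "df": df}])
    share_spreadsheet(res["spreadsheetId"],
                      {"type": "user", "role": "writer",
                       "emailAddress": "dev@example.com"})
    return res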
def generate_spreadsheet_link(issue_list: list) -> str:
    df = pd.DataFrame(issue_list)
import itertools
import pandas as pd
from sklearn import preprocessing
from learntools.core import *
class InteractionFeatures(CodingProblem):
_vars = ['clicks', 'interactions']
_hint = ("The easiest way to loop through the pairs is with itertools.combinations. "
"Once you have that working, for each pair of columns convert them to strings "
"then you can join them with the `+` operator. It's usually good to join with "
"a symbol like _ inbetween to ensure unique values. Now you should have a column "
"of new categorical values, you can label encoder those and add them to the "
"DataFrame")
_solution = CS("""
cat_features = ['ip', 'app', 'device', 'os', 'channel']
interactions = pd.DataFrame(index=clicks.index)
for col1, col2 in itertools.combinations(cat_features, 2):
new_col_name = '_'.join([col1, col2])
# Convert to strings and combine
new_values = clicks[col1].map(str) + "_" + clicks[col2].map(str)
encoder = preprocessing.LabelEncoder()
interactions[new_col_name] = encoder.fit_transform(new_values)
""")
def check(self, clicks, interactions_):
# interactions_ is the student's version
#%%RM_IF(PROD)%%
cat_features = ['ip', 'app', 'device', 'os', 'channel']
interactions = pd.DataFrame(index=clicks.index)
for col1, col2 in itertools.combinations(cat_features, 2):
new_col_name = '_'.join([col1, col2])
# Convert to strings and combine
new_values = clicks[col1].map(str) + "_" + clicks[col2].map(str)
encoder = preprocessing.LabelEncoder()
interactions[new_col_name] = encoder.fit_transform(new_values)
assert interactions.equals(interactions_)
class PastEventsFeature(CodingProblem):
_vars = ['count_past_events', 'clicks']
_hint = ("You can get a rolling time window using .rolling(), but first you "
"need to convert the index to a time series. The current row is "
"included in the window, but we want to count all the events before "
"the current row, so be sure to adjust the count.")
_solution = CS("""
def count_past_events(series, time_window='6H'):
series = pd.Series(series.index, index=series)
# Subtract 1 so the current event isn't counted
past_events = series.rolling(time_window).count() - 1
return past_events
""")
def check(self, student_func, clicks):
def count_past_events(series, time_window='6H'):
            series = pd.Series(series.index, index=series)
# -*- coding: utf-8 -*-
"""
Created on Thu May 22 10:14:40 2019
@author : Natacha
"""
from matplotlib.lines import Line2D
import datetime
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import glob
from astropy.io import ascii
import matplotlib.dates as mdates
from astropy.table import Table, Column
"""
Lifeguard reports data
"""
class time:
def __init__(self, day, month, year):
self.day = day
self.month = month
self.year = year
def jan_to_01(self):
list_name=['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
list_numbers=['1','2','3','4','5','6','7','8','9','10','11','12']
for i in range(len(list_name)):
if self.month==list_name[i]:
self.month=list_numbers[i]
def DayEqual(object1, object2):
if object1.day==object2.day and object1.month==object2.month and object1.year==object2.year:
return True
else:
return False
def GetVariables(filename):
"""
Return date, water temp, #of bluebottles of a file
"""
date, datee, water_temp, bluebottles, description = [], [], [], [], []
for i in range(0,len(filename)):
day=''
month=''
year=''
date.append(str(filename.Name[i][:-12]))
for j in range(0,2):
if(date[i][j]!='/'):
day+=date[i][j]
for j in range(2,len(date[i])-4):
if(date[i][j]!='/'):
month+=date[i][j]
for j in range(len(date[i])-4,len(date[i])):
if(date[i][j]!='/'):
year+=date[i][j]
if filename.Water_temp[i]!=14: #dont take values for water_temp=14C
datee.append(time(str(day),str(month),str(year)))
water_temp.append(filename.Water_temp[i])
description.append(filename.Description[i])
if filename.Bluebottles[i]=='none' or filename.Bluebottles[i]=='likely':
bluebottles.append(0.)
elif filename.Bluebottles[i]=='some':
bluebottles.append(1.)
elif filename.Bluebottles[i]=='many':
bluebottles.append(2.)
middle_date = []
final_date, final_water_temp, final_bluebottles, final_description = [], [], [], []
for l in range(len(datee)):
middle_date.append(datetime.date(int(datee[l].year), int(datee[l].month), int(datee[l].day)))
final_date.append(middle_date[0])
final_water_temp.append(water_temp[0])
final_bluebottles.append(bluebottles[0])
final_description.append(description[0])
for l in range(1,len(middle_date)):
if middle_date[l]!=middle_date[l-1]: #to only have one value per day
final_date.append(middle_date[l])
final_water_temp.append(water_temp[l])
final_bluebottles.append(bluebottles[l])
final_description.append(description[l])
return final_date, final_water_temp, final_bluebottles, final_description
def TableDiff(date1,date2,file1,file2):
"""
    Returns a table listing the elements of file1 and file2 whenever they differ on the same date.
    The last row gives the number of days with the same number of bluebottles out of all compared days.
"""
equal=0
diff=0
date=[]
first_beach=[]
second_beach=[]
print('First beach :')
first = input()
print('2nd beach :')
second = input()
for i in range(len(date1)):
for j in range(len(date2)):
if (date1[i]==date2[j]):
if int(file1[i])==int(file2[j]):
equal+=1
else:
diff+=1
date.append(date1[i])
first_beach.append(file1[i])
second_beach.append(file2[j])
t=Table([date,first_beach,second_beach], names=('date',first,second))
total=equal+diff
t.add_row((0, equal, total))
# ascii.write(t, '../outputs_observation_data/diff'+first+second+'.csv', format='csv', fast_writer=False, overwrite=True)
def PlotTemp():
"""
Save fig of number of bluebottles depending on time and water temperature
"""
location=['Clovelly','Coogee','Maroubra']
years = mdates.YearLocator() # every year
months = mdates.MonthLocator(range(0, 12), interval=2) # every 2month
years_fmt = mdates.DateFormatter('%Y')
month_fmt = mdates.DateFormatter('%m')
for i in range(len(bluebottles)):
fig=plt.figure(figsize=(12,9))
for j in range(len(bluebottles[i])):
if bluebottles[i][j]==1:
somemany=plt.scatter(date[i][j], water_temp[i][j], color='dodgerblue',s=16,marker='o')
# elif bluebottles[i][j]==0.5:
# likely=plt.scatter(date[i][j], water_temp[i][j], color='lightskyblue',s=12,alpha=0, marker='o')
else:
none=plt.scatter(date[i][j], water_temp[i][j], color='lightgrey', marker='o',s=16,alpha=0.3)
ax=plt.axes()
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(years_fmt)
ax.xaxis.set_minor_locator(months)
ax.xaxis.set_minor_formatter(month_fmt)
ax.tick_params(labelsize=12)
fig.autofmt_xdate()
plt.ylabel('Water temperature (celsius)', fontsize=16)
plt.axvline(x=datetime.date(2017,6,1))
plt.axvline(x=datetime.date(2017,9,1))
plt.axvline(x=datetime.date(2018,6,1))
plt.axvline(x=datetime.date(2018,9,1))
plt.title('Bluebottles observations : '+str(location[i]), fontsize=16)
plt.legend((somemany, none),#likely,
('observed','none'),# 'likely',
scatterpoints=1,
loc='upper left',
ncol=3,
fontsize=16)
plt.show()
fig.savefig("../../writing/plot_temp"+str(location[i])+".png",dpi=300)
def TableMonthBeach():
"""
save csv files for each month and for each beach, with the nb of
some, many, likely, none and the percentage of none
"""
observed_month=[0,0,0]
likely_month=[0,0,0]
none_month=[0,0,0]
percentage_none_month=[0,0,0]
location=['Clovelly','Coogee','Maroubra']
yearr=[2016,2017,2018,2019]
month=['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
monthh=[1,2,3,4,5,6,7,8,9,10,11,12]
f, (ax1, ax2, ax3, ax4) = plt.subplots(4, sharey=True, sharex=True)
ax=[ax1, ax2, ax3, ax4]
xbar=np.arange(0,12)
width=0.2
plt.xticks(xbar, ('Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'))
for y in range(len(yearr)):
for i in range(len(date)):
observed_month[i]=[0 for i in range(0,12)]
likely_month[i]=[0 for i in range(0,12)]
none_month[i]=[0 for i in range(0,12)]
percentage_none_month[i]=[0 for i in range(0,12)]
for i in range(len(date)):
for j in range(len(date[i])):
for m in range(len(monthh)):
if date[i][j].year==yearr[y]:
if date[i][j].month==monthh[m]:
if bluebottles[i][j]==1.:
observed_month[i][m]+=1
elif bluebottles[i][j]==2:
observed_month[i][m]+=2
elif bluebottles[i][j]==0.5:
likely_month[i][m]+=1
elif bluebottles[i][j]==0.:
none_month[i][m]+=1
percentage_none_month[i][m]=np.divide(100.*none_month[i][m],observed_month[i][m]+likely_month[i][m]+none_month[i][m])
month_beach=Table([month,observed_month[i][:12],likely_month[i][:12],none_month[i][:12], percentage_none_month[i][:12]],names=('Month','Observed','Likely','Noone','% of None'))
ascii.write(month_beach, '../outputs_observation_data/new_monthly_bluebottles_'+str(yearr[y])+'_'+location[i]+'.csv', format='csv', fast_writer=False, overwrite=True)
ax[y].set_ylabel('# observations')
ax[y].bar(xbar-width/2, observed_month[0], width=0.2, color='dodgerblue', align='center',label='observed')
ax[y].bar(xbar+width/2, none_month[0], width=0.2, color='hotpink', align='center',label='none')
plt.legend()
        ax[y].set_title("Clovelly "+str(yearr[y]))
plt.show()
def GetDateSomeLikelyNone(beach_nb,bluebottle_nb):
date_number = []
for j in range(len(date[beach_nb])):
if bluebottles[beach_nb][j]==bluebottle_nb:
date_number.append(date[beach_nb][j])
return date_number
def CalcHist(file):
observedd=[]
likelyy=[]
nonee=[]
for i in range(len(file)):
nonee.append(file.Noone[i])
observedd.append(file.Observed[i])
likelyy.append(file.Likely[i])
return observedd,likelyy, nonee
def PlotHist():
f_monthly=[]
filesmonthly = glob.glob('../outputs_observation_data/monthly*.csv')
obs=[0 for i in range(12)]
lik=[0 for i in range(12)]
non=[0 for i in range(12)]
month=np.arange(12)
for i in range(len(filesmonthly)):
f_monthly.append(pd.read_csv(filesmonthly[i]))
obs[i], lik[i], non[i]=CalcHist(f_monthly[i])
for i in range(12):
obbs=np.mean(obs[:])
liik=np.mean(lik[:])
noon=np.mean(non[:])
ax = plt.subplot(111)
bins=np.arange(1,14)
ax.set_xticks(bins[:-1])
ax.set_xticklabels(['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'])
ax.bar(month-0.2,obbs,width=0.2,color='lightskyblue',align='center',label='observed')
ax.bar(month,liik,width=0.2,color='lightpink',align='center',label='likely')
ax.bar(month+0.2,noon,width=0.2,color='grey',align='center',label='none')
plt.legend()
files_name = glob.glob('../raw_observation_data/bluebottle_lifeguard_reports/*2.xlsx') #0Clovelly 1Coogee 2Maroubra
beach=[]
date_bb=[0,1,2]
date=[0,1,2]
water_temp=[0,1,2]
bluebottles=[0,1,2]
description=[0,1,2]
date_box=[0,1,2]
for i in range(0,len(files_name)):
beach.append(pd.read_excel(files_name[i]))
for i in range(0,len(water_temp)):
date_bb[i], water_temp[i], bluebottles[i], description[i] = GetVariables(beach[i])
date[0]=date_bb[0]
date[1]=date_bb[1][:1036] #delete data before 05/2016
date[2]=date_bb[2][:1025] #delete data before 05/2016
water_temp[1]=water_temp[1][:1036]
water_temp[2]=water_temp[2][:1025] #delete data before 05/2016
bluebottles[1]=bluebottles[1][:1036]
bluebottles[2]=bluebottles[2][:1025]
description[1]=description[1][:1036]
description[2]=description[2][:1025]
for i in range(0,len(water_temp)):
date_box[i]=[GetDateSomeLikelyNone(i,0.),GetDateSomeLikelyNone(i,0.5),GetDateSomeLikelyNone(i,1.)]
#PlotHist()
#TableDiff(date[0],date[1],bluebottles[0],bluebottles[1])
#TableDiff(date[0],date[2],bluebottles[0],bluebottles[2])
#TableDiff(date[1],date[2],bluebottles[1],bluebottles[2])
#PlotTemp()
#TableMonthBeach()
"""
BOM data
"""
def GetBOMVariables(filename):
"""
    Return time, date, water temperature, wind direction and wind speed from a BOM file
"""
hour, datee, date, water_temp, wind_direction, wind_speed= [], [], [], [], [], []
for i in range(len(filename)):
if filename.Water_Temperature[i]>0:
water_temp.append(filename.Water_Temperature[i])
datee.append(filename.Date_UTC_Time[i][:11])
date.append(time(str(filename.Date_UTC_Time[i][:2]),str(filename.Date_UTC_Time[i][3:6]),str(filename.Date_UTC_Time[i][7:11])))
hour.append(int(filename.Date_UTC_Time[i][12:14]))
wind_direction.append(filename.Wind_Direction[i])
wind_speed.append(filename.Wind_Speed[i])
for i in range(len(date)):
date[i].jan_to_01()
BOMdate = []
BOMtime_UTC = np.zeros(len(date))
BOMtime = np.zeros(len(date))
for l in range(len(date)):
BOMdate.append(datetime.date(int(date[l].year), int(date[l].month), int(date[l].day)))
BOMtime_UTC[l] = BOMdate[l].toordinal() + hour[l]/24 #UTC
BOMtime = BOMtime_UTC + 10/24
return BOMtime, BOMdate, water_temp, wind_direction, wind_speed
def BoxPlot(nb,date_plot,BOMdaily):
"""
    Box plot of wind direction for beach number nb, for the 3 cases: none, likely, observed
"""
location=['Clovelly','Coogee','Maroubra']
wind_direction_box0=[]
wind_direction_box1=[]
wind_direction_box2=[]
for i in range(len(date_box[nb][0])):
for j in range(len(date_plot)):
if date_box[nb][0][i]==date_plot[j]:
if np.isnan(BOMdaily[j-1])==False:
wind_direction_box0.append(BOMdaily[j-1])
for i in range(len(date_box[nb][1])):
for j in range(len(date_plot)):
if date_box[nb][1][i]==date_plot[j]:
if np.isnan(BOMdaily[j-1])==False:
wind_direction_box1.append(BOMdaily[j-1])
for i in range(len(date_box[nb][2])):
for j in range(len(date_plot)):
if date_box[nb][2][i]==date_plot[j]:
if np.isnan(BOMdaily[j-1])==False:
wind_direction_box2.append(BOMdaily[j-1])
x=[wind_direction_box0, wind_direction_box1, wind_direction_box2]
fig = plt.figure(figsize=(12,9))
plt.title(location[nb])
plt.ylabel('Wind direction (degrees)')
plt.boxplot(x,whis=[5,95])
plt.xticks([1,2,3],['None','Likely','Some'])
plt.show()
# fig.savefig("../outputs_observation_data/box_plot_past_"+str(location[nb])+".png",dpi=300)
def DailyAverage():
t=[]
BOMwind_direction_daily=[]
for i in range(0,len(BOMdate)-1):
if BOMdate[i]!=BOMdate[i+1]:
t.append(BOMdate[i])
for j in range(len(t)):
for i in range(len(BOMwind_direction)):
if t[j]==BOMdate[i]:
if t[j]!=BOMdate[i+1]:
BOMwind_direction_daily.append(np.mean(BOMwind_direction[i-23:i+1]))
return BOMwind_direction_daily, t
def WindDirectionTime(nb, date_plot, BOMdaily):
fig=plt.figure(figsize=(12,9))
bluebottlesoupas=[]
location=['Clovelly','Coogee','Maroubra']
for i in range(len(date_plot)):
for j in range(len(date[nb])):
if date[nb][j]==date_plot[i]:
bluebottlesoupas.append(bluebottles[nb][j])
ax=plt.axes()
years = mdates.YearLocator() # every year
months = mdates.MonthLocator(range(0, 12), interval=2) # every 2month
years_fmt = mdates.DateFormatter('%Y')
month_fmt = mdates.DateFormatter('%m')
for i in range(len(bluebottlesoupas)):
if bluebottlesoupas[i]==1.0:
observed=plt.scatter(date_plot[i],BOMdaily[i-1],color='dodgerblue',s=12)
elif bluebottlesoupas[i]==0.5:
likely=plt.scatter(date_plot[i],BOMdaily[i-1],color='palegreen',s=12)
elif bluebottlesoupas[i]==0.:
none=plt.scatter(date_plot[i],BOMdaily[i-1],color='hotpink',alpha=0.3, marker='+',s=10)
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(years_fmt)
ax.xaxis.set_minor_locator(months)
ax.xaxis.set_minor_formatter(month_fmt)
fig.autofmt_xdate()
plt.ylabel('Wind Direction')
plt.title(location[nb])
plt.legend((observed, likely, none),
('observed','likely','none'),
scatterpoints=1,
loc='lower right',
ncol=3,
fontsize=8)
plt.show()
# fig.savefig("../outputs_observation_data/gust_direction_past_"+str(location[nb])+".png",dpi=300)
#file_name = '../raw_observation_data/bom_port_kembla/all_IDO.csv'
#f=pd.read_csv(file_name)
#BOMtime, BOMdate, BOMwater_temp, BOMwind_direction, BOMwind_speed = GetBOMVariables(f)
#BOMdaily,date_plot=DailyAverage()
#plt.hist(x,bins=30)
#plt.ylabel('proba')
def GetKurnellData(file):
day=np.zeros(len(file))
month=np.zeros(len(file))
year=np.zeros(len(file))
hours=np.zeros(len(file))
minutes=np.zeros(len(file))
date=[]
def GetU(daily_speed,daily_direction):
wind_u = - daily_speed/3.6 * np.sin(np.pi / 180 * daily_direction)
return wind_u
def GetV(daily_speed,daily_direction):
wind_v = - daily_speed/3.6 * np.cos(np.pi / 180 * daily_direction)
return wind_v
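    # Worked example for the helpers above (hypothetical numbers, assuming the
    # usual meteorological convention where direction is where the wind comes
    # from): a 36 km/h wind from due north (0 deg) gives
    # u = -(36/3.6)*sin(0) = 0 m/s and v = -(36/3.6)*cos(0) = -10 m/s, while a
    # wind from the east (90 deg) gives u = -10 m/s and v = 0 m/s.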
for i in range(len(file)):
minutes[i]=file.MI_local_time[i]
hours[i]=file.HH24[i]
day[i]=file.DD[i]
month[i]=file.MM[i]
year[i]=file.YY[i]
for i in range(len(file)):
date.append(datetime.date(int(year[i]),int(month[i]),int(day[i])))
return date, daily_direction, daily_speed, wind_u, wind_v, max_direction, max_speed
"""
for i in range(len(t)):
    tt0 = np.where(day_w == t[i])[0]  # take the indices where the dates are equal
Wind_speed_ms_Daily[i] = np.mean(nonans(Wind_speed_ms[tt0.astype(int)]))
"""
def PolarPlot(nb,direction):
blueb=[]
daily=[]
fig=plt.figure(figsize=(12,9))
location=['Clovelly','Coogee','Maroubra']
for i in range(len(direction)):
for j in range(len(date[nb])):
if date_plot[i]+datetime.timedelta(days=1)==date[nb][j]: #date_kurnell
daily.append(direction[i]*np.pi/180)
if bluebottles[nb][j]==0.:
blueb.append('hotpink')
elif bluebottles[nb][j]==0.5:
blueb.append('palegreen')
elif bluebottles[nb][j]==1.:
blueb.append('dodgerblue')
ax = plt.subplot(111, projection='polar')
theta = daily
r=8.*np.random.rand(len(daily))+1
colors = blueb
legend_elements = [Line2D([0],[0],marker='o',label='None', color='w',markerfacecolor='hotpink', markersize=10),
Line2D([0],[0],marker='o',label='Likely', color='w',markerfacecolor='palegreen', markersize=10),
Line2D([0],[0],marker='o',label='Observed', color='w',markerfacecolor='dodgerblue', markersize=10)]
legend1=plt.legend(handles=legend_elements, loc='lower right')
ax.add_artist(legend1)
ax.scatter(theta, r, c=colors, cmap='hsv', alpha=0.75)
ax.set_rorigin(-2.5)
ax.set_theta_zero_location('W', offset=10)
plt.title("Daily averaged wind direction (day before) at "+str(location[nb]))
plt.show()
# fig.savefig("../outputs_observation_data/with_BOMdata/polar_plot_"+str(location[nb])+".png",dpi=300)
#file_name_kurnell = '../raw_observation_data/wind_kurnell_sydney_observatory/Kurnell_Data.csv'
#file=pd.read_csv(file_name_kurnell)
#df = file.apply(pd.to_numeric, args=('coerce',)) # inserts NaNs where empty cell!!! grrrr
#date_kurnell, direction_kurnell, speed_kurnell, u_kurnell, v_kurnell, max_direction, max_speed=GetKurnellData(df)
def UVplot():
years = mdates.YearLocator() # every year
months = mdates.MonthLocator(range(0, 12), interval=2) # every 2month
years_fmt = mdates.DateFormatter('%Y')
month_fmt = mdates.DateFormatter('%m')
fig=plt.figure()
ax=plt.subplot(2,1,1)
plt.plot(date_kurnell,v_kurnell)
plt.ylabel('V')
ax1=plt.subplot(2,1,2)
plt.plot(date_kurnell,u_kurnell)
plt.ylabel('U')
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(years_fmt)
ax.xaxis.set_minor_locator(months)
ax.xaxis.set_minor_formatter(month_fmt)
ax1.xaxis.set_major_locator(years)
ax1.xaxis.set_major_formatter(years_fmt)
ax1.xaxis.set_minor_locator(months)
ax1.xaxis.set_minor_formatter(month_fmt)
plt.show()
blueb1_2017, date1_2017 = [], []
blueb1_2018, date1_2018 = [], []
for i in range(len(date[1])):
if date[1][i].year==2017:
blueb1_2017.append(bluebottles[1][i])
date1_2017.append(date[1][i])
elif date[1][i].year==2018:
blueb1_2018.append(bluebottles[1][i])
date1_2018.append(date[1][i])
blueb1_2017=pd.Series(blueb1_2017)
blueb1_2018=pd.Series(blueb1_2018)
date1_2017=pd.Series(np.asarray(date1_2017).astype('datetime64'))
date1_2018=pd.Series(np.asarray(date1_2018).astype('datetime64'))
# blueb1_2018.groupby(date1_2018.dt.week).sum().plot(kind='bar')
blueb2_2017, date2_2017 = [], []
blueb2_2018, date2_2018 = [], []
for i in range(len(date[2])):
if date[2][i].year==2017:
blueb2_2017.append(bluebottles[2][i])
date2_2017.append(date[2][i])
elif date[2][i].year==2018:
blueb2_2018.append(bluebottles[2][i])
date2_2018.append(date[2][i])
blueb2_2017=pd.Series(blueb2_2017)
blueb2_2018=pd.Series(blueb2_2018)
import os
import sqlite3
import pandas as pd
from pygbif import occurrences
from pygbif import species
from datetime import datetime
import geopandas as gpd
import shapely
import numpy as np
import fiona
from shapely.geometry import shape, Polygon, LinearRing, Point
from dwca.read import DwCAReader
import random
from shapely import wkt
# occurrece records table datatypes
output_schema = {"GBIF_download_doi": "str",
"accessRights": "str",
"basisOfRecord": "str",
"bibliographicCitation": "str",
"collectionCode": "str",
"coordinatePrecision": "float",
"coordinateUncertaintyInMeters": "float",
"dataGeneralizations": "str",
"datasetName": "str",
"decimalLatitude": "str",
"decimalLongitude": "str",
"detection_distance_m": "int",
"ebird_id": "str",
"effort_distance_m": "int",
"establishmentMeans": "str",
"eventDate": "str",
"eventRemarks": "str",
"filter_set_name": "str",
"footprintSRS": "str",
"footprintWKT": "str",
"gbif_id": "str",
"general_remarks": "str",
"geodeticDatum": "str",
"georeferenceProtocol": "str",
"georeferenceRemarks": "str",
"georeferenceVerificationStatus": "str",
"georeferencedBy": "str",
"gps_accuracy_m": "int",
"habitat": "str",
"identificationQualifier": "str",
"identifiedBy": "str",
"identifiedRemarks": "str",
"individualCount": "int",
"informationWitheld": "str",
"institutionID": "str",
"issues": "str",
"license": "str",
"locality": "str",
"locationAccordingTo": "str",
"locationRemarks": "str",
"modified": "str",
"nominal_xy_precision": "float",
"occurrenceRemarks": "str",
"occurrenceStatus": "str",
"organismQuantity": "str",
"organismQuantityType": "str",
"radius_m": "float",
"record_id": "str",
"recordedBy": "str",
"retrieval_date": "str",
"samplingProtocol": "str",
"samplingEffort": "str",
"scientificName": "str",
"source": "str",
"taxonConceptID": "str",
"taxon_info_name": "str",
"verbatimLocality": "str",
"weight": "int",
"weight_notes": "str"}
# Core functions >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
def build_output_database(output_database):
"""
Create a database for storing occurrence and taxon concept data.
The column names that are "camel case" are Darwin Core attributes, whereas
lower case names containing "_" between words are not. Only those Darwin
core attributes that could be useful for filtering and assigning weights
are included.
Parameters
----------
output_database : Path for sqlite database to create; string.
Returns
-------
Nothing.
"""
# Delete the database if it already exists
if os.path.exists(output_database):
os.remove(output_database)
# Create or connect to the database
conn = sqlite3.connect(output_database)
# Create a table for occurrence records.
df = (pd.DataFrame(columns=output_schema.keys())
.astype(output_schema)
.to_sql(name='occurrence_records', con=conn, if_exists='replace'))
conn.close()
return
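# Hedged usage sketch (the path is hypothetical): create an empty occurrence
# database before running any queries.
# build_output_database("/tmp/example_query.sqlite")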
def get_EBD_records(taxon_info, filter_set, working_directory, EBD_file,
query_name, R_home):
'''
Gets eBird records from a copy of the Ebird Basic Dataset that you
acquired. Primarily runs R code that uses the Auk package to query the
data set in an efficient manner. Some filters can be applied during the
query, but others have to be applied to the query results. Date and
bounding box filters require quite a bit of preparation and conditions.
Parameters
----------
taxon_info : your taxon concept; dictionary
filter_set : name of the filter set to apply; dictionary
working_directory : path to use for table of filtered query results; string
EBD_file : path to your downloaded copy of the Ebird Basic Dataset; string
query_name : the name you chose for your query; string
R_home : path to R install to use, get from wranglerconfig; string
Returns
-------
Data frame of eBird records
'''
# Point to R home
os.environ["R_HOME"] = R_home
import rpy2.robjects as robjects
import rpy2.robjects.packages as rpackages
from rpy2.robjects.vectors import StrVector
# import R's utility package, select a mirror for R packages
utils = rpackages.importr('utils')
# select the first mirror in the list
utils.chooseCRANmirror(ind=1)
# R packages to load
packnames = ('sf', 'auk', 'lubridate', 'tidyverse')
names_to_install = [x for x in packnames if not rpackages.isinstalled(x)]
if len(names_to_install) > 0:
utils.install_packages(StrVector(names_to_install))
# Some file names
queried_ebd = working_directory + "tmp_ebd.txt"
processed_ebd = working_directory + query_name + ".csv"
output_database = working_directory + query_name + '.sqlite'
    # Replace None values in filter_set with "" to fit R code.
for x in filter_set.keys():
if filter_set[x] == None:
filter_set[x] = ""
for x in taxon_info.keys():
if taxon_info[x] == None:
taxon_info[x] = ""
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< R CODE
code = '''
EBD_file <- "{0}"
queried_ebd <- "{1}"
processed_ebd <- "{2}"
species <- "{3}"
country <- "{4}"
months_range <- "{5}"
years_range <- "{6}"
lon_range <- "{7}"
lat_range <- "{8}"
max_coordinate_uncertainty <- {9}
taxon_polygon <- "{10}"
query_polygon <- "{11}"
# Auk uses filters that are compiled and incorporated into a query.
# This poses a challenge for dynamic filtering where filters may or may
# not be used. We have to set defaults.
library(auk)
library(tidyverse)
library(sf)
library(lubridate)
starttime = Sys.time() # Runtime has been 30 min
# prep dates -------------------------------------------------------------
# auk doesn't allow filtering on months AND year with read_ebd; they have
# to be done separately, one with auk filters and the other after with
# dplyr. I chose to do the year filtering with auk to minimize size of
# returned tibble. This all requires formatting dates as text correctly.
# format start month
if (months_range != "") {{
if (as.numeric(strsplit(months_range, ",")[[1]][1]) < 10) {{
start_month <- paste(c("0", strsplit(months_range, ",")[[1]][1]), collapse="")
}} else {{
start_month <- strsplit(months_range, ",")[[1]][1]}}
start_month <- str_trim(start_month)
# format end month
if (as.numeric(strsplit(months_range, ",")[[1]][2]) < 10) {{
end_month <- paste(c("0", strsplit(months_range, ",")[[1]][2]), collapse="")
}} else {{
end_month <- strsplit(months_range, ",")[[1]][2]}}
end_month <- str_trim(end_month)
# create vector of ok months for filtering with dplyr
ok_months <- seq(as.numeric(start_month), as.numeric(end_month))
}}
print(ok_months)
# pull out start and end years
if (years_range != "") {{
start_yr <- str_trim(strsplit(years_range, ",")[[1]][1])
end_yr <- str_trim(strsplit(years_range, ",")[[1]][2])
}}
# define data filter according to different scenarios
if (months_range == "" && years_range == "") {{
# get all dates
date_filter <- c("*-01-31", "*-12-31")
}} else if (months_range != "" && years_range != "") {{
# get certain months and years. we have to find the last possible day.
end_day <- lubridate::days_in_month(paste(c(end_yr, "-", end_month, "-01"),
collapse=""))
date_filter <- c(paste(c(start_yr, "-", start_month, "-01"), collapse=""),
paste(c(end_yr, "-", end_month, "-", end_day), collapse=""))
}} else if (months_range == "" && years_range != "") {{
# get all months from certain years
date_filter <- c(paste(c(start_yr, "-01-01"), collapse=""),
paste(c(end_yr, "-12-31"), collapse=""))
}} else if (months_range != "" && years_range == "") {{
# get certain months from all years. we have to find the last possible day.
yr <- year(today())
end_day <- lubridate::days_in_month(paste(c(yr, "-", end_month, "-01"),
collapse=""))
date_filter <- c(paste(c("*-", start_month, "-01"), collapse=""),
paste(c("*-", end_month, "-", end_day), collapse=""))
}}
# prep bounding box -------------------------------------------------------
# make a full earth extent for use below
earth <- c(-180, -90, 180, 90)
bbox <- NULL
if (query_polygon == "" && taxon_polygon == "") {{
bbox <- earth
}} else if (query_polygon != "" && taxon_polygon == "") {{
bbox <- st_bbox(st_as_sfc(query_polygon))
}} else if (query_polygon == "" && taxon_polygon != "") {{
bbox <- st_bbox(st_as_sfc(taxon_polygon))
}} else if (query_polygon != "" && taxon_polygon != "") {{
# Get/use the intersection of the two polygons
filter_polygon <- st_as_sfc(query_polygon)
sp_polygon <- st_as_sfc(taxon_polygon)
bbox <- st_bbox(st_intersection(filter_polygon, sp_polygon))
}}
# prep bounding box vector for filter if lat and lon ranges were provided,
# and if other polygons were not
if (lat_range == "" || lon_range == "") {{
null_box <- TRUE
}} else {{
null_box <- FALSE
}}
if (bbox == earth && null_box == FALSE) {{
lat_min <- as.numeric(strsplit(lat_range, ",")[[1]][1])
lat_max <- as.numeric(strsplit(lat_range, ",")[[1]][2])
lng_min <- as.numeric(strsplit(lon_range, ",")[[1]][1])
lng_max <- as.numeric(strsplit(lon_range, ",")[[1]][2])
bbox <- c(lng_min, lat_min, lng_max, lat_max)
names(bbox) <- c("xmin", "ymin", "xmax", "ymax")
attr(bbox, "class") = "bbox"
}}
# prep country ------------------------------------------------------------
if (country == "") {{country <- "US"}}
# prep distance -----------------------------------------------------------
# a gps precision for eBird checklists must be assumed, since not given,
# for estimation of coordinateUncertaintyInMeters
EBD_gps_precision <- 10
# account for gps precision in distance filter. error could exist on
# either end of a straight line path, so double precision when subtracting.
max_distance <- as.integer(ceiling((max_coordinate_uncertainty-(2*EBD_gps_precision))/1000))
print(max_distance)
# query -------------------------------------------------------------------
records0 <- EBD_file %>%
# 1. reference file
auk_ebd() %>%
# 2. define filters
auk_species(species=c(species)) %>%
auk_date(date=date_filter) %>%
auk_country(country=country) %>%
auk_bbox(bbox=bbox) %>%
auk_distance(distance=c(0, max_distance)) %>%
# 3. run filtering
auk_filter(file = queried_ebd, overwrite = TRUE) %>%
# 4. read text file into r data frame
read_ebd(unique=TRUE)
# prep data frame for python ----------------------------------------------
# add column for eBird species code
ebird_code <- select(filter(ebird_taxonomy, common_name==species),
species_code)[[1]]
ebd_data <- records0 %>%
mutate(eBird_sp_code = ebird_code,
retrieval_date = auk_ebd_version(EBD_file)[1][1]) %>%
select(eBird_sp_code, global_unique_identifier, checklist_id,
project_code, last_edited_date, common_name,
observation_count, locality, latitude, longitude,
observation_date, observer_id, effort_distance_km,
protocol_type, effort_area_ha, trip_comments,
species_comments) %>%
mutate(effort_distance_m = as.numeric(effort_distance_km)*1000) %>%
filter(month(observation_date) %in% ok_months) %>%
write_csv(processed_ebd)
endtime = Sys.time()
print(endtime - starttime)
'''.format(EBD_file, queried_ebd, processed_ebd, taxon_info['EBIRD_ID'],
filter_set["country"], filter_set["months_range"],
filter_set["years_range"], filter_set["lon_range"],
filter_set["lat_range"],
filter_set["max_coordinate_uncertainty"],
taxon_info["TAXON_EOO"], filter_set["query_polygon"])
# Run code
timestamp = datetime.now()
robjects.r(code)
print("Ran EBD query with Auk: " + str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< READ OUTPUT
records0 = pd.read_csv(processed_ebd)
'''
This should eventually be usable (or something similar) to avoid having to
write the R data frame and then read it in with pandas. It is supposed to
be possible to convert robject data frames to pandas data frames but the
rpy2 available from conda 2.x doesn't actually work.
# *************************************************************************
# Retrieve the filtered ebird data frame --- this should work, but doesn't
rdf = robjects.globalenv['ebd_data']
# Using a conversion context in which the pandas conversion is
# added to the default conversion rules, the rpy2 object
# (an R data frame) is converted to a pandas data frame.
from rpy2.robjects import pandas2ri
robjects.pandas2ri.activate() # should automatically convert r data frame to pandas
from rpy2.robjects import default_converter
from rpy2.robjects.conversion import localconverter
with localconverter(robjects.default_converter + pandas2ri.converter):
records0 = robjects.conversion.ri2py(rdf)
'''
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< APPLY SPATIAL FILTER
timestamp = datetime.now()
# Make data frame spatial
gdf = gpd.GeoDataFrame(records0,
geometry=gpd.points_from_xy(records0['longitude'],
records0['latitude']))
    # It could be that the user opted not to use species geometry.
if filter_set['use_taxon_geometry'] == False:
EOO = None
# A geometry could be stated for the species, assess what to do
# Replace "" values in fitler_set with None to fit Python code.
for x in filter_set.keys():
if filter_set[x] == "":
filter_set[x] = None
for x in taxon_info.keys():
if taxon_info[x] == "":
taxon_info[x] = None
EOO = taxon_info["TAXON_EOO"]
AOI = filter_set["query_polygon"]
if AOI is None and EOO is None:
filter_polygon = None
elif AOI is not None and EOO is None:
filter_polygon = shapely.wkt.loads(AOI)
elif AOI is None and EOO is not None:
filter_polygon = shapely.wkt.loads(EOO)
elif AOI is not None and EOO is not None:
# Get/use the intersection of the two polygons in this case
AOI_polygon = shapely.wkt.loads(AOI)
EOO_polygon = shapely.wkt.loads(EOO)
filter_polygon = AOI_polygon.intersection(EOO_polygon)
print("Calculated the spatial filter polygon: "
+ str(datetime.now() - timestamp))
# Find which records have coordinates that fall within the polygon
timestamp = datetime.now()
if filter_polygon is not None:
gdf = gdf[gdf["geometry"].within(filter_polygon)]
print("Applied spatial filter: " + str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< SUMMARIZE
timestamp = datetime.now()
df_populated1 = pd.DataFrame(records0.count(axis=0).T.iloc[1:])
df_populated1['included(n)'] = len(records0)
df_populated1['populated(n)'] = df_populated1[0]
df_populated2 = df_populated1.filter(items=['included(n)', 'populated(n)'],
axis='columns')
df_populated2.index.name = 'attribute'
conn = sqlite3.connect(output_database, isolation_level='DEFERRED')
df_populated2.to_sql(name='eBird_fields_returned', con=conn, if_exists='replace')
print("Summarized fields returned: " + str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< PREP FOR OUTPUT
timestamp = datetime.now()
# Rename columns
gdf = gdf.rename({'eBird_sp_code': 'ebird_id',
'global_unique_identifier': 'record_id',
'latitude': 'decimalLatitude',
'longitude': 'decimalLongitude',
'observation_count': 'individualCount',
'observation_date': 'eventDate',
'project_code': 'collectionCode',
'protocol_type': 'samplingProtocol',
'species_comments': 'identifiedRemarks',
'trip_comments': 'eventRemarks'}, axis=1)
# Drop columns
records1 = gdf.filter(list(output_schema.keys()), axis=1)
# Populate columns
records1["institutionID"] = "clo"
records1["collectionCode"] = "EBIRD"
records1["datasetName"] = "EBD"
records1["source"] = "eBird"
records1["basisOfRecord"] = "HUMAN_OBSERVATION"
records1["GBIF_download_doi"] = "bypassed"
records1["occurrenceStatus"] = "PRESENT"
records1 = (records1
.fillna({"effort_distance_m": 0, "gps_accuracy_m": 30})
.replace({"individualCount": {"X": 1}}))
# Add EBD records to a template data frame
schema_df = pd.DataFrame(columns=list(output_schema.keys()))
records2 = schema_df.combine_first(records1)
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< Results
print("Prepared the eBird records for processing: "
+ str(datetime.now() - timestamp))
return records2
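# Hedged usage sketch (all values are hypothetical): taxon_info and filter_set are
# the dictionaries described in the docstring above; the paths, query name, and
# R_home are placeholders for a local setup.
# ebird_df = get_EBD_records(taxon_info=my_taxon_info, filter_set=my_filter_set,
#                            working_directory="/tmp/wrangler/",
#                            EBD_file="/data/ebd_sample.txt",
#                            query_name="example_query", R_home="/usr/lib/R")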
def get_GBIF_records(taxon_info, filter_set, query_name, working_directory,
username, password, email):
'''
Retrieves species occurrence records from GBIF. Filters occurrence
records, buffers the xy points, and saves them in a database. Finally,
exports some Shapefiles.
    Gets species occurrence records from GBIF. Can accommodate use of the GBIF
API or Darwin Core Archive download via email. Some filters can be applied
during the query, but others have to be applied to the query results.
Parameters
----------
taxon_info : your taxon concept; dictionary
filter_set : name of the filter set to apply; dictionary
query_name : the name you chose for your query; string
working_directory : path to use for table of filtered query results; string
username : your GBIF username; string
password : your GBIF password; string
email : the email account associated with your GBIF account; string
Returns
-------
Data frame of GBIF occurrence records
'''
pd.set_option('display.width', 1000)
os.chdir('/')
timestamp = datetime.now()
# Some prep
output_database = working_directory + query_name + '.sqlite'
conn = sqlite3.connect(output_database, isolation_level='DEFERRED')
cursor = conn.cursor()
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< TAXON INFO
gbif_id = taxon_info["GBIF_ID"]
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< PREP FILTERS
years = filter_set["years_range"]
months = filter_set["months_range"]
latRange = filter_set["lat_range"]
lonRange = filter_set["lon_range"]
geoIssue = filter_set["geoissue"]
country = filter_set["country"]
dwca_download = filter_set["get_dwca"]
EOO = taxon_info["TAXON_EOO"]
AOI = filter_set["query_polygon"]
    # It could be that the user opted not to use species geometry.
if filter_set['use_taxon_geometry'] == False:
EOO = None
# A geometry could be stated for the species, assess what to do
if AOI is None and EOO is None:
filter_polygon = None
elif AOI is not None and EOO is None:
filter_polygon = AOI
elif AOI is None and EOO is not None:
filter_polygon = EOO
elif AOI is not None and EOO is not None:
# Get/use the intersection of the two polygons in this case
AOI_polygon = shapely.wkt.loads(AOI)
EOO_polygon = shapely.wkt.loads(EOO)
intersection = AOI_polygon.intersection(EOO_polygon)
# Make the polygon's outer ring counter clockwise
if intersection.exterior.is_ccw == False:
print("Reordered filter polygon coordinates")
intersection = shapely.geometry.polygon.orient(intersection,
sign=1.0)
# Get the well-known text version of the polygon
filter_polygon = shapely.wkt.dumps(intersection)
else:
filter_polygon = shapely.wkt.dumps(intersection)
print("Prepared filter set and sorted out geometry constraints: "
+ str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< GET RECORD COUNT
timestamp = datetime.now()
# First, find out how many records there are that meet criteria
occ_search = occurrences.search(taxonKey=gbif_id,
year=years,
month=months,
decimalLatitude=latRange,
decimalLongitude=lonRange,
hasGeospatialIssue=geoIssue,
hasCoordinate=True,
country=country,
geometry=filter_polygon)
record_count = occ_search["count"]
    # Return a message if the number of records exceeds the known dwca-reader limit
print(str(record_count) + " records available")
if record_count > 4500000:
print("!!!!!!! Too many records to proceed. Break up the query",
" with year or other parameters.")
if record_count <= 0 or record_count > 4500000:
# no records available so delete database and return empty data frame
conn.close()
os.remove(output_database)
return pd.DataFrame()
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< API QUERY
if dwca_download == False:
# Get records in batches, saving into master list.
all_jsons = []
batches = range(0, record_count, 300)
for i in batches:
batch = occurrences.search(gbif_id,
limit=300,
offset=i,
year=years,
month=months,
decimalLatitude=latRange,
decimalLongitude=lonRange,
hasGeospatialIssue=geoIssue,
hasCoordinate=True,
country=country,
geometry=filter_polygon)
occs = batch['results']
all_jsons = all_jsons + occs
# Get a list of keys that were returned
api_keys = set([])
for j in all_jsons:
api_keys = api_keys | set(j.keys())
# Load json records into a data frame, via a dictionary
insertDict = {}
for k in list(api_keys):
insertDict[k] = []
for j in all_jsons:
present_keys = list(set(j.keys()) & api_keys)
for prk in present_keys:
insertDict[prk] = insertDict[prk] + [str(j[prk])]
missing_keys = list(api_keys - set(j.keys()))
for mik in missing_keys:
insertDict[mik] = insertDict[mik] + ["UNKNOWN"]
dfRaw = pd.DataFrame(insertDict).rename({"occurrenceID": "record_id"},
axis=1)
print("Downloaded records: " + str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< EMAIL QUERY
if dwca_download == True:
timestamp = datetime.now()
'''
Request data using the download function. Results are emailed as a zip
file containing the Darwin Core files. The download can take a while
to generate and is not immediately available once the download_get
command has been issued. Use a while and try loop to handle the wait.
The zipdownload variable will be a dictionary of the path,
the file size, and the download key unique code. It can be used
to change the file name, unzip the file, etc.
'''
# First, build a query list. NoneType values cause problems, so only
# add arguments if their value isn't NoneType.
download_filters = ['taxonKey = {0}'.format(gbif_id)]
download_filters.append('hasCoordinate = True')
if country is not None:
download_filters.append('country = {0}'.format(country))
if years is not None:
download_filters.append('year >= {0}'.format(years.split(",")[0]))
download_filters.append('year <= {0}'.format(years.split(",")[1]))
if months is not None:
download_filters.append('month >= {0}'.format(months.split(",")[0]))
download_filters.append('month <= {0}'.format(months.split(",")[1]))
if filter_polygon is not None:
download_filters.append("geometry = {0}".format(filter_polygon))
if geoIssue is not None:
download_filters.append('hasGeospatialIssue = {0}'.format(geoIssue))
if latRange is not None:
download_filters.append('decimalLatitude >= {0}'.format(latRange.split(",")[0]))
download_filters.append('decimalLatitude <= {0}'.format(latRange.split(",")[1]))
if lonRange is not None:
download_filters.append('decimalLongitude >= {0}'.format(lonRange.split(",")[0]))
download_filters.append('decimalLongitude <= {0}'.format(lonRange.split(",")[1]))
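        # Illustrative example (hypothetical values): the finished list might look
        # like ['taxonKey = 2480444', 'hasCoordinate = True', 'country = US',
        # 'year >= 2015', 'year <= 2020'], which occurrences.download() below
        # compiles into a single 'and' predicate.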
# Get the value of the download key
try:
d = occurrences.download(download_filters,
pred_type='and',
user=username,
pwd=password,
email=email)
dkey = d[0]
except Exception as e:
print(e)
print(download_filters)
# Get the download, if not ready, keep trying
print("Waiting for the Darwin Core Archive.....")
timestamp2 = datetime.now()
gotit = False
while gotit == False:
try:
# Download the file
timestamp = datetime.now()
zipdownload = occurrences.download_get(key=dkey, path=working_directory)
print("Wait time for DWcA creation: "
+ str(datetime.now() - timestamp2))
print("Wait time for DWcA download: "
+ str(datetime.now() - timestamp))
gotit = True
except:
wait = datetime.now() - timestamp2
                if wait.total_seconds() > 60*1440:
                    gotit = True
                    print("FAILED!!! -- timed out after 24 hrs. ",
                          "Try again later or split up the query with ",
                          "year parameters")
# Read the relevant files from within the Darwin Core archive
timestamp = datetime.now()
with DwCAReader(zipdownload["path"]) as dwca:
try:
dfRaw = dwca.pd_read('occurrence.txt', low_memory=False)
except Exception as e:
print("Read error:")
print(e)
try:
doi = dwca.metadata.attrib["packageId"]
except Exception as e:
print("DOI error:")
print(e)
try:
citations = dwca.open_included_file('citations.txt').read()
except Exception as e:
citations = "Failed"
print("Citation error:")
print(e)
try:
rights = dwca.open_included_file('rights.txt').read()
except Exception as e:
rights = "Failed"
print("Rights error:")
print(e)
print("Wait time for reading the DwCA: "
+ str(datetime.now() - timestamp))
# Record DWCA metadata
# Store the value summary for the selected fields in a table.
timestamp = datetime.now()
cursor.executescript("""CREATE TABLE GBIF_download_info
(download_key TEXT, doi TEXT, citations TEXT,
rights TEXT);""")
cursor.execute('''INSERT INTO GBIF_download_info (doi, download_key)
VALUES ("{0}", "{1}")'''.format(doi, dkey))
try:
cursor.execute('''UPDATE GBIF_download_info
SET citations = "{0}"
WHERE doi = "{1}"'''.format(citations, doi))
except Exception as e:
print(e)
cursor.execute('''UPDATE GBIF_download_info
SET citations = "Failed"
WHERE doi = "{0}"'''.format(doi))
try:
cursor.execute('''UPDATE GBIF_download_info
SET rights = "{0}"
WHERE doi = "{1}"'''.format(rights, doi))
except Exception as e:
print(e)
cursor.execute('''UPDATE GBIF_download_info
SET rights = "Failed"
WHERE doi = "{0}"'''.format(doi))
print("Stored GBIF Download DOI etc.: "
+ str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< SUMMARIZE
timestamp = datetime.now()
# We don't want to count the "UNKNOWNS" we added
if dwca_download == False:
df_raw2 = dfRaw.replace({"UNKNOWN": np.nan})
df_populated1 = pd.DataFrame(df_raw2.count(axis=0).T.iloc[1:])
df_populated1['included(n)'] = df_populated1[0]
df_populated1['populated(n)'] = df_populated1[0]
if dwca_download == True:
df_raw2 = dfRaw.copy()
df_populated1 = pd.DataFrame(df_raw2.count(axis=0).T.iloc[1:])
df_populated1['included(n)'] = len(dfRaw)
df_populated1['populated(n)'] = df_populated1[0]
df_populated2 = df_populated1.filter(items=['included(n)', 'populated(n)'],
axis='columns')
df_populated2.index.name = 'attribute'
df_populated2.to_sql(name='gbif_fields_returned', con=conn,
if_exists='replace')
print("Summarized fields returned: " + str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< PREPARE
timestamp = datetime.now()
# Rename columns
records1 = dfRaw.rename({"issue": "issues", "id": "record_id"}, axis=1)
# Drop columns
records1 = records1.filter(items=output_schema.keys(), axis=1)
# Populate columns
records1["retrieval_date"] = str(datetime.now())
if filter_set["get_dwca"] == True:
records1["GBIF_download_doi"] = doi
else:
records1["GBIF_download_doi"] = "bypassed"
records1["source"] = "GBIF"
# Add GBIF records to template; replace and fillna to support astype()
records2 = (pd.DataFrame(columns=output_schema.keys())
.combine_first(records1)
# this replace is needed for API method
.replace({"coordinateUncertaintyInMeters": {"UNKNOWN": np.nan},
"radius_m": {"UNKNOWN": np.nan},
"coordinatePrecision": {"UNKNOWN": np.nan},
"nominal_xy_precision": {"UNKNOWN": np.nan},
"individualCount": {"UNKNOWN": 1},
"weight": {"UNKNOWN": 10},
"detection_distance_m": {"UNKNOWN": 0}})
.fillna({"coordinateUncertaintyInMeters": 0,
"radius_m": 0,
"individualCount": 1,
"weight": 10,
"detection_distance_m": 0,
"effort_distance_m": 0,
"coordinate_precision": 1,
"gps_accuracy_m": 30})
.astype(output_schema))
print("Prepared GBIF records for processing: "
+ str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< Results
return records2
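# Hedged usage sketch (credentials and paths are placeholders): the GBIF account
# details are only used when filter_set["get_dwca"] is True.
# gbif_df = get_GBIF_records(taxon_info=my_taxon_info, filter_set=my_filter_set,
#                            query_name="example_query",
#                            working_directory="/tmp/wrangler/",
#                            username="gbif_user", password="gbif_pass",
#                            email="user@example.com")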
def process_records(ebird_data, gbif_data, filter_set, taxon_info,
working_directory, query_name):
'''
    Summarizes the values in the data frames, populates some fields, applies
    filters, summarizes which values persisted after filtering, and inserts
    the results into the output database.
    Parameters
    ----------
    ebird_data : a data frame of records from eBird
    gbif_data : a data frame of records from GBIF
    filter_set : the filter set dictionary
    taxon_info : the taxon information dictionary
    working_directory : path to the directory holding the output database; string
    query_name : the name you chose for the query; string
    Returns
    -------
    None. Filtered records and summaries are written to the output database.
'''
timestamp = datetime.now()
# Create or connect to the database
output_database = working_directory + query_name + ".sqlite"
conn = sqlite3.connect(output_database, isolation_level='DEFERRED')
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< MANAGE DATA TYPES
schema = output_schema
string_atts = {key:value for (key, value) in schema.items() if schema[key] == 'str'}
if ebird_data is not None:
ebird_data = ebird_data.astype(string_atts)
if gbif_data is not None:
gbif_data = gbif_data.astype(string_atts)
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< REMOVE EBIRD FROM GBIF
if gbif_data is not None:
if ebird_data is not None:
gbif_data = gbif_data[gbif_data["collectionCode"].str.contains("EBIRD*") == False]
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< COMBINE DATA FRAMES
if ebird_data is None:
df_unfiltered = gbif_data
if gbif_data is None:
df_unfiltered = ebird_data
if gbif_data is not None and ebird_data is not None:
# Concatenate the gbif and ebird tables
df_unfiltered = pd.concat([ebird_data, gbif_data])
print("Prepared data frames for processing: "
+ str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< SUMMARIZE VALUES
timestamp = datetime.now()
# Make a list of columns to summarize values from
do_not_summarize = ['decimalLatitude', 'decimalLongitude',
'GBIF_download_doi', 'coordinateUncertaintyInMeters',
'detection_distance_m', 'eventDate', 'eventRemarks',
'filter_set_name', 'footprintSRS', 'footprintWKT',
'gbif_id', 'ebird_id', "effort_distance_m",
'general_remarks', 'georeferencedBy', 'habitat',
'georeferenceRemarks', 'identificationQualifier',
'identifiedBy', 'identifiedRemarks', 'individualCount',
'informationWitheld', 'locality',
'locationAccordingTo', 'locationRemarks', "modified",
'occurrenceRemarks', 'radius_m', 'record_id',
'recordedBy', 'retrieval_date', 'taxonConceptID',
'verbatimLocality', 'weight', 'weight_notes']
# Make a function to do the summarizing
def summarize_values(dataframe, step):
"""
Loops through columns and gets a count of unique values. Packages in
a df.
"""
attributes = []
summarize = [x for x in dataframe.columns if x not in do_not_summarize]
for column in summarize:
value_count = dataframe['record_id'].groupby(dataframe[column]).count()
value_df = (pd.DataFrame(value_count)
.reset_index()
.rename({'record_id': step, column: 'value'}, axis=1))
value_df["attribute"] = column
value_df = value_df[["attribute", "value", step]]
if value_df.empty == False:
attributes.append(value_df)
result = pd.concat(attributes)
return result
# Store value summary in a data frame
acquired = summarize_values(dataframe=df_unfiltered, step='acquired')
# Summarize sources
source_df1 = df_unfiltered[['institutionID', 'collectionCode',
'datasetName', 'record_id']]
source_summary1 = (source_df1
.groupby(by=['institutionID', 'collectionCode',
'datasetName'])
.size()
.reset_index(name='acquired'))
print("Summarized values acquired: " + str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< POPULATE SOME COLUMNS
timestamp = datetime.now()
df_unfiltered.fillna(value={'individualCount': int(1)}, inplace=True)
df_unfiltered["weight"] = 10
df_unfiltered["weight_notes"] = ""
df_unfiltered["taxon_id"] = taxon_info["ID"]
df_unfiltered["gbif_id"] = taxon_info["GBIF_ID"]
df_unfiltered["ebird_id"] = taxon_info["EBIRD_ID"]
df_unfiltered["detection_distance_m"] = taxon_info["detection_distance_m"]
df_unfiltered["filter_set_name"] = filter_set["name"]
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< COORDINATE PRECISION
    '''In WGS84, the ground distance represented by a coordinate depends on the
    number of decimal digits provided and, for longitude, on the latitude. Thus,
    coordinates have a nominal precision that may limit values. Populate a
    column for this...'''
# Trim decimal length to 5 digits (lat and long).
# Anything more is false precision.
df_unfiltered["decimalLatitude"] = df_unfiltered["decimalLatitude"].apply(lambda x: coord_rounded(x, 5))
df_unfiltered["decimalLongitude"] = df_unfiltered["decimalLongitude"].apply(lambda x: coord_rounded(x, 5))
# Drop rows without a valid latitude or longitude
df_unfiltered.dropna(subset=["decimalLatitude", "decimalLongitude"],
inplace=True)
# Calculate the number of digits for latitude and longitude
df_unfiltered['digits_latitude'] = [len(x.split(".")[1]) for x in df_unfiltered['decimalLatitude']]
df_unfiltered['digits_longitude'] = [len(x.split(".")[1]) for x in df_unfiltered['decimalLongitude']]
# Estimate longitude precisions
df_unfiltered = nominal_x_precision(dataframe=df_unfiltered,
lat_column="decimalLatitude",
digits_column="digits_longitude",
output_column="nominal_x_precision")
# Latitude precision; lookup for latitude precision
digitsY = {1: 11112.0, 2: 1111.2, 3: 111.1, 4: 11.1, 5: 1.1}
df_unfiltered["nominal_y_precision"] = df_unfiltered["digits_latitude"].apply(lambda x: digitsY[x])
# Put the larger of the two nominal precisions in a column
df_unfiltered["nominal_xy_precision"] = np.where(df_unfiltered["nominal_y_precision"] > df_unfiltered["nominal_x_precision"], df_unfiltered["nominal_y_precision"], df_unfiltered["nominal_x_precision"])
# Clean up
df_unfiltered.drop(["temp", "temp2", "digits_latitude", "digits_longitude",
"nominal_x_precision", "nominal_y_precision"], axis=1,
inplace=True)
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< BUFFER RADIUS
'''
Calculate a buffer distance from various parameters for the
point-radius method. Compilation has to differ with data source and
whether the user chose to use a default coordinate uncertainty. Components
of radius may include coordinateUncertaintyInMeters, coordinatePrecision,
GPS_accuracy_m, effort_distance_m, detection_distance_m.
Records are broken apart by source (GBIF, GBIF/EOD, EBD), processed,
and then concatenated in order to account for all conditions.
If footprintWKT is provided, it will be used by spatial_output instead
of point buffering.
'''
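    # Illustrative arithmetic (hypothetical record): an eBird checklist from 1998
    # with effort_distance_m = 2000 and detection_distance_m = 100 gets
    # gps_accuracy_m = 100 (pre-2000 date), so radius_m = 2000 + 100 + 100 = 2200.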
# Records from GBIF with coordinate uncertainty (georeferenced)
georef = df_unfiltered[df_unfiltered["coordinateUncertaintyInMeters"] > 0.0].copy()
if georef.empty == False:
#georef.fillna({"coordinatePrecision": 0.00001}, inplace=True)
georef["gps_accuracy_m"] = np.where(georef["eventDate"].apply(lambda x: datetime.strptime(x, "%Y-%m-%dT%H:%M:%S").year) < 2000, 100, 30)
georef["radius_m"] = georef["coordinateUncertaintyInMeters"]
print("Number of georeferenced GBIF records: " + str(len(georef)))
# Records from GBIF without coordinate uncertainty
gbif_nogeo = df_unfiltered[(df_unfiltered["coordinateUncertaintyInMeters"] == 0.0) & (df_unfiltered["collectionCode"].str.contains("EBIRD*") == False)].copy()
if gbif_nogeo.empty == False:
gbif_nogeo["gps_accuracy_m"] = np.where(gbif_nogeo["eventDate"].apply(lambda x: datetime.strptime(x, "%Y-%m-%dT%H:%M:%S").year) < 2000, 100, 30)
if filter_set["default_coordUncertainty"] is not None:
print("Applying default coordinate uncertainties for GBIF records")
#gbif_nogeo.fillna({"coordinatePrecision": 0.00001}, inplace=True)
gbif_nogeo["radius_m"] = filter_set["default_coordUncertainty"]
if filter_set["default_coordUncertainty"] is None:
print("Approximating coordinate uncertanties for GBIF records")
#gbif_nogeo.fillna({"coordinatePrecision": 0.00001}, inplace=True)
gbif_nogeo["radius_m"] = gbif_nogeo["gps_accuracy_m"] + gbif_nogeo["detection_distance_m"] + gbif_nogeo["effort_distance_m"]
# Records from EBD
ebd_geo = df_unfiltered[df_unfiltered["source"] == "eBird"].copy()
if ebd_geo.empty == False:
#ebd_geo.fillna({"coordinatePrecision": 0.00001}, inplace=True)
ebd_geo["gps_accuracy_m"] = np.where(ebd_geo["eventDate"].apply(lambda x: datetime.strptime(x, "%Y-%m-%d").year) < 2000, 100, 30)
ebd_geo["radius_m"] = ebd_geo["effort_distance_m"] + ebd_geo["gps_accuracy_m"] + ebd_geo["detection_distance_m"]
# Records from EOD (via GBIF)
eod_nogeo = df_unfiltered[(df_unfiltered["source"] == "GBIF") & (df_unfiltered["collectionCode"].str.contains("EBIRD*") == True)].copy()
if eod_nogeo.empty == False:
#eod_nogeo.fillna({"coordinatePrecision": 0.00001}, inplace=True)
eod_nogeo["gps_accuracy_m"] = np.where(eod_nogeo["eventDate"].apply(lambda x: datetime.strptime(x, "%Y-%m-%dT%H:%M:%S").year) < 2000, 100, 30)
eod_nogeo["effort_distance_m"] = 8047 # eBird best practices allows distance up to 5 mi length.
eod_nogeo["radius_m"] = eod_nogeo["effort_distance_m"] + eod_nogeo["gps_accuracy_m"] + eod_nogeo["detection_distance_m"]
# Concat df's if necessary
if filter_set['has_coordinate_uncertainty'] == True:
df_unfiltered2 = georef
to_concat = []
for x in [gbif_nogeo, georef, eod_nogeo, ebd_geo]:
if x.empty == False:
to_concat.append(x)
if len(to_concat) > 1:
df_unfiltered2 = pd.concat(to_concat)
if len(to_concat) == 1:
df_unfiltered2 = to_concat[0]
# Where coordinate precision is poor, overwrite the radius to be the precision.
df_unfiltered2["radius_m"] = np.where(df_unfiltered2["nominal_xy_precision"] > df_unfiltered2["radius_m"], df_unfiltered2["nominal_xy_precision"], df_unfiltered2["radius_m"])
#df_unfiltered2["radius_m"] = np.where(df_unfiltered2["coordinatePrecision"] > df_unfiltered2["radius_m"], df_unfiltered2["coordinatePrecision"], df_unfiltered2["radius_m"])
# Test to make sure that no records were lost in the previous steps
if len(df_unfiltered2) != len(df_unfiltered):
print("AN ERROR OCCURRED !!!!!!!!!!!!!")
else:
print("Prepared records and calculated radii:" + str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< FILTER
timestamp = datetime.now()
# Some filters to be prepped for use
for x in ['bases_omit', 'collection_codes_omit', 'datasets_omit',
'institutions_omit', 'issues_omit', 'sampling_protocols_omit']:
if filter_set[x] == None:
filter_set[x] = []
df_filter2 = (df_unfiltered2[df_unfiltered2['radius_m'] <= filter_set['max_coordinate_uncertainty']]
[lambda x: x['collectionCode'].isin(filter_set['collection_codes_omit']) == False]
[lambda x: x['institutionID'].isin(filter_set['institutions_omit']) == False]
[lambda x: x['basisOfRecord'].isin(filter_set['bases_omit']) == False]
[lambda x: x['samplingProtocol'].isin(filter_set['sampling_protocols_omit']) == False]
[lambda x: x['datasetName'].isin(filter_set['datasets_omit']) == False]
[lambda x: x['occurrenceStatus'] != "ABSENT"]
)
# Case where user demands records had coordinate uncertainty
if filter_set['has_coordinate_uncertainty'] == True:
df_filter2 = df_filter2[df_filter2["coordinateUncertaintyInMeters"] > 0]
    ''' ISSUES are more complex because multiple issues can be listed per record.
    The method used here is involved, but hopefully faster than simple iteration
    over all records.
    '''
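    # Illustrative example (hypothetical values): a record with
    # issues = "COORDINATE_ROUNDED;GEODETIC_DATUM_ASSUMED_WGS84" is removed only
    # if at least one of those two codes also appears in filter_set['issues_omit'].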
df_filter2.fillna(value={'issues': ""}, inplace=True)
# Format of issues entries differ by method, change json format to email
# format
if filter_set['get_dwca'] == True:
df_filter2['issues'] = [x.replace(', ', ';').replace('[', '').replace(']', '').replace("'", "")
for x in df_filter2['issues']]
unique_issue = list(df_filter2['issues'].unique())
violations = [x for x in unique_issue if len(set(str(x).split(";")) & set(filter_set['issues_omit'])) != 0] # entries that contain violations
df_filter3 = df_filter2[df_filter2['issues'].isin(violations) == False] # Records without entries that are violations.
print("Performed filtering: " + str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< REMOVE SPACE-TIME DUPLICATES
# Prep some columns by changing data type
df_filter3 = (df_filter3
.astype({'decimalLatitude': 'str',
'decimalLongitude': 'str'})
.reset_index(drop=True))
if filter_set["duplicate_coord_date_OK"] == False:
df_filterZ = drop_duplicates_latlongdate(df_filter3)
if filter_set["duplicate_coord_date_OK"] == True:
df_filterZ = df_filter3.copy()
print("DUPLICATES ON LATITUDE, LONGITUDE, DATE-TIME INCLUDED")
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< SPATIAL FILTERING
# Spatial filtering happens in the get functions (ebird and gbif), not here
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< SUMMARIZE VALUES AGAIN
timestamp = datetime.now()
# Store value summary in a data frame
if df_filterZ.empty == False:
retained = summarize_values(dataframe=df_filterZ, step='retained')
if df_filterZ.empty == True:
retained = acquired.copy().drop(["acquired"], axis=1)
retained["retained"] = 0
# Concat acquired and retained data frames
summary_df = pd.merge(retained, acquired, on=['attribute', 'value'],
how='inner')
# Calculate a difference column
summary_df['removed'] = summary_df['acquired'] - summary_df['retained']
summary_df = summary_df[['attribute', 'value', 'acquired', 'removed',
'retained']]
# Summarize sources
if df_filterZ.empty == False:
source_df2 = df_filterZ[['institutionID', 'collectionCode',
'datasetName', 'record_id']]
source_summary2 = (source_df2
.groupby(by=['institutionID', 'collectionCode',
'datasetName'])
.size()
.reset_index(name='retained'))
if df_filterZ.empty == True:
print(source_summary1)
source_summary2 = source_summary1.copy().drop(["acquired"], axis=1)
source_summary2["retained"] = 0
# Concat acquired and retained source summary data frames
source_summaries = pd.merge(source_summary1, source_summary2,
on=['institutionID', 'collectionCode',
'datasetName'],
how='inner')
# Calculate a difference column
source_summaries['removed'] = source_summaries['acquired'] - source_summaries['retained']
source_summaries = source_summaries[['institutionID', 'collectionCode',
'datasetName', 'acquired', 'removed',
'retained']]
# Save the summaries in the output database
summary_df.to_sql(name='attribute_value_counts', con=conn,
if_exists='replace')
source_summaries.to_sql(name='sources', con=conn,
if_exists='replace')
print("Saved summary of filtering results: "
+ str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< SAVE
# Reformat data to strings and insert into db.
df_filterZ.replace("nan",
pd.NA).applymap(str).to_sql(name='occurrence_records',
con=conn,
if_exists='replace')
conn.close()
return None
def nominal_precisions(longitude, latitude, produce):
'''
Calculates the nominal precisions based on WGS84 coordinates.
Method is based on information from wikipedia page on latitude and posts at
https://gis.stackexchange.com/questions/8650/measuring-accuracy-of-latitude-and-longitude
https://wiki.openstreetmap.org/wiki/Precision_of_coordinates
Parameters
----------
latitude : decimal degrees (EPSG:4326) latitude as string.
longitude : decimal degrees (EPSG:4326) longitude as string.
produce : 'longitude', 'latitude', or 'both'
Returns
-------
x : uncertainty in longitude (meters) as float.
y : uncertainty in latitude (meters) as float.
EXAMPLE
-------
x, y = nominal_precisions("-93.455", "26.3455", produce="both")
'''
lat = latitude.split(".")
long = longitude.split(".")
# Longitude - decimal gets moved based on digits.
digitsX = {1: 10, 2: 100, 3: 1000, 4: 10000, 5: 100000}
x = (111321 * np.cos(float(latitude) * np.pi/180))/digitsX[len(long[1])]
# Latitude lookup
digitsY = {1: 11112.0, 2: 1111.2, 3: 111.1, 4: 11.1, 5: 1.1}
y = digitsY[len(lat[1])]
if produce == "both":
return x, y
if produce == "longitude":
return x
if produce == "latitude":
return y
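# A quick worked check of the docstring example above (values re-derived here as an
# illustration, not taken from the source): with longitude "-93.455" (3 decimal places)
# and latitude "26.3455" (4 decimal places),
#   x = 111321 * cos(26.3455 deg) / digitsX[3]  =  111321 * 0.8962 / 1000  ~  99.8 m
#   y = digitsY[4]                              =  11.1 m
# so nominal_precisions("-93.455", "26.3455", produce="both") should return roughly
# (99.8, 11.1).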
def drop_duplicates_latlongdate(df):
'''
Function to find and remove duplicate occurrence records within the
wildlife wrangler workflow. When duplicates exist, the record with the
higher decimal precision is kept, and if precision values are equal, then
the record with the smallest radius_m is retained. Accounts for existence
of records with a mix of decimal precision in latitude and longitude
values. The process is a little complex. The data frame is first cleaned
up by dropping duplicates based on which record has the smaller buffer radius.
Before doing that, records with unequal decimal precision in the latitude
and longitude fields have those fields truncated to the coarser
precision present. An input data frame likely contains records with equal
decimal precision in latitude and longitude fields, but that is lower than
the rest (i.e. latitude and longitude have 3 places right of the decimal
whereas most records have 4). Duplication may occur between lower and
higher precision records at the lower precision. Therefore, duplication
must be assessed at each of the lower precision levels present. The
strategy for that is to, at each precision level, split the main data
frame in two: one with records having the precision level of the
investigation and another with records greater than the precision level.
The "greater than" data frame records' latitude and longitude values are
then rounded to the precision level. Records are identified from the
"equals precision" data frame that have their latitude, longitude, and date
values represented in the "greater than" df, and such records' IDs are
collected in a list of records to be removed from the input/main data
frame. This process is iterated over all precision levels present in the
data.
Parameters
----------
df : input pandas data frame.
Returns
-------
df2 : a data frame equal to df but without duplicates. Use to drop records
from the occurrences table.
'''
startduptime = datetime.now()
# Record df length before removing duplicates
initial_length = len(df)
"""
############ RECTIFY UNEQUAL LAT-LONG PRECISION
First, trim decimal length in cases where decimal length differs between
latitude and longitude values, result is equal latitude and longitude
length. Record the trimmed decimal precision in a temp column for use
later as a record of the "verbatim" precision.
"""
df['dup_latPlaces'] = [len(x.split(".")[1]) for x in df['decimalLatitude']]
df['dup_lonPlaces'] = [len(x.split(".")[1]) for x in df['decimalLongitude']]
df['dup_OGprec'] = df['dup_latPlaces']
prec_unequal = df[df['dup_latPlaces'] != df['dup_lonPlaces']]
for i in prec_unequal.index:
x = prec_unequal.loc[i]
if x['dup_latPlaces'] < x['dup_lonPlaces']:
trim_len = int(x['dup_latPlaces'])
else:
trim_len = int(x['dup_lonPlaces'])
df.loc[i, 'decimalLatitude'] = x['decimalLatitude'][:trim_len + 3]
df.loc[i, 'decimalLongitude'] = x['decimalLongitude'][:trim_len + 4]
# Record the resulting precision for reference later
df.loc[i, 'dup_OGprec'] = trim_len
df.drop(['dup_latPlaces', 'dup_lonPlaces'], axis=1, inplace=True)
"""
######## INITIAL DROP OF DUPLICATES
Initial drop of duplicates on 'latitude', 'longitude', 'eventDate',
keeping the first (lowest radius_m)
Sort so that the lowest radius_m is first
"""
df = (df
.sort_values(by=['decimalLatitude', 'decimalLongitude', 'eventDate',
'radius_m'],
ascending=True, kind='mergesort', na_position='last')
.drop_duplicates(subset=['decimalLatitude', 'decimalLongitude',
'eventDate'],
keep='first'))
"""
######### FIND IMPRECISE DUPLICATES
Get a list of "verbatim" precisions that are present in the data to loop
through. Next, iterate through this list collecting id's of records that
need to be removed from the main df.
"""
# Get list of unique precisions. Order is important: descending.
precisions = list(set(df['dup_OGprec']))
precisions.sort(reverse=True)
# The highest precision listed at this point has already been handled: drop it.
precisions = precisions[1:]
# List for collecting records that are duplicates
duplis = []
# The precision-specific duplicate testing happens repeatedly, so make it a
# function.
def drop_duplicate_coord_date(precision, df):
"""
Function to find undesirable duplicates at a particular decimal
precision
Parameters
----------
precision : The level of precision (places right of decimal) in
decimalLatitude and longitude values for the assessment of duplicates.
df : data frame to assess for duplicates. The data frame itself is not
modified; the record_id values of duplicate records are appended to the
enclosing 'duplis' list.
Returns
-------
None; duplicate record IDs are collected in the 'duplis' list.
"""
# Create a df with records from the input df having decimal
# precision > the precision level being assessed.
dfLonger = df[df['dup_OGprec'] > precision].copy()
# Round lat and long values
dfLonger['decimalLatitude'] = [str(round(float(x), precision)) for x in dfLonger['decimalLatitude']]
dfLonger['decimalLongitude'] = [str(round(float(x), precision)) for x in dfLonger['decimalLongitude']]
# Create a df with records having the precision being
# investigated
dfShorter1 = df[df['dup_OGprec'] == precision]
# Find records in dfShorter1 with latitude, longitude, date combo
# existing in dfLonger and append to list of duplis
dfduplis = pd.merge(dfShorter1, dfLonger, how='inner',
on=['decimalLatitude', 'decimalLongitude',
'eventDate'])
dups_ids = dfduplis['record_id_x']
for d in dups_ids:
duplis.append(d)
# Drop latitude longitude duplicates at lower decimal precisions
for p in precisions:
drop_duplicate_coord_date(p, df)
# Drop rows from the current main df that were identified as duplicates
df2 = df[df['record_id'].isin(duplis) == False].copy()
# Drop excess columns
df2.drop(columns=['dup_OGprec'], axis=1, inplace=True)
# Print status
duptime = datetime.now() - startduptime
print(str(initial_length - len(df2))
+ " duplicate records dropped: {0}".format(duptime))
return df2
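# A minimal, hypothetical usage sketch for drop_duplicates_latlongdate (toy values,
# not part of the wrangler workflow): latitude/longitude are passed as strings and the
# frame must also carry record_id, eventDate, and radius_m columns. Here 'r2' repeats
# the location/date of 'r1' at a coarser (3-decimal) precision, so it should be dropped.
#
# toy = pd.DataFrame({'record_id': ['r1', 'r2', 'r3'],
#                     'decimalLatitude': ['26.3454', '26.345', '30.1234'],
#                     'decimalLongitude': ['-93.4551', '-93.455', '-80.5678'],
#                     'eventDate': ['2020-05-01', '2020-05-01', '2020-05-02'],
#                     'radius_m': [30.0, 100.0, 30.0]})
# deduped = drop_duplicates_latlongdate(toy)   # expected to keep 'r1' and 'r3' only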
def verify_results(database):
'''
Compares the occurrence record attributes to the filters that were
supposed to be applied.
Parameters
----------
database : path to a wrangler output database; string.
Like "Z:/Occurrence_Records/test1.sqlite"
RESULTS
-------
prints messages if tests fail. No output indicates all tests passed.
'''
# Connect to a database
conn = sqlite3.connect(database)
# Get the taxon concept ---------------------------------------------------
taxon_concept = ( | pd.read_sql(sql="SELECT * FROM taxon_concept;", con=conn) | pandas.read_sql |
import dask.array as da
import dask.dataframe as dd
import numpy as np
import numpy.linalg as LA
import pandas as pd
import pytest
import sklearn.linear_model
from dask.dataframe.utils import assert_eq
from dask_glm.regularizers import Regularizer
from sklearn.pipeline import make_pipeline
import dask_ml.linear_model
from dask_ml.datasets import make_classification, make_counts, make_regression
from dask_ml.linear_model import LinearRegression, LogisticRegression, PoissonRegression
from dask_ml.linear_model.utils import add_intercept
from dask_ml.model_selection import GridSearchCV
@pytest.fixture(params=["admm", "newton", "lbfgs", "proximal_grad", "gradient_descent"])
def solver(request):
"""Parametrized fixture for all the solver names"""
return request.param
@pytest.fixture(params=[r() for r in Regularizer.__subclasses__()])
def regularizer(request):
"""Parametrized fixture for all the regularizer names"""
return request.param
class DoNothingTransformer:
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
return X
def fit_transform(self, X, y=None):
return X
def get_params(self, deep=True):
return {}
def test_lr_init(solver):
LogisticRegression(solver=solver)
def test_pr_init(solver):
PoissonRegression(solver=solver)
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_fit(fit_intercept, solver):
X, y = make_classification(n_samples=100, n_features=5, chunks=50)
lr = LogisticRegression(fit_intercept=fit_intercept)
lr.fit(X, y)
lr.predict(X)
lr.predict_proba(X)
@pytest.mark.parametrize(
"solver", ["admm", "newton", "lbfgs", "proximal_grad", "gradient_descent"]
)
def test_fit_solver(solver):
import dask_glm
import packaging.version
if packaging.version.parse(dask_glm.__version__) <= packaging.version.parse(
"0.2.0"
):
pytest.skip("FutureWarning for dask config.")
X, y = make_classification(n_samples=100, n_features=5, chunks=50)
lr = LogisticRegression(solver=solver)
lr.fit(X, y)
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_lm(fit_intercept):
X, y = make_regression(n_samples=100, n_features=5, chunks=50)
lr = LinearRegression(fit_intercept=fit_intercept)
lr.fit(X, y)
lr.predict(X)
if fit_intercept:
assert lr.intercept_ is not None
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_big(fit_intercept):
X, y = make_classification(chunks=50)
lr = LogisticRegression(fit_intercept=fit_intercept)
lr.fit(X, y)
lr.decision_function(X)
lr.predict(X)
lr.predict_proba(X)
if fit_intercept:
assert lr.intercept_ is not None
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_poisson_fit(fit_intercept):
X, y = make_counts(n_samples=100, chunks=500)
pr = PoissonRegression(fit_intercept=fit_intercept)
pr.fit(X, y)
pr.predict(X)
pr.get_deviance(X, y)
if fit_intercept:
assert pr.intercept_ is not None
def test_in_pipeline():
X, y = make_classification(n_samples=100, n_features=5, chunks=50)
pipe = make_pipeline(DoNothingTransformer(), LogisticRegression())
pipe.fit(X, y)
def test_gridsearch():
X, y = make_classification(n_samples=100, n_features=5, chunks=50)
grid = {"logisticregression__C": [1000, 100, 10, 2]}
pipe = make_pipeline(DoNothingTransformer(), LogisticRegression())
search = GridSearchCV(pipe, grid, cv=3)
search.fit(X, y)
def test_add_intercept_dask_dataframe():
X = dd.from_pandas(pd.DataFrame({"A": [1, 2, 3]}), npartitions=2)
result = add_intercept(X)
expected = dd.from_pandas(
pd.DataFrame(
{"intercept": [1, 1, 1], "A": [1, 2, 3]}, columns=["intercept", "A"]
),
npartitions=2,
)
assert_eq(result, expected)
df = dd.from_pandas( | pd.DataFrame({"intercept": [1, 2, 3]}) | pandas.DataFrame |
from __future__ import division
import numpy as np
import pandas as pd
import pickle
import os
from math import ceil
import matplotlib
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
from sklearn.metrics import r2_score
warnings.simplefilter("ignore")
# colors = ["#3366cc", "#dc3912", "#109618", "#990099", "#ff9900"]
colors = sns.color_palette('muted')
labels = ['Remaining', 'First','Last']
def density_plot(df, Accuracy_base, Accuracy_LSTM, Accuracy_NG, save_fig, Out_put_name,model_name_list, Mean_or_median):
model = model_name_list
sns.set(font_scale=1.5)
sns.set_style("white", {"legend.frameon": True})
plt.figure(figsize=(14, 7))
ax1 = plt.subplot(1, 2, 1)
cols1 = [df, Accuracy_base, Accuracy_LSTM, Accuracy_NG]
for i in range(len(cols1)):
data = cols1[i]['first']
sns.kdeplot(data, ax=ax1, shade=True, color=colors[i], label=model[i],linewidth=2)
if Mean_or_median == 'Mean':
med = data.mean()
else:
med = data.median()
plt.axvline(med, color=colors[i], linestyle='dashed', linewidth=2)
if i == 1:
if 'duration' in Out_put_name:
plt.text(med - 0.02, 3.0, '{}'.format(round(med, 3)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med - 0.02, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
elif i== 2:
if 'duration' in Out_put_name:
plt.text(med + 0.02, 3.1, '{}'.format(round(med, 3)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.02, 3.1, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
elif i == 0:
if 'duration' in Out_put_name:
plt.text(med + 0.02, 3.3, '{}'.format(round(med, 3)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.02, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
elif i == 3:
if 'duration' in Out_put_name:
plt.text(med - 0.02, 3.0, '{}'.format(round(med, 3)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med - 0.02, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
plt.xlim(0, 1.0)
plt.ylim(0, 3.5)
if 'location' in Out_put_name:
ax1.set_xticklabels([str(i) + '%' for i in range(0, 101, 20)])
plt.xlabel('Prediction accuracy', fontsize=20)
else:
plt.xlabel('R'+r'$^2$', fontsize=20)
plt.ylabel('Density', fontsize=20)
plt.yticks(fontsize=20)
plt.xticks(fontsize=20)
if 'location' in Out_put_name:
plt.legend(fontsize=18) #, loc='upper right'
else:
plt.legend(fontsize=18) #
plt.title('First activities',fontsize=20)
# plt.text(-0.1, 1.05, '(a)', fontdict={'size': 20, 'weight': 'bold'},
# transform=ax1.transAxes)
ax2 = plt.subplot(1, 2, 2)
for i in range(len(cols1)):
data = cols1[i]['Middle']
sns.kdeplot(data, ax=ax2, shade=True, color=colors[i], label=model[i], linewidth=2)
if Mean_or_median == 'Mean':
med = data.mean()
else:
med = data.median()
plt.axvline(med, color=colors[i], linestyle='dashed', linewidth=2, alpha = 1)
if i == 1:
if 'duration' in Out_put_name:
plt.text(med - 0.01, 3.3, '{}'.format(round(med, 3)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.01, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
elif i== 2:
if 'duration' in Out_put_name:
plt.text(med + 0.023, 3.0, '{}'.format(round(med, 3)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med - 0.01, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
elif i == 0:
if 'duration' in Out_put_name:
plt.text(med + 0.01, 3.0, '{}'.format(round(med, 3)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.01, 3.0, '{}%'.format(round(med* 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
elif i == 3:
if 'duration' in Out_put_name:
plt.text(med + 0.01, 3.3, '{}'.format(round(med, 3)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.01, 3.3, '{}%'.format(round(med* 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
plt.xlim(0, 1.0)
plt.ylim(0, 3.5)
if 'location' in Out_put_name:
ax2.set_xticklabels([str(i) + '%' for i in range(0, 101, 20)])
plt.xlabel('Prediction accuracy', fontsize=20)
else:
plt.xlabel('R'+r'$^2$', fontsize=20)
plt.yticks(fontsize=20)
plt.xticks(fontsize=20)
plt.ylabel('Density', fontsize=20)
if 'location' in Out_put_name:
plt.legend(fontsize=18) #, loc='upper right'
else:
plt.legend(fontsize=18) #
plt.title('Remaining activities',fontsize=20)
# plt.text(-0.1, 1.05, '(b)', fontdict={'size': 20, 'weight': 'bold'},
# transform=ax2.transAxes)
plt.tight_layout()
if save_fig == 0:
plt.show()
else:
plt.savefig('img/' + Out_put_name, dpi=200)
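# A hypothetical call sketch for density_plot (column and model names are placeholders
# inferred from the code above, not documented in the source): each of the four input
# frames is expected to hold per-user scores in columns named 'first' and 'Middle',
# and Out_put_name controls the axis labeling ('location...' -> accuracy, otherwise R^2).
#
# scores = pd.DataFrame({'first': np.random.uniform(0.4, 0.9, 200),
#                        'Middle': np.random.uniform(0.5, 0.95, 200)})
# density_plot(scores, scores, scores, scores, save_fig=0,
#              Out_put_name='location_accuracy_density.png',
#              model_name_list=['Model A', 'Baseline', 'LSTM', 'N-gram'],
#              Mean_or_median='Mean')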
def density_plot_duration_error(df, Accuracy_base, Accuracy_LSTM, save_fig, Out_put_name, model_name_list, Mean_or_median):
model = model_name_list
sns.set(font_scale=1.5)
sns.set_style("white", {"legend.frameon": True})
plt.figure(figsize=(14, 7))
ax1 = plt.subplot(1, 2, 1)
cols1 = [df, Accuracy_base, Accuracy_LSTM]
for i in range(len(cols1)):
data = cols1[i]['first']
sns.kdeplot(data, ax=ax1, shade=True, color=colors[i], label=model[i])
if Mean_or_median == 'Mean':
med = data.mean()
else:
med = data.median()
plt.axvline(med, color=colors[i], linestyle='dashed', linewidth=2)
if i == 0:
plt.text(med + 0.02, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
elif i == 2:
if 'duration' in Out_put_name:
plt.text(med + 0.02, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.02, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med - 0.01, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
# plt.xlim(0, 1.0)
# plt.ylim(0, 3.5)
# ax1.set_xticklabels([str(i) + '%' for i in range(0, 101, 20)])
# plt.xlabel('Prediction Accuracy', fontsize=20)
plt.ylabel('Density (Users)', fontsize=20)
plt.yticks(fontsize=20)
plt.xticks(fontsize=20)
if 'location' in Out_put_name:
plt.legend(fontsize=18) # , loc='upper right'
else:
plt.legend(fontsize=18, loc='center right') #
plt.title('First Activities', fontsize=20)
# plt.text(-0.1, 1.05, '(a)', fontdict={'size': 20, 'weight': 'bold'},
# transform=ax1.transAxes)
ax2 = plt.subplot(1, 2, 2)
for i in range(len(cols1)):
data = cols1[i]['Remaining']
sns.kdeplot(data, ax=ax2, shade=True, color=colors[i], label=model[i])
if Mean_or_median == 'Mean':
med = data.mean()
else:
med = data.median()
plt.axvline(med, color=colors[i], linestyle='dashed', linewidth=2)
if i == 1:
if 'duration' in Out_put_name:
plt.text(med - 0.01, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.01, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
elif i == 2:
if 'duration' in Out_put_name:
plt.text(med + 0.023, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med - 0.01, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.01, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
# plt.xlim(0, 1.0)
# plt.ylim(0, 3.5)
# ax2.set_xticklabels([str(i) + '%' for i in range(0, 101, 20)])
plt.yticks(fontsize=20)
plt.xticks(fontsize=20)
# plt.xlabel('Prediction Accuracy', fontsize=20)
plt.ylabel('Density (User-level)', fontsize=20)
if 'location' in Out_put_name:
plt.legend(fontsize=18) # , loc='upper right'
else:
plt.legend(fontsize=18, loc='center right') #
plt.title('Remaining Activities', fontsize=20)
# plt.text(-0.1, 1.05, '(b)', fontdict={'size': 20, 'weight': 'bold'},
# transform=ax2.transAxes)
plt.tight_layout()
if save_fig == 0:
plt.show()
else:
plt.savefig('img/' + Out_put_name, dpi=200)
def density_plot_not_seperate_mid_first(df, Accuracy_base, Accuracy_LSTM, save_fig, Out_put_name, model_name_list):
model = model_name_list
sns.set(font_scale=1.5)
sns.set_style("white", {"legend.frameon": True})
plt.figure(figsize=(7, 7))
ax1 = plt.subplot(1, 1, 1)
cols1 = [df, Accuracy_base, Accuracy_LSTM]
for i in range(len(cols1)):
data = cols1[i]['all']
sns.kdeplot(data, ax=ax1, shade=True, color=colors[i], label=model[i])
med = data.mean()
plt.axvline(med, color=colors[i], linestyle='dashed', linewidth=2)
if i == 0:
plt.text(med + 0.02, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
elif i == 2:
if 'duration' in Out_put_name:
plt.text(med + 0.02, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.02, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med - 0.01, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
plt.xlim(0, 1.0)
plt.ylim(0, 3.5)
ax1.set_xticklabels([str(i) + '%' for i in range(0, 101, 20)])
plt.xlabel('Prediction Accuracy', fontsize=20)
plt.ylabel('Density (Users)', fontsize=20)
plt.yticks(fontsize=20)
plt.xticks(fontsize=20)
if 'location' in Out_put_name:
plt.legend(fontsize=18) # , loc='upper right'
else:
plt.legend(fontsize=18) #
plt.tight_layout()
if save_fig == 0:
plt.show()
else:
plt.savefig('img/' + Out_put_name, dpi=200)
def data_process_continuous(data):
error_first_temp = (data['Predict1'].loc[data['activity_index']==0] - data['Ground_truth'].loc[data['activity_index']==0])/3600
Accuracy_first_temp = sum(np.array(data['Correct'].loc[data['activity_index']==0]))/data['Correct'].loc[data['activity_index']==0].count()
data_temp = data.loc[data['activity_index']!=0]
# data_temp = data
error_Remaining_temp = (data_temp['Predict1'] - data_temp['Ground_truth'])/3600
Accuracy_temp = sum(np.array(data_temp['Correct']))/data_temp['Correct'].count()
accuracy_all = sum(np.array(data['Correct']))/data['Correct'].count()
return error_first_temp, Accuracy_first_temp, error_Remaining_temp, Accuracy_temp,accuracy_all
def calculate_error(result_df):
# correct error data
result_df.loc[result_df['Predict_duration'] > 86400, 'Predict_duration'] = 86400
result_df.loc[result_df['Predict_duration'] <= 0, 'Predict_duration'] = 1
######
result_df['error_sq'] = (result_df['Predict_duration'] - result_df['Ground_truth_duration']) ** 2
result_df['error_abs'] = np.abs(result_df['Predict_duration'] - result_df['Ground_truth_duration'])
RMSE = np.sqrt(np.mean(result_df['error_sq']))
MAPE = np.mean(result_df['error_abs'] / result_df['Ground_truth_duration'])
MAE = np.mean(result_df['error_abs'])
if len(result_df) > 0:
R_sq = r2_score(result_df['Ground_truth_duration'], result_df['Predict_duration'])
else:
R_sq = None
return RMSE, MAPE, MAE, R_sq
def r_sq_for_two_parts(data,y_mean):
data['RES'] = (data['Ground_truth_duration'] - data['Predict_duration'])**2
data['TOT'] = (data['Ground_truth_duration'] - y_mean)**2
R_sq = 1 - sum(data['RES'])/sum(data['TOT'])
return R_sq
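# Note on the design choice above (my reading of the code, not stated in the source):
# calculate_error uses sklearn's r2_score, whereas r_sq_for_two_parts computes R^2 for
# a subset ('first' or 'Remaining' activities) against the overall mean duration passed
# in as y_mean, so both partial scores share the full-sample baseline instead of each
# being re-centred on its own subset mean. A tiny hypothetical check with toy values:
#
# toy = pd.DataFrame({'activity_index': [0, 1, 2],
#                     'Ground_truth_duration': [3600.0, 7200.0, 10800.0],
#                     'Predict_duration': [3000.0, 7000.0, 11000.0]})
# rmse, mape, mae, r_sq = calculate_error(toy)                       # overall metrics
# r_first = r_sq_for_two_parts(toy.loc[toy['activity_index'] == 0].copy(),
#                              toy['Ground_truth_duration'].mean())  # ~0.97 here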
def data_process_continuous_R_sq(data):
_, _, _, R_sq_all = calculate_error(data)
data_first = data.loc[data['activity_index']==0].copy()
data_Remaining = data.loc[data['activity_index']!=0].copy()
mean_y = np.mean(data['Ground_truth_duration'])
R_sq_first = r_sq_for_two_parts(data_first, mean_y)
if len(data_Remaining)>0:
R_sq_Remaining = r_sq_for_two_parts(data_Remaining, mean_y)
else:
R_sq_Remaining = None
return R_sq_first, R_sq_Remaining, R_sq_all
def data_process_continuous_RMSE(data):
RMSE_all, _, _, _ = calculate_error(data)
data_first = data.loc[data['activity_index']==0].copy()
data_Remaining = data.loc[data['activity_index']!=0].copy()
RMSE_first, _, _, R_sq_first = calculate_error(data_first)
RMSE_Remaining, _, _, R_sq_Remaining = calculate_error(data_Remaining)
return RMSE_first, RMSE_Remaining, RMSE_all
def data_process_continuous_MAPE(data):
_, MAPE_all, _, _ = calculate_error(data)
data_first = data.loc[data['activity_index']==0].copy()
data_Remaining = data.loc[data['activity_index']!=0].copy()
_, MAPE_first, _, R_sq_first = calculate_error(data_first)
_, MAPE_Remaining, _, R_sq_Remaining = calculate_error(data_Remaining)
return MAPE_first, MAPE_Remaining, MAPE_all
def data_process_discrete(data):
error_first_temp = (data['Predict1'].loc[data['activity_index']==0] - data['Ground_truth'].loc[data['activity_index']==0])
Accuracy_first_temp = sum(np.array(data['Correct'].loc[data['activity_index']==0]))/data['Correct'].loc[data['activity_index']==0].count()
data_temp = data.loc[data['activity_index']!=0]
# data_temp = data
error_Remaining_temp = (data_temp['Predict1'] - data_temp['Ground_truth'])
Accuracy_temp = sum(np.array(data_temp['Correct']))/data_temp['Correct'].count()
accuracy_all = sum(np.array(data['Correct'])) / data['Correct'].count()
return error_first_temp, Accuracy_first_temp, error_Remaining_temp, Accuracy_temp, accuracy_all
def generate_accuracy_file(individual_ID_list, output_fig, duration_error):
error_list=[]
total=0
error_Remaining = | pd.DataFrame({'Remaining':[]}) | pandas.DataFrame |
import pandas
from collections import Counter
from tqdm import tqdm
user_df = pandas.read_csv('processed_data/prj_user.csv')
tweets_df = pandas.read_csv('original_data/prj_tweet.csv')
ids = user_df["id"]
ids = list(ids.values)
hobby_1_list = []
hobby_2_list = []
def get_users_most_popular_hashtags_list(tweets_df, user_id, number_of_wanted_hashtags=2):
"""
:param tweets_df: data frame of tweets with 'userID' and 'tweet' columns
:param user_id: id of the user whose most popular hashtags we want to get
:param number_of_wanted_hashtags: maximum number of hashtags to return
:return: list of strings, size of number_of_wanted_hashtags or smaller if user doesn't have enough hashtags
"""
tweets = list(tweets_df.loc[tweets_df['userID'] == int(user_id)]["tweet"].values)
tweets_with_hashtag = [tweet for tweet in tweets if "#" in tweet]
user_hashtags = []
for tweet in tweets_with_hashtag:
user_hashtags += [i[1:] for i in tweet.split() if i.startswith("#")]
users_most_common_hashtags = [word for word, word_count in Counter(user_hashtags).most_common(number_of_wanted_hashtags)]
return users_most_common_hashtags
for id in tqdm(ids):
users_most_common_hashtags = get_users_most_popular_hashtags_list(tweets_df=tweets_df, user_id = id, number_of_wanted_hashtags=2)
if len(users_most_common_hashtags) < 2:
while len(users_most_common_hashtags) < 2:
users_most_common_hashtags.append(None)
hobby_1_list.append(users_most_common_hashtags[0])
hobby_2_list.append(users_most_common_hashtags[1])
hobby_df = | pandas.read_csv('processed_data/prj_user.csv') | pandas.read_csv |
import sys, os
import unittest
import pandas as pd
import numpy
import sys
from sklearn import datasets
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, Imputer, LabelEncoder, LabelBinarizer, MinMaxScaler, MaxAbsScaler, RobustScaler,\
Binarizer, PolynomialFeatures, OneHotEncoder, KBinsDiscretizer
from sklearn_pandas import CategoricalImputer
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
from sklearn.svm import SVC, SVR, LinearSVC, LinearSVR, OneClassSVM
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.naive_bayes import GaussianNB
from sklearn_pandas import DataFrameMapper
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor, RandomForestClassifier,\
RandomForestRegressor, IsolationForest
from sklearn.linear_model import LinearRegression, LogisticRegression, RidgeClassifier, SGDClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.gaussian_process import GaussianProcessClassifier
from nyoka.preprocessing import Lag
from nyoka import skl_to_pmml
from nyoka import PMML44 as pml
class TestMethods(unittest.TestCase):
def test_sklearn_01(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data,columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
f_name = "svc_pmml.pmml"
model = SVC()
pipeline_obj = Pipeline([
('svm',model)
])
pipeline_obj.fit(irisd[features],irisd[target])
skl_to_pmml(pipeline_obj,features,target,f_name)
pmml_obj = pml.parse(f_name,True)
## 1
svms = pmml_obj.SupportVectorMachineModel[0].SupportVectorMachine
for mod_val, recon_val in zip(model.intercept_, svms):
self.assertEqual("{:.16f}".format(mod_val), "{:.16f}".format(recon_val.Coefficients.absoluteValue))
## 2
svm = pmml_obj.SupportVectorMachineModel[0]
self.assertEqual(svm.RadialBasisKernelType.gamma,model._gamma)
def test_sklearn_02(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data,columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
f_name = "knn_pmml.pmml"
pipeline_obj = Pipeline([
('scaling',StandardScaler()),
('knn',KNeighborsClassifier(n_neighbors = 5))
])
pipeline_obj.fit(irisd[features],irisd[target])
skl_to_pmml(pipeline_obj,features,target,f_name)
pmml_obj = pml.parse(f_name,True)
##1
self.assertIsNotNone(pmml_obj.NearestNeighborModel[0].ComparisonMeasure.euclidean)
##2
self.assertEqual(pmml_obj.NearestNeighborModel[0].ComparisonMeasure.kind, "distance")
##3
self.assertEqual(pipeline_obj.steps[-1][-1].n_neighbors, pmml_obj.NearestNeighborModel[0].numberOfNeighbors)
def test_sklearn_03(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
f_name = "rf_pmml.pmml"
model = RandomForestClassifier(n_estimators = 100)
pipeline_obj = Pipeline([
("mapping", DataFrameMapper([
(['sepal length (cm)', 'sepal width (cm)'], StandardScaler()) ,
(['petal length (cm)', 'petal width (cm)'], Imputer())
])),
("rfc", model)
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, f_name)
pmml_obj = pml.parse(f_name,True)
## 1
self.assertEqual(model.n_estimators,pmml_obj.MiningModel[0].Segmentation.Segment.__len__())
##2
self.assertEqual(pmml_obj.MiningModel[0].Segmentation.multipleModelMethod, "majorityVote")
def test_sklearn_04(self):
titanic = pd.read_csv("nyoka/tests/titanic_train.csv")
features = titanic.columns
target = 'Survived'
f_name = "gb_pmml.pmml"
pipeline_obj = Pipeline([
("imp", Imputer(strategy="median")),
("gbc", GradientBoostingClassifier(n_estimators = 10))
])
pipeline_obj.fit(titanic[features],titanic[target])
skl_to_pmml(pipeline_obj, features, target, f_name)
pmml_obj = pml.parse(f_name,True)
##1
self.assertEqual(pmml_obj.MiningModel[0].Segmentation.multipleModelMethod, "modelChain")
##2
self.assertEqual(pmml_obj.MiningModel[0].Segmentation.Segment.__len__(), 2)
##3
self.assertEqual(pmml_obj.MiningModel[0].Segmentation.Segment[1].RegressionModel.normalizationMethod, "logit")
def test_sklearn_05(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg'],axis=1)
y = df['mpg']
features = [name for name in df.columns if name not in ('mpg')]
target = 'mpg'
pipeline_obj = Pipeline([
('mapper', DataFrameMapper([
('car name', TfidfVectorizer())
])),
('model',DecisionTreeRegressor())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"dtr_pmml.pmml")
self.assertEqual(os.path.isfile("dtr_pmml.pmml"),True)
def test_sklearn_06(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg','car name'],axis=1)
y = df['mpg']
features = X.columns
target = 'mpg'
f_name = "linearregression_pmml.pmml"
model = LinearRegression()
pipeline_obj = Pipeline([
('model',model)
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,f_name)
pmml_obj = pml.parse(f_name, True)
## 1
reg_tab = pmml_obj.RegressionModel[0].RegressionTable[0]
self.assertEqual(reg_tab.intercept,model.intercept_)
## 2
for model_val, pmml_val in zip(model.coef_, reg_tab.NumericPredictor):
self.assertEqual("{:.16f}".format(model_val),"{:.16f}".format(pmml_val.coefficient))
def test_sklearn_07(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
f_name = "logisticregression_pmml.pmml"
model = LogisticRegression()
pipeline_obj = Pipeline([
("mapping", DataFrameMapper([
(['sepal length (cm)', 'sepal width (cm)'], StandardScaler()) ,
(['petal length (cm)', 'petal width (cm)'], Imputer())
])),
("lr", model)
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, f_name)
pmml_obj = pml.parse(f_name,True)
## 1
segmentation = pmml_obj.MiningModel[0].Segmentation
self.assertEqual(segmentation.Segment.__len__(), model.classes_.__len__()+1)
## 2
self.assertEqual(segmentation.multipleModelMethod, "modelChain")
##3
self.assertEqual(segmentation.Segment[-1].RegressionModel.normalizationMethod, "simplemax")
##4
for i in range(model.classes_.__len__()):
self.assertEqual(segmentation.Segment[i].RegressionModel.normalizationMethod, "logit")
self.assertEqual("{:.16f}".format(model.intercept_[i]),\
"{:.16f}".format(segmentation.Segment[i].RegressionModel.RegressionTable[0].intercept))
def test_sklearn_08(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = [i%2 for i in range(iris.data.shape[0])]
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
('pca',PCA(2)),
('mod',LogisticRegression())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "logisticregression_pca_pmml.pmml")
self.assertEqual(os.path.isfile("logisticregression_pca_pmml.pmml"),True)
def test_sklearn_09(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
("SGD", SGDClassifier())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "sgdclassifier_pmml.pmml")
self.assertEqual(os.path.isfile("sgdclassifier_pmml.pmml"),True)
def test_sklearn_10(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
("lsvc", LinearSVC())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "linearsvc_pmml.pmml")
self.assertEqual(os.path.isfile("linearsvc_pmml.pmml"),True)
def test_sklearn_11(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg','car name'],axis=1)
y = df['mpg']
features = X.columns
target = 'mpg'
pipeline_obj = Pipeline([
('model',LinearSVR())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"linearsvr_pmml.pmml")
self.assertEqual(os.path.isfile("linearsvr_pmml.pmml"),True)
def test_sklearn_12(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg','car name'],axis=1)
y = df['mpg']
features = X.columns
target = 'mpg'
pipeline_obj = Pipeline([
('model',GradientBoostingRegressor())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"gbr.pmml")
self.assertEqual(os.path.isfile("gbr.pmml"),True)
def test_sklearn_13(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
("SGD", DecisionTreeClassifier())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "dtr_clf.pmml")
self.assertEqual(os.path.isfile("dtr_clf.pmml"),True)
def test_sklearn_14(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg','car name'],axis=1)
y = df['mpg']
features = X.columns
target = 'mpg'
pipeline_obj = Pipeline([
('model',RandomForestRegressor())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"rfr.pmml")
self.assertEqual(os.path.isfile("rfr.pmml"),True)
def test_sklearn_15(self):
df = | pd.read_csv('nyoka/tests/auto-mpg.csv') | pandas.read_csv |
"""
Module: LMR_proxy_preprocess.py
Purpose: Takes proxy data in their native format (e.g. .pckl file for PAGES2k or collection of
NCDC-templated .txt files) and generates Pandas DataFrames stored in pickle files
containing metadata and actual data from proxy records. The "pickled" DataFrames
are used as input by the Last Millennium Reanalysis software.
Currently, the data is stored as *annual averages* for original records with
subannual data.
Originator : <NAME> | Dept. of Atmospheric Sciences, Univ. of Washington
| January 2016
(Based on code written by <NAME> (U. of Washington) to handle
PAGES(2013) proxies)
Revisions :
- Addition of proxy types corresponding to "deep-times" proxy records, which are
being included in the NCDC-templated proxy collection.
[R. Tardif, U. of Washington, March 2017]
- Addition of recognized time/age definitions used in "deep-times" proxy records
and improved conversion of time/age data to year CE (convention used in LMR).
[R. Tardif, U. of Washington, March 2017]
- Improved detection & treatment of missing data, now using tags found
(or not) in each data file.
[R. Tardif, U. of Washington, March 2017]
- Added functionalities related to the merging of proxies coming from two
sources (PAGES2k phase 2 data contained in a single compressed pickle file
and "in-house" collections contained in NCDC-templated text files).
The possibility to "gaussianize" records and to calculate annual averages
on "tropical year" (Apr to Mar) or calendar year have also been implemented.
[R. Tardif, U. of Washington, Michael Erb, USC, May 2017]
- Renamed the proxy databases to less-confusing convention.
'pages' renamed as 'PAGES2kv1' and 'NCDC' renamed as 'LMRdb'
[R. Tardif, U. of Washington, Sept 2017]
"""
import glob
import os
import os.path
import numpy as np
import pandas as pd
import time as clock
from copy import deepcopy
from scipy import stats
import string
import re
import six
import ast
from os.path import join
import pickle as pickle
import gzip
import calendar
# LMR imports
from LMR_utils import gaussianize
# =========================================================================================
class EmptyError(Exception):
print(Exception)
# =========================================================================================
# ---------------------------------------- MAIN -------------------------------------------
# =========================================================================================
def main():
# ********************************************************************************
# Section for User-defined options: begin
#
#proxy_data_source = 'PAGES2Kv1' # proxies from PAGES2k phase 1 (2013)
# --- *** --- *** --- *** --- *** --- *** --- *** --- *** --- *** --- *** ---
proxy_data_source = 'LMRdb' # proxies from PAGES2k phase 2 (2017) +
# "in-house" collection in NCDC-templated files
# Determine which dataset(s) (NCDC and/or PAGES2kv2) to include in the DF.
# - Both : include_NCDC = True, include_PAGES2kphase2 = True
# - Only NCDC : include_NCDC = True, include_PAGES2kphase2 = False
# - Only PAGES2kv2 : include_NCDC = False, include_PAGES2kphase2 = True
include_NCDC = True
include_PAGES2kphase2 = True
#PAGES2kphase2file = 'PAGES2k_v2.0.0_tempOnly.pklz' # compressed version of the file
PAGES2kphase2file = 'PAGES2k_v2.0.0_tempOnly.pckl'
# version of the LMRdb proxy db to process
# - first set put together, including PAGES2k2013 trees
#LMRdb_dbversion = 'v0.0.0'
# - PAGES2k2013 trees taken out, but with NCDC-templated records from PAGES2k phase 2, version 1.9.0
#LMRdb_dbversion = 'v0.1.0'
# - NCDC collection for LMR + published PAGES2k phase 2 proxies (version 2.0.0). stored in .pklz file
#LMRdb_dbversion = 'v0.2.0'
#LMRdb_dbversion = 'v0.3.0'
# LMRdb_dbversion = 'v0.4.0'
LMRdb_dbversion = 'v1.0.0'
# File containing info on duplicates in proxy records
infoDuplicates = 'Proxy_Duplicates_PAGES2kv2_NCDC_LMR'+LMRdb_dbversion+'.xlsx'
# This option transforms all data to a Gaussian distribution. It should only be used for
# linear regressions, not physically-based PSMs.
gaussianize_data = False
# Specify the type of year to use for data averaging. "calendar year" (Jan-Dec)
# or "tropical year" (Apr-Mar)
year_type = "calendar year"
#year_type = "tropical year"
eliminate_duplicates = True
# --- *** --- *** --- *** --- *** --- *** --- *** --- *** --- *** --- *** ---
#proxy_data_source = 'DTDA'
dtda_dbversion = 'v0.0.0'
# --- *** --- *** --- *** --- *** --- *** --- *** --- *** --- *** --- *** ---
# datadir: directory where the original proxy datafiles are located
datadir = '/home/katabatic/wperkins/data/LMR/data/proxies/'
# outdir: directory where the proxy database files will be created
# The piece before /data/proxies should correspond to your "lmr_path" set in LMR_config.py
outdir = '/home/katabatic/wperkins/data/LMR/data/proxies/'
#
# Section for User-defined options: end
# ***************************************************************
main_begin_time = clock.time()
# first checking that input and output directories exist on disk
if not os.path.isdir(datadir):
print('ERROR: Directory <<datadir>> does not exist. Please revise your'
' entry for this user-defined parameter.')
raise SystemExit(1)
else:
# check that datadir ends with '/' -> expected thereafter
if not datadir[-1] == '/':
datadir = datadir+'/'
if not os.path.isdir(outdir):
print('ERROR: Directory <<outdir>> does not exist. Please revise your'
' entry for this user-defined parameter.')
raise SystemExit(1)
else:
# check that outdir ends with '/' -> expected thereafter
if not outdir[-1] == '/':
outdir = outdir+'/'
if proxy_data_source == 'PAGES2Kv1':
# ============================================================================
# PAGES2Kv1 proxy data -------------------------------------------------------
# ============================================================================
take_average_out = False
fname = datadir + 'Pages2k_DatabaseS1-All-proxy-records.xlsx'
meta_outfile = outdir + 'Pages2kv1_Metadata.df.pckl'
outfile = outdir + 'Pages2kv1_Proxies.df.pckl'
pages_xcel_to_dataframes(fname, meta_outfile, outfile, take_average_out)
elif proxy_data_source == 'LMRdb':
# ============================================================================
# LMRdb proxy data -----------------------------------------------------------
# ============================================================================
datadir = datadir+'LMRdb/ToPandas_'+LMRdb_dbversion+'/'
infoDuplicates = datadir+infoDuplicates
# Some checks
if not os.path.isdir(datadir):
print('ERROR: Directory %s is not found. Directory structure'
' <<datadir>>/LMRdb/ToPandas_vX.Y.Z is expected.'
' Please revise your set-up.' %datadir)
raise SystemExit(1)
if eliminate_duplicates and not os.path.isfile(infoDuplicates):
print('ERROR: eliminate_duplicates parameter set to True but'
' required file %s not found! Please rectify.' %infoDuplicates)
raise SystemExit(1)
meta_outfile = outdir + 'LMRdb_'+LMRdb_dbversion+'_Metadata.df.pckl'
data_outfile = outdir + 'LMRdb_'+LMRdb_dbversion+'_Proxies.df.pckl'
# Specify all proxy types & associated proxy measurements to look for & extract from the data files
# This is to take into account all the possible different names found in the PAGES2kv2 and NCDC data files.
proxy_def = \
{
#old 'Tree Rings_WidthPages' : ['TRW','ERW','LRW'],\
'Tree Rings_WidthPages2' : ['trsgi'],\
'Tree Rings_WidthBreit' : ['trsgi'],\
'Tree Rings_WoodDensity' : ['max_d','min_d','early_d','earl_d','late_d','MXD','density'],\
'Tree Rings_Isotopes' : ['d18O'],\
'Corals and Sclerosponges_d18O' : ['d18O','delta18O','d18o','d18O_stk','d18O_int','d18O_norm','d18o_avg','d18o_ave','dO18','d18O_4'],\
'Corals and Sclerosponges_SrCa' : ['Sr/Ca','Sr_Ca','Sr/Ca_norm','Sr/Ca_anom','Sr/Ca_int'],\
'Corals and Sclerosponges_Rates' : ['ext','calc','calcification','calcification rate', 'composite'],\
'Ice Cores_d18O' : ['d18O','delta18O','delta18o','d18o','d18o_int','d18O_int','d18O_norm','d18o_norm','dO18','d18O_anom'],\
'Ice Cores_dD' : ['deltaD','delD','dD'],\
'Ice Cores_Accumulation' : ['accum','accumu'],\
'Ice Cores_MeltFeature' : ['MFP','melt'],\
'Lake Cores_Varve' : ['varve', 'varve_thickness', 'varve thickness', 'thickness'],\
'Lake Cores_BioMarkers' : ['Uk37', 'TEX86', 'tex86'],\
'Lake Cores_GeoChem' : ['Sr/Ca', 'Mg/Ca','Cl_cont'],\
'Lake Cores_Misc' : ['RABD660_670','X_radiograph_dark_layer','massacum'],\
'Marine Cores_d18O' : ['d18O'],\
'Marine Cores_tex86' : ['tex86'],\
'Marine Cores_uk37' : ['uk37','UK37'],\
'Speleothems_d18O' : ['d18O'],\
'Bivalve_d18O' : ['d18O'],\
# DADT proxies
# 'Marine Cores_d18Opachyderma' : ['d18O_pachyderma'],\
# 'Marine Cores_d18Obulloides' : ['d18O_bulloides'],\
# 'Marine Cores_tex86' : ['tex86'],\
# Proxy types present in the database but which should not be included/assimilated
# 'Corals and Sclerosponges_d14C' : ['d14C','d14c','ac_d14c'],\
# 'Corals and Sclerosponges_d13C' : ['d13C','d13c','d13c_ave','d13c_ann_ave','d13C_int'],\
# 'Corals and Sclerosponges_Sr' : ['Sr'],\
# 'Corals and Sclerosponges_BaCa' : ['Ba/Ca'],\
# 'Corals and Sclerosponges_CdCa' : ['Cd/Ca'],\
# 'Corals and Sclerosponges_MgCa' : ['Mg/Ca'],\
# 'Corals and Sclerosponges_UCa' : ['U/Ca','U/Ca_anom'],\
# 'Corals and Sclerosponges_Pb' : ['Pb'],\
# 'Speleothems_d13C' : ['d13C'],\
# 'Borehole_Temperature' : ['temperature'],\
# 'Hybrid_Temperature' : ['temperature'],\
# 'Documents_Temperature' : ['temperature'],\
# 'Tree Rings_Temperature' : ['temperature'],\
# 'Lake Cores_Temperature' : ['temperature'],\
# 'Marine Cores_Temperature' : ['temperature'],\
# 'Corals and Sclerosponges_Temperature' : ['temperature'],\
# 'Climate Reconstructions' : ['sst_ORSTOM','sss_ORSTOM','temp_anom'],\
}
# --- data from LMR's NCDC-templated files
if include_NCDC:
ncdc_dict = ncdc_txt_to_dict(datadir, proxy_def, year_type, gaussianize_data)
else:
ncdc_dict = []
# --- PAGES2k phase2 (2017) data
if include_PAGES2kphase2:
pages2kv2_dict = pages2kv2_pickle_to_dict(datadir, PAGES2kphase2file, proxy_def, year_type, gaussianize_data)
else:
pages2kv2_dict = []
# --- Merge datasets, scrub duplicates and write metadata & data to file
merge_dicts_to_dataframes(proxy_def, ncdc_dict, pages2kv2_dict, meta_outfile, data_outfile, infoDuplicates, eliminate_duplicates)
elif proxy_data_source == 'DTDA':
# ============================================================================
# DTDA project proxy data ----------------------------------------------------
# ============================================================================
take_average_out = False
datadir = datadir+'DTDA/'
fname = datadir + 'DTDA_proxies_'+dtda_dbversion+'.xlsx'
meta_outfile = outdir + 'DTDA_'+dtda_dbversion+'_Metadata.df.pckl'
outfile = outdir + 'DTDA_'+dtda_dbversion+'_Proxies.df.pckl'
DTDA_xcel_to_dataframes(fname, meta_outfile, outfile, take_average_out)
else:
raise SystemExit('ERROR: Unkown proxy data source! Exiting!')
elapsed_time = clock.time() - main_begin_time
print('Build of integrated proxy database completed in %s mins' %str(elapsed_time/60.))
# =========================================================================================
# ------------------------------------- END OF MAIN ---------------------------------------
# =========================================================================================
# =========================================================================================
def is_number(s):
try:
float(s)
return True
except ValueError:
pass
try:
import unicodedata
unicodedata.numeric(s)
return True
except (TypeError, ValueError):
pass
return False
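# Illustrative calls (mine, not from the source): is_number("3.14") -> True,
# is_number("abc") -> False; the unicodedata branch additionally accepts single
# numeric characters such as "½", for which float() alone would fail.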
# =========================================================================================
def compute_annual_means(time_raw,data_raw,valid_frac,year_type):
"""
Computes annual-means from raw data.
Inputs:
time_raw : Original time axis
data_raw : Original data
valid_frac : The fraction of sub-annual data necessary to create annual mean. Otherwise NaN.
year_type : "calendar year" (Jan-Dec) or "tropical year" (Apr-Mar)
Outputs: time_annual, data_annual
Authors: <NAME>, Univ. of Washington; <NAME>, Univ. of Southern California
"""
# Check if dealing with multiple chronologies in one data stream (for NCDC files)
array_shape = data_raw.shape
if len(array_shape) == 2:
nbtimes, nbvalid = data_raw.shape
elif len(array_shape) == 1:
nbtimes, = data_raw.shape
nbvalid = 1
else:
raise SystemExit('ERROR in compute_annual_means: Unrecognized shape of data input array.')
time_between_records = np.diff(time_raw, n=1)
# Temporal resolution of the data, calculated as the mode of time difference.
time_resolution = abs(stats.mode(time_between_records)[0][0])
# check if time_resolution = 0.0 !!! sometimes adjacent records are tagged at same time ...
if time_resolution == 0.0:
print('***WARNING! Found adjacent records with same times!')
inderr = np.where(time_between_records == 0.0)
print(inderr)
time_between_records = np.delete(time_between_records,inderr)
time_resolution = abs(stats.mode(time_between_records)[0][0])
max_nb_per_year = int(1.0/time_resolution)
if time_resolution <=1.0:
proxy_resolution = int(1.0) # coarse-graining to annual
else:
proxy_resolution = int(time_resolution)
# Get rounded integer values of all years present in record.
years_all = [int(np.floor(time_raw[k])) for k in range(0,len(time_raw))]
years = list(set(years_all)) # 'set' is used to get unique values in list
years = sorted(years) # sort the list
years = np.insert(years,0,years[0]-1) # <NAME>
# bounds, for calendar year : [years_beg,years_end[
years_beg = np.asarray(years,dtype=np.float64) # inclusive lower bound
years_end = years_beg + 1. # exclusive upper bound
# If some of the time values are floats (sub-annual resolution)
# and year_type is tropical_year, adjust the years to cover the
# tropical year (Apr-Mar).
if np.equal(np.mod(time_raw,1),0).all() == False and year_type == 'tropical year':
print("Tropical year averaging...")
# modify bounds defining the "year"
for i, yr in enumerate(years):
# beginning of interval
if calendar.isleap(yr):
years_beg[i] = float(yr)+((31+29+31)/float(366))
else:
years_beg[i] = float(yr)+((31+28+31)/float(365))
# end of interval
if calendar.isleap(yr+1):
years_end[i] = float(yr+1)+((31+29+31)/float(366))
else:
years_end[i] = float(yr+1)+((31+28+31)/float(365))
time_annual = np.asarray(years,dtype=np.float64)
data_annual = np.zeros(shape=[len(years),nbvalid], dtype=np.float64)
# fill with NaNs for default values
data_annual[:] = np.NAN
# Calculate the mean of all data points with the same year.
for i in range(len(years)):
ind = [j for j, year in enumerate(time_raw) if (year >= years_beg[i]) and (year < years_end[i])]
nbdat = len(ind)
# TODO: check the number of non-NaN values
if time_resolution <= 1.0:
frac = float(nbdat)/float(max_nb_per_year)
if frac > valid_frac:
data_annual[i,:] = np.nanmean(data_raw[ind],axis=0)
else:
if nbdat > 1:
print('***WARNING! Found multiple records in same year in data with multiyear resolution!')
print(' year= %d %d' %(years[i], nbdat))
# Note: this calculates the mean if multiple entries found
data_annual[i,:] = np.nanmean(data_raw[ind],axis=0)
# check and modify time_annual array to reflect only the valid data present in the annual record
# for correct tagging of "Oldest" and "Youngest" data
indok = np.where(np.isfinite(data_annual))[0]
keep = np.arange(indok[0],indok[-1]+1,1)
return time_annual[keep], data_annual[keep,:], proxy_resolution
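# A small, hypothetical usage sketch for compute_annual_means (toy monthly series, not
# taken from any proxy file): two years of mid-month-tagged monthly values averaged on
# the calendar year, requiring at least half of the sub-annual samples per year.
#
# time_raw = 1850.0 + (np.arange(24) + 0.5) / 12.0          # 24 monthly time tags
# data_raw = np.concatenate([np.full(12, 1.0), np.full(12, 3.0)])
# t_ann, d_ann, res = compute_annual_means(time_raw, data_raw, 0.5, 'calendar year')
# # expected: t_ann = [1850., 1851.], d_ann = [[1.], [3.]], res = 1 (the empty padding
# # year the function prepends is trimmed because it holds no finite data)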
# ===================================================================================
# For PAGES2k S1 proxy data ---------------------------------------------------------
# ===================================================================================
def pages_xcel_to_dataframes(filename, metaout, dataout, take_average_out):
"""
Takes in Pages2K CSV and converts it to dataframe storage. This increases
size on disk due to the joining along the time index (lots of null values).
Makes it easier to query and grab data for the proxy experiments.
:param filename:
:param metaout:
:param dataout:
:return:
Author: <NAME>, Univ. of Washington
"""
# check that file <<filename>> exists
if not os.path.isfile(filename):
print('ERROR: File %s does not exist. Please make sure'
' input file is located in right directory.' %filename)
raise SystemExit(1)
meta_sheet_name = 'Metadata'
metadata = pd.read_excel(filename, meta_sheet_name)
# rename 'PAGES ID' column header to more general 'Proxy ID'
metadata.rename(columns = {'PAGES ID':'Proxy ID'},inplace=True)
metadata.to_pickle(metaout)
record_sheet_names = ['AntProxies', 'ArcProxies', 'AsiaProxies',
'AusProxies', 'EurProxies', 'NAmPol', 'NAmTR',
'SAmProxies']
for i, sheet in enumerate(record_sheet_names):
tmp = pd.read_excel(filename, sheet)
# for key, series in tmp.iteritems():
# h5store[key] = series[series.notnull()]
if i == 0:
df = tmp
else:
# SQL like table join along index
df = df.merge(tmp, how='outer', on='PAGES 2k ID')
#fix index and column name
col0 = df.columns[0]
newcol0 = df[col0][0]
df.set_index(col0, drop=True, inplace=True)
df.index.name = newcol0
df = df.ix[1:]
df.sort_index(inplace=True)
if take_average_out:
# copy of dataframe
df_tmp = df
# fill dataframe with new values where temporal averages over proxy records are subtracted
df = df_tmp.sub(df_tmp.mean(axis=0), axis=1)
# TODO: make sure year index is consecutive
#write data to file
df = df.to_sparse()
df.to_pickle(dataout)
# ===================================================================================
# For PAGES2k v2.0.0 proxy data ---------------------------------------------------------
# ===================================================================================
def pages2kv2_pickle_to_dataframes(datadir, metaout, dataout, eliminate_duplicates, year_type, gaussianize_data):
"""
Takes in a Pages2k pckl file and converts it to dataframe storage.
Authors: <NAME>, Univ. of Washington, Jan 2016.
<NAME>, Univ. of Southern California, Feb 2017
"""
# ===============================================================================
# Upload proxy data from Pages2k v2 pickle file
# ===============================================================================
# Open the pickle file containing the Pages2k data
f = gzip.open(datadir+'PAGES2k_v2.0.0_tempOnly.pklz','rb')
pages2k_data = pickle.load(f)
f.close()
# ===============================================================================
# Produce a summary of uploaded proxy data &
# generate integrated database in pandas DataFrame format
# ===============================================================================
# Summary of the final_proxy_list
nbsites = len(pages2k_data)
print('----------------------------------------------------------------------')
print(' SUMMARY: ')
print(' Total nb of records : ', nbsites)
print(' ------------------------------------------------------')
tot = []
# Loop over proxy types specified in *main*
counter = 0
# Build up pandas DataFrame
metadf = pd.DataFrame()
headers = ['NCDC ID','Site name','Lat (N)','Lon (E)','Elev','Archive type','Proxy measurement','Resolution (yr)',\
'Oldest (C.E.)','Youngest (C.E.)','Location','climateVariable','Realm','Relation_to_climateVariable',\
'Seasonality', 'Databases']
nb = []
for counter in range(0,len(pages2k_data)):
#counter = 13 # An example of a sub-annual record
# Give each record a unique descriptive name
pages2k_data[counter]['siteID'] = "PAGES2kv2_"+pages2k_data[counter]['dataSetName']+"_"+pages2k_data[counter]['paleoData_pages2kID']+":"+pages2k_data[counter]['paleoData_variableName']
nb.append(pages2k_data[counter]['siteID'])
print("Processing metadata", counter+1, "/", len(pages2k_data), ":",
pages2k_data[counter]['paleoData_pages2kID'])
# If the time axis goes backwards (i.e. newer to older), reverse it.
if pages2k_data[counter]['year'][-1] - pages2k_data[counter]['year'][-2] < 0:
pages2k_data[counter]['year'].reverse()
pages2k_data[counter]['paleoData_values'].reverse()
# If subannual, average up to annual --------------------------------------------------------
time_raw = np.array(pages2k_data[counter]['year'],dtype=np.float)
data_raw = np.array(pages2k_data[counter]['paleoData_values'],dtype=np.float)
# Remove values where either time or data is nan.
nan_indices = np.isnan(time_raw)+np.isnan(data_raw)
time_raw = time_raw[~nan_indices]
data_raw = data_raw[~nan_indices]
valid_frac = 0.5
# Use the following function to make annual-means.
# Inputs: time_raw, data_raw, valid_frac, year_type. Outputs: time_annual, data_annual
time_annual, data_annual, proxy_resolution = compute_annual_means(time_raw,data_raw,valid_frac,year_type)
# If gaussianize_data is set to true, transform the proxy data to Gaussian.
# This option should only be used when using regressions, not physically-based PSMs.
if gaussianize_data == True:
data_annual = gaussianize.gaussianize(data_annual)
# Write the annual data to the dictionary, so they can be written to
# the data file outside of this loop.
pages2k_data[counter]['time_annual'] = time_annual
pages2k_data[counter]['data_annual'] = data_annual
# Rename the proxy types in the same convention as the NCDC dataset.
# Proxy types not renamed: bivalve, borehole, documents, hybrid
if (pages2k_data[counter]['archiveType'] == 'coral') or (pages2k_data[counter]['archiveType'] == 'sclerosponge'):
pages2k_data[counter]['archiveType'] = 'Corals and Sclerosponges'
elif pages2k_data[counter]['archiveType'] == 'glacier ice':
pages2k_data[counter]['archiveType'] = 'Ice Cores'
elif pages2k_data[counter]['archiveType'] == 'lake sediment':
pages2k_data[counter]['archiveType'] = 'Lake Cores'
elif pages2k_data[counter]['archiveType'] == 'marine sediment':
pages2k_data[counter]['archiveType'] = 'Marine Cores'
elif pages2k_data[counter]['archiveType'] == 'speleothem':
pages2k_data[counter]['archiveType'] = 'Speleothems'
elif pages2k_data[counter]['archiveType'] == 'tree':
pages2k_data[counter]['archiveType'] = 'Tree Rings'
# Rename some of the proxy measurements to be more standard.
if (pages2k_data[counter]['archiveType'] == 'Ice Cores') and (pages2k_data[counter]['paleoData_variableName'] == 'd18O1'):
pages2k_data[counter]['paleoData_variableName'] = 'd18O'
elif (pages2k_data[counter]['archiveType'] == 'Lake Cores') and (pages2k_data[counter]['paleoData_variableName'] == 'temperature1'):
pages2k_data[counter]['paleoData_variableName'] = 'temperature'
elif (pages2k_data[counter]['archiveType'] == 'Lake Cores') and (pages2k_data[counter]['paleoData_variableName'] == 'temperature3'):
pages2k_data[counter]['paleoData_variableName'] = 'temperature'
# Not all records have data for elevation. In these cases, set elevation to nan.
if 'geo_meanElev' not in pages2k_data[counter]:
pages2k_data[counter]['geo_meanElev'] = np.nan
# Ensure lon is in [0,360] domain
if pages2k_data[counter]['geo_meanLon'] < 0.0:
pages2k_data[counter]['geo_meanLon'] = 360 + pages2k_data[counter]['geo_meanLon']
# Determine the seasonality of the record.
# Seasonal names were mapped to the three-month climatological seasons.
# 'early summer' was mapped to the first two months of summer only (an
# approximation that may warrant revisiting), and 'growing season' to summer.
season_orig = pages2k_data[counter]['climateInterpretation_seasonality']
if any(char.isdigit() for char in season_orig):
pages2k_data_seasonality = list(map(int,season_orig.split(' '))) # list() needed under Python 3
elif season_orig == 'annual':
if year_type == 'tropical year': pages2k_data_seasonality = [4,5,6,7,8,9,10,11,12,13,14,15]
else: pages2k_data_seasonality = [1,2,3,4,5,6,7,8,9,10,11,12]
elif season_orig == 'summer':
if pages2k_data[counter]['geo_meanLat'] >= 0: pages2k_data_seasonality = [6,7,8]
else: pages2k_data_seasonality = [-12,1,2]
elif season_orig == 'winter':
if pages2k_data[counter]['geo_meanLat'] >= 0: pages2k_data_seasonality = [-12,1,2]
else: pages2k_data_seasonality = [6,7,8]
elif season_orig == 'winter/spring':
if pages2k_data[counter]['geo_meanLat'] >= 0: pages2k_data_seasonality = [-12,1,2,3,4,5]
else: pages2k_data_seasonality = [6,7,8,9,10,11]
elif season_orig == 'early summer':
if pages2k_data[counter]['geo_meanLat'] >= 0: pages2k_data_seasonality = [6,7]
else: pages2k_data_seasonality = [-12,1]
elif season_orig == 'growing season':
if pages2k_data[counter]['geo_meanLat'] >= 0: pages2k_data_seasonality = [6,7,8]
else: pages2k_data_seasonality = [-12,1,2]
else:
if year_type == 'tropical year': pages2k_data_seasonality = [4,5,6,7,8,9,10,11,12,13,14,15]
else: pages2k_data_seasonality = [1,2,3,4,5,6,7,8,9,10,11,12]
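# Note on the month codes above (as read from how they are used in this codebase):
# negative values (e.g. -12) appear to denote months of the preceding calendar year,
# and values above 12 (13,14,15) denote Jan-Mar of the following year, which is how
# the April-March "tropical year" season is expressed.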
# Spell out the name of the interpretation variable.
if pages2k_data[counter]['climateInterpretation_variable'] == 'T':
pages2k_data[counter]['climateInterpretation_variable'] = 'temperature'
# Save to a dataframe
frame = pd.DataFrame({'a':pages2k_data[counter]['siteID'], 'b':pages2k_data[counter]['geo_siteName'], 'c':pages2k_data[counter]['geo_meanLat'], \
'd':pages2k_data[counter]['geo_meanLon'], 'e':pages2k_data[counter]['geo_meanElev'], \
'f':pages2k_data[counter]['archiveType'], 'g':pages2k_data[counter]['paleoData_variableName'], \
'h':proxy_resolution, 'i':pages2k_data[counter]['time_annual'][0], 'j':pages2k_data[counter]['time_annual'][-1], \
'k':pages2k_data[counter]['geo_pages2kRegion'], 'l':pages2k_data[counter]['climateInterpretation_variable'], \
'm':pages2k_data[counter]['climateInterpretation_variableDetail'], \
'n':pages2k_data[counter]['climateInterpretation_interpDirection'], 'o':None, 'p':None}, index=[counter])
# To get seasonality & databases *lists* into columns 'o' and 'p' of DataFrame
frame.at[counter,'o'] = pages2k_data_seasonality
frame.at[counter,'p'] = ['PAGES2kv2']
# Append to main DataFrame
metadf = metadf.append(frame)
#print ' ', '{:40}'.format(key), ' : ', len(nb)
tot.append(len(nb))
nbtot = sum(tot)
print(' ------------------------------------------------------')
print(' ','{:40}'.format('Total:'), ' : ', nbtot)
print('----------------------------------------------------------------------')
print(' ')
# Redefine column headers
metadf.columns = headers
# Write metadata to file
print('Now writing metadata to file:', metaout)
metadf.to_pickle(metaout)
# -----------------------------------------------------
# Build the proxy **data** DataFrame and output to file
# -----------------------------------------------------
print(' ')
print('Now creating & loading the data in the pandas DataFrame...')
print(' ')
for counter in range(0,len(pages2k_data)):
print("Processing metadata", counter+1, "/", len(pages2k_data), ":",
pages2k_data[counter]['paleoData_pages2kID'])
nbdata = len(pages2k_data[counter]['time_annual'])
# Load data in numpy array
frame_data = np.zeros(shape=[nbdata,2])
frame_data[:,0] = pages2k_data[counter]['time_annual']
frame_data[:,1] = pages2k_data[counter]['data_annual']
if counter == 0:
# Build up pandas DataFrame
header = ['NCDC ID', pages2k_data[counter]['siteID']]
df = pd.DataFrame({'a':frame_data[:,0], 'b':frame_data[:,1]})
df.columns = header
else:
frame = pd.DataFrame({'NCDC ID':frame_data[:,0], pages2k_data[counter]['siteID']:frame_data[:,1]})
df = df.merge(frame, how='outer', on='NCDC ID')
# Fix DataFrame index and column name
col0 = df.columns[0]
df.set_index(col0, drop=True, inplace=True)
df.index.name = 'Year C.E.'
df.sort_index(inplace=True)
# Write data to file
print('Now writing to file:', dataout)
df.to_pickle(dataout)
print(' ')
print('Done!')
# ===================================================================================
# For DTDA project proxy data -------------------------------------------------------
# ===================================================================================
def DTDA_xcel_to_dataframes(filename, metaout, dataout, take_average_out):
"""
Takes in the DTDA project proxy spreadsheet (Excel file) and converts it to dataframe storage. This increases
size on disk due to the joining along the time index (lots of null values).
Makes it easier to query and grab data for the proxy experiments.
:param filename:
:param metaout:
:param dataout:
:return:
Author: <NAME>, Univ. of Washington
Based on pages_xcel_to_dataframes function written by
<NAME> (Univ. of Washington)
"""
meta_sheet_name = 'Metadata'
metadata = pd.read_excel(filename, meta_sheet_name)
# add a "Databases" column and set to LMR
metadata.loc[:,'Databases'] = '[LMR]'
# add a "Seasonality" column and set to [1,2,3,4,5,6,7,8,9,10,11,12]
metadata.loc[:,'Seasonality'] = '[1,2,3,4,5,6,7,8,9,10,11,12]'
metadata.loc[:,'Elev'] = 0.0
nbrecords = len(metadata)
# One proxy record per sheet, all named as DataXXX
record_sheet_names = ['Data'+str("{0:03d}".format(i+1)) for i in range(nbrecords)]
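# e.g. with nbrecords = 3 this yields ['Data001', 'Data002', 'Data003']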
for i, sheet in enumerate(record_sheet_names):
pdata = pd.read_excel(filename, sheet)
# rounding age data to nearest year (<NAME>, pers. comm.)
age = (pdata[pdata.columns[0]][1:]).astype('float').round()
pdata[pdata.columns[0]][1:] = age
# -- just for print out - looking into time axis for each record
# age difference between consecutive data
diff = np.diff(pdata[pdata.columns[0]][1:], 1)
print('{:10s}'.format(pdata.columns[1]), ' : temporal resolution : mean=', '{:7.1f}'.format(np.mean(diff)), ' median=', '{:7.1f}'.format(np.median(diff)),\
' min=', '{:7.1f}'.format(np.min(diff)), ' max=', '{:7.1f}'.format(np.max(diff)))
resolution = np.mean(diff) # take average difference as representative "resolution"
# update resolution info in the metadata
metadata.loc[i,'Resolution (yr)'] = int(resolution)
if i == 0:
df = pdata
else:
# SQL like table join along index
df = df.merge(pdata, how='outer', on='Proxy ID')
#fix index and column name
col0 = df.columns[0]
# check time definition and convert to year CE if needed
newcol0 = df[col0][0]
if newcol0 == 'Year C.E.' or newcol0 == 'Year CE':
# do nothing
pass
elif newcol0 == 'Year BP':
newcol0 = 'Year C.E.'
df[col0][1:] = 1950. - df[col0][1:]
else:
print('Unrecognized time definition...')
raise SystemExit()
df.set_index(col0, drop=True, inplace=True)
df.index.name = newcol0
df = df.iloc[1:] # drop the header row (its label was moved to the index name above)
df.sort_index(inplace=True)
# Checking for duplicate ages in proxy record. If present, replace them by their average (<NAME>, pers. comm.)
df = df.astype(float)
df_f = df.groupby(df.index).mean()
if take_average_out:
# copy of dataframe
df_tmp = df_f
# fill dataframe with new values where temporal averages over proxy records are subtracted
df_f = df_tmp.sub(df_tmp.mean(axis=0), axis=1)
# TODO: make sure year index is consecutive
#write data to file
df_f.to_pickle(dataout)
# Make sure the 'Archive type' metadata is stored as strings
metadata['Archive type'] = metadata['Archive type'].astype(str)
# Add 'Youngest (C.E.)', 'Oldest (C.E.)' 'Elev' and 'Seasonality' info to metadata
sites = list(df_f)
for s in sites:
# 'Youngest' and 'Oldest' info based on the age data
values = df_f[s]
values = values[values.notnull()]
times = values.index.values
meta_ind = metadata[metadata['Proxy ID'] == s].index
metadata.loc[meta_ind,'Oldest (C.E.)'] = np.min(times)
metadata.loc[meta_ind,'Youngest (C.E.)'] = np.max(times)
# write metadata to file
metadata.to_pickle(metaout)
# ===================================================================================
# For PAGES2k phase 2 (2017) proxy data ---------------------------------------------
# ===================================================================================
def pages2kv2_pickle_to_dict(datadir, pages2kv2_file, proxy_def, year_type, gaussianize_data):
"""
Takes in a Pages2k pickle (pklz) file and converts it to python dictionary storage.
Authors: <NAME>, Univ. of Washington, Jan 2016.
<NAME>, Univ. of Southern California, Feb 2017
Revisions:
- Modified output, storing proxy information in dictionary returned
by the function, instead of storing in pandas dataframe dumped to
pickle file, as done in the original version by <NAME>.
[<NAME>, U. of Washington, May 2017]
"""
valid_frac = 0.5
# ===============================================================================
# Upload proxy data from Pages2k v2 pickle file
# ===============================================================================
begin_time = clock.time()
# Open the pickle file containing the Pages2k data, if it exists in target directory
infile = os.path.join(datadir, pages2kv2_file)
if os.path.isfile(infile):
print('Data from PAGES2k phase 2:')
print(' Uploading data from %s ...' %infile)
try:
# try to read as a straight pckl file
pages2k_data = pd.read_pickle(infile)
# f = open(infile,'rb')
# pages2k_data = pickle.load(f)
# f.close()
except:
# failed to read so try as a compressed pckl (pklz) file
try:
f = gzip.open(infile,'rb')
pages2k_data = pickle.load(f)
f.close()
except:
raise SystemExit(('ERROR: Could not read the PAGES2kv2 proxy file {}'
' as a regular or compressed pickle file. Unrecognized format!').format(pages2kv2_file))
else:
raise SystemExit(('ERROR: Option to include PAGES2kv2 proxies enabled'
' but corresponding data file could not be found!'
' Please place file {} in directory {}').format(pages2kv2_file,datadir))
# Summary of the uploaded data
nbsites = len(pages2k_data)
proxy_dict_pagesv2 = {}
tot = []
nb = []
for counter in range(0,nbsites):
# Give each record a unique descriptive name
pages2k_data[counter]['siteID'] = "PAGES2kv2_"+pages2k_data[counter]['dataSetName']+\
"_"+pages2k_data[counter]['paleoData_pages2kID']+\
":"+pages2k_data[counter]['paleoData_variableName']
nb.append(pages2k_data[counter]['siteID'])
print(' Processing %s/%s : %s' %(str(counter+1), str(len(pages2k_data)), pages2k_data[counter]['paleoData_pages2kID']))
# Look for publication title & authors
if 'NEEDS A TITLE' not in pages2k_data[counter]['pub1_title']:
pages2k_data[counter]['pub_title'] = pages2k_data[counter]['pub1_title']
pages2k_data[counter]['pub_author'] = pages2k_data[counter]['pub1_author']
else:
if 'NEEDS A TITLE' not in pages2k_data[counter]['pub2_title']:
pages2k_data[counter]['pub_title'] = pages2k_data[counter]['pub2_title']
pages2k_data[counter]['pub_author'] = pages2k_data[counter]['pub2_author']
else:
pages2k_data[counter]['pub_title'] = 'Unknown'
pages2k_data[counter]['pub_author'] = 'Unknown'
# If the time axis goes backwards (i.e. newer to older), reverse it.
if pages2k_data[counter]['year'][-1] - pages2k_data[counter]['year'][-2] < 0:
pages2k_data[counter]['year'].reverse()
pages2k_data[counter]['paleoData_values'].reverse()
# If subannual, average up to annual --------------------------------------------------------
time_raw = np.array(pages2k_data[counter]['year'],dtype=float)
data_raw = np.array(pages2k_data[counter]['paleoData_values'],dtype=float)
# Remove values where either time or data is nan.
nan_indices = np.isnan(time_raw)+np.isnan(data_raw)
time_raw = time_raw[~nan_indices]
data_raw = data_raw[~nan_indices]
# Use the following function to make annual-means.
# Inputs: time_raw, data_raw, valid_frac, year_type. Outputs: time_annual, data_annual
time_annual, data_annual, proxy_resolution = compute_annual_means(time_raw,data_raw,valid_frac,year_type)
data_annual = np.squeeze(data_annual)
# If gaussianize_data is set to true, transform the proxy data to Gaussian.
# This option should only be used when using regressions, not physically-based PSMs.
if gaussianize_data == True:
data_annual = gaussianize(data_annual)
# Write the annual data to the dictionary, so they can be written to
# the data file outside of this loop.
pages2k_data[counter]['time_annual'] = time_annual
pages2k_data[counter]['data_annual'] = data_annual
# Rename the proxy types in the same convention as the LMR's NCDC dataset.
# Proxy types not renamed, except capitalizing 1st letter: bivalve, borehole, documents, hybrid
if (pages2k_data[counter]['archiveType'] == 'coral') or (pages2k_data[counter]['archiveType'] == 'sclerosponge'):
pages2k_data[counter]['archiveType'] = 'Corals and Sclerosponges'
elif pages2k_data[counter]['archiveType'] == 'glacier ice':
pages2k_data[counter]['archiveType'] = 'Ice Cores'
elif pages2k_data[counter]['archiveType'] == 'lake sediment':
pages2k_data[counter]['archiveType'] = 'Lake Cores'
elif pages2k_data[counter]['archiveType'] == 'marine sediment':
pages2k_data[counter]['archiveType'] = 'Marine Cores'
elif pages2k_data[counter]['archiveType'] == 'speleothem':
pages2k_data[counter]['archiveType'] = 'Speleothems'
elif pages2k_data[counter]['archiveType'] == 'tree':
pages2k_data[counter]['archiveType'] = 'Tree Rings'
elif pages2k_data[counter]['archiveType'] == 'bivalve':
pages2k_data[counter]['archiveType'] = 'Bivalve'
elif pages2k_data[counter]['archiveType'] == 'borehole':
pages2k_data[counter]['archiveType'] = 'Borehole'
elif pages2k_data[counter]['archiveType'] == 'documents':
pages2k_data[counter]['archiveType'] = 'Documents'
elif pages2k_data[counter]['archiveType'] == 'hybrid':
pages2k_data[counter]['archiveType'] = 'Hybrid'
# Rename some of the proxy measurements to be more standard.
if (pages2k_data[counter]['archiveType'] == 'Ice Cores') and (pages2k_data[counter]['paleoData_variableName'] == 'd18O1'):
pages2k_data[counter]['paleoData_variableName'] = 'd18O'
elif (pages2k_data[counter]['archiveType'] == 'Tree Rings') and (pages2k_data[counter]['paleoData_variableName'] == 'temperature1'):
pages2k_data[counter]['paleoData_variableName'] = 'temperature'
elif (pages2k_data[counter]['archiveType'] == 'Lake Cores') and (pages2k_data[counter]['paleoData_variableName'] == 'temperature1'):
pages2k_data[counter]['paleoData_variableName'] = 'temperature'
elif (pages2k_data[counter]['archiveType'] == 'Lake Cores') and (pages2k_data[counter]['paleoData_variableName'] == 'temperature3'):
pages2k_data[counter]['paleoData_variableName'] = 'temperature'
# Not all records have data for elevation. In these cases, set elevation to nan.
if 'geo_meanElev' not in pages2k_data[counter]:
pages2k_data[counter]['geo_meanElev'] = np.nan
# Ensure lon is in [0,360] domain
if pages2k_data[counter]['geo_meanLon'] < 0.0:
pages2k_data[counter]['geo_meanLon'] = 360 + pages2k_data[counter]['geo_meanLon']
# Determine the seasonality of the record.
# Seasonal names were mapped to the three-month climatological seasons.
# 'early summer' was mapped to the first two months of summer only (an
# approximation that may warrant revisiting), and 'growing season' to summer.
season_orig = pages2k_data[counter]['climateInterpretation_seasonality']
if any(char.isdigit() for char in season_orig):
pages2k_data_seasonality = list(map(int,season_orig.split(' ')))
elif season_orig == 'annual':
if year_type == 'tropical year': pages2k_data_seasonality = [4,5,6,7,8,9,10,11,12,13,14,15]
else: pages2k_data_seasonality = [1,2,3,4,5,6,7,8,9,10,11,12]
elif season_orig == 'summer':
if pages2k_data[counter]['geo_meanLat'] >= 0: pages2k_data_seasonality = [6,7,8]
else: pages2k_data_seasonality = [-12,1,2]
elif season_orig == 'winter':
if pages2k_data[counter]['geo_meanLat'] >= 0: pages2k_data_seasonality = [-12,1,2]
else: pages2k_data_seasonality = [6,7,8]
elif season_orig == 'winter/spring':
if pages2k_data[counter]['geo_meanLat'] >= 0: pages2k_data_seasonality = [-12,1,2,3,4,5]
else: pages2k_data_seasonality = [6,7,8,9,10,11]
elif season_orig == 'early summer':
if pages2k_data[counter]['geo_meanLat'] >= 0: pages2k_data_seasonality = [6,7]
else: pages2k_data_seasonality = [-12,1]
elif season_orig == 'growing season':
if pages2k_data[counter]['geo_meanLat'] >= 0: pages2k_data_seasonality = [6,7,8]
else: pages2k_data_seasonality = [-12,1,2]
else:
if year_type == 'tropical year': pages2k_data_seasonality = [4,5,6,7,8,9,10,11,12,13,14,15]
else: pages2k_data_seasonality = [1,2,3,4,5,6,7,8,9,10,11,12]
# If the year type is "tropical", change all records tagged as "annual" to be tropical-year.
if year_type == 'tropical year' and pages2k_data_seasonality == [1,2,3,4,5,6,7,8,9,10,11,12]:
pages2k_data_seasonality = [4,5,6,7,8,9,10,11,12,13,14,15]
# Some code to fix two erroneous seasonality metadata entries found in the PAGES2kv2 file:
# The data in the file itself should be fixed, but the error is dealt with here in the meantime.
if pages2k_data_seasonality == [6,7,2008]:
pages2k_data_seasonality = [6,7,8]
elif pages2k_data_seasonality == [7,8,2009]:
pages2k_data_seasonality = [7,8,9]
# Spell out the name of the interpretation variable.
if pages2k_data[counter]['climateInterpretation_variable'] == 'T':
pages2k_data[counter]['climateInterpretation_variable'] = 'temperature'
tot.append(len(nb))
# ----------------------------------------------------------------------
# Filter the records which correspond to the proxy types & measurements
# specified in proxy_def dictionary. For records retained, transfer
# a subset of the available information to the elements used in
# the LMR proxy database.
# ----------------------------------------------------------------------
for key in sorted(proxy_def.keys()):
proxy_archive = key.split('_')[0]
if pages2k_data[counter]['archiveType'] == proxy_archive \
and pages2k_data[counter]['paleoData_variableName'] in proxy_def[key]:
proxy_name = pages2k_data[counter]['siteID']
proxy_dict_pagesv2[proxy_name] = {}
# metadata
proxy_dict_pagesv2[proxy_name]['Archive'] = pages2k_data[counter]['archiveType']
proxy_dict_pagesv2[proxy_name]['Measurement'] = pages2k_data[counter]['paleoData_variableName']
proxy_dict_pagesv2[proxy_name]['SiteName'] = pages2k_data[counter]['geo_siteName']
proxy_dict_pagesv2[proxy_name]['StudyName'] = pages2k_data[counter]['pub_title']
proxy_dict_pagesv2[proxy_name]['Investigators'] = pages2k_data[counter]['pub_author']
proxy_dict_pagesv2[proxy_name]['Location'] = pages2k_data[counter]['geo_pages2kRegion']
proxy_dict_pagesv2[proxy_name]['Resolution (yr)'] = proxy_resolution
proxy_dict_pagesv2[proxy_name]['Lat'] = pages2k_data[counter]['geo_meanLat']
proxy_dict_pagesv2[proxy_name]['Lon'] = pages2k_data[counter]['geo_meanLon']
proxy_dict_pagesv2[proxy_name]['Elevation'] = pages2k_data[counter]['geo_meanElev']
proxy_dict_pagesv2[proxy_name]['YearRange'] = (int('%.0f' %pages2k_data[counter]['time_annual'][0]),int('%.0f' %pages2k_data[counter]['time_annual'][-1]))
proxy_dict_pagesv2[proxy_name]['Databases'] = ['PAGES2kv2']
proxy_dict_pagesv2[proxy_name]['Seasonality'] = pages2k_data_seasonality
proxy_dict_pagesv2[proxy_name]['climateVariable'] = pages2k_data[counter]['climateInterpretation_variable']
proxy_dict_pagesv2[proxy_name]['Realm'] = pages2k_data[counter]['climateInterpretation_variableDetail']
proxy_dict_pagesv2[proxy_name]['climateVariableDirec'] = pages2k_data[counter]['climateInterpretation_interpDirection']
# data
proxy_dict_pagesv2[proxy_name]['Years'] = pages2k_data[counter]['time_annual']
proxy_dict_pagesv2[proxy_name]['Data'] = pages2k_data[counter]['data_annual']
nbtot = sum(tot)
print('----------------------------------------------------------------------')
print(' PAGES2kv2 SUMMARY: ')
print(' Total nb of records found in file : %d' %nbsites)
print(' Number of proxy chronologies included in df : %d' %(len(proxy_dict_pagesv2)))
print(' ------------------------------------------------------')
print(' ')
tot = []
for key in sorted(proxy_def.keys()):
proxy_archive = key.split('_')[0]
proxy_measurement = proxy_def[key]
# change the association between proxy type and proxy measurement for Breitenmoser tree ring data
if key == 'Tree Rings_WidthBreit':
proxy_measurement = [item+'_breit' for item in proxy_measurement]
nb = []
for siteID in list(proxy_dict_pagesv2.keys()):
if proxy_dict_pagesv2[siteID]['Archive'] == proxy_archive and proxy_dict_pagesv2[siteID]['Measurement'] in proxy_measurement:
nb.append(siteID)
print((' %s : %d' %('{:40}'.format(key), len(nb))))
tot.append(len(nb))
nbtot = sum(tot)
print(' ------------------------------------------------------')
print((' %s : %d' %('{:40}'.format('Total:'), nbtot)))
print('----------------------------------------------------------------------')
print(' ')
elapsed_time = clock.time() - begin_time
print('PAGES2k phase2 data extraction completed in %s secs' %str(elapsed_time))
return proxy_dict_pagesv2
# ===================================================================================
# For NCDC-templated proxy data files -----------------------------------------------
# ===================================================================================
def contains_blankspace(s):
return any(c in s for c in string.whitespace)
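# e.g. contains_blankspace('Tree Rings') -> True, contains_blankspace('d18O') -> False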
# ===================================================================================
def colonReader(string, fCon, fCon_low, end):
'''This function seeks a specified string (or list of strings) within
the transcribed file fCon (lowercase version fCon_low) until a specified
character (typically end of the line) is found.
If a list of strings is provided, make sure they encompass all possibilities
From <NAME> (Univ. of Southern California)
'''
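# Illustrative behaviour (hypothetical file content): if fCon contains the line
# "Elevation: 1250 m", then colonReader('elevation', fCon, fCon.lower(), '\n')
# returns '1250 m'.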
if isinstance(string, str):
lstr = string + ': ' # append the annoying stuff
Index = fCon_low.find(lstr)
Len = len(lstr)
if Index != -1:
endlIndex = fCon_low[Index:].find(end)
rstring = fCon[Index+Len:Index+endlIndex] # returned string
if rstring[-1:] == '\r': # strip the '\r' character if it appears
rstring = rstring[:-1]
return rstring.strip()
else:
return ""
else:
num_str = len(string)
rstring = "" # initialize returned string
for k in range(0,num_str): # loop over possible strings
lstr = string[k] + ': ' # append the annoying stuff
Index = fCon_low.find(lstr)
Len = len(lstr)
if Index != -1:
endlIndex = fCon_low[Index:].find(end)
rstring = fCon[Index+Len:Index+endlIndex]
if rstring[-1:] == '\r': # strip the '\r' character if it appears
rstring = rstring[:-1]
if rstring == "":
return ""
else:
return rstring.strip()
# ===================================================================================
# ===================================================================================
def read_proxy_data_NCDCtxt(site, proxy_def, year_type=None, gaussianize_data=False):
#====================================================================================
# Purpose: Reads data from a selected site (chronology) in NCDC proxy dataset
#
# Input :
# - site : Full name of proxy data file, including the directory where
# file is located.
# - proxy_def : Dictionary containing information on proxy types to look for
# and associated characteristics, such as possible proxy
# measurement labels for the specific proxy type
# (ex. ['d18O','d18o','d18o_stk','d18o_int','d18o_norm']
# for delta 18 oxygen isotope measurements)
#
# Returns :
# - id : Site id read from the data file
# - lat/lon : latitude & longitude of the site
# - alt : Elevation of the site
# - time : Array containing the time of uploaded data
# - value : Array of uploaded proxy data
#
# Author(s): <NAME>, Univ. of Washington, Dept. of Atmospheric Sciences
# based on "ncdc_file_parser.py" code from <NAME>
# (Univ. of Southern California)
#
# Date : March 2015
#
# Revision : None
#
#====================================================================================
# Possible header definitions of time in data files ...
time_defs = ['age', 'age_int', 'year', \
'y_ad','Age_AD','age_AD','age_AD_ass','age_AD_int','Midpt_year','AD',\
'age_yb1950','yb_1950','yrb_1950',\
'kyb_1950',\
'yb_1989','age_yb1989',\
'yb_2000','yr_b2k','yb_2k','ky_b2k','kyb_2000','kyb_2k','kab2k','ka_b2k','kyr_b2k',\
'ky_BP','kyr_BP','ka_BP','age_kaBP','yr_BP','calyr_BP','Age(yrBP)','age_calBP','cal yr BP']
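# Naming convention implied by the unit conversions applied further below:
#   'yb_<YYYY>' / 'kyb_<YYYY>' : (kilo-)years before reference year <YYYY>
#   '*_BP'                     : (kilo-)years before present, i.e. before 1950 C.E.
#   '*_b2k'                    : (kilo-)years before 2000 C.E.
#   'y_ad'                     : years C.E. (no conversion needed)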
filename = site
valid_frac = 0.5
if os.path.isfile(filename):
print(' ')
print('File: %s' % filename)
# Define root string for filename
file_s = filename.replace(" ", '_') # strip all whitespaces if present
fileroot = '_'.join(file_s.split('.')[:-1])
# Open the file and port content to a string object
# Changed assumed encoding to UTF-8, anything not readable replaced with
# a '?' --AP Jan 2018
filein = open(filename, encoding='utf-8', errors='replace')
fileContent = filein.read()
fileContent_low = fileContent.lower()
# Initialize empty dictionary
d = {}
# Assign default values to some metadata
d['ElevationUnit'] = 'm'
d['TimeUnit'] = 'y_ad'
# note: 8240/2030 ASCII code for "permil"
# ===========================================================================
# ===========================================================================
# Extract metadata from file ------------------------------------------------
# ===========================================================================
# ===========================================================================
try:
# 'Archive' is the proxy type
archive_tag = colonReader('archive', fileContent, fileContent_low, '\n')
# to match definitions of records from original NCDC-templated files and those
# provided by <NAME> (U. of Arizona)
if archive_tag == 'Paleoceanography': archive_tag = 'Marine Cores'
d['Archive'] = archive_tag
# Other info
study_name = colonReader('study_name', fileContent, fileContent_low, '\n')
d['Title'] = study_name
investigators = colonReader('investigators', fileContent, fileContent_low, '\n')
investigators = investigators.replace(';',' and') # replace ';' so semicolons do not break downstream parsing of the author list
d['Investigators'] = investigators
d['PubDOI'] = colonReader('doi', fileContent, fileContent_low, '\n')
# ===========================================================================
# Extract information from the "Site_Information" section of the file -------
# ===========================================================================
# Find beginning of block
sline_begin = fileContent.find('# Site_Information:')
if sline_begin == -1:
sline_begin = fileContent.find('# Site_Information')
if sline_begin == -1:
sline_begin = fileContent.find('# Site Information')
# Find end of block: the '# Data_Collection' header appears with several
# spelling and trailing-whitespace variants across files, so search for any of them
match = re.search(r'# Data[_ ]Collection(:|[ \t]*\n)', fileContent)
sline_end = match.start() if match else -1
SiteInfo = fileContent[sline_begin:sline_end]
SiteInfo_low = SiteInfo.lower()
d['SiteName'] = colonReader('site_name', SiteInfo, SiteInfo_low, '\n')
d['Location'] = colonReader('location', SiteInfo, SiteInfo_low, '\n')
# get lat/lon info
try:
str_lst = ['northernmost_latitude', 'northernmost latitude'] # documented instances of this field property
d['NorthernmostLatitude'] = float(colonReader(str_lst, SiteInfo, SiteInfo_low, '\n'))
str_lst = ['southernmost_latitude', 'southernmost latitude'] # documented instances of this field property
d['SouthernmostLatitude'] = float(colonReader(str_lst, SiteInfo, SiteInfo_low, '\n'))
str_lst = ['easternmost_longitude', 'easternmost longitude'] # documented instances of this field property
d['EasternmostLongitude'] = float(colonReader(str_lst, SiteInfo, SiteInfo_low, '\n'))
str_lst = ['westernmost_longitude', 'westernmost longitude'] # documented instances of this field property
d['WesternmostLongitude'] = float(colonReader(str_lst, SiteInfo, SiteInfo_low, '\n'))
except (EmptyError,TypeError,ValueError) as err:
print('*** %s' % err.args)
print('*** WARNING ***: Valid values of lat/lon were not found! Skipping proxy record...')
return (None, None)
# get elevation info
elev = colonReader('elevation', SiteInfo, SiteInfo_low, '\n')
if 'nan' not in elev and len(elev)>0:
elev_s = elev.split(' ')
# is elevation negative (depth)?
if '-' in elev_s[0] or d['Archive'] == 'Marine Cores':
negative = True
sign = '-'
else:
negative = False
sign = ''
# is there a decimal in elev_s?
if '.' in elev_s[0]:
elev_s_split = elev_s[0].split('.')
elev_s_int = ''.join(c for c in elev_s_split[0] if c.isdigit())
elev_s_dec = ''.join(c for c in elev_s_split[1] if c.isdigit())
d['Elevation'] = float(sign+elev_s_int+'.'+elev_s_dec)
else:
d['Elevation'] = float(sign+''.join(c for c in elev_s[0] if c.isdigit())) # to only keep digits ...
else:
d['Elevation'] = float('NaN')
# ===========================================================================
# Extract information from the "Data_Collection" section of the file --------
# ===========================================================================
# Find beginning of block
sline_begin = fileContent.find('# Data_Collection:')
if sline_begin == -1:
sline_begin = fileContent.find('# Data_Collection')
if sline_begin == -1:
sline_begin = fileContent.find('# Data_Collection\n')
# Find end of block
sline_end = fileContent.find('# Variables:')
if sline_end == -1:
sline_end = fileContent.find('# Variables\n')
if sline_end == -1:
sline_end = fileContent.find('# Variables \n')
if sline_end == -1:
sline_end = fileContent.find('# Variables')
if sline_end == -1:
sline_end = fileContent.find('# Variables ')
DataColl = fileContent[sline_begin:sline_end]
DataColl_low = DataColl.lower()
d['CollectionName'] = colonReader('collection_name', DataColl, DataColl_low, '\n')
if not d['CollectionName']: d['CollectionName'] = os.path.splitext(os.path.basename(filename))[0] # strip directory and '.txt' extension
EarliestYearStr = colonReader('earliest_year', DataColl, DataColl_low, '\n')
MostRecentYearStr = colonReader('most_recent_year', DataColl, DataColl_low, '\n')
d['EarliestYear'] = None
d['MostRecentYear'] = None
if EarliestYearStr: d['EarliestYear'] = float(EarliestYearStr)
if MostRecentYearStr: d['MostRecentYear'] = float(MostRecentYearStr)
d['TimeUnit'] = colonReader('time_unit', DataColl, DataColl_low, '\n')
if not d['TimeUnit']: d['TimeUnit'] = colonReader('time unit', DataColl, DataColl_low, '\n')
if d['TimeUnit'] not in time_defs:
print('*** WARNING *** Time_Unit *%s* not in recognized time definitions. Skipping proxy record...' %d['TimeUnit'])
return (None, None)
# Get Notes: information, if it exists
notes = colonReader('notes', DataColl, DataColl_low, '\n')
if notes: # not empty
# database info is in form {"database":db1}{"database":db2} ...
# extract fields that are in {}. This produces a list.
jsdata = re.findall('\{.*?\}',notes)
bad_chars = '{}"'
jsdata = [item.translate(str.maketrans("", "", bad_chars)) for item in jsdata]
# Look for database information
# -----------------------------
# item in jsdata list with database info?
# TODO: ... think about using try/except instead ...
dbinfo = None
jsdata_db = [item for i, item in enumerate(jsdata) if 'database:' in item]
if jsdata_db:
db_lst = re.sub('database:', '', jsdata_db[0]).split(',')
if len(db_lst) > 1:
dbinfo = [item.split(':')[1] for item in db_lst]
else:
dbinfo = db_lst
# check if some db info exists
if dbinfo:
d['Databases'] = dbinfo
else:
# Set to default value if not found.
#d['Databases'] = None
d['Databases'] = ['LMR']
# Look for information on "climate interpretation" of proxy record
# ----------------------------------------------------------------
# Initialize metadata to be extracted
seasonality = [1,2,3,4,5,6,7,8,9,10,11,12] # annual (calendar)
climateVariable = None
climateVariableRealm = None
climateVariableDirec = None
jsdata_clim = [item for i, item in enumerate(jsdata) if 'climateInterpretation:' in item]
if jsdata_clim:
clim_lst = re.sub('climateInterpretation:', '', jsdata_clim[0])
clim_lst = clim_lst.replace('[','(').replace(']',')')
tmp = re.split(r',\s*(?![^()]*\))',clim_lst)
clim_elements = [item.replace('(','[').replace(')',']') for item in tmp]
seasonality = [item.split(':')[1] for item in clim_elements if 'seasonality:' in item][0]
climateVariable = [item.split(':')[1] for item in clim_elements if 'climateVariable:' in item][0]
climateVariableRealm = [item.split(':')[1] for item in clim_elements if 'climateVariableDetail:' in item][0]
climateVariableDirec = [item.split(':')[1] for item in clim_elements if 'interpDirection:' in item][0]
if len(seasonality) == 0: seasonality = [1,2,3,4,5,6,7,8,9,10,11,12]
if len(climateVariable) == 0: climateVariable = None
if len(climateVariableRealm) == 0: climateVariableRealm = None
if len(climateVariableDirec) == 0: climateVariableDirec = None
# Some translation...
if climateVariable == 'T': climateVariable = 'temperature'
if climateVariable == 'M': climateVariable = 'moisture'
# test whether seasonality is a string or already a list
# if a string, convert to list
if type(seasonality) is not list:
if isinstance(seasonality,six.string_types):
seasonality = ast.literal_eval(seasonality)
else:
print('Problem with seasonality metadata! Exiting!')
raise SystemExit(1)
d['Seasonality'] = seasonality
d['climateVariable'] = climateVariable
d['climateVariableRealm'] = climateVariableRealm
d['climateVariableDirec'] = climateVariableDirec
# Look for information about duplicate proxy records
# --------------------------------------------------
dup_lst = []
jsdata_dup = [item for i, item in enumerate(jsdata) if 'duplicate:' in item]
if jsdata_dup:
tmp = re.sub('duplicate:', '', jsdata_dup[0]).split(',')
if len(tmp) > 1:
dup_lst = [item.split(':')[1].replace('.txt','') for item in tmp]
else:
dup_lst = [item.replace('.txt','') for item in tmp]
d['Duplicates'] = dup_lst
"""
# Old code that worked for NCDC v0.0.0
# Look for information on relation to temperature
# -----------------------------------------------
clim_temp_relation = [item.split(':')[1] for item in jsdata if item.split(':')[0] == 'relationship']
if clim_temp_relation:
d['Relation_to_temp'] = clim_temp_relation[0]
else:
d['Relation_to_temp'] = None
# Look for information on the nature of sensitivity of the proxy data
# (i.e. temperature or moisture or etc.)
# -------------------------------------------------------------------
clim_sensitivity = [item.split(':')[1] for item in jsdata if item.split(':')[0] == 'sensitivity']
if clim_sensitivity:
d['Sensitivity'] = clim_sensitivity[0]
else:
d['Sensitivity'] = None
"""
d['Relation_to_temp'] = None
d['Sensitivity'] = None
else:
# Default values if not found.
#d['Databases'] = None
d['Databases'] = ['LMR']
d['Seasonality'] = [1,2,3,4,5,6,7,8,9,10,11,12]
d['climateVariable'] = None
d['climateVariableRealm'] = None
d['climateVariableDirec'] = None
d['Duplicates'] = []
d['Relation_to_temp'] = None
d['Sensitivity'] = None
# If the year type is "tropical", change all annual records to the tropical-year mean.
if year_type == 'tropical year' and d['Seasonality'] == [1,2,3,4,5,6,7,8,9,10,11,12]:
d['Seasonality'] = [4,5,6,7,8,9,10,11,12,13,14,15]
except EmptyError as e:
print(e)
return (None, None)
# ===========================================================================
# ===========================================================================
# Extract the data from file ------------------------------------------------
# ===========================================================================
# ===========================================================================
# ===========================================================================
# Extract information from the "Variables" section of the file --------------
# ===========================================================================
# Find beginning of block
sline_begin = fileContent.find('# Variables:')
if sline_begin == -1:
sline_begin = fileContent.find('# Variables')
# Find end of block
sline_end = fileContent.find('# Data:')
if sline_end == -1:
sline_end = fileContent.find('# Data\n')
VarDesc = fileContent[sline_begin:sline_end].splitlines()
nvar = 0 # counter for variable number
for line in VarDesc: # handle all the NCDC convention changes
# (TODO: more clever/general exception handling)
if line and line[0] != '' and line[0] != ' ' and line[0:2] != '#-' and line[0:2] != '# ' and line != '#':
nvar = nvar + 1
line2 = line.replace('\t',',') # clean up
sp_line = line2.split(',') # split line along commas
if len(sp_line) < 10: # all 10 fields (indices 0-9) are accessed below
continue
else:
d['DataColumn' + format(nvar, '02') + '_ShortName'] = sp_line[0].strip('#').strip(' ')
d['DataColumn' + format(nvar, '02') + '_LongName'] = sp_line[1]
d['DataColumn' + format(nvar, '02') + '_Material'] = sp_line[2]
d['DataColumn' + format(nvar, '02') + '_Uncertainty'] = sp_line[3]
d['DataColumn' + format(nvar, '02') + '_Units'] = sp_line[4]
d['DataColumn' + format(nvar, '02') + '_Seasonality'] = sp_line[5]
d['DataColumn' + format(nvar, '02') + '_Archive'] = sp_line[6]
d['DataColumn' + format(nvar, '02') + '_Detail'] = sp_line[7]
d['DataColumn' + format(nvar, '02') + '_Method'] = sp_line[8]
d['DataColumn' + format(nvar, '02') + '_CharOrNum'] = sp_line[9].strip(' ')
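# Field order assumed above for each '# Variables' entry (tab- or comma-delimited):
# shortname, longname, material, uncertainty, units, seasonality, archive,
# detail, method, character-or-numeric flag.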
print('Site ID: %s Archive: %s' %(d['CollectionName'], d['Archive']))
# Cross-reference "ShortName" entries with possible proxy measurements specified in proxy_def dictionary
proxy_types_all = list(proxy_def.keys())
# Restrict to those matching d['Archive']
proxy_types_keep = [s for s in proxy_types_all if d['Archive'] in s or d['Archive'] in s.lower()]
# Which columns contain the important data (time & proxy values) to be extracted?
# Referencing variables (time/age & proxy data) with data column IDs
# Time/age
TimeColumn_ided = False
for ivar in range(nvar):
if d['DataColumn' + format(ivar+1, '02') + '_ShortName'] in time_defs:
TimeColumn_ided = True
TimeColumn_id = ivar
if TimeColumn_ided:
print(' Time/Age data in data column: %d' %TimeColumn_id)
else:
print(' ')
# Proxy data
# Dictionary containing info on proxy type and column ID where to find the data
DataColumns_ided = False
proxy_types_in_file = {}
for ivar in range(nvar):
proxy_types = [s for s in proxy_types_keep if d['DataColumn' + format(ivar+1, '02') + '_ShortName'] in proxy_def[s]]
if proxy_types: # if non-empty list
# Crude logic to distinguish between PAGES2kv2 vs Breitenmoser Tree Rings data at proxy type level
if len(proxy_types) > 1 and [item for item in proxy_types if 'Tree Rings' in item ]:
if 'Breitenmoser' in d['Investigators'].split(',')[0]:
treetag = '_WidthBreit'
else:
treetag = '_WidthPages2'
ind = [i for i, s in enumerate(proxy_types) if s.endswith(treetag)][0]
proxy_types_in_file[proxy_types[ind]] = (d['DataColumn' + format(ivar+1, '02') + '_ShortName'], ivar)
else:
proxy_types_in_file[proxy_types[0]] = (d['DataColumn' + format(ivar+1, '02') + '_ShortName'], ivar)
dkeys = list(proxy_types_in_file.keys())
nbvalid = len(dkeys)
if nbvalid > 0:
DataColumns_ided = True
print(' Found %d valid proxy variables:' %nbvalid)
for i in range(nbvalid):
print(' %d : %s %s' %(i,dkeys[i],proxy_types_in_file[dkeys[i]]))
# Check status of what has been found in the data file
# If nothing found, just return (exit function by returning None)
if not TimeColumn_ided or not DataColumns_ided:
print('*** WARNING *** Valid data was not found in file!')
return (None, None)
# -- Checking time/age definition --
tdef = d['TimeUnit']
# Crude sanity checks on make-up of tdef string
if contains_blankspace(tdef):
tdef = tdef.replace(' ', '_')
tdef_parsed = tdef.split('_')
if len(tdef_parsed) != 2: # e.g. 'cal_yr_BP' splits into three tokens; merged into 'calyr_BP' below
if tdef_parsed[0] == 'cal' and tdef_parsed[1] == 'yr':
tdef = tdef_parsed[0]+tdef_parsed[1]+'_'+tdef_parsed[2]
tdef_parsed = tdef.split('_')
else:
print('*** WARNING *** Unrecognized time definition. Skipping proxy record...')
return (None, None)
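# Example of the normalization above (illustrative): a TimeUnit of 'cal yr BP'
# becomes 'cal_yr_BP' after blank replacement, then 'calyr_BP', i.e. the parsed
# form ['calyr', 'BP'] expected by the conversion code below.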
# ===========================================================================
# Extract the numerical data from the "Data" section of the file ------------
# ===========================================================================
# Find line number at beginning of data block
sline = fileContent.find('# Data:')
if sline == -1:
sline = fileContent.find('# Data\n')
fileContent_datalines = fileContent[sline:].splitlines()
# Look for missing value info
missing_info_line= [line for line in fileContent_datalines if 'missing value' in line.lower()]
if len(missing_info_line) > 0:
missing_info = missing_info_line[0].split(':')[-1].replace(' ', '')
if len(missing_info) > 0:
missing_values = np.array([float(missing_info)])
else:
# Line present but no value found
missing_values = np.array([-999.0, np.nan])
else:
# Line not found
missing_values = np.array([-999.0, np.nan])
# Find where the actual data begin (skip comment, blank, and indented lines)
start_line_index = 0
for line in fileContent_datalines:
if not line or line[0]=='#' or line[0] == ' ':
start_line_index += 1
else:
break
# Extract column descriptions (headers) of the data matrix
DataColumn_headers = fileContent_datalines[start_line_index].splitlines()[0].split('\t')
# Strip possible blanks in column headers
DataColumn_headers = [item.strip() for item in DataColumn_headers]
nc = len(DataColumn_headers)
# ---------------------
# -- Now the data !! --
# ---------------------
inds_to_extract = []
for dkey in dkeys:
inds_to_extract.append(proxy_types_in_file[dkey][1])
# from start of data block to end, in a list
datalist = fileContent_datalines[start_line_index+1:]
# Strip any empty lines
datalist = [x for x in datalist if x]
nbdata = len(datalist)
# into numpy arrays
time_raw = np.zeros(shape=[nbdata])
data_raw = np.zeros(shape=[nbdata,nbvalid])
# fill with NaNs for default values
data_raw[:] = np.nan
for i in range(nbdata):
tmp = datalist[i].split('\t')
# any empty element replaced by NANs
tmp = ['NAN' if x == '' else x for x in tmp]
time_raw[i] = tmp[TimeColumn_id]
# strip possible "()" in data before conversion to float
# not sure why these are found sometimes ... sigh...
tmp = [tmp[j].replace('(','') for j in range(len(tmp))]
tmp = [tmp[j].replace(')','') for j in range(len(tmp))]
data_raw[i,:] = [float(tmp[j]) for j in inds_to_extract]
# -- Double check data validity --
# (time/age in particular as some records have entries w/ undefined age)
# Eliminate entries for which time/age is not defined (tagged as missing)
mask = np.in1d(time_raw, missing_values, invert=True)
time_raw = time_raw[mask]
data_raw = data_raw[mask,:]
# Making sure remaining entries in data array with missing values are converted to NaN.
ntime, ncols = data_raw.shape
for c in range(ncols):
data_raw[np.in1d(data_raw[:,c], missing_values), c] = np.nan
# --- Modify "time" array into "years CE" if not already ---
# Here, tdef_parsed *should* have the expected structure
if len(tdef_parsed) == 2 and tdef_parsed[0] and tdef_parsed[1]:
if tdef_parsed[0] == 'yb' and is_number(tdef_parsed[1]):
time_raw = float(tdef_parsed[1]) - time_raw
elif tdef_parsed[0] == 'kyb' and is_number(tdef_parsed[1]):
time_raw = float(tdef_parsed[1]) - 1000.0*time_raw
elif tdef_parsed[0] == 'calyr' and tdef_parsed[1] == 'BP':
time_raw = 1950.0 - time_raw
elif tdef_parsed[0] == 'kyr' and tdef_parsed[1] == 'BP':
time_raw = 1950.0 - 1000.*time_raw
elif tdef_parsed[0] == 'kyr' and tdef_parsed[1] == 'b2k':
time_raw = 2000.0 - 1000.*time_raw
elif tdef_parsed[0] == 'y' and tdef_parsed[1] == 'ad':
pass # do nothing, time already in years_AD
else:
print('*** WARNING *** Unrecognized time definition. Skipping proxy record...')
return (None, None)
else:
print('*** WARNING *** Unexpected time definition. Skipping proxy record...')
return (None, None)
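# Example of the conversion above (illustrative): with tdef_parsed == ['kyr','BP']
# and time_raw = [1.0, 2.5] (kyr BP), the times become [950.0, -550.0] years C.E.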
# Making sure the tagged earliest and most recent years of the record are consistent with the data,
# already transformed in year CE, common to all records before inclusion in the pandas DF.
d['EarliestYear'] = np.min(time_raw)
d['MostRecentYear'] = np.max(time_raw)
# Initial range in years for which data is available
yearRange = (int('%.0f' % d['EarliestYear']),int('%.0f' %d['MostRecentYear']))
# proxy identifier and geo location
id = d['CollectionName']
alt = d['Elevation']
# Somewhat crude assignment of lat/lon (midpoint of bounding box when a range is given):
if d['NorthernmostLatitude'] != d['SouthernmostLatitude']:
lat = (d['NorthernmostLatitude'] + d['SouthernmostLatitude'])/2.0
else:
lat = d['NorthernmostLatitude']
if d['EasternmostLongitude'] != d['WesternmostLongitude']:
lon = (d['EasternmostLongitude'] + d['WesternmostLongitude'])/2.0
else:
lon = d['EasternmostLongitude']
# Ensure lon is in [0,360] domain
if lon < 0.0:
lon = 360 + lon
# If subannual, average up to annual --------------------------------------------------------
time_annual, data_annual, proxy_resolution = compute_annual_means(time_raw,data_raw,valid_frac,year_type)
# If gaussianize_data is set to true, transform the proxy data to Gaussian.
# This option should only be used when using regressions, not physically-based PSMs.
if gaussianize_data == True:
data_annual = gaussianize(data_annual)
# update to yearRange given availability of annual data
yearRange = (int('%.0f' %time_annual[0]),int('%.0f' %time_annual[-1]))
# Define and fill list of dictionaries to be returned by function
returned_list = []
duplicate_list = []
for k in range(len(dkeys)):
key = dkeys[k]
ind = proxy_types_in_file[key][1]
proxy_units = d['DataColumn' + format(ind+1, '02') + '_Units']
proxy_archive = key.split('_')[0]
proxy_measurement = d['DataColumn' + format(ind+1, '02') + '_ShortName']
if key == 'Tree Rings_WidthBreit': proxy_measurement = proxy_measurement + '_breit'
proxy_name = d['CollectionName']+':'+proxy_measurement
proxydata_dict = {}
proxydata_dict[proxy_name] = {}
if d['Archive'] != proxy_archive: d['Archive'] = proxy_archive
proxydata_dict[proxy_name]['Archive'] = d['Archive']
proxydata_dict[proxy_name]['SiteName'] = d['SiteName']
proxydata_dict[proxy_name]['StudyName'] = d['Title']
proxydata_dict[proxy_name]['Investigators'] = d['Investigators']
proxydata_dict[proxy_name]['Location'] = d['Location']
proxydata_dict[proxy_name]['Resolution (yr)'] = proxy_resolution
proxydata_dict[proxy_name]['Lat'] = lat
proxydata_dict[proxy_name]['Lon'] = lon
proxydata_dict[proxy_name]['Elevation'] = alt
proxydata_dict[proxy_name]['YearRange'] = yearRange
proxydata_dict[proxy_name]['Measurement'] = proxy_measurement
proxydata_dict[proxy_name]['DataUnits'] = proxy_units
proxydata_dict[proxy_name]['Databases'] = d['Databases']
proxydata_dict[proxy_name]['Seasonality'] = d['Seasonality']
proxydata_dict[proxy_name]['climateVariable'] = d['climateVariable']
proxydata_dict[proxy_name]['Realm'] = d['climateVariableRealm']
proxydata_dict[proxy_name]['climateVariableDirec'] = d['climateVariableDirec']
# *** for v.0.1.0:
#proxydata_dict[proxy_name]['Relation_to_temp'] = d['Relation_to_temp']
#proxydata_dict[proxy_name]['Sensitivity'] = d['Sensitivity']
proxydata_dict[proxy_name]['Years'] = time_annual
proxydata_dict[proxy_name]['Data'] = data_annual[:, k]
if d['Duplicates']:
duplicate_list.extend(d['Duplicates'])
# append to list of dictionaries
returned_list.append(proxydata_dict)
else:
print('***File NOT FOUND: %s' % filename)
returned_list = []
duplicate_list = []
return returned_list, duplicate_list
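# Note: each element of 'returned_list' is a single-entry dictionary keyed by
# '<CollectionName>:<measurement>', holding the metadata fields and the annual
# 'Years'/'Data' arrays assembled above; 'duplicate_list' collects the names of
# records flagged as duplicates in the file metadata.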
# =========================================================================================
def ncdc_txt_to_dict(datadir, proxy_def, year_type, gaussianize_data):
"""
Read proxy data from collection of NCDC-templated text files and store the data in
a python dictionary.
:param datadir :
:param proxy_def :
:param metaout :
:param dataout :
:return:
Author: <NAME>, Univ. of Washington, Jan 2016.
"""
# ===============================================================================
# Upload proxy data from NCDC-formatted text files
# ===============================================================================
begin_time = clock.time()
print('Data from LMR NCDC-templated text files:')
valid_frac = 0.5
# List filenames in the data directory (datadir)
# sites_data is a python list containing the file names to be read
sites_data = glob.glob(datadir+"/*.txt")
nbsites = len(sites_data)
if nbsites == 0:
print('ERROR: NCDC-templated proxy data files not found in directory:'
' %s. Please revise your user-defined parameters or directory/'
' data set-up.' %datadir)
raise SystemExit(1)
# Master dictionary containing all proxy chronologies extracted from the data files.
proxy_dict_ncdc = {}
dupelist = []
# Loop over files
nbsites_valid = 0
for file_site in sites_data:
proxy_list, duplicate_list = read_proxy_data_NCDCtxt(file_site,proxy_def,year_type,gaussianize_data)
if proxy_list: # if returned list is not empty
# extract data from list and populate the master proxy dictionary
for item in proxy_list:
proxy_name = list(item.keys())[0]
# test if dict element already exists
if proxy_name in list(proxy_dict_ncdc.keys()):
dupelist.append(proxy_name)
else:
proxy_dict_ncdc[proxy_name] = item[proxy_name]
nbsites_valid = nbsites_valid + 1
else: # returned list is empty, just move to next site
pass
# ===============================================================================
# Produce a summary of uploaded proxy data &
# generate integrated database in pandas DataFrame format
# ===============================================================================
# Summary
nbchronol = len(proxy_dict_ncdc)
print(' ')
print(' ')
print('----------------------------------------------------------------------')
print(' NCDC SUMMARY: ')
print(' Total nb of files found & queried : %d' % nbsites)
print(' Total nb of files with valid data : %d' % nbsites_valid)
print(' Number of proxy chronologies included in df : %d' % nbchronol)
print(' ------------------------------------------------------')
print(' ')
tot = []
for key in sorted(proxy_def.keys()):
proxy_archive = key.split('_')[0]
proxy_measurement = proxy_def[key]
# change the association between proxy type and proxy measurement for Breitenmoser tree ring data
if key == 'Tree Rings_WidthBreit':
proxy_measurement = [item+'_breit' for item in proxy_measurement]
nb = []
for siteID in list(proxy_dict_ncdc.keys()):
if proxy_dict_ncdc[siteID]['Archive'] == proxy_archive and proxy_dict_ncdc[siteID]['Measurement'] in proxy_measurement:
nb.append(siteID)
print(' %s : %d' %('{:40}'.format(key), len(nb)))
tot.append(len(nb))
nbtot = sum(tot)
print(' ------------------------------------------------------')
print(' %s : %d' %('{:40}'.format('Total:'), nbtot))
print('----------------------------------------------------------------------')
print(' ')
if dupelist:
print('***WARNING***: Proxy records with these names were found multiple times:')
print(dupelist)
elapsed_time = clock.time() - begin_time
print('NCDC data extraction completed in %s secs' %str(elapsed_time))
return proxy_dict_ncdc
# =========================================================================================
def merge_dicts_to_dataframes(proxy_def, ncdc_dict, pages2kv2_dict, meta_outfile, data_outfile, \
duplicates_file, eliminate_duplicates):
"""
Merges two dictionaries containing proxy metadata and data from two data sources
(PAGES2k phase 2 and NCDC-templated proxy data files) into one,
and writes out metadata and data into pickled pandas DataFrames.
Originator: <NAME>, Univ. of Washington, May 2017
"""
if len(ncdc_dict) > 0:
merged_dict = deepcopy(ncdc_dict)
if len(pages2kv2_dict) > 0:
merged_dict.update(pages2kv2_dict)
elif len(pages2kv2_dict) > 0:
merged_dict = deepcopy(pages2kv2_dict)
else:
raise SystemExit('No dataset has been selected for inclusion in the proxy database!')
totchronol = len(merged_dict)
dupecount = 0
if eliminate_duplicates:
print(' ')
print('Checking list of duplicate/bad records:')
# load info on duplicate records
dupes = pd.read_excel(duplicates_file,'ProxyDuplicates')
# numpy array containing names of proxy records to eliminate
toflush = dupes['Record_To_Eliminate'].values
for siteID in list(merged_dict.keys()):
if siteID in toflush:
try:
del merged_dict[siteID]
print(' -- deleting: %s' % siteID)
dupecount += 1
except KeyError:
print(' -- not found: %s' % siteID)
pass
print(' ')
print('----------------------------------------------------------------------')
print(' FINAL SUMMARY: ')
print(' Total number of merged proxy chronologies : %d' %totchronol)
print(' Total number of eliminated chronologies : %d' %dupecount)
print(' Number of proxy chronologies included in df : %d' %len(merged_dict))
print(' ------------------------------------------------------')
tot = []
for key in sorted(proxy_def.keys()):
proxy_archive = key.split('_')[0]
proxy_measurement = proxy_def[key]
# change the association between proxy type and proxy measurement for Breitenmoser tree ring data
if key == 'Tree Rings_WidthBreit':
proxy_measurement = [item+'_breit' for item in proxy_measurement]
nb = []
for siteID in list(merged_dict.keys()):
if merged_dict[siteID]['Archive'] == proxy_archive and merged_dict[siteID]['Measurement'] in proxy_measurement:
nb.append(siteID)
print(' %s : %d' %('{:40}'.format(key), len(nb)))
tot.append(len(nb))
nbtot = sum(tot)
print(' ------------------------------------------------------')
print(' %s : %d' %('{:40}'.format('Total:'), nbtot))
print('----------------------------------------------------------------------')
print(' ')
# ---------------------------------------------------------------------
# Preparing pandas DataFrames containing merged proxy metadata and data
# and output to pickle files
# ---------------------------------------------------------------------
# Loop over proxy types specified in *main*
counter = 0
# Build up pandas DataFrame
metadf = pd.DataFrame()
# headers = ['Proxy ID','Site name','Lat (N)','Lon (E)','Elev','Archive type','Proxy measurement','Resolution (yr)',\
# 'Oldest (C.E.)','Youngest (C.E.)','Location','climateVariable','Realm','Relation_to_climateVariable',\
# 'Seasonality', 'Databases']
headers = ['Proxy ID','Study name','Investigators','Site name','Lat (N)','Lon (E)','Elev','Archive type','Proxy measurement',\
'Resolution (yr)','Oldest (C.E.)','Youngest (C.E.)','Location','climateVariable','Realm','Relation_to_climateVariable',\
'Seasonality', 'Databases']
for key in sorted(proxy_def.keys()):
proxy_archive = key.split('_')[0]
# change the association between proxy type and proxy measurement for Breitenmoser tree ring data
if key == 'Tree Rings_WidthBreit':
proxy_def[key] = [item+'_breit' for item in proxy_def[key]]
for siteID in list(merged_dict.keys()):
if merged_dict[siteID]['Archive'] == proxy_archive and merged_dict[siteID]['Measurement'] in proxy_def[key]:
frame = pd.DataFrame({'a':siteID, 'b':merged_dict[siteID]['StudyName'], 'c':merged_dict[siteID]['Investigators'], \
'd':merged_dict[siteID]['SiteName'], 'e':merged_dict[siteID]['Lat'], 'f':merged_dict[siteID]['Lon'], \
'g':merged_dict[siteID]['Elevation'], 'h':merged_dict[siteID]['Archive'], 'i':merged_dict[siteID]['Measurement'], \
'j':merged_dict[siteID]['Resolution (yr)'], 'k':merged_dict[siteID]['YearRange'][0], \
'l':merged_dict[siteID]['YearRange'][1], 'm':merged_dict[siteID]['Location'], \
'n':merged_dict[siteID]['climateVariable'], 'o':merged_dict[siteID]['Realm'], \
'p':merged_dict[siteID]['climateVariableDirec'], \
'q':None, 'r':None}, index=[counter])
# To get seasonality & databases *lists* into columns 'q' and 'r' of DataFrame
# To be deprecated - frame.set_value(counter,'q',merged_dict[siteID]['Seasonality'])
# To be deprecated - frame.set_value(counter,'r',merged_dict[siteID]['Databases'])
frame.at[counter,'q'] = merged_dict[siteID]['Seasonality']
frame.at[counter,'r'] = merged_dict[siteID]['Databases']
# Append to main DataFrame
metadf = metadf.append(frame)
counter = counter + 1
# Redefine column headers
metadf.columns = headers
# Write metadata to file
print('Now writing metadata to file: %s' %meta_outfile)
metadf.to_pickle(meta_outfile)
# -----------------------------------------------------
# Build the proxy **data** DataFrame and output to file
# -----------------------------------------------------
print(' ')
print('Now creating & loading the data in the pandas DataFrame...')
print(' ')
counter = 0
for siteID in list(merged_dict.keys()):
years = merged_dict[siteID]['Years']
data = merged_dict[siteID]['Data']
[nbdata,] = years.shape
# Load data in numpy array
frame_data = np.zeros(shape=[nbdata,2])
frame_data[:,0] = years
frame_data[:,1] = data
if counter == 0:
# Build up pandas DataFrame
header = ['Proxy ID', siteID]
df = pd.DataFrame({'a':frame_data[:,0], 'b':frame_data[:,1]})
df.columns = header
else:
frame = pd.DataFrame({'Proxy ID':frame_data[:,0], siteID:frame_data[:,1]})
df = df.merge(frame, how='outer', on='Proxy ID')
counter = counter + 1
# Fix DataFrame index and column name
col0 = df.columns[0]
df.set_index(col0, drop=True, inplace=True)
df.index.name = 'Year C.E.'
df.sort_index(inplace=True)
# Write data to file
print('Now writing to file:', data_outfile)
df = df.to_sparse()
df.to_pickle(data_outfile)
print('Done!')
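# --- Editor's illustrative sketch (not part of the original LMR module) ---
# The data DataFrame above is assembled by outer-merging one frame per proxy record on the
# year column and then promoting that column to the index. A minimal, self-contained
# version of the same pattern, with made-up site IDs and values, looks like this:
if __name__ == "__main__":
    _site_a = pd.DataFrame({'Proxy ID': [1800, 1801, 1802], 'SiteA:d18O': [0.1, 0.2, 0.3]})
    _site_b = pd.DataFrame({'Proxy ID': [1801, 1802, 1803], 'SiteB:MXD': [1.5, 1.6, 1.7]})
    _merged = _site_a.merge(_site_b, how='outer', on='Proxy ID')
    _merged.set_index('Proxy ID', drop=True, inplace=True)
    _merged.index.name = 'Year C.E.'
    _merged.sort_index(inplace=True)
    # Years covered by only one record show up as NaN, just like the sparse matrix above.
    print(_merged)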
# =========================================================================================
def ncdc_txt_to_dataframes(datadir, proxy_def, metaout, dataout, eliminate_duplicates):
"""
Takes in NCDC text proxy data and converts it to dataframe storage.
Caveat: This increases size on disk due to the joining along the time index (lots of null values).
But: Makes it easier to query and grab data for the proxy data assimilation experiments.
:param datadir :
:param proxy_def :
:param metaout :
:param dataout :
:return:
Author: <NAME>, Univ. of Washington, Jan 2016.
"""
# ===============================================================================
# Upload proxy data from NCDC-formatted text files
# ===============================================================================
# List filenames in the data directory (dirname)
# files is a python list containing the file names to be read
sites_data = glob.glob(datadir+"/*.txt")
nbsites = len(sites_data)
# Master list containing dictionaries of all proxy chronologies extracted from the data files.
master_proxy_list = []
master_duplicate_list = []
# Loop over files
nbsites_valid = 0
duplicate_list = []
for file_site in sites_data:
proxy_list, duplicate_list = read_proxy_data_NCDCtxt(file_site,proxy_def)
if eliminate_duplicates and duplicate_list:
master_duplicate_list.extend(duplicate_list)
if proxy_list: # if returned list is not empty
# extract dictionary and populate the master proxy list
for item in proxy_list:
master_proxy_list.append(item)
nbsites_valid = nbsites_valid + 1
else: # returned list is empty, just move to next site
pass
nbduplicates = len(master_duplicate_list)
nbextracted = len(master_proxy_list)
# eliminate duplicates if option activated
if eliminate_duplicates:
final_proxy_list = []
master_proxy_siteIDs = [list(item.keys())[0].split(':')[0] for item in master_proxy_list]
inds = [i for i, item in enumerate(master_proxy_siteIDs) if item not in master_duplicate_list]
nbduplicates = len(master_proxy_list) - len(inds)
# extract those not in list of duplicates
for k in inds: final_proxy_list.append(master_proxy_list[k])
else:
final_proxy_list = master_proxy_list
# ===============================================================================
# Produce a summary of uploaded proxy data &
# generate integrated database in pandas DataFrame format
# ===============================================================================
# Summary of the final_proxy_list
nbchronol = len(final_proxy_list)
print(' ')
print(' ')
print('----------------------------------------------------------------------')
print(' SUMMARY: ')
print(' Total nb of files found & queried : ', nbsites)
print(' Total nb of files with valid data : ', nbsites_valid)
print(' Number of proxy chronologies extracted : ', nbextracted)
print(' Number of identified duplicate chronologies : ', nbduplicates)
print(' Number of proxy chronologies included in df : ', nbchronol)
print(' ------------------------------------------------------')
tot = []
# Loop over proxy types specified in *main*
counter = 0
# Build up pandas DataFrame
metadf = pd.DataFrame()
# for v0.0.0:
#headers = ['Proxy ID','Site name','Lat (N)','Lon (E)','Elev','Archive type','Proxy measurement','Resolution (yr)',\
# 'Oldest (C.E.)','Youngest (C.E.)','Location','Sensitivity','Relation_to_temp','Databases']
# for v0.1.0:
headers = ['Proxy ID','Site name','Lat (N)','Lon (E)','Elev','Archive type','Proxy measurement','Resolution (yr)',\
'Oldest (C.E.)','Youngest (C.E.)','Location','climateVariable','Realm','Relation_to_climateVariable',\
'Seasonality', 'Databases']
for key in sorted(proxy_def.keys()):
proxy_archive = key.split('_')[0]
# change the association between proxy type and proxy measurement for Breitenmoser tree ring data
if key == 'Tree Rings_WidthBreit':
proxy_def[key] = [item+'_breit' for item in proxy_def[key]]
nb = []
for item in final_proxy_list:
siteID = list(item.keys())[0]
if item[siteID]['Archive'] == proxy_archive and item[siteID]['Measurement'] in proxy_def[key]:
nb.append(siteID)
# *** for v.0.0.0:
#frame = pd.DataFrame({'a':siteID, 'b':item[siteID]['SiteName'], 'c':item[siteID]['Lat'], 'd':item[siteID]['Lon'], \
# 'e':item[siteID]['Elevation'], 'f':item[siteID]['Archive'], 'g':item[siteID]['Measurement'], \
# 'h':item[siteID]['Resolution (yr)'], 'i':item[siteID]['YearRange'][0], \
# 'j':item[siteID]['YearRange'][1], 'k':item[siteID]['Location'], \
# 'l':item[siteID]['Sensitivity'], 'm':item[siteID]['Relation_to_temp'], 'n':None}, index=[counter])
## To get database *list* into column 'm' of DataFrame
#frame.set_value(counter,'n',item[siteID]['Databases'])
## Append to main DataFrame
#metadf = metadf.append(frame)
# *** for v.0.1.0:
frame = pd.DataFrame({'a':siteID, 'b':item[siteID]['SiteName'], 'c':item[siteID]['Lat'], 'd':item[siteID]['Lon'], \
'e':item[siteID]['Elevation'], 'f':item[siteID]['Archive'], 'g':item[siteID]['Measurement'], \
'h':item[siteID]['Resolution (yr)'], 'i':item[siteID]['YearRange'][0], \
'j':item[siteID]['YearRange'][1], 'k':item[siteID]['Location'], \
'l':item[siteID]['climateVariable'], 'm':item[siteID]['Realm'], \
'n':item[siteID]['climateVariableDirec'], 'o':None, 'p':None}, index=[counter])
# To get seasonality & databases *lists* into columns 'o' and 'p' of DataFrame
frame.set_value(counter,'o',item[siteID]['Seasonality'])
frame.set_value(counter,'p',item[siteID]['Databases'])
# Append to main DataFrame
metadf = metadf.append(frame)
counter = counter + 1
print(' ', '{:40}'.format(key), ' : ', len(nb))
tot.append(len(nb))
nbtot = sum(tot)
print(' ------------------------------------------------------')
print(' ','{:40}'.format('Total:'), ' : ', nbtot)
print('----------------------------------------------------------------------')
print(' ')
# Redefine column headers
metadf.columns = headers
# Write metadata to file
print('Now writing metadata to file:', metaout)
metadf.to_pickle(metaout)
# -----------------------------------------------------
# Build the proxy **data** DataFrame and output to file
# -----------------------------------------------------
print(' ')
print('Now creating & loading the data in the pandas DataFrame...')
print(' ')
counter = 0
for item in final_proxy_list:
siteID = list(item.keys())[0]
years = item[siteID]['Years']
data = item[siteID]['Data']
[nbdata,] = years.shape
# Load data in numpy array
frame_data = np.zeros(shape=[nbdata,2])
frame_data[:,0] = years
frame_data[:,1] = data
if counter == 0:
# Build up pandas DataFrame
header = ['Proxy ID', siteID]
df = pd.DataFrame({'a':frame_data[:,0], 'b':frame_data[:,1]})
"""
Train VGG19
import os
os.system("pip install -U efficientnet")
"""
import argparse
import configparser
import datetime
import os
import keras.backend as K
import numpy as np
import pandas as pd
import tensorflow as tf
from efficientnet import EfficientNetB5, preprocess_input
from keras.applications.densenet import DenseNet201
from keras.applications.densenet import \
preprocess_input as densenet_preprocess_input
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input as vgg16_preprocess_input
from keras.applications.vgg19 import VGG19
from keras.applications.vgg19 import preprocess_input as vgg19_preprocess_input
from keras.callbacks import (EarlyStopping, LearningRateScheduler,
ModelCheckpoint, TensorBoard)
from keras.layers import Dense, Dropout, GlobalAveragePooling2D
from keras.models import Model, Sequential, load_model
from keras.optimizers import SGD, Adam
from keras.preprocessing.image import ImageDataGenerator, load_img
from keras.utils.data_utils import get_file
from PIL import Image, ImageFile
from sklearn.metrics import (classification_report, cohen_kappa_score,
confusion_matrix)
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
ImageFile.LOAD_TRUNCATED_IMAGES = True
###################################################################################################
# Arguments for setting parameters while running array batch job
###################################################################################################
OPTIMISER_MODE = "Adam"
PARSER = argparse.ArgumentParser()
PARSER.add_argument("-c", "--config_file", type=str, required=True,
help="Configuration file path")
PARSER.add_argument("-e", "--epochs", type=int, default=None,
help="Epochs")
ARGS = vars(PARSER.parse_args())
CONFIG_FILE = ARGS["config_file"]
CONFIG = configparser.ConfigParser()
CONFIG.read(CONFIG_FILE)
# Set verbosity
NAME = CONFIG["general"]["name"]
VERBOSITY = int(CONFIG["general"]["verbosity"])
# Set model configuration
PRETRAINED_WEIGHTS = CONFIG["model"].get(
"pretrained_weights_path", fallback=None)
LOSS = CONFIG["model"]["loss"]
METRICS = CONFIG["model"]["metrics"].split(",")
# Dataset folder information
TRAINING_CSV = "../input/training-labels.csv"
DATASET_FOLDER = "../input/train/output_combined2"
TESTSET_FOLDER = "../input/test/Test"
CURRENT_TIMESTAMP = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
OUTPUT_FOLDER = "../output"
CURRENT_OUTPUT = os.path.join(OUTPUT_FOLDER, f"{NAME}_{CURRENT_TIMESTAMP}")
LOGS_FOLDER = os.path.join(CURRENT_OUTPUT, "./logs")
WEIGHTS_FOLDER = os.path.join(CURRENT_OUTPUT, "./weights")
REPORTS_FOLDER = os.path.join(CURRENT_OUTPUT, "./reports")
os.makedirs(OUTPUT_FOLDER, exist_ok=True)
os.makedirs(CURRENT_OUTPUT, exist_ok=True)
os.makedirs(LOGS_FOLDER, exist_ok=True)
os.makedirs(WEIGHTS_FOLDER, exist_ok=True)
os.makedirs(REPORTS_FOLDER, exist_ok=True)
CLASSIFICATION_REPORT_FILE = os.path.join(
REPORTS_FOLDER, "classification_report.txt")
CONFUSION_MATRIX_FILE = os.path.join(REPORTS_FOLDER, "confusion_matrix.txt")
COHEN_KAPPA_SCORE_FILE = os.path.join(REPORTS_FOLDER, "cohen_kappa_metric.txt")
# Image augmentation parameters
IMAGE_AUGMENTATION = CONFIG["image_augmentation"]
HEIGHT = int(IMAGE_AUGMENTATION["height"])
WIDTH = int(IMAGE_AUGMENTATION["width"])
DEPTH = int(IMAGE_AUGMENTATION["depth"])
SHIFT = float(IMAGE_AUGMENTATION["shift"])
ROTATION = float(IMAGE_AUGMENTATION["rotation"])
VAL_AUG_FACTOR = float(
IMAGE_AUGMENTATION["validation_data_augmentation_factor"])
# Hyperparameters
HYPERPARAMETERS = CONFIG["hyperparameters"]
# Set epochs from args if set else from config file
EPOCHS = ARGS["epochs"] if ARGS["epochs"] else int(HYPERPARAMETERS["epochs"])
BATCH_SIZE = int(HYPERPARAMETERS["batch_size"])
LEARNING_RATE = float(HYPERPARAMETERS["learning_rate"])
DROP_EVERY = float(HYPERPARAMETERS["learning_rate_decay_after_x_epoch"])
DROP_FACTOR = float(HYPERPARAMETERS["decay_rate"])
MOMENTUM = float(HYPERPARAMETERS["momentum"])
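# --- Editor's illustrative sketch (not part of the original training script) ---
# DROP_EVERY and DROP_FACTOR suggest a step-decay learning-rate schedule. One common way to
# feed them to the imported LearningRateScheduler callback is shown below; the function name
# and the exact decay rule are assumptions, not taken from the original source.
def step_decay_schedule(epoch):
    """Drop the learning rate by DROP_FACTOR every DROP_EVERY epochs."""
    exponent = np.floor((1 + epoch) / DROP_EVERY)
    return float(LEARNING_RATE * (DROP_FACTOR ** exponent))
# e.g. LR_CALLBACK = LearningRateScheduler(step_decay_schedule, verbose=VERBOSITY)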
# Image generator information
TRAIN_TEST_VAL_SPLIT = CONFIG["train_test_val_split"]
DEBUG_MODE = "DEBUG" in CONFIG
TEST_SPLIT = float(TRAIN_TEST_VAL_SPLIT["test_split"])
VALIDATION_SPLIT = float(TRAIN_TEST_VAL_SPLIT["validation_split"])
##################################################################################################
# Switch models
###################################################################################################
BASE_MODEL = None
preprocessing_function = None
if NAME == "VGG19":
HEIGHT, WIDTH = 224, 224
BASE_MODEL = VGG19(include_top=False, weights="imagenet",
input_shape=(HEIGHT, WIDTH, DEPTH))
preprocessing_function = vgg19_preprocess_input
elif NAME == "VGG16":
HEIGHT, WIDTH = 224, 224
BASE_MODEL = VGG16(include_top=False, weights="imagenet",
input_shape=(HEIGHT, WIDTH, DEPTH))
preprocessing_function = vgg16_preprocess_input
elif NAME == "Densenet":
HEIGHT, WIDTH = 128, 128
BASE_MODEL = DenseNet201(include_top=False, weights="imagenet",
input_shape=(HEIGHT, WIDTH, DEPTH))
preprocessing_function = densenet_preprocess_input
elif NAME == "efficientnet":
HEIGHT, WIDTH = 128, 128
BASE_MODEL = EfficientNetB5(include_top=False, weights="imagenet",
input_shape=(HEIGHT, WIDTH, DEPTH))
preprocessing_function = preprocess_input
else:
HEIGHT, WIDTH = 224, 224
BASE_MODEL = VGG19(include_top=False, weights="imagenet",
input_shape=(HEIGHT, WIDTH, DEPTH))
preprocessing_function = vgg19_preprocess_input
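# --- Editor's illustrative sketch (not part of the original training script) ---
# The imported GlobalAveragePooling2D/Dropout/Dense layers are typically used to attach a
# classification head to BASE_MODEL; the layer sizes and the 5-class output below are
# assumptions for illustration only, so the sketch is left commented out:
# head = GlobalAveragePooling2D()(BASE_MODEL.output)
# head = Dropout(0.5)(head)
# predictions = Dense(5, activation="softmax")(head)
# model = Model(inputs=BASE_MODEL.input, outputs=predictions)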
##################################################################################################
# Read details from CSV
###################################################################################################
DATASET = pd.read_csv(TRAINING_CSV, dtype=str)
# DEBUG_MODE SET
DATASET = DATASET[:BATCH_SIZE*12] if DEBUG_MODE else DATASET
TRAIN_VALIDATION, TEST = train_test_split(DATASET, test_size=TEST_SPLIT)
TRAIN, VALIDATION = train_test_split(
TRAIN_VALIDATION, test_size=VALIDATION_SPLIT)
# KAGGLE TESTSET
TESTSET_ARRAY = [[filename, "0"]
for filename in os.listdir(TESTSET_FOLDER)]
TESTSET = pd.DataFrame(TESTSET_ARRAY, columns=["Id", "Expected"])
""" This script reproduces content of Fig. 4 & Table 1 in the manuscript. """
import mne
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import helper
plt.ion()
results_dir = '../results/model_bursts/'
channels = ['C3-lap', 'C4-lap', 'Pz-lap']
labels = ['Laplacian %s' % (channel.split('-')[0]) for channel in channels]
fs = 512
mask_params = dict(marker='.', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4, alpha=0.9)
patterns = pd.read_csv('../results/mean_laplacian_patterns.csv', index_col=0)
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
import nose
import numpy as np
from numpy import nan
import pandas as pd
from distutils.version import LooseVersion
from pandas import (Index, Series, DataFrame, Panel, isnull,
date_range, period_range)
from pandas.core.index import MultiIndex
import pandas.core.common as com
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_panel_equal,
assert_equal)
import pandas.util.testing as tm
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
raise nose.SkipTest('scipy.interpolate.pchip missing')
# ----------------------------------------------------------------------
# Generic types test cases
class Generic(object):
_multiprocess_can_split_ = True
def setUp(self):
pass
@property
def _ndim(self):
return self._typ._AXIS_LEN
def _axes(self):
""" return the axes for my object typ """
return self._typ._AXIS_ORDERS
def _construct(self, shape, value=None, dtype=None, **kwargs):
""" construct an object for the given shape
if value is specified use that if its a scalar
if value is an array, repeat it as needed """
if isinstance(shape, int):
shape = tuple([shape] * self._ndim)
if value is not None:
if np.isscalar(value):
if value == 'empty':
arr = None
# remove the info axis
kwargs.pop(self._typ._info_axis_name, None)
else:
arr = np.empty(shape, dtype=dtype)
arr.fill(value)
else:
fshape = np.prod(shape)
arr = value.ravel()
new_shape = fshape / arr.shape[0]
if fshape % arr.shape[0] != 0:
raise Exception("invalid value passed in _construct")
arr = np.repeat(arr, new_shape).reshape(shape)
else:
arr = np.random.randn(*shape)
return self._typ(arr, dtype=dtype, **kwargs)
def _compare(self, result, expected):
self._comparator(result, expected)
def test_rename(self):
# single axis
for axis in self._axes():
kwargs = {axis: list('ABCD')}
obj = self._construct(4, **kwargs)
# no values passed
# self.assertRaises(Exception, o.rename(str.lower))
# rename a single axis
result = obj.rename(**{axis: str.lower})
expected = obj.copy()
setattr(expected, axis, list('abcd'))
self._compare(result, expected)
# multiple axes at once
def test_get_numeric_data(self):
n = 4
kwargs = {}
for i in range(self._ndim):
kwargs[self._typ._AXIS_NAMES[i]] = list(range(n))
# get the numeric data
o = self._construct(n, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# non-inclusion
result = o._get_bool_data()
expected = self._construct(n, value='empty', **kwargs)
self._compare(result, expected)
# get the bool data
arr = np.array([True, True, False, True])
o = self._construct(n, value=arr, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# _get_numeric_data includes _get_bool_data, so can't test for
# non-inclusion
def test_get_default(self):
# GH 7725
d0 = "a", "b", "c", "d"
d1 = np.arange(4, dtype='int64')
others = "e", 10
for data, index in ((d0, d1), (d1, d0)):
s = Series(data, index=index)
for i, d in zip(index, data):
self.assertEqual(s.get(i), d)
self.assertEqual(s.get(i, d), d)
self.assertEqual(s.get(i, "z"), d)
for other in others:
self.assertEqual(s.get(other, "z"), "z")
self.assertEqual(s.get(other, other), other)
def test_nonzero(self):
# GH 4633
# look at the boolean/nonzero behavior for objects
obj = self._construct(shape=4)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=1)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=np.nan)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
# empty
obj = self._construct(shape=0)
self.assertRaises(ValueError, lambda: bool(obj))
# invalid behaviors
obj1 = self._construct(shape=4, value=1)
obj2 = self._construct(shape=4, value=1)
def f():
if obj1:
com.pprint_thing("this works and shouldn't")
self.assertRaises(ValueError, f)
self.assertRaises(ValueError, lambda: obj1 and obj2)
self.assertRaises(ValueError, lambda: obj1 or obj2)
self.assertRaises(ValueError, lambda: not obj1)
def test_numpy_1_7_compat_numeric_methods(self):
# GH 4435
# numpy in 1.7 tries to pass additional arguments to pandas functions
o = self._construct(shape=4)
for op in ['min', 'max', 'max', 'var', 'std', 'prod', 'sum', 'cumsum',
'cumprod', 'median', 'skew', 'kurt', 'compound', 'cummax',
'cummin', 'all', 'any']:
f = getattr(np, op, None)
if f is not None:
f(o)
def test_downcast(self):
# test close downcasting
o = self._construct(shape=4, value=9, dtype=np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
o = self._construct(shape=4, value=9.)
expected = o.astype(np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, expected)
o = self._construct(shape=4, value=9.5)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
# are close
o = self._construct(shape=4, value=9.000000000005)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
expected = o.astype(np.int64)
self._compare(result, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
# compound dtypes should raise not-implementederror
def f(dtype):
return self._construct(shape=3, dtype=dtype)
self.assertRaises(NotImplementedError, f, [("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
f('M8[ns]')
def check_metadata(self, x, y=None):
for m in x._metadata:
v = getattr(x, m, None)
if y is None:
self.assertIsNone(v)
else:
self.assertEqual(v, getattr(y, m, None))
def test_metadata_propagation(self):
# check that the metadata matches up on the resulting ops
o = self._construct(shape=3)
o.name = 'foo'
o2 = self._construct(shape=3)
o2.name = 'bar'
# TODO
# Once panel can do non-trivial combine operations
# (currently there is an a raise in the Panel arith_ops to prevent
# this, though it actually does work)
# can remove all of these try: except: blocks on the actual operations
# ----------
# preserving
# ----------
# simple ops with scalars
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
result = getattr(o, op)(1)
self.check_metadata(o, result)
# ops with like
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
try:
result = getattr(o, op)(o)
self.check_metadata(o, result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
v1 = getattr(o, op)(o)
self.check_metadata(o, v1)
try:
self.check_metadata(o, v1 & v1)
except (ValueError):
pass
try:
self.check_metadata(o, v1 | v1)
except (ValueError):
pass
# combine_first
try:
result = o.combine_first(o2)
self.check_metadata(o, result)
except (AttributeError):
pass
# ---------------------------
# non-preserving (by default)
# ---------------------------
# add non-like
try:
result = o + o2
self.check_metadata(result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
# this is a name matching op
v1 = getattr(o, op)(o)
v2 = getattr(o, op)(o2)
self.check_metadata(v2)
try:
self.check_metadata(v1 & v2)
except (ValueError):
pass
try:
self.check_metadata(v1 | v2)
except (ValueError):
pass
def test_head_tail(self):
# GH5370
o = self._construct(shape=10)
# check all index types
for index in [tm.makeFloatIndex, tm.makeIntIndex, tm.makeStringIndex,
tm.makeUnicodeIndex, tm.makeDateIndex,
tm.makePeriodIndex]:
axis = o._get_axis_name(0)
setattr(o, axis, index(len(getattr(o, axis))))
# Panel + dims
try:
o.head()
except (NotImplementedError):
raise nose.SkipTest('not implemented on {0}'.format(
o.__class__.__name__))
self._compare(o.head(), o.iloc[:5])
self._compare(o.tail(), o.iloc[-5:])
# 0-len
self._compare(o.head(0), o.iloc[0:0])
self._compare(o.tail(0), o.iloc[0:0])
# bounded
self._compare(o.head(len(o) + 1), o)
self._compare(o.tail(len(o) + 1), o)
# neg index
self._compare(o.head(-3), o.head(7))
self._compare(o.tail(-3), o.tail(7))
def test_sample(self):
# Fixes issue: 2419
o = self._construct(shape=10)
###
# Check behavior of random_state argument
###
# Check for stability when it receives a seed or random state -- run 10
# times.
for test in range(10):
seed = np.random.randint(0, 100)
self._compare(
o.sample(n=4, random_state=seed), o.sample(n=4,
random_state=seed))
self._compare(
o.sample(frac=0.7, random_state=seed), o.sample(
frac=0.7, random_state=seed))
self._compare(
o.sample(n=4, random_state=np.random.RandomState(test)),
o.sample(n=4, random_state=np.random.RandomState(test)))
self._compare(
o.sample(frac=0.7, random_state=np.random.RandomState(test)),
o.sample(frac=0.7, random_state=np.random.RandomState(test)))
# Check for error when random_state argument invalid.
with tm.assertRaises(ValueError):
o.sample(random_state='astring!')
###
# Check behavior of `frac` and `N`
###
# Giving both frac and N throws error
with tm.assertRaises(ValueError):
o.sample(n=3, frac=0.3)
# Check that raises right error for negative lengths
with tm.assertRaises(ValueError):
o.sample(n=-3)
with tm.assertRaises(ValueError):
o.sample(frac=-0.3)
# Make sure float values of `n` give error
with tm.assertRaises(ValueError):
o.sample(n=3.2)
# Check lengths are right
self.assertTrue(len(o.sample(n=4) == 4))
self.assertTrue(len(o.sample(frac=0.34) == 3))
self.assertTrue(len(o.sample(frac=0.36) == 4))
###
# Check weights
###
# Weight length must be right
with tm.assertRaises(ValueError):
o.sample(n=3, weights=[0, 1])
with tm.assertRaises(ValueError):
bad_weights = [0.5] * 11
o.sample(n=3, weights=bad_weights)
with tm.assertRaises(ValueError):
bad_weight_series = Series([0, 0, 0.2])
o.sample(n=4, weights=bad_weight_series)
# Check won't accept negative weights
with tm.assertRaises(ValueError):
bad_weights = [-0.1] * 10
o.sample(n=3, weights=bad_weights)
# Check inf and -inf throw errors:
with tm.assertRaises(ValueError):
weights_with_inf = [0.1] * 10
weights_with_inf[0] = np.inf
o.sample(n=3, weights=weights_with_inf)
with tm.assertRaises(ValueError):
weights_with_ninf = [0.1] * 10
weights_with_ninf[0] = -np.inf
o.sample(n=3, weights=weights_with_ninf)
# All zeros raises errors
zero_weights = [0] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=zero_weights)
# All missing weights
nan_weights = [np.nan] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=nan_weights)
# A few dataframe test with degenerate weights.
easy_weight_list = [0] * 10
easy_weight_list[5] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10,
'easyweights': easy_weight_list})
sample1 = df.sample(n=1, weights='easyweights')
assert_frame_equal(sample1, df.iloc[5:6])
# Ensure proper error if string given as weight for Series, panel, or
# DataFrame with axis = 1.
s = Series(range(10))
with tm.assertRaises(ValueError):
s.sample(n=3, weights='weight_column')
panel = pd.Panel(items=[0, 1, 2], major_axis=[2, 3, 4],
minor_axis=[3, 4, 5])
with tm.assertRaises(ValueError):
panel.sample(n=1, weights='weight_column')
with tm.assertRaises(ValueError):
df.sample(n=1, weights='weight_column', axis=1)
# Check weighting key error
with tm.assertRaises(KeyError):
df.sample(n=3, weights='not_a_real_column_name')
# Check np.nan are replaced by zeros.
weights_with_nan = [np.nan] * 10
weights_with_nan[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6])
# Check None are also replaced by zeros.
weights_with_None = [None] * 10
weights_with_None[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6])
# Check that re-normalizes weights that don't sum to one.
weights_less_than_1 = [0] * 10
weights_less_than_1[0] = 0.5
tm.assert_frame_equal(
df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
###
# Test axis argument
###
# Test axis argument
df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10})
second_column_weight = [0, 1]
assert_frame_equal(
df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']])
# Different axis arg types
assert_frame_equal(df.sample(n=1, axis='columns',
weights=second_column_weight),
df[['col2']])
weight = [0] * 10
weight[5] = 0.5
assert_frame_equal(df.sample(n=1, axis='rows', weights=weight),
df.iloc[5:6])
assert_frame_equal(df.sample(n=1, axis='index', weights=weight),
df.iloc[5:6])
# Check out of range axis values
with tm.assertRaises(ValueError):
df.sample(n=1, axis=2)
with tm.assertRaises(ValueError):
df.sample(n=1, axis='not_a_name')
with tm.assertRaises(ValueError):
s = pd.Series(range(10))
s.sample(n=1, axis=1)
# Test weight length compared to correct axis
with tm.assertRaises(ValueError):
df.sample(n=1, axis=1, weights=[0.5] * 10)
# Check weights with axis = 1
easy_weight_list = [0] * 3
easy_weight_list[2] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10})
sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
assert_frame_equal(sample1, df[['colString']])
# Test default axes
p = pd.Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6],
minor_axis=[1, 3, 5])
assert_panel_equal(
p.sample(n=3, random_state=42), p.sample(n=3, axis=1,
random_state=42))
assert_frame_equal(
df.sample(n=3, random_state=42), df.sample(n=3, axis=0,
random_state=42))
# Test that function aligns weights with frame
df = DataFrame(
{'col1': [5, 6, 7],
'col2': ['a', 'b', 'c'], }, index=[9, 5, 3])
s = Series([1, 0, 0], index=[3, 5, 9])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s))
# Weights have index values to be dropped because not in
# sampled DataFrame
s2 = Series([0.001, 0, 10000], index=[3, 5, 10])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2))
# Weights have empty values to be filled with zeros
s3 = Series([0.01, 0], index=[3, 5])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3))
# No overlap in weight and sampled DataFrame indices
s4 = Series([1, 0], index=[1, 2])
with tm.assertRaises(ValueError):
df.sample(1, weights=s4)
def test_size_compat(self):
# GH8846
# size property should be defined
o = self._construct(shape=10)
self.assertTrue(o.size == np.prod(o.shape))
self.assertTrue(o.size == 10 ** len(o.axes))
def test_split_compat(self):
# xref GH8846
o = self._construct(shape=10)
self.assertTrue(len(np.array_split(o, 5)) == 5)
self.assertTrue(len(np.array_split(o, 2)) == 2)
def test_unexpected_keyword(self): # GH8597
from pandas.util.testing import assertRaisesRegexp
df = DataFrame(np.random.randn(5, 2), columns=['jim', 'joe'])
ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])
ts = df['joe'].copy()
ts[2] = np.nan
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.drop('joe', axis=1, in_place=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.reindex([1, 0], inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ca.fillna(0, inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ts.fillna(0, in_place=True)
class TestSeries(tm.TestCase, Generic):
_typ = Series
_comparator = lambda self, x, y: assert_series_equal(x, y)
def setUp(self):
self.ts = tm.makeTimeSeries() # Was at top level in test_series
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
def test_rename_mi(self):
s = Series([11, 21, 31],
index=MultiIndex.from_tuples(
[("A", x) for x in ["a", "B", "c"]]))
s.rename(str.lower)
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = Series([1, 2, 3])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([1, '2', 3.])
result = o._get_numeric_data()
expected = Series([], dtype=object, index=pd.Index([], dtype=object))
self._compare(result, expected)
o = Series([True, False, True])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([True, False, True])
result = o._get_bool_data()
self._compare(result, o)
o = Series(date_range('20130101', periods=3))
result = o._get_numeric_data()
expected = Series([], dtype='M8[ns]', index=pd.Index([], dtype=object))
self._compare(result, expected)
def test_nonzero_single_element(self):
# allow single item via bool method
s = Series([True])
self.assertTrue(s.bool())
s = Series([False])
self.assertFalse(s.bool())
# single item nan to raise
for s in [Series([np.nan]), Series([pd.NaT]), Series([True]),
Series([False])]:
self.assertRaises(ValueError, lambda: bool(s))
for s in [Series([np.nan]), Series([pd.NaT])]:
self.assertRaises(ValueError, lambda: s.bool())
# multiple bool are still an error
for s in [Series([True, True])]:
    self.assertRaises(ValueError, lambda: bool(s))
    self.assertRaises(ValueError, lambda: s.bool())
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 27 14:35:44 2018
@author: RUFIAR1
"""
import pandas as pd
import locale
locale.setlocale(locale.LC_TIME, "en_US.UTF-8")
def plot_finance_data(finance_data):
finance_data['All Data']=finance_data['Material'].astype('str')+','+finance_data['Fiscal year/period']
tras_finance_data=finance_data.drop(['Fiscal year/period','Material'],axis=1).set_index('All Data').T
tras_finance_data.index=pd.to_datetime(tras_finance_data.index,format='%b %Y')
tras_finance_data=tras_finance_data.sort_index()
tras_finance_data.plot.line(title='Finance data')
'''done'''
def plot_market_data(market_data):
market_data['All Data']=market_data['Molecule']+','+market_data['Manufacturer']+','+market_data['Product']+','+market_data['Pack']
market_data=market_data.drop(columns=['Molecule','Manufacturer','Product','Pack'])
market_data=market_data.set_index("All Data").T
market_data.index=pd.to_datetime(market_data.index,format='%d/%m/%Y')
market_data=market_data.sort_index()
market_data.sort_index().plot.line(title='Market data',subplots=True)
'''done'''
def plot_sales_data(sales_data):
sales_data['All Data']=sales_data['Product'].astype('str')+','+sales_data['material'].astype('str')+','+ sales_data['CAUSALE']
sales_data=sales_data.drop(columns=['CAUSALE','NOTE','material','Product','inc IMS','AVG VENDUTO 2018','AVG FCST Q1','%','%','Stock','LF2','LF2 YTG (from JULY)','TOTALE'])
sales_data=sales_data.set_index("All Data").T
sales_data.index=pd.to_datetime(sales_data.index,format='%b')
sales_data.sort_index().plot.line(title='Sales data')
def plot_transformed_sales_data(transformed_data):
transformed_data.index=pd.to_datetime(transformed_data.index,format='%b %Y')
transformed_data.sort_index().plot.line(title='Sales data transformed')
'''done'''
def plot_forecast_data(forecast_data):
forecast_data['All Data']=forecast_data['Material'].astype('str')
forecast_data=forecast_data.drop(columns=['Material'])
forecast_data=forecast_data.set_index("All Data").T
forecast_data.index=pd.to_datetime(forecast_data.index)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
##########
Processing
##########
*Created on Thu Jun 1 14:15 2017 by <NAME>*
Processing results from the CellPainting Assay in the Jupyter notebook.
This module provides the DataSet class and its methods.
Additional functions in this module act on pandas DataFrames."""
import time
import glob
import os.path as op
from collections import Counter
import xml.etree.ElementTree as ET
import pickle
import pandas as pd
import numpy as np
from rdkit.Chem import AllChem as Chem
from rdkit import DataStructs
from IPython.core.display import HTML
from . import tools as cpt
from .config import ACT_PROF_PARAMETERS
from .config import LIMIT_SIMILARITY_L, LIMIT_CELL_COUNT_L, LIMIT_ACTIVITY_L
try:
from misc_tools import apl_tools
AP_TOOLS = True
#: Library version
VERSION = apl_tools.get_commit(__file__)
# I use this to keep track of the library versions I use in my project notebooks
print("{:45s} (commit: {})".format(__name__, VERSION))
except ImportError:
AP_TOOLS = False
print("{:45s} ({})".format(__name__, time.strftime("%y%m%d-%H:%M", time.localtime(op.getmtime(__file__)))))
try:
from . import resource_paths as cprp
except ImportError:
from . import resource_paths_templ as cprp
print("* Resource paths not found, stub loaded.")
print(" Automatic loading of resources will not work,")
print(" please have a look at resource_paths_templ.py")
FINAL_PARAMETERS = ['Metadata_Plate', 'Metadata_Well', 'plateColumn', 'plateRow',
"Compound_Id", 'Container_Id', "Well_Id", "Producer", "Pure_Flag", "Toxic",
"Rel_Cell_Count", "Known_Act", "Trivial_Name", 'WellType', 'Conc_uM',
"Activity", "Act_Profile", "Plate", "Smiles"]
DROP_FROM_NUMBERS = ['plateColumn', 'plateRow', 'Conc_uM', "Compound_Id"]
DROP_GLOBAL = ["PathName_CellOutlines", "URL_CellOutlines", 'FileName_CellOutlines',
'ImageNumber', 'Metadata_Site', 'Metadata_Site_1', 'Metadata_Site_2']
QUANT = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
DEBUG = False
def debug_print(txt, val):
if DEBUG:
txt = txt + ":"
print("DEBUG {:20s}".format(txt), val)
class DataSet():
def __init__(self, log=True):
self.data = pd.DataFrame()
self.fields = {"plateColumn": "Metadata_Plate",
"WellType": "WellType", "ControlWell": "Control", "CompoundWell": "Compound"}
self.log = log
def __getitem__(self, item):
res = self.data[item]
if isinstance(res, pd.DataFrame):
result = DataSet()
result.data = res
result.print_log("subset")
else:
result = res
return result
def __getattr__(self, name):
"""Try to call undefined methods on the underlying pandas DataFrame."""
def method(*args, **kwargs):
res = getattr(self.data, name)(*args, **kwargs)
if isinstance(res, pd.DataFrame):
result = DataSet()
result.data = res
result.print_log(name)
else:
result = res
return result
return method
def show(self):
parameters = [k for k in FINAL_PARAMETERS if k in self.data]
print("Shape: ", self.shape)
print("Parameters:", parameters)
return HTML(self.data[parameters]._repr_html_())
def head(self, n=5):
parameters = [k for k in FINAL_PARAMETERS if k in self.data]
res = self.data[parameters].head(n)
result = DataSet()
result.data = res
result.print_log("head")
return result
def drop_cols(self, cols, inplace=False):
"""Drops the list of columns from the DataFrame.
Listed columns that are not present in the DataFrame are simply ignored
(no error is thrown)."""
if inplace:
drop_cols(self.data, cols, inplace=True)
self.print_log("drop cols (inplace)")
else:
result = DataSet()
result.data = drop_cols(self.data, cols, inplace=False)
result.print_log("drop cols")
return result
def keep_cols(self, cols, inplace=False):
if inplace:
self.data = self.data[cols]
self.print_log("keep cols (inplace)")
else:
result = DataSet()
result.data = self.data[cols]
result.print_log("keep cols")
return result
def print_log(self, component, add_info=""):
if self.log:
print_log(self.data, component, add_info)
def load(self, fn, sep="\t"):
"""Read one or multiple result files and concatenate them into one dataset.
`fn` is a single filename (string) or a list of filenames."""
self.data = load(fn, sep=sep).data
self.print_log("load data")
def write_csv(self, fn, parameters=None, sep="\t"):
result = self.data.copy()
if isinstance(parameters, list):
result = result[parameters]
result.to_csv(fn, sep=sep, index=False)
def write_pkl(self, fn):
self.data.to_pickle(fn)
def write_parameters(self, fn="parameters.txt"):
parameters = sorted(self.measurements)
with open("parameters.txt", "w") as f:
f.write('"')
f.write('",\n"'.join(parameters))
f.write('"')
print(len(parameters), "parameters written.")
def describe(self, times_mad=3.0):
df = numeric_parameters(self.data)
stats = pd.DataFrame()
stats["Min"] = df.min()
stats["Max"] = df.max()
stats["Median"] = df.median()
stats["MAD"] = df.mad()
stats["Outliers"] = df[(((df - df.median()).abs() - times_mad * df.mad()) > 0)].count()
print(self.shape)
return stats
def well_type_from_position(self):
"""Assign the WellType from the position on the plate.
Controls are in columns 11 and 12"""
result = DataSet(log=self.log)
result.data = well_type_from_position(self.data)
result.print_log("well type from pos")
return result
def well_from_position(self, well_name="Metadata_Well",
row_name="plateRow", col_name="plateColumn"):
"""Assign Metadata_Well from plateRow, plateColumn"""
result = DataSet(log=self.log)
result.data = well_from_position(self.data, well_name=well_name,
row_name=row_name, col_name=col_name)
result.print_log("well from pos")
return result
def position_from_well(self, well_name="Metadata_Well",
row_name="plateRow", col_name="plateColumn"):
"""Generate plateRow and plateColumn from Metatadata_Well"""
result = DataSet(log=self.log)
result.data = position_from_well(self.data, well_name=well_name,
row_name=row_name, col_name=col_name)
result.print_log("pos from well")
return result
def join_layout_384(self, layout_fn, on="Address_384"):
result = DataSet(log=self.log)
result.data = join_layout_384(self.data, layout_fn, on=on)
result.print_log("join layout 384")
return result
def join_layout_1536(self, plate, quadrant, on="Address_384", how="inner"):
"""Cell Painting is always run in 384er plates.
COMAS standard screening plates are format 1536.
With this function, the 1536-to-384 reformatting file
with the smiles added by join_smiles_to_layout_1536()
can be used directly to join the layout to the individual 384er plates."""
result = DataSet(log=self.log)
result.data = join_layout_1536(self.data, plate, quadrant, on=on, how=how)
result.print_log("join layout 1536")
return result
def numeric_parameters(self):
result = DataSet()
result.data = numeric_parameters(self.data)
return result
def flag_toxic(self, cutoff=LIMIT_CELL_COUNT_L / 100):
"""Flag data rows of toxic compounds"""
result = DataSet()
result.data = flag_toxic(self.data, cutoff=cutoff)
flagged = result.data["Toxic"].sum()
result.print_log("flag toxic", "{:3d} flagged".format(flagged))
return result
def remove_toxic(self, cutoff=LIMIT_CELL_COUNT_L / 100):
"""Remove data rows of toxic compounds"""
result = DataSet()
toxic = DataSet()
result.data, toxic.data = remove_toxic(self.data, cutoff=cutoff)
result.print_log("remove toxic", "{:3d} removed".format(toxic.shape[0]))
return result, toxic
def remove_impure(self, strict=False, reset_index=True):
"""Remove entries with `Pure_Flag == "Fail"`"""
result = DataSet()
flagged = DataSet()
result.data, flagged.data = remove_impure(self.data)
result.print_log("remove impure", "{:3d} removed".format(flagged.shape[0]))
return result, flagged
def remove_outliers(self, times_dev=3.0, group_by=None, method="median"):
"""Returns the filtered dataframe as well as the outliers.
method can be `median` or `mean` """
result = DataSet()
outliers = DataSet()
result.data, outliers.data = remove_outliers(self.data, times_dev=times_dev,
group_by=group_by, method=method)
result.print_log("remove outliers", "{:3d} removed".format(outliers.shape[0]))
return result, outliers
def remove_skipped_echo_direct_transfer(self, fn):
"""Remove wells that were reported as skipped in the Echo protocol (xml).
This functions works with Echo direct transfer protocols.
Function supports using wildcards in the filename, the first file will be used.
Returns a new dataframe without the skipped wells."""
result = DataSet()
result.data, skipped = remove_skipped_echo_direct_transfer(self.data, fn=fn)
skipped_str = "(" + ", ".join(skipped) + ")"
result.print_log("remove skipped", "{:3d} skipped {}".format(self.shape[0] - result.shape[0],
skipped_str))
return result
def drop_dups(self, cpd_id="Compound_Id"):
"""Drop duplicate Compound_Ids"""
result = DataSet()
result.data = self.data.drop_duplicates(cpd_id)
result.print_log("drop dups")
return result
def group_on_well(self, group_by=FINAL_PARAMETERS):
"""Group results on well level."""
result = DataSet()
result.data = group_on_well(self.data, group_by=group_by)
result.print_log("group on well")
return result
def join_batch_data(self, df_data=None, how="left", fillna="n.d."):
"""Join data by Batch_Id."""
result = DataSet()
result.data = join_batch_data(self.data, df_data=df_data, how=how, fillna=fillna)
result.print_log("join batch data")
return result
def join_container_data(self, df_data=None, how="left", fillna=""):
"""Join data by Container_Id."""
result = DataSet()
result.data = join_container_data(self.data, df_data=df_data, how=how, fillna=fillna)
result.print_log("join cntnr data")
return result
def join_container(self, cont_data=None, how="inner"):
result = DataSet(log=self.log)
result.data = join_container(self.data, cont_data=cont_data, how=how)
result.print_log("join container")
return result
def join_smiles(self, df_smiles=None, how="left"):
"""Join Smiles from Compound_Id."""
result = DataSet()
result.data = join_smiles(self.data, df_smiles=df_smiles, how=how)
result.print_log("join smiles")
return result
def join_annotations(self):
"""Join Annotations from Compound_Id."""
result = DataSet()
result.data = join_annotations(self.data)
result.print_log("join annotations")
return result
def add_dmso(self):
"""Add DMSO to references."""
result = DataSet()
result.data = add_dmso(self.data)
result.print_log("add DMSO")
return result
def poc(self, group_by=None, well_type="WellType", control_name="Control"):
"""Normalize the data set to Percent-Of-Control per group (e.g. per plate)
based on the median of the controls.
Parameters:
group_by (string or None): optional column by which the calculation should be grouped,
e.g. the column with plate name."""
result = DataSet()
result.data = poc(self.data, group_by=group_by)
self.print_log("POC")
return result
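    # Editor's note (illustrative, not part of the original class): conceptually, POC rescales
    # each measurement to the control median of its group, roughly
    #   df[parm] = 100 * df[parm] / df.loc[df[well_type] == control_name, parm].median()
    # so values near 100 read as "behaves like the DMSO controls". The actual implementation
    # lives in the module-level poc() helper, which is not shown in this excerpt.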
def activity_profile(self, mad_mult=3.5, parameters=ACT_PROF_PARAMETERS, only_final=True):
"""Generates the `Act_Profile` column.
The byte is set when the parameter's value is greater (or smaller)
than parameter_ctrl.median() + (or -) `mad_mult`* parameter.mad()
If a list of parameters is given, then the activity profile will be calculated
for these parameters.
If `only_final` == `True`, then only the parameters listed in `FINAL_PARAMETERS`
are kept in the output_table.
Returns a new Pandas DataFrame."""
result = DataSet()
result.data = activity_profile(self.data, mad_mult=mad_mult, parameters=parameters,
only_final=only_final)
result.print_log("activity profile")
return result
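    # Editor's note (illustrative, not part of the original class): per parameter, the profile
    # flag is derived from the control distribution, conceptually
    #   upper = ctrls[parm].median() + mad_mult * ctrls[parm].mad()
    #   lower = ctrls[parm].median() - mad_mult * ctrls[parm].mad()
    # and a compound is marked increased/decreased for that parameter when its value falls
    # above upper or below lower; the concatenated per-parameter flags form Act_Profile.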
def relevant_parameters(self, ctrls_std_rel_min=0.001,
ctrls_std_rel_max=0.10):
result = DataSet()
result.data = relevant_parameters(self.data, ctrls_std_rel_min=ctrls_std_rel_min,
ctrls_std_rel_max=ctrls_std_rel_max)
num_parm = len(result.measurements)
result.print_log("relevant parameters", "{:.3f}/{:.3f}/{:4d}"
.format(ctrls_std_rel_min, ctrls_std_rel_max, num_parm))
return result
def correlation_filter(self, cutoff=0.9, method="pearson"):
"""The correlation removes all highly correlated columns from the dataframe.
The function was implemented according to the description of the corresponding
KNIME component.
Parameters:
cutoff (float): correlation cutoff
method (string): "pearson", "kendall", "spearman" (very slow)
Returns a new DataFrame with only the non-correlated columns"""
result = DataSet()
result.data, iterations = correlation_filter(self.data, cutoff=cutoff, method=method)
num_parm = len(result.measurements)
result.print_log("correl. filter (mad)", "{:3d} iterations/{:4d}"
.format(iterations, num_parm))
return result
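    # Editor's note (illustrative, not part of the original class): the underlying idea is a
    # greedy pass over the absolute correlation matrix, roughly
    #   corr = df[measurements].corr(method=method).abs()
    # keeping a column, dropping every remaining column whose correlation with it exceeds
    # `cutoff`, and repeating until no pair above the cutoff is left (hence the iteration count).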
def correlation_filter_std(self, cutoff=0.9, method="pearson"):
"""The correlation removes all highly correlated columns from the dataframe.
The function was implemented according to the description of the corresponding
KNIME component.
Parameters:
cutoff (float): correlation cutoff
method (string): "pearson", "kendall", "spearman" (very slow)
Returns a new DataFrame with only the non-correlated columns"""
result = DataSet()
result.data, iterations = correlation_filter_std(self.data, cutoff=cutoff, method=method)
num_parm = len(result.measurements)
result.print_log("correl. filter (std)", "{:3d} iterations/{:4d}"
.format(iterations, num_parm))
return result
def add_act_profile_for_control(self, parameters=ACT_PROF_PARAMETERS):
# Compound_Id DMSO: 245754
control = {"Compound_Id": 245754, "Trivial_Name": "Control", "Activity": 0,
"Act_Profile": "".join(["1"] * len(parameters))}
ck = control.keys()
for k in ck:
if k not in self.data.keys():
control.pop(k)
tmp = pd.DataFrame(control)
import pytest
from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG, _period_code_map
from pandas.errors import OutOfBoundsDatetime
from pandas import Period, Timestamp, offsets
class TestFreqConversion:
"""Test frequency conversion of date objects"""
@pytest.mark.parametrize("freq", ["A", "Q", "M", "W", "B", "D"])
def test_asfreq_near_zero(self, freq):
# GH#19643, GH#19650
per = Period("0001-01-01", freq=freq)
tup1 = (per.year, per.hour, per.day)
prev = per - 1
assert prev.ordinal == per.ordinal - 1
tup2 = (prev.year, prev.month, prev.day)
assert tup2 < tup1
def test_asfreq_near_zero_weekly(self):
# GH#19834
per1 = Period("0001-01-01", "D") + 6
per2 = Period("0001-01-01", "D") - 6
week1 = per1.asfreq("W")
week2 = per2.asfreq("W")
assert week1 != week2
assert week1.asfreq("D", "E") >= per1
assert week2.asfreq("D", "S") <= per2
def test_to_timestamp_out_of_bounds(self):
# GH#19643, used to incorrectly give Timestamp in 1754
per = Period("0001-01-01", freq="B")
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
per.to_timestamp()
def test_asfreq_corner(self):
val = Period(freq="A", year=2007)
result1 = val.asfreq("5t")
result2 = val.asfreq("t")
expected = Period("2007-12-31 23:59", freq="t")
assert result1.ordinal == expected.ordinal
assert result1.freqstr == "5T"
assert result2.ordinal == expected.ordinal
assert result2.freqstr == "T"
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq="A", year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq="Q", year=2007, quarter=1)
ival_A_to_Q_end = Period(freq="Q", year=2007, quarter=4)
ival_A_to_M_start = Period(freq="M", year=2007, month=1)
ival_A_to_M_end = Period(freq="M", year=2007, month=12)
ival_A_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq="W", year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq="B", year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq="D", year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq="H", year=2007, month=12, day=31, hour=23)
ival_A_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_A_to_T_end = Period(
freq="Min", year=2007, month=12, day=31, hour=23, minute=59
)
ival_A_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_A_to_S_end = Period(
freq="S", year=2007, month=12, day=31, hour=23, minute=59, second=59
)
ival_AJAN_to_D_end = Period(freq="D", year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq="D", year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq="D", year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq="D", year=2006, month=12, day=1)
assert ival_A.asfreq("Q", "S") == ival_A_to_Q_start
assert ival_A.asfreq("Q", "e") == ival_A_to_Q_end
assert ival_A.asfreq("M", "s") == ival_A_to_M_start
assert ival_A.asfreq("M", "E") == ival_A_to_M_end
assert ival_A.asfreq("W", "S") == ival_A_to_W_start
assert ival_A.asfreq("W", "E") == ival_A_to_W_end
assert ival_A.asfreq("B", "S") == ival_A_to_B_start
assert ival_A.asfreq("B", "E") == ival_A_to_B_end
assert ival_A.asfreq("D", "S") == ival_A_to_D_start
assert ival_A.asfreq("D", "E") == ival_A_to_D_end
assert ival_A.asfreq("H", "S") == ival_A_to_H_start
assert ival_A.asfreq("H", "E") == ival_A_to_H_end
assert ival_A.asfreq("min", "S") == ival_A_to_T_start
assert ival_A.asfreq("min", "E") == ival_A_to_T_end
assert ival_A.asfreq("T", "S") == ival_A_to_T_start
assert ival_A.asfreq("T", "E") == ival_A_to_T_end
assert ival_A.asfreq("S", "S") == ival_A_to_S_start
assert ival_A.asfreq("S", "E") == ival_A_to_S_end
assert ival_AJAN.asfreq("D", "S") == ival_AJAN_to_D_start
assert ival_AJAN.asfreq("D", "E") == ival_AJAN_to_D_end
assert ival_AJUN.asfreq("D", "S") == ival_AJUN_to_D_start
assert ival_AJUN.asfreq("D", "E") == ival_AJUN_to_D_end
assert ival_ANOV.asfreq("D", "S") == ival_ANOV_to_D_start
assert ival_ANOV.asfreq("D", "E") == ival_ANOV_to_D_end
assert ival_A.asfreq("A") == ival_A
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq="Q", year=2007, quarter=1)
ival_Q_end_of_year = Period(freq="Q", year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq="A", year=2007)
ival_Q_to_M_start = Period(freq="M", year=2007, month=1)
ival_Q_to_M_end = Period(freq="M", year=2007, month=3)
ival_Q_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq="W", year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq="B", year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq="D", year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_Q_to_H_end = Period(freq="H", year=2007, month=3, day=31, hour=23)
ival_Q_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_Q_to_T_end = Period(
freq="Min", year=2007, month=3, day=31, hour=23, minute=59
)
ival_Q_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_Q_to_S_end = Period(
freq="S", year=2007, month=3, day=31, hour=23, minute=59, second=59
)
ival_QEJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq="D", year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq="D", year=2006, month=9, day=30)
assert ival_Q.asfreq("A") == ival_Q_to_A
assert ival_Q_end_of_year.asfreq("A") == ival_Q_to_A
assert ival_Q.asfreq("M", "S") == ival_Q_to_M_start
assert ival_Q.asfreq("M", "E") == ival_Q_to_M_end
assert ival_Q.asfreq("W", "S") == ival_Q_to_W_start
assert ival_Q.asfreq("W", "E") == ival_Q_to_W_end
assert ival_Q.asfreq("B", "S") == ival_Q_to_B_start
assert ival_Q.asfreq("B", "E") == ival_Q_to_B_end
assert ival_Q.asfreq("D", "S") == ival_Q_to_D_start
assert ival_Q.asfreq("D", "E") == ival_Q_to_D_end
assert ival_Q.asfreq("H", "S") == ival_Q_to_H_start
assert ival_Q.asfreq("H", "E") == ival_Q_to_H_end
assert ival_Q.asfreq("Min", "S") == ival_Q_to_T_start
assert ival_Q.asfreq("Min", "E") == ival_Q_to_T_end
assert ival_Q.asfreq("S", "S") == ival_Q_to_S_start
assert ival_Q.asfreq("S", "E") == ival_Q_to_S_end
assert ival_QEJAN.asfreq("D", "S") == ival_QEJAN_to_D_start
assert ival_QEJAN.asfreq("D", "E") == ival_QEJAN_to_D_end
assert ival_QEJUN.asfreq("D", "S") == ival_QEJUN_to_D_start
assert ival_QEJUN.asfreq("D", "E") == ival_QEJUN_to_D_end
assert ival_Q.asfreq("Q") == ival_Q
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq="M", year=2007, month=1)
ival_M_end_of_year = Period(freq="M", year=2007, month=12)
ival_M_end_of_quarter = Period(freq="M", year=2007, month=3)
ival_M_to_A = Period(freq="A", year=2007)
import json
import pandas as pd
import argparse
import os
import numpy as np
# from pretrained_model_list import MODEL_PATH_LIST
# import promptsource.templates
from tqdm import tqdm
import ipdb
def clean_up_tokenization(out_string: str) -> str:
"""
Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated forms.
Args:
out_string (:obj:`str`): The text to clean up.
Returns:
:obj:`str`: The cleaned-up string.
"""
out_string = (
out_string.replace(" .", ".")
.replace(" ?", "?")
.replace(" !", "!")
.replace(" ,", ",")
.replace(" ' ", "'")
.replace(" n't", "n't")
.replace(" 'm", "'m")
.replace(" 's", "'s")
.replace(" 've", "'ve")
.replace(" 're", "'re")
.replace("\n\n", " ")
.replace("\n", " ")
.replace("\r", " ")
)
return out_string
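# Editor's note: a quick, hypothetical illustration of the clean-up above --
#   clean_up_tokenization("he 's here , is n't he ?") -> "he's here, isn't he?"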
def load_prompts(opt):
if opt.prompt_set == "nlibias":
return load_bbnli(opt)
prompts = []
if opt.prompt_set == "bold":
pth = os.path.join("data", opt.prompt_set, "prompts", opt.prompt_domain+"_prompt.json")
with open(pth) as f:
for line in f:
prompts.append(json.loads(line))
prompts = prompts[0]
prompts_df = pd.DataFrame(columns = ["Name", "Group", "Prompt"])
for group, content in prompts.items():
for name, prompt_l in content.items():
for prompt in prompt_l:
if prompt.strip() != "":
prompts_df.loc[len(prompts_df)] = [name, group, prompt]
elif opt.prompt_set == "honest":
pth = os.path.join("honest/resources/en_template.tsv")
prompts_df = pd.read_csv(pth, sep="\t")
import pandas as pd
from sklearn.ensemble import AdaBoostRegressor
from sklearn.model_selection import RepeatedKFold
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import mean_absolute_error as mae
from sklearn.metrics import median_absolute_error as mdae
from sklearn.metrics import explained_variance_score as evs
from sklearn.metrics import r2_score as r2
import numpy as np
def rmse(y, p):
return mse(y, p)**0.5
rkf = RepeatedKFold(n_splits=10, n_repeats=10)
data = pd.read_csv('C:\\Users\\<NAME>\\Documents\\Research Projects\\Forecast of Rainfall Quantity and its variation using Envrionmental Features\\Data\\Normalized & Combined Data\\All Districts.csv')
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
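# A minimal standalone illustration (not part of the test suite) of the kind of
# dtype coercion these tests exercise: inserting a float into an int64 Index
# upcasts the result to float64, exactly as asserted in
# TestInsertIndexCoercion.test_insert_index_int64 below (repr shown for the
# pandas version these tests target):
# >>> pd.Index([1, 2, 3, 4]).insert(1, 1.1)
# Float64Index([1.0, 1.1, 2.0, 3.0, 4.0], dtype='float64')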
class CoercionBase(object):
klasses = ['index', 'series']
dtypes = ['object', 'int64', 'float64', 'complex128', 'bool',
'datetime64', 'datetime64tz', 'timedelta64', 'period']
@property
def method(self):
raise NotImplementedError(self)
def _assert(self, left, right, dtype):
# explicitly check dtype to avoid any unexpected result
if isinstance(left, pd.Series):
tm.assert_series_equal(left, right)
elif isinstance(left, pd.Index):
tm.assert_index_equal(left, right)
else:
raise NotImplementedError
self.assertEqual(left.dtype, dtype)
self.assertEqual(right.dtype, dtype)
def test_has_comprehensive_tests(self):
for klass in self.klasses:
for dtype in self.dtypes:
method_name = 'test_{0}_{1}_{2}'.format(self.method,
klass, dtype)
if not hasattr(self, method_name):
msg = 'test method is not defined: {0}, {1}'
raise AssertionError(msg.format(type(self), method_name))
class TestSetitemCoercion(CoercionBase, tm.TestCase):
method = 'setitem'
def _assert_setitem_series_conversion(self, original_series, loc_value,
expected_series, expected_dtype):
""" test series value's coercion triggered by assignment """
temp = original_series.copy()
temp[1] = loc_value
tm.assert_series_equal(temp, expected_series)
# check dtype explicitly for sure
self.assertEqual(temp.dtype, expected_dtype)
# .loc works different rule, temporary disable
# temp = original_series.copy()
# temp.loc[1] = loc_value
# tm.assert_series_equal(temp, expected_series)
def test_setitem_series_object(self):
obj = pd.Series(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Series(['a', 1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Series(['a', 1.1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = pd.Series(['a', 1 + 1j, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = pd.Series(['a', True, 'c', 'd'])
self._assert_setitem_series_conversion(obj, True, exp, np.object)
def test_setitem_series_int64(self):
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1, exp, np.int64)
# int + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64)
# int + complex -> complex
exp = pd.Series([1, 1 + 1j, 3, 4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# int + bool -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, True, exp, np.int64)
def test_setitem_series_float64(self):
obj = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Series([1.1, 1.1, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.float64)
# float + complex -> complex
exp = pd.Series([1.1, 1 + 1j, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp,
np.complex128)
# float + bool -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, True, exp, np.float64)
def test_setitem_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
def test_setitem_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
# bool + int -> int
# TODO_GH12747 The result must be int
# tm.assert_series_equal(temp, pd.Series([1, 1, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1, exp, np.bool)
# TODO_GH12747 The result must be int
# assigning int greater than bool
# tm.assert_series_equal(temp, pd.Series([1, 3, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 3, exp, np.bool)
# bool + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1., 1.1, 1., 0.]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.bool)
# bool + complex -> complex (buggy, results in bool)
# TODO_GH12747 The result must be complex
# tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 1, 0]))
# self.assertEqual(temp.dtype, np.complex128)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.bool)
# bool + bool -> bool
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, True, exp, np.bool)
def test_setitem_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, 1, exp, 'datetime64[ns]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2011-01-02', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz -> datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_setitem_series_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp(1, tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_setitem_series_conversion(obj, 1, exp,
'datetime64[ns, US/Eastern]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_timedelta64(self):
pass
def test_setitem_series_period(self):
pass
def _assert_setitem_index_conversion(self, original_series, loc_key,
expected_index, expected_dtype):
""" test index's coercion triggered by assign key """
temp = original_series.copy()
temp[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
temp = original_series.copy()
temp.loc[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
def test_setitem_index_object(self):
obj = pd.Series([1, 2, 3, 4], index=list('abcd'))
self.assertEqual(obj.index.dtype, np.object)
# object + object -> object
exp_index = pd.Index(list('abcdx'))
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
# object + int -> IndexError, regarded as location
temp = obj.copy()
with tm.assertRaises(IndexError):
temp[5] = 5
# object + float -> object
exp_index = pd.Index(['a', 'b', 'c', 'd', 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.object)
def test_setitem_index_int64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.index.dtype, np.int64)
# int + int -> int
exp_index = pd.Index([0, 1, 2, 3, 5])
self._assert_setitem_index_conversion(obj, 5, exp_index, np.int64)
# int + float -> float
exp_index = pd.Index([0, 1, 2, 3, 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.float64)
# int + object -> object
exp_index = pd.Index([0, 1, 2, 3, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_float64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
self.assertEqual(obj.index.dtype, np.float64)
# float + int -> int
temp = obj.copy()
# TODO_GH12747 The result must be float
with tm.assertRaises(IndexError):
temp[5] = 5
# float + float -> float
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 5.1])
self._assert_setitem_index_conversion(obj, 5.1, exp_index, np.float64)
# float + object -> object
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_complex128(self):
pass
def test_setitem_index_bool(self):
pass
def test_setitem_index_datetime64(self):
pass
def test_setitem_index_datetime64tz(self):
pass
def test_setitem_index_timedelta64(self):
pass
def test_setitem_index_period(self):
pass
class TestInsertIndexCoercion(CoercionBase, tm.TestCase):
klasses = ['index']
method = 'insert'
def _assert_insert_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by insert """
target = original.copy()
res = target.insert(1, value)
tm.assert_index_equal(res, expected)
self.assertEqual(res.dtype, expected_dtype)
def test_insert_index_object(self):
obj = pd.Index(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Index(['a', 1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Index(['a', 1.1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1.1, exp, np.object)
# object + bool -> object
res = obj.insert(1, False)
tm.assert_index_equal(res, pd.Index(['a', False, 'b', 'c', 'd']))
self.assertEqual(res.dtype, np.object)
# object + object -> object
exp = pd.Index(['a', 'x', 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_int64(self):
obj = pd.Int64Index([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Index([1, 1, 2, 3, 4])
self._assert_insert_conversion(obj, 1, exp, np.int64)
# int + float -> float
exp = pd.Index([1, 1.1, 2, 3, 4])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# int + bool -> int
exp = pd.Index([1, 0, 2, 3, 4])
self._assert_insert_conversion(obj, False, exp, np.int64)
# int + object -> object
exp = pd.Index([1, 'x', 2, 3, 4])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_float64(self):
obj = pd.Float64Index([1., 2., 3., 4.])
self.assertEqual(obj.dtype, np.float64)
# float + int -> int
exp = pd.Index([1., 1., 2., 3., 4.])
self._assert_insert_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Index([1., 1.1, 2., 3., 4.])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# float + bool -> float
exp = pd.Index([1., 0., 2., 3., 4.])
self._assert_insert_conversion(obj, False, exp, np.float64)
# float + object -> object
exp = pd.Index([1., 'x', 2., 3., 4.])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_complex128(self):
pass
def test_insert_index_bool(self):
pass
def test_insert_index_datetime64(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'])
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_datetime64tz(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'], tz='US/Eastern')
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'], tz='US/Eastern')
val = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_insert_conversion(obj, val, exp,
'datetime64[ns, US/Eastern]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='Asia/Tokyo'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_timedelta64(self):
obj = pd.TimedeltaIndex(['1 day', '2 day', '3 day', '4 day'])
self.assertEqual(obj.dtype, 'timedelta64[ns]')
# timedelta64 + timedelta64 => timedelta64
exp = pd.TimedeltaIndex(['1 day', '10 day', '2 day', '3 day', '4 day'])
self._assert_insert_conversion(obj, pd.Timedelta('10 day'),
exp, 'timedelta64[ns]')
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_period(self):
obj = pd.PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'],
freq='M')
self.assertEqual(obj.dtype, 'period[M]')
# period + period => period
exp = pd.PeriodIndex(['2011-01', '2012-01', '2011-02',
'2011-03', '2011-04'], freq='M')
self._assert_insert_conversion(obj, pd.Period('2012-01', freq='M'),
exp, 'period[M]')
# period + datetime64 => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
pd.Timestamp('2012-01-01'),
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, np.object)
# period + int => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
1,
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 1, exp, np.object)
# period + object => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
'x',
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 'x', exp, np.object)
class TestWhereCoercion(CoercionBase, tm.TestCase):
method = 'where'
def _assert_where_conversion(self, original, cond, values,
expected, expected_dtype):
""" test coercion triggered by where """
target = original.copy()
res = target.where(cond, values)
self._assert(res, expected, expected_dtype)
def _where_object_common(self, klass):
obj = klass(list('abcd'))
self.assertEqual(obj.dtype, np.object)
cond = klass([True, False, True, False])
# object + int -> object
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, 1, exp, np.object)
values = klass([5, 6, 7, 8])
exp = klass(['a', 6, 'c', 8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + float -> object
exp = klass(['a', 1.1, 'c', 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.object)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass(['a', 6.6, 'c', 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + complex -> object
exp = klass(['a', 1 + 1j, 'c', 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.object)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass(['a', 6 + 6j, 'c', 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.object)
if klass is pd.Series:
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', 0, 'c', 1])
self._assert_where_conversion(obj, cond, values, exp, np.object)
elif klass is pd.Index:
# object + bool -> object
exp = klass(['a', True, 'c', True])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', False, 'c', True])
self._assert_where_conversion(obj, cond, values, exp, np.object)
else:
raise NotImplementedError
def test_where_series_object(self):
self._where_object_common(pd.Series)
def test_where_index_object(self):
self._where_object_common(pd.Index)
def _where_int64_common(self, klass):
obj = klass([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
cond = klass([True, False, True, False])
# int + int -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = klass([5, 6, 7, 8])
exp = klass([1, 6, 3, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# int + float -> float
exp = klass([1, 1.1, 3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1, 6.6, 3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# int + complex -> complex
if klass is pd.Series:
exp = klass([1, 1 + 1j, 3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1, 6 + 6j, 3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# int + bool -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, True, exp, np.int64)
values = klass([True, False, True, True])
exp = klass([1, 0, 3, 1])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
def test_where_series_int64(self):
self._where_int64_common(pd.Series)
def test_where_index_int64(self):
self._where_int64_common(pd.Index)
def _where_float64_common(self, klass):
obj = klass([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
cond = klass([True, False, True, False])
# float + int -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, 1, exp, np.float64)
values = klass([5, 6, 7, 8])
exp = klass([1.1, 6.0, 3.3, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + float -> float
exp = klass([1.1, 1.1, 3.3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1.1, 6.6, 3.3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + complex -> complex
if klass is pd.Series:
exp = klass([1.1, 1 + 1j, 3.3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1.1, 6 + 6j, 3.3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# float + bool -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, True, exp, np.float64)
values = klass([True, False, True, True])
exp = klass([1.1, 0.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
def test_where_series_float64(self):
self._where_float64_common(pd.Series)
def test_where_index_float64(self):
self._where_float64_common(pd.Index)
def test_where_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
cond = pd.Series([True, False, True, False])
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.complex128)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1 + 1j, 6.0, 3 + 3j, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.complex128)
values = pd.Series([5.5, 6.6, 7.7, 8.8])
exp = pd.Series([1 + 1j, 6.6, 3 + 3j, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = pd.Series([1 + 1j, 6 + 6j, 3 + 3j, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, True, exp, np.complex128)
values = pd.Series([True, False, True, True])
exp = pd.Series([1 + 1j, 0, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
def test_where_index_complex128(self):
pass
def test_where_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
cond = pd.Series([True, False, True, False])
# bool + int -> int
exp = pd.Series([1, 1, 1, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1, 6, 1, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# bool + float -> float
exp = pd.Series([1.0, 1.1, 1.0, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = pd.Series([5.5, 6.6, 7.7, 8.8])
exp = pd.Series([1.0, 6.6, 1.0, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# bool + complex -> complex
exp = pd.Series([1, 1 + 1j, 1, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = pd.Series([1, 6 + 6j, 1, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# bool + bool -> bool
exp = pd.Series([True, True, True, True])
self._assert_where_conversion(obj, cond, True, exp, np.bool)
values = pd.Series([True, False, True, True])
exp = pd.Series([True, False, True, True])
self._assert_where_conversion(obj, cond, values, exp, np.bool)
def test_where_index_bool(self):
pass
def test_where_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
cond = pd.Series([True, False, True, False])
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-01')])
self._assert_where_conversion(obj, cond, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
values = pd.Series([pd.Timestamp('2012-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2012-01-03'),
pd.Timestamp('2012-01-04')])
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
# ToDo: coerce to object
msg = "cannot coerce a Timestamp with a tz on a naive Block"
with tm.assertRaisesRegexp(TypeError, msg):
obj.where(cond, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: do not coerce to UTC, must be object
values = pd.Series([pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2012-01-02', tz='US/Eastern'),
pd.Timestamp('2012-01-03', tz='US/Eastern'),
pd.Timestamp('2012-01-04', tz='US/Eastern')])
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02 05:00'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04 05:00')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
def test_where_index_datetime64(self):
obj = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
cond = pd.Index([True, False, True, False])
# datetime64 + datetime64 -> datetime64
# must support scalar
msg = "cannot coerce a Timestamp with a tz on a naive Block"
with tm.assertRaises(TypeError):
obj.where(cond, pd.Timestamp('2012-01-01'))
values = pd.Index([pd.Timestamp('2012-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2012-01-03'),
pd.Timestamp('2012-01-04')])
exp = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
# ToDo: coerce to object
msg = ("Index\\(\\.\\.\\.\\) must be called with a collection "
"of some kind")
with tm.assertRaisesRegexp(TypeError, msg):
obj.where(cond, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: do not ignore timezone, must be object
values = pd.Index([pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2012-01-02', tz='US/Eastern'),
pd.Timestamp('2012-01-03', tz='US/Eastern'),
pd.Timestamp('2012-01-04', tz='US/Eastern')])
exp = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
def test_where_series_datetime64tz(self):
pass
def test_where_series_timedelta64(self):
pass
def test_where_series_period(self):
pass
def test_where_index_datetime64tz(self):
pass
def test_where_index_timedelta64(self):
pass
def test_where_index_period(self):
pass
class TestFillnaSeriesCoercion(CoercionBase, tm.TestCase):
# not indexing, but placed here for consistency
method = 'fillna'
def _assert_fillna_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by fillna """
target = original.copy()
res = target.fillna(value)
self._assert(res, expected, expected_dtype)
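# Quick standalone illustration (not part of the test suite): per the float64
# cases below, filling a float Series that contains NaN with a complex scalar
# upcasts the result to complex128.
# >>> pd.Series([1.1, np.nan, 3.3, 4.4]).fillna(1 + 1j).dtype
# dtype('complex128')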
def _fillna_object_common(self, klass):
obj = klass(['a', np.nan, 'c', 'd'])
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = klass(['a', 1, 'c', 'd'])
self._assert_fillna_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = klass(['a', 1.1, 'c', 'd'])
self._assert_fillna_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = klass(['a', 1 + 1j, 'c', 'd'])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = klass(['a', True, 'c', 'd'])
self._assert_fillna_conversion(obj, True, exp, np.object)
def test_fillna_series_object(self):
self._fillna_object_common(pd.Series)
def test_fillna_index_object(self):
self._fillna_object_common(pd.Index)
def test_fillna_series_int64(self):
# int can't hold NaN
pass
def test_fillna_index_int64(self):
pass
def _fillna_float64_common(self, klass):
obj = klass([1.1, np.nan, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = klass([1.1, 1.0, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = klass([1.1, 1.1, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1.1, exp, np.float64)
if klass is pd.Series:
# float + complex -> complex
exp = klass([1.1, 1 + 1j, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.complex128)
elif klass is pd.Index:
# float + complex -> object
exp = klass([1.1, 1 + 1j, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.object)
else:
raise NotImplementedError
# float + bool -> float
exp = klass([1.1, 1.0, 3.3, 4.4])
self._assert_fillna_conversion(obj, True, exp, np.float64)
def test_fillna_series_float64(self):
self._fillna_float64_common(pd.Series)
def test_fillna_index_float64(self):
self._fillna_float64_common(pd.Index)
def test_fillna_series_complex128(self):
obj = pd.Series([1 + 1j, np.nan, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, 1, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, True, exp, np.complex128)
def test_fillna_index_complex128(self):
self._fillna_float64_common(pd.Index)
def test_fillna_series_bool(self):
# bool can't hold NaN
pass
def test_fillna_index_bool(self):
pass
def test_fillna_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.NaT,
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + datetime64tz => object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
value = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_fillna_conversion(obj, value, exp, np.object)
# datetime64 + int => object
# ToDo: must be coerced to object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, 1, exp, 'datetime64[ns]')
# datetime64 + object => object
exp = pd.Series([pd.Timestamp('2011-01-01'),
'x',
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, 'x', exp, np.object)
def test_fillna_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.NaT,
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_fillna_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64 => object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01')
self._assert_fillna_conversion(obj, value, exp, np.object)
# datetime64tz + datetime64tz(different tz) => object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 15:39:02 2018
@author: joyce
"""
import pandas as pd
import numpy as np
from numpy.matlib import repmat
from stats import get_stockdata_from_sql,get_tradedate,Corr,Delta,Rank,Cross_max,\
Cross_min,Delay,Sum,Mean,STD,TsRank,TsMax,TsMin,DecayLinear,Count,SMA,Cov,DTM,DBM,\
Highday,Lowday,HD,LD,RegBeta,RegResi,SUMIF,get_indexdata_from_sql,timer,get_fama
class stAlpha(object):
def __init__(self,begin,end):
self.begin = begin
self.end = end
self.close = get_stockdata_from_sql(1,self.begin,self.end,'Close')
self.open = get_stockdata_from_sql(1,self.begin,self.end,'Open')
self.high = get_stockdata_from_sql(1,self.begin,self.end,'High')
self.low = get_stockdata_from_sql(1,self.begin,self.end,'Low')
self.volume = get_stockdata_from_sql(1,self.begin,self.end,'Vol')
self.amt = get_stockdata_from_sql(1,self.begin,self.end,'Amount')
self.vwap = get_stockdata_from_sql(1,self.begin,self.end,'Vwap')
self.ret = get_stockdata_from_sql(1,begin,end,'Pctchg')
self.close_index = get_indexdata_from_sql(1,begin,end,'close','000001.SH')
self.open_index = get_indexdata_from_sql(1,begin,end,'open','000001.SH')
# self.mkt = get_fama_from_sql()
@timer
def alpha1(self):
volume = self.volume
ln_volume = np.log(volume)
ln_volume_delta = Delta(ln_volume,1)
close = self.close
Open = self.open
price_temp = pd.concat([close,Open],axis = 1,join = 'outer')
price_temp['ret'] = (price_temp['Close'] - price_temp['Open'])/price_temp['Open']
del price_temp['Close'],price_temp['Open']
r_ln_volume_delta = Rank(ln_volume_delta)
r_ret = Rank(price_temp)
rank = pd.concat([r_ln_volume_delta,r_ret],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,6)
alpha = corr
alpha.columns = ['alpha1']
return alpha
@timer
def alpha2(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
temp['alpha'] = (2 * temp['Close'] - temp['Low'] - temp['High']) \
/ (temp['High'] - temp['Low'])
del temp['Close'],temp['Low'],temp['High']
alpha = -1 * Delta(temp,1)
alpha.columns = ['alpha2']
return alpha
@timer
def alpha3(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
close_delay = Delay(pd.DataFrame(temp['Close']),1)
close_delay.columns = ['close_delay']
temp = pd.concat([temp,close_delay],axis = 1,join = 'inner')
temp['min'] = Cross_max(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['Low']))
temp['max'] = Cross_min(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['High']))
temp['alpha_temp'] = 0
temp['alpha_temp'][temp['Close'] > temp['close_delay']] = temp['Close'] - temp['min']
temp['alpha_temp'][temp['Close'] < temp['close_delay']] = temp['Close'] - temp['max']
alpha = Sum(pd.DataFrame(temp['alpha_temp']),6)
alpha.columns = ['alpha3']
return alpha
@timer
def alpha4(self):
close = self.close
volume = self.volume
close_mean_2 = Mean(close,2)
close_mean_8 = Mean(close,8)
close_std = STD(close,8)
volume_mean_20 = Mean(volume,20)
data = pd.concat([close_mean_2,close_mean_8,close_std,volume_mean_20,volume],axis = 1,join = 'inner')
data.columns = ['close_mean_2','close_mean_8','close_std','volume_mean_20','volume']
data['alpha'] = -1
data['alpha'][data['close_mean_2'] < data['close_mean_8'] - data['close_std']] = 1
data['alpha'][data['volume']/data['volume_mean_20'] >= 1] = 1
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha4']
return alpha
@timer
def alpha5(self):
volume = self.volume
high = self.high
r1 = TsRank(volume,5)
r2 = TsRank(high,5)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(corr,5)
alpha.columns = ['alpha5']
return alpha
@timer
def alpha6(self):
Open = self.open
high = self.high
df = pd.concat([Open,high],axis = 1,join = 'inner')
df['price'] = df['Open'] * 0.85 + df['High'] * 0.15
df_delta = Delta(pd.DataFrame(df['price']),1)
alpha = Rank(np.sign(df_delta))
alpha.columns = ['alpha6']
return alpha
@timer
def alpha7(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_delta = Delta(volume,3)
data = pd.concat([close,vwap],axis = 1,join = 'inner')
data['diff'] = data['Vwap'] - data['Close']
r1 = Rank(TsMax(pd.DataFrame(data['diff']),3))
r2 = Rank(TsMin(pd.DataFrame(data['diff']),3))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 15 16:46:50 2021
@author: <NAME>
"""
#This script prepares phenotypes and covariates for associtatoin analysis
"""Several filtering and QC steps are applied, filter masks and overview plots created. As last steps files are reformatted to BGENIE standards
"""
#Import stuff
import pandas as pd
import os
import numpy as np
from statsmodels import robust
import matplotlib.pyplot as plt
import seaborn as sns
import csv
#-------------------------------------------
# create directory for DTI output
#------------------------------------------
pathDTI='/data/workspaces/lag/workspaces/lg-ukbiobank/projects/enigma_evol/final/european/DTI'
if not os.path.exists(pathDTI):
os.mkdir(pathDTI)
#------------------------------------------------------------------------------
# 1 Get the overall sample
#------------------------------------------------------------------------------
#Load list of IDs that survived genetic QC
gen_filter_wb = pd.read_csv ('/data/workspaces/lag/workspaces/lg-ukbiobank/derived_data/genetic_data/snp/subset_imagingT1_40k/v1_white_british_ancestry/SQC/imagingT1_ind_list_sqc_postrelatedness.txt',header = None, names=['eid'])
#Load ID's of current sample
eid = pd.read_csv('/data/workspaces/lag/workspaces/lg-ukbiobank/primary_data/current_release/ukb43760/ukb43760.csv', sep =",", usecols =[0])
mask_current_release = eid['eid'].isin(gen_filter_wb['eid'])
# EUROPEAN SAMPLE
#Load list of IDs that survived genetic QC
gen_filter_we = pd.read_csv ('/data/workspaces/lag/workspaces/lg-ukbiobank/derived_data/genetic_data/snp/subset_imagingT1_40k/v2_white_ancestry/SQC/imagingT1_wa_ind_list_sqc_postrelatedness.txt',header = None, names=['eid'])
#Load ID's of current sample
mask_european = eid['eid'].isin(gen_filter_we['eid'])
# #Load health related filter: We only want neuro healthy particpants
mask_neurohealthy = pd.read_csv ('/data/workspaces/lag/workspaces/lg-ukbiobank/projects/enigma_evol/UKBB/ICD_diagnosis/ukb43760/mask_neuro_healthy_ukb43760.dat', header = None)
#also load the list of unusable T1's and fitler by these as the freesurfer output is likely corrupted
unusable_t1 = pd.read_fwf ('/data/workspaces/lag/workspaces/lg-ukbiobank/projects/enigma_evol/UKBB/unusable_T1/noT1_log.txt', header = None)
unusable_t1.columns =['eid']
mask_unusable = ~eid['eid'].isin(unusable_t1['eid'])
#---------------------------------------------------------------------
# now get overall mask and EIDs of current sample
enigma_evol_european_DTI = pd.concat([mask_european, mask_neurohealthy, mask_unusable], axis =1)
enigma_evol_european_DTI = enigma_evol_european_DTI.all(axis =1)
eid_enigma_evol_european_DTI = eid[enigma_evol_european_DTI]
#save
enigma_evol_european_DTI.to_csv('/data/workspaces/lag/workspaces/lg-ukbiobank/projects/enigma_evol/final/european/preprocessing/mask_enigmaEvol_europeanDTI_ukb43760.dat', index = False, header = False)
eid_enigma_evol_european_DTI.to_csv('/data/workspaces/lag/workspaces/lg-ukbiobank/projects/enigma_evol/final/european/preprocessing/eid_enigmaEvol_europeanDTI_ukb43760.dat',header =False, index = False)
#---------------------------------------------------------
#2 Now we extract DTI TBSS mean FA values
#--------------------------------------------------------------
DTI_TBSS =pd.read_csv('/data/workspaces/lag/workspaces/lg-ukbiobank/primary_data/current_release/ukb43760/ukb43760.csv',usecols=[0,*range(5290,5385,2)], dtype = object)
#now we filter the phenotype by availability of DTI metrics and previous exclusioin criteria
DTI_european = DTI_TBSS[enigma_evol_european_DTI].copy()
DTI_european_noNaN=DTI_european.dropna(how = 'any').copy()
cols=list(DTI_european_noNaN.iloc[:,1:])
DTI_european_noNaN[cols]=DTI_european_noNaN[cols].astype(float)
DTI_european_noNaN['eid']=DTI_european_noNaN.eid.astype(str)
#save as a CSV excel file
DTI_european_noNaN.to_excel('/data/workspaces/lag/workspaces/lg-ukbiobank/projects/enigma_evol/final/european/DTI/european_DTITBSS.xlsx', index= None)
#--------------------------------------------------------------------------------
# 3 PHENOTYPE QUALITY CONTROL
#-----------------------------------------------------------------------------------
global_DTI=DTI_european_noNaN.iloc[:,1:7]
right_DTI=DTI_european_noNaN.iloc[:,7:48:2]
left_DTI=DTI_european_noNaN.iloc[:,8:49:2]
lower_bound =[]
upper_bound =[]
for column in global_DTI.iloc[:,0:]:
lower = global_DTI[column].median() - 5*robust.mad(global_DTI[column])
upper= global_DTI[column].median() + 5*robust.mad(global_DTI[column])
lower_bound.append(lower)
upper_bound.append(upper)
##empty dataframe
global_DTI_qc = pd.DataFrame()
#check each column for outliers, make qc'ed dataframe
for ind, column in enumerate(global_DTI.iloc[:,0:].columns):
global_DTI_qc[column] = global_DTI[column].mask((global_DTI[column] < lower_bound[ind]) | (global_DTI[column] > upper_bound[ind]))
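# The same median +/- 5*MAD masking is applied again below for the left/right
# hemisphere metrics; a hedged sketch of how it could be factored into a single
# reusable helper (illustrative only -- this function is not used by the script):
def mad_outlier_mask(df, n_mads=5):
    """Return a copy of df with values outside median +/- n_mads*MAD set to NaN."""
    qc = pd.DataFrame(index=df.index)
    for column in df.columns:
        med = df[column].median()
        cutoff = n_mads * robust.mad(df[column])
        qc[column] = df[column].mask((df[column] < med - cutoff) | (df[column] > med + cutoff))
    return qc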
# do the same thing for left right but here we keep the N similar across hemispheres
lofdfs = [right_DTI, left_DTI]
rep_pheno_qc = {}
for key,df in enumerate(lofdfs):
#QC each by using the IQR
lower_bound =[]
upper_bound =[]
for column in df.iloc[:,0:]:
lower =df[column].median() - 5*robust.mad(df[column])
upper= df[column].median() + 5*robust.mad(df[column])
lower_bound.append(lower)
upper_bound.append(upper)
qc=pd.DataFrame()
for ind, column in enumerate(df.iloc[:,0:].columns):
qc[column] = df[column].mask((df[column] < lower_bound[ind]) | (df[column] > upper_bound[ind]))
#also immediatelly delete the global outliers
rep_pheno_qc[key]=qc
right_DTI_qc=rep_pheno_qc[0]
left_DTI_qc=rep_pheno_qc[1]
# quirk of pandas: the left/right masking below needs matching column labels, so save the original names to a list first
dtiLE_col =left_DTI_qc.columns.values.tolist()
dtiRE_col =right_DTI_qc.columns.values.tolist()
#and replace them with numbers
left_DTI_qc.columns=np.arange(len(left_DTI_qc.columns))
right_DTI_qc.columns=np.arange(len(right_DTI_qc.columns))
#now we replace outliers with NAN by checking each dataframe frame and get sumstats
dtiLE_masked = left_DTI_qc.mask(right_DTI_qc.isna()).copy()
dtiRE_masked = right_DTI_qc.mask(left_DTI_qc.isna()).copy()
#now name the columns again
dtiLE_masked.columns = dtiLE_col
dtiRE_masked.columns = dtiRE_col
#get all the QCed data in one dataframe, add eid for later use
final_DTI_qc =pd.concat([DTI_european_noNaN['eid'],global_DTI_qc,dtiLE_masked,dtiRE_masked],axis=1)
dtifinalsumstats = final_DTI_qc.iloc[:,1:].describe()
dtifinalsumstats.loc['percentage',:] = dtifinalsumstats.loc['count',:] / len(DTI_european_noNaN.index)
dtifinalsumstats.to_csv('{}/ukb43760_dtiTBSS_QCed_sumstats.csv'.format(pathDTI))
#save EIDs for later use
final_DTI_qc['eid'].to_csv('{}/eid_ukb43760_dtiTBSS.dat'.format(pathDTI),index=None,header=None)
#get the overall ID list to create final mask
eid = pd.read_csv('/data/workspaces/lag/workspaces/lg-ukbiobank/primary_data/current_release/ukb43760/ukb43760.csv', sep =",", usecols =[0])
mask_QCed = eid['eid'].isin(final_DTI_qc['eid'])
#let's print out the N just as a sanity check
mask_QCed.sum()
#save the final mask - this combines neuro healthy, imaging, pheno QC and genetic sample QC
mask_QCed.to_csv('{}/mask_ukb43760_dtiTBSS.dat'.format(pathDTI),index=None,header=None)
final_DTI_qc.to_excel('{}/ukb43760_dtiTBSS_qced.xlsx'.format(pathDTI),sheet_name='ukb43760_dtiTBSS_FA')
# quick check if all used EIDs have been used for surface based metrics --> do we need to rerun subsetting?
eid_surfacebased=pd.read_csv('/data/workspaces/lag/workspaces/lg-ukbiobank/projects/enigma_evol/final/european/preprocessing/eid_afterQC_european_ukb43760.dat',header=None,dtype=str)
eid_surfacebased.columns=['eid']
#check the overlap
overlap=final_DTI_qc['eid'].isin(eid_surfacebased['eid'])
print('the difference between this and the original sample is', len(final_DTI_qc) - overlap.sum())
#---------------------------------------------------------------------------
# 4 PLOT all regional metrics after QC as well
#--------------------------------------------------------------------------
# make violinplot with Seaborn
# loop over it to make it more readable
plt.figure(figsize=(30,7))
bplot=sns.violinplot(y='value', x='variable',
data=pd.melt(final_DTI_qc.iloc[:,1:]))
# standard imports
import os
import glob
import inspect
from pprint import pprint
import pickle as pkl
import copy
import pandas as pd
import numpy as np
from tqdm import tqdm
import logging
import subprocess
import warnings
import itertools
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord, match_coordinates_sky
from astropy.visualization import ZScaleInterval
from astropy import units as u
from astropy.utils.exceptions import AstropyWarning
warnings.simplefilter('ignore', AstropyWarning)
try:
from p_tqdm import p_map
_parallel = True
except ModuleNotFoundError:
print('package "p_tqdm" not installed, cannot do parallel processing')
_parallel = False
# internal imports
import LOSSPhotPypeline
import LOSSPhotPypeline.utils as LPPu
from LOSSPhotPypeline.image import Phot, FitsInfo, FileNames
# setup tqdm for pandas
tqdm.pandas()
class LPP(object):
'''Lick Observatory Supernova Search Photometry Reduction Pipeline'''
def __init__(self, targetname, interactive = True, parallel = True, cal_diff_tol = 0.05, force_color_term = False, max_display_phase = 120,
wdir = '.', cal_use_common_ref_stars = False, sep_tol = 8, pct_increment = 0.05, in_pct_floor = 0.8, autoloadsave = False):
'''Instantiation instructions'''
# basics from instantiation
self.targetname = targetname.replace(' ', '')
self.config_file = targetname + '.conf'
self.interactive = interactive
self.wdir = os.path.abspath(wdir) # working directory for running (particularly idl code)
if (parallel is True) and (_parallel) is True:
self.parallel = True
else:
self.parallel = False
self.cal_diff_tol = cal_diff_tol # starting calibration difference tolerance
self.abs_cal_tol = 0.2 # do not proceed with the pipeline if in non-interactive mode and cal tol exceeds this
self.min_ref_num = 2 # minimum number of ref stars
self.pct_increment = pct_increment # amount to increment percentage requirement down by if doing ref check
self.in_pct_floor = in_pct_floor # minimum percentage of images ref stars must be in if doing ref check
self.checks = ['filter', 'date'] # default checks to perform on image list
self.phase_limits = (-60, 2*365) # phase bounds in days relative to disc. date to keep if "date" check performed
self.cal_use_common_ref_stars = cal_use_common_ref_stars # override requirement that each image have all ref stars
self.sep_tol = sep_tol # radius around target in arcseconds to exclude candidate reference stars from
# log file
self.logfile = self.targetname.replace(' ', '') + '.log'
self.build_log()
# sourced from configuration file
self.targetra = None
self.targetdec = None
self.photsub = False
self.photmethod = 'all'
self.refname = 'TBD'
self.photlistfile = 'TBD'
# discovery date (mjd)
self.disc_date_mjd = None
# check if config file exists -- if not then generate template
if not os.path.exists(self.config_file):
self.log.warn('No configuration file detected, complete template ({}) before proceeding.'.format(self.config_file + '.template'))
LPPu.genconf(targetname = self.targetname, config_file = self.config_file + '.template')
return
# general variables
self.filter_set_ref = ['B', 'V', 'R', 'I', 'CLEAR']
self.first_obs = None
self.phot_cols = {'3.5p': 3, '5p': 5, '7p': 7, '9p': 9, '1fh': 11, '1.5fh': 13, '2fh': 15, 'psf': 17}
self.calmethod = 'psf' # can be set to any key in phot_cols, but recommended is 'psf'
self.image_list = [] # list of image file names
self.phot_instances = [] # Phot instance for each image
self.aIndex = [] # indices of all images in phot_instances
self.wIndex = [] # subset of aIndex to work on
self.bfIndex = [] # indices of images with unsupported filters
self.ucIndex = [] # indices of WCS fail images, even though _c
self.bdIndex = [] # indices of images with dates outside of phase boundaries
self.pfIndex = [] # indices of photometry failures
self.psfIndex = [] # indices of photometry (sub) failures
self.cfIndex = [] # indices of calibration failures
self.csfIndex = [] # indices of calibration (sub) failures
self.noIndex = []
self.nosIndex = []
self.mrIndex = pd.Index([]) # keep track of indices to remove manually
self.run_success = False # track run success
# calibration variables
self.cal_source = 'auto'
self.calfile = 'TBD'
self.calfile_use = 'TBD'
self.force_color_term = force_color_term
self.calibration_dir = 'calibration'
if not os.path.isdir(self.calibration_dir):
os.makedirs(self.calibration_dir)
self.radecfile = os.path.join(self.calibration_dir, self.targetname + '_radec.txt')
self.radec = None
self.cal_IDs = 'all'
self.cal_arrays = None
self.cal_force_clear = False
self.max_display_phase = max_display_phase # num days to show rel to disc for interactive calibration
# keep track of counts of color terms
self.color_terms = {'kait1': 0, 'kait2': 0, 'kait3': 0, 'kait4': 0,
'nickel1': 0, 'nickel2': 0,
'Landolt': 0}
self.color_terms_used = None
# load configuration file
loaded = False
while not loaded:
try:
self.loadconf()
loaded = True
except FileNotFoundError:
LPPu.genconf(targetname = self.targetname, config_file = self.config_file + '.template')
print('Configuration could not be loaded. Template generated: {}'.format(self.config_file + '.template'))
response = input('Specify configuration file (*****.conf) or q to quit > ')
if 'q' == response.lower():
return
else:
self.config_file = response
# lightcurve variables
self.lc_dir = 'lightcurve'
self.lc_base = os.path.join(self.lc_dir, 'lightcurve_{}_'.format(self.targetname))
self.lc_ext = {'raw': '_natural_raw.dat',
'bin': '_natural_bin.dat',
'group': '_natural_group.dat',
'standard': '_standard.dat',
'ul': '_natural_ul.dat'}
# galaxy subtraction variables
self.template_images = None
self.templates_dir = 'templates'
# data directories
self.data_dir = os.path.dirname(self.refname)
self.error_dir = self.data_dir + '_sim'
# steps in standard reduction procedure
self.current_step = 0
self.steps = [self.load_images,
self.check_images,
self.find_ref_stars,
self.match_refcal_stars,
self.do_galaxy_subtraction_all_image,
self.do_photometry_all_image,
self.get_sky_all_image,
self.do_calibration,
self.get_zeromag_all_image,
self.get_limmag_all_image,
self.generate_lc,
self.write_summary]
# save file
self.savefile = self.targetname.replace(' ', '') + '.sav'
if os.path.exists(self.savefile):
if self.interactive:
load = input('Load saved state from {}? ([y]/n) > '.format(self.savefile))
else:
load = 'n' # run fresh if in non-interactive mode
if autoloadsave:
load = 'y' # but load the saved state anyway when this keyword is set
if 'n' not in load.lower():
self.load()
# make sure that the selected calmethod is one of the photmethods
if self.calmethod not in self.photmethod:
self.log.warn('Calibration method must be one of the photometry methods. Exiting.')
return
###################################################################################################
# Configuration File Methods
###################################################################################################
def loadconf(self):
'''
reads config file and sets class attributes accordingly
the most accurate accounting of system state is stored in the binary savefile
'''
# load config file and try to standardize keys
conf = pd.read_csv(self.config_file, header = None, delim_whitespace = True, comment = '#',
index_col = 0, squeeze = True).replace(np.nan, '')
conf.index = conf.index.str.lower()
# read and set values (including the type)
self.targetra = float(conf['targetra'])
self.targetdec = float(conf['targetdec'])
if conf['photsub'].lower() == 'yes': # defaults to False in all other cases
self.photsub = True
if conf['calsource'].lower() in ['psf','sdss','apass']: # only set if a known source is specified
self.cal_source = conf['calsource'].lower()
if conf['photmethod'].lower() == 'all':
self.photmethod = list(self.phot_cols.keys())
elif ',' not in conf['photmethod'].lower():
if conf['photmethod'].lower().strip() in self.phot_cols.keys():
self.photmethod = [conf['photmethod'].lower().strip()]
else:
print('{} is not a valid photometry method. Available options are:'.format(conf['photmethod'].strip()))
print(', '.join(self.phot_cols.keys()))
self.photmethod = input('Enter selection(s) > ').strip().replace(' ', '').split(',')
else:
proposed = conf['photmethod'].strip().split(',')
if set(proposed).issubset(set(self.phot_cols.keys())):
self.photmethod = proposed
else:
print('At least one of {} is not a valid photometry method. Available options are:'.format(conf['photmethod'].strip()))
print(', '.join(self.phot_cols.keys()))
self.photmethod = input('Enter selection(s) > ').strip().replace(' ', '').split(',')
self.refname = conf['refname']
self.photlistfile = conf['photlistfile']
if conf['forcecolorterm'].strip() in self.color_terms.keys():
self.force_color_term = conf['forcecolorterm'].strip()
self.log.info('{} loaded'.format(self.config_file))
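# For reference, a hypothetical minimal configuration file (two whitespace-
# separated columns, keys as parsed above; the values shown are placeholders,
# not a real target):
#
#   targetra        123.456
#   targetdec       -12.345
#   photsub         no
#   calsource       apass
#   photmethod      all
#   refname         data/ref_image.fit
#   photlistfile    photlist.txt
#   forcecolorterm  kait4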
###################################################################################################
# Logging
###################################################################################################
def build_log(self):
'''starts and sets up log'''
self.log = logging.getLogger('LOSSPhotPypeline')
self.log.setLevel(logging.DEBUG)
# don't duplicate entries
if self.log.hasHandlers():
self.log.handlers.clear()
# internal logging
fh = logging.FileHandler(self.logfile)
fh.setFormatter(logging.Formatter('%(asctime)s in %(funcName)s with level %(levelname)s ::: %(message)s'))
self.log.addHandler(fh)
# if in interactive mode, print log at or above INFO on screen
if self.interactive:
sh = logging.StreamHandler()
sh.setLevel(logging.INFO)
sh.setFormatter(logging.Formatter('\n'+'*'*60+'\n%(message)s\n'+'*'*60))
self.log.addHandler(sh)
# used by contextlib to log all idl and bash outputs, while hiding from screen
self.log.write = lambda msg: self.log.debug('[external] ' + msg) if msg != '\n' else None
self.log.info('Welcome to the LOSS Photometry Pypeline (LPP)')
###################################################################################################
# UI / Automation Methods
###################################################################################################
def __iter__(self):
return self
def next(self, *args, **kwargs):
'''performs next reduction step (arguments for that step can be passed through)'''
if self.current_step < len(self.steps):
self.steps[self.current_step](*args, **kwargs)
self.current_step += 1
self.save()
self.summary()
else:
raise StopIteration
def skip(self):
'''skip current step'''
self.log.info('skipping step: {}'.format(self.steps[self.current_step].__name__))
self.go_to(self.current_step + 1)
self.summary()
def go_to(self, step = None):
'''go to specified step, or choose interactively'''
if type(step) == int:
self.current_step = step
self.summary()
else:
self.summary()
print('\nChoose an option:\n')
print('primary reduction steps:')
for i, step in enumerate(self.steps):
if i == self.current_step:
print('{} --- {} (current step)'.format(i, step.__name__))
else:
print('{} --- {}'.format(i, step.__name__))
print('\nadditional options:')
print('n --- add new image(s) by filename(s)')
print('nf --- add new images from file of names')
print('p --- plot light curve from file')
print('c --- cut points from specific light curve')
print('cr --- cut points from specific raw light curve and regenerate subsequent light curves')
print('q --- quit\n')
resp = input('selection > ').lower()
if 'n' == resp:
new_images = input('enter name(s) of new images (comma separated) > ')
if ',' not in new_images:
new_image_list = [new_images]
else:
new_image_list = [fl.strip() for fl in new_images.split(',')]
self.process_new_images(new_image_list = new_image_list)
elif 'nf' == resp:
new_image_file = input('enter name of new image file > ')
self.process_new_images(new_image_file = new_image_file)
elif 'p' == resp:
lc_file = input('enter light curve file (including relative path) to plot > ')
self.plot_lc([lc_file])
elif (resp == 'c') or (resp == 'cr'):
lc_file = input('enter light curve file (including relative path) to cut points from > ')
regenerate = False
if resp == 'cr':
regenerate = True
self.cut_lc_points(lc_file, regenerate = regenerate)
else:
try:
self.current_step = int(resp)
except ValueError:
return
self.summary()
def save(self):
'''saves current state of pipeline'''
vs = vars(self).copy()
vs.pop('steps')
vs.pop('log')
with open(self.savefile, 'wb') as f:
pkl.dump(vs, f)
self.log.info('{} written'.format(self.savefile))
def load(self, savefile = None, summary = True):
'''re-initializes pipeline from saved state in file'''
if savefile is None:
savefile = self.savefile
with open(savefile, 'rb') as f:
vs = pkl.load(f)
for v in vs.keys():
s = 'self.{} = vs["{}"]'.format(v, v)
exec(s)
self.log.info('{} loaded'.format(savefile))
if summary:
self.summary()
def summary(self):
'''print summary of pipeline status'''
print('\n' + '*'*60)
print('Reduction status for {}'.format(self.targetname))
print('Interactive: {}'.format(self.interactive))
print('Photsub Mode: {}'.format(self.photsub))
print('*'*60 + '\n')
if self.current_step == 0:
print('Beginning of reduction pipeline.\n')
else:
print('Previous step: {}'.format(self.steps[self.current_step - 1].__name__))
print(self.steps[self.current_step - 1].__doc__ + '\n')
try:
print('--> Next step: {}'.format(self.steps[self.current_step].__name__))
print(self.steps[self.current_step].__doc__ + '\n')
except IndexError:
print('End of reduction pipeline.')
self.save()
return
try:
print('----> Subsequent step: {}'.format(self.steps[self.current_step + 1].__name__))
print(self.steps[self.current_step + 1].__doc__ + '\n')
except IndexError:
print('End of reduction pipeline.')
def run(self, skips = []):
'''run through reduction steps'''
while True:
if self.current_step in skips:
self.skip()
else:
try:
self.next()
except StopIteration:
break
def show_variables(self):
'''prints instance variables'''
pprint(vars(self))
def show_methods(self):
'''show available methods'''
print('method: docstring')
for name in LPP.__dict__.keys():
if name[:2] != '__' and name != 'show_methods':
print('{}: {}'.format(name, LPP.__dict__[name].__doc__))
###################################################################################################
# Reduction Pipeline Methods
###################################################################################################
def load_images(self):
'''reads image list file to generate lists of image names and Phot instances'''
self.image_list = pd.read_csv(self.photlistfile, header = None, delim_whitespace = True,
comment = '#', squeeze = True)
if self.interactive:
print('\nSelected image files')
print('*'*60 + '\n')
print(self.image_list)
print('\n')
self.log.info('image list loaded from {}'.format(self.photlistfile))
self.log.info('generating list of Phot instances from image list')
self.phot_instances = self._im2inst(self.image_list) # radec is None if running in order
# set indices
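# aIndex tracks every loaded image; wIndex is the working subset that later checks shrink by dropping failed images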
self.aIndex = self.image_list.index
self.wIndex = self.aIndex
def check_images(self):
'''only keep images that are in a supported filter and without file format issues'''
# filter check
if 'filter' in self.checks:
filter_check = lambda img: True if img.filter.upper() in self.filter_set_ref else False
self.log.info('checking filters')
bool_idx = self.phot_instances.loc[self.wIndex].progress_apply(filter_check)
self.bfIndex = self.wIndex[~pd.Series(bool_idx)]
self.log.info('dropping {} images due to unsupported filter'.format(len(self.bfIndex)))
self.wIndex = self.wIndex.drop(self.bfIndex)
# uncal check
if 'uncal' in self.checks:
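# an image passes unless its header explicitly flags a failed WCS solution (RADECSYS == '-999'); a missing keyword still passes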
cal_check = lambda img: True if ('RADECSYS' not in img.header) else (False if (img.header['RADECSYS'] == '-999') else True)
self.log.info('checking images for WCS')
bool_idx = self.phot_instances.loc[self.wIndex].progress_apply(cal_check)
self.ucIndex = self.wIndex[~pd.Series(bool_idx)]
self.log.info('dropping {} images for failed WCS'.format(len(self.ucIndex)))
self.wIndex = self.wIndex.drop(self.ucIndex)
if 'date' in self.checks:
if self.disc_date_mjd is None:
self.log.warn('discovery date not set, cannot do date check')
return
date_check = lambda img: True if ((img.mjd >= (self.disc_date_mjd + self.phase_limits[0])) and
(img.mjd <= (self.disc_date_mjd + self.phase_limits[1]))) else False
self.log.info('checking phases')
bool_idx = self.phot_instances.loc[self.wIndex].progress_apply(date_check)
self.bdIndex = self.wIndex[~pd.Series(bool_idx)]
self.log.info('dropping {} images that are outside of phase bounds'.format(len(self.bdIndex)))
self.wIndex = self.wIndex.drop(self.bdIndex)
# if there are none left, end pipeline
if len(self.wIndex) == 0:
self.log.warn('all images removed by checks --- cannot proceed')
self.run_success = False
self.current_step = self.steps.index(self.write_summary) - 1
return
def find_ref_stars(self):
'''identify all suitable stars in ref image, compute ra & dec, write radecfile, store in instance'''
# if radecfile already exists, no need to do it
if os.path.exists(self.radecfile):
self.log.info('radecfile already exists, loading only')
self.radec = pd.read_csv(self.radecfile, delim_whitespace=True, skiprows = (0,1,3,4,5), names = ['RA','DEC'])
# set radec in Phot instances
for img in self.phot_instances.loc[self.wIndex]:
img.radec = self.radec
return
if self.refname == '' :
self.log.warn('refname has not been assigned, please do it first!')
return
# instantiate object to manage names
ref = Phot(self.refname, calmethod = self.calmethod)
# use sextractor to extract all stars to be used as refstars
sxcp = os.path.join(os.path.dirname(inspect.getfile(LOSSPhotPypeline)), 'conf', 'sextractor_config')
config = os.path.join(sxcp, 'kait.sex')
filt = os.path.join(sxcp, 'gauss_2.0_5x5.conv')
par = os.path.join(sxcp, 'kait.par')
star = os.path.join(sxcp, 'default.nnw')
cmd_list = ['sex', self.refname,
'-c', config,
'-PARAMETERS_NAME', par,
'-FILTER_NAME', filt,
'-STARNNW_NAME', star,
'-CATALOG_NAME', ref.sobj,
'-CHECKIMAGE_NAME', ref.skyfit]
p = subprocess.Popen(cmd_list, stdout = subprocess.PIPE, stderr = subprocess.PIPE, universal_newlines = True)
stdout, stderr = p.communicate()
self.log.debug(stdout)
self.log.debug(stderr)
# make sure process succeeded
if not os.path.exists(ref.sobj):
self.log.warn('SExtractor failed --- no sobj file generated, check!')
return
# read sobj file of X_IMAGE and Y_IMAGE columns, as well as MAG_APER for sort
with fits.open(ref.sobj) as hdul:
data = hdul[1].data
# sort according to magnitude, from small/bright to large/faint
data.sort(order = 'MAG_APER')
imagex = data.X_IMAGE
imagey = data.Y_IMAGE
# transform to RA and DEC using ref image header information
cs = WCS(header = ref.header)
imagera, imagedec = cs.all_pix2world(imagex, imagey, 0)
# remove any identified "stars" that are too close to target
coords = SkyCoord(imagera, imagedec, unit = (u.deg, u.deg))
target_coords = SkyCoord(self.targetra, self.targetdec, unit = (u.deg, u.deg))
offsets = coords.separation(target_coords).arcsecond
imagera = imagera[offsets > self.sep_tol]
imagedec = imagedec[offsets > self.sep_tol]
# write radec file
with open(self.radecfile, 'w') as f:
f.write('TARGET\n')
f.write(' RA DEC\n')
f.write(' {:.7f} {:.7f}\n'.format(self.targetra, self.targetdec))
f.write('\nREFSTARS\n')
f.write(' RA DEC\n')
for i in range(len(imagera)):
f.write(' {:.7f} {:.7f}\n'.format(imagera[i], imagedec[i]))
self.log.info('{} written'.format(self.radecfile))
self.radec = pd.read_csv(self.radecfile, delim_whitespace=True, skiprows = (0,1,3,4,5), names = ['RA','DEC'])
# set radec in Phot instances
for img in self.phot_instances.loc[self.wIndex]:
img.radec = self.radec
def match_refcal_stars(self):
'''get calibration catalog, and match stars to ref stars -- only do if needed'''
if os.path.exists(os.path.join(self.calibration_dir, self.calfile)) is False:
# get calibration catalog
catalog = LPPu.astroCatalog(self.targetname, self.targetra, self.targetdec, relative_path = self.calibration_dir)
catalog.get_cal(method = self.cal_source)
self.calfile = catalog.cal_filename
self.cal_source = catalog.cal_source
self.log.info('calibration data sourced')
self.log.info('matching ref stars to catalog stars and selecting 40 brightest')
self.get_cal_info()
radec = SkyCoord(self.radec.loc[1:, 'RA'], self.radec.loc[1:, 'DEC'], unit = (u.deg, u.deg))
cal_cat = pd.read_csv(os.path.join(self.calibration_dir, self.calfile), delim_whitespace = True)
cal = SkyCoord(cal_cat.loc[:, 'ra'], cal_cat.loc[:, 'dec'], unit = (u.deg, u.deg))
idx, d2d, d3d = match_coordinates_sky(cal, radec)
cal_use = cal_cat.iloc[d2d.arcsecond < 5] # calibration stars that match within 5"
cal_use.index = self.radec.loc[1:].iloc[idx[d2d.arcsecond < 5]].index - 1 # don't count sn and align indices with radecfile
cal_use.insert(0, 'starID', cal_use.index)
cal_use = cal_use.sort_values(by = 'r').drop_duplicates(subset = 'starID', keep = 'first')
self.cal_use = cal_use.iloc[:40] # select top 40 brightest
# write "use" files
with open(os.path.join(self.calibration_dir, self.calfile_use), 'w') as outfile:
outfile.write(self.cal_use.to_string(index = False))
catalog = LPPu.astroCatalog(self.targetname, self.targetra, self.targetdec, relative_path = self.calibration_dir)
catalog.cal_filename = self.calfile_use
catalog.cal_source = self.cal_source
catalog.to_natural()
self.cal_arrays = catalog.get_cal_arrays(index_order = self.cal_use.index)
# show ref stars (and cut if interactive mode)
if self.interactive:
self._display_refstars(icut = True)
else:
self._display_refstars()
def do_galaxy_subtraction_all_image(self, subreg = 0.9):
'''performs galaxy subtraction on all selected image files'''
if not self.photsub:
self.log.warn('not in photsub mode, skipping galaxy subtraction')
return
self.log.info('starting galaxy subtraction')
if self.template_images is None:
self.load_templates()
if self.template_images is None:
self.log.warn('could not get suitable template images, running without galaxy subtraction')
self.photsub = False
return
# set up for parallelization
ti = self.template_images
fn = lambda img: img.galaxy_subtract(ti, subreg = subreg)
# do galaxy subtraction in the appropriate mode
if self.parallel is True:
res = p_map(fn, self.phot_instances.loc[self.wIndex].tolist())
else:
res = []
for img in tqdm(self.phot_instances.loc[self.wIndex].tolist()):
res.append(fn(img))
# extract results, log, and determine if successful
res = pd.DataFrame(res, columns = ['success', 'log'])
res['log'].apply(lambda log_entry: self._log_idl(*log_entry))
if not res['success'].all():
self.log.warn('photsub failed (probably b/c of missing templates), running without galaxy subtraction')
self._get_template_candidates()
self.photsub = False
self.log.info('galaxy subtraction done')
def do_photometry_all_image(self, forcesky = False):
'''performs photometry on all selected image files'''
self.log.info('starting photometry (galsub: {})'.format(self.photsub))
# set up for parallelization
ps = self.photsub
fn = lambda img: img.do_photometry(photsub = ps, forcesky = forcesky)
# do photometry in the appropriate mode
if self.parallel is True:
res = p_map(fn, self.phot_instances.loc[self.wIndex].tolist())
else:
res = []
for img in tqdm(self.phot_instances.loc[self.wIndex].tolist()):
res.append(fn(img))
# extract results, log, and remove failures
res = pd.DataFrame(res, columns = ['unsub', 'sub', 'log'])
res['log'].apply(lambda log_entry: self._log_idl(*log_entry))
self.pfIndex = self.wIndex[~res['unsub']]
self.log.warn('photometry failed on {} out of {} images'.format(len(self.pfIndex), len(self.wIndex)))
if self.photsub is False:
self.wIndex = self.wIndex.drop(self.pfIndex)
else:
self.psfIndex = self.wIndex[~res['sub']]
self.log.warn('photometry (sub) failed on {} out of {} images'.format(len(self.psfIndex), len(self.wIndex)))
self.wIndex = self.wIndex.drop(self.pfIndex.intersection(self.psfIndex))
if len(self.wIndex) == 0:
self.log.warn('all images failed, cannot proceed')
self.run_success = False
self.current_step = self.steps.index(self.write_summary) - 1
return
self.log.info('photometry done')
def get_sky_all_image(self):
'''get and set sky value for every phot instance'''
self.log.info('getting sky value for each image')
self.phot_instances.loc[self.wIndex].progress_apply(lambda img: img.get_sky())
def calibrate(self, final_pass = False):
'''performs calibration on all images included in photlistfile, using outputs from do_photometry_all_image'''
if not final_pass:
self.log.info('performing calibration')
else:
self.log.info('doing final calibration')
# reset trackers
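# cfIndex / csfIndex collect images whose final-pass calibration output file is missing (unsubtracted / subtracted, respectively)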
self.cfIndex = []
self.csfIndex = []
# calibration list
cal_list = []
# iterate through image list and execute calibration script on each
for idx, img in tqdm(self.phot_instances.loc[self.wIndex].iteritems(), total = len(self.wIndex)):
# set photsub mode appropriately
if self.photsub is False:
ps = False
elif (self.photsub is True) and (idx in self.psfIndex):
ps = False
else:
ps = True
# do calibration
phot = img.calibrate(self.cal_IDs, self.cal_arrays[img.color_term].loc[:, img.filter.upper()],
self.cal_arrays[img.color_term].loc[:, 'E'+img.filter.upper()], sub = ps, write_dat = final_pass)
phot.rename(columns = {self.calmethod: 'Mag_obs'}, inplace = True)
# add comparison information
phot.insert(0, 'Filter', img.filter.upper())
phot.loc[self.cal_IDs, 'RA_cal'] = self.cal_arrays[img.color_term].loc[self.cal_IDs, 'RA']
phot.loc[self.cal_IDs, 'DEC_cal'] = self.cal_arrays[img.color_term].loc[self.cal_IDs, 'DEC']
phot.loc[self.cal_IDs, 'Mag_cal'] = self.cal_arrays[img.color_term].loc[self.cal_IDs, img.filter.upper()]
phot.loc[self.cal_IDs, 'RA_diff'] = np.abs(phot.loc[self.cal_IDs, 'RA_obs'] - phot.loc[self.cal_IDs, 'RA_cal'])
phot.loc[self.cal_IDs, 'DEC_diff'] = np.abs(phot.loc[self.cal_IDs, 'DEC_obs'] - phot.loc[self.cal_IDs, 'DEC_cal'])
cal_list.append(phot.loc[self.cal_IDs, ['Filter', 'RA_diff', 'DEC_diff', 'Mag_obs', 'Mag_cal', 'ref_in', 'system']])
# check for success if in final pass mode
if final_pass:
if (os.path.exists(img.psfdat) is False):
self.cfIndex.append(idx)
if (self.photsub is True) and (os.path.exists(img.psfsubdat) is False):
self.csfIndex.append(idx)
# organize calibrators and compute global metrics
self.calibrators = pd.concat([df.loc[self.cal_IDs, :] for df in cal_list], keys = self.wIndex)
self.calibrators['Mag_diff'] = self.calibrators['Mag_obs'] - self.calibrators['Mag_cal']
# remove failures if in final pass mode
if final_pass:
self.cfIndex = pd.Index(self.cfIndex)
self.csfIndex = pd.Index(self.csfIndex)
self.log.warn('calibration failed on {} out of {} images'.format(len(self.cfIndex), len(self.wIndex)))
self.wIndex = self.wIndex.drop(self.cfIndex) # processing based only on non-subtracted images
if self.photsub is True:
self.log.warn('calibration (sub) failed on {} out of {} images'.format(len(self.csfIndex), len(self.wIndex)))
if len(self.wIndex) == 0:
self.log.warn('all images failed, cannot proceed')
self.run_success = False
self.current_step = self.steps.index(self.write_summary) - 1
return
def do_calibration(self, use_filts = 'all', sig = 3, min_cut_diff = 0.5, quality_cuts = True):
'''check calibration and make cuts as needed'''
self.log.info('performing calibration')
# get filters used
self.filters = set(self.phot_instances.loc[self.wIndex].apply(lambda img: img.filter.upper()))
if use_filts == 'all':
use_filts = self.filters
if self.cal_IDs == 'all':
self.cal_IDs = self.cal_arrays['kait4'].index # choice of color term here is arbitrary
# iterate until acceptable tolerance is reached
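# pass 0 cuts cal IDs with low success rates, pass 1 cuts images with low success rates, the pass at iter_cnt == 3 optionally restricts to common ref stars; remaining passes cut on the magnitude tolerance until accepted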
accept_tol = False
skip_calibrate = False
iter_cnt = -1
while not accept_tol:
iter_cnt += 1
# run calibration
if not skip_calibrate:
self.calibrate()
skip_calibrate = False
# find indices (img, cal_ID) where Mag_obs failed to measure
locs = self.calibrators.loc[self.calibrators['Mag_obs'].isnull(), :].index
# pct of succ meas as a function of img
img_succ = (1 - locs.levels[0][locs.labels[0]].value_counts() / len(self.cal_IDs))
# pct of succ meas as a function of cal_ID
cal_succ = (1 - locs.levels[1][locs.labels[1]].value_counts() / len(self.wIndex))
# run minimal quality cuts if requested --- these are the first and second iterations
if (quality_cuts is True) and (iter_cnt < 2):
# remove any cal IDs or images with a very low success rate
ID_cut = cal_succ.index[cal_succ < 0.4]
if (len(ID_cut) > 0) and (iter_cnt == 0):
self.cal_IDs = self.cal_IDs.drop(ID_cut)
self.log.info('cut ID(s): {} from minimal quality cut'.format(ID_cut))
continue
elif iter_cnt == 0:
self.log.info('all IDs passed minimal quality cut')
iter_cnt = 1
img_cut = img_succ.index[img_succ < 0.4]
if (len(img_cut) > 0) and (iter_cnt == 1):
self.manual_remove(img_cut)
self.log.info('cut image(s): {} from minimal quality cut'.format(img_cut))
continue
elif iter_cnt == 1:
self.log.info('all images passed minimal quality cut')
iter_cnt = 2
elif iter_cnt < 2:
iter_cnt = 2
# cut to use common ref stars if requested --- this is the fourth iteration
# iteration 3 is used to remove outlier images before applying this cut
if (self.cal_use_common_ref_stars is True) and (iter_cnt == 3):
self.log.info('finding common ref stars')
accept = False
cnt = 0
while not accept:
current_pct = 1 - cnt * self.pct_increment
tmp = cal_succ[cal_succ >= current_pct]
if len(tmp) > self.min_ref_num:
self.log.info('{} ref stars are in at least {} pct of images, using these'.format(len(tmp), 100*current_pct))
accept = True
elif current_pct < self.in_pct_floor:
self.log.warn('reached minimum tolerance for pct image including ref stars, quitting')
self.run_success = False
self.current_step = self.steps.index(self.write_summary) - 1
return
cnt += 1
if len(tmp) < len(self.cal_IDs):
self.cal_IDs = tmp.index
continue
# instantiate trackers
cut_list = [] # store IDs that will be cut
nan_list = [] # IDs of NaN to be cut immediately
full_list = []
bad_img_list = [] # indices of images that are <sig> outliers
single_cut_idx = None
tmp_max = self.cal_diff_tol
df_list = []
# group by filter and perform comparison
for filt, group in self.calibrators.groupby('Filter', sort = False):
# use specific filters if specified
if filt not in use_filts:
continue
# if clear is not the only filter, skip it in comparison unless forced to use
if (len(self.filters) > 1) and ('CLEAR' in self.filters) and (filt == 'CLEAR') and (not self.cal_force_clear):
continue
# compute metrics
df = group.median(level = 1)
df.loc[:, 'pct_im'] = group['Mag_obs'].notnull().sum(level=1) / len(group['Mag_obs'].groupby(level=0))
df.loc[:, 'std_obs'] = group.std(level = 1).loc[:, 'Mag_obs']
df = df.sort_index()
df.loc[:, 'Diff'] = np.abs(df.loc[:, 'Mag_diff'])
# identify possible exclusions
cut_list.extend(list(df.index[df.loc[:, 'Diff'] > self.cal_diff_tol]))
nan_list.extend(list(df.index[df.loc[:, 'Diff'].isnull()]))
if len(nan_list) > 0:
break
full_list = list(df.index) # ok to overwrite b/c same each time
## exclude outlier images by iterating through all cal IDs and finding images of <sig> outliers
for id in self.cal_IDs:
selection = self.calibrators.loc[self.calibrators['Filter'] == filt, :].loc[(self.wIndex, id),['Mag_obs', 'system']]
for sys, grp in selection.groupby('system', sort = False):
grp = grp.loc[grp['Mag_obs'].notnull(), :]
mags = grp.loc[:, 'Mag_obs'].values
index = grp.index.levels[0][grp.index.labels[0]] # image indices
if len(mags) > 0:
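# flag an image as an outlier if this star's measurement deviates from its mean by more than max(min_cut_diff, sig * std)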
bad_img_list.extend(index[np.abs(mags - mags.mean()) > np.max([min_cut_diff, sig * mags.std()])])
if self.interactive:
print('\nFilter: {}'.format(filt))
print('*'*60)
rnd = pd.Series([2,4,4,3,3,3,3], index = ['pct_im', 'RA_diff', 'DEC_diff', 'Mag_cal', 'Mag_obs', 'std_obs', 'Mag_diff'])
print(df.loc[:, ['pct_im', 'RA_diff', 'DEC_diff', 'Mag_cal', 'Mag_obs', 'std_obs', 'Mag_diff']].round(rnd))
else:
# find index and value of maximum diff
maxi = df.loc[:, 'Diff'].idxmax()
maxd = df.loc[maxi, 'Diff']
if maxd > tmp_max:
single_cut_idx = maxi
tmp_max = maxd
df.insert(0, 'Filter', filt)
df_list.append(df)
cut_list = list(set(cut_list))
bad_img_list = list(set(bad_img_list))
# remove NaN
if len(nan_list) > 0:
self.log.info('cutting ID(s) {} for NaN'.format(', '.join([str(i) for i in nan_list])))
self.cal_IDs = self.cal_IDs.drop(nan_list)
continue
# make cuts to refstars as needed
if self.interactive:
# show ref stars and calibrated light curves
fig, ax = plt.subplots(1, 2, figsize = (12, 6))
self._display_refstars(ax = ax[0], display = True)
if self.photsub:
r = self.phot_instances.loc[self.wIndex].apply(lambda img: pd.Series([img.mjd, img.filter, img.phot_sub.loc[-1, self.calmethod],
img.phot_sub.loc[-1, self.calmethod + '_err'], img.color_term]))
else:
r = self.phot_instances.loc[self.wIndex].apply(lambda img: pd.Series([img.mjd, img.filter, img.phot.loc[-1, 'Mag_obs'],
img.phot.loc[-1, self.calmethod + '_err'], img.color_term]))
r.columns = ('mjd', 'filter', 'mag', 'emag', 'system')
p = LPPu.plotLC(offset_scale = 2)
for idx, ct in enumerate(set(r['system'])):
fs = 'full'
if 'nickel' in ct:
fs = 'none'
for filt in set(r['filter']):
selector = (r['filter'] == filt) & r['mag'].notnull() & (r['system'] == ct)
if (self.max_display_phase != 0) and (self.max_display_phase != 'all'):
selector = selector & (r['mjd'] - r['mjd'].min() < self.max_display_phase)
line, = ax[1].plot(r.loc[selector, 'mjd'], r.loc[selector, 'mag'] + p._offset(filt), c = p._color(filt),
marker = ['o', 'D', 's', 'v', '^'][idx], linestyle = 'None', picker = 3,
label = '{},{}'.format(filt, ct), fillstyle = fs)
ax[1].invert_yaxis()
ax[1].set_xticks(())
ax[1].set_yticks(())
x0, x1 = ax[1].get_xlim()
y0, y1 = ax[1].get_ylim()
ax[1].set_aspect(np.abs((x1-x0)/(y1-y0)))
plt.tight_layout()
def onpick(event):
ind = event.ind[0]
filt, sys = event.artist._label.split(',')
row = r.loc[(r['filter'] == filt) & (r['system'] == sys) & (r['mjd'] == event.artist._x[ind]), :]
id = row.index[0]
cal = self.phot_instances.loc[id].phot.loc[self.cal_IDs, 'Mag_obs']
print('\nClicked Point Information:')
print('\tImage ID: {}'.format(id))
print('\tImage Name: {}'.format(self.image_list.loc[id]))
print('\tMJD: {:.1f}'.format(row['mjd'].item()))
print('\tMag: {:.1f} pm {:.1f}'.format(row['mag'].item(), row['emag'].item()))
print('\tFilter: {}'.format(filt))
print('\tSystem: {}'.format(sys))
print('\tcal IDs used: {}/{}'.format(len(cal.loc[cal.notnull()]), len(cal)))
print('\tfailed cal IDs: {}'.format(', '.join([str(i) for i in sorted(cal.loc[cal.isnull()].index)])))
print('\nChoice >')
cid = fig.canvas.mpl_connect('pick_event', lambda event: onpick(event))
print('*'*60)
nshow = np.min([len(cal_succ), len(img_succ), 10])
if nshow > 0:
print('\nSuccess rates per cal ID and per image (worst {})'.format(nshow))
print('{:<12} {:<12}'.format('cal ID', 'image'))
for c, i in itertools.zip_longest(cal_succ.iloc[:nshow].index, img_succ.iloc[:nshow].index):
print('{:<4} {:<7} {:<4} {:<7}'.format(c, round(cal_succ.loc[c], 3), i, round(img_succ.loc[i], 3)))
# warn if any individual images have too few ref stars
ref_counts = self.calibrators['Mag_obs'].notnull().sum(level = 0)
if (ref_counts < self.min_ref_num).sum() > 0:
print('\nWarning - the following image(s) have below the minimum number of ref stars ({}):'.format(self.min_ref_num))
print(ref_counts.index[ref_counts < self.min_ref_num])
if (ref_counts == self.min_ref_num).sum() > 0:
print('\nWarning - the following image(s) have the minimum number of ref stars ({}):'.format(self.min_ref_num))
print(ref_counts.index[ref_counts == self.min_ref_num])
print('\nDo not cut the following ID(s) to avoid falling below the minimum:')
idx_selector = (ref_counts.index[ref_counts == self.min_ref_num], self.cal_IDs)
num_affected = self.calibrators.loc[idx_selector, 'Mag_obs'].notnull().sum(level=1)
print(num_affected.index[num_affected > 0].sort_values())
if len(bad_img_list) > 0:
print('\nWarning - the following image(s) are outliers:')
print(bad_img_list)
print('\nAt tolerance {}, {} ID(s) (out of {}) will be cut'.format(self.cal_diff_tol, len(cut_list), len(full_list)))
print(sorted(cut_list))
print('\nSelect an option below (or click on light curve points to get info):')
print('\tAccept cuts with tolerance of {} mag ([y])'.format(self.cal_diff_tol))
print('\tAdjust tolerance [enter float between 0 and 1]')
print('\tCut calibration star(s) by ID(s) [comma separated list of IDs to cut]')
print('\tDisplay image ["d" followed by index (e.g. d162)]')
print('\tCut image(s) ["c" followed by comma separated indexes (e.g. c162,163)]')
print('\tView measured mags for specific cal star ["<passband>" followed by cal ID (e.g. B5)]')
response = input('\nChoice > ')
fig.canvas.mpl_disconnect(cid)
plt.ioff()
plt.close()
if (response == '') or ('y' in response.lower()):
self.cal_IDs = self.cal_IDs.drop(cut_list)
accept_tol = True
elif '.' in response:
self.cal_diff_tol = float(response)
skip_calibrate = True
elif response.lower()[0] == 'd':
self.compare_image2ref(int(response[1:]))
skip_calibrate = True
elif (response.lower()[0] == 'c') and (response.lower()[1] != 'l'):
self.manual_remove([int(i) for i in response[1:].split(',')])
elif response[0] in self.filters:
self._display_obs_cal_mags(response[0], int(response[1:]))
skip_calibrate = True
elif response[:5].lower() == 'clear':
self._display_obs_cal_mags(response[:5], int(response[5:]))
else:
self.cal_IDs = self.cal_IDs.drop([int(i) for i in response.split(',')])
elif (len(bad_img_list) > 0):
self.log.info('removing {} outlier image(s): {}'.format(len(bad_img_list), bad_img_list))
self.manual_remove(bad_img_list)
elif single_cut_idx is None:
accept_tol = True
elif len(full_list) > self.min_ref_num:
self.log.info('cutting ID {} for exceeding tolerance and re-running calibration'.format(single_cut_idx))
self.cal_IDs = self.cal_IDs.drop([single_cut_idx])
elif self.cal_diff_tol <= self.abs_cal_tol:
self.cal_diff_tol += 0.05
self.log.info('increasing tolerance to {} and re-running calibration'.format(self.cal_diff_tol))
else:
self.log.warn('calibration tolerance exceeds {}, cannot proceed'.format(self.abs_cal_tol))
self.run_success = False
self.current_step = self.steps.index(self.write_summary) - 1
return
with open(os.path.join(self.calibration_dir, 'final_ref_stars.dat'), 'w') as outfile:
outfile.write(pd.concat([df.loc[self.cal_IDs, :] for df in df_list], sort = False).to_string())
# make final pass on calibration to track failures and write .dat files
self.calibrate(final_pass = True)
# write final "use" files
with open(os.path.join(self.calibration_dir, self.calfile_use), 'w') as outfile:
outfile.write(self.cal_use.loc[self.cal_IDs, :].to_string(index = False))
catalog = LPPu.astroCatalog(self.targetname, self.targetra, self.targetdec, relative_path = self.calibration_dir)
catalog.cal_filename = self.calfile_use
catalog.cal_source = self.cal_source
catalog.to_natural()
# show new ref stars
plt.ioff()
self._display_refstars()
def get_zeromag_all_image(self):
'''get and set zeromag for every phot instance'''
self.log.info('getting zeromag for each image')
self.phot_instances.loc[self.wIndex].progress_apply(lambda img: img.get_zeromag())
def get_limmag_all_image(self):
'''get and set limiting mag for every phot instance'''
self.log.info('getting limiting mag for each image')
self.phot_instances.loc[self.wIndex].progress_apply(lambda img: img.calc_limmag())
def generate_raw_lcs(self, color_term, photsub_mode = False):
'''builds raw light curve files from calibrated results'''
# light curve containers
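# the ';; MJD' header doubles as a comment line for readers that skip ';' (the raw files are re-read with comment = ';' in write_sim_lc)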
columns = (';; MJD','etburst', 'mag', '-emag', '+emag', 'limmag', 'filter', 'imagename')
lc = {name: [] for name in columns}
lcs = {m: copy.deepcopy(lc) for m in self.photmethod}
# limiting mag containers
lm = {name: [] for name in columns}
lms = {m: copy.deepcopy(lm) for m in self.photmethod}
# iterate through files and extract LC information
for idx, img in self.phot_instances.loc[self.wIndex].iteritems():
# immediately skip if not the appropriate color term unless being forced
if (color_term != img.color_term) and (self.force_color_term is False):
continue
# skip failed images
if (idx in self.cfIndex) and (photsub_mode is False):
continue
elif ((idx in self.psfIndex) or (idx in self.csfIndex)) and (photsub_mode is True):
continue
# read photometry results
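# build (mag, err) column-index pairs for each photometry method; column 0 holds the star ID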
cols = (0,) + sum(((self.phot_cols[m], self.phot_cols[m] + 1) for m in self.photmethod), ())
col_names = ('ID',) + sum(((m + '_mag', m + '_err') for m in self.photmethod), ())
if photsub_mode is False:
dat = img.psfdat
else:
dat = img.psfsubdat
d = pd.read_csv(dat, header = None, delim_whitespace = True, comment = ';', usecols=cols, names = col_names)
# detect if no target in file
if 1 not in d['ID'].values:
self.log.warn('no object in calibrated photometry file: {}'.format(dat))
if photsub_mode is False:
self.noIndex.append(idx)
else:
self.nosIndex.append(idx)
# setup columns for each raw file
for m in self.photmethod:
if 1 not in d['ID'].values:
continue # skip these ones
mag = d[d['ID'] == 1][m + '_mag'].item()
err = d[d['ID'] == 1][m + '_err'].item()
if np.isnan(mag):
record = lms[m]
else:
record = lcs[m]
record['mag'].append(round(mag,5))
record['-emag'].append(round(mag - err,5))
record['+emag'].append(round(mag + err,5))
record[';; MJD'].append(round(img.mjd, 6))
record['etburst'].append(round(img.exptime / (60 * 24), 5)) # exposure time in days
record['filter'].append(img.filter.upper())
record['imagename'].append(img.cimg)
record['limmag'].append(round(img.limmag, 5))
# write raw lc files
for m in self.photmethod:
lc_raw_name = self._lc_fname(color_term, m, 'raw', sub = photsub_mode)
lc_raw = pd.DataFrame(lcs[m])
lc_raw.to_csv(lc_raw_name, sep = '\t', columns = columns, index = False, na_rep = 'NaN')
lm_raw_name = self._lc_fname(color_term, m, 'ul', sub = photsub_mode)
lm_raw = pd.DataFrame(lms[m])
lm_raw.to_csv(lm_raw_name, sep = '\t', columns = columns, index = False, na_rep = 'NaN')
p = LPPu.plotLC(lc_file = lc_raw_name, lm_file = lm_raw_name, name = self.targetname, photmethod = m)
p.plot_lc(extensions = ['.ps', '.png'])
def generate_bin_lc(self, infile, outfile):
'''wraps IDL lightcurve binning routine'''
idl_cmd = '''idl -e "lpp_dat_res_bin, '{}', '{}', OUTFILE='{}', /OUTPUT"'''.format(infile, outfile, outfile)
stdout, stderr = LPPu.idl(idl_cmd)
self._log_idl(idl_cmd, stdout, stderr)
def generate_group_lc(self, infile, outfile):
'''wraps IDL lightcurve grouping routine'''
idl_cmd = '''idl -e "lpp_dat_res_group, '{}', '{}', OUTFILE='{}'"'''.format(infile, outfile, outfile)
stdout, stderr = LPPu.idl(idl_cmd)
self._log_idl(idl_cmd, stdout, stderr)
if (', not doing group' in stdout) or (os.path.exists(outfile) is False):
return False
else:
return True
def generate_final_lc(self, color_term, infile, outfile):
'''wraps IDL routine to convert from natural system'''
idl_cmd = '''idl -e "lpp_invert_natural_stand_objonly, '{}', '{}', OUTFILE='{}', /OUTPUT"'''.format(infile, color_term, outfile)
stdout, stderr = LPPu.idl(idl_cmd)
self._log_idl(idl_cmd, stdout, stderr)
if not os.path.exists(outfile):
return False
else:
return True
def raw2standard_lc(self, infile):
'''wrap intermediate steps that transform light curves from "raw" to "standard"'''
# assign convenience variables
tmp = infile.split('_')
ct = tmp[tmp.index('natural') - 2] # get color term
m = tmp[tmp.index('natural') - 1] # get phot aperture
binfile = infile.replace('raw', 'bin')
groupfile = binfile.replace('bin', 'group')
lc = groupfile.replace('natural_group', 'standard')
# do intermediate light curve steps
self.generate_bin_lc(infile, binfile)
grp_result = self.generate_group_lc(binfile, groupfile)
if grp_result is False:
self.log.warn('no groupfile generated, skipping')
return False, False
std_result = self.generate_final_lc(ct, groupfile, lc)
if std_result is False:
self.log.warn('no standard lc generated, skipping')
return True, False
# plot
p = LPPu.plotLC(lc_file = lc, name = self.targetname, photmethod = m)
p.plot_lc(extensions = ['.ps', '.png'])
return True, True
def get_color_term_used(self):
'''get dictionary counting use of each color term'''
ct = self.phot_instances.loc[self.wIndex].apply(lambda img: img.color_term)
self.color_terms_used = dict(ct.value_counts())
def generate_lc(self, sub = False):
'''performs all functions to transform image photometry into calibrated light curve of target'''
self.log.info('generating and plotting light curves (sub mode: {})'.format(sub))
# set up file system
if not os.path.isdir(self.lc_dir):
os.makedirs(self.lc_dir)
self.get_color_term_used()
# generate raw light curves
self.log.info('generating raw light curves for the following color terms: {}'.format(', '.join(self.color_terms_used.keys())))
for ct in tqdm(self.color_terms_used.keys()):
self.generate_raw_lcs(ct, photsub_mode = sub)
# generate intermediate and final light curves
self.log.info('generating "standard" light curves')
for m in tqdm(self.photmethod):
all_nat = []
all_std = []
for ct in self.color_terms_used.keys():
group_succ, standard_succ = self.raw2standard_lc(self._lc_fname(ct, m, 'raw', sub = sub))
# only add group and standard if group has been updated
if group_succ is True:
all_nat.append((ct, self._lc_fname(ct, m, 'group', sub = sub)))
if standard_succ is True:
all_std.append(self._lc_fname(ct, m, 'standard', sub = sub))
# make "all" light curves
lc_nat = self._lc_fname('all', m, 'group', sub = sub)
concat_list = []
for row in all_nat:
tmp = pd.read_csv(row[1], delim_whitespace = True)
tmp.insert(3, 'SYSTEM', row[0])
concat_list.append(tmp)
if len(concat_list) > 0:
pd.concat(concat_list, sort = False).to_csv(lc_nat, sep = '\t', na_rep = 'NaN', index = False, float_format = '%6.3f')
p = LPPu.plotLC(lc_file = lc_nat, name = self.targetname, photmethod = m)
p.plot_lc(extensions = ['.ps', '.png'])
lc = self._lc_fname('all', m, 'standard', sub = sub)
concat_list = []
for fl in all_std:
concat_list.append(pd.read_csv(fl, delim_whitespace = True))
if len(concat_list) > 0:
pd.concat(concat_list, sort = False).to_csv(lc, sep = '\t', na_rep = 'NaN', index = False, float_format = '%6.3f')
p = LPPu.plotLC(lc_file = lc, name = self.targetname, photmethod = m)
p.plot_lc(extensions = ['.ps', '.png'])
self.log.info('done with light curves')
self.run_success = True
# use recursion to handle sub if needed
if (self.photsub is True) and (sub is False):
self.generate_lc(sub = True)
def get_errors(self, method = 'sn6', kpix_rad = 20, skip_photsub = False, photsub = 'auto', ps = 0.7965,
host_ra = None, host_dec = None, rseed = None):
'''inject artificial stars of same mag as SN at each epoch and compute mags'''
self.log.info('doing artificial star simulation to determine errors')
# set seed
if rseed is not None:
np.random.seed(rseed)
# make directory for new generated data
if not os.path.exists(self.error_dir):
os.makedirs(self.error_dir)
if (photsub == 'auto') or (type(photsub) != type(True)):
photsub = self.photsub
# hard coded
n_stars = 30
# compute coords of n_stars around host
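# placement schemes: 'snhost' puts a ring of 10 stars at the SN-host separation, 'squares' uses a square pattern around the SN, and the default builds 5 concentric hexagons (30 stars) with radii in steps of kpix_rad KAIT pixels scaled to the image pixel scale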
def handle_img(img, ret_xy = False, method = method):
cs = WCS(header = img.header)
# get pix coords of sn
sn_x, sn_y = cs.all_world2pix(self.targetra, self.targetdec, 0)
# select appropriate method
if method == 'snhost':
# ring of radius equal to distance between sn and nucleus
n_stars = 10
host_x, host_y = cs.all_world2pix(host_ra, host_dec, 0)
theta_sn = np.arctan2(sn_y - host_y, sn_x - host_x) # angle relative to host
# coordinates of artificial stars
dtheta = np.linspace(2*np.pi/n_stars, 2*np.pi - 2*np.pi/n_stars, n_stars)
x = host_x + np.sqrt((sn_y - host_y)**2 + (sn_x - host_x)**2) * np.cos(theta_sn + dtheta)
y = host_y + np.sqrt((sn_y - host_y)**2 + (sn_x - host_x)**2) * np.sin(theta_sn + dtheta)
elif method == 'squares':
# square distribution as discussed w/ zwk and TdG
x_comp = np.cos(np.linspace(np.pi/4, 2*np.pi - np.pi / 4, 4))
x = sn_x + (kpix_rad * ps / img.pixscale) * np.concatenate([x_comp, 2 * x_comp, 2 * np.cos(np.pi/4) * np.array([1,0,-1,0])])
y_comp = np.sin(np.linspace(np.pi/4, 2*np.pi - np.pi / 4, 4))
y = sn_y + (kpix_rad * ps / img.pixscale) * np.concatenate([y_comp, 2 * y_comp, 2 * np.sin(np.pi/4) * np.array([0,1,0,-1])])
n_stars = len(x)
else:
# preferred method of concentric hexagons with radius increments of 20 KAIT pixels
dtheta = np.linspace(0, 2*np.pi, 7)[:-1]
x = sn_x + (kpix_rad * ps / img.pixscale) * np.concatenate((np.cos(dtheta), 2 * np.cos(dtheta + np.pi/6), 3 * np.cos(dtheta),
4 * np.cos(dtheta + np.pi/6), 5 * np.cos(dtheta)))
y = sn_y + (kpix_rad * ps / img.pixscale) * np.concatenate((np.sin(dtheta), 2 * np.sin(dtheta + np.pi/6), 3 * np.sin(dtheta),
4 * np.sin(dtheta + np.pi/6), 5 * np.sin(dtheta)))
n_stars = len(x)
# if just want pixel coords, return them along with WCS instance
if ret_xy is True:
return cs, x, y
# get magnitude of sn at this epoch
mag = np.nan
try:
if photsub is False:
mag = img.phot_raw.loc[-1, self.calmethod]
emag = img.phot_raw.loc[-1, self.calmethod + '_err']
else:
mag = img.phot_sub_raw.loc[-1, self.calmethod]
emag = img.phot_sub_raw.loc[-1, self.calmethod + '_err']
except AttributeError:
pass
if (np.isnan(mag)) or (np.isinf(mag)):
return False, None
# if random seed given, injected mags drawn from a gaussian of width set by uncertainty
if rseed is None:
inj_mags = [mag]*n_stars
else:
inj_mags = np.random.normal(mag, emag, n_stars).tolist()
assert n_stars == len(x)
# IDL call leads to new images in new directory
idl_cmd = '''idl -e "lpp_sim_fake_star, '{}', {}, {}, {}, OUTFILE='{}', PSFFITARRFILE='{}', /USENATURALMAG"'''.format(img.cimg,
x.tolist(), y.tolist(), inj_mags, os.path.join(self.error_dir, os.path.basename(img.cimg)), img.psffitarr)
stdout, stderr = LPPu.idl(idl_cmd)
self._log_idl(idl_cmd, stdout, stderr)
# do checks on success then return
if os.path.exists(os.path.join(self.error_dir, os.path.basename(img.cimg))):
return True, inj_mags
else:
return False, None
self.log.info('creating images with artificial stars')
succ = []
mags = []
for img in tqdm(self.phot_instances.loc[self.wIndex].tolist()):
s, m = handle_img(img)
succ.append(s)
if m is not None:
mags.append(m)
# drop images with no mag
self.wIndex = self.wIndex[pd.Series(succ)]
# instantiate pipeline instance and inherit many parent attributes
sn = LPP(self.targetname, interactive = False, parallel = self.parallel, cal_diff_tol = self.cal_diff_tol, force_color_term = self.force_color_term,
wdir = self.wdir, cal_use_common_ref_stars = self.cal_use_common_ref_stars, autoloadsave = False, sep_tol = self.sep_tol)
vs = vars(self).copy()
vs.pop('steps')
vs.pop('log')
vs.pop('phot_instances')
for v in vs.keys():
s = 'sn.{} = vs["{}"]'.format(v, v)
exec(s)
sn.interactive = False
self.log.info('running pipeline steps on images with artificial stars')
# change image paths and load instances
sn.image_list = sn.image_list.apply(lambda fl: os.path.join(self.error_dir, os.path.basename(fl)))
sn.phot_instances = sn._im2inst(sn.image_list.loc[sn.wIndex], mode = 'quiet')
# include artificial stars in radec
cs, x, y = handle_img(Phot(self.refname, calmethod = self.calmethod), ret_xy = True)
fake_ra, fake_dec = cs.all_pix2world(x, y, 0)
for img in sn.phot_instances.loc[sn.wIndex]:
img.radec = self.radec.append(pd.DataFrame({'RA': fake_ra, 'DEC': fake_dec}), ignore_index = True)
# run needed pipeline steps on those new images
if (skip_photsub is False) and (photsub is True):
sn.do_galaxy_subtraction_all_image()
sn.do_photometry_all_image()
sn.get_sky_all_image()
sn.calibrate(final_pass = True) # don't really care about calibration, but need to do to read results
# gather, organize and write
sn.lc_dir = self.lc_dir + '_sim'
sn.lc_base = os.path.join(sn.lc_dir, 'lightcurve_{}_'.format(self.targetname))
if not os.path.exists(sn.lc_dir):
os.makedirs(sn.lc_dir)
def get_res(idx, ps):
img = sn.phot_instances.loc[idx]
if ps is False:
#tmp = img.phot.iloc[-n_stars:].loc[:, 'Mag_obs']
tmp = img.phot_raw.iloc[-n_stars:].loc[:, sn.calmethod]
else:
#tmp = img.phot_sub.iloc[-n_stars:].loc[:, sn.calmethod]
tmp = img.phot_sub_raw.iloc[-n_stars:].loc[:, sn.calmethod]
self.phot_instances.loc[idx].sim_err = tmp.std()
return tmp
res = []
for idx in sn.wIndex:
res.append(get_res(idx, photsub))
res = pd.DataFrame(res, index = sn.wIndex)
res.columns = sn.phot_instances.loc[sn.wIndex[0]].phot.index[-n_stars:]
# put mags into DataFrame
mags = pd.DataFrame(mags, index = self.wIndex)
mags.columns = sn.phot_instances.loc[sn.wIndex[0]].phot.index[-n_stars:]
# write results
with open(os.path.join(sn.lc_dir, 'sim_{}_injmags.dat'.format(sn.calmethod)), 'w') as f:
f.write(mags.to_string())
with open(os.path.join(sn.lc_dir, 'sim_{}_recmags.dat'.format(sn.calmethod)), 'w') as f:
f.write(res.to_string())
# write updated errors to lc
self.write_sim_lc(sn = sn, mags = mags, res = res, photsub = photsub)
# save image with inj stars labeled
sn._display_refstars(x = x, y = y, labels = res.columns, save_fig = os.path.join(sn.lc_dir, 'inj_stars.png'))
sn.savefile = sn.savefile.replace('.sav', '_sim.sav')
sn.save()
self.save()
def write_sim_lc(self, sn = None, mags = None, res = None, photsub = 'auto', drop_inj = []):
'''write sim errs to light curves'''
if (photsub == 'auto') or (type(photsub) != type(True)):
photsub = self.photsub
# instantiate if needed
if sn is None:
sn = LPP(self.targetname, interactive = False)
sn.savefile = sn.savefile.replace('.sav', '_sim.sav')
sn.load()
# read mags and sim results if needed
if mags is None:
mags = pd.read_csv(os.path.join(sn.lc_dir, 'sim_{}_injmags.dat'.format(sn.calmethod)), delim_whitespace = True, index_col = 0)
mags.columns = mags.columns.astype('int')
if res is None:
res = pd.read_csv(os.path.join(sn.lc_dir, 'sim_{}_recmags.dat'.format(sn.calmethod)), delim_whitespace = True, index_col = 0)
res.columns = res.columns.astype('int')
# drop any specified injected stars
mags = mags.drop(drop_inj, axis = 1)
res = res.drop(drop_inj, axis = 1)
# compute result metrics
residuals = mags.loc[sn.wIndex] - res.loc[sn.wIndex]
r = pd.concat([sn.image_list.loc[sn.wIndex], res.mean(axis = 1), res.median(axis = 1), res.std(axis = 1), residuals.mean(axis = 1)], axis = 1)
r.columns = ('imagename', 'sim_mean_mag', 'sim_med_mag', 'sim_std_mag', 'mean_residual')
with open(os.path.join(sn.lc_dir, 'sim_{}_results.dat'.format(sn.calmethod)), 'w') as f:
f.write(r.to_string(index = False))
with open(os.path.join(sn.lc_dir, 'sim_{}_summary.dat'.format(sn.calmethod)), 'w') as f:
f.write(r.describe().round(3).to_string())
with open(os.path.join(sn.lc_dir, 'sim_{}_rec_mean_mags.dat'.format(sn.calmethod)), 'w') as f:
f.write(res.mean(axis = 0).round(3).to_string())
r['imagename'] = r['imagename'].str.replace(self.error_dir, self.data_dir)
# do all light curves (with full uncertainty as quadrature sum of three sources)
all_nat = []
all_std = []
columns = (';; MJD', 'etburst', 'mag', '-emag', '+emag', 'limmag', 'filter', 'imagename')
ps_choice = photsub
self.log.info('updating LC errors')
for ct in tqdm(self.color_terms_used.keys()):
# generate raw light curves
lc = pd.read_csv(self._lc_fname(ct, sn.calmethod, 'raw', sub = ps_choice), delim_whitespace = True, comment = ';', names = columns)
tmp = pd.merge(lc, r, on = 'imagename', how = 'left')
orig_stat_err = (tmp['+emag'] - tmp['-emag'])/2
new_err = np.sqrt(orig_stat_err**2 + tmp['sim_std_mag']**2)
tmp['-emag'] = round(tmp['mag'] - new_err, 5)
tmp['+emag'] = round(tmp['mag'] + new_err, 5)
lc_raw_name = sn._lc_fname(ct, sn.calmethod, 'raw', sub = ps_choice)
tmp.drop(['sim_mean_mag', 'sim_med_mag', 'sim_std_mag', 'mean_residual'], axis = 'columns').to_csv(lc_raw_name, sep = '\t', columns = columns,
index = False, na_rep = 'NaN')
p = LPPu.plotLC(lc_file = lc_raw_name, name = self.targetname, photmethod = self.calmethod)
p.plot_lc(extensions = ['.ps', '.png'])
# generate remaining light curves
group_succ, standard_succ = self.raw2standard_lc(lc_raw_name)
if group_succ is True:
all_nat.append((ct, sn._lc_fname(ct, sn.calmethod, 'group', sub = ps_choice)))
if standard_succ is True:
all_std.append(sn._lc_fname(ct, sn.calmethod, 'standard', sub = ps_choice))
# make "all" light curves
lc_nat = sn._lc_fname('all', sn.calmethod, 'group', sub = ps_choice)
concat_list = []
for row in all_nat:
tmp = pd.read_csv(row[1], delim_whitespace = True)
tmp.insert(3, 'SYSTEM', row[0])
concat_list.append(tmp)
if len(concat_list) > 0:
pd.concat(concat_list, sort = False).to_csv(lc_nat, sep = '\t', na_rep = 'NaN', index = False, float_format = '%6.3f')
p = LPPu.plotLC(lc_file = lc_nat, name = self.targetname, photmethod = self.calmethod)
p.plot_lc(extensions = ['.ps', '.png'])
lc = sn._lc_fname('all', self.calmethod, 'standard', sub = ps_choice)
concat_list = []
for fl in all_std:
concat_list.append(pd.read_csv(fl, delim_whitespace = True))
if len(concat_list) > 0:
pd.concat(concat_list, sort = False).to_csv(lc, sep = '\t', na_rep = 'NaN', index = False, float_format = '%6.3f')
p = LPPu.plotLC(lc_file = lc, name = self.targetname, photmethod = self.calmethod)
p.plot_lc(extensions = ['.ps', '.png'])
def write_summary(self):
'''write summary file'''
# get filters used
self.filters = set(self.phot_instances.loc[self.wIndex].apply(lambda img: img.filter.upper()))
ctu = self.color_terms_used
if ctu is not None:
ctu = ', '.join(ctu.keys())
stars = self.cal_IDs
if stars != 'all':
stars = ', '.join(self.cal_IDs.astype(str))
self.summary_file = self.targetname + '.summary'
with open(self.summary_file, 'w') as f:
f.write('{:<25}{}\n'.format('targetname', self.targetname))
f.write('{:<25}{}\n'.format('photsub', self.photsub))
f.write('{:<25}{}\n'.format('filters', ', '.join(self.filters)))
f.write('{:<25}{}\n'.format('apertures', ', '.join(self.photmethod)))
f.write('{:<25}{}\n'.format('calmethod', self.calmethod))
f.write('{:<25}{}\n'.format('color_terms', ctu))
f.write('{:<25}{}\n'.format('num images', len(self.phot_instances)))
f.write('{:<25}{}\n'.format('num failures', len(self.aIndex) - len(self.wIndex)))
f.write('{:<25}{}\n'.format('num non-sup. filt.', len(self.bfIndex)))
f.write('{:<25}{}\n'.format('num excl. by date', len(self.bdIndex)))
f.write('{:<25}{}\n'.format('num phot failures', len(self.pfIndex)))
f.write('{:<25}{}\n'.format('num cal failures', len(self.cfIndex)))
f.write('{:<25}{}\n'.format('num no obj', len(self.noIndex)))
f.write('{:<25}{}\n'.format('num manually removed', len(self.mrIndex)))
f.write('{:<25}{}\n'.format('cal source', self.cal_source))
f.write('{:<25}{}\n'.format('cal stars', stars))
f.write('{:<25}{}\n'.format('cal tolerance', round(self.cal_diff_tol, 2)))
f.write('{:<25}{}\n'.format('run successful', self.run_success))
self.log.info('pipeline complete, summary file written')
self.save()
def get_host_photometry(self, tel = 'nickel'):
'''do photometry of the host galaxy'''
# instantiate pipeline instance and inherit many parent attributes
sn = LPP(self.targetname, interactive = False, parallel = False, cal_diff_tol = self.cal_diff_tol, force_color_term = self.force_color_term,
wdir = self.wdir, cal_use_common_ref_stars = self.cal_use_common_ref_stars, autoloadsave = False, sep_tol = self.sep_tol)
# setup
sn.radec = self.radec
sn.image_list = pd.Series([self.template_images['{}_{}'.format(filt, tel)] for filt in 'B V R I'.split(' ')])
sn.phot_instances = sn._im2inst(sn.image_list, mode = 'quiet')
sn.wIndex = sn.image_list.index
sn.cal_arrays = self.cal_arrays
sn.cal_IDs = self.cal_IDs
# do photometry
sn.photsub = False
sn.do_photometry_all_image(forcesky = True)
sn.get_sky_all_image()
sn.calibrate(final_pass = True)
sn.get_zeromag_all_image()
sn.get_limmag_all_image()
sn.lc_dir = 'host_photometry'
sn.lc_base = os.path.join(sn.lc_dir, 'lightcurve_{}_host_'.format(sn.targetname))
sn.lc_ext = {'raw': '_natural_raw.dat',
'bin': '_natural_bin.dat',
'group': '_natural_group.dat',
'standard': '_standard.dat',
'ul': '_natural_ul.dat'}
sn.generate_lc()
###################################################################################################
# Utility Methods
###################################################################################################
def manual_remove(self, id, save_img = True):
'''manually remove an index (or list of indices) from consideration'''
if type(id) is int:
id = [id]
id = pd.Index(id)
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 14 13:14:38 2022
@author: mauro
"""
def plot_boxplot(input_data, fig_name):
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams.update({'font.size' : 10})
axis_font = {'fontname' : 'Arial', 'size' : '16'}
sns.boxplot(data=input_data)
#plt.ylim(0,22)
plt.xlabel("Case", **axis_font)
plt.ylabel("RMSE", **axis_font)
plt.savefig(fig_name)
plt.show()
if __name__ == '__main__':
import glob
import numpy as np
import pandas as pd
#file_path = glob.glob('./data/[!_]*.xlsx')
file_path = glob.glob('./data/*.xlsx')
df = pd.read_excel(file_path[0])
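# columns are sliced positionally below: 0-2 XGBoost, 3-5 Random Forest, 6-8 LSTM, each ordered Labic, UCI, Dincon; the first row is skipped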
# XGBoost
xgboost_labic = df.values[1:,:1].reshape(-1).astype(np.float32)
xgboost_uci = df.values[1:,1:2].reshape(-1).astype(np.float32)
xgboost_dincon = df.values[1:,2:3].reshape(-1).astype(np.float32)
# Random Forest
rf_labic = df.values[1:,3:4].reshape(-1).astype(np.float32)
rf_uci = df.values[1:,4:5].reshape(-1).astype(np.float32)
rf_dincon = df.values[1:,5:6].reshape(-1).astype(np.float32)
# LSTM
lstm_labic = df.values[1:,6:7].reshape(-1).astype(np.float32)
lstm_uci = df.values[1:,7:8].reshape(-1).astype(np.float32)
lstm_dincon = df.values[1:,8:9].reshape(-1).astype(np.float32)
# Labic
labic = pd.DataFrame()
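# Hedged completion sketch (assumed, not part of the original snippet): fill one DataFrame
# per dataset with the per-model columns extracted above and draw a boxplot for each.
# The output figure names are assumptions.
labic['XGBoost'] = xgboost_labic
labic['Random Forest'] = rf_labic
labic['LSTM'] = lstm_labic
uci = pd.DataFrame({'XGBoost': xgboost_uci, 'Random Forest': rf_uci, 'LSTM': lstm_uci})
dincon = pd.DataFrame({'XGBoost': xgboost_dincon, 'Random Forest': rf_dincon, 'LSTM': lstm_dincon})
plot_boxplot(labic, 'boxplot_labic.png')
plot_boxplot(uci, 'boxplot_uci.png')
plot_boxplot(dincon, 'boxplot_dincon.png')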