import csv
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserError
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
import pandas.core.common as com
from pandas.io.common import get_handle
MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"]
MIXED_INT_DTYPES = [
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
]
class TestDataFrameToCSV:
def read_csv(self, path, **kwargs):
params = {"index_col": 0, "parse_dates": True}
params.update(**kwargs)
return read_csv(path, **params)
def test_to_csv_from_csv1(self, float_frame, datetime_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv1__") as path:
float_frame["A"][:5] = np.nan
float_frame.to_csv(path)
float_frame.to_csv(path, columns=["A", "B"])
float_frame.to_csv(path, header=False)
float_frame.to_csv(path, index=False)
# test roundtrip
# freq does not roundtrip
datetime_frame.index = datetime_frame.index._with_freq(None)
datetime_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(datetime_frame, recons)
datetime_frame.to_csv(path, index_label="index")
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(datetime_frame.columns) + 1
# no index
datetime_frame.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(datetime_frame.values, recons.values)
# corner case
dm = DataFrame(
{
"s1": Series(range(3), index=np.arange(3)),
"s2": Series(range(2), index=np.arange(2)),
}
)
dm.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(dm, recons)
def test_to_csv_from_csv2(self, float_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv2__") as path:
# duplicate index
df = DataFrame(
np.random.randn(3, 3), index=["a", "a", "b"], columns=["x", "y", "z"]
)
df.to_csv(path)
result = self.read_csv(path)
tm.assert_frame_equal(result, df)
midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
df = DataFrame(np.random.randn(3, 3), index=midx, columns=["x", "y", "z"])
df.to_csv(path)
result = self.read_csv(path, index_col=[0, 1, 2], parse_dates=False)
tm.assert_frame_equal(result, df, check_names=False)
# column aliases
col_aliases = Index(["AA", "X", "Y", "Z"])
float_frame.to_csv(path, header=col_aliases)
rs = self.read_csv(path)
xp = float_frame.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
msg = "Writing 4 cols but got 2 aliases"
with pytest.raises(ValueError, match=msg):
float_frame.to_csv(path, header=["AA", "X"])
def test_to_csv_from_csv3(self):
with tm.ensure_clean("__tmp_to_csv_from_csv3__") as path:
df1 = DataFrame(np.random.randn(3, 1))
df2 = DataFrame(np.random.randn(3, 1))
df1.to_csv(path)
df2.to_csv(path, mode="a", header=False)
xp = pd.concat([df1, df2])
rs = read_csv(path, index_col=0)
rs.columns = [int(label) for label in rs.columns]
xp.columns = [int(label) for label in xp.columns]
tm.assert_frame_equal(xp, rs)
def test_to_csv_from_csv4(self):
with tm.ensure_clean("__tmp_to_csv_from_csv4__") as path:
# GH 10833 (TimedeltaIndex formatting)
dt = pd.Timedelta(seconds=1)
df = DataFrame(
{"dt_data": [i * dt for i in range(3)]},
index=Index([i * dt for i in range(3)], name="dt_index"),
)
df.to_csv(path)
result = read_csv(path, index_col="dt_index")
result.index = pd.to_timedelta(result.index)
result["dt_data"] = pd.to_timedelta(result["dt_data"])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_to_csv_from_csv5(self, timezone_frame):
# tz, 8260
with tm.ensure_clean("__tmp_to_csv_from_csv5__") as path:
timezone_frame.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=["A"])
converter = (
lambda c: to_datetime(result[c])
.dt.tz_convert("UTC")
.dt.tz_convert(timezone_frame[c].dt.tz)
)
result["B"] = converter("B")
result["C"] = converter("C")
tm.assert_frame_equal(result, timezone_frame)
def test_to_csv_cols_reordering(self):
# GH3454
chunksize = 5
N = int(chunksize * 2.5)
df = tm.makeCustomDataframe(N, 3)
cs = df.columns
cols = [cs[2], cs[0]]
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
tm.assert_frame_equal(df[cols], rs_c, check_names=False)
def test_to_csv_new_dupe_cols(self):
def _check_df(df, cols=None):
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
# we wrote them in a different order
# so compare them in that order
if cols is not None:
if df.columns.is_unique:
rs_c.columns = cols
else:
indexer, missing = df.columns.get_indexer_non_unique(cols)
rs_c.columns = df.columns.take(indexer)
for c in cols:
obj_df = df[c]
obj_rs = rs_c[c]
if isinstance(obj_df, Series):
tm.assert_series_equal(obj_df, obj_rs)
else:
tm.assert_frame_equal(obj_df, obj_rs, check_names=False)
# wrote in the same order
else:
rs_c.columns = df.columns
tm.assert_frame_equal(df, rs_c, check_names=False)
chunksize = 5
N = int(chunksize * 2.5)
# dupe cols
df = tm.makeCustomDataframe(N, 3)
df.columns = ["a", "a", "b"]
_check_df(df, None)
# dupe cols with selection
cols = ["b", "a"]
_check_df(df, cols)
@pytest.mark.slow
def test_to_csv_dtnat(self):
# GH3437
def make_dtnat_arr(n, nnat=None):
if nnat is None:
nnat = int(n * 0.1) # 10%
s = list(date_range("2000", freq="5min", periods=n))
if nnat:
for i in np.random.randint(0, len(s), nnat):
s[i] = NaT
i = np.random.randint(100)
s[-i] = NaT
s[i] = NaT
return s
chunksize = 1000
# N=35000
s1 = make_dtnat_arr(chunksize + 5)
s2 = make_dtnat_arr(chunksize + 5, 0)
# s3=make_dtnjat_arr(chunksize+5,0)
with tm.ensure_clean("1.csv") as pth:
df = DataFrame({"a": s1, "b": s2})
df.to_csv(pth, chunksize=chunksize)
recons = self.read_csv(pth).apply(to_datetime)
tm.assert_frame_equal(df, recons, check_names=False)
@pytest.mark.slow
def test_to_csv_moar(self):
def _do_test(
df, r_dtype=None, c_dtype=None, rnlvl=None, cnlvl=None, dupe_col=False
):
kwargs = {"parse_dates": False}
if cnlvl:
if rnlvl is not None:
kwargs["index_col"] = list(range(rnlvl))
kwargs["header"] = list(range(cnlvl))
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
df.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
else:
kwargs["header"] = 0
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
df.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
def _to_uni(x):
if not isinstance(x, str):
return x.decode("utf8")
return x
if dupe_col:
# read_csv disambiguates duplicate columns by
# labeling them dupe.1, dupe.2, etc. Monkey-patch the columns back.
recons.columns = df.columns
if rnlvl and not cnlvl:
delta_lvl = [recons.iloc[:, i].values for i in range(rnlvl - 1)]
ix = MultiIndex.from_arrays([list(recons.index)] + delta_lvl)
recons.index = ix
recons = recons.iloc[:, rnlvl - 1 :]
type_map = {"i": "i", "f": "f", "s": "O", "u": "O", "dt": "O", "p": "O"}
if r_dtype:
if r_dtype == "u": # unicode
r_dtype = "O"
recons.index = np.array(
[_to_uni(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[_to_uni(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "dt": # datetime
r_dtype = "O"
recons.index = np.array(
[Timestamp(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[Timestamp(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "p":
r_dtype = "O"
idx_list = to_datetime(recons.index)
recons.index = np.array(
[Timestamp(label) for label in idx_list], dtype=r_dtype
)
df.index = np.array(
list(map(Timestamp, df.index.to_timestamp())), dtype=r_dtype
)
else:
r_dtype = type_map.get(r_dtype)
recons.index = np.array(recons.index, dtype=r_dtype)
df.index = np.array(df.index, dtype=r_dtype)
if c_dtype:
if c_dtype == "u":
c_dtype = "O"
recons.columns = np.array(
[_to_uni(label) for label in recons.columns], dtype=c_dtype
)
df.columns = np.array(
[_to_uni(label) for label in df.columns], dtype=c_dtype
)
elif c_dtype == "dt":
c_dtype = "O"
recons.columns = np.array(
[Timestamp(label) for label in recons.columns], dtype=c_dtype
)
df.columns = np.array(
[Timestamp(label) for label in df.columns], dtype=c_dtype
)
elif c_dtype == "p":
c_dtype = "O"
col_list = to_datetime(recons.columns)
recons.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
col_list = df.columns.to_timestamp()
df.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
else:
c_dtype = type_map.get(c_dtype)
recons.columns = np.array(recons.columns, dtype=c_dtype)
df.columns = np.array(df.columns, dtype=c_dtype)
tm.assert_frame_equal(df, recons, check_names=False)
N = 100
chunksize = 1000
ncols = 4
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(nrows, ncols, r_idx_type="dt", c_idx_type="s"),
"dt",
"s",
)
for r_idx_type, c_idx_type in [("i", "i"), ("s", "s"), ("u", "dt"), ("p", "p")]:
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_type=r_idx_type, c_idx_type=c_idx_type
),
r_idx_type,
c_idx_type,
)
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols))
for nrows in [10, N - 2, N - 1, N, N + 1, N + 2]:
df = tm.makeCustomDataframe(nrows, 3)
cols = list(df.columns)
cols[:2] = ["dupe", "dupe"]
cols[-2:] = ["dupe", "dupe"]
ix = list(df.index)
ix[:2] = ["rdupe", "rdupe"]
ix[-2:] = ["rdupe", "rdupe"]
df.index = ix
df.columns = cols
_do_test(df, dupe_col=True)
_do_test(DataFrame(index=np.arange(10)))
_do_test(
tm.makeCustomDataframe(chunksize // 2 + 1, 2, r_idx_nlevels=2), rnlvl=2
)
for ncols in [2, 3, 4]:
base = int(chunksize // ncols)
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols, r_idx_nlevels=2), rnlvl=2)
_do_test(tm.makeCustomDataframe(nrows, ncols, c_idx_nlevels=2), cnlvl=2)
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_nlevels=2, c_idx_nlevels=2
),
rnlvl=2,
cnlvl=2,
)
def test_to_csv_from_csv_w_some_infs(self, float_frame):
# test roundtrip with inf, -inf, nan, as full columns and mix
float_frame["G"] = np.nan
f = lambda x: [np.inf, np.nan][np.random.rand() < 0.5]
float_frame["H"] = float_frame.index.map(f)
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons))
def test_to_csv_from_csv_w_all_infs(self, float_frame):
# test roundtrip with inf, -inf, nan, as full columns and mix
float_frame["E"] = np.inf
float_frame["F"] = -np.inf
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons))
def test_to_csv_no_index(self):
# GH 3624, after appending columns, to_csv fails
with tm.ensure_clean("__tmp_to_csv_no_index__") as path:
df = DataFrame({"c1": [1, 2, 3], "c2": [4, 5, 6]})
df.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(df, result)
df["c3"] = Series([7, 8, 9], dtype="int64")
df.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(df, result)
def test_to_csv_with_mix_columns(self):
# gh-11637: incorrect output when a mix of integer and string column
# names passed as columns parameter in to_csv
df = DataFrame({0: ["a", "b", "c"], 1: ["aa", "bb", "cc"]})
df["test"] = "txt"
assert df.to_csv() == df.to_csv(columns=[0, 1, "test"])
def test_to_csv_headers(self):
# GH6186, the presence or absence of `index` incorrectly
# causes to_csv to have different header semantics.
from_df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
to_df = DataFrame([[1, 2], [3, 4]], columns=["X", "Y"])
with tm.ensure_clean("__tmp_to_csv_headers__") as path:
from_df.to_csv(path, header=["X", "Y"])
recons = self.read_csv(path)
tm.assert_frame_equal(to_df, recons)
from_df.to_csv(path, index=False, header=["X", "Y"])
recons = self.read_csv(path)
return_value = recons.reset_index(inplace=True)
assert return_value is None
tm.assert_frame_equal(to_df, recons)
def test_to_csv_multiindex(self, float_frame, datetime_frame):
frame = float_frame
old_index = frame.index
arrays = np.arange(len(old_index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays, names=["first", "second"])
frame.index = new_index
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
# Credit card default rate analysis
import pandas as pd
import seaborn as sns
from sklearn.svm import SVC
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
# Load the data
data = pd.read_csv('./UCI_Credit_Card.csv')
# Explore the data
print(data.shape) # size of the dataset
print(data.describe()) # overview of the dataset
# Look at the distribution of next month's default status
next_month = data['default.payment.next.month'].value_counts()
print(next_month)
df = pd.DataFrame({'default.payment.next.month': next_month.index, 'values': next_month.values})
# -*- coding: utf-8 -*-
'''Questionnaire data analysis toolkit
Created on Tue Nov 8 20:05:36 2016
@author: JSong
1. Implements and wraps many algorithms commonly used on wenjuanxing survey data
2. Together with the report package, results can be exported directly to PPTX
The toolkit supports the following features:
1. Encoding data exported from wenjuanxing, wenjuanwang and similar platforms
2. Wrapped functions for descriptive statistics and cross analysis
3. Generating a complete report together with the underlying data
'''
import os
import re
import sys
import math
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from .. import report as rpt
from .. import associate
__all__=['read_code',
'save_code',
'spec_rcode',
'dataText_to_code',
'dataCode_to_text',
'var_combine',
'wenjuanwang',
'wenjuanxing',
'load_data',
'read_data',
'save_data',
'data_merge',
'clean_ftime',
'data_auto_code',
'qdata_flatten',
'sample_size_cal',
'confidence_interval',
'gof_test',
'chi2_test',
'fisher_exact',
'anova',
'mca',
'cluster',
'scatter',
'sankey',
'qtable',
'association_rules',
'contingency',
'cross_chart',
'summary_chart',
'onekey_gen',
'scorpion']
#=================================================================
#
#
# [Questionnaire data processing]
#
#
#==================================================================
def read_code(filename):
'''Read a codebook file and return it as a dict
1. Supports JSON files
2. Supports the xlsx layout defined by this package
see also save_code
'''
file_type=os.path.splitext(filename)[1][1:]
if file_type == 'json':
import json
code=json.load(filename)
return code
d=pd.read_excel(filename,header=None)
d=d[d.any(axis=1)]# drop empty rows
d.fillna('NULL',inplace=True)
d=d.as_matrix()
code={}
for i in range(len(d)):
tmp=d[i,0].strip()
if tmp == 'key':
# identify the question number
code[d[i,1]]={}
key=d[i,1]
elif tmp in ['qlist','code_order']:
# fields whose value is a list
ind=np.argwhere(d[i+1:,0]!='NULL')
if len(ind)>0:
j=i+1+ind[0][0]
else:
j=len(d)
tmp2=list(d[i:j,1])
# normalize strings in the list: strip surrounding whitespace
for i in range(len(tmp2)):
if isinstance(tmp2[i],str):
tmp2[i]=tmp2[i].strip()
code[key][tmp]=tmp2
elif tmp in ['code','code_r']:
# fields whose value is a dict
ind=np.argwhere(d[i+1:,0]!='NULL')
if len(ind)>0:
j=i+1+ind[0][0]
else:
j=len(d)
tmp1=list(d[i:j,1])
tmp2=list(d[i:j,2])
for i in range(len(tmp2)):
if isinstance(tmp2[i],str):
tmp2[i]=tmp2[i].strip()
#tmp2=[s.strip() for s in tmp2 if isinstance(s,str) else s]
code[key][tmp]=dict(zip(tmp1,tmp2))
# other list-valued fields
elif (tmp!='NULL') and (d[i,2]=='NULL') and ((i==len(d)-1) or (d[i+1,0]=='NULL')):
ind=np.argwhere(d[i+1:,0]!='NULL')
if len(ind)>0:
j=i+1+ind[0][0]
else:
j=len(d)
if i==len(d)-1:
code[key][tmp]=d[i,1]
else:
tmp2=list(d[i:j,1])
for i in range(len(tmp2)):
if isinstance(tmp2[i],str):
tmp2[i]=tmp2[i].strip()
code[key][tmp]=tmp2
# other dict-valued fields
elif (tmp!='NULL') and (d[i,2]!='NULL') and ((i==len(d)-1) or (d[i+1,0]=='NULL')):
ind=np.argwhere(d[i+1:,0]!='NULL')
if len(ind)>0:
j=i+1+ind[0][0]
else:
j=len(d)
tmp1=list(d[i:j,1])
tmp2=list(d[i:j,2])
for i in range(len(tmp2)):
if isinstance(tmp2[i],str):
tmp2[i]=tmp2[i].strip()
#tmp2=[s.strip() for s in tmp2 if isinstance(s,str) else s]
code[key][tmp]=dict(zip(tmp1,tmp2))
elif tmp == 'NULL':
continue
else:
code[key][tmp]=d[i,1]
return code
def save_code(code,filename='code.xlsx'):
'''Save a codebook to disk
1. JSON output, chosen automatically from the file extension
2. Excel output otherwise
see also read_code
'''
save_type=os.path.splitext(filename)[1][1:]
if save_type == 'json':
code=pd.DataFrame(code)
code.to_json(filename,force_ascii=False)
return
tmp=pd.DataFrame(columns=['name','value1','value2'])
i=0
if all(['Q' in c[0] for c in code.keys()]):
key_qlist=sorted(code,key=lambda c:int(re.findall('\d+',c)[0]))
else:
key_qlist=code.keys()
for key in key_qlist:
code0=code[key]
tmp.loc[i]=['key',key,'']
i+=1
#print(key)
for key0 in code0:
tmp2=code0[key0]
if (type(tmp2) == list) and tmp2:
tmp.loc[i]=[key0,tmp2[0],'']
i+=1
for ll in tmp2[1:]:
tmp.loc[i]=['',ll,'']
i+=1
elif (type(tmp2) == dict) and tmp2:
try:
tmp2_key=sorted(tmp2,key=lambda c:float(re.findall('[\d\.]+','%s'%c)[-1]))
except:
tmp2_key=list(tmp2.keys())
j=0
for key1 in tmp2_key:
if j==0:
tmp.loc[i]=[key0,key1,tmp2[key1]]
else:
tmp.loc[i]=['',key1,tmp2[key1]]
i+=1
j+=1
else:
if tmp2:
tmp.loc[i]=[key0,tmp2,'']
i+=1
if sys.version>'3':
tmp.to_excel(filename,index=False,header=False)
else:
tmp.to_csv(filename,index=False,header=False,encoding='utf-8')
'''Questionnaire data import and encoding
Each question is encoded as follows (questions are numbered Q1, Q2, ... by default):
Qn.content: question text
Qn.qtype: question type: single choice, multiple choice, fill-in, ranking, matrix single choice, etc.
Qn.qlist: list of data columns belonging to the question, e.g. a multiple-choice question maps to several columns
Qn.code: dict, encoding of the question's options
Qn.code_r: encoding of the rows (matrix questions only)
Qn.code_order: display order of the categories, used when generating the PPT report [usually added later]
Qn.name: special question type, e.g. city question, NPS question, etc.
Qn.weight: dict, weight of each option
An illustrative example entry is sketched below.
'''
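# Illustrative sketch (not part of the original package): one possible entry of the
# `code` dict described above, for a hypothetical multiple-choice question Q3.
# All names and values here are invented for illustration only.
_example_code_entry = {
    'Q3': {
        'content': 'Which channels do you use?',            # question text
        'qtype': u'多选题',                                   # multiple choice
        'qlist': ['Q3_A1', 'Q3_A2', 'Q3_A3'],                # one 0/1 column per option
        'code': {'Q3_A1': 'App', 'Q3_A2': 'Web', 'Q3_A3': 'Store'},
        'code_order': ['App', 'Web', 'Store'],               # optional display order
    }
}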
def dataText_to_code(df,sep,qqlist=None):
'''Encode text answers (multiple choice / ranking) into numeric columns
'''
if sep in [';','┋']:
qtype='多选题'
elif sep in ['-->','→']:
qtype='排序题'
if not qqlist:
qqlist=df.columns
# handle multiple-choice questions
code={}
for qq in qqlist:
tmp=df[qq].map(lambda x : x.split(sep) if isinstance(x,str) else [])
item_list=sorted(set(tmp.sum()))
if qtype == '多选题':
tmp=tmp.map(lambda x: [int(t in x) for t in item_list])
code_tmp={'code':{},'qtype':u'多选题','qlist':[],'content':qq}
elif qtype == '排序题':
tmp=tmp.map(lambda x:[x.index(t)+1 if t in x else np.nan for t in item_list])
code_tmp={'code':{},'qtype':u'排序题','qlist':[],'content':qq}
for i,t in enumerate(item_list):
column_name='{}_A{:.0f}'.format(qq,i+1)
df[column_name]=tmp.map(lambda x:x[i])
code_tmp['code'][column_name]=item_list[i]
code_tmp['qlist']=code_tmp['qlist']+[column_name]
code[qq]=code_tmp
df.drop(qq,axis=1,inplace=True)
return df,code
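# Illustrative sketch (invented data, not from the original package): encode a
# multiple-choice column whose answers are stored as '┋'-separated text, which is
# the input format dataText_to_code expects.
def _dataText_to_code_example():
    df = pd.DataFrame({'Q5': ['App┋Web', 'Web', 'Store┋App', np.nan]})
    df, code = dataText_to_code(df, sep='┋', qqlist=['Q5'])
    # df now holds 0/1 columns Q5_A1..Q5_A3; code['Q5']['code'] maps them back to the option text
    return df, code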
def dataCode_to_text(df,code=None):
'''Convert numerically coded answers back into text
'''
if df.max().max()>1:
sep='→'
else:
sep='┋'
if code:
df=df.rename(columns=code)# map column names to their option text
qlist=list(df.columns)
df['text']=np.nan
if sep in ['┋']:
for i in df.index:
w=df.loc[i,:]==1
df.loc[i,'text']=sep.join(list(w.index[w]))
elif sep in ['→']:
for i in df.index:
w=df.loc[i,:]
w=w[w>=1].sort_values()
df.loc[i,'text']=sep.join(list(w.index))
df.drop(qlist,axis=1,inplace=True)
return df
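# Illustrative sketch (invented data): turn dummy-coded multiple-choice columns back
# into '┋'-separated text, the inverse direction of dataText_to_code above.
def _dataCode_to_text_example():
    df = pd.DataFrame({'Q5_A1': [1, 0, 1], 'Q5_A2': [0, 1, 1]})
    # the code dict maps each column to its option label
    return dataCode_to_text(df, code={'Q5_A1': 'App', 'Q5_A2': 'Web'})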
def var_combine(data,code,qq1,qq2,sep=',',qnum_new=None,qname_new=None):
'''Combine two variables into a single variable
For example:
Q1: gender, Q2: age
after combining, the new variable's options become:
1. male_16~19
2. male_20~40
3. female_16~19
4. female_20~40
'''
if qnum_new is None:
if 'Q'==qq2[0]:
qnum_new=qq1+'_'+qq2[1:]
else:
qnum_new=qq1+'_'+qq2
if qname_new is None:
qname_new=code[qq1]['content']+'_'+code[qq2]['content']
if code[qq1]['qtype']!='单选题' or code[qq2]['qtype']!='单选题':
print('只支持组合两个单选题,请检查.')
raise
d1=data[code[qq1]['qlist'][0]]
d2=data[code[qq2]['qlist'][0]]
sm=max(code[qq1]['code'].keys())# radix of the first variable
sn=max(code[qq2]['code'].keys())# radix of the second variable
if isinstance(sm,str) or isinstance(sn,str):
print('所选择的两个变量不符合函数要求.')
raise
data[qnum_new]=(d1-1)*sn+d2
code[qnum_new]={'qtype':'单选题','qlist':[qnum_new],'content':qname_new}
code_tmp={}
for c1 in code[qq1]['code']:
for c2 in code[qq2]['code']:
cc=(c1-1)*sn+c2
value='{}{}{}'.format(code[qq1]['code'][c1],sep,code[qq2]['code'][c2])
code_tmp[cc]=value
code[qnum_new]['code']=code_tmp
print('变量已合并,新变量题号为:{}'.format(qnum_new))
return data,code
def wenjuanwang(filepath='.\\data',encoding='gbk'):
'''Import and encode wenjuanwang survey data
Input:
filepath:
list: [0] path to the readable (text) data, [1] path to the coded (numeric) data, [2] path to the code file
or a directory path; the function searches that directory for the relevant files
Output:
(data,code):
data: numerically coded data, with question names replaced by Q_n
code: the codebook, which can be exported to JSON or Excel with save_code()
'''
if isinstance(filepath,list):
filename1=filepath[0]
filename2=filepath[1]
filename3=filepath[2]
elif os.path.isdir(filepath):
filename1=os.path.join(filepath,'All_Data_Readable.csv')
filename2=os.path.join(filepath,'All_Data_Original.csv')
filename3=os.path.join(filepath,'code.csv')
else:
print('can not detect the filepath!')
d1=pd.read_csv(filename1,encoding=encoding)
d1.drop([u'答题时长'],axis=1,inplace=True)
d2=pd.read_csv(filename2,encoding=encoding)
d3=pd.read_csv(filename3,encoding=encoding,header=None,na_filter=False)
d3=d3.as_matrix()
# walk through code.csv to build a rough codebook; qlist and code_r for matrix questions are filled in later
code={}
for i in range(len(d3)):
if d3[i,0]:
key=d3[i,0]
code[key]={}
code[key]['content']=d3[i,1]
code[key]['qtype']=d3[i,2]
code[key]['code']={}
code[key]['qlist']=[]
elif d3[i,2]:
tmp=d3[i,1]
if code[key]['qtype'] in [u'多选题',u'排序题']:
tmp=key+'_A'+'%s'%(tmp)
code[key]['code'][tmp]='%s'%(d3[i,2])
code[key]['qlist'].append(tmp)
elif code[key]['qtype'] in [u'单选题']:
try:
tmp=int(tmp)
except:
tmp='%s'%(tmp)
code[key]['code'][tmp]='%s'%(d3[i,2])
code[key]['qlist']=[key]
elif code[key]['qtype'] in [u'填空题']:
code[key]['qlist']=[key]
else:
try:
tmp=int(tmp)
except:
tmp='%s'%(tmp)
code[key]['code'][tmp]='%s'%(d3[i,2])
# fill in code_r and qlist for matrix single-choice questions
qnames_Readable=list(d1.columns)
qnames=list(d2.columns)
for key in code.keys():
qlist=[]
for name in qnames:
if re.match(key+'_',name) or key==name:
qlist.append(name)
if ('qlist' not in code[key]) or (not code[key]['qlist']):
code[key]['qlist']=qlist
if code[key]['qtype'] in [u'矩阵单选题']:
tmp=[qnames_Readable[qnames.index(q)] for q in code[key]['qlist']]
code_r=[re.findall('_([^_]*?)$',t)[0] for t in tmp]
code[key]['code_r']=dict(zip(code[key]['qlist'],code_r))
# handle the timestamp format
d2['start']=pd.to_datetime(d2['start'])
d2['finish']=pd.to_datetime(d2['finish'])
tmp=d2['finish']-d2['start']
tmp=tmp.astype(str).map(lambda x:60*int(re.findall(':(\d+):',x)[0])+int(re.findall(':(\d+)\.',x)[0]))
ind=np.where(d2.columns=='finish')[0][0]
d2.insert(int(ind)+1,u'答题时长(秒)',tmp)
return (d2,code)
def wenjuanxing(filepath='.\\data',headlen=6):
'''Import and encode wenjuanxing survey data
Input:
filepath:
list: filepath[0] (e.g. 23_22_0.xls) readable/text data, filepath[1] (e.g. 23_22_2.xls) coded/numeric data
or a directory path; the function searches it for files matching \d+_\d+_0.xls and \d+_\d+_2.xls
headlen: number of leading metadata columns in the wenjuanxing export
Output:
(data,code):
data: numerically coded data, with question names replaced by Q_n
code: the codebook, which can be exported to JSON or Excel with save_code()
'''
#filepath='.\\data'
#headlen=6# number of columns before the first real question (serial number, submission time, etc.)
if isinstance(filepath,list):
filename1=filepath[0]
filename2=filepath[1]
elif os.path.isdir(filepath):
filelist=os.listdir(filepath)
n1=n2=0
for f in filelist:
s1=re.findall('\d+_\d+_0.xls',f)
s2=re.findall('\d+_\d+_2.xls',f)
if s1:
filename1=s1[0]
n1+=1
if s2:
filename2=s2[0]
n2+=1
if n1+n2==0:
print(u'在文件夹下没有找到问卷星按序号和按文本数据,请检查目录或者工作目录.')
return
elif n1+n2>2:
print(u'存在多组问卷星数据,请检查.')
return
filename1=os.path.join(filepath,filename1)
filename2=os.path.join(filepath,filename2)
else:
print('can not detect the filepath!')
d1=pd.read_excel(filename1)
d2=pd.read_excel(filename2)
d2.replace({-2:np.nan,-3:np.nan},inplace=True)
#d1.replace({u'(跳过)':np.nan},inplace=True)
code={}
'''
First pass over the readable (text) data: collect the question numbers and each question's type
'''
for name in d1.columns[headlen:]:
tmp=re.findall(u'^(\d{1,3})[、::]',name)
# detect multiple-choice and ranking questions
if tmp:
new_name='Q'+tmp[0]
current_name='Q'+tmp[0]
code[new_name]={}
content=re.findall(u'\d{1,3}[、::](.*)',name)
code[new_name]['content']=content[0]
d1.rename(columns={name:new_name},inplace=True)
code[new_name]['qlist']=[]
code[new_name]['code']={}
code[new_name]['qtype']=''
code[new_name]['name']=''
qcontent=str(list(d1[new_name]))
# options of single- and multiple-choice questions may carry open-ended sub-answers; detect them
if ('〖' in qcontent) and ('〗' in qcontent):
code[new_name]['qlist_open']=[]
if '┋' in qcontent:
code[new_name]['qtype']=u'多选题'
elif '→' in qcontent:
code[new_name]['qtype']=u'排序题'
# detect matrix single-choice questions
else:
tmp2=re.findall(u'^第(\d{1,3})题\(.*?\)',name)
if tmp2:
new_name='Q'+tmp2[0]
else:
pass
if new_name not in code.keys():
j=1
current_name=new_name
new_name=new_name+'_R%s'%j
code[current_name]={}
code[current_name]['content']=current_name+'(问卷星数据中未找到题目具体内容)'
code[current_name]['qlist']=[]
code[current_name]['code']={}
code[current_name]['code_r']={}
code[current_name]['qtype']=u'矩阵单选题'
code[current_name]['name']=''
#code[current_name]['sample_len']=0
d1.rename(columns={name:new_name},inplace=True)
else:
j+=1
new_name=new_name+'_R%s'%j
d1.rename(columns={name:new_name},inplace=True)
#raise Exception(u"can not dection the NO. of question.")
#print('can not dection the NO. of question')
#print(name)
#pass
# second pass over the coded (numeric) data to complete the codebook
d2qlist=d2.columns[6:].tolist()
for name in d2qlist:
tmp1=re.findall(u'^(\d{1,3})[、::]',name)# single-choice and fill-in questions
tmp2=re.findall(u'^第(.*?)题',name)# multiple-choice, ranking and matrix single-choice questions
if tmp1:
current_name='Q'+tmp1[0]# question number of the current question
d2.rename(columns={name:current_name},inplace=True)
code[current_name]['qlist'].append(current_name)
#code[current_name]['sample_len']=d2[current_name].count()
ind=d2[current_name].copy()
ind=ind.notnull()
c1=d1.loc[ind,current_name].unique()
c2=d2.loc[ind,current_name].unique()
#print('========= %s========'%current_name)
if (c2.dtype == object) or ((list(c1)==list(c2)) and len(c2)>=min(15,len(d2[ind]))) or (len(c2)>50):
code[current_name]['qtype']=u'填空题'
else:
code[current_name]['qtype']=u'单选题'
#code[current_name]['code']=dict(zip(c2,c1))
if 'qlist_open' in code[current_name].keys():
tmp=d1[current_name].map(lambda x: re.findall('〖(.*?)〗',x)[0] if re.findall('〖(.*?)〗',x) else '')
ind_open=np.argwhere(d2.columns.values==current_name).tolist()[0][0]
d2.insert(ind_open+1,current_name+'_open',tmp)
d1[current_name]=d1[current_name].map(lambda x: re.sub('〖.*?〗','',x))
#c1=d1.loc[ind,current_name].map(lambda x: re.sub('〖.*?〗','',x)).unique()
code[current_name]['qlist_open']=[current_name+'_open']
#c2_tmp=d2.loc[ind,current_name].map(lambda x: int(x) if (('%s'%x!='nan') and not(isinstance(x,str)) and (int(x)==x)) else x)
code[current_name]['code']=dict(zip(d2.loc[ind,current_name],d1.loc[ind,current_name]))
#code[current_name]['code']=dict(zip(c2,c1))
elif tmp2:
name0='Q'+tmp2[0]
# first option of a new question
if name0 != current_name:
j=1# j tracks the sub-question number within the question
current_name=name0
c2=list(d2[name].unique())
if code[current_name]['qtype'] == u'矩阵单选题':
name1='Q'+tmp2[0]+'_R%s'%j
c1=list(d1[name1].unique())
code[current_name]['code']=dict(zip(c2,c1))
#print(dict(zip(c2,c1)))
else:
name1='Q'+tmp2[0]+'_A%s'%j
#code[current_name]['sample_len']=d2[name].notnull().sum()
else:
j+=1# next sub-question number within the question
c2=list(d2[name].unique())
if code[current_name]['qtype'] == u'矩阵单选题':
name1='Q'+tmp2[0]+'_R%s'%j
c1=list(d1[name1].unique())
old_dict=code[current_name]['code'].copy()
new_dict=dict(zip(c2,c1))
old_dict.update(new_dict)
code[current_name]['code']=old_dict.copy()
else:
name1='Q'+tmp2[0]+'_A%s'%j
code[current_name]['qlist'].append(name1)
d2.rename(columns={name:name1},inplace=True)
tmp3=re.findall(u'第.*?题\((.*)\)',name)[0]
if code[current_name]['qtype'] == u'矩阵单选题':
code[current_name]['code_r'][name1]=tmp3
else:
code[current_name]['code'][name1]=tmp3
# detect open-ended sub-answers
if (code[current_name]['qtype'] == u'多选题'):
openq=tmp3+'〖.*?〗'
openq=re.sub('\)','\)',openq)
openq=re.sub('\(','\(',openq)
openq=re.compile(openq)
qcontent=str(list(d1[current_name]))
if re.findall(openq,qcontent):
tmp=d1[current_name].map(lambda x: re.findall(openq,x)[0] if re.findall(openq,x) else '')
ind=np.argwhere(d2.columns.values==name1).tolist()[0][0]
d2.insert(ind+1,name1+'_open',tmp)
code[current_name]['qlist_open'].append(name1+'_open')
# remove nan keys from the code dict
keys=list(code[current_name]['code'].keys())
for key in keys:
if '%s'%key == 'nan':
del code[current_name]['code'][key]
# post-process special questions and give their options a fixed order, e.g. age, income
for k in code.keys():
content=code[k]['content']
qtype=code[k]['qtype']
if ('code' in code[k]) and (code[k]['code']!={}):
tmp1=code[k]['code'].keys()
tmp2=code[k]['code'].values()
# check whether the options form an ordered variable
tmp3=[len(re.findall('\d+','%s'%v))>0 for v in tmp2]# contains digits
tmp4=[len(re.findall('-|~','%s'%v))>0 for v in tmp2]# contains "-" or "~"
if (np.array(tmp3).sum()>=len(tmp2)-2) or (np.array(tmp4).sum()>=len(tmp2)*0.8-(1e-17)):
try:
tmp_key=sorted(code[k]['code'],key=lambda c:float(re.findall('[\d\.]+','%s'%c)[-1]))
except:
tmp_key=list(tmp1)
code_order=[code[k]['code'][v] for v in tmp_key]
code[k]['code_order']=code_order
# detect matrix rating-scale questions
if qtype=='矩阵单选题':
tmp3=[int(re.findall('\d+','%s'%v)[0]) for v in tmp2 if re.findall('\d+','%s'%v)]
if (set(tmp3)<=set([0,1,2,3,4,5,6,7,8,9,10])) and (len(tmp3)==len(tmp2)):
code[k]['weight']=dict(zip(tmp1,tmp3))
continue
# detect special question types (gender, age, satisfaction, NPS)
if ('性别' in content) and ('男' in tmp2) and ('女' in tmp2):
code[k]['name']='性别'
if ('gender' in content.lower()) and ('Male' in tmp2) and ('Female' in tmp2):
code[k]['name']='性别'
if (('年龄' in content) or ('age' in content.lower())) and (np.array(tmp3).sum()>=len(tmp2)-1):
code[k]['name']='年龄'
if ('满意度' in content) and ('整体' in content):
tmp3=[int(re.findall('\d+','%s'%v)[0]) for v in tmp2 if re.findall('\d+','%s'%v)]
if set(tmp3)<=set([0,1,2,3,4,5,6,7,8,9,10]):
code[k]['name']='满意度'
if len(tmp3)==len(tmp2):
code[k]['weight']=dict(zip(tmp1,tmp3))
if ('意愿' in content) and ('推荐' in content):
tmp3=[int(re.findall('\d+','%s'%v)[0]) for v in tmp2 if re.findall('\d+','%s'%v)]
if set(tmp3)<=set([0,1,2,3,4,5,6,7,8,9,10]):
code[k]['name']='NPS'
if len(tmp3)==len(tmp2):
weight=pd.Series(dict(zip(tmp1,tmp3)))
weight=weight.replace(dict(zip([0,1,2,3,4,5,6,7,8,9,10],[-100,-100,-100,-100,-100,-100,-100,0,0,100,100])))
code[k]['weight']=weight.to_dict()
try:
d2[u'所用时间']=d2[u'所用时间'].map(lambda s: int(s[:-1]))
except:
pass
return (d2,code)
def load_data(method='filedialog',**kwargs):
'''Load questionnaire data
# currently only pre-encoded data and wenjuanxing exports are supported
1. search a given path
2. pick files interactively
method:
-filedialog: open a file dialog to choose the files
-pathsearch: search a path automatically; filepath must be provided
'''
if method=='filedialog':
import tkinter as tk
from tkinter.filedialog import askopenfilenames
tk.Tk().withdraw();
#print(u'请选择编码所需要的数据文件(支持问卷星和已编码好的数据)')
if 'initialdir' in kwargs:
initialdir=kwargs['initialdir']
elif os.path.isdir('.\\data'):
initialdir = ".\\data"
else:
initialdir = "."
title =u"请选择编码所需要的数据文件(支持问卷星和已编码好的数据)"
filetypes = (("Excel files","*.xls;*.xlsx"),("CSV files","*.csv"),("all files","*.*"))
filenames=[]
while len(filenames)<1:
filenames=askopenfilenames(initialdir=initialdir,title=title,filetypes=filetypes)
if len(filenames)<1:
print('请至少选择一个文件.')
filenames=list(filenames)
elif method == 'pathsearch':
if 'filepath' in kwargs:
filepath=kwargs['filepath']
else :
filepath='.\\data\\'
if os.path.isdir(filepath):
filenames=os.listdir(filepath)
filenames=[os.path.join(filepath,s) for s in filenames]
else:
print('搜索路径错误')
raise
info=[]
for filename in filenames:
filename_nopath=os.path.split(filename)[1]
data=read_data(filename)
# values appearing in the first column
field_c1=set(data.iloc[:,0].dropna().unique())
field_r1=set(data.columns)
# do the column names look like Q<number>?
hqlen=[len(re.findall('^[qQ]\d+',c))>0 for c in field_r1]
hqrate=hqlen.count(True)/len(field_r1) if len(field_r1)>0 else 0
rowlens,collens=data.shape
# share of int/float cells in the data
rate_real=data.applymap(lambda x:isinstance(x,(int,float))).sum().sum()/rowlens/collens
tmp={'filename':filename_nopath,'filenametype':'','rowlens':rowlens,'collens':collens,\
'field_c1':field_c1,'field_r1':field_r1,'type':'','rate_real':rate_real}
if len(re.findall('^data.*\.xls',filename_nopath))>0:
tmp['filenametype']='data'
elif len(re.findall('^code.*\.xls',filename_nopath))>0:
tmp['filenametype']='code'
elif len(re.findall('\d+_\d+_\d.xls',filename_nopath))>0:
tmp['filenametype']='wenjuanxing'
if tmp['filenametype']=='code' or set(['key','code','qlist','qtype']) < field_c1:
tmp['type']='code'
if tmp['filenametype']=='wenjuanxing' or len(set(['序号','提交答卷时间','所用时间','来自IP','来源','来源详情','总分'])&field_r1)>=5:
tmp['type']='wenjuanxing'
if tmp['filenametype']=='data' or hqrate>=0.5:
tmp['type']='data'
info.append(tmp)
questype=[k['type'] for k in info]
# priority: use pre-encoded data first, then wenjuanxing data
if questype.count('data')*questype.count('code')==1:
data=read_data(filenames[questype.index('data')])
code=read_code(filenames[questype.index('code')])
elif questype.count('wenjuanxing')>=2:
filenames=[(f,info[i]['rate_real']) for i,f in enumerate(filenames) if questype[i]=='wenjuanxing']
tmp=[]
for f,rate_real in filenames:
t2=0 if rate_real<0.5 else 2
d=pd.read_excel(f)
d=d.iloc[:,0]
tmp.append((t2,d))
#print('添加{}'.format(t2))
tmp_equal=0
for t,d0 in tmp[:-1]:
if len(d)==len(d0) and all(d==d0):
tmp_equal+=1
tmp[-1]=(t2+int(t/10)*10,tmp[-1][1])
max_quesnum=max([int(t/10) for t,d in tmp])
if tmp_equal==0:
tmp[-1]=(tmp[-1][0]+max_quesnum*10+10,tmp[-1][1])
#print('修改为{}'.format(tmp[-1][0]))
# reorganize all of the questionnaire files
questype=[t for t,d in tmp]
filenames=[f for f,r in filenames]
quesnums=max([int(t/10) for t in questype])# number of questionnaire groups that may be present
filename_wjx=[]
for i in range(1,quesnums+1):
if questype.count(i*10)==1 and questype.count(i*10+2)==1:
filename_wjx.append([filenames[questype.index(i*10)],filenames[questype.index(i*10+2)]])
if len(filename_wjx)==1:
data,code=wenjuanxing(filename_wjx[0])
elif len(filename_wjx)>1:
print('脚本识别出多组问卷星数据,请选择需要编码的数据:')
for i,f in enumerate(filename_wjx):
print('{}: {}'.format(i+1,'/'.join([os.path.split(f[0])[1],os.path.split(f[1])[1]])))
ii=input('您选择的数据是(数据前的编码,如:1):')
ii=re.sub('\s','',ii)
if ii.isnumeric():
data,code=wenjuanxing(filename_wjx[int(ii)-1])
else:
print('您输入正确的编码.')
else:
print('没有找到任何问卷数据..')
raise
else:
print('没有找到任何数据')
raise
return data,code
def spec_rcode(data,code):
city={'北京':0,'上海':0,'广州':0,'深圳':0,'成都':1,'杭州':1,'武汉':1,'天津':1,'南京':1,'重庆':1,'西安':1,'长沙':1,'青岛':1,'沈阳':1,'大连':1,'厦门':1,'苏州':1,'宁波':1,'无锡':1,\
'福州':2,'合肥':2,'郑州':2,'哈尔滨':2,'佛山':2,'济南':2,'东莞':2,'昆明':2,'太原':2,'南昌':2,'南宁':2,'温州':2,'石家庄':2,'长春':2,'泉州':2,'贵阳':2,'常州':2,'珠海':2,'金华':2,\
'烟台':2,'海口':2,'惠州':2,'乌鲁木齐':2,'徐州':2,'嘉兴':2,'潍坊':2,'洛阳':2,'南通':2,'扬州':2,'汕头':2,'兰州':3,'桂林':3,'三亚':3,'呼和浩特':3,'绍兴':3,'泰州':3,'银川':3,'中山':3,\
'保定':3,'西宁':3,'芜湖':3,'赣州':3,'绵阳':3,'漳州':3,'莆田':3,'威海':3,'邯郸':3,'临沂':3,'唐山':3,'台州':3,'宜昌':3,'湖州':3,'包头':3,'济宁':3,'盐城':3,'鞍山':3,'廊坊':3,'衡阳':3,\
'秦皇岛':3,'吉林':3,'大庆':3,'淮安':3,'丽江':3,'揭阳':3,'荆州':3,'连云港':3,'张家口':3,'遵义':3,'上饶':3,'龙岩':3,'衢州':3,'赤峰':3,'湛江':3,'运城':3,'鄂尔多斯':3,'岳阳':3,'安阳':3,\
'株洲':3,'镇江':3,'淄博':3,'郴州':3,'南平':3,'齐齐哈尔':3,'常德':3,'柳州':3,'咸阳':3,'南充':3,'泸州':3,'蚌埠':3,'邢台':3,'舟山':3,'宝鸡':3,'德阳':3,'抚顺':3,'宜宾':3,'宜春':3,'怀化':3,\
'榆林':3,'梅州':3,'呼伦贝尔':3,'临汾':4,'南阳':4,'新乡':4,'肇庆':4,'丹东':4,'德州':4,'菏泽':4,'九江':4,'江门市':4,'黄山':4,'渭南':4,'营口':4,'娄底':4,'永州市':4,'邵阳':4,'清远':4,\
'大同':4,'枣庄':4,'北海':4,'丽水':4,'孝感':4,'沧州':4,'马鞍山':4,'聊城':4,'三明':4,'开封':4,'锦州':4,'汉中':4,'商丘':4,'泰安':4,'通辽':4,'牡丹江':4,'曲靖':4,'东营':4,'韶关':4,'拉萨':4,\
'襄阳':4,'湘潭':4,'盘锦':4,'驻马店':4,'酒泉':4,'安庆':4,'宁德':4,'四平':4,'晋中':4,'滁州':4,'衡水':4,'佳木斯':4,'茂名':4,'十堰':4,'宿迁':4,'潮州':4,'承德':4,'葫芦岛':4,'黄冈':4,'本溪':4,\
'绥化':4,'萍乡':4,'许昌':4,'日照':4,'铁岭':4,'大理州':4,'淮南':4,'延边州':4,'咸宁':4,'信阳':4,'吕梁':4,'辽阳':4,'朝阳':4,'恩施州':4,'达州市':4,'益阳市':4,'平顶山':4,'六安':4,'延安':4,\
'梧州':4,'白山':4,'阜阳':4,'铜陵市':4,'河源':4,'玉溪市':4,'黄石':4,'通化':4,'百色':4,'乐山市':4,'抚州市':4,'钦州':4,'阳江':4,'池州市':4,'广元':4,'滨州':5,'阳泉':5,'周口市':5,'遂宁':5,\
'吉安':5,'长治':5,'铜仁':5,'鹤岗':5,'攀枝花':5,'昭通':5,'云浮':5,'伊犁州':5,'焦作':5,'凉山州':5,'黔西南州':5,'广安':5,'新余':5,'锡林郭勒':5,'宣城':5,'兴安盟':5,'红河州':5,'眉山':5,\
'巴彦淖尔':5,'双鸭山市':5,'景德镇市':5,'鸡西':5,'三门峡':5,'宿州':5,'汕尾':5,'阜新':5,'张掖':5,'玉林':5,'乌兰察布':5,'鹰潭':5,'黑河':5,'伊春':5,'贵港市':5,'漯河':5,'晋城':5,'克拉玛依':5,\
'随州':5,'保山':5,'濮阳':5,'文山州':5,'嘉峪关':5,'六盘水':5,'乌海':5,'自贡':5,'松原':5,'内江':5,'黔东南州':5,'鹤壁':5,'德宏州':5,'安顺':5,'资阳':5,'鄂州':5,'忻州':5,'荆门':5,'淮北':5,\
'毕节':5,'巴音郭楞':5,'防城港':5,'天水':5,'黔南州':5,'阿坝州':5,'石嘴山':5,'安康':5,'亳州市':5,'昌吉州':5,'普洱':5,'楚雄州':5,'白城':5,'贺州':5,'哈密':5,'来宾':5,'庆阳':5,'河池':5,\
'张家界 雅安':5,'辽源':5,'湘西州':5,'朔州':5,'临沧':5,'白银':5,'塔城地区':5,'莱芜':5,'迪庆州':5,'喀什地区':5,'甘孜州':5,'阿克苏':5,'武威':5,'巴中':5,'平凉':5,'商洛':5,'七台河':5,'金昌':5,\
'中卫':5,'阿勒泰':5,'铜川':5,'海西州':5,'吴忠':5,'固原':5,'吐鲁番':5,'阿拉善盟':5,'博尔塔拉州':5,'定西':5,'西双版纳':5,'陇南':5,'大兴安岭':5,'崇左':5,'日喀则':5,'临夏州':5,'林芝':5,\
'海东':5,'怒江州':5,'和田地区':5,'昌都':5,'儋州':5,'甘南州':5,'山南':5,'海南州':5,'海北州':5,'玉树州':5,'阿里地区':5,'那曲地区':5,'黄南州':5,'克孜勒苏州':5,'果洛州':5,'三沙':5}
code_keys=list(code.keys())
for qq in code_keys:
qlist=code[qq]['qlist']
#qtype=code[qq]['qtype']
content=code[qq]['content']
ind=list(data.columns).index(qlist[-1])
data1=data[qlist]
'''
Detect the city question in wenjuanxing data
'''
tf1=u'城市' in content
tf2=data1[data1.notnull()].applymap(lambda x:'-' in '%s'%x).all().all()
tf3=(qq+'a' not in data.columns) and (qq+'b' not in data.columns)
if tf1 and tf2 and tf3:
# province and city
tmp1=data[qq].map(lambda x:x.split('-')[0])
tmp2=data[qq].map(lambda x:x.split('-')[1])
tmp2[tmp1==u'上海']=u'上海'
tmp2[tmp1==u'北京']=u'北京'
tmp2[tmp1==u'天津']=u'天津'
tmp2[tmp1==u'重庆']=u'重庆'
tmp2[tmp1==u'香港']=u'香港'
tmp2[tmp1==u'澳门']=u'澳门'
data.insert(ind+1,qq+'a',tmp1)
data.insert(ind+2,qq+'b',tmp2)
code[qq+'a']={'content':'省份','qtype':'填空题','qlist':[qq+'a']}
code[qq+'b']={'content':'城市','qtype':'填空题','qlist':[qq+'b']}
tmp3=data[qq+'b'].map(lambda x: city[x] if x in city.keys() else x)
tmp3=tmp3.map(lambda x: 6 if isinstance(x,str) else x)
data.insert(ind+3,qq+'c',tmp3)
code[qq+'c']={'content':'城市分级','qtype':'单选题','qlist':[qq+'c'],\
'code':{0:'北上广深',1:'新一线',2:'二线',3:'三线',4:'四线',5:'五线',6:'五线以下'}}
return data,code
def levenshtein(s, t):
''' From Wikipedia article; iterative with two matrix rows. '''
if s == t: return 0
elif len(s) == 0: return len(t)
elif len(t) == 0: return len(s)
v0 = [None] * (len(t) + 1)
v1 = [None] * (len(t) + 1)
for i in range(len(v0)):
v0[i] = i
for i in range(len(s)):
v1[0] = i + 1
for j in range(len(t)):
cost = 0 if s[i] == t[j] else 1
v1[j + 1] = min(v1[j] + 1, v0[j + 1] + 1, v0[j] + cost)
for j in range(len(v0)):
v0[j] = v1[j]
return v1[len(t)]
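# Illustrative worked example: levenshtein('kitten', 'sitting') is 3
# (substitute k->s, substitute e->i, append g); this distance is what code_similar
# below uses to compare question texts.
def _levenshtein_example():
    return levenshtein('kitten', 'sitting')  # -> 3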
def code_similar(code1,code2):
'''
Question-text similarity is measured with the minimum edit distance.
Option similarity falls into several cases:
1. options identical: 1
2. single choice: both keys and values match and more than half of the options are shared: 2
3. multiple choice / ranking: keys are ignored; more than half of the options shared is enough: 3
4. matrix single choice: only an exact match of code_r is considered for now
5. anything else: 0
'''
code_distance_min=pd.DataFrame(index=code1.keys(),columns=['qnum','similar_content','similar_code'])
for c1 in code1:
# similarity of the question text
disstance_str=pd.Series(index=code2.keys())
for c2 in code2:
if code1[c1]['qtype']==code2[c2]['qtype']:
disstance_str[c2]=levenshtein(code1[c1]['content'], code2[c2]['content'])
c2=disstance_str.idxmin()
if '%s'%c2 == 'nan':
continue
min_len=(len(code1[c1]['content'])+len(code2[c2]['content']))/2
similar_content=100-100*disstance_str[c2]/min_len if min_len>0 else 0
# similarity of the options
qtype=code2[c2]['qtype']
if qtype == '单选题':
t1=code1[c1]['code']
t2=code2[c2]['code']
inner_key=list(set(t1.keys())&set(t2.keys()))
tmp=all([t1[c]==t2[c] for c in inner_key])
if t1==t2:
similar_code=1
elif len(inner_key)>=0.5*len(set(t1.keys())|set(t2.keys())) and tmp:
similar_code=2
else:
similar_code=0
elif qtype in ['多选题','排序题']:
t1=code1[c1]['code']
t2=code2[c2]['code']
t1=[t1[c] for c in code1[c1]['qlist']]
t2=[t2[c] for c in code2[c2]['qlist']]
inner_key=set(t1)&set(t2)
if t1==t2:
similar_code=1
elif len(set(t1)&set(t2))>=0.5*len(set(t1)|set(t2)):
similar_code=3
else:
similar_code=0
elif qtype in ['矩阵多选题']:
t1=code1[c1]['code_r']
t2=code2[c2]['code_r']
t1=[t1[c] for c in code1[c1]['qlist']]
t2=[t2[c] for c in code2[c2]['qlist']]
inner_key=set(t1)&set(t2)
if t1==t2:
similar_code=1
elif len(set(t1)&set(t2))>=0.5*len(set(t1)|set(t2)):
similar_code=3
else:
similar_code=0
elif qtype in ['填空题']:
similar_code=1
else:
similar_code=0
code_distance_min.loc[c1,'qnum']=c2
code_distance_min.loc[c1,'similar_content']=similar_content
code_distance_min.loc[c1,'similar_code']=similar_code
# drop duplicated matches in qnum
code_distance_min=code_distance_min.sort_values(['qnum','similar_content','similar_code'],ascending=[False,False,True])
code_distance_min.loc[code_distance_min.duplicated(['qnum']),:]=np.nan
code_distance_min=pd.DataFrame(code_distance_min,index=code1.keys())
return code_distance_min
def data_merge(ques1,ques2,qlist1=None,qlist2=None,name1='ques1',name2='ques2',\
mergeqnum='Q0',similar_threshold=70):
'''Merge two questionnaire datasets
ques1: list, [data1,code1]
ques2: list, [data2,code2]
'''
data1,code1=ques1
data2,code2=ques2
if (qlist1 is None) or (qlist2 is None):
qlist1=[]
qlist2=[]
qqlist1=[]
qqlist2=[]
code_distance_min=code_similar(code1,code2)
code1_key=sorted(code1,key=lambda x:int(re.findall('\d+',x)[0]))
for c1 in code1_key:
qtype1=code1[c1]['qtype']
#print('{}:{}'.format(c1,code1[c1]['content']))
rs_qq=code_distance_min.loc[c1,'qnum']
similar_content=code_distance_min.loc[c1,'similar_content']
similar_code=code_distance_min.loc[c1,'similar_code']
if (similar_content>=similar_threshold) and (similar_code in [1,2]):
#print('推荐合并第二份数据中的{}({}), 两个题目相似度为为{:.0f}%'.format(rs_qq,code2[rs_qq]['content'],similar))
print('将自动合并: {} 和 {}'.format(c1,rs_qq))
user_qq=rs_qq
qqlist1+=code1[c1]['qlist']
qqlist2+=code2[user_qq]['qlist']
qlist1.append(c1)
qlist2.append(rs_qq)
elif (similar_content>=similar_threshold) and (similar_code==3):
# for non-single-choice questions the option order has to be aligned
t1=code1[c1]['code_r'] if qtype1 =='矩阵单选题' else code1[c1]['code']
t1_qlist=code1[c1]['qlist']
t1_value=[t1[k] for k in t1_qlist]
t2=code2[rs_qq]['code_r'] if qtype1 =='矩阵单选题' else code2[rs_qq]['code']
t2_qlist=code2[rs_qq]['qlist']
t2_value=[t2[k] for k in t2_qlist]
# keep only the options common to both
t1_qlist_new=[q for q in t1_qlist if t1[q] in list(set(t1_value)&set(t2_value))]
t2_r=dict(zip([s[1] for s in t2.items()],[s[0] for s in t2.items()]))
t2_qlist_new=[t2_r[s] for s in [t1[q] for q in t1_qlist_new]]
code1[c1]['qlist']=t1_qlist_new
code1[c1]['code']={k:t1[k] for k in t1_qlist_new}
qqlist1+=t1_qlist_new
qqlist2+=t2_qlist_new
qlist1.append(c1)
qlist2.append(rs_qq)
print('将自动合并: {} 和 {} (只保留了相同的选项)'.format(c1,rs_qq))
elif similar_code in [1,2]:
print('-'*40)
print('为【 {}:{} 】自动匹配到: '.format(c1,code1[c1]['content']))
print(' 【 {}:{} 】,其相似度为{:.0f}%.'.format(rs_qq,code2[rs_qq]['content'],similar_content))
tmp=input('是否合并该组题目,请输入 yes/no (也可以输入第二份数据中其他您需要匹配的题目): ')
tmp=re.sub('\s','',tmp)
tmp=tmp.lower()
if tmp in ['yes','y']:
user_qq=rs_qq
elif tmp in ['no','n']:
user_qq=None
else:
tmp=re.sub('^q','Q',tmp)
if tmp not in code2:
user_qq=None
elif (tmp in code2) and (tmp!=rs_qq):
print('您输入的是{}:{}'.format(tmp,code2[tmp]['content']))
user_qq=tmp
if user_qq==rs_qq:
qqlist1+=code1[c1]['qlist']
qqlist2+=code2[user_qq]['qlist']
qlist1.append(c1)
qlist2.append(user_qq)
print('将自动合并: {} 和 {}'.format(c1,rs_qq))
elif user_qq is not None:
# compare the codes of the two questions
if 'code' in code1[c1] and len(code1[c1]['code'])>0:
t1=code1[c1]['code_r'] if qtype1 =='矩阵单选题' else code1[c1]['code']
t2=code2[user_qq]['code_r'] if code2[user_qq]['qtype'] =='矩阵单选题' else code2[user_qq]['code']
if set(t1.values())==set(t2.values()):
qqlist1+=code1[c1]['qlist']
qqlist2+=code2[user_qq]['qlist']
qlist1.append(c1)
qlist2.append(user_qq)
print('将自动合并: {} 和 {}'.format(c1,user_qq))
else:
print('两个题目的选项不匹配,将自动跳过.')
else:
qqlist1+=[code1[c1]['qlist'][0]]
qqlist2+=[code2[user_qq]['qlist'][0]]
qlist1.append(c1)
qlist2.append(user_qq)
print('将自动合并: {} 和 {}'.format(c1,user_qq))
else:
print('将自动跳过: {}'.format(c1))
print('-'*40)
else:
print('将自动跳过: {}'.format(c1))
tmp=input('请问您需要的题目是否都已经合并? 请输入(yes / no): ')
tmp=re.sub('\s','',tmp)
tmp=tmp.lower()
if tmp in ['no','n']:
print('请确保接下来您要合并的题目类型和选项完全一样.')
while 1:
tmp=input('请输入您想合并的题目对,直接回车则终止输入(如: Q1,Q1 ): ')
tmp=re.sub('\s','',tmp)# strip whitespace
tmp=re.sub(',',',',tmp)# fix possibly mistyped full-width commas
tmp=tmp.split(',')
tmp=[re.sub('^q','Q',qq) for qq in tmp]
if len(tmp)<2:
break
if tmp[0] in qlist1 or tmp[1] in qlist2:
print('该题已经被合并,请重新输入')
continue
if tmp[0] not in code1 or tmp[1] not in code2:
print('输入错误, 请重新输入')
continue
c1=tmp[0]
c2=tmp[1]
print('您输入的是:')
print('第一份数据中的【 {}:{} 】'.format(c1,code1[c1]['content']))
print('第二份数据中的【 {}:{} 】'.format(c2,code2[c2]['content']))
w=code_similar({c1:code1[c1]},{c2:code2[c2]})
similar_code=w.loc[c1,'similar_code']
if similar_code in [1,2] and len(code1[c1]['qlist'])==len(code2[c2]['qlist']):
qqlist1+=code1[c1]['qlist']
qqlist2+=code2[c2]['qlist']
qlist1.append(c1)
qlist2.append(c2)
print('将自动合并: {} 和 {}'.format(c1,c2))
else:
print('选项不匹配,请重新输入')
else:
qqlist1=[]
for qq in qlist1:
qqlist1=qqlist1+code1[qq]['qlist']
qqlist2=[]
for qq in qlist2:
qqlist2=qqlist2+code2[qq]['qlist']
# map the question lists to the column names in data
if mergeqnum in qqlist1:
mergeqnum=mergeqnum+'merge'
data1=data1.loc[:,qqlist1]
data1.loc[:,mergeqnum]=1
data2=data2.loc[:,qqlist2]
data2.loc[:,mergeqnum]=2
if len(qqlist1)!=len(qqlist2):
print('两份数据选项不完全匹配,请检查....')
raise
data2=data2.rename(columns=dict(zip(qqlist2,qqlist1)))
data12=data1.append(data2,ignore_index=True)
code12={}
for i,cc in enumerate(qlist1):
code12[cc]=code1[cc]
if 'code' in code1[cc] and 'code' in code2[qlist2[i]]:
code12[cc]['code'].update(code2[qlist2[i]]['code'])
code12[mergeqnum]={'content':u'来源','code':{1:name1,2:name2},'qtype':u'单选题','qlist':[mergeqnum]}
return data12,code12
## ===========================================================
#
#
# Data cleaning #
#
#
## ==========================================================
def clean_ftime(ftime,cut_percent=0.25):
'''
ftime is the number of seconds taken to complete the questionnaire
Idea:
1. only samples with a small completion time are considered for truncation
2. find the inflection point of the completion-time curve, i.e. the cut-off time
Returns: r
samples with completion time below r are suggested to be dropped
'''
t_min=int(ftime.min())
t_cut=int(ftime.quantile(cut_percent))
x=np.array(range(t_min,t_cut))
y=np.array([len(ftime[ftime<=i]) for i in range(t_min,t_cut)])
z1 = np.polyfit(x, y, 4) # fitted polynomial
z2=np.polyder(z1,2) # its second derivative
r=np.roots(np.polyder(z2,1))
r=int(r[0])
return r
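# Illustrative sketch (synthetic data, not from the original package): estimate a
# completion-time cut-off from a fake distribution of questionnaire durations in seconds.
def _clean_ftime_example():
    rng = np.random.RandomState(0)
    ftime = pd.Series(np.concatenate([rng.randint(10, 60, 50),       # rushed respondents
                                      rng.randint(120, 600, 450)]))  # normal respondents
    r = clean_ftime(ftime, cut_percent=0.25)
    # respondents with ftime < r would be candidates for removal
    return r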
## ===========================================================
#
#
# Data analysis and output #
#
#
## ==========================================================
def data_auto_code(data):
'''Automatically infer a codebook from questionnaire data
Input
data: DataFrame whose column names follow the pattern Qi or Qi_
Output:
code: the inferred codebook
'''
data=pd.DataFrame(data)
columns=data.columns
columns=[c for c in columns if re.match('Q\d+',c)]
code={}
for cc in columns:
# identify the question number
if '_' not in cc:
key=cc
else:
key=cc.split('_')[0]
# a new question starts a new code entry
if key not in code:
code[key]={}
code[key]['qlist']=[]
code[key]['code']={}
code[key]['content']=key
code[key]['qtype']=''
# maintain each question's qlist
if key == cc:
code[key]['qlist']=[key]
elif re.findall('^'+key+'_[a-zA-Z]{0,}\d+$',cc):
code[key]['qlist'].append(cc)
else:
if 'qlist_open' in code[key]:
code[key]['qlist_open'].append(cc)
else:
code[key]['qlist_open']=[cc]
for kk in code.keys():
dd=data[code[kk]['qlist']]
# single-choice and fill-in questions
if len(dd.columns)==1:
tmp=dd[dd.notnull()].iloc[:,0].unique()
if dd.iloc[:,0].value_counts().mean() >=2:
code[kk]['qtype']=u'单选题'
code[kk]['code']=dict(zip(tmp,tmp))
else:
code[kk]['qtype']=u'填空题'
del code[kk]['code']
else:
tmp=set(dd[dd.notnull()].as_matrix().flatten())
if set(tmp)==set([0,1]):
code[kk]['qtype']=u'多选题'
code[kk]['code']=dict(zip(code[kk]['qlist'],code[kk]['qlist']))
elif 'R' in code[kk]['qlist'][0]:
code[kk]['qtype']=u'矩阵单选题'
code[kk]['code_r']=dict(zip(code[kk]['qlist'],code[kk]['qlist']))
code[kk]['code']=dict(zip(list(tmp),list(tmp)))
else:
code[kk]['qtype']=u'排序题'
code[kk]['code']=dict(zip(code[kk]['qlist'],code[kk]['qlist']))
return code
def save_data(data,filename=u'data.xlsx',code=None):
'''Save questionnaire data to disk
The output format is chosen from the filename extension
If a codebook is given, the data are saved as readable text
'''
savetype=os.path.splitext(filename)[1][1:]
data1=data.copy()
if code:
for qq in code.keys():
qtype=code[qq]['qtype']
qlist=code[qq]['qlist']
if qtype == u'单选题':
# replace codes with text and append the question text to the column name
data1[qlist[0]].replace(code[qq]['code'],inplace=True)
data1.rename(columns={qq:'{}({})'.format(qq,code[qq]['content'])},inplace=True)
elif qtype == u'矩阵单选题':
# same as for single-choice questions
data1[code[qq]['qlist']].replace(code[qq]['code'],inplace=True)
tmp1=code[qq]['qlist']
tmp2=['{}({})'.format(q,code[qq]['code_r'][q]) for q in tmp1]
data1.rename(columns=dict(zip(tmp1,tmp2)),inplace=True)
elif qtype in [u'排序题']:
# first collapse into a single column, insert it, then convert the codes to text
tmp=data[qlist]
tmp=tmp.rename(columns=code[qq]['code'])
tmp=dataCode_to_text(tmp)
ind=list(data1.columns).index(qlist[0])
qqname='{}({})'.format(qq,code[qq]['content'])
data1.insert(ind,qqname,tmp)
tmp1=code[qq]['qlist']
tmp2=['{}_{}'.format(qq,code[qq]['code'][q]) for q in tmp1]
data1.rename(columns=dict(zip(tmp1,tmp2)),inplace=True)
elif qtype in [u'多选题']:
# first collapse into a single column, insert it, then convert the codes to text
tmp=data[qlist]
tmp=tmp.rename(columns=code[qq]['code'])
tmp=dataCode_to_text(tmp)
ind=list(data1.columns).index(qlist[0])
qqname='{}({})'.format(qq,code[qq]['content'])
data1.insert(ind,qqname,tmp)
for q in qlist:
data1[q].replace({0:'',1:code[qq]['code'][q]},inplace=True)
tmp2=['{}_{}'.format(qq,code[qq]['code'][q]) for q in qlist]
data1.rename(columns=dict(zip(qlist,tmp2)),inplace=True)
else:
data1.rename(columns={qq:'{}({})'.format(qq,code[qq]['content'])},inplace=True)
if (savetype == u'xlsx') or (savetype == u'xls'):
data1.to_excel(filename,index=False)
elif savetype == u'csv':
data1.to_csv(filename,index=False)
def read_data(filename):
savetype=os.path.splitext(filename)[1][1:]
if (savetype==u'xlsx') or (savetype==u'xls'):
data=pd.read_excel(filename)
elif savetype==u'csv':
data=pd.read_csv(filename)
else:
print('can not read the file!')
return data
def sa_to_ma(data):
'''Convert single-choice data into multiple-choice (dummy) format
data is single-choice data; invalid entries must be nan
the built-in pd.get_dummies() can be used instead
'''
if isinstance(data,pd.core.frame.DataFrame):
data=data[data.columns[0]]
#categorys=sorted(data[data.notnull()].unique())
categorys=data[data.notnull()].unique()
try:
categorys=sorted(categorys)
except:
pass
#print('sa_to_ma function::cannot sorted')
data_ma=pd.DataFrame(index=data.index,columns=categorys)
for c in categorys:
data_ma[c]=data.map(lambda x : int(x==c))
data_ma.loc[data.isnull(),:]=np.nan
return data_ma
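# Illustrative sketch (invented data): sa_to_ma on a small single-choice Series,
# similar in spirit to pd.get_dummies but keeping rows with nan as nan.
def _sa_to_ma_example():
    s = pd.Series([1, 2, 2, np.nan, 3])
    return sa_to_ma(s)  # columns 1.0/2.0/3.0 with 0/1 indicators; the nan row stays nan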
def to_dummpy(data,code,qqlist=None,qtype_new='多选题',ignore_open=True):
'''Convert to dummy variables
All single-choice questions in the data are converted to dummy variables; open-ended and fill-in questions are dropped
Returns a large data frame containing only 0 and 1
'''
if qqlist is None:
qqlist=sorted(code,key=lambda x:int(re.findall('\d+',x)[0]))
bdata=pd.DataFrame()
bcode={}
for qq in qqlist:
qtype=code[qq]['qtype']
data0=data[code[qq]['qlist']]
if qtype=='单选题':
data0=data0.iloc[:,0]
categorys=data0[data0.notnull()].unique()
try:
categorys=sorted(categorys)
except :
pass
categorys=[t for t in categorys if t in code[qq]['code']]
cname=[code[qq]['code'][k] for k in categorys]
columns_name=['{}_A{}'.format(qq,i+1) for i in range(len(categorys))]
tmp=pd.DataFrame(index=data0.index,columns=columns_name)
for i,c in enumerate(categorys):
tmp[columns_name[i]]=data0.map(lambda x : int(x==c))
#tmp.loc[data0.isnull(),:]=0
code_tmp={'content':code[qq]['content'],'qtype':qtype_new}
code_tmp['code']=dict(zip(columns_name,cname))
code_tmp['qlist']=columns_name
bcode.update({qq:code_tmp})
bdata=pd.concat([bdata,tmp],axis=1)
elif qtype in ['多选题','排序题','矩阵单选题']:
bdata=pd.concat([bdata,data0],axis=1)
bcode.update({qq:code[qq]})
bdata=bdata.fillna(0)
try:
bdata=bdata.astype(np.int64,raise_on_error=False)
except :
pass
return bdata,bcode
def qdata_flatten(data,code,quesid=None,userid_begin=None):
'''Flatten the questionnaire data into a long table with the following fields
userid: user ID
quesid: questionnaire ID
qnum: question number
qname: question text
qtype: question type
samplelen: sample size of the question
itemnum: option number
itemname: option text
code: the user's choice
codename: text of the user's choice
count: count
percent(%): percentage of the count
'''
if not userid_begin:
userid_begin=1000000
data.index=[userid_begin+i+1 for i in range(len(data))]
if '提交答卷时间' in data.columns:
begin_date=pd.to_datetime(data['提交答卷时间']).min().strftime('%Y-%m-%d')
end_date=pd.to_datetime(data['提交答卷时间']).max().strftime('%Y-%m-%d')
else:
begin_date=''
end_date=''
data,code=to_dummpy(data,code,qtype_new='单选题')
code_item={}
for qq in code:
if code[qq]['qtype']=='矩阵单选题':
code_item.update(code[qq]['code_r'])
else :
code_item.update(code[qq]['code'])
qdata=data.stack().reset_index()
qdata.columns=['userid','qn_an','code']
qdata['qnum']=qdata['qn_an'].map(lambda x:x.split('_')[0])
qdata['itemnum']=qdata['qn_an'].map(lambda x:'_'.join(x.split('_')[1:]))
if quesid:
qdata['quesid']=quesid
qdata=qdata[['userid','quesid','qnum','itemnum','code']]
else:
qdata=qdata[['userid','qnum','itemnum','code']]
# descriptive statistics:
samplelen=qdata.groupby(['userid','qnum'])['code'].sum().map(lambda x:int(x>0)).unstack().sum()
quesinfo=qdata.groupby(['qnum','itemnum','code'])['code'].count()
quesinfo.name='count'
quesinfo=quesinfo.reset_index()
quesinfo=quesinfo[quesinfo['code']!=0]
#quesinfo=qdata.groupby(['quesid','qnum','itemnum'])['code'].sum()
quesinfo['samplelen']=quesinfo['qnum'].replace(samplelen.to_dict())
quesinfo['percent(%)']=0
quesinfo.loc[quesinfo['samplelen']>0,'percent(%)']=100*quesinfo.loc[quesinfo['samplelen']>0,'count']/quesinfo.loc[quesinfo['samplelen']>0,'samplelen']
quesinfo['qname']=quesinfo['qnum'].map(lambda x: code[x]['content'])
quesinfo['qtype']=quesinfo['qnum'].map(lambda x: code[x]['qtype'])
quesinfo['itemname']=quesinfo['qnum']+quesinfo['itemnum'].map(lambda x:'_%s'%x)
quesinfo['itemname']=quesinfo['itemname'].replace(code_item)
#quesinfo['itemname']=quesinfo['qn_an'].map(lambda x: code[x.split('_')[0]]['code_r'][x] if \
#code[x.split('_')[0]]['qtype']=='矩阵单选题' else code[x.split('_')[0]]['code'][x])
# meaning of each choice value
quesinfo['codename']=''
quesinfo.loc[quesinfo['code']==0,'codename']='否'
quesinfo.loc[quesinfo['code']==1,'codename']='是'
quesinfo['tmp']=quesinfo['qnum']+quesinfo['code'].map(lambda x:'_%s'%int(x))
quesinfo['codename'].update(quesinfo.loc[(quesinfo['code']>0)&(quesinfo['qtype']=='矩阵单选题'),'tmp']\
.map(lambda x: code[x.split('_')[0]]['code'][int(x.split('_')[1])]))
quesinfo['codename'].update(quesinfo.loc[(quesinfo['code']>0)&(quesinfo['qtype']=='排序题'),'tmp'].map(lambda x: 'Top{}'.format(x.split('_')[1])))
quesinfo['begin_date']=begin_date
quesinfo['end_date']=end_date
if quesid:
quesinfo['quesid']=quesid
quesinfo=quesinfo[['quesid','begin_date','end_date','qnum','qname','qtype','samplelen','itemnum','itemname','code','codename','count','percent(%)']]
else:
quesinfo=quesinfo[['qnum','qname','qtype','samplelen','itemnum','itemname','code','codename','count','percent(%)']]
# sort
quesinfo['qnum']=quesinfo['qnum'].astype('category')
quesinfo['qnum'].cat.set_categories(sorted(list(quesinfo['qnum'].unique()),key=lambda x:int(re.findall('\d+',x)[0])), inplace=True)
quesinfo['itemnum']=quesinfo['itemnum'].astype('category')
quesinfo['itemnum'].cat.set_categories(sorted(list(quesinfo['itemnum'].unique()),key=lambda x:int(re.findall('\d+',x)[0])), inplace=True)
quesinfo=quesinfo.sort_values(['qnum','itemnum','code'])
return qdata,quesinfo
def confidence_interval(p,n,alpha=0.05):
import scipy.stats as stats
t=stats.norm.ppf(1-alpha/2)
ci=t*math.sqrt(p*(1-p)/n)
#a=p-stats.norm.ppf(1-alpha/2)*math.sqrt(p*(1-p)/n)
#b=p+stats.norm.ppf(1-alpha/2)*math.sqrt(p*(1-p)/n)
return ci
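# Illustrative worked example (not from the original package): half-width of a 95%
# confidence interval for a proportion of 40% observed in a sample of 400.
def _confidence_interval_example():
    ci = confidence_interval(p=0.40, n=400)  # z * sqrt(p*(1-p)/n)
    # ci is roughly 0.048, i.e. 40% plus or minus about 4.8 percentage points
    return ci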
def sample_size_cal(interval,N,alpha=0.05):
'''Sample size calculation for a survey
Reference: https://www.surveysystem.com/sscalc.htm
sample_size_cal(interval,N,alpha=0.05)
Input:
interval: margin of error, e.g. 0.03
N: population size; above about 10,000 it makes little difference
alpha: significance level; the default 0.05 gives a 95% confidence level
'''
import scipy.stats as stats
p=stats.norm.ppf(1-alpha/2)
if interval>1:
interval=interval/100
samplesize=p**2/4/interval**2
if N:
samplesize=samplesize*N/(samplesize+N)
samplesize=int(round(samplesize))
return samplesize
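# Illustrative worked example: required sample size for a 3% margin of error at 95%
# confidence, with and without a finite-population correction.
def _sample_size_cal_example():
    n_infinite = sample_size_cal(0.03, None)  # about 1067 for a very large population
    n_finite = sample_size_cal(0.03, 5000)    # drops to about 879 when N = 5000
    return n_infinite, n_finite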
def gof_test(fo,fe=None,alpha=0.05):
'''Goodness-of-fit test
Input:
fo: observed frequencies
fe: expected frequencies; defaults to the mean of fo
Returns:
1: the sample differs from the population
0: no difference between the sample and the population
Example:
gof_test(np.array([0.3,0.4,0.3])*222)
'''
import scipy.stats as stats
fo=np.array(fo).flatten()
C=len(fo)
if not fe:
N=fo.sum()
fe=np.array([N/C]*C)
else:
fe=np.array(fe).flatten()
chi_value=(fo-fe)**2/fe
chi_value=chi_value.sum()
chi_value_fit=stats.chi2.ppf(q=1-alpha,df=C-1)
#CV=np.sqrt((fo-fe)**2/fe**2/(C-1))*100
if chi_value>chi_value_fit:
result=1
else:
result=0
return result
def chi2_test(fo,alpha=0.05):
import scipy.stats as stats
fo=pd.DataFrame(fo)
chiStats = stats.chi2_contingency(observed=fo)
#critical_value = stats.chi2.ppf(q=1-alpha,df=chiStats[2])
#observed_chi_val = chiStats[0]
# p < alpha is equivalent to observed_chi_val > critical_value
chi2_data=(chiStats[1] <= alpha,chiStats[1])
return chi2_data
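# Illustrative sketch (invented 2x2 table): chi-square test of independence on a small
# contingency table; returns (significant_at_alpha, p_value).
def _chi2_test_example():
    fo = pd.DataFrame([[30, 70], [55, 45]])  # e.g. choice counts in two groups
    return chi2_test(fo)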
def fisher_exact(fo,alpha=0.05):
'''Fisher's exact test
This implementation calls R through the pyper package, which must be installed
A pure-Python approach is described at
https://mrnoutahi.com/2016/01/03/Fisher-exac-test-for-mxn-table/
but it still has some issues, so it is not used here.
'''
import pyper as pr
r=pr.R(use_pandas=True,use_numpy=True)
r.assign('fo',fo)
r("b<-fisher.test(fo)")
pdata=r['b']
p_value=pdata['p.value']
if p_value<alpha:
result=1
else:
result=0
return (result,p_value)
def anova(data,formula):
'''Analysis of variance
Input
--data: DataFrame containing numeric and categorical variables
--formula: relationship between the variables, e.g. numeric_var~C(cat_var1)[+C(cat_var2)[+C(cat_var1):C(cat_var2)]]
Returns [the ANOVA table]
[total variance is split into within-group and between-group variance; the difference is judged from their ratio]
--df: degrees of freedom
--sum_sq: sum of squares
--mean_sq: sum of squares divided by its degrees of freedom
--F: ratio of the mean_sq values
--PR(>F): p-value; e.g. < 0.05 indicates a significant difference
'''
import statsmodels.api as sm
from statsmodels.formula.api import ols
cw_lm=ols(formula, data=data).fit() #Specify C for Categorical
r=sm.stats.anova_lm(cw_lm)
return r
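# Illustrative sketch (synthetic data): one-way ANOVA of a numeric score against a
# categorical group variable, using the formula syntax described above.
def _anova_example():
    rng = np.random.RandomState(1)
    df = pd.DataFrame({'score': rng.randn(90) + np.repeat([0.0, 0.3, 0.8], 30),
                       'group': np.repeat(['A', 'B', 'C'], 30)})
    return anova(df, 'score ~ C(group)')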
def mca(X,N=2):
'''Correspondence analysis; currently only two-way tables are supported
X: observed frequency table
N: number of dimensions to return, default 2
The result can be plotted with the scatter function:
fig=scatter([pr,pc])
fig.savefig('mca.png')
'''
from scipy.linalg import diagsvd
S = X.sum().sum()
Z = X / S # correspondence matrix
r = Z.sum(axis=1)
c = Z.sum()
D_r = np.diag(1/np.sqrt(r))
Z_c = Z - np.outer(r, c) # standardized residuals matrix
D_c = np.diag(1/np.sqrt(c))
# another option, not pursued here, is sklearn.decomposition.TruncatedSVD
P,s,Q = np.linalg.svd(np.dot(np.dot(D_r, Z_c),D_c))
#S=diagsvd(s[:2],P.shape[0],2)
pr=np.dot(np.dot(D_r,P),diagsvd(s[:N],P.shape[0],N))
pc=np.dot(np.dot(D_c,Q.T),diagsvd(s[:N],Q.shape[0],N))
inertia=np.cumsum(s**2)/np.sum(s**2)
inertia=inertia.tolist()
if isinstance(X,pd.DataFrame):
pr=pd.DataFrame(pr,index=X.index,columns=list('XYZUVW')[:N])
pc=pd.DataFrame(pc,index=X.columns,columns=list('XYZUVW')[:N])
return pr,pc,inertia
'''
w=pd.ExcelWriter(u'mca_.xlsx')
pr.to_excel(w,startrow=0,index_label=True)
pc.to_excel(w,startrow=len(pr)+2,index_label=True)
w.save()
'''
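# Editor's note (usage sketch; the labels are invented for illustration):
# X = pd.DataFrame([[20, 10, 5], [5, 15, 20]], index=['brand A', 'brand B'],
#                  columns=['young', 'middle', 'senior'])
# pr, pc, inertia = mca(X)        # 2-D row and column coordinates plus explained inertia
# fig = scatter([pr, pc]); fig.savefig('mca.png')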
def cluster(data,code,cluster_qq,n_clusters='auto',max_clusters=7):
'''Cluster respondents based on their answers to the attitude questions.
'''
from sklearn.cluster import KMeans
#from sklearn.decomposition import PCA
from sklearn import metrics
#import prince
qq_max=sorted(code,key=lambda x:int(re.findall(r'\d+',x)[0]))[-1]
new_cluster='Q{}'.format(int(re.findall(r'\d+',qq_max)[0])+1)
#new_cluster='Q32'
qlist=code[cluster_qq]['qlist']
X=data[qlist]
# drop respondents whose attitude scores are (almost) all identical (at most two distinct values)
std_t=min(1.41/np.sqrt(len(qlist)),0.40) if len(qlist)>=8 else 0.10
X=X[X.T.std()>std_t]
index_bk=X.index  # back up the index so the cluster labels can be mapped back later
X.fillna(0,inplace=True)
X1=X.T
X1=(X1-X1.mean())/X1.std()
X1=X1.T.to_numpy()  # as_matrix() was removed from pandas; to_numpy() is the replacement
if n_clusters == 'auto':
# choose and evaluate the number of clusters
silhouette_score=[]  # silhouette coefficients
SSE_score=[]
klist=np.arange(2,15)
for k in klist:
est = KMeans(k) # 4 clusters
est.fit(X1)
tmp=np.sum((X1-est.cluster_centers_[est.labels_])**2)
SSE_score.append(tmp)
tmp=metrics.silhouette_score(X1, est.labels_)
silhouette_score.append(tmp)
'''
fig = plt.figure(1)
ax = fig.add_subplot(111)
fig = plt.figure(2)
ax.plot(klist,np.array(silhouette_score))
ax = fig.add_subplot(111)
ax.plot(klist,np.array(SSE_score))
'''
# find the first local peak (knee) of the silhouette curve
ss=np.array(silhouette_score)
t1=[False]+list(ss[1:]>ss[:-1])
t2=list(ss[:-1]>ss[1:])+[False]
k_log=[t1[i]&t2[i] for i in range(len(t1))]
if True in k_log:
k=k_log.index(True)
else:
k=1
k=k if k<=max_clusters-2 else max_clusters-2 # cap the number of clusters at max_clusters (7 by default)
k_best=klist[k]
else:
k_best=n_clusters
est = KMeans(k_best) # 4 clusters
est.fit(X1)
# compute evaluation metrics
SSE=np.sqrt(np.sum((X1-est.cluster_centers_[est.labels_])**2)/len(X1))
silhouette_score=metrics.silhouette_score(X1, est.labels_)
print('valid samples: {}, features: {}, best number of clusters: {}'.format(len(X1),len(qlist),k_best))
print('SSE (distance of samples to their cluster centroid): {:.2f}, silhouette score: {:.2f}'.format(SSE,silhouette_score))
# plot of the PCA-reduced clusters (disabled)
'''
X_PCA = PCA(2).fit_transform(X1)
kwargs = dict(cmap = plt.cm.get_cmap('rainbow', 10),
edgecolor='none', alpha=0.6)
labels=pd.Series(est.labels_)
plt.figure()
plt.scatter(X_PCA[:, 0], X_PCA[:, 1], c=labels, **kwargs)
'''
'''
# 三维立体图
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X_PCA[:, 0], X_PCA[:, 1],X_PCA[:, 2], c=labels, **kwargs)
'''
# write the cluster labels back into the original data
parameters={'methods':'kmeans','inertia':est.inertia_,'SSE':SSE,'silhouette':silhouette_score,\
'n_clusters':k_best,'n_features':len(qlist),'n_samples':len(X1),'qnum':new_cluster,\
'data':X1,'labels':est.labels_}
data[new_cluster]=pd.Series(est.labels_,index=index_bk)
code[new_cluster]={'content':'态度题聚类结果','qtype':'单选题','qlist':[new_cluster],
'code':dict(zip(range(k_best),['cluster{}'.format(i+1) for i in range(k_best)]))}
print('The cluster labels have been stored in the data under question number {}'.format(new_cluster))
return data,code,parameters
'''
# 对应分析
t=data.groupby([new_cluster])[code[cluster_qq]['qlist']].mean()
t.columns=['R{}'.format(i+1) for i in range(len(code[cluster_qq]['qlist']))]
t=t.rename(index=code[new_cluster]['code'])
ca=prince.CA(t)
ca.plot_rows_columns(show_row_labels=True,show_column_labels=True)
'''
def scatter(data,legend=False,title=None,font_ch=None,find_path=None):
'''
Draw a scatter plot with a text label on each point.
'''
import matplotlib.font_manager as fm
if font_ch is None:
fontlist=['calibri.ttf','simfang.ttf','simkai.ttf','simhei.ttf','simsun.ttc','msyh.ttf','msyh.ttc']
myfont=''
if not find_path:
find_paths=['C:\\Windows\\Fonts','']
# later entries in fontlist take priority; later entries in find_paths take priority
for find_path in find_paths:
for f in fontlist:
if os.path.exists(os.path.join(find_path,f)):
myfont=os.path.join(find_path,f)
if len(myfont)==0:
print('No suitable Chinese font was found for plotting, please check.')
myfont=None
else:
myfont = fm.FontProperties(fname=myfont)
else:
myfont=fm.FontProperties(fname=font_ch)
fig, ax = plt.subplots()
#ax.grid('on')
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
ax.axhline(y=0, linestyle='-', linewidth=1.2, alpha=0.6)
ax.axvline(x=0, linestyle='-', linewidth=1.2, alpha=0.6)
color=['blue','red','green','dark']
if not isinstance(data,list):
data=[data]
for i,dd in enumerate(data):
ax.scatter(dd.iloc[:,0], dd.iloc[:,1], c=color[i], s=50,
label=dd.columns[1])
for _, row in dd.iterrows():
ax.annotate(row.name, (row.iloc[0], row.iloc[1]), color=color[i],fontproperties=myfont,fontsize=10)
ax.axis('equal')
if legend:
ax.legend(loc='best')
if title:
ax.set_title(title,fontproperties=myfont)
return fig
def sankey(df,filename=None):
'''Sankey diagram helper
The columns of df are the left-hand nodes and its rows are the right-hand nodes.
Note: no satisfactory Python plotting method was found, so this only generates the
data required by the R networkD3 package.
Returns links and nodes.
# R code 参考
library(networkD3)
dd=read.csv('price_links.csv')
links<-data.frame(source=dd$from,target=dd$to,value=dd$value)
nodes=read.csv('price_nodes.csv',encoding = 'UTF-8')
nodes<-nodes['name']
Energy=c(links=links,nodes=nodes)
sankeyNetwork(Links = links, Nodes = nodes, Source = "source",
Target = "target", Value = "value", NodeID = "name",
units = "TWh",fontSize = 20,fontFamily='微软雅黑',nodeWidth=20)
'''
nodes=['Total']
nodes=nodes+list(df.columns)+list(df.index)
nodes=pd.DataFrame(nodes)
nodes['id']=range(len(nodes))
nodes.columns=['name','id']
R,C=df.shape
t1=pd.DataFrame(df.to_numpy(),columns=range(1,C+1),index=range(C+1,R+C+1))
t1.index.name='to'
t1.columns.name='from'
links=t1.unstack().reset_index(name='value')
links0=pd.DataFrame({'from':[0]*C,'to':range(1,C+1),'value':list(df.sum())})
links=pd.concat([links0,links])  # DataFrame.append was removed from pandas; use pd.concat
if filename:
links.to_csv(filename+'_links.csv',index=False,encoding='utf-8')
nodes.to_csv(filename+'_nodes.csv',index=False,encoding='utf-8')
return (links,nodes)
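# Editor's note (usage sketch; the labels are invented for illustration):
# flows = pd.DataFrame([[10, 5], [3, 12]], index=['kept', 'churned'], columns=['plan A', 'plan B'])
# links, nodes = sankey(flows, filename='price')   # writes price_links.csv and price_nodes.csv
# The two CSV files can then be fed to the R networkD3 snippet shown in the docstring above.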
def table(data,code,total=True):
'''
Descriptive statistics for a single question.
code is the coding metadata of data, which may have more than one column.
Returns a dict with:
'fop': percentages; they sum to 1 for single-choice questions, while for multiple-choice questions the denominator is the sample size
'fo': observed frequency table, with a total row appended
'fw': weighted statistic (e.g. mean score, top-two-box); only present when the key 'weight' exists in code
'''
# single-choice questions
qtype=code['qtype']
index=code['qlist']
data=pd.DataFrame(data)
sample_len=data[code['qlist']].notnull().T.any().sum()
result={}
if qtype == u'单选题':
fo=data.iloc[:,0].value_counts()
if 'weight' in code:
w=pd.Series(code['weight'])
fo1=fo[w.index][fo[w.index].notnull()]
fw=(fo1*w).sum()/fo1.sum()
result['fw']=fw
fo.sort_values(ascending=False,inplace=True)
fop=fo.copy()
fop=fop/fop.sum()*1.0
fop[u'合计']=fop.sum()
fo[u'合计']=fo.sum()
if 'code' in code:
fop.rename(index=code['code'],inplace=True)
fo.rename(index=code['code'],inplace=True)
fop.name=u'占比'
fo.name=u'频数'
fop=pd.DataFrame(fop)
fo= | pd.DataFrame(fo) | pandas.DataFrame |
from unittest import TestCase
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from datasets.formatting import NumpyFormatter, PandasFormatter, PythonFormatter, query_table
from datasets.formatting.formatting import NumpyArrowExtractor, PandasArrowExtractor, PythonArrowExtractor
from datasets.table import InMemoryTable
from .utils import require_tf, require_torch
_COL_A = [0, 1, 2]
_COL_B = ["foo", "bar", "foobar"]
_COL_C = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
_INDICES = [1, 0]
class ArrowExtractorTest(TestCase):
def _create_dummy_table(self):
return pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C})
def test_python_extractor(self):
pa_table = self._create_dummy_table()
extractor = PythonArrowExtractor()
row = extractor.extract_row(pa_table)
self.assertEqual(row, {"a": _COL_A[0], "b": _COL_B[0], "c": _COL_C[0]})
col = extractor.extract_column(pa_table)
self.assertEqual(col, _COL_A)
batch = extractor.extract_batch(pa_table)
self.assertEqual(batch, {"a": _COL_A, "b": _COL_B, "c": _COL_C})
def test_numpy_extractor(self):
pa_table = self._create_dummy_table()
extractor = NumpyArrowExtractor()
row = extractor.extract_row(pa_table)
np.testing.assert_equal(row, {"a": _COL_A[0], "b": _COL_B[0], "c": np.array(_COL_C[0])})
col = extractor.extract_column(pa_table)
np.testing.assert_equal(col, np.array(_COL_A))
batch = extractor.extract_batch(pa_table)
np.testing.assert_equal(batch, {"a": np.array(_COL_A), "b": np.array(_COL_B), "c": np.array(_COL_C)})
def test_numpy_extractor_np_array_kwargs(self):
pa_table = self._create_dummy_table().drop(["b"])
extractor = NumpyArrowExtractor(dtype=np.float16)
row = extractor.extract_row(pa_table)
self.assertEqual(row["c"].dtype, np.dtype(np.float16))
col = extractor.extract_column(pa_table)
self.assertEqual(col.dtype, np.float16)
batch = extractor.extract_batch(pa_table)
self.assertEqual(batch["a"].dtype, np.dtype(np.float16))
self.assertEqual(batch["c"].dtype, np.dtype(np.float16))
def test_pandas_extractor(self):
pa_table = self._create_dummy_table()
extractor = PandasArrowExtractor()
row = extractor.extract_row(pa_table)
self.assertIsInstance(row, pd.DataFrame)
pd.testing.assert_series_equal(row["a"], pd.Series(_COL_A, name="a")[:1])
pd.testing.assert_series_equal(row["b"], pd.Series(_COL_B, name="b")[:1])
pd.testing.assert_series_equal(row["c"], pd.Series(_COL_C, name="c")[:1])
col = extractor.extract_column(pa_table)
pd.testing.assert_series_equal(col, pd.Series(_COL_A, name="a"))
batch = extractor.extract_batch(pa_table)
self.assertIsInstance(batch, pd.DataFrame)
pd.testing.assert_series_equal(batch["a"], pd.Series(_COL_A, name="a"))
pd.testing.assert_series_equal(batch["b"], pd.Series(_COL_B, name="b"))
pd.testing.assert_series_equal(batch["c"], pd.Series(_COL_C, name="c"))
class FormatterTest(TestCase):
def _create_dummy_table(self):
return pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C})
def test_python_formatter(self):
pa_table = self._create_dummy_table()
formatter = PythonFormatter()
row = formatter.format_row(pa_table)
self.assertEqual(row, {"a": _COL_A[0], "b": _COL_B[0], "c": _COL_C[0]})
col = formatter.format_column(pa_table)
self.assertEqual(col, _COL_A)
batch = formatter.format_batch(pa_table)
self.assertEqual(batch, {"a": _COL_A, "b": _COL_B, "c": _COL_C})
def test_numpy_formatter(self):
pa_table = self._create_dummy_table()
formatter = NumpyFormatter()
row = formatter.format_row(pa_table)
np.testing.assert_equal(row, {"a": _COL_A[0], "b": _COL_B[0], "c": np.array(_COL_C[0])})
col = formatter.format_column(pa_table)
np.testing.assert_equal(col, np.array(_COL_A))
batch = formatter.format_batch(pa_table)
np.testing.assert_equal(batch, {"a": np.array(_COL_A), "b": np.array(_COL_B), "c": np.array(_COL_C)})
def test_numpy_formatter_np_array_kwargs(self):
pa_table = self._create_dummy_table().drop(["b"])
formatter = NumpyFormatter(dtype=np.float16)
row = formatter.format_row(pa_table)
self.assertEqual(row["c"].dtype, np.dtype(np.float16))
col = formatter.format_column(pa_table)
self.assertEqual(col.dtype, np.float16)
batch = formatter.format_batch(pa_table)
self.assertEqual(batch["a"].dtype, np.dtype(np.float16))
self.assertEqual(batch["c"].dtype, np.dtype(np.float16))
def test_pandas_formatter(self):
pa_table = self._create_dummy_table()
formatter = PandasFormatter()
row = formatter.format_row(pa_table)
self.assertIsInstance(row, pd.DataFrame)
pd.testing.assert_series_equal(row["a"], pd.Series(_COL_A, name="a")[:1])
pd.testing.assert_series_equal(row["b"], pd.Series(_COL_B, name="b")[:1])
pd.testing.assert_series_equal(row["c"], | pd.Series(_COL_C, name="c") | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 25 16:14:12 2019
@author: <NAME>
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#import graphviz
import os
import seaborn as sns
from scipy.stats import chi2_contingency
os.chdir(r"E:\PYTHON NOTES\projects\cab fare prediction")  # raw string avoids invalid escape sequences
dataset_train=pd.read_csv("train_cab.csv")
dataset_test=pd.read_csv("test.csv")
dataset_train.describe()
# dimensions of the data
dataset_train.shape
# Number of rows
dataset_train.shape[0]
# number of columns
dataset_train.shape[1]
# name of columns
list(dataset_train)
# data details
dataset_train.info()
dataset_train.isnull().sum()
dataset_test.isnull().sum()
sns.heatmap(dataset_train.isnull(),yticklabels=False,cbar=False, cmap='coolwarm')
# convert pickup_datetime into the required datetime format
data=[dataset_train,dataset_test]
for i in data:
i["pickup_datetime"]=pd.to_datetime(i["pickup_datetime"],errors="coerce")
dataset_train.info()
dataset_test.info()
dataset_train.isnull().sum()
dataset_test.isna().sum()
dataset_train=dataset_train.dropna(subset=["pickup_datetime"],how="all")
# fare_amount contains one malformed entry ("430-"); fix it before casting to float
np.where(dataset_train["fare_amount"]=="430-")
dataset_train["fare_amount"].loc[1123]=430
dataset_train["fare_amount"]=dataset_train["fare_amount"].astype(float)
# we will convert passenger count into a categorical variable, because passenger count is not a continuous variable
dataset_obj=["passenger_count"]
dataset_int=["fare_amount","pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]
# data visulization
import seaborn as sns
import matplotlib.pyplot as plt
# setting up the seaborn style for plots
sns.set(style="darkgrid",palette="Set1")
# histogram plots from the seaborn library
plt.figure(figsize=(20,20))
plt.subplot(321)
_=sns.distplot(dataset_train["fare_amount"],bins=50)
plt.subplot(322)
_=sns.distplot(dataset_train["pickup_longitude"],bins=50)
plt.subplot(323)
_=sns.distplot(dataset_train["pickup_latitude"],bins=50)
plt.subplot(324)
_ = sns.distplot(dataset_train['dropoff_longitude'],bins=50)
plt.subplot(325)
_ = sns.distplot(dataset_train['dropoff_latitude'],bins=50)
plt.show()
plt.savefig('hist.png')
import scipy.stats as stats
#Some Bee Swarmplots
# plt.title('Cab Fare w.r.t passenger_count')
plt.figure(figsize=(25,25))
#_=sns.swarmplot(x="passenger_count",y="fare_amount",data=dataset_train)
#Jointplots for Bivariate Analysis.
#Here Scatter plot has regression line between 2 variables along with separate Bar plots of both variables.
#Also it is annotated with the Pearson correlation coefficient and p-value.
_=sns.jointplot(x="fare_amount",y="pickup_longitude",data=dataset_train,kind="reg")
_.annotate(stats.pearsonr)
#plt.savefig("jointfplo.png")
plt.show()
_=sns.jointplot(x="fare_amount",y="pickup_latitude",data=dataset_train,kind="reg")
_.annotate(stats.pearsonr)
_=sns.jointplot(x="fare_amount",y="dropoff_longitude",data=dataset_train,kind="reg")
_.annotate(stats.pearsonr)
_=sns.jointplot(x="fare_amount",y="dropoff_latitude",data=dataset_train,kind="reg")
_.annotate(stats.pearsonr)
# violin plots to see the spread of each variable
plt.figure(figsize=(20,20))
plt.subplot(321)
_=sns.violinplot(y="fare_amount",data=dataset_train)
plt.subplot(322)
_=sns.violinplot(y="pickup_longitude",data=dataset_train)
plt.subplot(323)
_ = sns.violinplot(y='pickup_latitude',data=dataset_train)
plt.subplot(324)
_ = sns.violinplot(y='dropoff_longitude',data=dataset_train)
plt.subplot(325)
_ = sns.violinplot(y='dropoff_latitude',data=dataset_train)
plt.savefig("violine.png")
plt.show()
# pair plot for all numeric variables
_=sns.pairplot(dataset_train.loc[:,dataset_int],kind="scatter",dropna=True)
_.fig.suptitle("pairwise plot of all numeric variables")
#plt.savefig("pairwise.png")
plt.show()
# removing outlier values which are not within the desired range, based on a basic understanding of the dataset
#1. Fare amount has negative values, which doesn't make sense. A fare cannot be negative and also cannot be 0, so we will remove these rows.
sum(dataset_train["fare_amount"]<1)
dataset_train[dataset_train["fare_amount"]<1]
dataset_train=dataset_train.drop(dataset_train[dataset_train["fare_amount"]<1].index,axis=0)
#dataset_train.loc[dataset_train["fare_amount"]<1,"fare_amount"]=np.nan
#2. passenger count variable // passenger count cannot be more than 6
sum(dataset_train["passenger_count"]>6)
for i in range (4,11):
print("passenger_count_above"+ str(i)+ "={}".format(sum(dataset_train["passenger_count"]>i)))
# so 20 observations of passenger_count is consistenly above from 6,7,8,9,10 passenger_counts, let's check them.
dataset_train[dataset_train["passenger_count"]>6]
#Also we need to see if there are any passenger_count<1
dataset_train[dataset_train["passenger_count"]<1]
len(dataset_train[dataset_train["passenger_count"]<1])
dataset_test["passenger_count"].unique()
# We will remove the 20 observations with passenger_count above 6 because a cab cannot hold that many passengers.
dataset_train=dataset_train.drop(dataset_train[dataset_train["passenger_count"]<1].index,axis=0)
dataset_train=dataset_train.drop(dataset_train[dataset_train["passenger_count"]>6].index,axis=0)
#dataset_train.loc[dataset_train["passenger_count"]<1,"passenger_count"]=np.nan
#dataset_train.loc[dataset_train["passenger_count"]>6,"passenger_count"]=np.nan
sum(dataset_train["passenger_count"]<1)
#3.Latitudes range from -90 to 90.Longitudes range from -180 to 180. Removing which does not satisfy these ranges
print("pickup_longitude above 180 ={}".format(sum(dataset_train["pickup_longitude"]>180)))
print("pickup_longitude above -180 = {}".format(sum(dataset_train["pickup_longitude"]<-180)))
print("pickup_latitude above 90 ={}".format(sum(dataset_train["pickup_latitude"]>90)))
print("pickup_latitude above -90 ={}".format(sum(dataset_train["pickup_latitude"]<-90)))
print('dropoff_longitude above 180={}'.format(sum(dataset_train['dropoff_longitude']>180)))
print('dropoff_longitude below -180={}'.format(sum(dataset_train['dropoff_longitude']<-180)))
print('dropoff_latitude below -90={}'.format(sum(dataset_train['dropoff_latitude']<-90)))
print('dropoff_latitude above 90={}'.format(sum(dataset_train['dropoff_latitude']>90)))
#for test data
print("pickup_longitude above 180 ={}".format(sum(dataset_test["pickup_longitude"]>180)))
print("pickup_longitude above -180 = {}".format(sum(dataset_test["pickup_longitude"]<-180)))
print("pickup_latitude above 90 ={}".format(sum(dataset_test["pickup_latitude"]>90)))
print("pickup_latitude above -90 ={}".format(sum(dataset_test["pickup_latitude"]<-90)))
print('dropoff_longitude above 180={}'.format(sum(dataset_test['dropoff_longitude']>180)))
print('dropoff_longitude below -180={}'.format(sum(dataset_test['dropoff_longitude']<-180)))
print('dropoff_latitude below -90={}'.format(sum(dataset_test['dropoff_latitude']<-90)))
print('dropoff_latitude above 90={}'.format(sum(dataset_test['dropoff_latitude']>90)))
#There's only one outlier which is in variable pickup_latitude.So we will remove it with nan.
#Also we will see if there are any values equal to 0.
for i in ["pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]:
print(i,"equal to 0={}".format(sum(dataset_train[i]==0)))
#for test data
for i in ["pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]:
print(i,"equal to 0={}".format(sum(dataset_test[i]==0)))
#there are values which are equal to 0. we will remove them.
# There's only one outlier which is in variable pickup_latitude.So we will remove it with nan
dataset_train=dataset_train.drop(dataset_train[dataset_train["pickup_latitude"]>90].index,axis=0)
#there are values which are equal to 0. we will remove them.
for i in ["pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]:
dataset_train=dataset_train.drop(dataset_train[dataset_train[i]==0].index,axis=0)
# for i in ['pickup_longitude','pickup_latitude','dropoff_longitude','dropoff_latitude']:
# train.loc[train[i]==0,i] = np.nan
# train.loc[train['pickup_latitude']>90,'pickup_latitude'] = np.nan
dataset_train.shape
#Missing Value Analysis
missing_value=dataset_train.isnull().sum()
missing_value = missing_value.reset_index()
missing_value = missing_value.rename(columns = {'index': 'Variables', 0: 'Missing_percentage'})
missing_value
#find out percentage of null value
missing_value['Missing_percentage'] = (missing_value['Missing_percentage']/len(dataset_train))*100
missing_value = missing_value.sort_values('Missing_percentage', ascending = False).reset_index(drop = True)
dataset_train.info()
dataset_train["fare_amount"]=dataset_train["fare_amount"].fillna(dataset_train["fare_amount"].median())
dataset_train["passenger_count"]=dataset_train["passenger_count"].fillna(dataset_train["passenger_count"].mode()[0])
dataset_train.isnull().sum()
dataset_train["passenger_count"]=dataset_train["passenger_count"].round().astype(object)
dataset_train["passenger_count"].unique()
# outlier analysis using box plots
plt.figure(figsize=(20,5))
plt.xlim(0,100)
sns.boxplot(x=dataset_train["fare_amount"],data=dataset_train,orient="h")
# sum(dataset_train['fare_amount']<22.5)/len(dataset_train['fare_amount'])*100
#Bivariate Boxplots: Boxplot for Numerical Variable Vs Categorical Variable.
plt.figure(figsize=(20,10))
plt.xlim(0,100)
_=sns.boxplot(x=dataset_train["fare_amount"],y=dataset_train["passenger_count"],data=dataset_train,orient="h")
def outlier_detect(df):
for i in df.describe().columns:
q1=df.describe().at["25%",i]
q3=df.describe().at["75%",i]
IQR=(q3-q1)
ltv=(q1-1.5*IQR)
utv=(q3+1.5*IQR)
x=np.array(df[i])
p=[]
for j in x:
if j<ltv:
p.append(ltv)
elif j>utv:
p.append(utv)
else:
p.append(j)
df[i]=p
return (df)
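# Editor's note (worked example): if a column has q1=5 and q3=15, then IQR=10,
# ltv = 5 - 1.5*10 = -10 and utv = 15 + 1.5*10 = 30, so values below -10 are capped
# to -10, values above 30 are capped to 30, and everything in between is unchanged.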
dataset_int1=outlier_detect(dataset_train.loc[:,dataset_int])
dataset_test_obj=["passenger_count"]
dataset_test_int=["pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]
dataset_test1=outlier_detect(dataset_test.loc[:,dataset_test_int])
dataset_test1=pd.concat([dataset_test1,dataset_test["passenger_count"]],axis=1)
dataset_test=pd.concat([dataset_test1,dataset_test["pickup_datetime"]],axis=1)
#determine corr
corr=dataset_int1.corr()
f,ax=plt.subplots(figsize=(7,5))
sns.heatmap(corr,mask=np.zeros_like(corr,dtype=bool),cmap=sns.diverging_palette(220,10,as_cmap=True),square=True,ax=ax)  # np.bool was removed from NumPy; use the built-in bool
# """feature engineering"""
#1.we will derive new features from pickup_datetime variable
#new features will be year,month,day_of_week,hour
dataset_train1= | pd.concat([dataset_int1,dataset_train["passenger_count"]],axis=1) | pandas.concat |
import pandas as pd
import numpy as np
import click
import h5py
import os
import logging
from array import array
from copy import deepcopy
from tqdm import tqdm
from astropy.io import fits
from fact.credentials import create_factdb_engine
from zfits import FactFits
from scipy.optimize import curve_fit
from joblib import Parallel, delayed
import drs4Calibration.config as config
from drs4Calibration.constants import NRCHID, NRCELL, NRTEMPSENSOR, ROI, ADCCOUNTSTOMILIVOLT
from drs4Calibration.tools import safety_stuff
import matplotlib.pyplot as plt
from time import time
def print_delta_time(time, string=""):
hours = int(time / 3600)
rest = time % 3600
minutes = int(rest / 60)
seconds = round(rest % 60, 2)
print(string+" deltaTime: ", hours, minutes, seconds)
@click.command()
@click.argument('drs_file_list_doc_path',
default="/net/big-tank/POOL/" +
"projects/fact/drs4_calibration_data/" +
"calibration/calculation/drsFitsFiles.txt",
type=click.Path(exists=False))
def search_drs_fits_files(drs_file_list_doc_path: str):
'''
Search through the fact-database and store the path of all drsFiles
under the given storePath
Args:
drs_file_list_doc_path (str):
Full path to the storeFile
with the extension '.txt'
'''
# TODO check safety stuff. maybe remove
#safety_stuff(drs_file_list_doc_path)
def filename(row):
return os.path.join(
str(row.date.year),
"{:02d}".format(row.date.month),
"{:02d}".format(row.date.day),
"{}_{:03d}.fits.fz".format(row.fNight, row.fRunID),
)
# 40drs4320Bias
drs_infos = pd.read_sql(
"RunInfo",
create_factdb_engine(),
columns=[
"fNight", "fRunID",
"fRunTypeKey", "fDrsStep",
"fNumEvents"])
drs_file_infos = drs_infos.query("fRunTypeKey == 2 &" +
"fDrsStep == 2 &" +
"fNumEvents == 1000").copy()
# fNumEvents == 1000 guards against unfinished/broken files
drs_file_infos["date"] = pd.to_datetime(drs_file_infos.fNight.astype(str),
format="%Y%m%d")
drs_files = drs_file_infos.apply(filename, axis=1).tolist()
pd.DataFrame(drs_files).to_csv(drs_file_list_doc_path, index=False,
header=False)
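# Editor's note (example of the generated paths): a run with fNight=20160101 and
# fRunID=5 is written as "2016/01/01/20160101_005.fits.fz", one line per file in
# drs_file_list_doc_path.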
@click.command()
@click.argument('drs_file_list_doc_path',
default="/net/big-tank/POOL/" +
"projects/fact/drs4_calibration_data/" +
"calibration/calculation/selectedDrsFitsFiles.txt",
type=click.Path(exists=True))
@click.argument('store_file_path',
default="/net/big-tank/POOL/" +
"projects/fact/drs4_calibration_data/" +
"calibration/calculation/newBaseline_timeTest.h5",
type=click.Path(exists=False))
@click.argument('source_folder_path',
default="/net/big-tank/POOL/projects/fact/drs4_calibration_data/",
type=click.Path(exists=False))
def store_drs_values(drs_file_list_doc_path, store_file_path, source_folder_path):
with h5py.File(store_file_path, 'w') as hf:
hf.create_dataset(
name="Time", dtype="float32",
shape=(0, 1), maxshape=(None, 1),
compression="gzip", compression_opts=9,
fletcher32=True)
hf.create_dataset(
name="Temperature", dtype="float32",
shape=(0, NRTEMPSENSOR), maxshape=(None, NRTEMPSENSOR),
compression="gzip", compression_opts=9,
fletcher32=True)
hf.create_dataset(
name="NewBaseline", dtype="float32",
shape=(0, NRCHID*NRCELL*ROI), maxshape=(None, NRCHID*NRCELL*ROI),
compression="gzip", compression_opts=9,
fletcher32=True)
class SourceDataSet:
# @resettable
run_begin = pd.to_datetime("")
run_end = | pd.to_datetime("") | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 4 10:30:17 2018
@author: avelinojaver
"""
from tierpsy.features.tierpsy_features.summary_stats import get_summary_stats
from tierpsy.summary.helper import augment_data, add_trajectory_info
from tierpsy.summary.filtering import filter_trajectories
from tierpsy.helper.params import read_fps, read_microns_per_pixel
from tierpsy.helper.misc import WLAB,print_flush
from tierpsy.analysis.split_fov.helper import was_fov_split
from tierpsy.analysis.split_fov.FOVMultiWellsSplitter import FOVMultiWellsSplitter
import pandas as pd
import pdb
#%%
def time_to_frame_nb(time_windows,time_units,fps,timestamp,fname):
"""
Converts the time windows to units of frame numbers (if they were defined in seconds).
It also defines the end frame of a window, if the index is set to -1 (end).
"""
from copy import deepcopy
if timestamp.empty:
return
time_windows_frames = deepcopy(time_windows)
if time_units == 'seconds':
assert fps!=-1, 'Cannot convert time windows to frame numbers. Frames per second ratio not known.'
for iwin, win in enumerate(time_windows_frames):
for iinterval in range(len(win)):
for ilim in range(2):
if time_windows_frames[iwin][iinterval][ilim]!=-1:
time_windows_frames[iwin][iinterval][ilim] = \
round(time_windows_frames[iwin][iinterval][ilim]*fps)
last_frame = timestamp.sort_values().iloc[-1]
for iwin, win in enumerate(time_windows_frames):
for iinterval in range(len(win)):
# If a window ends with -1, replace with the frame number of the
# last frame (or the start frame of the window+1 if window out of bounds)
if time_windows_frames[iwin][iinterval][1]==-1:
time_windows_frames[iwin][iinterval][1] = \
max(last_frame+1, time_windows_frames[iwin][iinterval][0])
# If a window is out of bounds, print warning
if time_windows_frames[iwin][iinterval][0]>last_frame:
print_flush(
'Warning: The start time of interval '+
'{}/{} '.format(iinterval+1, len(win)) +
'of window {} '.format(iwin) +
'is out of bounds of file \'{}\'.'.format(fname))
return time_windows_frames
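# Editor's note (worked example): with time_units='seconds' and fps=25, a window
# [[0, 60], [120, -1]] becomes [[0, 1500], [3000, last_frame+1]] in frame numbers;
# the -1 end marker is replaced by the frame after the last recorded timestamp.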
def no_attr_flush(attr, fname):
if attr=='fps':
out = ['seconds', 'frames_per_second', fname, 'frame numbers']
elif attr=='mpp':
out = ['microns', 'microns_per_pixel', fname, 'pixels']
print_flush(
"""
Warning: some of the summarizer input were given in {0}, but the {1}
ratio for file \'{2}\' is unknown. Give input in {3} instead.
""".format(*out)
)
return
def _no_fps(time_units, fps, fname):
if fps==-1:
if time_units=='seconds':
no_attr_flush('fps', fname)
return True
return False
def _match_units(filter_params, fps, fname):
"""
author: EM
The filtering thresholds must match the timeseries units. If the right
conversion is not possible, then check_ok is False, and the feature
summaries will not be calculated for this file.
"""
from copy import deepcopy
if filter_params is None:
return filter_params, True
all_units = filter_params['units']+[filter_params['time_units']]
cfilter_params = deepcopy(filter_params)
if fps==-1:
# In this case, all time-related timeseries will be in frames.
# If thresholds have been defined in seconds there is no way to convert.
if 'seconds' in all_units:
no_attr_flush('fps', fname)
return cfilter_params, False
else:
# In this case, all time-related timeseries will be in seconds.
# We always want the time_units for traj_length in frames
if filter_params['time_units']=='seconds' and \
filter_params['min_traj_length'] is not None:
cfilter_params['min_traj_length'] = \
filter_params['min_traj_length']*fps
# If the timeseries thresholds are defined in seconds, no conversion is
# necessary
# If the timeseries thresholds are defined in frames, we need to convert
# to seconds
if 'frame_numbers' in filter_params['units']:
ids = [i for i,x in enumerate(filter_params['units']) if x=='frame_numbers']
for i in ids:
if filter_params['min_thresholds'][i] is not None:
cfilter_params['min_thresholds'][i]= \
filter_params['min_thresholds'][i]/fps
if filter_params['max_thresholds'][i] is not None:
cfilter_params['max_thresholds'][i]= \
filter_params['max_thresholds'][i]/fps
mpp = read_microns_per_pixel(fname)
if mpp==-1:
# In this case, all distance-related timeseries will be in pixels.
# If thresholds have been defined in microns there is no way to convert.
if 'microns' in all_units:
no_attr_flush('mpp', fname)
return cfilter_params, False
else:
# In this case, all distance-related timeseries will be in microns.
# If the timeseries thresholds are defined in microns, no conversion is
# necessary
# If the timeseries thresholds are defined in pixels, we need to convert
# to microns
if filter_params['distance_units']=='pixels' and \
filter_params['min_distance_traveled'] is not None:
cfilter_params['min_distance_traveled'] = \
filter_params['min_distance_traveled']*mpp
if 'pixels' in filter_params['units']:
ids = [i for i,x in enumerate(filter_params['units']) if x=='pixels']
for i in ids:
if filter_params['min_thresholds'][i] is not None:
cfilter_params['min_thresholds'][i]= \
filter_params['min_thresholds'][i]*mpp
if filter_params['max_thresholds'][i] is not None:
cfilter_params['max_thresholds'][i]= \
filter_params['max_thresholds'][i]*mpp
return cfilter_params, True
#%%
def read_data(fname, filter_params, time_windows, time_units, fps, is_manual_index):
"""
Reads the timeseries_data and the blob_features for a given file within every time window.
return:
timeseries_data_list: list of timeseries_data for each time window (length of lists = number of windows)
blob_features_list: list of blob_features for each time window (length of lists = number of windows)
"""
import numpy as np
# EM: If time_units=seconds and fps is not defined, then return None with warning of no fps.
# Make this check here, to avoid wasting time reading the file
if _no_fps(time_units, fps, fname):
return
cfilter_params, check_ok = _match_units(filter_params, fps, fname)
if not check_ok:
return
with pd.HDFStore(fname, 'r') as fid:
timeseries_data = fid['/timeseries_data']
blob_features = fid['/blob_features']
if is_manual_index:
#keep only data labeled as worm or worm clusters
valid_labels = [WLAB[x] for x in ['WORM', 'WORMS']]
trajectories_data = fid['/trajectories_data']
if not 'worm_index_manual' in trajectories_data:
#no manual index, nothing to do here
return
good = trajectories_data['worm_label'].isin(valid_labels)
good = good & (trajectories_data['skeleton_id'] >= 0)
skel_id = trajectories_data['skeleton_id'][good]
timeseries_data = timeseries_data.loc[skel_id]
timeseries_data['worm_index'] = trajectories_data['worm_index_manual'][good].values
timeseries_data = timeseries_data.reset_index(drop=True)
blob_features = blob_features.loc[skel_id].reset_index(drop=True)
if timeseries_data.empty:
#no data, nothing to do here
return
# convert time windows to frame numbers for the given file
time_windows_frames = time_to_frame_nb(
time_windows, time_units, fps, timeseries_data['timestamp'], fname
)
# EM: Filter trajectories
if cfilter_params is not None:
timeseries_data, blob_features = \
filter_trajectories(timeseries_data, blob_features, **cfilter_params)
if timeseries_data.empty:
#no data, nothing to do here
return
# EM: extract the timeseries_data and blob_features corresponding to each
# time window and store them in a list (length of lists = number of windows)
timeseries_data_list = []
blob_features_list = []
for window in time_windows_frames:
in_window = []
for interval in window:
in_interval = (timeseries_data['timestamp']>=interval[0]) & \
(timeseries_data['timestamp']<interval[1])
in_window.append(in_interval.values)
in_window = np.any(in_window, axis=0)
timeseries_data_list.append(timeseries_data.loc[in_window, :].reset_index(drop=True))
blob_features_list.append(blob_features.loc[in_window].reset_index(drop=True))
return timeseries_data_list, blob_features_list
def count_skeletons(timeseries):
cols = [col for col in timeseries.columns if col.startswith('eigen')]
return (~timeseries[cols].isna().any(axis=1)).sum()
#%%
def tierpsy_plate_summary(
fname, filter_params, time_windows, time_units,
only_abs_ventral=False, selected_feat=None,
is_manual_index=False, delta_time=1/3):
"""
Calculate the plate summaries for a given file fname, within a given time window
(units of start time and end time are in frame numbers).
"""
fps = read_fps(fname)
data_in = read_data(
fname, filter_params, time_windows, time_units, fps, is_manual_index)
# if manual annotation was chosen and the trajectories_data does not contain
# worm_index_manual, then data_in is None
# if time_windows in seconds and fps is not defined (fps=-1), then data_in is None
if data_in is None:
return [pd.DataFrame() for iwin in range(len(time_windows))]
timeseries_data, blob_features = data_in
# was the fov split in wells? only use the first window to detect that,
# and to extract the list of well names
is_fov_tosplit = was_fov_split(fname)
# is_fov_tosplit = False
if is_fov_tosplit:
fovsplitter = FOVMultiWellsSplitter(fname)
good_wells_df = fovsplitter.wells[['well_name','is_good_well']].copy()
# print(good_wells_df)
# initialize list of plate summaries for all time windows
plate_feats_list = []
for iwin,window in enumerate(time_windows):
if is_fov_tosplit == False:
plate_feats = get_summary_stats(
timeseries_data[iwin], fps, blob_features[iwin], delta_time,
only_abs_ventral=only_abs_ventral,
selected_feat=selected_feat
)
plate_feats['n_skeletons'] = count_skeletons(timeseries_data[iwin])
plate_feats_list.append(pd.DataFrame(plate_feats).T)
else:
# get list of well names in this time window
# (maybe some wells looked empty during a whole window,
# this prevents errors later on)
well_names_list = list(set(timeseries_data[iwin]['well_name']) - set(['n/a']))
# create a list of well-specific, one-line long dataframes
well_feats_list = []
for well_name in well_names_list:
# find entries in timeseries_data[iwin] belonging to the right well
idx_well = timeseries_data[iwin]['well_name'] == well_name
well_feats = get_summary_stats(
timeseries_data[iwin][idx_well].reset_index(), fps,
blob_features[iwin][idx_well].reset_index(), delta_time,
only_abs_ventral=only_abs_ventral,
selected_feat=selected_feat
)
well_feats['n_skeletons'] = count_skeletons(timeseries_data[iwin][idx_well])
# first prepend the well_name_s to the well_feats series,
# then transpose it so it is a single-row dataframe,
# and append it to the well_feats_list
well_name_s = pd.Series({'well_name':well_name})
well_feats_list.append(pd.DataFrame(pd.concat([well_name_s,well_feats])).T)
# check: did we find any well?
if len(well_feats_list) == 0:
plate_feats_list.append(pd.DataFrame())
else:
# now concatenate all the single-row df in well_feats_list in a single df
# and append it to the growing list (1 entry = 1 window)
plate_feats = pd.concat(well_feats_list, ignore_index=True, sort=False)
# import pdb; pdb.set_trace()
plate_feats = plate_feats.merge(good_wells_df,
on='well_name',
how='left')
plate_feats_list.append(plate_feats)
return plate_feats_list
def tierpsy_trajectories_summary(
fname, filter_params, time_windows, time_units,
only_abs_ventral=False, selected_feat=None,
is_manual_index=False, delta_time=1/3):
"""
Calculate the trajectory summaries for a given file fname, within a given time window
(units of start time and end time are in frame numbers).
"""
fps = read_fps(fname)
data_in = read_data(
fname, filter_params, time_windows, time_units, fps, is_manual_index)
if data_in is None:
return [pd.DataFrame() for iwin in range(len(time_windows))]
timeseries_data, blob_features = data_in
is_fov_tosplit = was_fov_split(fname)
# is_fov_tosplit = False
if is_fov_tosplit:
fovsplitter = FOVMultiWellsSplitter(fname)
good_wells_df = fovsplitter.wells[['well_name','is_good_well']].copy()
# print(good_wells_df)
# initialize list of summaries for all time windows
all_summaries_list = []
# loop over time windows
for iwin,window in enumerate(time_windows):
if timeseries_data[iwin].empty:
all_summary = pd.DataFrame([])
else:
# initialize list of trajectory summaries for given time window
all_summary = []
# loop over worm indexes (individual trajectories)
for w_ind, w_ts_data in timeseries_data[iwin].groupby('worm_index'):
w_blobs = blob_features[iwin].loc[w_ts_data.index]
w_ts_data = w_ts_data.reset_index(drop=True)
w_blobs = w_blobs.reset_index(drop=True)
worm_feats = get_summary_stats(
w_ts_data, fps, w_blobs, delta_time,
only_abs_ventral=only_abs_ventral,
selected_feat=selected_feat
) # returns empty dataframe when w_ts_data is empty
worm_feats['n_skeletons'] = count_skeletons(w_ts_data)
worm_feats = pd.DataFrame(worm_feats).T
worm_feats = add_trajectory_info(
worm_feats, w_ind, w_ts_data, fps,
is_fov_tosplit=is_fov_tosplit)
all_summary.append(worm_feats)
# concatenate all trajectories in given time window into one dataframe
all_summary = | pd.concat(all_summary, ignore_index=True, sort=False) | pandas.concat |
#
import numpy
import pandas
def _lag_it(frame, n_lags):
frame_ = frame.copy()
if frame_.index.nlevels == 1:
frame_ = frame_.shift(periods=n_lags, axis=0)
elif frame_.index.nlevels == 2:
for ix in frame_.index.levels[0]:
frame_.loc[[ix], :] = frame_.loc[[ix], :].shift(periods=n_lags, axis=0)
else:
raise NotImplementedError()
return frame_
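# Editor's note (illustration): for a single-level index, _lag_it(df, 1) is just
# df.shift(1); for a two-level index the shift is applied within each first-level
# group, so lagged values never leak across groups (e.g. across different assets).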
def lag_it(frame, n_lags, exactly=True, keep_basic=True, suffix='_LAG'):
if exactly:
if keep_basic:
new_columns = [x + suffix + '0' for x in frame.columns.values] + [x + suffix + str(n_lags) for x in
frame.columns.values]
frame = pandas.concat((frame, _lag_it(frame=frame, n_lags=n_lags)), axis=1)
frame.columns = new_columns
else:
new_columns = [x + suffix + str(n_lags) for x in frame.columns.values]
frame = _lag_it(frame=frame, n_lags=n_lags)
frame.columns = new_columns
else:
if keep_basic:
new_columns = [x + suffix + '0' for x in frame.columns.values]
frames = [frame]
else:
new_columns = []
frames = []
for j in numpy.arange(start=1, stop=(n_lags + 1)):
new_columns = new_columns + [x + suffix + str(j) for x in frame.columns.values]
frames.append(_lag_it(frame=frame, n_lags=j))
frame = | pandas.concat(frames, axis=1) | pandas.concat |
### Model Training and Evaluation ###
# Author: <NAME>
from IPython import get_ipython
get_ipython().magic('reset -sf')
import os, shutil
import re
import csv
from utils import bigrams, trigram, replace_collocation
import timeit
import pandas as pd
import string
from nltk.stem import PorterStemmer
import numpy as np
import pickle
import random
from scipy import sparse
import itertools
from scipy.io import savemat, loadmat
import string
from sklearn.feature_extraction.text import CountVectorizer
from gensim.test.utils import datapath
from gensim.models import Word2Vec
from data_concatenate import *
import gensim.downloader
import pprint
from manetm import etm
pp = pprint.PrettyPrinter()
# =============================================================================
DATAPATH = os.path.expanduser("~/Dropbox/MPCounterfactual/src/etm/")
OVERLEAF = os.path.expanduser("~/Dropbox/Apps/Overleaf/FOMC_Summer2019/files")
if not os.path.exists(f"{DATAPATH}/full_results"):
os.makedirs(f"{DATAPATH}/full_results")
# =============================================================================
# #0 Set Parameters
# =============================================================================
# Dataset parameters
embphrase_itera = 2 # Number of phrase iterations
embthreshold = "inf" # Threshold value for collocations. If "inf": no collocations
emb_max_df = 1.0 # in a maximum of # % of documents if # is float.
emb_min_df = 1 # choose desired value for min_df // in a minimum of # documents
EMBDATASET = f"BBTSST_min{emb_min_df}_max{emb_max_df}_iter{embphrase_itera}_th{embthreshold}"
meetphrase_itera = 2
meetthreshold = "inf"
meetmax_df = 1.0
meetmin_df = 10
MEEETDATA = f"MEET_min{meetmin_df}_max{meetmax_df}_iter{meetphrase_itera}_th{meetthreshold}"
sta_phrase_itera = 2
sta_threshold = "inf"
sta_max_df = 1.0
sta_min_df = 5
STADATASET = f"STATEMENT_min{sta_min_df}_max{sta_max_df}_iter{sta_phrase_itera}_th{sta_threshold}"
# Skipgram parameters
mincount = 2
d_sg = 1
vectorsize = 300
iters = 100
cpus = 16
neg_samples = 10
windowsize = 4
# Activate code
d_construct = False
d_estemb = False
d_train = False
# =============================================================================
# #1 Data Preparation
# =============================================================================
if d_construct:
print("*" * 80)
print("Build datasets")
build_embdata(emb_max_df,emb_min_df,embphrase_itera,embthreshold,EMBDATASET)
build_meeting(meetmax_df,meetmin_df,meetphrase_itera,meetthreshold,MEEETDATA)
build_statement_data(sta_max_df,sta_min_df,sta_phrase_itera,sta_threshold,STADATASET)
print("*" * 80)
print("Datasets Construction Completed")
print("*" * 80)
print("\n")
# =============================================================================
# #2 Train Word Embeddings
# =============================================================================
if d_estemb:
# Run Skipgram
print(f"Run model: {EMBDATASET}\n")
sentences = pd.read_pickle(f"{DATAPATH}/data/{EMBDATASET}/corpus.pkl")
model = gensim.models.Word2Vec(sentences, min_count = mincount, sg = d_sg, vector_size = vectorsize, epochs = iters, workers = cpus, negative = neg_samples, window = windowsize)
model.save(f"{DATAPATH}/word2vecmodels/{EMBDATASET}")
# Write the embeddings to a file
with open(f"{DATAPATH}/embeddings/{EMBDATASET}_emb", 'w') as f:
for v in model.wv.index_to_key:
vec = list(model.wv[v])
f.write(v + ' ')
vec_str = ['%.9f' % val for val in vec]
vec_str = " ".join(vec_str)
f.write(vec_str + '\n')
print("*" * 80)
print(f"Embedding Training Completed")
print("*" * 80)
print("\n\n")
# =============================================================================
## #4 TRAIN TOPIC MODELS
# =============================================================================
# =============================================================================
## SPEAKERDATA - Pre-Trained Emb.
# speaker_ckpt = etm(f"{SPEAKERDATA}",data_path=f"{DATAPATH}/data/{SPEAKERDATA}",
# emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
# batch_size = 1000, epochs = 150, num_topics = 10, rho_size = 300,
# emb_size = 300, t_hidden_size = 800, theta_act = 'relu',
# train_embeddings = 0, lr = 0.005, lr_factor=4.0,
# mode = 'train', optimizer = 'adam',
# seed = 2019, enc_drop = 0.0, clip = 0.0,
# nonmono = 10, wdecay = 1.2e-6, anneal_lr = 0, bow_norm = 1,
# num_words =10, log_interval = 2, visualize_every = 10, eval_batch_size = 1000,
# load_from = "", tc = 1, td = 1)
#
# print(f"Evaluate model: {speaker_ckpt}")
# etm(f"{SPEAKERDATA}",data_path=f"{DATAPATH}/data/{SPEAKERDATA}",
# emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
# mode = 'eval', load_from = f"{speaker_ckpt}", train_embeddings = 0 ,tc = 1, td = 1)
#
# print(f"Output the topic distribution: {speaker_ckpt}")
# etm(f"{SPEAKERDATA}",data_path=f"{DATAPATH}/data/{SPEAKERDATA}",
# emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
# mode = 'retrieve',load_from = f"{speaker_ckpt}", train_embeddings = 0)
#
# =============================================================================
## MEETINGS - Pre-Trained Emb.
if d_train:
meeting_ckpt = etm(f"{MEEETDATA}",data_path=f"{DATAPATH}/data/{MEEETDATA}",
emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
batch_size = 1000, epochs = 2000, num_topics = 10, rho_size = 300,
emb_size = 300, t_hidden_size = 800, theta_act = 'relu',
train_embeddings = 0, lr = 0.005, lr_factor=4.0,
mode = 'train', optimizer = 'adam',
seed = 2019, enc_drop = 0.0, clip = 0.0,
nonmono = 10, wdecay = 1.2e-6, anneal_lr = 0, bow_norm = 1,
num_words =10, log_interval = 2, visualize_every = 10, eval_batch_size = 1000,
load_from = "", tc = 1, td = 1)
print(f"Evaluate model: {meeting_ckpt}")
etm(f"{MEEETDATA}",data_path=f"{DATAPATH}/data/{MEEETDATA}",
emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
mode = 'eval', load_from = f"{meeting_ckpt}", train_embeddings = 0 ,tc = 1, td = 1)
print(f"Output the topic distribution: {meeting_ckpt}")
etm(f"{MEEETDATA}",data_path=f"{DATAPATH}/data/{MEEETDATA}",
emb_path=f"{DATAPATH}/embeddings/{EMBDATASET}_emb",save_path=f"{DATAPATH}/results",
mode = 'retrieve',load_from = f"{meeting_ckpt}", train_embeddings = 0)
# =============================================================================
## #5 OUTPUT DATA
# =============================================================================
# =============================================================================
# ## SPEAKERDATA
# raw_df = pd.read_pickle(f"raw_data/{SPEAKERDATA}.pkl")
#
# idx_df = pd.read_pickle(f'{OUTPATH}/{SPEAKERDATA}/original_indices.pkl')
# idx_df = idx_df.set_index(0)
# idx_df["d"] = 1
#
# data = pd.concat([idx_df,raw_df],axis=1)
# data_clean = data[data["d"]==1].reset_index()
# dist_df = pd.read_pickle(f'{speaker_ckpt}tpdist.pkl')
#
# full_data = pd.concat([data_clean,dist_df],axis=1)
# full_data.drop(columns=["content","d"],inplace=True)
# full_data.rename(columns=dict(zip([i for i in range(10)],[f"topic_{i}" for i in range(10)])),inplace=True)
# full_data["start_date"] = pd.to_datetime(full_data["start_date"])
# full_data.to_stata(f"{DATAPATH}/full_results/{SPEAKERDATA}.dta",convert_dates={"start_date":"td"})
#
# =============================================================================
### MEETING ###
# Retrieve raw data
raw_df = pd.read_pickle(f"raw_data/{MEEETDATA}.pkl")
idx_df = pd.read_pickle(f'{OUTPATH}/{MEEETDATA}/original_indices.pkl')
idx_df = idx_df.set_index(0)
idx_df["d"] = 1
data = pd.concat([idx_df,raw_df],axis=1)
data_clean = data[data["d"]==1].reset_index()
dist_df = pd.read_pickle(f'{meeting_ckpt}tpdist.pkl')
full_data = pd.concat([data_clean,dist_df],axis=1)
full_data.drop(columns=["content"],inplace=True)
full_data.rename(columns=dict(zip([i for i in range(10)],[f"topic_{i}" for i in range(10)])),inplace=True)
full_data["date"] = full_data["start_date"]
full_data.to_stata(f"{DATAPATH}/full_results/{MEEETDATA}.dta",convert_dates={"date":"td"})
full_data.to_pickle(f"{DATAPATH}/full_results/{MEEETDATA}.pkl")
### MEETING SAMPLED ###
# Retrieve raw data
raw_df = pd.read_pickle(f"raw_data/{MEETDATASAMPLE}.pkl")
idx_df = pd.read_pickle(f'{OUTPATH}/{MEETDATASAMPLE}/original_indices.pkl')
idx_df = idx_df.set_index(0)
idx_df["d"] = 1
data = pd.concat([idx_df,raw_df],axis=1)
data_clean = data[data["d"]==1].reset_index()
dist_df = pd.read_pickle(f'{meeting_ckptsampled}tpdist.pkl')
full_data = pd.concat([data_clean,dist_df],axis=1)
full_data.drop(columns=["content"],inplace=True)
full_data.rename(columns=dict(zip([i for i in range(10)],[f"topic_{i}" for i in range(10)])),inplace=True)
full_data["date"] = pd.to_datetime(full_data["date"])
full_data.to_stata(f"{DATAPATH}/full_results/{MEETDATASAMPLE}.dta",convert_dates={"date":"td"})
full_data.to_pickle(f"{DATAPATH}/full_results/{MEETDATASAMPLE}.pkl")
# =============================================================================
# ## 6 Visualize
import matplotlib.pyplot as plt
import matplotlib.ticker as tkr
meetphrase_itera = 2 # Number of phrase iterations
meetthreshold = "inf" # Threshold value for collocations. If "inf": no collocations
meetmax_df=1.0
meetmin_df=10
MEEETDATA = f"MEET_min{meetmin_df}_max{meetmax_df}_iter{meetphrase_itera}_th{meetthreshold}"
# Load data
full_data = pd.read_pickle(f"{DATAPATH}/full_results/{MEEETDATA}.pkl")
full_data.rename(columns=dict(zip([f"topic_{k}" for k in range(10)],[f"topic_{k+1}" for k in range(10)] )),inplace=True)
meeting_ckpt = f"{DATAPATH}/results/etm_MEET_min10_max1.0_iter2_thinf_K_10_Htheta_800_Optim_adam_Clip_0.0_ThetaAct_relu_Lr_0.005_Bsz_1000_RhoSize_300_trainEmbeddings_0"
# Retrieve topics
with open(f'{meeting_ckpt}topics.pkl', 'rb') as f:
meet_topics = pickle.load(f)
top_dic = dict(zip([item[0] for item in meet_topics ],[", ".join(item[1]) for item in meet_topics ] ))
# Check topics
for item in meet_topics:
print(f'{item[0]+1}: {", ".join(item[1])}')
section1 = full_data[full_data["Section"]==1].copy()
section2 = full_data[full_data["Section"]==2].copy()
k = 0
for k in range(1,11):
fig = plt.figure(figsize=(20,9))
axs = fig.add_subplot(1,1,1)
plt.subplots_adjust(.1,.20,1,.95)
section1.plot.scatter('start_date',f'topic_{k}',color="dodgerblue",ax=axs,label="Section 1")
section2.plot.scatter('start_date',f'topic_{k}',color="red",ax=axs,label="Section 2")
plt.figtext(0.10, 0.05, f"Topic {k} words: {top_dic[k-1]}", ha="left", fontsize=20)
axs.set_xlabel("Meeting Day",fontsize=20)
axs.set_ylabel(f"Topic {k}",fontsize=20)
axs.yaxis.set_major_formatter(tkr.FuncFormatter(lambda x, p: f"{x:.2f}"))
axs.grid(linestyle=':')
axs.tick_params(which='both',labelsize=20,axis="y")
axs.tick_params(which='both',labelsize=20,axis="x")
axs.legend( prop={'size': 20})
plt.savefig(f'output/transcript_topic_{k}.pdf')
try:
#plt.savefig(f'{OVERLEAF}/files/transcript_topic_{k}.eps', format='eps')
plt.savefig(f'{OVERLEAF}/transcript_topic_{k}.pdf')
except:
print("Invalid Overleaf Path")
# Meetings Sampled
# Retrieve topics
full_data = pd.read_pickle(f"{DATAPATH}/full_results/{MEETDATASAMPLE}.pkl")
full_data.rename(columns=dict(zip([f"topic_{k}" for k in range(12)],[f"topic_{k+1}" for k in range(12)] )),inplace=True)
with open(f'{meeting_ckptsampled}topics.pkl', 'rb') as f:
meet_topics = pickle.load(f)
top_dic = dict(zip([item[0] + 1 for item in meet_topics ],[", ".join(item[1]) for item in meet_topics ] ))
# Check topics
for item in top_dic.keys():
print(f'{item}: {top_dic[item]}')
section1 = full_data[full_data["Section"]==1].copy()
section2 = full_data[full_data["Section"]==2].copy()
k = 0
for k in range(1,11):
fig = plt.figure(figsize=(20,9))
axs = fig.add_subplot(1,1,1)
plt.subplots_adjust(.1,.20,1,.95)
section1.plot.scatter('date',f'topic_{k}',color="dodgerblue",ax=axs,label="Section 1")
section2.plot.scatter('date',f'topic_{k}',color="red",ax=axs,label="Section 2")
plt.figtext(0.10, 0.05, f"Topic {k} words: {top_dic[k]}", ha="left", fontsize=20)
axs.set_xlabel("Meeting Day",fontsize=20)
axs.set_ylabel(f"Topic {k}",fontsize=20)
axs.yaxis.set_major_formatter(tkr.FuncFormatter(lambda x, p: f"{x:.2f}"))
axs.grid(linestyle=':')
axs.tick_params(which='both',labelsize=20,axis="y")
axs.tick_params(which='both',labelsize=20,axis="x")
axs.legend( prop={'size': 20})
plt.savefig(f'output/transcriptsampled_topic_{k}.pdf')
try:
#plt.savefig(f'{OVERLEAF}/files/transcript_topic_{k}.eps', format='eps')
plt.savefig(f'{OVERLEAF}/transcriptsampled_topic_{k}.pdf')
except:
print("Invalid Overleaf Path")
# =============================================================================
# ## 7 MN Logit
import statsmodels.api as sm
import pandas as pd
import numpy as np
DATAPATH = os.path.expanduser("~/Dropbox/MPCounterfactual/src/etm/")
OVERLEAF = os.path.expanduser("~/Dropbox/Apps/Overleaf/FOMC_Summer2019/files")
topics = pd.read_stata("full_results/MEET_min10_max1.0_iter2_thinf.dta")
topics.rename(columns=dict(zip([f"topic_{k}" for k in range(10)],[f"topic_{k+1}" for k in range(10)] )),inplace=True)
topics.drop(columns=["level_0","index", "d"],inplace=True)
topics = topics[topics["start_date"]!="2009-09-16"]
econdata = pd.read_pickle("../economic_data/final_data/econmarketdata.pkl")
data = topics.merge(econdata,left_on="start_date",right_on="date",how="inner")
for k in range(1,11):
data[f"lns{k}s5"] = np.log(data[f"topic_{k}"]) - np.log(data[f"topic_5"])
for k in range(1,11):
data[f"s{k}s5"] = data[f"topic_{k}"] / data[f"topic_5"]
data["constant"] = 1
covs = "l1d_UNRATE l2d_UNRATE l1dln_PCEPI l2dln_PCEPI l1dln_INDPRO l2dln_INDPRO d14ln_spindx d28_SVENY01 d28_SVENY10 TEDRATE SVENY01 SVENY10 BAA10Y AAA10Y"
covs_list = covs.split(" ")
est_section1 = data.loc[data["Section"] == 1,[f"s{k}s5" for k in range(1,11) ]]
res_df = | pd.DataFrame([]) | pandas.DataFrame |
import batman
import ellc
import torch
import numpy as np
import pickle
import matplotlib.pyplot as plt
import pandas as pd
from time import time
from pytransit import OblateStarModel, QuadraticModel
from data_preparation.data_processing_utils import min_max_norm_vectorized, resize, standardize
R_SUN2JUPYTER = 1.0 / 0.10045
M_EARTH_KG = 5.9723e24
M_SUN_KG = 1.9884e30
M_JUPYTER = 1.89813e27
M_JUPYTER2Sun = M_JUPYTER / M_SUN_KG
M_EARTH2SUN = M_EARTH_KG / M_SUN_KG
AU2kKM = 149597870.7
R_SUN = 696340.0
R_EARTH2SUN = 6371.0 / R_SUN
def prob_decrease(max_spots, mass=0.5, decay=0.5):
assert 0.0 < decay < 1.0, "Invalid decay value! Must be between 0 and 1."
probs = []
current = mass
for i in range(max_spots):
current -= current * decay
probs.append(current)
remaining_mass = mass-sum(probs)
return [prob + (remaining_mass/len(probs)) for prob in probs]
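# Editor's note (worked example): prob_decrease(3, mass=1.0, decay=0.5) first yields
# [0.5, 0.25, 0.125]; the leftover mass of 0.125 is spread evenly, giving roughly
# [0.542, 0.292, 0.167], which sums to the requested mass of 1.0.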
def sample_spot_parameters_realistic(b=0., max_spots=4, max_size=20., spotless_prob=0.0, latitude_offset_prob=0.5,
latitude_offset_std=0.1):
p = prob_decrease(max_spots, mass=1 - spotless_prob, decay=0.5)
p.insert(0, spotless_prob)
num_spots = np.random.choice(range(max_spots + 1), p=p)
if num_spots == 0:
return None
spot_params = np.empty((4, num_spots))
longitude_range = np.linspace(-60., 60, 360)
for s, spot in enumerate(range(num_spots)):
if np.random.choice([True, False], p=[latitude_offset_prob, 1.-latitude_offset_prob]):
offset = np.random.normal(0, latitude_offset_std)
else:
offset = 0.
#latitude = (b+offset)*90.
latitude = -b * 60. + np.random.uniform(-5., 5.)
longitude = np.random.choice(longitude_range)
size = np.random.uniform(2., max_size-(5.*s))
used_longitude = np.logical_and(longitude_range >= longitude - size, longitude_range <= longitude + size)  # longitudes already covered by this spot
longitude_range = longitude_range[~used_longitude]
brightness = np.random.uniform(0.7, 1.3)
spot_params[0, s] = longitude
spot_params[1, s] = latitude
spot_params[2, s] = size
spot_params[3, s] = brightness
return spot_params
def extract_parameters(path="All_Exoplanets_Params.csv", transit__method_only=True,
params_essential=('pl_orbper', 'pl_rade', 'pl_orbsmax', 'pl_orbincl', 'st_rad'),
params_optional=('pl_trandur', 'pl_orbeccen', 'pl_orblper', 'st_teff', 'st_logg', 'st_met')):
exos = pd.read_csv(path, comment='#', sep=',')
if transit__method_only:
exos = exos[exos['discoverymethod'] == 'Transit']
if params_essential is None:
params_essential = ['pl_orbper', 'pl_trandur', 'pl_rade', 'pl_orbsmax', 'pl_orbeccen',
'pl_orbincl', 'st_rad']
if params_optional is None:
params_optional = []
param_names = list(params_essential) + list(params_optional)
exos_selected = exos.loc[:, param_names]
# convert unit of 'a' from AU to ratio of stellar radii
exos_selected.loc[:, 'pl_orbsmax'] = (AU2kKM * exos_selected['pl_orbsmax']) / (R_SUN * exos_selected['st_rad'])
valid_exos = exos_selected.dropna(subset=params_essential)
return valid_exos.where(pd.notnull(valid_exos), None)
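# Editor's note (worked example): the semi-major axis is rescaled from AU into units
# of the stellar radius, e.g. pl_orbsmax=0.05 AU around a st_rad=1.0 R_sun star
# becomes (149597870.7*0.05)/(696340.0*1.0) ~ 10.7 stellar radii.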
def bin_parameters_by_impact(param_df, uniform_impact_bins=10):
bin_edges = np.linspace(0, 1, uniform_impact_bins)
bins = {bin_edge: [] for bin_edge in bin_edges}
for r_idx, row in param_df.iterrows():
a = row.get('pl_orbsmax')
i = row.get('pl_orbincl')
if a is None:
a = np.random.uniform(2.3, 30.)
if i is None:
b = np.random.uniform(0., 1.)
i = np.arccos(b / a) * 180 / np.pi
# calculate impact parameter
b = a * np.cos(i * np.pi / 180)
# determine closest bin edge
bin_idx = np.abs(bin_edges - b).argmin()
bins[bin_edges[bin_idx]].append(row)
return bins
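# Editor's note (worked example): a planet with a=10 stellar radii and i=87 deg has
# impact parameter b = 10*cos(87 deg) ~ 0.52, so its row is appended to the bin whose
# edge (from np.linspace(0, 1, uniform_impact_bins)) lies closest to 0.52.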
def get_valid_range(value, constraints=('t_eff',), quantity='logg', limb_model='quad'):
assert all([constraint in ('t_eff', 'logg', 'met') for constraint in constraints]),\
f"Unknown value in constraints for argument constraint! Must only contain ('t_eff', 'logg' 'met')."
assert quantity in ('t_eff', 'logg', 'met'),\
f"Unknown value {quantity} for argument quantity! Must be one of ('t_eff', 'logg' 'met')."
assert all([constraint != quantity for constraint in constraints]), "Argument constraints must not contain quantity!"
assert len(value) == len(constraints), f"Arguments value and constraints have different lengths {len(value)} and " \
f"{len(constraints)}. You need to provide a value for each constraint!"
if limb_model == 'claret':
table = pd.read_csv("TESS_Nonlinear_Limb_Darkening_Atlas.csv", comment='#', sep=',')
elif limb_model == 'quad':
table = pd.read_csv("TESS_Quadratic_Limb_Darkening_Atlas.csv", comment='#', sep=',')
else:
        raise RuntimeError("Unknown limb-darkening model! Use one of ['quad', 'claret']")
translate_arguments = lambda x: 'Teff [K]' if x == 't_eff' else 'logg [cm/s2]' if x == 'logg' else 'Z [Sun]'
constraints_translated = [translate_arguments(constraint) for constraint in constraints]
quantity_translated = translate_arguments(quantity)
constraint_results = table
for c, constraint in enumerate(constraints_translated):
match_idx = (constraint_results[constraint] - value[c]).abs().argmin()
matched_value = constraint_results[constraint].iloc[match_idx]
constraint_results = constraint_results[constraint_results[constraint] == matched_value]
joined_constraints = set(constraint_results[quantity_translated])
if len(joined_constraints) > 0:
return list(joined_constraints)
else:
return [None]
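# Hedged usage sketch of get_valid_range: it only produces output when the
# limb-darkening CSV tables read by this script are present, and the returned
# values depend entirely on those tables (the 5750 K query is hypothetical).
try:
    _demo_logg = get_valid_range(value=[5750.], constraints=('t_eff',),
                                 quantity='logg', limb_model='quad')
    print(_demo_logg)   # logg values tabulated at the grid point closest to T_eff = 5750 K
except FileNotFoundError:
    pass                # table not available in this environment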
def match_stellar_params_with_table(table, T_eff, logg, met):
T_eff_match = (table['Teff [K]'] - T_eff).abs().argmin()
T_eff = table['Teff [K]'].iloc[T_eff_match]
logg_match = (table['logg [cm/s2]'] - logg).abs().argmin()
logg = table['logg [cm/s2]'].iloc[logg_match]
met_match = (table['Z [Sun]'] - met).abs().argmin()
met = table['Z [Sun]'].iloc[met_match]
candidates = table.loc[(table['Teff [K]'] == T_eff) & (table['logg [cm/s2]'] == logg) & (table['Z [Sun]'] == met)]
if len(candidates) > 1:
candidates = candidates.loc[candidates['xi [km/s]'] == 2.0]
return candidates
def lookup_limb_darkening(T_eff, logg, met, limb_model='claret', model='Atlas'):
if model == 'Atlas':
if limb_model == 'claret':
table = pd.read_csv("TESS_Nonlinear_Limb_Darkening_Atlas.csv", comment='#', sep=',')
candidates = match_stellar_params_with_table(table, T_eff, logg, met)
if candidates.empty:
return None
else:
return candidates['a1LSM'].item(), candidates['a2LSM'].item(), candidates['a3LSM'].item(), candidates['a4LSM'].item()
elif limb_model == 'quad':
table = pd.read_csv("TESS_Quadratic_Limb_Darkening_Atlas.csv", comment='#', sep=',')
candidates = match_stellar_params_with_table(table, T_eff, logg, met)
if candidates.empty:
return None
else:
return candidates['aLSM'].item(), candidates['bLSM'].item()
else:
            raise RuntimeError("Unknown limb-darkening model! Use one of ['quad', 'claret']")
else:
        raise RuntimeError("Currently 'Atlas' is the only model implemented! Please use model='Atlas'")
def lookup_gravity_darkening(T_eff, logg, met):
    table = pd.read_csv("TESS_Gravity_Darkening.csv", comment='#', sep=',')
#!/usr/bin/env python3
# coding: utf-8
"""Global surveillance data for the home page
Author: <NAME> - Vector Engineering Team (<EMAIL>)
"""
import argparse
import datetime
import json
import pandas as pd
import numpy as np
from scipy.stats import linregress
from pathlib import Path
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--case-data", type=str, required=True, help="Path to case data JSON file",
)
parser.add_argument(
"--metadata-map", type=str, required=True, help="Path to metadata map JSON file"
)
parser.add_argument(
"--start-date-days-ago",
type=int,
default=90,
help="Number of days before today to filter data. Default: 90",
)
parser.add_argument(
"--start-date",
type=str,
default=None,
help="Start date for filtering data in ISO format (YYYY-MM-DD). Overrides --start-date-days-ago if defined. Default: None",
)
parser.add_argument(
"--end-date-days-ago",
type=int,
default=30,
help="Number of days before today to cut off data prior to regressions. Default: 30",
)
parser.add_argument(
"--end-date",
type=str,
default=None,
help="End date for filtering data in ISO format (YYYY-MM-DD). Overrides --end-date-days-ago if defined. Default: None",
)
parser.add_argument(
"--min-combo-count",
type=int,
default=50,
help="Minimum counts for a spike mutation combo to be included in the dataset",
)
parser.add_argument(
"--min-single-count",
type=int,
default=50,
help="Minimum counts for a single spike mutation to be included in the dataset",
)
parser.add_argument(
"-o", "--output", type=str, required=True, help="Path to output directory",
)
args = parser.parse_args()
    case_data = pd.read_json(args.case_data)
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import unittest
import warnings
import pandas as pd
import numpy as np
from qiime2 import Artifact
from qiime2.metadata import (Metadata, CategoricalMetadataColumn,
NumericMetadataColumn)
from qiime2.core.testing.util import get_dummy_plugin, ReallyEqualMixin
class TestInvalidMetadataConstruction(unittest.TestCase):
def test_non_dataframe(self):
with self.assertRaisesRegex(
TypeError, 'Metadata constructor.*DataFrame.*not.*Series'):
Metadata(pd.Series([1, 2, 3], name='col',
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_no_ids(self):
with self.assertRaisesRegex(ValueError, 'Metadata.*at least one ID'):
Metadata(pd.DataFrame({}, index=pd.Index([], name='id')))
with self.assertRaisesRegex(ValueError, 'Metadata.*at least one ID'):
Metadata(pd.DataFrame({'column': []},
index=pd.Index([], name='id')))
def test_invalid_id_header(self):
# default index name
with self.assertRaisesRegex(ValueError, r'Index\.name.*None'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]}, index=pd.Index(['a', 'b', 'c'])))
with self.assertRaisesRegex(ValueError, r'Index\.name.*my-id-header'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'b', 'c'], name='my-id-header')))
def test_non_str_id(self):
with self.assertRaisesRegex(
TypeError, 'non-string metadata ID.*type.*float.*nan'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', np.nan, 'c'], name='id')))
def test_non_str_column_name(self):
with self.assertRaisesRegex(
TypeError, 'non-string metadata column name.*type.*'
'float.*nan'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
np.nan: [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_empty_id(self):
with self.assertRaisesRegex(
ValueError, 'empty metadata ID.*at least one character'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]}, index=pd.Index(['a', '', 'c'], name='id')))
def test_empty_column_name(self):
with self.assertRaisesRegex(
ValueError, 'empty metadata column name.*'
'at least one character'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
'': [4, 5, 6]}, index=pd.Index(['a', 'b', 'c'], name='id')))
def test_pound_sign_id(self):
with self.assertRaisesRegex(
ValueError, "metadata ID.*begins with a pound sign.*'#b'"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', '#b', 'c'], name='id')))
def test_id_conflicts_with_id_header(self):
with self.assertRaisesRegex(
ValueError, "metadata ID 'sample-id'.*conflicts.*reserved.*"
"ID header"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'sample-id', 'c'], name='id')))
def test_column_name_conflicts_with_id_header(self):
with self.assertRaisesRegex(
ValueError, "metadata column name 'featureid'.*conflicts.*"
"reserved.*ID header"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
'featureid': [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_duplicate_ids(self):
with self.assertRaisesRegex(ValueError, "Metadata IDs.*unique.*'a'"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'b', 'a'], name='id')))
def test_duplicate_column_names(self):
data = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
with self.assertRaisesRegex(ValueError,
"Metadata column names.*unique.*'col1'"):
Metadata(pd.DataFrame(data, columns=['col1', 'col2', 'col1'],
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_unsupported_column_dtype(self):
with self.assertRaisesRegex(
TypeError, "Metadata column 'col2'.*unsupported.*dtype.*bool"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': [True, False, True]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_categorical_column_unsupported_type(self):
with self.assertRaisesRegex(
TypeError, "CategoricalMetadataColumn.*strings or missing "
r"values.*42\.5.*float.*'col2'"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', 'bar', 42.5]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_categorical_column_empty_str(self):
with self.assertRaisesRegex(
ValueError, "CategoricalMetadataColumn.*empty strings.*"
"column 'col2'"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', '', 'bar']},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_numeric_column_infinity(self):
with self.assertRaisesRegex(
ValueError, "NumericMetadataColumn.*positive or negative "
"infinity.*column 'col2'"):
Metadata(pd.DataFrame(
{'col1': ['foo', 'bar', 'baz'],
'col2': [42, float('+inf'), 4.3]},
index=pd.Index(['a', 'b', 'c'], name='id')))
class TestMetadataConstructionAndProperties(unittest.TestCase):
def assertEqualColumns(self, obs_columns, exp):
obs = [(name, props.type) for name, props in obs_columns.items()]
self.assertEqual(obs, exp)
def test_minimal(self):
md = Metadata(pd.DataFrame({}, index=pd.Index(['a'], name='id')))
self.assertEqual(md.id_count, 1)
self.assertEqual(md.column_count, 0)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('a',))
self.assertEqualColumns(md.columns, [])
def test_single_id(self):
index = pd.Index(['id1'], name='id')
df = pd.DataFrame({'col1': [1.0], 'col2': ['a'], 'col3': ['foo']},
index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 1)
self.assertEqual(md.column_count, 3)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1',))
self.assertEqualColumns(md.columns,
[('col1', 'numeric'), ('col2', 'categorical'),
('col3', 'categorical')])
def test_no_columns(self):
index = pd.Index(['id1', 'id2', 'foo'], name='id')
df = pd.DataFrame({}, index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 0)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1', 'id2', 'foo'))
self.assertEqualColumns(md.columns, [])
def test_single_column(self):
index = pd.Index(['id1', 'a', 'my-id'], name='id')
df = pd.DataFrame({'column': ['foo', 'bar', 'baz']}, index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 1)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1', 'a', 'my-id'))
self.assertEqualColumns(md.columns, [('column', 'categorical')])
def test_retains_column_order(self):
# Supply DataFrame constructor with explicit column ordering instead of
# a dict.
index = pd.Index(['id1', 'id2', 'id3'], name='id')
columns = ['z', 'a', 'ch']
data = [
[1.0, 'a', 'foo'],
[2.0, 'b', 'bar'],
[3.0, 'c', '42']
]
df = pd.DataFrame(data, index=index, columns=columns)
md = Metadata(df)
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 3)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1', 'id2', 'id3'))
self.assertEqualColumns(md.columns,
[('z', 'numeric'), ('a', 'categorical'),
('ch', 'categorical')])
def test_supported_id_headers(self):
case_insensitive = {
'id', 'sampleid', 'sample id', 'sample-id', 'featureid',
'feature id', 'feature-id'
}
exact_match = {
'#SampleID', '#Sample ID', '#OTUID', '#OTU ID', 'sample_name'
}
# Build a set of supported headers, including exact matches and headers
# with different casing.
headers = set()
for header in case_insensitive:
headers.add(header)
headers.add(header.upper())
headers.add(header.title())
for header in exact_match:
headers.add(header)
count = 0
for header in headers:
index = pd.Index(['id1', 'id2'], name=header)
df = pd.DataFrame({'column': ['foo', 'bar']}, index=index)
md = Metadata(df)
self.assertEqual(md.id_header, header)
count += 1
# Since this test case is a little complicated, make sure that the
# expected number of comparisons are happening.
self.assertEqual(count, 26)
def test_recommended_ids(self):
index = pd.Index(['c6ca034a-223f-40b4-a0e0-45942912a5ea', 'My.ID'],
name='id')
df = pd.DataFrame({'col1': ['foo', 'bar']}, index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 2)
self.assertEqual(md.column_count, 1)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids,
('c6ca034a-223f-40b4-a0e0-45942912a5ea', 'My.ID'))
self.assertEqualColumns(md.columns, [('col1', 'categorical')])
def test_non_standard_characters(self):
index = pd.Index(['©id##1', '((id))2', "'id_3<>'", '"id#4"',
'i d\r\t\n5'], name='id')
columns = ['↩c@l1™', 'col(#2)', "#col'3", '"<col_4>"', 'col\t \r\n5']
data = [
['ƒoo', '(foo)', '#f o #o', 'fo\ro', np.nan],
["''2''", 'b#r', 'ba\nr', np.nan, np.nan],
['b"ar', 'c\td', '4\r\n2', np.nan, np.nan],
['b__a_z', '<42>', '>42', np.nan, np.nan],
            ['baz', np.nan, '42', np.nan, np.nan]
]
df = pd.DataFrame(data, index=index, columns=columns)
md = Metadata(df)
self.assertEqual(md.id_count, 5)
self.assertEqual(md.column_count, 5)
self.assertEqual(md.id_header, 'id')
self.assertEqual(
md.ids, ('©id##1', '((id))2', "'id_3<>'", '"id#4"', 'i d\r\t\n5'))
self.assertEqualColumns(md.columns, [('↩c@l1™', 'categorical'),
('col(#2)', 'categorical'),
("#col'3", 'categorical'),
('"<col_4>"', 'categorical'),
('col\t \r\n5', 'numeric')])
def test_missing_data(self):
index = pd.Index(['None', 'nan', 'NA', 'foo'], name='id')
df = pd.DataFrame(collections.OrderedDict([
('col1', [1.0, np.nan, np.nan, np.nan]),
('NA', [np.nan, np.nan, np.nan, np.nan]),
('col3', ['null', 'N/A', np.nan, 'NA']),
('col4', np.array([np.nan, np.nan, np.nan, np.nan],
dtype=object))]),
index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 4)
self.assertEqual(md.column_count, 4)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('None', 'nan', 'NA', 'foo'))
self.assertEqualColumns(md.columns, [('col1', 'numeric'),
('NA', 'numeric'),
('col3', 'categorical'),
('col4', 'categorical')])
def test_does_not_cast_ids_or_column_names(self):
index = pd.Index(['0.000001', '0.004000', '0.000000'], dtype=object,
name='id')
columns = ['42.0', '1000', '-4.2']
data = [
[2.0, 'b', 2.5],
[1.0, 'b', 4.2],
[3.0, 'c', -9.999]
]
df = pd.DataFrame(data, index=index, columns=columns)
md = Metadata(df)
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 3)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('0.000001', '0.004000', '0.000000'))
self.assertEqualColumns(md.columns, [('42.0', 'numeric'),
('1000', 'categorical'),
('-4.2', 'numeric')])
def test_mixed_column_types(self):
md = Metadata(
pd.DataFrame({'col0': [1.0, 2.0, 3.0],
'col1': ['a', 'b', 'c'],
'col2': ['foo', 'bar', '42'],
'col3': ['1.0', '2.5', '-4.002'],
'col4': [1, 2, 3],
'col5': [1, 2, 3.5],
'col6': [1e-4, -0.0002, np.nan],
'col7': ['cat', np.nan, 'dog'],
'col8': ['a', 'a', 'a'],
'col9': [0, 0, 0]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 10)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1', 'id2', 'id3'))
self.assertEqualColumns(md.columns, [('col0', 'numeric'),
('col1', 'categorical'),
('col2', 'categorical'),
('col3', 'categorical'),
('col4', 'numeric'),
('col5', 'numeric'),
('col6', 'numeric'),
('col7', 'categorical'),
('col8', 'categorical'),
('col9', 'numeric')])
def test_case_insensitive_duplicate_ids(self):
index = pd.Index(['a', 'b', 'A'], name='id')
df = pd.DataFrame({'column': ['1', '2', '3']}, index=index)
metadata = Metadata(df)
self.assertEqual(metadata.ids, ('a', 'b', 'A'))
def test_case_insensitive_duplicate_column_names(self):
index = pd.Index(['a', 'b', 'c'], name='id')
df = pd.DataFrame({'column': ['1', '2', '3'],
'Column': ['4', '5', '6']}, index=index)
metadata = Metadata(df)
self.assertEqual(set(metadata.columns), {'column', 'Column'})
def test_categorical_column_leading_trailing_whitespace_value(self):
md1 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', ' bar ', 'baz']},
index=pd.Index(['a', 'b', 'c'], name='id')))
md2 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', 'bar', 'baz']},
index=pd.Index(['a', 'b', 'c'], name='id')))
self.assertEqual(md1, md2)
def test_leading_trailing_whitespace_id(self):
md1 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], 'col2': [4, 5, 6]},
index=pd.Index(['a', ' b ', 'c'], name='id')))
md2 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], 'col2': [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
self.assertEqual(md1, md2)
def test_leading_trailing_whitespace_column_name(self):
md1 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], ' col2 ': [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
md2 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], 'col2': [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
self.assertEqual(md1, md2)
class TestSourceArtifacts(unittest.TestCase):
def setUp(self):
self.md = Metadata(pd.DataFrame(
{'col': [1, 2, 3]}, index=pd.Index(['a', 'b', 'c'], name='id')))
def test_no_source_artifacts(self):
self.assertEqual(self.md.artifacts, ())
def test_add_zero_artifacts(self):
self.md._add_artifacts([])
self.assertEqual(self.md.artifacts, ())
def test_add_artifacts(self):
# First two artifacts have the same data but different UUIDs.
artifact1 = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
self.md._add_artifacts([artifact1])
artifact2 = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
artifact3 = Artifact.import_data('IntSequence1', [1, 2, 3, 4])
self.md._add_artifacts([artifact2, artifact3])
self.assertEqual(self.md.artifacts, (artifact1, artifact2, artifact3))
def test_add_non_artifact(self):
artifact = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
with self.assertRaisesRegex(TypeError, "Artifact object.*42"):
self.md._add_artifacts([artifact, 42])
# Test that the object hasn't been mutated.
self.assertEqual(self.md.artifacts, ())
def test_add_duplicate_artifact(self):
artifact1 = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
artifact2 = Artifact.import_data('IntSequence1', [1, 2, 3, 4])
self.md._add_artifacts([artifact1, artifact2])
with self.assertRaisesRegex(
ValueError, "Duplicate source artifacts.*artifact: Mapping"):
self.md._add_artifacts([artifact1])
# Test that the object hasn't been mutated.
self.assertEqual(self.md.artifacts, (artifact1, artifact2))
class TestRepr(unittest.TestCase):
def test_singular(self):
md = Metadata(pd.DataFrame({'col1': [42]},
index=pd.Index(['a'], name='id')))
obs = repr(md)
self.assertIn('Metadata', obs)
self.assertIn('1 ID x 1 column', obs)
self.assertIn("col1: ColumnProperties(type='numeric')", obs)
def test_plural(self):
md = Metadata(pd.DataFrame({'col1': [42, 42], 'col2': ['foo', 'bar']},
index=pd.Index(['a', 'b'], name='id')))
obs = repr(md)
self.assertIn('Metadata', obs)
self.assertIn('2 IDs x 2 columns', obs)
self.assertIn("col1: ColumnProperties(type='numeric')", obs)
self.assertIn("col2: ColumnProperties(type='categorical')", obs)
def test_column_name_padding(self):
data = [[0, 42, 'foo']]
index = pd.Index(['my-id'], name='id')
columns = ['col1', 'longer-column-name', 'c']
md = Metadata(pd.DataFrame(data, index=index, columns=columns))
obs = repr(md)
self.assertIn('Metadata', obs)
self.assertIn('1 ID x 3 columns', obs)
self.assertIn(
"col1: ColumnProperties(type='numeric')", obs)
self.assertIn(
"longer-column-name: ColumnProperties(type='numeric')", obs)
self.assertIn(
"c: ColumnProperties(type='categorical')", obs)
class TestEqualityOperators(unittest.TestCase, ReallyEqualMixin):
def setUp(self):
get_dummy_plugin()
def test_type_mismatch(self):
md = Metadata(
pd.DataFrame({'col1': [1.0, 2.0, 3.0],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
mdc = md.get_column('col1')
self.assertIsInstance(md, Metadata)
self.assertIsInstance(mdc, NumericMetadataColumn)
self.assertReallyNotEqual(md, mdc)
def test_id_header_mismatch(self):
data = {'col1': ['foo', 'bar'], 'col2': [42, 43]}
md1 = Metadata(pd.DataFrame(
data, index=pd.Index(['id1', 'id2'], name='id')))
md2 = Metadata(pd.DataFrame(
data, index=pd.Index(['id1', 'id2'], name='ID')))
self.assertReallyNotEqual(md1, md2)
def test_source_mismatch(self):
# Metadata created from an artifact vs not shouldn't compare equal,
# even if the data is the same.
artifact = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
md_from_artifact = artifact.view(Metadata)
md_no_artifact = Metadata(md_from_artifact.to_dataframe())
pd.testing.assert_frame_equal(md_from_artifact.to_dataframe(),
md_no_artifact.to_dataframe())
self.assertReallyNotEqual(md_from_artifact, md_no_artifact)
def test_artifact_mismatch(self):
# Metadata created from different artifacts shouldn't compare equal,
# even if the data is the same.
artifact1 = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
artifact2 = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
md1 = artifact1.view(Metadata)
md2 = artifact2.view(Metadata)
pd.testing.assert_frame_equal(md1.to_dataframe(), md2.to_dataframe())
self.assertReallyNotEqual(md1, md2)
def test_id_mismatch(self):
md1 = Metadata(pd.DataFrame({'a': '1', 'b': '2'},
index=pd.Index(['0'], name='id')))
md2 = Metadata(pd.DataFrame({'a': '1', 'b': '2'},
index=pd.Index(['1'], name='id')))
self.assertReallyNotEqual(md1, md2)
def test_column_name_mismatch(self):
md1 = Metadata(pd.DataFrame({'a': '1', 'b': '2'},
index=pd.Index(['0'], name='id')))
md2 = Metadata(pd.DataFrame({'a': '1', 'c': '2'},
index=pd.Index(['0'], name='id')))
self.assertReallyNotEqual(md1, md2)
def test_column_type_mismatch(self):
md1 = Metadata(pd.DataFrame({'col1': ['42', '43']},
index=pd.Index(['id1', 'id2'], name='id')))
md2 = Metadata(pd.DataFrame({'col1': [42, 43]},
index=pd.Index(['id1', 'id2'], name='id')))
self.assertReallyNotEqual(md1, md2)
def test_column_order_mismatch(self):
index = pd.Index(['id1', 'id2'], name='id')
md1 = Metadata(pd.DataFrame([[42, 'foo'], [43, 'bar']], index=index,
columns=['z', 'a']))
md2 = Metadata(pd.DataFrame([['foo', 42], ['bar', 43]], index=index,
columns=['a', 'z']))
self.assertReallyNotEqual(md1, md2)
def test_data_mismatch(self):
md1 = Metadata(pd.DataFrame({'a': '1', 'b': '3'},
index=pd.Index(['0'], name='id')))
md2 = Metadata(pd.DataFrame({'a': '1', 'b': '2'},
index=pd.Index(['0'], name='id')))
self.assertReallyNotEqual(md1, md2)
def test_equality_without_artifact(self):
md1 = Metadata(pd.DataFrame({'a': '1', 'b': '3'},
index=pd.Index(['0'], name='id')))
md2 = Metadata(pd.DataFrame({'a': '1', 'b': '3'},
index=pd.Index(['0'], name='id')))
self.assertReallyEqual(md1, md2)
def test_equality_with_artifact(self):
artifact = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
md1 = artifact.view(Metadata)
md2 = artifact.view(Metadata)
self.assertReallyEqual(md1, md2)
def test_equality_with_missing_data(self):
md1 = Metadata(pd.DataFrame(
{'col1': [1, np.nan, 4.2],
'col2': [np.nan, 'foo', np.nan]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame(
{'col1': [1, np.nan, 4.2],
'col2': [np.nan, 'foo', np.nan]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
self.assertReallyEqual(md1, md2)
class TestToDataframe(unittest.TestCase):
def test_minimal(self):
df = pd.DataFrame({}, index=pd.Index(['id1'], name='id'))
md = Metadata(df)
obs = md.to_dataframe()
pd.testing.assert_frame_equal(obs, df)
def test_id_header_preserved(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['id1', 'id2'], name='#SampleID'))
md = Metadata(df)
obs = md.to_dataframe()
pd.testing.assert_frame_equal(obs, df)
self.assertEqual(obs.index.name, '#SampleID')
def test_dataframe_copy(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['id1', 'id2'], name='id'))
md = Metadata(df)
obs = md.to_dataframe()
pd.testing.assert_frame_equal(obs, df)
self.assertIsNot(obs, df)
def test_retains_column_order(self):
index = pd.Index(['id1', 'id2'], name='id')
columns = ['z', 'a', 'ch']
data = [
[1.0, 'a', 'foo'],
[2.0, 'b', 'bar']
]
df = pd.DataFrame(data, index=index, columns=columns)
md = Metadata(df)
obs = md.to_dataframe()
        pd.testing.assert_frame_equal(obs, df)
import numpy as np
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.cm import ScalarMappable
from monty.serialization import loadfn, dumpfn
g_csvpd = loadfn('../data/g_mae_corrsvpd_opts.json')
h_csvpd = loadfn('../data/h_mae_corrsvpd_opts.json')
s_csvpd = loadfn('../data/s_mae_corrsvpd_opts.json')
g_svpd = loadfn('../data/g_mae_svpd_opts.json')
g_tzvpd = loadfn('../data/g_mae_tzvpd_opts.json')
e_tzvpd = loadfn('../data/elec_mae_tzvpd_opts.json')
method_classes = {"GGA": ["PBE", "PBE-D3(BJ)", "BLYP", "BLYP-D3(BJ)", "B97-D",
"B97-D3", "mPW91", "mPW91-D3(BJ)", "VV10", "rVV10"],
"meta-GGA": ["M06-L", "M06-L-D3(0)", "SCAN", "SCAN-D3(BJ)",
"TPSS", "TPSS-D3(BJ)", "MN12-L",
"MN12-L-D3(BJ)", "B97M-rV"],
"hybrid GGA": ["PBE0", "PBE0-D3(BJ)", "B3LYP",
"B3LYP-D3(BJ)", "CAM-B3LYP",
"CAM-B3LYP-D3(0)", "mPW1PW91",
"mPW1PW91-D3(BJ)", "wB97X", "wB97XD",
"wB97XD3", "wB97XV"],
"hybrid meta-GGA": ["M06-2X", "M06-2X-D3(0)", "M06-HF",
"M08-SO", "M11", "MN15", "BMK",
"BMK-D3(BJ)", "TPSSh", "TPSSh-D3(BJ)",
"SCAN0", "mPWB1K", "mPWB1K-D3(BJ)",
"wB97M-V"]}
methods = list(np.concatenate([v for k, v in method_classes.items()]).flat)
sorted_g = {k: v for k, v in sorted(g_tzvpd.items(), key=lambda item: item[1])}
sorted_methods = [k for k, v in sorted_g.items()]
# x_labels = ['G_csvpd', 'H_csvpd', 'S_csvpd']
# x_labels = ['G_svpd', 'G_csvpd', 'G_tzvpd']
x_labels = ['E_tzvpd', 'G_tzvpd']
table_dict = {x: [] for x in x_labels}
for m in sorted_methods:
for d, x in zip([e_tzvpd, g_tzvpd], x_labels): #manually change for
# different ones
for k, v in d.items():
if m == k:
table_dict[x].append(v)
info_dict = {}
for k, v in table_dict.items():
minv = min(v)
maxv = max(v)
info_dict[k] = [minv, maxv]
df = pd.DataFrame(table_dict)
# A lot of the next code is to give columns their own heatmaps,
# if we do G, H, S or something like that with different scales.
df_info = pd.DataFrame(info_dict)
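# A possible next step, sketched under the assumption that a simple per-column
# min-max rescaling is acceptable (the 'viridis' colormap choice is arbitrary):
# normalise each error column with the extrema collected in info_dict so every
# metric gets its own colour scale, while annotating cells with the raw values.
df_scaled = df.copy()
for col in df_scaled.columns:
    lo, hi = info_dict[col]
    df_scaled[col] = (df_scaled[col] - lo) / (hi - lo)
ax = sns.heatmap(df_scaled, annot=df, fmt='.2f', cbar=False,
                 yticklabels=sorted_methods, cmap='viridis')
plt.tight_layout()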
#!/usr/bin/env python
import pandas as pd
from argparse import ArgumentParser
import statsmodels.formula.api as smf
import math
# Function to compute effect sizes
# Based on method in Nakagawa, S. and <NAME>. (2007). Biol. Rev. 82. pp. 591-605.
def cohensd(t, df, n1, n2):
d = ( t * (n1+n2) ) / (math.sqrt(n1*n2) * math.sqrt(df))
se = math.sqrt( ((n1+n2-1)/(n1+n2-3)) * ( (4/(n1+n2)) * (1 + (d**2)/8) ))
return {'d': d, 'se': se, 'lower_ci': d-1.96*se, 'upper_ci': d+1.96*se}
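# Illustrative check of cohensd with made-up numbers: a t-statistic of 2.5 from
# a fit with 100 residual degrees of freedom and groups of 40 vs. 62 subjects.
_demo_es = cohensd(t=2.5, df=100, n1=40, n2=62)
print(round(_demo_es['d'], 2), round(_demo_es['se'], 2))   # d is roughly 0.51 for these inputs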
def main():
print("Fitting models...")
# Parse command line arguments
description = "Data harmonization mini-pipeline for Brainhack School 2020, stage 3. Reads harmonzied data, fits models."
parser = ArgumentParser(__file__, description)
parser.add_argument("harmonized_data", action="store",
help = "Path to harmonized data .csv file. File must include columns noted in documentation.")
parser.add_argument("unharmonized_data", action="store",
help = "Path to unharmonized data .csv file. File must include columns noted in documentation.")
parser.add_argument("X", action = "store", help = "Name of column for regressor of interest.")
parser.add_argument("X_control_level", action = 'store', help = "Level of X to be considered control")
parser.add_argument("covars", action = "store", help = "Comma-separated column names of covariates.")
parser.add_argument("site", action = "store", help = "Column name for site variable (used for models on non-harmonized data).")
parser.add_argument("output_dir", action = "store", help = "Directory for all temporary output.")
parser.add_argument("output_file", action="store", help = "Name of file to to output the effect size measures.")
parser.add_argument("output_names", action="store", help = "Name of file to store display names of effect size measure batches.")
cl_args = parser.parse_args()
# Extract covariate columns from input arguments
covars = cl_args.covars.split(',')
X = cl_args.X
site = cl_args.site
output_file = cl_args.output_file
output_names = cl_args.output_names
output_dir = cl_args.output_dir
X_control_level = cl_args.X_control_level
print("... X: ", X)
print("... covars: ", covars)
print("... site: ", site)
# Read harmonized data
harmonized_features = pd.read_csv(cl_args.harmonized_data, index_col=0)
unharmonized_data = pd.read_csv(cl_args.unharmonized_data, index_col=0)
# TODO: fix for loop hard-coded -5!!!
features = harmonized_features.columns[:-5]
# ----------- Model 1: Linear regression on harmonized data -----------
print('... fitting linear models on harmonized data.')
models = pd.Series(dtype='object')
model_es = []
covar_string = ' + '.join(covars)
for nucleus in features: # Don't include last (covariate) columns
formula_string = nucleus + ' ~ ' + X + ' + ' + covar_string
models[nucleus] = smf.ols(formula = formula_string, data = harmonized_features).fit()
# Compute Cohens d effect sizes.
n_control = sum(harmonized_features[X] == X_control_level)
n_ASD = models[nucleus].nobs - n_control
model_es.append(cohensd(t = models[nucleus].tvalues[X+'[T.Control]'],
df = models[nucleus].df_resid,
n1 = n_control,
n2 = n_ASD))
# Create lists to collect temporary output file names, and output this batch of effect sizes.
    combat_es = pd.DataFrame(model_es, index=features)
import numpy as np
import pandas as pd
from sklearn import metrics
from sklearn.ensemble import IsolationForest
import STRING
from sklearn.preprocessing import StandardScaler
def isolation_forest(x, y, contamination=0.1, n_estimators=50, bootstrap=True, max_features=0.33, validation=[]):
if contamination == 'auto':
contamination = y.mean()
print('Contamination Automatized to: %.2f\n' % contamination)
db = IsolationForest(n_estimators=n_estimators, max_samples=500,
bootstrap=bootstrap, verbose=1, random_state=42,
contamination=contamination, max_features=max_features)
db.fit(x)
labels = db.predict(x)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print('CLUSTER NUMBERS ', n_clusters_)
print(labels)
labels = pd.DataFrame(labels, columns=['outliers'], index=y.index)
labels.loc[labels['outliers'] == 1, 'outliers'] = 0
labels.loc[labels['outliers'] == -1, 'outliers'] = 1
precision = metrics.precision_score(y.values, labels.values)
recall = metrics.recall_score(y.values, labels.values)
fbeta = metrics.fbeta_score(y.values, labels.values, beta=2)
print('PRECISION %.4f' % precision)
print('RECALL %.4f' % recall)
print('FB SCORE %.4f' % fbeta)
if validation:
        assert validation[0].shape[1] > validation[1].shape[1], 'X valid has fewer columns than Y valid'
predict_valid = db.predict(validation[0])
        predict_valid = pd.DataFrame(predict_valid, columns=['outliers'])
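# A minimal, self-contained usage sketch with synthetic data (purely
# illustrative -- the names prefixed with _demo are not from the source, the
# labels are arbitrary, and the reported scores will therefore be poor):
_rng = np.random.RandomState(0)
_x_demo = pd.DataFrame(_rng.normal(size=(200, 6)),
                       columns=['f%d' % i for i in range(6)])
_y_demo = pd.Series([1] * 20 + [0] * 180, index=_x_demo.index)
_x_scaled = pd.DataFrame(StandardScaler().fit_transform(_x_demo),
                         index=_x_demo.index, columns=_x_demo.columns)
isolation_forest(_x_scaled, _y_demo, contamination='auto', n_estimators=100)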
import pandas as pd
import numpy as np
START_PULL_UPS_UPPER_ANGLE_THRESHOLD = 40
END_PULL_UPS_UPPER_ANGLE_THRESHOLD = 130
TIME_FRAME_LIST = 20
reps_position = []
count_reps = 0
in_reps = 0
precedent_pos = 0
df_reps = pd.DataFrame(columns=['x_Nose','y_Nose','x_Neck','y_Neck','x_RShoulder','y_RShoulder','x_RElbow',
'y_RElbow','x_RWrist','y_RWrist','x_LShoulder','y_LShoulder','x_LElbow','y_LElbow','x_LWrist','y_LWrist',
'x_RHip','y_RHip','x_RKnee','y_RKnee','x_RAnkle','y_RAnkle','x_LHip','y_LHip','x_LKnee','y_LKnee','x_LAnkle','y_LAnkle',
'x_REye','y_REye','x_LEye','y_LEye','x_REar','y_REar','x_LEar','y_LEar','Right_Up_Angle','Left_Up_Angle','Right_Low_Angle','Left_Low_Angle'])
def start_reps_pull_ups(right_upper_angle,left_upper_angle,y_RWrist,y_LWrist,y_RElbow,y_LElbow):
if right_upper_angle < START_PULL_UPS_UPPER_ANGLE_THRESHOLD and left_upper_angle < START_PULL_UPS_UPPER_ANGLE_THRESHOLD and y_RWrist < y_RElbow and y_LWrist < y_LElbow:
return 1
else:
return 0
def count_pull_ups_rep(pos_list,right_upper_angle,left_upper_angle):
if right_upper_angle > 80 and left_upper_angle > 80 and mean_list(pos_list) >= 0.2:
return [] ,1
else:
return pos_list,0
def mean_list(pos_list):
if len(pos_list) < TIME_FRAME_LIST :
return 0
else:
return sum(pos_list[-TIME_FRAME_LIST:])/TIME_FRAME_LIST
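# Illustrative check of the helpers above with made-up keypoint values.
# Image coordinates grow downward, so a wrist above its elbow means y_wrist < y_elbow.
print(start_reps_pull_ups(30., 25., y_RWrist=100, y_LWrist=102,
                          y_RElbow=150, y_LElbow=148))        # 1 -> start position detected
_demo_pos = [1] * TIME_FRAME_LIST    # pretend the last 20 frames were all in the start position
_demo_pos, _demo_count = count_pull_ups_rep(_demo_pos, 95., 92.)
print(_demo_count)                   # 1 -> one repetition counted and the position list is reset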
df_human = pd.read_csv('./keypoints/IMG_6606human_1.csv')
del df_human['Unnamed: 0']
for k in range(len(df_human[:800])):
print(k)
val_start_reps = start_reps_pull_ups(df_human['Right_Up_Angle'][k],df_human['Left_Up_Angle'][k],df_human['y_RWrist'][k],df_human['y_LWrist'][k],df_human['y_RElbow'][k],df_human['y_LElbow'][k])
reps_position.append(val_start_reps)
reps_position, val_count = count_pull_ups_rep(reps_position,df_human['Right_Up_Angle'][k], df_human['Left_Up_Angle'][k])
if val_count:
count_reps = count_reps + 1
if val_start_reps:
in_reps = 1
if in_reps:
df_reps = df_reps.append(df_human.iloc[k])
if precedent_pos == 0 and val_start_reps == 1:
if count_reps == 0:
pass
else:
if len(df_reps) <= 30:
df_reps = pd.DataFrame(columns=['x_Nose','y_Nose','x_Neck','y_Neck','x_RShoulder','y_RShoulder','x_RElbow',
'y_RElbow','x_RWrist','y_RWrist','x_LShoulder','y_LShoulder','x_LElbow','y_LElbow','x_LWrist','y_LWrist',
'x_RHip','y_RHip','x_RKnee','y_RKnee','x_RAnkle','y_RAnkle','x_LHip','y_LHip','x_LKnee','y_LKnee','x_LAnkle','y_LAnkle',
'x_REye','y_REye','x_LEye','y_LEye','x_REar','y_REar','x_LEar','y_LEar','Right_Up_Angle','Left_Up_Angle','Right_Low_Angle','Left_Low_Angle'])
else:
print(df_reps)
                df_reps = pd.DataFrame(columns=['x_Nose','y_Nose','x_Neck','y_Neck','x_RShoulder','y_RShoulder','x_RElbow',
                    'y_RElbow','x_RWrist','y_RWrist','x_LShoulder','y_LShoulder','x_LElbow','y_LElbow','x_LWrist','y_LWrist',
                    'x_RHip','y_RHip','x_RKnee','y_RKnee','x_RAnkle','y_RAnkle','x_LHip','y_LHip','x_LKnee','y_LKnee','x_LAnkle','y_LAnkle',
                    'x_REye','y_REye','x_LEye','y_LEye','x_REar','y_REar','x_LEar','y_LEar','Right_Up_Angle','Left_Up_Angle','Right_Low_Angle','Left_Low_Angle'])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
from scipy.stats import pearsonr
# from mpl_toolkits.axes_grid1 import host_subplot
# import mpl_toolkits.axisartist as AA
# import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
import netCDF4 as nc
from netCDF4 import Dataset
import itertools
import datetime
from scipy.stats import ks_2samp
Estacion = '6001'
df1 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/6001Historico.txt', parse_dates=[2])
Theoric_rad_method = 'GIS_Model' ##-->> TO USE THE GIS MODEL THIS MUST BE 'GIS_Model'
resolucion = 'diaria' ##-->> THE OPTIONS ARE 'diaria' OR 'horaria'
#-----------------------------------------------------------------------------
# Paths for the fonts --------------------------------------------------------
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
## ---THEORETICAL RADIATION CALCULATION--- ##
def daterange(start_date, end_date):
    'Adjusts the dates to 10-minute steps for the Kumar model. The start and end dates are strings in %Y-%m-%d format.'
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d')
delta = timedelta(minutes=10)
while start_date <= end_date:
yield start_date
start_date += delta
def serie_Kumar_Model_hora(estacion):
    'Returns an hourly dataframe with the theoretical radiation following the Kumar recommendations, prepared by <NAME> ' \
    'for AMVA and their thesis. The original dataframe is sorted into 12 ascending months (2018), although the data may ' \
    'belong to different years. The result is for the selected point, using the Total_Timeseries.csv file.'
data_Model = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Radiacion_GIS/Teoricos_nati/Total_Timeseries.csv',
sep=',')
fecha_hora = [pd.to_datetime(data_Model['Unnamed: 0'], format="%Y-%m-%d %H:%M:%S")[i].to_pydatetime() for i in
range(len(data_Model['Unnamed: 0']))]
data_Model.index = fecha_hora
data_Model = data_Model.sort_index()
data_Model['Month'] = np.array(data_Model.index.month)
data_Model = data_Model.sort_values(by="Month")
fechas = []
for i in daterange('2018-01-01', '2019-01-01'):
fechas.append(i)
fechas = fechas[0:-1]
if estacion == '6001':
punto = data_Model['TS_kumar']
elif estacion == '6002':
punto = data_Model['CI_kumar']
elif estacion == '6003':
punto = data_Model['JV_kumar']
Rad_teorica = []
for i in range(len(fechas)):
mes = fechas[i].month
hora = fechas[i].hour
mint = fechas[i].minute
rad = \
np.where((data_Model.index.month == mes) & (data_Model.index.hour == hora) & (data_Model.index.minute == mint))[
0]
if len(rad) == 0:
Rad_teorica.append(np.nan)
else:
Rad_teorica.append(punto.iloc[rad].values[0])
data_Theorical = pd.DataFrame()
data_Theorical['fecha_hora'] = fechas
data_Theorical['Radiacion_Teo'] = Rad_teorica
data_Theorical.index = data_Theorical['fecha_hora']
df_hourly_theoric = data_Theorical.groupby(pd.Grouper(freq="H")).mean()
df_hourly_theoric = df_hourly_theoric[df_hourly_theoric['Radiacion_Teo'] > 0]
return df_hourly_theoric
def Elevation_RadiationTA(n, lat, lon, start):
    'Obtains the radiation in W/m2 and the solar elevation angle in degrees, hourly, for a number "n" of days at a ' \
    'point with a given latitude and longitude ("lat"/"lon" as floats), starting from an initial date ' \
    '"start", for example datetime.datetime(2018, 1, 1, 8).'
import pysolar
import pytz
import datetime
timezone = pytz.timezone("America/Bogota")
start_aware = timezone.localize(start)
    # Calculate radiation every hour for n days
nhr = 24*n
dates, altitudes_deg, radiations = list(), list(), list()
for ihr in range(nhr):
date = start_aware + datetime.timedelta(hours=ihr)
altitude_deg = pysolar.solar.get_altitude(lat, lon, date)
if altitude_deg <= 0:
radiation = 0.
else:
radiation = pysolar.radiation.get_radiation_direct(date, altitude_deg)
dates.append(date)
altitudes_deg.append(altitude_deg)
radiations.append(radiation)
days = [ihr/24 for ihr in range(nhr)]
return days, altitudes_deg, radiations
if Theoric_rad_method != 'GIS_Model' and Estacion == '6001':
days, altitudes_deg, Io_hora = Elevation_RadiationTA(365, 6.259, -75.588, datetime.datetime(2018, 1, 1, 0))
print('Teorica con pysolar')
elif Theoric_rad_method != 'GIS_Model' and Estacion == '6002':
days, altitudes_deg, Io_hora = Elevation_RadiationTA(365, 6.168, -75.644, datetime.datetime(2018, 1, 1, 0))
print('Teorica con pysolar')
elif Theoric_rad_method != 'GIS_Model' and Estacion == '6003':
days, altitudes_deg, Io_hora = Elevation_RadiationTA(365, 6.255, -75.542, datetime.datetime(2018, 1, 1, 0))
print('Teorica con pysolar')
elif Theoric_rad_method == 'GIS_Model':
Io_hora = serie_Kumar_Model_hora(Estacion)
print('Teorica con el modelo de KUMAR')
###############################################################################
##-----------THEORETICAL EFFICIENCIES AS A PROXY FOR TRANSPARENCY------------##
###############################################################################
'Calculation of the theoretical efficiencies as a proxy for the transparency of the atmosphere.'
'This uses the information from the pyranometer and the theoretical radiation'
'from <NAME>; the aim is to obtain the characteristics that derive from the'
'stochastic analysis, similar to that of <NAME> in their doctoral thesis.'
##------------------READING THE EXPERIMENT DATA-------------------------------##
df_P975 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel975.txt', sep=',', index_col =0)
df_P350 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel350.txt', sep=',', index_col =0)
df_P348 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel348.txt', sep=',', index_col =0)
df_P975['Fecha_hora'] = df_P975.index
df_P350['Fecha_hora'] = df_P350.index
df_P348['Fecha_hora'] = df_P348.index
df_P975.index = pd.to_datetime(df_P975.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
df_P350.index = pd.to_datetime(df_P350.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
df_P348.index = pd.to_datetime(df_P348.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
## ----------------RESTRICTING THE DATA TO VALID VALUES---------------- ##
'Since what matters in this case is the radiation, the data filtering only'
'keeps power values greater than or equal to 0, which seem to be generated one'
'hour after the radiation starts to come in.'
df_P975 = df_P975[(df_P975['radiacion'] > 0) & (df_P975['strength'] >=0) & (df_P975['NI'] >=0)]
df_P350 = df_P350[(df_P350['radiacion'] > 0) & (df_P975['strength'] >=0) & (df_P975['NI'] >=0)]
df_P348 = df_P348[(df_P348['radiacion'] > 0) & (df_P975['strength'] >=0) & (df_P975['NI'] >=0)]
df_P975_h = df_P975.groupby(pd.Grouper(level='fecha_hora', freq='1H')).mean()
df_P350_h = df_P350.groupby(pd.Grouper(level='fecha_hora', freq='1H')).mean()
df_P348_h = df_P348.groupby(pd.Grouper(level='fecha_hora', freq='1H')).mean()
df_P975_h = df_P975_h.between_time('06:00', '17:00')
df_P350_h = df_P350_h.between_time('06:00', '17:00')
df_P348_h = df_P348_h.between_time('06:00', '17:00')
##----ADJUSTING THE THEORETICAL RADIATION DATA TO THE DESIRED DATE RANGE-----##
def daterange(start_date, end_date):
    'Adjusts the dates to hourly steps for the Kumar model. The start and end'
    'dates are strings in %Y-%m-%d format.'
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d')
delta = timedelta(minutes=60)
while start_date <= end_date:
yield start_date
start_date += delta
Io_hora_975 = serie_Kumar_Model_hora('6001')
Io_hora_350 = serie_Kumar_Model_hora('6002')
Io_hora_348 = serie_Kumar_Model_hora('6003')
fechas_975 = []
for i in daterange(df_P975.index[0].date().strftime("%Y-%m-%d"), (df_P975.index[-1].date() + timedelta(days=1)).strftime("%Y-%m-%d")):
fechas_975.append(i)
fechas_350 = []
for i in daterange(df_P350.index[0].date().strftime("%Y-%m-%d"), (df_P350.index[-1].date() + timedelta(days=1)).strftime("%Y-%m-%d")):
fechas_350.append(i)
fechas_348 = []
for i in daterange(df_P348.index[0].date().strftime("%Y-%m-%d"), (df_P348.index[-1].date() + timedelta(days=1)).strftime("%Y-%m-%d")):
fechas_348.append(i)
Io_hora_975 = Io_hora_975.loc[(Io_hora_975.index >= '2018-03-20') & (Io_hora_975.index <= '2018-'+str(df_P975.index[-1].month)+'-'+str(df_P975.index[-1].day+1))]
Io_hora_350 = Io_hora_350.loc[(Io_hora_350.index >= '2018-03-22') & (Io_hora_350.index <= '2018-'+str(df_P350.index[-1].month)+'-'+str(df_P350.index[-1].day+1))]
Io_hora_348 = Io_hora_348.loc[(Io_hora_348.index >= '2018-03-23') & (Io_hora_348.index <= '2018-'+str(df_P348.index[-1].month)+'-'+str(df_P348.index[-1].day+1))]
Io_hora_975 = Io_hora_975.between_time('06:00', '17:00')
Io_hora_975.index = [Io_hora_975.index[i].replace(year=2019) for i in range(len(Io_hora_975.index))]
Io_hora_350 = Io_hora_350.between_time('06:00', '17:00')
Io_hora_350.index = [Io_hora_350.index[i].replace(year=2019) for i in range(len(Io_hora_350.index))]
Io_hora_348 = Io_hora_348.between_time('06:00', '17:00')
Io_hora_348.index = [Io_hora_348.index[i].replace(year=2019) for i in range(len(Io_hora_348.index))]
df_Rad_P975 = pd.concat([Io_hora_975, df_P975_h], axis = 1)
df_Rad_P350 = pd.concat([Io_hora_350, df_P350_h], axis = 1)
df_Rad_P348 = pd.concat([Io_hora_348, df_P348_h], axis = 1)
df_Rad_P975 = df_Rad_P975.drop(['NI','strength'], axis=1)
df_Rad_P350 = df_Rad_P350.drop(['NI','strength'], axis=1)
df_Rad_P348 = df_Rad_P348.drop(['NI','strength'], axis=1)
##--------------------ACTUAL EFFICIENCY AS A PROXY FOR TRANSPARENCY----------##
df_Rad_P975['Efi_Transp'] = df_Rad_P975['radiacion'] / df_Rad_P975['Radiacion_Teo']
df_Rad_P350['Efi_Transp'] = df_Rad_P350['radiacion'] / df_Rad_P350['Radiacion_Teo']
df_Rad_P348['Efi_Transp'] = df_Rad_P348['radiacion'] / df_Rad_P348['Radiacion_Teo']
##-----------------HOURS WITH THE HIGHEST EFFICIENCY AND THEIR HISTOGRAM-------------##
'The frequency of the hours that exceeded the efficiency maximum (1) is shown in the histogram'
'below. The result shows that the highest frequencies occur at 6 and 7 in the morning,'
'which is attributable to shortcomings of the clear-sky radiation model'
'at those points.'
Hour_Max_Efi_975 = df_Rad_P975[df_Rad_P975['Efi_Transp']>1].index.hour
Hour_Max_Efi_350 = df_Rad_P350[df_Rad_P350['Efi_Transp']>1].index.hour
Hour_Max_Efi_348 = df_Rad_P348[df_Rad_P348['Efi_Transp']>1].index.hour
fig = plt.figure(figsize=[10, 6])
plt.rc('axes', edgecolor='gray')
ax1 = fig.add_subplot(1, 3, 1)
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.hist(Hour_Max_Efi_348, bins='auto', alpha = 0.5)
ax1.set_title(u'Distribución horas de excedencia \n de la eficiencia en JV', fontproperties=prop, fontsize = 8)
ax1.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax1.set_xlabel(u'Horas', fontproperties=prop_1)
ax1.legend()
ax2 = fig.add_subplot(1, 3, 2)
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.hist(Hour_Max_Efi_350, bins='auto', alpha = 0.5)
ax2.set_title(u'Distribución horas de excedencia \n de la eficiencia en CI', fontproperties=prop, fontsize = 8)
ax2.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax2.set_xlabel(u'Horas', fontproperties=prop_1)
ax2.legend()
ax3 = fig.add_subplot(1, 3, 3)
ax3.spines['top'].set_visible(False)
ax3.spines['right'].set_visible(False)
ax3.hist(Hour_Max_Efi_975, bins='auto', alpha = 0.5)
ax3.set_title(u'Distribución horas de excedencia \n de la eficiencia en TS', fontproperties=prop, fontsize = 8)
ax3.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax3.set_xlabel(u'Horas', fontproperties=prop_1)
ax3.legend()
plt.savefig('/home/nacorreasa/Escritorio/Figuras/HistoHoraExceEfi.png')
plt.show()
##-------DISCRIMINATING BETWEEN RAINY AND DRY DAYS BY RADIATION PERCENTILES--------##
'To deal with the situation where there may be days on which the pyranometers only measured'
'during a fraction of the day, due to possible damage or disturbances, only days with'
'at least 6 hours of measurements should be considered.'
df_Rad_P975_count_h_pira = df_Rad_P975.groupby(pd.Grouper(freq="D")).count()['radiacion']>6
df_Rad_P350_count_h_pira = df_Rad_P350.groupby(pd.Grouper(freq="D")).count()['radiacion']>6
df_Rad_P348_count_h_pira = df_Rad_P348.groupby(pd.Grouper(freq="D")).count()['radiacion']>6
days_P975_count_h_pira = df_Rad_P975_count_h_pira.index[df_Rad_P975_count_h_pira == True]
days_P350_count_h_pira = df_Rad_P350_count_h_pira.index[df_Rad_P350_count_h_pira == True]
days_P348_count_h_pira = df_Rad_P348_count_h_pira.index[df_Rad_P348_count_h_pira == True]
'Thresholds were established empirically to select the markedly cloudy and'
'markedly clear days within the recording period, following the procedures in the'
'Umbrales_Radiacion_Piranometro.py program.'
Sum_df_Rad_P975 = df_Rad_P975.groupby(pd.Grouper(freq='1D')).sum()
Sum_df_Rad_P350 = df_Rad_P350.groupby(pd.Grouper(freq='1D')).sum()
Sum_df_Rad_P348 = df_Rad_P348.groupby(pd.Grouper(freq='1D')).sum()
Sum_df_Rad_P975 = Sum_df_Rad_P975[Sum_df_Rad_P975['radiacion']>0]
Sum_df_Rad_P350 = Sum_df_Rad_P350[Sum_df_Rad_P350['radiacion']>0]
Sum_df_Rad_P348 = Sum_df_Rad_P348[Sum_df_Rad_P348['radiacion']>0]
lista_days_975 = []
for i in range(len(Sum_df_Rad_P975)):
if Sum_df_Rad_P975.index[i] in days_P975_count_h_pira:
lista_days_975.append(1)
else:
lista_days_975.append(0)
Sum_df_Rad_P975['days'] = lista_days_975
Sum_df_Rad_P975 = Sum_df_Rad_P975[Sum_df_Rad_P975['days'] == 1]
Sum_df_Rad_P975 = Sum_df_Rad_P975.drop(['days'], axis = 1)
lista_days_350 = []
for i in range(len(Sum_df_Rad_P350)):
if Sum_df_Rad_P350.index[i] in days_P350_count_h_pira:
lista_days_350.append(1)
else:
lista_days_350.append(0)
Sum_df_Rad_P350['days'] = lista_days_350
Sum_df_Rad_P350 = Sum_df_Rad_P350[Sum_df_Rad_P350['days'] == 1]
Sum_df_Rad_P350 = Sum_df_Rad_P350.drop(['days'], axis = 1)
lista_days_348 = []
for i in range(len(Sum_df_Rad_P348)):
if Sum_df_Rad_P348.index[i] in days_P348_count_h_pira:
lista_days_348.append(1)
else:
lista_days_348.append(0)
Sum_df_Rad_P348['days'] = lista_days_348
Sum_df_Rad_P348 = Sum_df_Rad_P348[Sum_df_Rad_P348['days'] == 1]
Sum_df_Rad_P348 = Sum_df_Rad_P348.drop(['days'], axis = 1)
Desp_Pira_975 = Sum_df_Rad_P975[Sum_df_Rad_P975.radiacion>=(Sum_df_Rad_P975.Radiacion_Teo)*0.85]
Desp_Pira_350 = Sum_df_Rad_P350[Sum_df_Rad_P350.radiacion>=(Sum_df_Rad_P350.Radiacion_Teo)*0.78]
Desp_Pira_348 = Sum_df_Rad_P348[Sum_df_Rad_P348.radiacion>=(Sum_df_Rad_P348.Radiacion_Teo)*0.80]
Nuba_Pira_975 = Sum_df_Rad_P975[Sum_df_Rad_P975.radiacion<=(Sum_df_Rad_P975.Radiacion_Teo)*0.25]
Nuba_Pira_350 = Sum_df_Rad_P350[Sum_df_Rad_P350.radiacion<=(Sum_df_Rad_P350.Radiacion_Teo)*0.25]
Nuba_Pira_348 = Sum_df_Rad_P348[Sum_df_Rad_P348.radiacion<=(Sum_df_Rad_P348.Radiacion_Teo)*0.22]
Appended_data_desp_975 = []
for i in range(len(Desp_Pira_975.index.values)):
Appended_data_desp_975.append(df_P975_h[df_P975_h.index.date == Desp_Pira_975.index.date[i]])
Appended_data_desp_975 = pd.concat(Appended_data_desp_975)
import pytz
import pytest
import dateutil
import warnings
import numpy as np
from datetime import timedelta
from itertools import product
import pandas as pd
import pandas._libs.tslib as tslib
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas.core.indexes.datetimes import cdate_range
from pandas import (DatetimeIndex, PeriodIndex, Series, Timestamp, Timedelta,
date_range, TimedeltaIndex, _np_version_under1p10, Index,
datetime, Float64Index, offsets, bdate_range)
from pandas.tseries.offsets import BMonthEnd, CDay, BDay
from pandas.tests.test_base import Ops
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setup_method(self, method):
super(TestDatetimeIndexOps, self).setup_method(method)
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
pytest.raises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
pytest.raises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
                         Timestamp('2013-01-02'),
                         pd.NaT,
                         Timestamp('2013-01-04')]
# -*- coding: utf-8 -*-
"""
Reading data for WB, PRO,
for kennisimpulse project
to read data from province, water companies, and any other sources
Created on Sun Jul 26 21:55:57 2020
@author: <NAME>
"""
import pytest
import numpy as np
import pandas as pd
from pathlib import Path
import pickle as pckl
from hgc import ner
from hgc import io
import tests
# import xlsxwriter
def test_province():
# WD = Path(tests.__file__).parent / 'provincie_data_long_preprocessed.csv'
WD = r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse'+'/provincie_data_long_preprocessed.csv'
df_temp = pd.read_csv(WD, encoding='ISO-8859-1', header=None)
# define the nrow here
n_row = None
feature_map, feature_unmapped, df_feature_map = ner.generate_feature_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 25].dropna()))
unit_map, unit_unmapped, df_unit_map = ner.generate_unit_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 26].dropna()))
# create a df to record what has been mapped and what has not
df_map = pd.DataFrame((feature_map.keys(),feature_map.values(),unit_map.keys(),unit_map.values()), index=['Feature','Mapped feature','Unit','Mapped unit']).transpose()
if not not feature_unmapped:
df_map = df_map.join(pd.DataFrame(feature_unmapped, columns=['Unmapped feature']))
if not not unit_unmapped:
df_map = df_map.join(pd.DataFrame(unit_unmapped, columns=['Unmapped unit']))
dct2_arguments = {
'file_path': WD,
'sheet_name': 'stacked',
'shape': 'stacked',
'slice_header': [1, slice(1, None)],
'slice_data': [slice(1, n_row), slice(1, None)],
'map_header': {
**io.default_map_header(),
'MeetpuntId': 'LocationID',
'parameter':'Feature',
'eenheid': 'Unit',
'waarde': 'Value',
'Opgegeven bemonstering datum': 'Datetime',
'Monsternummer': 'SampleID', # "SampleID" already exists as header, but contains wrong date. Use "Sample number" as "SampleID"
# 'SampleID': None # otherwise exists twice in output file
},
'map_features': {**feature_map,'pH(1)':'pH'},
'map_units': {**unit_map, 'oC':'°C'},
}
df2 = io.import_file(**dct2_arguments)[0]
df2_hgc = io.stack_to_hgc(df2)
with pd.ExcelWriter(r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse'+r'/provincie_processed.xlsx') as writer:
df2_hgc.to_excel(writer, sheet_name='hgc_prov')
df2.to_excel(writer, sheet_name='df_prov')
df_map.to_excel(writer, sheet_name='mapAndUnmap')
def test_KIWKZUID():
# WD = Path(tests.__file__).parent / 'provincie_data_long_preprocessed.csv'
# WD = r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/Opkomende stoffen KIWK Zuid_preprocessed.csv'
WD = r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/Opkomende stoffen KIWK Zuid_preprocessed.csv'
df_temp = pd.read_csv(WD, header=None, encoding='ISO-8859-1')
# define the nrow here
n_row = None
feature_map, feature_unmapped, df_feature_map = ner.generate_feature_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 20].dropna()))
unit_map, unit_unmapped, df_unit_map = ner.generate_unit_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 21].dropna()))
# create a df to record what has been mapped and what has not
df_map = pd.DataFrame((feature_map.keys(),feature_map.values(),unit_map.keys(),unit_map.values()), index=['Feature','Mapped feature','Unit','Mapped unit']).transpose()
if not not feature_unmapped:
df_map = df_map.join(pd.DataFrame(feature_unmapped, columns=['Unmapped feature']))
dct2_arguments = {
'file_path': WD,
'sheet_name': 'Export KoW 2.0',
'shape': 'stacked',
'slice_header': [1, slice(1, 24)],
'slice_data': [slice(1, n_row), slice(1, 24)],
'map_header': {
**io.default_map_header(),
'Monsterpunt': 'LocationID',
'Parameter omschrijving':'Feature',
'Eenheid': 'Unit',
'Gerapporteerde waarde': 'Value', # Gerapporteerde waarde, right?!
'Monstername datum': 'Datetime',
'Analyse': 'SampleID', # Analyse !?
# 'SampleID': None # otherwise exists twice in output file
},
'map_features': {**feature_map,'pH(1)':'pH'},
'map_units': {**unit_map, 'oC':'°C'},
}
df2 = io.import_file(**dct2_arguments)[0]
df2_hgc = io.stack_to_hgc(df2)
# with pd.ExcelWriter(r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/KIWK_Zuid_processed.xlsx') as writer:
with pd.ExcelWriter(r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/KIWK_Zuid_processed.xlsx') as writer:
df2.to_excel(writer, sheet_name='KIWK_Zuid')
df2_hgc.to_excel(writer, sheet_name='hgc_KIWK_Zuid')
df_map.to_excel(writer, sheet_name='mapAndUnmap')
def test_KIWKVenloschol():
# WD = Path(tests.__file__).parent / 'provincie_data_long_preprocessed.csv'
# WD = r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/Opkomende stoffen KIWK Zuid_preprocessed.csv'
WD = r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/Opkomende stoffen KIWK Venloschol_preprocessed.xlsx'
df_temp = pd.read_excel(WD, header=None, encoding='ISO-8859-1')
# define the nrow here
n_row = None
feature_map, feature_unmapped, df_feature_map = ner.generate_feature_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 20].dropna()))
unit_map, unit_unmapped, df_unit_map = ner.generate_unit_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 21].dropna()))
# create a df to record what has been mapped and what has not
df_map = pd.DataFrame((feature_map.keys(),feature_map.values(),unit_map.keys(),unit_map.values()), index=['Feature','Mapped feature','Unit','Mapped unit']).transpose()
    if feature_unmapped:
        df_map = df_map.join(pd.DataFrame(feature_unmapped, columns=['Unmapped feature']))
    if unit_unmapped:
        df_map = df_map.join(pd.DataFrame(unit_unmapped, columns=['Unmapped unit']))
dct2_arguments = {
'file_path': WD,
'sheet_name': 'Export KoW 2.0',
'shape': 'stacked',
'slice_header': [1, slice(1, 24)],
'slice_data': [slice(1, n_row), slice(1, 24)],
'map_header': {
**io.default_map_header(),
'Monsterpunt': 'LocationID',
'Parameter omschrijving':'Feature',
'Eenheid': 'Unit',
'Gerapporteerde waarde': 'Value', # Gerapporteerde waarde, right?!
'Monstername datum': 'Datetime',
'Analyse': 'SampleID', # Analyse !?
},
'map_features': {**feature_map,'pH(1)':'pH'},
'map_units': {**unit_map, 'µg/l atrazine-D5':'µg/l'},
}
df2 = io.import_file(**dct2_arguments)[0]
df2_hgc = io.stack_to_hgc(df2)
# with pd.ExcelWriter(r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/KIWK_Zuid_processed.xlsx') as writer:
with pd.ExcelWriter(r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/Opkomende stoffen KIWK Venloschol_processed.xlsx') as writer:
df2_hgc.to_excel(writer, sheet_name='hgc_KIWK Venloschol')
df2.to_excel(writer, sheet_name='KIWK Venloschol')
df_map.to_excel(writer, sheet_name='mapAndUnmap')
def test_KIWKRoerdalslenk():
    # WD = Path(tests.__file__).parent / 'provincie_data_long_preprocessed.csv'
    # WD = r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/Opkomende stoffen KIWK Zuid_preprocessed.csv'
WD = r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/Opkomende stoffen KIWK Roerdalslenk_preprocessed.xlsx'
df_temp = pd.read_excel(WD, header=None, encoding='ISO-8859-1')
# define the nrow here
n_row = None
feature_map, feature_unmapped, df_feature_map = ner.generate_feature_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 20].dropna()))
unit_map, unit_unmapped, df_unit_map = ner.generate_unit_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 21].dropna()))
# create a df to record what has been mapped and what has not
df_map = pd.DataFrame((feature_map.keys(),feature_map.values(),unit_map.keys(),unit_map.values()), index=['Feature','Mapped feature','Unit','Mapped unit']).transpose()
    if feature_unmapped:
        df_map = df_map.join(pd.DataFrame(feature_unmapped, columns=['Unmapped feature']))
    if unit_unmapped:
        df_map = df_map.join(pd.DataFrame(unit_unmapped, columns=['Unmapped unit']))
dct2_arguments = {
'file_path': WD,
'sheet_name': 'Export KoW 2.0',
'shape': 'stacked',
'slice_header': [1, slice(1, 24)],
'slice_data': [slice(1, n_row), slice(1, 24)],
'map_header': {
**io.default_map_header(),
'Monsterpunt': 'LocationID',
'Parameter omschrijving':'Feature',
'Eenheid': 'Unit',
'Gerapporteerde waarde': 'Value', # Gerapporteerde waarde, right?!
'Monstername datum': 'Datetime',
'Analyse': 'SampleID', # Analyse !?
},
'map_features': {**feature_map,'pH(1)':'pH'},
'map_units': {**unit_map, 'µg/l Hxdcn-d34':'µg/l'},
}
df2 = io.import_file(**dct2_arguments)[0]
df2_hgc = io.stack_to_hgc(df2)
# with pd.ExcelWriter(r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/KIWK_Zuid_processed.xlsx') as writer:
with pd.ExcelWriter(r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/Opkomende stoffen KIWK Roerdalslenk_processed.xlsx') as writer:
df2_hgc.to_excel(writer, sheet_name='hgc_KIWK Roerdalslenk')
df2.to_excel(writer, sheet_name='KIWK Roerdalslenk')
df_map.to_excel(writer, sheet_name='mapAndUnmap')
def test_KIWKHeelBeegden():
# WD = Path(tests.__file__).parent / 'provincie_data_long_preprocessed.csv'
# WD = r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/Opkomende stoffen KIWK Zuid_preprocessed.csv'
WD = r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/Opkomende stoffen KIWK Heel Beegden_preprocessed.xlsx'
df_temp = pd.read_excel(WD, header=None, encoding='ISO-8859-1')
# define the nrow here
n_row = None
feature_map, feature_unmapped, df_feature_map = ner.generate_feature_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 20].dropna()))
unit_map, unit_unmapped, df_unit_map = ner.generate_unit_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 21].dropna()))
# create a df to record what has been mapped and what has not
df_map = pd.DataFrame((feature_map.keys(),feature_map.values(),unit_map.keys(),unit_map.values()), index=['Feature','Mapped feature','Unit','Mapped unit']).transpose()
    if feature_unmapped:
        df_map = df_map.join(pd.DataFrame(feature_unmapped, columns=['Unmapped feature']))
    if unit_unmapped:
        df_map = df_map.join(pd.DataFrame(unit_unmapped, columns=['Unmapped unit']))
dct2_arguments = {
'file_path': WD,
'sheet_name': 'Export KoW 2.0',
'shape': 'stacked',
'slice_header': [1, slice(1, 24)],
'slice_data': [slice(1, n_row), slice(1, 24)],
'map_header': {
**io.default_map_header(),
'Monsterpunt': 'LocationID',
'Parameter omschrijving':'Feature',
'Eenheid': 'Unit',
'Gerapporteerde waarde': 'Value', # Gerapporteerde waarde, right?!
'Monstername datum': 'Datetime',
'Analyse': 'SampleID', # Analyse !?
},
'map_features': {**feature_map,'pH(1)':'pH'},
'map_units': {**unit_map, 'µg/l Hxdcn-d34':'µg/l'},
}
df2 = io.import_file(**dct2_arguments)[0]
df2_hgc = io.stack_to_hgc(df2)
# with pd.ExcelWriter(r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/KIWK_Zuid_processed.xlsx') as writer:
with pd.ExcelWriter(r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/Opkomende stoffen KIWK Heel Beegden_processed.xlsx') as writer:
df2_hgc.to_excel(writer, sheet_name='hgc_KIWKHeelBeegden')
df2.to_excel(writer, sheet_name='KIWKHeelBeegden')
df_map.to_excel(writer, sheet_name='mapAndUnmap')
def test_WBGR():
# WD = Path(tests.__file__).parent / 'provincie_data_long_preprocessed.csv'
# WD = r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/Opkomende stoffen KIWK Zuid_preprocessed.csv'
WD = r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/Kennisimpuls kwaliteitsdata_WBGR_preprocessed.xlsx'
df_temp = pd.read_excel(WD, header=None, encoding='ISO-8859-1', sheet_name='Resultaten')
# define the nrow here
n_row = None
feature_map, feature_unmapped, df_feature_map = ner.generate_feature_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 6].dropna()))
unit_map, unit_unmapped, df_unit_map = ner.generate_unit_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 11].dropna()))
# create a df to record what has been mapped and what has not
df_map = pd.DataFrame((feature_map.keys(),feature_map.values(),unit_map.keys(),unit_map.values()), index=['Feature','Mapped feature','Unit','Mapped unit']).transpose()
    if feature_unmapped:
        df_map = df_map.join(pd.DataFrame(feature_unmapped, columns=['Unmapped feature']))
    if unit_unmapped:
        df_map = df_map.join(pd.DataFrame(unit_unmapped, columns=['Unmapped unit']))
dct2_arguments = {
'file_path': WD,
'sheet_name': 'Resultaten',
'shape': 'stacked',
'slice_header': [1, slice(1, 12)],
'slice_data': [slice(1, n_row), slice(1, 12)],
'map_header': {
**io.default_map_header(),
'Monsterpunt': 'LocationID',
'Parameter':'Feature',
'Eenheid': 'Unit',
'Resultaat': 'Value', # Gerapporteerde waarde, right?!
'Datum': 'Datetime',
'Beschrijving': 'SampleID', # Analyse !?
},
'map_features': {**feature_map,'pH(1)':'pH'},
'map_units': {**unit_map, 'µg/l Hxdcn-d34':'µg/l'},
}
df2 = io.import_file(**dct2_arguments)[0]
df2_hgc = io.stack_to_hgc(df2)
# with pd.ExcelWriter(r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/KIWK_Zuid_processed.xlsx') as writer:
with pd.ExcelWriter(r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/Kennisimpuls kwaliteitsdata_WBGR_processed.xlsx') as writer:
df2_hgc.to_excel(writer, sheet_name='hgc_WBGR')
df2.to_excel(writer, sheet_name='WBGR')
df_map.to_excel(writer, sheet_name='mapAndUnmap')
def test_WMD():
# WD = Path(tests.__file__).parent / 'provincie_data_long_preprocessed.csv'
# WD = r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/Opkomende stoffen KIWK Zuid_preprocessed.csv'
WD = r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/Kennisimpuls kwaliteitsdata_WMD_preprocessed.xlsx'
df_temp = pd.read_excel(WD, header=None, encoding='ISO-8859-1', sheet_name='Resultaten WMD')
# define the nrow here
n_row = None
feature_map, feature_unmapped, df_feature_map = ner.generate_feature_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 6].dropna()))
unit_map, unit_unmapped, df_unit_map = ner.generate_unit_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 11].dropna()))
# create a df to record what has been mapped and what has not
df_map = pd.DataFrame((feature_map.keys(),feature_map.values(),unit_map.keys(),unit_map.values()), index=['Feature','Mapped feature','Unit','Mapped unit']).transpose()
    if feature_unmapped:
        df_map = df_map.join(pd.DataFrame(feature_unmapped, columns=['Unmapped feature']))
    if unit_unmapped:
        df_map = df_map.join(pd.DataFrame(unit_unmapped, columns=['Unmapped unit']))
dct2_arguments = {
'file_path': WD,
'sheet_name': 'Resultaten WMD',
'shape': 'stacked',
'slice_header': [1, slice(1, 12)],
'slice_data': [slice(1, n_row), slice(1, 12)],
'map_header': {
**io.default_map_header(),
'Monsterpunt': 'LocationID',
'Parameter':'Feature',
'Eenheid': 'Unit',
'Resultaat': 'Value', # Gerapporteerde waarde, right?!
'Datum': 'Datetime',
'Beschrijving': 'SampleID', # Analyse !?
},
'map_features': {**feature_map,'pH(1)':'pH'},
'map_units': {**unit_map, 'µg/l Hxdcn-d34':'µg/l'},
}
df2 = io.import_file(**dct2_arguments)[0]
df2_hgc = io.stack_to_hgc(df2)
# with pd.ExcelWriter(r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/KIWK_Zuid_processed.xlsx') as writer:
with pd.ExcelWriter(r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/Kennisimpuls kwaliteitsdata_WMD_processed.xlsx') as writer:
df2_hgc.to_excel(writer, sheet_name='hgc_WMD')
df2.to_excel(writer, sheet_name='WMD')
df_map.to_excel(writer, sheet_name='mapAndUnmap')
def test_BOexport_bewerkt():
# WD = Path(tests.__file__).parent / 'provincie_data_long_preprocessed.csv'
# WD = r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/Opkomende stoffen KIWK Zuid_preprocessed.csv'
WD = r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/BOexport_bewerkt_preprocessed.xlsx'
    df_temp = pd.read_excel(WD, header=None, encoding='ISO-8859-1')
import numpy as np
import pylab as pl
import seaborn as sns
from remodnav import EyegazeClassifier
from remodnav.tests.test_labeled import load_data as load_anderson
import pdb
#pdb.set_trace() to set breakpoint
import pandas as pd
labeled_files = {
'dots': [
'TH20_trial1_labelled_{}.mat',
'TH38_trial1_labelled_{}.mat',
'TL22_trial17_labelled_{}.mat',
'TL24_trial17_labelled_{}.mat',
'UH21_trial17_labelled_{}.mat',
'UH21_trial1_labelled_{}.mat',
'UH25_trial1_labelled_{}.mat',
'UH33_trial17_labelled_{}.mat',
'UL27_trial17_labelled_{}.mat',
'UL31_trial1_labelled_{}.mat',
'UL39_trial1_labelled_{}.mat',
],
'img': [
'TH34_img_Europe_labelled_{}.mat',
'TH34_img_vy_labelled_{}.mat',
'TL20_img_konijntjes_labelled_{}.mat',
'TL28_img_konijntjes_labelled_{}.mat',
'UH21_img_Rome_labelled_{}.mat',
'UH27_img_vy_labelled_{}.mat',
'UH29_img_Europe_labelled_{}.mat',
'UH33_img_vy_labelled_{}.mat',
'UH47_img_Europe_labelled_{}.mat',
'UL23_img_Europe_labelled_{}.mat',
'UL31_img_konijntjes_labelled_{}.mat',
'UL39_img_konijntjes_labelled_{}.mat',
'UL43_img_Rome_labelled_{}.mat',
'UL47_img_konijntjes_labelled_{}.mat',
],
'video': [
'TH34_video_BergoDalbana_labelled_{}.mat',
'TH38_video_dolphin_fov_labelled_{}.mat',
'TL30_video_triple_jump_labelled_{}.mat',
'UH21_video_BergoDalbana_labelled_{}.mat',
'UH29_video_dolphin_fov_labelled_{}.mat',
'UH47_video_BergoDalbana_labelled_{}.mat',
'UL23_video_triple_jump_labelled_{}.mat',
'UL27_video_triple_jump_labelled_{}.mat',
'UL31_video_triple_jump_labelled_{}.mat',
],
}
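# Each entry above is a filename template; the '{}' placeholder is filled with the coder
# initials before loading, e.g. 'TH20_trial1_labelled_{}.mat'.format('MN').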
def get_durations(events, evcodes):
events = [e for e in events if e['label'] in evcodes]
# TODO minus one sample at the end?
durations = [e['end_time'] - e['start_time'] for e in events]
return durations
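# get_durations assumes each event is a dict-like record with at least 'label', 'start_time'
# and 'end_time' keys, e.g. (illustrative values only):
#   {'label': 'FIXA', 'start_time': 0.100, 'end_time': 0.350}  ->  duration of 0.250 s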
def print_duration_stats():
for stimtype in ('img', 'dots', 'video'):
#for stimtype in ('img', 'video'):
for coder in ('MN', 'RA'):
print(stimtype, coder)
fixation_durations = []
saccade_durations = []
pso_durations = []
purs_durations = []
for fname in labeled_files[stimtype]:
data, target_labels, target_events, px2deg, sr = load_anderson(
stimtype, fname.format(coder))
fixation_durations.extend(get_durations(
target_events, ['FIXA']))
saccade_durations.extend(get_durations(
target_events, ['SACC']))
pso_durations.extend(get_durations(
target_events, ['PSO']))
purs_durations.extend(get_durations(
target_events, ['PURS']))
print(
'FIX: %i (%i) [%i]' % (
np.mean(fixation_durations) * 1000,
np.std(fixation_durations) * 1000,
len(fixation_durations)))
print(
'SAC: %i (%i) [%i]' % (
np.mean(saccade_durations) * 1000,
np.std(saccade_durations) * 1000,
len(saccade_durations)))
print(
'PSO: %i (%i) [%i]' % (
np.mean(pso_durations) * 1000,
np.std(pso_durations) * 1000,
len(pso_durations)))
print(
'PURS: %i (%i) [%i]' % (
np.mean(purs_durations) * 1000,
np.std(purs_durations) * 1000,
len(purs_durations)))
def remodnav_on_anderson_mainseq(superimp = "trials"):
""" by default will make main sequences for each trial/file.
superimp = "stimulus" for superimposed main sequences of each stimulus
type"""
for stimtype in ('img', 'dots', 'video'):
#for stimtype in ('img', 'video'):
if superimp == "stimulus":
pl.figure(figsize=(6,4))
coder = 'MN'
print(stimtype, coder)
fixation_durations = []
saccade_durations = []
pso_durations = []
purs_durations = []
for fname in labeled_files[stimtype]:
data, target_labels, target_events, px2deg, sr = load_anderson(
stimtype, fname.format(coder))
clf = EyegazeClassifier(
px2deg=px2deg,
sampling_rate=sr,
pursuit_velthresh=5.,
noise_factor=3.0,
lowpass_cutoff_freq=10.0,
)
p = clf.preproc(data)
events = clf(p)
events = pd.DataFrame(events)
saccades = events[events['label'] == 'SACC']
isaccades = events[events['label'] == 'ISAC']
hvpso = events[(events['label'] == 'HPSO') | (events['label'] == 'IHPS')]
lvpso = events[(events['label'] == 'LPSO') | (events['label'] == 'ILPS')]
if superimp == "trials":
pl.figure(figsize=(6,4))
for ev, sym, color, label in (
(saccades, '.', 'xkcd:green grey', 'Segment defining saccade'),
(isaccades, '.', 'xkcd:dark olive', 'Saccades'),
(hvpso, '+', 'xkcd:pinkish', 'High velocity PSOs'),
(lvpso, '+', 'xkcd:wine', 'PSOs'))[::-1]:
pl.loglog(ev['amp'], ev['peak_vel'], sym, color=color,
alpha=1, lw=1, label=label)
pl.ylim((10.0, 1000)) #previously args.max_vel, put this back in
pl.xlim((0.01, 40.0))
pl.legend(loc=4)
pl.ylabel('peak velocities (deg/s)')
pl.xlabel('amplitude (deg)')
if superimp == "trials":
pl.savefig(
'{}_{}_remodnav_on_testdata_mainseq.svg'.format(stimtype,fname[0:15]),bbox_inches='tight', format='svg')
if superimp == "stimulus":
pl.savefig(
'{}_remodnav_on_testdata_superimp_mainseq.svg'.format(stimtype,fname[0:15]),bbox_inches='tight', format='svg')
pl.close('all')
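# Typical calls (sketch): remodnav_on_anderson_mainseq() for one main-sequence figure per
# trial/file, or remodnav_on_anderson_mainseq(superimp="stimulus") for one superimposed
# figure per stimulus type, as described in the docstring above.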
def preproc_on_anderson_mainseq():
#for sequentially making main sequences of all the available files
for stimtype in ('img', 'dots', 'video'):
#for stimtype in ('img', 'video'):
for coder in ('MN', 'RA'):
print(stimtype, coder)
fixation_durations = []
saccade_durations = []
pso_durations = []
purs_durations = []
for fname in labeled_files[stimtype]:
data, target_labels, target_events, px2deg, sr = load_anderson(
stimtype, fname.format(coder))
clf = EyegazeClassifier(
px2deg=px2deg,
sampling_rate=sr,
pursuit_velthresh=5.,
noise_factor=3.0,
lowpass_cutoff_freq=10.0,
)
pproc = clf.preproc(data)
pproc_df = pd.DataFrame(pproc)
target_events_df = pd.DataFrame(target_events)
saccade_events = target_events_df[target_events_df.label == "SACC"]
peak_vels = []
amp = []
for row in target_events_df.itertuples():
peak_vels.append(pproc_df.vel.loc[row.start_index:row.end_index].max())
amp.append ((((pproc_df.x.loc[row.start_index] - pproc_df.x.loc[row.end_index]) ** 2 + \
(pproc_df.y.loc[row.start_index] - pproc_df.y.loc[row.end_index]) ** 2) ** 0.5) * px2deg)
peaks_amps_df = pd.DataFrame({'peak_vels':peak_vels,'amp':amp})
target_events_df= pd.concat([target_events_df, peaks_amps_df], axis=1)
saccades = target_events_df[target_events_df['label'] == 'SACC']
pso = target_events_df[target_events_df['label'] == 'PSO']
pl.figure(figsize=(6,4))
for ev, sym, color, label in (
(saccades, '.', 'black', 'saccades'),
(pso, '+', 'xkcd:burnt sienna', 'PSOs'))[::-1]:
pl.loglog(ev['amp'], ev['peak_vels'], sym, color=color,
alpha=.2, lw=1, label=label)
pl.ylim((10.0, 1000)) #previously args.max_vel, put this back in
pl.xlim((0.01, 40.0))
pl.legend(loc=4)
pl.ylabel('peak velocities (deg/s)')
pl.xlabel('amplitude (deg)')
pl.tick_params(which='both',direction = 'in')
pl.savefig(
'{}_{}_{}_mainseq_preproc_on_anderson.svg'.format(stimtype, coder,fname[0:15]),bbox_inches='tight', format='svg')
print(len(peak_vels))
print(len(amp))
def preproc_on_anderson_mainseq_superimp(superimp = "coders"):
""" by default will make main sequences for each coder for each file
"stimulus" for superimposed main sequences of each stimulus type"""
#for making main sequences with Human coders superimposed on one another
for stimtype in ('img', 'dots', 'video'):
#for stimtype in ('img', 'video'):
if superimp == "stimulus":
pl.figure(figsize=(6,4))
for coder in ('MN', 'RA'):
print(stimtype, coder)
fixation_durations = []
saccade_durations = []
pso_durations = []
purs_durations = []
for fname in labeled_files[stimtype]:
data, target_labels, target_events, px2deg, sr = load_anderson( #change to load_anderson
stimtype, fname.format(coder))
clf = EyegazeClassifier(
px2deg=px2deg,
sampling_rate=sr,
pursuit_velthresh=5.,
noise_factor=3.0,
lowpass_cutoff_freq=10.0,
)
pproc = clf.preproc(data)
pproc_df = pd.DataFrame(pproc)
target_events_df = pd.DataFrame(target_events)
saccade_events = target_events_df[target_events_df.label == "SACC"]
peak_vels = []
amp = []
for row in target_events_df.itertuples():
peak_vels.append(pproc_df.vel.loc[row.start_index:row.end_index].max())
amp.append ((((pproc_df.x.loc[row.start_index] - pproc_df.x.loc[row.end_index]) ** 2 + \
(pproc_df.y.loc[row.start_index] - pproc_df.y.loc[row.end_index]) ** 2) ** 0.5) * px2deg)
peaks_amps_df = pd.DataFrame({'peak_vels':peak_vels,'amp':amp})
                target_events_df = pd.concat([target_events_df, peaks_amps_df], axis=1)
import os
import json
import pandas as pd
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import math
import configparser
import logging
import pickle
config = configparser.ConfigParser()
config.read('../config.ini')
logger = logging.getLogger(__name__)
class RealEstateData:
"""
RealEstateData is designed to collect real estate listings for analysis from a given CITY, STATE,
parsing data from the RapidAPI for Realtor.com.
Use Guidelines:
    my_real_estate = RealEstateData('CITY', 'STATE_CODE', 'API_CONFIG_SECTION')
my_real_estate_results = my_real_estate.results
To Do:
- Check for null values in API return
- Check for invalid input
"""
def __init__(self, city, state, api):
self.city = city.upper()
self.state = state.upper()
self.api = api
self._url = config.get(api, 'rapidapi_url')
self._jsonREData = self._fetch_housing_data()
self._results = self._parse()
self._requests_remaining = 99999
def __repr__(self):
return f"RealEstateData('{self.city, self.state, self.api}')"
def __str__(self):
return f'{self.city, self.state, self.api} real estate data'
def get_results(self):
return self._results
def _fetch_housing_data(self):
"""
Function to fetch all housing data from Realtor.com via RapidAPI
:return: Dictionary of Dictionaries containing all the results from the the API call
"""
list_json_data = None
list_missed_states = []
list_missed_cities = []
list_missed_offsets = []
list_collected_data = []
response = self.api_call()
if self.validate_api_call(response):
json_content = json.loads(response.content)
list_json_data = [json_content]
housing_total = self.get_housing_total(json_content=json_content)
list_offsets = self.define_chunks(total=housing_total)
for offset in list_offsets:
response = self.api_call(offset=offset)
                if self.validate_api_call(response):
                    json_content = json.loads(response.content)
                    list_json_data.append(json_content)
                else:  # Try again; the error is usually 500: Error JSON parsing
                    response = self.api_call(offset=offset)
                    if self.validate_api_call(response):
                        json_content = json.loads(response.content)
                        list_json_data.append(json_content)
else:
logger.error(f'{self.state}-{self.city} failed on offset: {offset}')
list_missed_states.append(self.state)
list_missed_cities.append(self.city)
list_missed_offsets.append(offset)
list_collected_data.append(-1)
dict_missed_data = {'state': list_missed_states, 'city': list_missed_cities,
'offset': list_missed_offsets, 'collected': list_collected_data}
if os.path.exists('../../data/models/missed_data.pickle'):
with open('../../data/models/missed_data.pickle', 'rb') as file:
df = pickle.load(file)
df = df.append(dict_missed_data, ignore_index=True)
else:
            df = pd.DataFrame(dict_missed_data)
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_validate
from pandas.api.types import is_numeric_dtype
import statsmodels.api as sm
import warnings
import time
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
class LinearRegressionClass:
def __init__(self,df,response,sig_level=0.05,max_iter=500,cols_to_keep_static=[],cols_to_try_individually=[]):
'''
:param df: a dataframe
:param response: a string. This must be an existing column in df
:param sig_level: a float. The significance level the forward selection will use
:param max_iter: an integer. The maximum iterations the solvers will use to try to converge
:param cols_to_keep_static: a list. Used in forward selection to not omit these columns
:param cols_to_try_individually: a list. The columns to test in a regression one at a time to identify which
one has the greatest relationship with the response controlled for the cols_to_keep_static
'''
# attach attributes to the object
self.df = df.copy()
self.response = response
self.sig_level = sig_level
self.max_iter=max_iter
self.warnings = ''
self.error_message = ''
self.cols_to_keep_static = cols_to_keep_static
self.cols_to_try_individually = cols_to_try_individually
if self.response in self.cols_to_keep_static:
print('The response - {} is in the static columns. Removed it.'.format(response))
self.cols_to_keep_static = list(filter(lambda x: x != self.response,self.cols_to_keep_static))
if self.response in self.cols_to_try_individually:
print('The response - {} is in the cols to try individually columns. Removed it.'.format(response))
self.cols_to_try_individually = list(filter(lambda x: x != self.response,self.cols_to_try_individually))
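    # Minimal usage sketch (illustrative; assumes `df` is a pandas DataFrame that contains
    # the response column, here called 'y'):
    #   lr = LinearRegressionClass(df, response='y', sig_level=0.05)
    #   lr.lin_reg()                          # plain OLS fit with automatic dummification
    #   lr.lin_reg_with_feature_selection()   # forward selection at the given sig_level
    #   lr.get_interpretation()               # average-prediction effect of each feature
    #   lr.lin_reg_diagnostic_performance(lr.X, lr.y)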
def prepare_data(self,df,response):
y = df[response]
X = df[list(filter(lambda x: x != response, df.columns))]
X = sm.add_constant(X, has_constant='add')
return X, y
def linear_regression_utility_check_response(self,series):
if (not is_numeric_dtype(series)):
self.error_message = self.error_message + '\n' + 'The response variable should be numeric type'
print('The response variable should be numeric type')
return False
return True
def lin_reg_diagnostic_performance(self,X,y):
cvs = cross_validate(LinearRegression(), X, y, cv=5,
scoring=['r2', 'neg_mean_squared_error', 'neg_root_mean_squared_error'])
s = """Performance\n5-Fold Cross Validation Results:\nTest Set r2 = {}\nneg_mean_squared_error = {}\nneg_root_mean_squared_error = {}""".format(
round(cvs['test_r2'].mean(), 2), round(cvs['test_neg_mean_squared_error'].mean(), 2),
round(cvs['test_neg_root_mean_squared_error'].mean(), 2))
self.performance = s
self.performance_df = pd.DataFrame(data=[round(cvs['test_r2'].mean(), 2), round(cvs['test_neg_mean_squared_error'].mean(), 2),
round(cvs['test_neg_root_mean_squared_error'].mean(), 2)],
index=['test_r2','test_neg_mean_squared_error','test_neg_root_mean_squared_error'],
columns=['Score'])
return s
def lin_reg_diagnostic_correlations(self,X):
print("Correlations")
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(1, 1, 1)
upp_mat = np.triu(X.corr())
sns.heatmap(X.corr(), vmin=-1, vmax=+1, annot=True, cmap='coolwarm', mask=upp_mat, ax=ax)
self.fig_correlations = fig
self.ax_correlations = ax
return fig,ax
def linear_regression_get_report(self,model,X,y,verbose=True):
pass
def prepare_categories(self,df, response, drop=False):
cat_cols = list(filter(lambda x: not is_numeric_dtype(df[x]), df.columns))
cat_cols = list(set(cat_cols) - {response} - set(self.cols_to_keep_static))
df = pd.get_dummies(df, columns=cat_cols, drop_first=drop)
df = pd.get_dummies(df, columns=self.cols_to_keep_static, drop_first=True)
self.cols_to_keep_static_dummified = []
for col in self.cols_to_keep_static:
for col_dummy in df.columns:
if col in col_dummy:
self.cols_to_keep_static_dummified.append(col_dummy)
return df
def get_interpretation(self,result=None,feature_list=None,df=None):
'''
Given a trained model, calculate the average probabilities due to feature changes
'''
if (result is None) or (feature_list is None):
try:
feature_list = self.X_with_feature_selection.columns
result = self.result_with_feature_selection
except:
feature_list = self.X.columns
try:
result = self.result
except:
result = self.basic_result
# take a copy of the original df and prepare the dataset
if df is None:
df = self.df.copy()
df_temp = df.copy()
df_temp = self.prepare_categories(df_temp, self.response, drop=False)
X, y = self.prepare_data(df_temp,self.response)
full_feature_list = list(feature_list)
if 'const' not in full_feature_list:
full_feature_list = ['const'] + full_feature_list
# comparative uplift section
comparative_dict = dict()
for col1 in df.columns:
for col2 in full_feature_list:
# if this feature was dummified
if col1 + '_' in col2:
t = X[full_feature_list].copy()
# First get prediction with 0
t[col2] = 0
comparative_dict[col2] = [result.predict(t).mean()]
# Then get prediction with 1
t[col2] = 1
comparative_dict[col2].append(result.predict(t).mean())
elif col1 == col2:
t = X[full_feature_list].copy()
# first get prediction with average
t[col2] = t[col2].mean()
comparative_dict[col2] = [result.predict(t).mean()]
# then get prediction with +1
t[col2] = t[col2] + 1
comparative_dict[col2].append(result.predict(t).mean())
feature_interpretability_comparative_df = pd.DataFrame(comparative_dict).T
feature_interpretability_comparative_df.columns = ['Prediction_average_or_without','Prediction_add1_or_with']
feature_interpretability_comparative_df['diff'] = feature_interpretability_comparative_df['Prediction_add1_or_with'] - feature_interpretability_comparative_df['Prediction_average_or_without']
self.feature_interpretability_comparative_df = feature_interpretability_comparative_df
# get a base probability (this is just the average probability)
base_probability = result.predict(X[full_feature_list]).mean()
probability_dict = dict()
probability_dict['base'] = base_probability
# for each column in the original df
for col in df.columns:
# for each column in the result's feature list
for col2 in feature_list:
# check if this feature was dummified from this column
if col + '_' in col2:
# if this feature was dummified from this column then update this column to be this feature value
df_temp = df.copy()
df_temp[col] = col2.replace(col + '_', '')
df_temp = self.prepare_categories(df_temp, self.response, drop=False)
X, y = self.prepare_data(df_temp, self.response)
# check that all features the model is expecting exist in X
for col3 in feature_list:
if col3 not in X.columns:
X[col3] = 0
# calculate the probability
probability = result.predict(X[full_feature_list]).mean()
probability_dict[col2] = probability
elif col == col2:
# if this column was not dummified then it is numeric so add 1 to it
df_temp = df.copy()
df_temp[col] = df_temp[col] + 1
df_temp = self.prepare_categories(df_temp, self.response, drop=False)
X, y = self.prepare_data(df_temp, self.response)
probability = result.predict(X[full_feature_list]).mean()
probability_dict[col2] = probability
# save the probability dictionary
self.feature_interpretability_dict = probability_dict
self.feature_interpretability_df = pd.DataFrame(data=probability_dict.values(), index=probability_dict.keys(), columns=['Probability'])
return self.feature_interpretability_df
def lin_reg_basic(self,df=None):
'''
Run a basic logistic regression model
'''
if df is None:
df = self.df
X, y = self.prepare_data(df, self.response)
model = sm.OLS(y, X)
result = model.fit(maxiter=self.max_iter)
self.basic_result = result
self.basic_model = model
self.X = X
self.y = y
return result
def predict_from_original(self,df):
df = self.prepare_categories(df, self.response, drop=False)
all_cols = []
try:
all_cols = list(self.X_with_feature_selection.columns)
except:
all_cols = list(self.X.columns)
for col in all_cols:
if col not in df.columns:
df[col] = 0
res = None
try:
res = self.result_with_feature_selection
except:
res = self.result
return res.predict(df[all_cols])
def lin_reg(self,df=None):
if df is None:
df1 = self.df[~self.df.isna().any(axis=1)].copy()
if len(df1) < len(self.df):
warning_message = 'There are NaNs in the dataset. After removing NaNs, the rows reduce from {} to {}'.format(len(self.df),
len(df1))
warnings.warn(warning_message)
print(warning_message)
self.warnings = self.warnings + '\n' + warning_message
else:
df1 = df[~df.isna().any(axis=1)].copy()
if len(df1) < len(df):
warning_message = 'There are NaNs in the dataset. After removing NaNs, the rows reduce from {} to {}'.format(
len(df),
len(df1))
warnings.warn(warning_message)
print(warning_message)
self.warnings = self.warnings + '\n' + warning_message
if not self.linear_regression_utility_check_response(df1[self.response]):
return None
df1 = self.prepare_categories(df1,self.response,drop=True)
result = self.lin_reg_basic(df1)
self.result = result
self.model = self.basic_model
return result
def lin_reg_with_feature_selection(self,df=None,run_for=0,verbose=True):
# start the timer in case the is a time limit specified
start_time = time.time()
if df is None:
# get rid of nans. There should be no nans. Imputation should be performed prior to this point
df1 = self.df[~self.df.isna().any(axis=1)].copy()
# show a warning to let the user know of the droppped nans
if len(df1) < len(self.df):
warning_message = 'There are NaNs in the dataset. After removing NaNs, the rows reduce from {} to {}'.format(
len(self.df),
len(df1))
warnings.warn(warning_message)
print(warning_message)
self.warnings = self.warnings + '\n' + warning_message
else:
# get rid of nans. There should be no nans. Imputation should be performed prior to this point
df1 = df[~df.isna().any(axis=1)].copy()
# show a warning to let the user know of the droppped nans
if len(df1) < len(df):
warning_message = 'There are NaNs in the dataset. After removing NaNs, the rows reduce from {} to {}'.format(
len(df),
len(df1))
warnings.warn(warning_message)
print(warning_message)
self.warnings = self.warnings + '\n' + warning_message
# check that the response is in the correct format to perform linear regression
if not self.linear_regression_utility_check_response(df1[self.response]):
return None
# automatically identify categorical variables and dummify them
df1 = self.prepare_categories(df1, self.response, drop=False)
# raise a warning if the number of columns surpasses the number of rows
if len(df1.columns) > len(df1):
warnings.warn(
'Note: The number of columns after getting dummies is larger than the number of rows. n_cols = {}, nrows = {}'.format(
len(df1.columns), len(df1)))
print(
'Note: The number of columns after getting dummies is larger than the number of rows. n_cols = {}, nrows = {}'.format(
len(df1.columns), len(df1)))
# the initial list of features
remaining = list(set(df1.columns) - {self.response} - set(self.cols_to_keep_static_dummified))
# this holds the tried and successful feature set
full_feature_set = self.cols_to_keep_static_dummified
# get the first linear regression output for only the constant/base model
first_result = self.lin_reg_basic(df1[[self.response]])
# save the model and the X and y used to train it
self.X_with_feature_selection = self.X.copy()
self.y_with_feature_selection = self.y.copy()
self.model_with_feature_selection = self.basic_model
# get the r2 of the base model
rsquared = first_result.rsquared
# store the result of the first model
final_result = first_result
# while there are still remaining features to try keep looping
while len(remaining) > 0:
# store the last pseudo r2 value
last_rsquared = rsquared
# the next feature to add to the full feature set
next_col = None
# the result corresponding to the addition of the next col
next_result = None
# try adding each column from the remaining columns
for col in sorted(remaining):
# add the next column to the feature set and try it out. Try except is added because sometimes
# when categorical variables are dummified and you add both variables you get a singular matrix
this_feature_set = full_feature_set + [col]
try:
result = self.lin_reg_basic(df1[this_feature_set + [self.response]])
except Exception as e:
remaining.remove(col)
continue
# the resulting r2 from this fit
this_rsquared = result.rsquared
# if a feature results in nan for r2 skip it
if this_rsquared is np.nan:
print('Note: Feature {} is resulting with a nan r2. Skipping feature'.format(col))
continue
# this feature is recorded as a candidate if the conditions are met
if (this_rsquared > last_rsquared) and (result.pvalues.loc[col] <= self.sig_level):
last_rsquared = this_rsquared
next_col = col
next_result = result
# save the model and the X and y used to train it
self.X_with_feature_selection = self.X.copy()
self.y_with_feature_selection = self.y.copy()
self.model_with_feature_selection = self.basic_model
# if after the loop no new candidates were found then we stop looking
if next_col is None:
break
# add the candidate to the permanent list
full_feature_set.append(next_col)
# show progress
if verbose:
                print('********Adding {} with rsquared = {}********'.format(next_col, last_rsquared))
# store the result
final_result = next_result
# remove the chosen candidate from the remaining features
remaining.remove(next_col)
# check if it's not taking too long
if (time.time() - start_time > run_for) and (run_for > 0):
print(
'Aborting: Has been running for {}s > {}s. {} out of {} columns left. There are probably too many categories in one of the columns'.format(
round(time.time() - start_time, 2), run_for, len(remaining), len(df1.columns) - 1))
return
self.final_feature_set = full_feature_set
self.result_with_feature_selection = final_result
return final_result
def lin_reg_one_at_a_time(self,with_feature_selection=False,get_interpretability=False):
dic = dict()
df1 = self.df.copy()
df1 = df1[[self.response]+self.cols_to_keep_static + self.cols_to_try_individually].copy()
for this_col_to_try in self.cols_to_try_individually:
if with_feature_selection:
result = self.lin_reg_with_feature_selection(df=df1[self.cols_to_keep_static + [self.response, this_col_to_try]])
if get_interpretability:
self.get_interpretation(self.result_with_feature_selection,self.final_feature_set
,df=df1[self.cols_to_keep_static + [self.response, this_col_to_try]])
else:
result = self.lin_reg(df=df1[self.cols_to_keep_static + [self.response,this_col_to_try]])
if get_interpretability:
self.get_interpretation(self.result, self.X.columns
, df=df1[self.cols_to_keep_static + [self.response, this_col_to_try]])
for col in list(filter(lambda x: this_col_to_try in x,result.params.index)):
if get_interpretability:
dic[col] = [result.params[col],result.pvalues[col],self.feature_interpretability_df['Probability'][col],
self.feature_interpretability_df['Probability']['base']]
else:
dic[col] = [result.params[col], result.pvalues[col]]
df_one_at_a_time = pd.DataFrame(dic).T
if get_interpretability:
df_one_at_a_time.columns = ['Coefficient','Pvalue','Controlled Probability','Base Probability']
else:
df_one_at_a_time.columns = ['Coefficient','Pvalue']
self.df_one_at_a_time = df_one_at_a_time
return df_one_at_a_time
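# Example (sketch): with cols_to_keep_static=['Pclass'] and cols_to_try_individually=['Sex', 'Age'],
# lin_reg_one_at_a_time() fits one regression per individual column while controlling for Pclass
# and returns a DataFrame of their coefficients and p-values, as exercised in unit_test_7 below.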
def unit_test_1():
print('Unit test 1...')
import sys
import os
import warnings
np.random.seed(101)
#warnings.filterwarnings("ignore")
current_dir = '/'.join(sys.path[0].split('/')[:-1]) # sys.path[0]
data_dir = os.path.join(current_dir, 'Data', 'titanic')
titanic_csv = os.path.join(data_dir, 'titanic.csv')
df = pd.read_csv(titanic_csv)
df['Sex'] = df['Sex'].map({'male': 0, 'female': 1})
df = df[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp','Parch', 'Fare']]
df = df.dropna()
my_linear_regresion_class = LinearRegressionClass(df,'Fare',sig_level=0.05)
my_linear_regresion_class.lin_reg_basic()
result_required = [110.08, 3.74, -35.75, 2.54, -0.17, 5.51, 10.21]
result_actual = list(my_linear_regresion_class.basic_result.params)
result_required = list(map(lambda x: round(x, 2), result_required))
result_actual = list(map(lambda x: round(x, 2), result_actual))
assert (sorted(result_required) == sorted(result_actual))
result_required = 34.69
result_actual = my_linear_regresion_class.basic_result.predict(my_linear_regresion_class.X).mean()
result_required = round(result_required, 2)
result_actual = round(result_actual, 2)
assert (result_required == result_actual)
result_required = '''Performance
5-Fold Cross Validation Results:
Test Set r2 = 0.36
neg_mean_squared_error = -1812.52
neg_root_mean_squared_error = -41.66'''
result_actual = my_linear_regresion_class.lin_reg_diagnostic_performance(my_linear_regresion_class.X,
my_linear_regresion_class.y)
assert (result_required == result_actual)
result_required = [34.69, 38.44, -1.05, 37.23, 34.52, 40.21, 44.9]
result_actual = list(my_linear_regresion_class.get_interpretation()['Probability'])
result_required = list(map(lambda x: round(x, 2), result_required))
result_actual = list(map(lambda x: round(x, 2), result_actual))
assert (result_required == result_actual)
result_required = [-1.05, 34.52, 37.23, 38.44, 40.21, 44.9]
result_actual = sorted(list(my_linear_regresion_class.feature_interpretability_comparative_df['Prediction_add1_or_with']))
result_required = list(map(lambda x: round(x, 2), result_required))
result_actual = list(map(lambda x: round(x, 2), result_actual))
assert (result_required == result_actual)
print('Success!')
def unit_test_2():
print('Unit test 2...')
import sys
import os
import warnings
np.random.seed(101)
warnings.filterwarnings("ignore")
current_dir = '/'.join(sys.path[0].split('/')[:-1]) # sys.path[0]
data_dir = os.path.join(current_dir, 'Data', 'titanic')
titanic_csv = os.path.join(data_dir, 'titanic.csv')
df = pd.read_csv(titanic_csv)
df['Sex'] = df['Sex'].map({'male': 0, 'female': 1})
df = df[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp','Parch', 'Fare']]
df = df.dropna()
my_linear_regresion_class = LinearRegressionClass(df,'Survived',sig_level=0.05)
my_linear_regresion_class.lin_reg()
result_required = [0.88, -0.19, 0.49, -0.01, -0.05, -0.01, 0.0]
result_actual = list(my_linear_regresion_class.basic_result.params)
result_required = list(map(lambda x: round(x, 2), result_required))
result_actual = list(map(lambda x: round(x, 2), result_actual))
assert (sorted(result_required) == sorted(result_actual))
result_required = 0.4061624649859944
result_actual = my_linear_regresion_class.basic_result.predict(my_linear_regresion_class.X).mean()
result_required = round(result_required, 2)
result_actual = round(result_actual, 2)
assert (result_required == result_actual)
result_required = '''Performance
5-Fold Cross Validation Results:
Test Set r2 = 0.36
neg_mean_squared_error = -0.15
neg_root_mean_squared_error = -0.39'''
result_actual = my_linear_regresion_class.lin_reg_diagnostic_performance(my_linear_regresion_class.X,
my_linear_regresion_class.y)
assert (result_required == result_actual)
result_required = [0.41, 0.21, 0.89, 0.4, 0.35, 0.39, 0.41]
result_actual = list(my_linear_regresion_class.get_interpretation()['Probability'])
result_required = list(map(lambda x: round(x, 2), result_required))
result_actual = list(map(lambda x: round(x, 2), result_actual))
assert (result_required == result_actual)
print('Success!')
def unit_test_3():
print('Unit test 3...')
import sys
import os
import warnings
np.random.seed(101)
warnings.filterwarnings("ignore")
current_dir = '/'.join(sys.path[0].split('/')[:-1]) # sys.path[0]
data_dir = os.path.join(current_dir, 'Data', 'titanic')
titanic_csv = os.path.join(data_dir, 'titanic.csv')
df = pd.read_csv(titanic_csv)
df = df[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp','Parch', 'Fare']]
df = df.dropna()
my_linear_regresion_class = LinearRegressionClass(df,'Fare',sig_level=0.05)
my_linear_regresion_class.lin_reg()
result_required = [112.61, 3.74, -35.75, -0.17, 5.51, 10.21, -2.54]
result_actual = list(my_linear_regresion_class.result.params)
result_required = list(map(lambda x: round(x, 2), result_required))
result_actual = list(map(lambda x: round(x, 2), result_actual))
assert (sorted(result_required) == sorted(result_actual))
result_required = 34.69
result_actual = my_linear_regresion_class.result.predict(my_linear_regresion_class.X).mean()
result_required = round(result_required, 2)
result_actual = round(result_actual, 2)
assert (result_required == result_actual)
result_required = '''Performance
5-Fold Cross Validation Results:
Test Set r2 = 0.36
neg_mean_squared_error = -1812.52
neg_root_mean_squared_error = -41.66'''
result_actual = my_linear_regresion_class.lin_reg_diagnostic_performance(my_linear_regresion_class.X,
my_linear_regresion_class.y)
assert (result_required == result_actual)
result_required = [34.69, 38.44, -1.05, 33.77, 34.52, 40.21, 44.9]
result_actual = list(my_linear_regresion_class.get_interpretation()['Probability'])
result_required = list(map(lambda x: round(x, 2), result_required))
result_actual = list(map(lambda x: round(x, 2), result_actual))
assert (result_required == result_actual)
result_required = [-1.05, 33.77, 34.52, 38.44, 40.21, 44.9]
result_actual = sorted(list(my_linear_regresion_class.feature_interpretability_comparative_df['Prediction_add1_or_with']))
result_required = list(map(lambda x: round(x, 2), result_required))
result_actual = list(map(lambda x: round(x, 2), result_actual))
assert (result_required == result_actual)
result_required = [1.0, 0.83, -2.94, -3.65, -4.17, -4.59, -8.48, -8.77, -10.16]
result_actual = sorted(list(my_linear_regresion_class.predict_from_original(df)))[:-10:-1]
result_required = list(map(lambda x: round(x, 2), result_required))
result_actual = list(map(lambda x: round(x, 2), result_actual))
assert (result_required == result_actual)
print('Success!')
def unit_test_4():
print('Unit test 4...')
import sys
import os
import warnings
np.random.seed(101)
#warnings.filterwarnings("ignore")
current_dir = '/'.join(sys.path[0].split('/')[:-1]) # sys.path[0]
data_dir = os.path.join(current_dir, 'Data', 'titanic')
titanic_csv = os.path.join(data_dir, 'titanic.csv')
df = pd.read_csv(titanic_csv)
df = df[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp','Parch', 'Fare']]
df = df.dropna()
my_linear_regresion_class = LinearRegressionClass(df,'Fare',sig_level=0.05)
my_linear_regresion_class.lin_reg_with_feature_selection(verbose=False)
result_required = [106.7, -35.73, 11.05, 6.15]
result_actual = list(my_linear_regresion_class.result_with_feature_selection.params)
result_required = list(map(lambda x: round(x, 2), result_required))
result_actual = list(map(lambda x: round(x, 2), result_actual))
assert (sorted(result_required) == sorted(result_actual))
result_required = 34.69
result_actual = my_linear_regresion_class.result_with_feature_selection.predict(my_linear_regresion_class.X_with_feature_selection).mean()
result_required = round(result_required, 2)
result_actual = round(result_actual, 2)
assert (result_required == result_actual)
result_required = '''Performance
5-Fold Cross Validation Results:
Test Set r2 = 0.37
neg_mean_squared_error = -1798.9
neg_root_mean_squared_error = -41.44'''
result_actual = my_linear_regresion_class.lin_reg_diagnostic_performance(my_linear_regresion_class.X_with_feature_selection,
my_linear_regresion_class.y_with_feature_selection)
assert (result_required == result_actual)
result_required = [34.69, -1.04, 40.84, 45.75]
result_actual = list(my_linear_regresion_class.get_interpretation()['Probability'])
result_required = list(map(lambda x: round(x, 2), result_required))
result_actual = list(map(lambda x: round(x, 2), result_actual))
assert (result_required == result_actual)
result_required = [14.62, 4.81, 4.81, 4.81, -1.34, -1.34, -7.48, -7.48, -7.48]
result_actual = sorted(list(my_linear_regresion_class.predict_from_original(df)))[:-10:-1]
result_required = list(map(lambda x: round(x, 2), result_required))
result_actual = list(map(lambda x: round(x, 2), result_actual))
assert (result_required == result_actual)
print('Success!')
def unit_test_5():
print('Unit test 5...')
import sys
import os
import warnings
np.random.seed(101)
#warnings.filterwarnings("ignore")
current_dir = '/'.join(sys.path[0].split('/')[:-1]) # sys.path[0]
data_dir = os.path.join(current_dir, 'Data', 'titanic')
titanic_csv = os.path.join(data_dir, 'titanic.csv')
df = pd.read_csv(titanic_csv)
df = df[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp','Parch', 'Fare']]
df.loc[1,'Survived'] = np.nan
my_linear_regresion_class = LinearRegressionClass(df,'Survived',sig_level=0.05)
my_linear_regresion_class.lin_reg()
result_required = [1.36, -0.19, -0.01, -0.05, -0.01, 0.0, -0.49]
result_actual = list(my_linear_regresion_class.result.params)
result_required = list(map(lambda x: round(x, 2), result_required))
result_actual = list(map(lambda x: round(x, 2), result_actual))
assert (sorted(result_required) == sorted(result_actual))
print('Success!')
def unit_test_6():
print('Unit test 6...')
import sys
import os
import warnings
np.random.seed(101)
#warnings.filterwarnings("ignore")
current_dir = '/'.join(sys.path[0].split('/')[:-1]) # sys.path[0]
data_dir = os.path.join(current_dir, 'Data', 'titanic')
titanic_csv = os.path.join(data_dir, 'titanic.csv')
df = pd.read_csv(titanic_csv)
df = df[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp','Parch', 'Fare']]
df.loc[1,'Survived'] = np.nan
for col in df.columns:
if col in ['Pclass','Parch']:
df[col] = df[col].astype('str')
my_linear_regresion_class = LinearRegressionClass(df,'Fare',sig_level=0.05,cols_to_keep_static=['Pclass'])
my_linear_regresion_class.lin_reg_with_feature_selection(verbose=False)
result_required = [110.47, -66.41, -75.03, -30.54, -14.45, 4.2]
result_actual = list(my_linear_regresion_class.result_with_feature_selection.params)
result_required = list(map(lambda x: round(x, 2), result_required))
result_actual = list(map(lambda x: round(x, 2), result_actual))
assert (sorted(result_required) == sorted(result_actual))
print('Success!')
def unit_test_7():
print('Unit test 7...')
import sys
import os
import warnings
np.random.seed(101)
#warnings.filterwarnings("ignore")
current_dir = '/'.join(sys.path[0].split('/')[:-1]) # sys.path[0]
data_dir = os.path.join(current_dir, 'Data', 'titanic')
titanic_csv = os.path.join(data_dir, 'titanic.csv')
df = pd.read_csv(titanic_csv)
df = df[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp','Parch', 'Fare']]
for col in df.columns:
if col in ['Pclass','Parch']:
df[col] = df[col].astype('str')
my_linear_regresion_class = LinearRegressionClass(df,'Fare',sig_level=0.05,cols_to_keep_static=['Pclass'],
cols_to_try_individually=['Parch','Sex','Age','Fare'],
max_iter=1000)
my_linear_regresion_class.lin_reg_one_at_a_time(with_feature_selection=True)
result_required = [-36.41, -17.25, 22.02, 34.63, -0.51]
result_actual = list(my_linear_regresion_class.df_one_at_a_time['Coefficient'])
result_required = list(map(lambda x: round(x, 2), result_required))
result_actual = list(map(lambda x: round(x, 2), result_actual))
assert (sorted(result_required) == sorted(result_actual))
print('Success!')
def unit_test_8():
print('Unit test 8...')
import sys
import os
import warnings
np.random.seed(101)
#warnings.filterwarnings("ignore")
current_dir = '/'.join(sys.path[0].split('/')[:-1]) # sys.path[0]
data_dir = os.path.join(current_dir, 'Data', 'titanic')
titanic_csv = os.path.join(data_dir, 'titanic.csv')
    df = pd.read_csv(titanic_csv)
import pandas as pd
import numpy as np
from scipy import integrate, stats
from numpy import absolute, mean
from itertools import islice
import statsmodels.api as sm
from statsmodels.formula.api import ols
import statsmodels.stats.multicomp
import seaborn as sns
import matplotlib.pyplot as plt
import statsmodels.formula.api as smf
import os
## Note: MAD isn't working; can come back to it later. See behavanalysis_part3 line 50 for examples
## This calculates 2 different ways of looking at reaction time: the first trial after a switch, "first_switch_trial",
# and the average of the first three switch trials, "average_switch_trial."
def create_df3(raw_data_location3):
raw_data_location3 = open(r'C:\Users\danie\Documents\SURREY\Project_1\TaskSwitchingParadigm\online_TSP\second_online_cohort\pilot2_withoccurence.csv')
path = (r'C:\Users\danie\Documents\SURREY\Project_1\TaskSwitchingParadigm\online_TSP\second_online_cohort')
df = pd.read_csv(raw_data_location3, header = 0)
df_behavstats = pd.DataFrame()
df_behavstats1 = pd.DataFrame()
    df_behavstats2 = pd.DataFrame()
import re
import warnings
from datetime import datetime, timedelta
from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
from pandas.testing import (
assert_frame_equal,
assert_index_equal,
assert_series_equal,
)
from woodwork.logical_types import Double, Integer
from rayml.exceptions import (
MethodPropertyNotFoundError,
MissingComponentError,
ParameterNotUsedWarning,
)
from rayml.pipelines import ComponentGraph
from rayml.pipelines.components import (
DateTimeFeaturizer,
DropRowsTransformer,
ElasticNetClassifier,
Estimator,
Imputer,
LogisticRegressionClassifier,
NaturalLanguageFeaturizer,
OneHotEncoder,
RandomForestClassifier,
SelectColumns,
StandardScaler,
TargetImputer,
Transformer,
Undersampler,
)
from rayml.problem_types import is_classification
from rayml.utils import infer_feature_types
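# NOTE: the `example_graph` fixture used throughout these tests is assumed to be provided by the
# test suite's conftest.py (not shown here). Judging from test_init_str_components below, it
# describes the same Imputer -> OneHot -> (Random Forest / Elastic Net) -> Logistic Regression
# graph, built from component classes rather than name strings.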
class DummyTransformer(Transformer):
name = "Dummy Transformer"
def __init__(self, parameters=None, random_seed=0):
parameters = parameters or {}
super().__init__(
parameters=parameters, component_obj=None, random_seed=random_seed
)
def fit(self, X, y):
return self
def transform(self, X, y=None):
return X
class TransformerA(DummyTransformer):
"""copy class"""
class TransformerB(DummyTransformer):
"""copy class"""
class TransformerC(DummyTransformer):
"""copy class"""
class DummyEstimator(Estimator):
name = "Dummy Estimator"
model_family = None
supported_problem_types = None
def __init__(self, parameters=None, random_seed=0):
parameters = parameters or {}
super().__init__(
parameters=parameters, component_obj=None, random_seed=random_seed
)
def fit(self, X, y):
return self
class EstimatorA(DummyEstimator):
"""copy class"""
class EstimatorB(DummyEstimator):
"""copy class"""
class EstimatorC(DummyEstimator):
"""copy class"""
@pytest.fixture
def dummy_components():
return TransformerA, TransformerB, TransformerC, EstimatorA, EstimatorB, EstimatorC
def test_init(example_graph):
comp_graph = ComponentGraph()
assert len(comp_graph.component_dict) == 0
graph = example_graph
comp_graph = ComponentGraph(graph)
assert len(comp_graph.component_dict) == 6
expected_order = [
"Imputer",
"OneHot_ElasticNet",
"Elastic Net",
"OneHot_RandomForest",
"Random Forest",
"Logistic Regression Classifier",
]
assert comp_graph.compute_order == expected_order
def test_init_str_components():
graph = {
"Imputer": ["Imputer", "X", "y"],
"OneHot_RandomForest": ["One Hot Encoder", "Imputer.x", "y"],
"OneHot_ElasticNet": ["One Hot Encoder", "Imputer.x", "y"],
"Random Forest": ["Random Forest Classifier", "OneHot_RandomForest.x", "y"],
"Elastic Net": ["Elastic Net Classifier", "OneHot_ElasticNet.x", "y"],
"Logistic Regression Classifier": [
"Logistic Regression Classifier",
"Random Forest.x",
"Elastic Net.x",
"y",
],
}
comp_graph = ComponentGraph(graph)
assert len(comp_graph.component_dict) == 6
expected_order = [
"Imputer",
"OneHot_ElasticNet",
"Elastic Net",
"OneHot_RandomForest",
"Random Forest",
"Logistic Regression Classifier",
]
assert comp_graph.compute_order == expected_order
def test_init_instantiated():
graph = {
"Imputer": [
Imputer(numeric_impute_strategy="constant", numeric_fill_value=0),
"X",
"y",
]
}
component_graph = ComponentGraph(graph)
component_graph.instantiate(
{"Imputer": {"numeric_fill_value": 10, "categorical_fill_value": "Fill"}}
)
cg_imputer = component_graph.get_component("Imputer")
assert graph["Imputer"][0] == cg_imputer
assert cg_imputer.parameters["numeric_fill_value"] == 0
assert cg_imputer.parameters["categorical_fill_value"] is None
def test_invalid_init():
invalid_graph = {"Imputer": [Imputer, "X", "y"], "OHE": OneHotEncoder}
with pytest.raises(
ValueError, match="All component information should be passed in as a list"
):
ComponentGraph(invalid_graph)
graph = {
"Imputer": [
None,
"X",
"y",
]
}
with pytest.raises(
ValueError, match="may only contain str or ComponentBase subclasses"
):
ComponentGraph(graph)
graph = {
"Fake": ["Fake Component", "X", "y"],
"Estimator": [ElasticNetClassifier, "Fake.x", "y"],
}
with pytest.raises(MissingComponentError):
ComponentGraph(graph)
def test_init_bad_graphs():
graph_with_cycle = {
"Imputer": [Imputer, "X", "y"],
"OHE": [OneHotEncoder, "Imputer.x", "Estimator.x", "y"],
"Estimator": [RandomForestClassifier, "OHE.x", "y"],
}
with pytest.raises(ValueError, match="given graph contains a cycle"):
ComponentGraph(graph_with_cycle)
graph_with_more_than_one_final_component = {
"Imputer": ["Imputer", "X", "y"],
"OneHot_RandomForest": ["One Hot Encoder", "Imputer.x", "y"],
"OneHot_ElasticNet": ["One Hot Encoder", "Imputer.x", "y"],
"Random Forest": ["Random Forest Classifier", "OneHot_RandomForest.x", "y"],
"Elastic Net": ["Elastic Net Classifier", "X", "y"],
"Logistic Regression Classifier": [
"Logistic Regression Classifier",
"Random Forest.x",
"Elastic Net.x",
"y",
],
}
with pytest.raises(ValueError, match="graph has more than one final"):
ComponentGraph(graph_with_more_than_one_final_component)
graph_with_unconnected_imputer = {
"Imputer": ["Imputer", "X", "y"],
"DateTime": ["DateTime Featurizer", "X", "y"],
"Logistic Regression Classifier": [
"Logistic Regression Classifier",
"DateTime.x",
"y",
],
}
with pytest.raises(ValueError, match="The given graph is not completely connected"):
ComponentGraph(graph_with_unconnected_imputer)
def test_order_x_and_y():
graph = {
"Imputer": [Imputer, "X", "y"],
"OHE": [OneHotEncoder, "Imputer.x", "y"],
"Random Forest": [RandomForestClassifier, "OHE.x", "y"],
}
component_graph = ComponentGraph(graph).instantiate()
assert component_graph.compute_order == ["Imputer", "OHE", "Random Forest"]
def test_list_raises_error():
component_list = ["Imputer", "One Hot Encoder", RandomForestClassifier]
with pytest.raises(
ValueError,
match="component_dict must be a dictionary which specifies the components and edges between components",
):
ComponentGraph(component_list)
def test_instantiate_with_parameters(example_graph):
graph = example_graph
component_graph = ComponentGraph(graph)
assert not isinstance(component_graph.get_component("Imputer"), Imputer)
assert not isinstance(
component_graph.get_component("Elastic Net"), ElasticNetClassifier
)
parameters = {
"OneHot_RandomForest": {"top_n": 3},
"OneHot_ElasticNet": {"top_n": 5},
"Elastic Net": {"max_iter": 100},
}
component_graph.instantiate(parameters)
expected_order = [
"Imputer",
"OneHot_ElasticNet",
"Elastic Net",
"OneHot_RandomForest",
"Random Forest",
"Logistic Regression Classifier",
]
assert component_graph.compute_order == expected_order
assert isinstance(component_graph.get_component("Imputer"), Imputer)
assert isinstance(
component_graph.get_component("Random Forest"), RandomForestClassifier
)
assert isinstance(
component_graph.get_component("Logistic Regression Classifier"),
LogisticRegressionClassifier,
)
assert component_graph.get_component("OneHot_RandomForest").parameters["top_n"] == 3
assert component_graph.get_component("OneHot_ElasticNet").parameters["top_n"] == 5
assert component_graph.get_component("Elastic Net").parameters["max_iter"] == 100
@pytest.mark.parametrize("parameters", [None, {}])
def test_instantiate_without_parameters(parameters, example_graph):
graph = example_graph
component_graph = ComponentGraph(graph)
if parameters is not None:
component_graph.instantiate(parameters)
else:
component_graph.instantiate()
assert (
component_graph.get_component("OneHot_RandomForest").parameters["top_n"] == 10
)
assert component_graph.get_component("OneHot_ElasticNet").parameters["top_n"] == 10
assert component_graph.get_component(
"OneHot_RandomForest"
) is not component_graph.get_component("OneHot_ElasticNet")
expected_order = [
"Imputer",
"OneHot_ElasticNet",
"Elastic Net",
"OneHot_RandomForest",
"Random Forest",
"Logistic Regression Classifier",
]
assert component_graph.compute_order == expected_order
def test_reinstantiate(example_graph):
component_graph = ComponentGraph(example_graph)
component_graph.instantiate()
with pytest.raises(ValueError, match="Cannot reinstantiate a component graph"):
component_graph.instantiate({"OneHot": {"top_n": 7}})
def test_bad_instantiate_can_reinstantiate(example_graph):
component_graph = ComponentGraph(example_graph)
with pytest.raises(ValueError, match="Error received when instantiating component"):
component_graph.instantiate(
parameters={"Elastic Net": {"max_iter": 100, "fake_param": None}}
)
component_graph.instantiate({"Elastic Net": {"max_iter": 22}})
assert component_graph.get_component("Elastic Net").parameters["max_iter"] == 22
def test_get_component(example_graph):
graph = example_graph
component_graph = ComponentGraph(graph)
assert component_graph.get_component("OneHot_ElasticNet") == OneHotEncoder
assert (
component_graph.get_component("Logistic Regression Classifier")
== LogisticRegressionClassifier
)
with pytest.raises(ValueError, match="not in the graph"):
component_graph.get_component("Fake Component")
component_graph.instantiate(
{
"OneHot_RandomForest": {"top_n": 3},
"Random Forest": {"max_depth": 4, "n_estimators": 50},
}
)
assert component_graph.get_component("OneHot_ElasticNet") == OneHotEncoder()
assert component_graph.get_component("OneHot_RandomForest") == OneHotEncoder(
top_n=3
)
assert component_graph.get_component("Random Forest") == RandomForestClassifier(
n_estimators=50, max_depth=4
)
def test_get_estimators(example_graph):
component_graph = ComponentGraph(example_graph)
with pytest.raises(ValueError, match="Cannot get estimators until"):
component_graph.get_estimators()
component_graph.instantiate()
assert component_graph.get_estimators() == [
RandomForestClassifier(),
ElasticNetClassifier(),
LogisticRegressionClassifier(),
]
component_graph = ComponentGraph({"Imputer": ["Imputer", "X", "y"]})
component_graph.instantiate()
assert component_graph.get_estimators() == []
def test_parents(example_graph):
graph = example_graph
component_graph = ComponentGraph(graph)
assert component_graph.get_inputs("Imputer") == ["X", "y"]
assert component_graph.get_inputs("OneHot_RandomForest") == ["Imputer.x", "y"]
assert component_graph.get_inputs("OneHot_ElasticNet") == ["Imputer.x", "y"]
assert component_graph.get_inputs("Random Forest") == ["OneHot_RandomForest.x", "y"]
assert component_graph.get_inputs("Elastic Net") == ["OneHot_ElasticNet.x", "y"]
assert component_graph.get_inputs("Logistic Regression Classifier") == [
"Random Forest.x",
"Elastic Net.x",
"y",
]
with pytest.raises(ValueError, match="not in the graph"):
component_graph.get_inputs("Fake component")
component_graph.instantiate()
assert component_graph.get_inputs("Imputer") == ["X", "y"]
assert component_graph.get_inputs("OneHot_RandomForest") == ["Imputer.x", "y"]
assert component_graph.get_inputs("OneHot_ElasticNet") == ["Imputer.x", "y"]
assert component_graph.get_inputs("Random Forest") == ["OneHot_RandomForest.x", "y"]
assert component_graph.get_inputs("Elastic Net") == ["OneHot_ElasticNet.x", "y"]
assert component_graph.get_inputs("Logistic Regression Classifier") == [
"Random Forest.x",
"Elastic Net.x",
"y",
]
with pytest.raises(ValueError, match="not in the graph"):
component_graph.get_inputs("Fake component")
def test_get_last_component(example_graph):
component_graph = ComponentGraph()
with pytest.raises(
ValueError, match="Cannot get last component from edgeless graph"
):
component_graph.get_last_component()
component_graph = ComponentGraph(example_graph)
assert component_graph.get_last_component() == LogisticRegressionClassifier
component_graph.instantiate()
assert component_graph.get_last_component() == LogisticRegressionClassifier()
component_graph = ComponentGraph({"Imputer": [Imputer, "X", "y"]})
assert component_graph.get_last_component() == Imputer
component_graph = ComponentGraph(
{"Imputer": [Imputer, "X", "y"], "OneHot": [OneHotEncoder, "Imputer.x", "y"]}
)
assert component_graph.get_last_component() == OneHotEncoder
@patch("rayml.pipelines.components.Transformer.fit_transform")
@patch("rayml.pipelines.components.Estimator.fit")
@patch("rayml.pipelines.components.Estimator.predict_proba")
def test_fit_component_graph(
mock_predict_proba, mock_fit, mock_fit_transform, example_graph, X_y_binary
):
X, y = X_y_binary
mock_fit_transform.return_value = pd.DataFrame(X)
mock_predict_proba.return_value = pd.DataFrame(y)
mock_predict_proba.return_value.ww.init()
component_graph = ComponentGraph(example_graph).instantiate()
component_graph.fit(X, y)
assert mock_fit_transform.call_count == 3
assert mock_fit.call_count == 3
assert mock_predict_proba.call_count == 2
@patch("rayml.pipelines.components.TargetImputer.fit_transform")
@patch("rayml.pipelines.components.OneHotEncoder.fit_transform")
def test_fit_correct_inputs(
mock_ohe_fit_transform, mock_imputer_fit_transform, X_y_binary
):
X, y = X_y_binary
X = pd.DataFrame(X)
y = pd.Series(y)
graph = {
"Target Imputer": [TargetImputer, "X", "y"],
"OHE": [OneHotEncoder, "Target Imputer.x", "Target Imputer.y"],
}
expected_x = pd.DataFrame(index=X.index, columns=X.columns).fillna(1.0)
expected_x.ww.init()
expected_y = pd.Series(index=y.index).fillna(0)
mock_imputer_fit_transform.return_value = tuple((expected_x, expected_y))
mock_ohe_fit_transform.return_value = expected_x
component_graph = ComponentGraph(graph).instantiate()
component_graph.fit(X, y)
assert_frame_equal(expected_x, mock_ohe_fit_transform.call_args[0][0])
assert_series_equal(expected_y, mock_ohe_fit_transform.call_args[0][1])
@patch("rayml.pipelines.components.Transformer.fit_transform")
@patch("rayml.pipelines.components.Estimator.fit")
@patch("rayml.pipelines.components.Estimator.predict_proba")
def test_component_graph_fit_and_transform_all_but_final(
mock_predict_proba, mock_fit, mock_fit_transform, example_graph, X_y_binary
):
X, y = X_y_binary
component_graph = ComponentGraph(example_graph)
component_graph.instantiate()
mock_X_t = pd.DataFrame(np.ones(pd.DataFrame(X).shape))
mock_fit_transform.return_value = mock_X_t
mock_fit.return_value = Estimator
mock_predict_proba.return_value = pd.DataFrame(y)
mock_predict_proba.return_value.ww.init()
component_graph.fit_and_transform_all_but_final(X, y)
assert mock_fit_transform.call_count == 3
assert mock_fit.call_count == 2
assert mock_predict_proba.call_count == 2
@patch("rayml.pipelines.components.Estimator.fit")
@patch("rayml.pipelines.components.Estimator.predict_proba")
@patch("rayml.pipelines.components.Estimator.predict")
def test_predict(mock_predict, mock_predict_proba, mock_fit, example_graph, X_y_binary):
X, y = X_y_binary
mock_predict_proba.return_value = pd.DataFrame(y)
mock_predict_proba.return_value.ww.init()
mock_predict.return_value = pd.Series(y)
component_graph = ComponentGraph(example_graph).instantiate()
component_graph.fit(X, y)
component_graph.predict(X)
assert (
mock_predict_proba.call_count == 4
) # Called twice when fitting pipeline, twice when predicting
assert mock_predict.call_count == 1 # Called once during predict
assert mock_fit.call_count == 3 # Only called during fit, not predict
@patch("rayml.pipelines.components.Estimator.fit")
@patch("rayml.pipelines.components.Estimator.predict_proba")
@patch("rayml.pipelines.components.Estimator.predict")
def test_predict_multiclass(
mock_predict, mock_predict_proba, mock_fit, example_graph, X_y_multi
):
X, y = X_y_multi
mock_predict_proba.return_value = pd.DataFrame(
{
0: np.full(X.shape[0], 0.33),
1: np.full(X.shape[0], 0.33),
2: np.full(X.shape[0], 0.33),
}
)
mock_predict_proba.return_value.ww.init()
mock_predict.return_value = pd.Series(y)
component_graph = ComponentGraph(example_graph).instantiate()
component_graph.fit(X, y)
final_estimator_input = component_graph.transform_all_but_final(X, y)
assert final_estimator_input.columns.to_list() == [
"Col 0 Random Forest.x",
"Col 1 Random Forest.x",
"Col 2 Random Forest.x",
"Col 0 Elastic Net.x",
"Col 1 Elastic Net.x",
"Col 2 Elastic Net.x",
]
for col in final_estimator_input:
assert np.array_equal(
final_estimator_input[col].to_numpy(), np.full(X.shape[0], 0.33)
)
component_graph.predict(X)
assert (
mock_predict_proba.call_count == 6
) # Called twice when fitting pipeline, twice to compute final features, and twice when predicting
assert mock_predict.call_count == 1 # Called once during predict
assert mock_fit.call_count == 3 # Only called during fit, not predict
@patch("rayml.pipelines.components.Estimator.fit")
@patch("rayml.pipelines.components.Estimator.predict_proba")
@patch("rayml.pipelines.components.Estimator.predict")
def test_predict_regression(
mock_predict, mock_predict_proba, mock_fit, example_regression_graph, X_y_multi
):
X, y = X_y_multi
mock_predict.return_value = pd.Series(y)
mock_predict_proba.side_effect = MethodPropertyNotFoundError
component_graph = ComponentGraph(example_regression_graph).instantiate()
component_graph.fit(X, y)
final_estimator_input = component_graph.transform_all_but_final(X, y)
assert final_estimator_input.columns.to_list() == [
"Random Forest.x",
"Elastic Net.x",
]
component_graph.predict(X)
assert (
mock_predict_proba.call_count == 6
) # Called twice when fitting pipeline, twice to compute final features, and twice when predicting
assert (
mock_predict.call_count == 7
) # Called because `predict_proba` does not exist for regressions
assert mock_fit.call_count == 3 # Only called during fit, not predict
@patch("rayml.pipelines.components.Estimator.fit")
@patch("rayml.pipelines.components.Estimator.predict_proba")
@patch("rayml.pipelines.components.Estimator.predict")
def test_predict_repeat_estimator(
mock_predict, mock_predict_proba, mock_fit, X_y_binary
):
X, y = X_y_binary
mock_predict_proba.return_value = pd.DataFrame(y)
mock_predict_proba.return_value.ww.init()
mock_predict.return_value = pd.Series(y)
graph = {
"Imputer": [Imputer, "X", "y"],
"OneHot_RandomForest": [OneHotEncoder, "Imputer.x", "y"],
"OneHot_Logistic": [OneHotEncoder, "Imputer.x", "y"],
"Random Forest": [RandomForestClassifier, "OneHot_RandomForest.x", "y"],
"Logistic Regression Classifier": [
LogisticRegressionClassifier,
"OneHot_Logistic.x",
"y",
],
"Final Estimator": [
LogisticRegressionClassifier,
"Random Forest.x",
"Logistic Regression Classifier.x",
"y",
],
}
component_graph = ComponentGraph(graph)
component_graph.instantiate()
component_graph.fit(X, y)
assert (
not component_graph.get_component(
"Logistic Regression Classifier"
)._component_obj
== component_graph.get_component("Final Estimator")._component_obj
)
component_graph.predict(X)
assert mock_predict_proba.call_count == 4
assert mock_predict.call_count == 1
assert mock_fit.call_count == 3
@patch("rayml.pipelines.components.Imputer.transform")
@patch("rayml.pipelines.components.OneHotEncoder.transform")
@patch("rayml.pipelines.components.RandomForestClassifier.predict_proba")
@patch("rayml.pipelines.components.ElasticNetClassifier.predict_proba")
def test_transform_all_but_final(
mock_en_predict_proba,
mock_rf_predict_proba,
mock_ohe,
mock_imputer,
example_graph,
X_y_binary,
):
X, y = X_y_binary
mock_imputer.return_value = pd.DataFrame(X)
mock_ohe.return_value = pd.DataFrame(X)
mock_en_predict_proba.return_value = pd.DataFrame(
({0: np.zeros(X.shape[0]), 1: np.ones(X.shape[0])})
)
mock_en_predict_proba.return_value.ww.init()
mock_rf_predict_proba.return_value = pd.DataFrame(
({0: np.ones(X.shape[0]), 1: np.zeros(X.shape[0])})
)
mock_rf_predict_proba.return_value.ww.init()
X_expected = pd.DataFrame(
{
"Col 1 Random Forest.x": np.zeros(X.shape[0]),
"Col 1 Elastic Net.x": np.ones(X.shape[0]),
}
)
component_graph = ComponentGraph(example_graph).instantiate()
component_graph.fit(X, y)
X_t = component_graph.transform_all_but_final(X)
assert_frame_equal(X_expected, X_t)
assert mock_imputer.call_count == 2
assert mock_ohe.call_count == 4
@patch(f"{__name__}.DummyTransformer.transform")
def test_transform_all_but_final_single_component(mock_transform, X_y_binary):
X, y = X_y_binary
X = pd.DataFrame(X)
mock_transform.return_value = X
component_graph = ComponentGraph(
{"Dummy Component": [DummyTransformer, "X", "y"]}
).instantiate()
component_graph.fit(X, y)
X_t = component_graph.transform_all_but_final(X)
assert_frame_equal(X, X_t)
@patch("rayml.pipelines.components.Imputer.fit_transform")
def test_fit_y_parent(mock_fit_transform, X_y_binary):
X, y = X_y_binary
graph = {
"Imputer": [Imputer, "X", "y"],
"OHE": [OneHotEncoder, "Imputer.x", "y"],
"Random Forest": [RandomForestClassifier, "OHE.x", "y"],
}
component_graph = ComponentGraph(graph).instantiate()
mock_fit_transform.return_value = tuple((pd.DataFrame(X), pd.Series(y)))
component_graph.fit(X, y)
mock_fit_transform.assert_called_once()
def test_predict_empty_graph(X_y_binary):
X, y = X_y_binary
X = pd.DataFrame(X)
component_graph = ComponentGraph()
component_graph.instantiate()
component_graph.fit(X, y)
X_t = component_graph.transform(X, y)
assert_frame_equal(X, X_t)
X_pred = component_graph.predict(X)
assert_frame_equal(X, X_pred)
import pandas as pd
from pydatafaker import utilities
def test_create_date():
x = utilities.create_date()
assert type(x) is pd.Timestamp
def test_create_date_ranges():
sep_1 = "2020-09-01"
sep_2 = "2020-09-02"
sep_3 = "2020-09-03"
for _ in range(25):
x = utilities.create_date(sep_1, sep_3)
assert (
x == pd.to_datetime(sep_1)
or x == pd.to_datetime(sep_2)
or x == pd.to_datetime(sep_3)
)
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 12})
plt.rcParams["figure.figsize"] = (3.5,3)
METRIC_IDX = 3
NUM_EPOCHS = 20
NUM_EXPS = 5
GRAPH_FORMAT = 'pdf'
# GRAPH_TITLE = 'Piano Playing'
# GRAPH_FILE = 'piano_playing'
GRAPH_TITLE = 'Keyboard Typing'
GRAPH_FILE = 'keyboard_typing'
METHODS_NAMES = ['SF', 'MF', 'CBMF']
NUM_METHODS = len(METHODS_NAMES)
METRICS_NAMES = ['Accuracy', 'Recall', 'Precision', 'F1']
METRICS_COLUMNS = ['acc', 'rec', 'pre', 'f1']
if GRAPH_FILE == 'piano_playing':
methods_folders = [['/mnt/walkure_public/deanz/models/deepnet/us2multimidi_all/deepnetunet_224res_1imgs_calib_all_multiplaying_01_st0.8_kf4',
'/mnt/walkure_public/deanz/models/deepnet/us2multimidi_all/deepnetunet_224res_1imgs_calib_all_multiplaying_02_st0.8_kf0',
'/mnt/walkure_public/deanz/models/deepnet/us2multimidi_all/deepnetunet_224res_1imgs_calib_all_multiplaying_03_st0.8_kf1',
'/mnt/walkure_public/deanz/models/deepnet/us2multimidi_all/deepnetunet_224res_1imgs_calib_all_multiplaying_04_st0.8_kf2',
'/mnt/walkure_public/deanz/models/deepnet/us2multimidi_all/deepnetunet_224res_1imgs_calib_all_multiplaying_05_st0.8_kf3'],
['/mnt/walkure_public/deanz/models/mfm/us2multimidi_all/mfmunet_224res_8imgs_calib_all_multiplaying_03_st0.8_sequence_kf4',
'/mnt/walkure_public/deanz/models/mfm/us2multimidi_all/mfmunet_224res_8imgs_calib_all_multiplaying_04_st0.8_sequence_kf1',
'/mnt/walkure_public/deanz/models/mfm/us2multimidi_all/mfmunet_224res_8imgs_calib_all_multiplaying_05_st0.8_sequence_kf0',
'/mnt/walkure_public/deanz/models/mfm/us2multimidi_all/mfmunet_224res_8imgs_calib_all_multiplaying_06_st0.8_sequence_kf2',
'/mnt/walkure_public/deanz/models/mfm/us2multimidi_all/mfmunet_224res_8imgs_calib_all_multiplaying_07_st0.8_sequence_kf3'],
['/mnt/walkure_public/deanz/models/mfm/us2conf2multimidi_all/mfmunet_224res_8imgs_calib_all_multityping_13_st0.8_sequence_reslayer_retrained_mp_4qloss_kf4',
'/mnt/walkure_public/deanz/models/mfm/us2conf2multimidi_all/mfmunet_224res_8imgs_calib_all_multityping_14_st0.8_sequence_reslayer_retrained_mp_4qloss_kf0',
'/mnt/walkure_public/deanz/models/mfm/us2conf2multimidi_all/mfmunet_224res_8imgs_calib_all_multityping_15_st0.8_sequence_reslayer_retrained_mp_4qloss_kf1',
'/mnt/walkure_public/deanz/models/mfm/us2conf2multimidi_all/mfmunet_224res_8imgs_calib_all_multityping_16_st0.8_sequence_reslayer_retrained_mp_4qloss_kf2',
'/mnt/walkure_public/deanz/models/mfm/us2conf2multimidi_all/mfmunet_224res_8imgs_calib_all_multityping_17_st0.8_sequence_reslayer_retrained_mp_4qloss_kf3']]
else:
methods_folders = [['/mnt/walkure_public/deanz/models/deepnet/us2multikey_all/deepnetunet_224res_1imgs_calib_all_multityping_01_st0.8_kf4',
'/mnt/walkure_public/deanz/models/deepnet/us2multikey_all/deepnetunet_224res_1imgs_calib_all_multityping_02_st0.8_kf0',
'/mnt/walkure_public/deanz/models/deepnet/us2multikey_all/deepnetunet_224res_1imgs_calib_all_multityping_03_st0.8_kf1',
'/mnt/walkure_public/deanz/models/deepnet/us2multikey_all/deepnetunet_224res_1imgs_calib_all_multityping_04_st0.8_kf2',
'/mnt/walkure_public/deanz/models/deepnet/us2multikey_all/deepnetunet_224res_1imgs_calib_all_multityping_05_st0.8_kf3'],
['/mnt/walkure_public/deanz/models/mfm/us2multikey_all/mfmunet_224res_8imgs_calib_all_multityping_05_st0.8_sequence_kf4',
'/mnt/walkure_public/deanz/models/mfm/us2multikey_all/mfmunet_224res_8imgs_calib_all_multityping_06_st0.8_sequence_kf1',
'/mnt/walkure_public/deanz/models/mfm/us2multikey_all/mfmunet_224res_8imgs_calib_all_multityping_07_st0.8_sequence_kf0',
'/mnt/walkure_public/deanz/models/mfm/us2multikey_all/mfmunet_224res_8imgs_calib_all_multityping_08_st0.8_sequence_kf2',
'/mnt/walkure_public/deanz/models/mfm/us2multikey_all/mfmunet_224res_8imgs_calib_all_multityping_09_st0.8_sequence_kf3'],
['/mnt/walkure_public/deanz/models/mfm/us2conf2multikey_all/mfmunet_224res_8imgs_calib_all_multityping_23_st0.8_sequence_reslayer_retrained_mt_4qloss_kf4',
'/mnt/walkure_public/deanz/models/mfm/us2conf2multikey_all/mfmunet_224res_8imgs_calib_all_multityping_24_st0.8_sequence_reslayer_retrained_mt_4qloss_kf0',
'/mnt/walkure_public/deanz/models/mfm/us2conf2multikey_all/mfmunet_224res_8imgs_calib_all_multityping_25_st0.8_sequence_reslayer_retrained_mt_4qloss_kf1',
'/mnt/walkure_public/deanz/models/mfm/us2conf2multikey_all/mfmunet_224res_8imgs_calib_all_multityping_26_st0.8_sequence_reslayer_retrained_mt_4qloss_kf2',
'/mnt/walkure_public/deanz/models/mfm/us2conf2multikey_all/mfmunet_224res_8imgs_calib_all_multityping_27_st0.8_sequence_reslayer_retrained_mt_4qloss_kf3']]
methods_dfs = {}
for i, method_exps in enumerate(methods_folders):
# append all k-fold experiments into one dataframe
method_df = pd.DataFrame(columns=['acc','rec','pre','f1'])
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
"""
This file contains SDC overloads for common algorithms used internally
"""
import numpy
import pandas
from pandas.core.indexing import IndexingError
import numba
from numba.misc import quicksort
from numba import types
from numba.core.errors import TypingError
from numba.extending import register_jitable
from numba.np import numpy_support
from numba.typed import Dict
import sdc
from sdc.hiframes.api import isna
from sdc.hiframes.pd_series_type import SeriesType
from sdc.functions import numpy_like
from sdc.str_arr_type import string_array_type, StringArrayType
from sdc.datatypes.range_index_type import RangeIndexType
from sdc.str_arr_ext import (num_total_chars, append_string_array_to,
str_arr_is_na, pre_alloc_string_array, str_arr_set_na, string_array_type,
cp_str_list_to_array, create_str_arr_from_list, get_utf8_size,
str_arr_set_na_by_mask)
from sdc.utilities.prange_utils import parallel_chunks
from sdc.utilities.utils import sdc_overload, sdc_register_jitable
from sdc.utilities.sdc_typing_utils import (find_common_dtype_from_numpy_dtypes,
TypeChecker)
class SDCLimitation(Exception):
"""Exception to be raised in case of SDC limitation"""
pass
def hpat_arrays_append(A, B):
pass
@sdc_overload(hpat_arrays_append, jit_options={'parallel': False})
def hpat_arrays_append_overload(A, B):
"""Function for appending underlying arrays (A and B) or list/tuple of arrays B to an array A"""
A_is_range_index = isinstance(A, RangeIndexType)
B_is_range_index = isinstance(B, RangeIndexType)
if isinstance(A, (types.Array, RangeIndexType)):
if isinstance(B, (types.Array, RangeIndexType)):
def _append_single_numeric_impl(A, B):
_A = A.values if A_is_range_index == True else A # noqa
_B = B.values if B_is_range_index == True else B # noqa
return numpy.concatenate((_A, _B,))
return _append_single_numeric_impl
elif isinstance(B, (types.UniTuple, types.List)) and isinstance(B.dtype, (types.Array, RangeIndexType)):
B_dtype_is_range_index = isinstance(B.dtype, RangeIndexType)
numba_common_dtype = find_common_dtype_from_numpy_dtypes([A.dtype, B.dtype.dtype], [])
# TODO: refactor to use numpy.concatenate when Numba supports building a tuple at runtime
def _append_list_numeric_impl(A, B):
total_length = len(A) + numpy.array([len(arr) for arr in B]).sum()
new_data = numpy.empty(total_length, numba_common_dtype)
stop = len(A)
_A = numpy.array(A) if A_is_range_index == True else A # noqa
new_data[:stop] = _A
for arr in B:
_arr = numpy.array(arr) if B_dtype_is_range_index == True else arr # noqa
start = stop
stop = start + len(_arr)
new_data[start:stop] = _arr
return new_data
return _append_list_numeric_impl
elif A == string_array_type:
if B == string_array_type:
def _append_single_string_array_impl(A, B):
total_size = len(A) + len(B)
total_chars = num_total_chars(A) + num_total_chars(B)
new_data = sdc.str_arr_ext.pre_alloc_string_array(total_size, total_chars)
pos = 0
pos += append_string_array_to(new_data, pos, A)
pos += append_string_array_to(new_data, pos, B)
return new_data
return _append_single_string_array_impl
elif (isinstance(B, (types.UniTuple, types.List)) and B.dtype == string_array_type):
def _append_list_string_array_impl(A, B):
array_list = [A] + list(B)
total_size = numpy.array([len(arr) for arr in array_list]).sum()
total_chars = numpy.array([num_total_chars(arr) for arr in array_list]).sum()
new_data = sdc.str_arr_ext.pre_alloc_string_array(total_size, total_chars)
pos = 0
pos += append_string_array_to(new_data, pos, A)
for arr in B:
pos += append_string_array_to(new_data, pos, arr)
return new_data
return _append_list_string_array_impl
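# Illustrative sketch, not part of SDC: the append semantics that the overloads above
# generate for numeric inputs, expressed with plain numpy so it runs without numba
# compilation. The helper name and sample values below are hypothetical.
def _example_hpat_arrays_append():
    a = numpy.array([1, 2, 3])
    b = numpy.array([4, 5])
    # appending a single array is equivalent to numpy.concatenate of the pair
    assert numpy.array_equal(numpy.concatenate((a, b)), numpy.array([1, 2, 3, 4, 5]))
    # appending a list of arrays concatenates them in order after A
    assert numpy.array_equal(numpy.concatenate((a, b, b)), numpy.array([1, 2, 3, 4, 5, 4, 5]))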
@sdc_register_jitable
def fill_array(data, size, fill_value=numpy.nan, push_back=True):
"""
Fill array with given values to reach the size
"""
if push_back:
return numpy.append(data, numpy.repeat(fill_value, size - data.size))
return numpy.append(numpy.repeat(fill_value, size - data.size), data)
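# Illustrative sketch, not part of SDC: expected behaviour of fill_array above, assuming
# that sdc_register_jitable (like numba's register_jitable) leaves the function callable
# from plain Python. The helper name and sample values are hypothetical.
def _example_fill_array():
    data = numpy.array([1.0, 2.0])
    padded_back = fill_array(data, 4)                    # [1., 2., nan, nan]
    padded_front = fill_array(data, 4, push_back=False)  # [nan, nan, 1., 2.]
    assert padded_back.size == 4 and numpy.isnan(padded_back[2:]).all()
    assert padded_front.size == 4 and numpy.isnan(padded_front[:2]).all()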
@sdc_register_jitable
def fill_str_array(data, size, push_back=True):
"""
Fill StringArrayType array with given values to reach the size
"""
string_array_size = len(data)
nan_array_size = size - string_array_size
num_chars = sdc.str_arr_ext.num_total_chars(data)
result_data = sdc.str_arr_ext.pre_alloc_string_array(size, num_chars)
# Keep NaN values of initial array
arr_is_na_mask = numpy.array([sdc.hiframes.api.isna(data, i) for i in range(string_array_size)])
data_str_list = sdc.str_arr_ext.to_string_list(data)
nan_list = [''] * nan_array_size
result_list = data_str_list + nan_list if push_back else nan_list + data_str_list
cp_str_list_to_array(result_data, result_list)
# Batch=64 iteration to avoid threads competition
batch_size = 64
if push_back:
for i in numba.prange(size//batch_size + 1):
for j in range(i*batch_size, min((i+1)*batch_size, size)):
if j < string_array_size:
if arr_is_na_mask[j]:
str_arr_set_na(result_data, j)
else:
str_arr_set_na(result_data, j)
else:
for i in numba.prange(size//batch_size + 1):
for j in range(i*batch_size, min((i+1)*batch_size, size)):
if j < nan_array_size:
str_arr_set_na(result_data, j)
else:
str_arr_j = j - nan_array_size
if arr_is_na_mask[str_arr_j]:
str_arr_set_na(result_data, j)
return result_data
@numba.njit
def _hpat_ensure_array_capacity(new_size, arr):
""" Function ensuring that the size of numpy array is at least as specified
Returns newly allocated array of bigger size with copied elements if existing size is less than requested
"""
k = len(arr)
if k >= new_size:
return arr
n = k
while n < new_size:
n = 2 * n
res = numpy.empty(n, arr.dtype)
res[:k] = arr[:k]
return res
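# Illustrative sketch, not part of SDC: the capacity-doubling behaviour of the njit helper
# above. Calling it from plain Python triggers a one-off numba compilation; the helper
# name and sample sizes are hypothetical.
def _example_ensure_array_capacity():
    arr = numpy.zeros(3)
    grown = _hpat_ensure_array_capacity(5, arr)
    # capacity is doubled until it covers the requested size and existing values are copied
    assert grown.size >= 5
    assert numpy.array_equal(grown[:3], arr)
    # an array that is already large enough is returned without growing
    assert _hpat_ensure_array_capacity(2, arr).size == arr.size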
def sdc_join_series_indexes(left, right):
pass
@sdc_overload(sdc_join_series_indexes, jit_options={'parallel': False})
def sdc_join_series_indexes_overload(left, right):
"""Function for joining arrays left and right in a way similar to pandas.join 'outer' algorithm"""
# check that both operands are of types used for representing Pandas indexes
if not (isinstance(left, (types.Array, StringArrayType, RangeIndexType))
and isinstance(right, (types.Array, StringArrayType, RangeIndexType))):
return None
convert_left = isinstance(left, RangeIndexType)
convert_right = isinstance(right, RangeIndexType)
def _convert_to_arrays_impl(left, right):
_left = left.values if convert_left == True else left # noqa
_right = right.values if convert_right == True else right # noqa
return sdc_join_series_indexes(_left, _right)
if isinstance(left, RangeIndexType) and isinstance(right, RangeIndexType):
def sdc_join_range_indexes_impl(left, right):
if (left is right or numpy_like.array_equal(left, right)):
joined = left.values
lidx = numpy.arange(len(joined))
ridx = lidx
return joined, lidx, ridx
else:
return sdc_join_series_indexes(left.values, right.values)
return sdc_join_range_indexes_impl
elif isinstance(left, RangeIndexType) and isinstance(right, types.Array):
return _convert_to_arrays_impl
elif isinstance(left, types.Array) and isinstance(right, RangeIndexType):
return _convert_to_arrays_impl
# TODO: remove code duplication below and merge numeric and StringArray impls into one
# needs equivalents of numpy.arsort and _hpat_ensure_array_capacity for StringArrays
elif isinstance(left, types.Array) and isinstance(right, types.Array):
numba_common_dtype = find_common_dtype_from_numpy_dtypes([left.dtype, right.dtype], [])
if isinstance(numba_common_dtype, types.Number):
def sdc_join_series_indexes_impl(left, right):
# allocate result arrays
lsize = len(left)
rsize = len(right)
est_total_size = int(1.1 * (lsize + rsize))
lidx = numpy.empty(est_total_size, numpy.int64)
ridx = numpy.empty(est_total_size, numpy.int64)
joined = numpy.empty(est_total_size, numba_common_dtype)
left_nan = []
right_nan = []
for i in range(lsize):
if numpy.isnan(left[i]):
left_nan.append(i)
for i in range(rsize):
if numpy.isnan(right[i]):
right_nan.append(i)
# sort arrays saving the old positions
sorted_left = numpy.argsort(left, kind='mergesort')
sorted_right = numpy.argsort(right, kind='mergesort')
# put the position of the nans in an increasing sequence
sorted_left[lsize-len(left_nan):] = left_nan
sorted_right[rsize-len(right_nan):] = right_nan
i, j, k = 0, 0, 0
while (i < lsize and j < rsize):
joined = _hpat_ensure_array_capacity(k + 1, joined)
lidx = _hpat_ensure_array_capacity(k + 1, lidx)
ridx = _hpat_ensure_array_capacity(k + 1, ridx)
left_index = left[sorted_left[i]]
right_index = right[sorted_right[j]]
if (left_index < right_index) or numpy.isnan(right_index):
joined[k] = left_index
lidx[k] = sorted_left[i]
ridx[k] = -1
i += 1
k += 1
elif (left_index > right_index) or numpy.isnan(left_index):
joined[k] = right_index
lidx[k] = -1
ridx[k] = sorted_right[j]
j += 1
k += 1
else:
# find ends of sequences of equal index values in left and right
ni, nj = i, j
while (ni < lsize and left[sorted_left[ni]] == left_index):
ni += 1
while (nj < rsize and right[sorted_right[nj]] == right_index):
nj += 1
# join the blocks found into results
for s in numpy.arange(i, ni, 1):
block_size = nj - j
to_joined = numpy.repeat(left_index, block_size)
to_lidx = numpy.repeat(sorted_left[s], block_size)
to_ridx = numpy.array([sorted_right[k] for k in numpy.arange(j, nj, 1)], numpy.int64)
joined = _hpat_ensure_array_capacity(k + block_size, joined)
lidx = _hpat_ensure_array_capacity(k + block_size, lidx)
ridx = _hpat_ensure_array_capacity(k + block_size, ridx)
joined[k:k + block_size] = to_joined
lidx[k:k + block_size] = to_lidx
ridx[k:k + block_size] = to_ridx
k += block_size
i = ni
j = nj
# fill the end of joined with remaining part of left or right
if i < lsize:
block_size = lsize - i
joined = _hpat_ensure_array_capacity(k + block_size, joined)
lidx = _hpat_ensure_array_capacity(k + block_size, lidx)
ridx = _hpat_ensure_array_capacity(k + block_size, ridx)
ridx[k: k + block_size] = numpy.repeat(-1, block_size)
while i < lsize:
joined[k] = left[sorted_left[i]]
lidx[k] = sorted_left[i]
i += 1
k += 1
elif j < rsize:
block_size = rsize - j
joined = _hpat_ensure_array_capacity(k + block_size, joined)
lidx = _hpat_ensure_array_capacity(k + block_size, lidx)
ridx = _hpat_ensure_array_capacity(k + block_size, ridx)
lidx[k: k + block_size] = numpy.repeat(-1, block_size)
while j < rsize:
joined[k] = right[sorted_right[j]]
ridx[k] = sorted_right[j]
j += 1
k += 1
return joined[:k], lidx[:k], ridx[:k]
return sdc_join_series_indexes_impl
else:
return None
elif (left == string_array_type and right == string_array_type):
def sdc_join_series_indexes_impl(left, right):
# allocate result arrays
lsize = len(left)
rsize = len(right)
est_total_size = int(1.1 * (lsize + rsize))
lidx = numpy.empty(est_total_size, numpy.int64)
ridx = numpy.empty(est_total_size, numpy.int64)
# use Series.sort_values since argsort for StringArrays not implemented
original_left_series = pandas.Series(left)
original_right_series = pandas.Series(right)
# sort arrays saving the old positions
left_series = original_left_series.sort_values(kind='mergesort')
right_series = original_right_series.sort_values(kind='mergesort')
sorted_left = left_series._index
sorted_right = right_series._index
i, j, k = 0, 0, 0
while (i < lsize and j < rsize):
lidx = _hpat_ensure_array_capacity(k + 1, lidx)
ridx = _hpat_ensure_array_capacity(k + 1, ridx)
left_index = left[sorted_left[i]]
right_index = right[sorted_right[j]]
if (left_index < right_index):
lidx[k] = sorted_left[i]
ridx[k] = -1
i += 1
k += 1
elif (left_index > right_index):
lidx[k] = -1
ridx[k] = sorted_right[j]
j += 1
k += 1
else:
# find ends of sequences of equal index values in left and right
ni, nj = i, j
while (ni < lsize and left[sorted_left[ni]] == left_index):
ni += 1
while (nj < rsize and right[sorted_right[nj]] == right_index):
nj += 1
# join the blocks found into results
for s in numpy.arange(i, ni, 1):
block_size = nj - j
to_lidx = numpy.repeat(sorted_left[s], block_size)
to_ridx = numpy.array([sorted_right[k] for k in numpy.arange(j, nj, 1)], numpy.int64)
lidx = _hpat_ensure_array_capacity(k + block_size, lidx)
ridx = _hpat_ensure_array_capacity(k + block_size, ridx)
lidx[k:k + block_size] = to_lidx
ridx[k:k + block_size] = to_ridx
k += block_size
i = ni
j = nj
# fill the end of joined with remaining part of left or right
if i < lsize:
block_size = lsize - i
lidx = _hpat_ensure_array_capacity(k + block_size, lidx)
ridx = _hpat_ensure_array_capacity(k + block_size, ridx)
ridx[k: k + block_size] = numpy.repeat(-1, block_size)
while i < lsize:
lidx[k] = sorted_left[i]
i += 1
k += 1
elif j < rsize:
block_size = rsize - j
lidx = _hpat_ensure_array_capacity(k + block_size, lidx)
ridx = _hpat_ensure_array_capacity(k + block_size, ridx)
lidx[k: k + block_size] = numpy.repeat(-1, block_size)
while j < rsize:
ridx[k] = sorted_right[j]
j += 1
k += 1
# count total number of characters and allocate joined array
total_joined_size = k
num_chars_in_joined = 0
for i in numpy.arange(total_joined_size):
if lidx[i] != -1:
num_chars_in_joined += len(left[lidx[i]])
elif ridx[i] != -1:
num_chars_in_joined += len(right[ridx[i]])
joined = pre_alloc_string_array(total_joined_size, num_chars_in_joined)
# iterate over joined and fill it with indexes using lidx and ridx indexers
for i in numpy.arange(total_joined_size):
if lidx[i] != -1:
joined[i] = left[lidx[i]]
if (str_arr_is_na(left, lidx[i])):
str_arr_set_na(joined, i)
elif ridx[i] != -1:
joined[i] = right[ridx[i]]
if (str_arr_is_na(right, ridx[i])):
str_arr_set_na(joined, i)
else:
str_arr_set_na(joined, i)
return joined, lidx, ridx
return sdc_join_series_indexes_impl
return None
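# Illustrative sketch, not part of SDC: the pandas behaviour this overload mirrors.
# pandas.Index.join with return_indexers=True yields the joined index plus positional
# indexers into the left and right operands, with -1 marking values missing on that
# side. The sample index values are hypothetical.
def _example_outer_index_join():
    left_index = pandas.Index([1, 3, 5])
    right_index = pandas.Index([3, 4])
    joined, lidx, ridx = left_index.join(right_index, how='outer', return_indexers=True)
    assert list(joined) == [1, 3, 4, 5]
    assert list(lidx) == [0, 1, -1, 2]
    assert list(ridx) == [-1, 0, 1, -1]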
@numba.njit
def _sdc_pandas_format_percentiles(arr):
""" Function converting float array of percentiles to a list of strings formatted
the same as in pandas.io.formats.format.format_percentiles
"""
percentiles_strs = []
for percentile in arr:
p_as_string = str(percentile * 100)
trim_index = len(p_as_string) - 1
while trim_index >= 0:
if p_as_string[trim_index] == '0':
trim_index -= 1
continue
elif p_as_string[trim_index] == '.':
break
trim_index += 1
break
if trim_index < 0:
p_as_string_trimmed = '0'
else:
p_as_string_trimmed = p_as_string[:trim_index]
percentiles_strs.append(p_as_string_trimmed + '%')
return percentiles_strs
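# Illustrative sketch, not part of SDC: the pandas output format being reproduced above.
# format_percentiles is a private pandas helper, so its import path may differ between
# pandas versions; it is shown here only to document the target formatting.
def _example_format_percentiles():
    from pandas.io.formats.format import format_percentiles
    assert format_percentiles([0.25, 0.5, 0.75]) == ['25%', '50%', '75%']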
def sdc_arrays_argsort(A, kind='quicksort'):
pass
@sdc_overload(sdc_arrays_argsort, jit_options={'parallel': False})
def sdc_arrays_argsort_overload(A, kind='quicksort'):
"""Function providing pandas argsort implementation for different 1D array types"""
# kind is not known at compile time, so get this function here and use in impl if needed
quicksort_func = quicksort.make_jit_quicksort().run_quicksort
kind_is_default = isinstance(kind, str)
if isinstance(A, types.Array):
def _sdc_arrays_argsort_array_impl(A, kind='quicksort'):
_kind = 'quicksort' if kind_is_default == True else kind # noqa
return numpy.argsort(A, kind=_kind)
return _sdc_arrays_argsort_array_impl
elif A == string_array_type:
def _sdc_arrays_argsort_str_arr_impl(A, kind='quicksort'):
nan_mask = sdc.hiframes.api.get_nan_mask(A)
idx = numpy.arange(len(A))
old_nan_positions = idx[nan_mask]
data = A[~nan_mask]
keys = idx[~nan_mask]
if kind == 'quicksort':
zipped = list(zip(list(data), list(keys)))
zipped = quicksort_func(zipped)
argsorted = [zipped[i][1] for i in numpy.arange(len(data))]
elif kind == 'mergesort':
sdc.hiframes.sort.local_sort((data, ), (keys, ))
argsorted = list(keys)
else:
raise ValueError("Unrecognized kind of sort in sdc_arrays_argsort")
argsorted.extend(old_nan_positions)
return numpy.asarray(argsorted, dtype=numpy.int32)
return _sdc_arrays_argsort_str_arr_impl
elif isinstance(A, types.List):
return None
return None
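# Illustrative sketch, not part of SDC: for plain numeric arrays the overload above
# reduces to numpy.argsort, and NaN positions end up at the back of the result.
# Sample values are hypothetical.
def _example_arrays_argsort():
    data = numpy.array([3.0, numpy.nan, 1.0])
    order = numpy.argsort(data, kind='mergesort')
    assert list(order) == [2, 0, 1]  # 1.0 first, then 3.0, then the NaN position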
def _sdc_pandas_series_check_axis(axis):
pass
@sdc_overload(_sdc_pandas_series_check_axis, jit_options={'parallel': False})
def _sdc_pandas_series_check_axis_overload(axis):
if isinstance(axis, types.UnicodeType):
def _sdc_pandas_series_check_axis_impl(axis):
if axis != 'index':
raise ValueError("Method sort_values(). Unsupported parameter. Given axis != 'index'")
return _sdc_pandas_series_check_axis_impl
elif isinstance(axis, types.Integer):
def _sdc_pandas_series_check_axis_impl(axis):
if axis != 0:
raise ValueError("Method sort_values(). Unsupported parameter. Given axis != 0")
return _sdc_pandas_series_check_axis_impl
return None
def _sdc_asarray(data):
pass
@sdc_overload(_sdc_asarray)
def _sdc_asarray_overload(data):
# TODO: extend with other types
if not isinstance(data, types.List):
return None
if isinstance(data.dtype, types.UnicodeType):
def _sdc_asarray_impl(data):
return create_str_arr_from_list(data)
return _sdc_asarray_impl
else:
result_dtype = data.dtype
def _sdc_asarray_impl(data):
# TODO: check if elementwise copy is needed at all
res_size = len(data)
res_arr = numpy.empty(res_size, dtype=result_dtype)
for i in numba.prange(res_size):
res_arr[i] = data[i]
return res_arr
return _sdc_asarray_impl
return None
def _sdc_take(data, indexes):
pass
@sdc_overload(_sdc_take)
def _sdc_take_overload(data, indexes):
if not isinstance(data, (types.Array, StringArrayType, RangeIndexType)):
return None
if not (isinstance(indexes, (types.Array, types.List))
and isinstance(indexes.dtype, (types.Integer, types.ListType))):
return None
if isinstance(indexes.dtype, types.ListType) and isinstance(data, (types.Array, types.List, RangeIndexType)):
arr_dtype = data.dtype
def _sdc_take_list_impl(data, indexes):
res_size = 0
for i in numba.prange(len(indexes)):
res_size += len(indexes[i])
res_arr = numpy.empty(res_size, dtype=arr_dtype)
for i in numba.prange(len(indexes)):
start = 0
for l in range(len(indexes[0:i])):
start += len(indexes[l])
current_pos = start
for j in range(len(indexes[i])):
res_arr[current_pos] = data[indexes[i][j]]
current_pos += 1
return res_arr
return _sdc_take_list_impl
elif isinstance(indexes.dtype, types.ListType) and data == string_array_type:
def _sdc_take_list_str_impl(data, indexes):
res_size = 0
for i in numba.prange(len(indexes)):
res_size += len(indexes[i])
nan_mask = numpy.zeros(res_size, dtype=numpy.bool_)
num_total_bytes = 0
for i in numba.prange(len(indexes)):
start = 0
for l in range(len(indexes[0:i])):
start += len(indexes[l])
current_pos = start
for j in range(len(indexes[i])):
num_total_bytes += get_utf8_size(data[indexes[i][j]])
if isna(data, indexes[i][j]):
nan_mask[current_pos] = True
current_pos += 1
res_arr = pre_alloc_string_array(res_size, num_total_bytes)
for i in numba.prange(len(indexes)):
start = 0
for l in range(len(indexes[0:i])):
start += len(indexes[l])
current_pos = start
for j in range(len(indexes[i])):
res_arr[current_pos] = data[indexes[i][j]]
if nan_mask[current_pos]:
str_arr_set_na(res_arr, current_pos)
current_pos += 1
return res_arr
return _sdc_take_list_str_impl
elif isinstance(data, (types.Array, RangeIndexType)):
arr_dtype = data.dtype
def _sdc_take_array_impl(data, indexes):
res_size = len(indexes)
res_arr = numpy.empty(res_size, dtype=arr_dtype)
for i in numba.prange(res_size):
res_arr[i] = data[indexes[i]]
return res_arr
return _sdc_take_array_impl
elif isinstance(data, StringArrayType):
def _sdc_take_str_arr_impl(data, indexes):
res_size = len(indexes)
nan_mask = numpy.zeros(res_size, dtype=numpy.bool_)
num_total_bytes = 0
for i in numba.prange(res_size):
num_total_bytes += get_utf8_size(data[indexes[i]])
if isna(data, indexes[i]):
nan_mask[i] = True
res_arr = pre_alloc_string_array(res_size, num_total_bytes)
for i in numpy.arange(res_size):
res_arr[i] = data[indexes[i]]
if nan_mask[i]:
str_arr_set_na(res_arr, i)
return res_arr
return _sdc_take_str_arr_impl
return None
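# Illustrative sketch, not part of SDC: for plain numpy arrays the take implementations
# above are equivalent to fancy indexing with an integer index array. Sample values
# are hypothetical.
def _example_sdc_take():
    data = numpy.array([10, 20, 30, 40])
    indexes = numpy.array([3, 0, 2])
    assert numpy.array_equal(data[indexes], numpy.array([40, 10, 30]))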
def _almost_equal(x, y):
"""Check if floats are almost equal based on the float epsilon"""
pass
@sdc_overload(_almost_equal)
def _almost_equal_overload(x, y):
ty_checker = TypeChecker('Function sdc.common_functions._almost_equal_overload().')
ty_checker.check(x, types.Float)
ty_checker.check(x, types.Float)
common_dtype = numpy.find_common_type([], [x.name, y.name])
def _almost_equal_impl(x, y):
return abs(x - y) <= numpy.finfo(common_dtype).eps
return _almost_equal_impl
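# Illustrative sketch, not part of SDC: the epsilon-based comparison implemented by the
# overload above (the plain-Python _almost_equal stub is only a placeholder), written
# with plain numpy. Sample values are hypothetical.
def _example_almost_equal():
    eps = numpy.finfo(numpy.float64).eps
    assert abs((0.1 + 0.2) - 0.3) <= eps  # equal up to floating-point rounding
    assert not abs(1.0 - 1.1) <= eps      # genuinely different values stay different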
def sdc_reindex_series(arr, index, name, by_index):
pass
@sdc_overload(sdc_reindex_series)
def sdc_reindex_series_overload(arr, index, name, by_index):
""" Reindexes series data by new index following the logic of pandas.core.indexing.check_bool_indexer """
range_indexes = isinstance(index, RangeIndexType) and isinstance(by_index, RangeIndexType)
data_dtype, index_dtype = arr.dtype, index.dtype
data_is_str_arr = isinstance(arr.dtype, types.UnicodeType)
def sdc_reindex_series_impl(arr, index, name, by_index):
# no reindexing is needed if indexes are equal
if range_indexes == True: # noqa
equal_indexes = numpy_like.array_equal(index, by_index)
else:
equal_indexes = False
if (index is by_index or equal_indexes):
return pandas.Series(data=arr, index=by_index, name=name)
if data_is_str_arr == True: # noqa
_res_data = [''] * len(by_index)
res_data_nan_mask = numpy.zeros(len(by_index), dtype=types.bool_)
else:
_res_data = numpy.empty(len(by_index), dtype=data_dtype)
# build a dict of self.index values to their positions:
map_index_to_position = Dict.empty(
key_type=index_dtype,
value_type=types.int32
)
for i, value in enumerate(index):
if value in map_index_to_position:
raise ValueError("cannot reindex from a duplicate axis")
else:
map_index_to_position[value] = i
index_mismatch = 0
# FIXME: TypingError in parfor step (wrong promotion to float64?) if prange is used
for i in numpy.arange(len(by_index)):
if by_index[i] in map_index_to_position:
pos_in_self = map_index_to_position[by_index[i]]
_res_data[i] = arr[pos_in_self]
if data_is_str_arr == True: # noqa
res_data_nan_mask[i] = isna(arr, i)
else:
index_mismatch += 1
if index_mismatch:
msg = "Unalignable boolean Series provided as indexer " + \
"(index of the boolean Series and of the indexed object do not match)."
raise IndexingError(msg)
if data_is_str_arr == True: # noqa
res_data = create_str_arr_from_list(_res_data)
str_arr_set_na_by_mask(res_data, res_data_nan_mask)
else:
res_data = _res_data
return pandas.Series(data=res_data, index=by_index, name=name)
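# Illustrative sketch, not part of SDC: the pandas reindexing semantics that the overload
# above follows for matching labels (unlike pandas.Series.reindex it raises on unaligned
# labels). Values and index labels below are hypothetical.
def _example_reindex_series():
    original = pandas.Series([1.0, 2.0, 3.0], index=['a', 'b', 'c'], name='data')
    reordered = original.reindex(['c', 'a', 'b'])
    assert list(reordered) == [3.0, 1.0, 2.0]
    assert list(reordered.index) == ['c', 'a', 'b']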
from ast import literal_eval
from datetime import timedelta
from faker import Faker
from src.make_feedback_tool_data.make_data_for_feedback_tool import (
create_dataset,
create_phrase_level_columns,
drop_duplicate_rows,
extract_phrase_mentions,
preprocess_filter_comment_text,
save_intermediate_df
)
from src.make_feedback_tool_data.preprocess import PreProcess
from pandas.testing import assert_frame_equal
import numpy as np
import pandas as pd
import pytest
import random
import re
# Set the random seed
random.seed(42)
# Create an example pandas DataFrame of data
DF_EXAMPLE_RAW = pd.DataFrame.from_dict({
"primary_key": [*range(7)],
"Q3": [
"", "-", "These words are in English.", "Cet mots sont en français.",
"This is in English, but there is a word here in اَلْعَرَبِيَّةُ",
"Η Βικιπαίδεια είναι διεθνής, παγκόσμια, ψηφιακή, διαδικτυακή, ελεύθερου περιεχομένου, εγκυκλοπαίδεια, που "
"βασίζεται σε ένα μοντέλο ανοικτό στη σύνταξη του περιεχομένου της. It is the largest and most popular general "
"reference work on the World Wide Web, and is one of the 20 most popular websites ranked by Alexa, as of March "
"2020.",
"維基百科 是维基媒体基金会运营的一个多语言的線上百科全書,并以创建和维护作为开放式协同合作项目,特点是自由內容、自由编辑、自由版权"
],
})
# Define example personally identifiable information for `EXAMPLE_PARAGRAPHS`
EXAMPLE_PII_REGEX = r"(English)|(World Wide Web)|(mots)"
# Create a pre-processed version of `DF_EXAMPLE`
DF_EXAMPLE_PRE_PROCESSED = DF_EXAMPLE_RAW \
.assign(Q3_pii_removed=DF_EXAMPLE_RAW["Q3"].str.replace(EXAMPLE_PII_REGEX, ""),
language=["un", "-", "en", "fr", "en", "el", "zh"],
is_en=[True, True, True, False, True, False, False]) \
.query("is_en")
@pytest.fixture
def patch_preprocess_pii_regex(mocker):
"""Patch the replace_pii_regex method of the PreProcess class with EXAMPLE_PII_REGEX."""
return mocker.patch.object(PreProcess, "replace_pii_regex", side_effect=lambda s: re.sub(EXAMPLE_PII_REGEX, "", s))
@pytest.fixture
def patch_preprocess_detect_language(mocker):
"""Patch the detect_language method of the PreProcess class"""
return mocker.patch.object(PreProcess, "detect_language")
@pytest.mark.parametrize("test_input_threshold", [*range(60, 110, 10)])
class TestPreProcessFilterCommentText:
def test_returns_correctly(self, patch_preprocess_pii_regex, test_input_threshold):
"""Test that the preprocess_filter_comment_text function returns the correct output."""
# Define the expected output
test_expected = DF_EXAMPLE_PRE_PROCESSED.query(f"Q3_pii_removed.str.len() < {test_input_threshold}")
# Call the `preprocess_filter_comment_text` function
test_output = preprocess_filter_comment_text(DF_EXAMPLE_RAW, test_input_threshold)
# Assert the same columns exist in both
assert set(test_output.columns) == set(test_expected.columns)
# Assert the output is as expected
assert_frame_equal(test_output, test_expected)
def test_preprocess_replace_pii_regex_call_count(self, patch_preprocess_pii_regex, test_input_threshold):
"""Test that preprocess_filter_comment_text calls PreProcess.replace_pii_regex the correct number of times."""
# Call the `preprocess_filter_comment_text` function
_ = preprocess_filter_comment_text(DF_EXAMPLE_RAW, test_input_threshold)
# Assert that `PreProcess.replace_pii_regex` is called the correct number of times
assert patch_preprocess_pii_regex.call_count == len(DF_EXAMPLE_RAW)
def test_preprocess_replace_pii_regex_called_correctly(self, mocker, patch_preprocess_pii_regex,
test_input_threshold):
"""Test that preprocess_filter_comment_text calls PreProcess.replace_pii_regex with the correct arguments."""
# Call the `preprocess_filter_comment_text` function
_ = preprocess_filter_comment_text(DF_EXAMPLE_RAW, test_input_threshold)
# Assert that `PreProcess.replace_pii_regex` is called with the correct arguments
assert patch_preprocess_pii_regex.call_args_list == [mocker.call(v) for v in DF_EXAMPLE_RAW["Q3"]]
def test_preprocess_detect_language_call_count(self, patch_preprocess_pii_regex, patch_preprocess_detect_language,
test_input_threshold):
"""Test that preprocess_filter_comment_text calls PreProcess.detect_language the correct number of times."""
# Call the `preprocess_filter_comment_text` function
_ = preprocess_filter_comment_text(DF_EXAMPLE_RAW, test_input_threshold)
# Get the expected call count
test_expected = DF_EXAMPLE_RAW \
.assign(Q3_pii_removed=DF_EXAMPLE_RAW["Q3"].str.replace(EXAMPLE_PII_REGEX, "")) \
.query(f"Q3_pii_removed.str.len() < {test_input_threshold}") \
.shape[0]
# Assert that `PreProcess.replace_pii_regex` is called the correct number of times
assert patch_preprocess_detect_language.call_count == test_expected
def test_preprocess_detect_language_called_correctly(self, mocker, patch_preprocess_pii_regex,
patch_preprocess_detect_language, test_input_threshold):
"""Test that preprocess_filter_comment_text calls PreProcess.detect_language with the correct arguments."""
# Call the `preprocess_filter_comment_text` function
_ = preprocess_filter_comment_text(DF_EXAMPLE_RAW, test_input_threshold)
# Define the expected values of the call arguments
text_expected_values = DF_EXAMPLE_RAW \
.assign(Q3_pii_removed=DF_EXAMPLE_RAW["Q3"].str.replace(EXAMPLE_PII_REGEX, "")) \
.query(f"Q3_pii_removed.str.len() < {test_input_threshold}") \
.Q3_pii_removed \
.to_list()
# Assert that `PreProcess.detect_language` is called with the correct arguments
assert patch_preprocess_detect_language.call_args_list == [mocker.call(v) for v in text_expected_values]
# Define input arguments for the `TestSaveIntermediateDf` test class; the first text is 'I am going to go and test to
# see if this example is correct.', the second text is 'If this test passes, we should be able to extract lemma and
# words.', the third text is a combination of the first and second text into a two-sentence text, and the fourth text
# is 'I tried to signed up for advice due to the ongoing COVID 19 outbreak with specific concern about vulnerable
# people. I could not!'
args_save_intermediate_df_inputs = [
{"pos_tag": [[[("I", "PRP", "-PRON-"), ("am", "VBP", "be"), ("going", "VBG", "go"), ("to", "TO", "to"),
("go", "VB", "go"), ("and", "CC", "and"), ("test", "VB", "test"), ("to", "TO", "to"),
("see", "VB", "see"), ("if", "IN", "if"), ("this", "DT", "this"), ("example", "NN", "example"),
("is", "VBZ", "be"), ("correct", "JJ", "correct"), (".", ".", ".")]]]},
{"pos_tag": [[[("If", "IN", "if"), ("this", "DT", "this"), ("test", "NN", "test"), ("passes", "VBZ", "pass"),
(",", ",", ","), ("we", "PRP", "-PRON-"), ("should", "MD", "should"), ("be", "VB", "be"),
("able", "JJ", "able"), ("to", "TO", "to"), ("extract", "VB", "extract"), ("lemma", "NN", "lemma"),
("and", "CC", "and"), ("words", "NNS", "word"), (".", ".", ".")]]]},
{"pos_tag": [[[("I", "PRP", "-PRON-"), ("am", "VBP", "be"), ("going", "VBG", "go"), ("to", "TO", "to"),
("go", "VB", "go"), ("and", "CC", "and"), ("test", "VB", "test"), ("to", "TO", "to"),
("see", "VB", "see"), ("if", "IN", "if"), ("this", "DT", "this"), ("example", "NN", "example"),
("is", "VBZ", "be"), ("correct", "JJ", "correct"), (".", ".", ".")],
[("If", "IN", "if"), ("this", "DT", "this"), ("test", "NN", "test"), ("passes", "VBZ", "pass"),
(",", ",", ","), ("we", "PRP", "-PRON-"), ("should", "MD", "should"), ("be", "VB", "be"),
("able", "JJ", "able"), ("to", "TO", "to"), ("extract", "VB", "extract"), ("lemma", "NN", "lemma"),
("and", "CC", "and"), ("words", "NNS", "word"), (".", ".", ".")]]]},
{"pos_tag": [[[("I", "PRP", "-PRON-"), ("tried", "VBD", "try"), ("to", "TO", "to"), ("signed", "VBN", "sign"),
("up", "RP", "up"), ("for", "IN", "for"), ("advice", "NN", "advice"), ("due", "IN", "due"),
("to", "IN", "to"), ("the", "DT", "the"), ("ongoing", "JJ", "ongoing"), ("COVID", "NNP", "COVID"),
("19", "CD", "19"), ("outbreak", "NN", "outbreak"), ("with", "IN", "with"),
("specific", "JJ", "specific"), ("concern", "NN", "concern"), ("about", "IN", "about"),
("vulnerable", "JJ", "vulnerable"), ("people", "NNS", "people")],
[("I", "PRP", "-PRON-"), ("could", "MD", "could"), ("not", "RB", "not"), ("!", ".", "!")]]]}
]
# Define the additional expected columns in the outputted CSV by the `save_intermediate_df` function - this will be
# in addition to the columns in `args_save_intermediate_df_inputs`
args_save_intermediate_df_expected = [
{"lemmas": [["-PRON-", "be", "go", "to", "go", "and", "test", "to", "see", "if", "this", "example", "be",
"correct", "."]],
"words": [["I", "am", "going", "to", "go", "and", "test", "to", "see", "if", "this", "example", "is", "correct",
"."]]},
{"lemmas": [["if", "this", "test", "pass", ",", "-PRON-", "should", "be", "able", "to", "extract", "lemma",
"and", "word", "."]],
"words": [["If", "this", "test", "passes", ",", "we", "should", "be", "able", "to", "extract", "lemma", "and",
"words", "."]]},
{"lemmas": [["-PRON-", "be", "go", "to", "go", "and", "test", "to", "see", "if", "this", "example", "be",
"correct", ".", "if", "this", "test", "pass", ",", "-PRON-", "should", "be", "able", "to",
"extract", "lemma", "and", "word", "."]],
"words": [["I", "am", "going", "to", "go", "and", "test", "to", "see", "if", "this", "example", "is", "correct",
".", "If", "this", "test", "passes", ",", "we", "should", "be", "able", "to", "extract", "lemma", "and",
"words", "."]]},
{"lemmas": [["-PRON-", "try", "to", "sign", "up", "for", "advice", "due", "to", "the", "ongoing", "COVID", "19",
"outbreak", "with", "specific", "concern", "about", "vulnerable", "people", "-PRON-", "could", "not",
"!"]],
"words": [["I", "tried", "to", "signed", "up", "for", "advice", "due", "to", "the", "ongoing", "COVID", "19",
"outbreak", "with", "specific", "concern", "about", "vulnerable", "people", "I", "could", "not", "!"]]}
]
# Create the test cases for the `TestSaveIntermediateDf` test class, where each tuple in the list consists of two
# elements; the first is each element of `args_save_intermediate_df_inputs` as a pandas DataFrame, and the second is
# a pandas DataFrame of the corresponding elements from `args_save_intermediate_df_inputs` and
# `args_save_intermediate_df_expected` as the expected outputs
args_save_intermediate_df = [
(pd.DataFrame(i), pd.DataFrame({**i, **e})) for i, e in zip(args_save_intermediate_df_inputs,
args_save_intermediate_df_expected)
]
@pytest.fixture
def patch_pandas_dataframe_to_csv(mocker):
"""Patch the pandas.DataFrame.to_csv method."""
return mocker.patch("pandas.DataFrame.to_csv")
@pytest.fixture
def temp_folder(tmpdir_factory):
"""Create a temporary directory to store the output from save_intermediate_df."""
return tmpdir_factory.mktemp("temp")
@pytest.mark.parametrize("test_input_cache_pos_filename", ["foo.csv", "bar.csv"])
class TestSaveIntermediateDf:
@pytest.mark.parametrize("test_input_df", [a[0] for a in args_save_intermediate_df])
def test_calls_to_csv_correctly(self, patch_pandas_dataframe_to_csv, test_input_df, test_input_cache_pos_filename):
"""Test save_intermediate_df calls pandas.DataFrame.to_csv correctly."""
# Call the `save_intermediate_df` function
save_intermediate_df(test_input_df, test_input_cache_pos_filename)
# Assert `pandas.DataFrame.to_csv` is called with the correct arguments
patch_pandas_dataframe_to_csv.assert_called_once_with(test_input_cache_pos_filename, index=False)
@pytest.mark.parametrize("test_input_df, test_expected_df", args_save_intermediate_df)
def test_returns_correctly(self, temp_folder, test_input_df, test_input_cache_pos_filename, test_expected_df):
"""Test the outputted CSV from save_intermediate_df is correct."""
# Define the file path for the CSV
test_input_file_path = temp_folder.join(test_input_cache_pos_filename)
# Call the `save_intermediate_df` function
save_intermediate_df(test_input_df, test_input_file_path)
# Assert the CSV output is correct; need to apply `ast.literal_eval` element-wise, as the CSV will contain
# strings of the lists, rather than the lists themselves
assert_frame_equal(pd.read_csv(test_input_file_path).applymap(literal_eval), test_expected_df)
# Define the example feedback that would result in `args_save_intermediate_df_inputs`
args_extract_phrase_mentions_inputs_q3_edit = [
"I am going to go and test to see if this example is correct.",
"If this test passes, we should be able to extract lemma and words.",
"I am going to go and test to see if this example is correct. If this test passes, we should be able to extract "
"lemma and words.",
"I tried to signed up for advice due to the ongoing COVID 19 outbreak with specific concern about vulnerable "
"people. I could not!"
]
# Define the inputs for the `extract_phrase_mentions` tests, where each tuple is a pandas DataFrame with columns
# 'Q3_edit' and 'pos_tag'
args_extract_phrase_mentions_integration = [
pd.DataFrame({"Q3_edit": t, **i}) for t, i in zip(args_extract_phrase_mentions_inputs_q3_edit,
args_save_intermediate_df_inputs)
]
@pytest.fixture
def patch_chunkparser_extract_phrase(mocker):
"""Patch both the ChunkParser class, and its extract_phrase method, but only return the latter."""
patch_chunkparser = mocker.patch("src.make_feedback_tool_data.make_data_for_feedback_tool.ChunkParser")
return patch_chunkparser.return_value.extract_phrase
@pytest.fixture
def patch_preprocess(mocker):
"""Patch the PreProcess class."""
return mocker.patch("src.make_feedback_tool_data.make_data_for_feedback_tool.PreProcess")
@pytest.mark.parametrize("test_input_df", args_extract_phrase_mentions_integration)
@pytest.mark.parametrize("test_input_grammar_filename", [None, "hello.txt", "world.txt"])
class TestExtractPhraseMentionsIntegration:
def test_calls_correctly(self, mocker, test_input_df, test_input_grammar_filename):
"""Test extract_phrase_mentions calls ChunkParser correctly."""
# Patch the `ChunkParser` class
patch_chunkparser = mocker.patch("src.make_feedback_tool_data.make_data_for_feedback_tool.ChunkParser")
# Call the `extract_phrase_mentions` function
_ = extract_phrase_mentions(test_input_df, test_input_grammar_filename)
# Assert `ChunkParser` is called once with the correct arguments
patch_chunkparser.assert_called_once_with(test_input_grammar_filename)
def test_calls_extract_phrase(self, mocker, patch_chunkparser_extract_phrase, test_input_df,
test_input_grammar_filename):
"""Test extract_phrase_mentions calls ChunkParser.extract_phrase correctly."""
# Call the `extract_phrase_mentions` function
_ = extract_phrase_mentions(test_input_df, test_input_grammar_filename)
# Assert `ChunkParser.extract_phrase` is called the correct number of times
assert patch_chunkparser_extract_phrase.call_count == len(test_input_df)
# Assert `ChunkParser.extract_phrase` is called with the correct arguments
for v in test_input_df["pos_tag"].values:
assert patch_chunkparser_extract_phrase.call_args_list == [mocker.call(v, merge_inplace=True)]
def test_calls_preprocess_compute_combinations_correctly(self, mocker, patch_chunkparser_extract_phrase,
patch_preprocess, test_input_df,
test_input_grammar_filename):
"""Test extract_phrase_mentions calls PreProcess.compute_combinations correctly."""
# Call the `extract_phrase_mentions` function
_ = extract_phrase_mentions(test_input_df, test_input_grammar_filename)
# Assert `PreProcess.compute_combinations` is called the correct number of times
assert patch_preprocess.compute_combinations.call_count == len(test_input_df)
# Define the expected call argument for each iteration - this will be the return value from calling
# `ChunkParser.extract_phrase`
test_expected = [mocker.call(patch_chunkparser_extract_phrase.return_value, 2)]
# Assert `PreProcess.compute_combinations` is called with the correct arguments
assert patch_preprocess.compute_combinations.call_args_list == test_expected * len(test_input_df)
# Define the expected call arguments for `regex_group_verbs`
args_regex_group_verbs_call_args_expected = [
["test to see if"],
["to extract"],
["test to see if", "to extract"],
["tried to signed up for", "advice", "due to the ongoing covid 19 outbreak", "with specific concern"]
]
# Define the expected call arguments for `regex_for_theme`
args_regex_for_theme_call_args_expected = [
["this example"],
["lemma"],
["this example", "lemma"],
["advice", "due to the ongoing covid 19 outbreak", "with specific concern", "about vulnerable people"]
]
# Define the test cases for the `test_calls_regex_group_verbs_correctly` test in the
# `TestExtractPhraseMentionsIntegrationComboSection` test class
args_calls_regex_group_verbs_correctly = [
(i.copy(deep=True), e) for i, e in zip(args_extract_phrase_mentions_integration,
args_regex_group_verbs_call_args_expected)
]
# Define the test cases for the `test_calls_regex_for_theme_correctly` test in the
# `TestExtractPhraseMentionsIntegrationComboSection` test class
args_calls_regex_for_theme_correctly = [
(i.copy(deep=True), e) for i, e in zip(args_extract_phrase_mentions_integration,
args_regex_for_theme_call_args_expected)
]
# Define the expected call arguments for the `PreProcess.find_needle` method in `extract_phrase_mentions`
args_find_needle_called_correctly_expected = [
([("test to see if this example", "i am going to go and test to see if this example is correct."),
("test to see if", "test to see if this example")]),
([("to extract lemma", "if this test passes, we should be able to extract lemma and words."),
("to extract", "to extract lemma")]),
([("test to see if this example", "i am going to go and test to see if this example is correct. if this test "
"passes, we should be able to extract lemma and words."),
("test to see if", "test to see if this example"),
("to extract lemma", "i am going to go and test to see if this example is correct. if this test passes, "
"we should be able to extract lemma and words."),
("to extract", "to extract lemma")]),
([("tried to signed up for advice", "i tried to signed up for advice due to the ongoing covid 19 outbreak with "
"specific concern about vulnerable people. i could not!"),
("tried to signed up for", "tried to signed up for advice"),
("advice due to the ongoing covid 19 outbreak", "i tried to signed up for advice due to the ongoing covid 19 "
"outbreak with specific concern about vulnerable people. i could "
"not!"),
("advice", "advice due to the ongoing covid 19 outbreak"),
("due to the ongoing covid 19 outbreak with specific concern", "i tried to signed up for advice due to the "
"ongoing covid 19 outbreak with specific concern "
"about vulnerable people. i could not!"),
("due to the ongoing covid 19 outbreak", "due to the ongoing covid 19 outbreak with specific concern"),
("with specific concern about vulnerable people", "i tried to signed up for advice due to the ongoing covid 19 "
"outbreak with specific concern about vulnerable people. i "
"could not!"),
("with specific concern", "with specific concern about vulnerable people")])
]
# Define the test cases for the `test_find_needle_called_correctly` test in the
# `TestExtractPhraseMentionsIntegrationComboSection` test class
args_find_needle_called_correctly = [
(i.copy(deep=True), e) for i, e in zip(args_extract_phrase_mentions_integration,
args_find_needle_called_correctly_expected)
]
class TestExtractPhraseMentionsIntegrationComboSection:
@pytest.mark.parametrize("test_input, test_expected", args_calls_regex_group_verbs_correctly)
def test_calls_regex_group_verbs_correctly(self, mocker, test_input, test_expected):
"""Test extract_phrase_mentions calls regex_group_verbs correctly."""
# Patch the `regex_group_verbs` function
patch_regex_group_verbs = mocker.patch(
"src.make_feedback_tool_data.make_data_for_feedback_tool.regex_group_verbs"
)
# Call the `extract_phrase_mentions` function; assumes the default grammar file is unchanged
_ = extract_phrase_mentions(test_input, None)
# Assert `regex_group_verbs` is called the expected number of times
assert patch_regex_group_verbs.call_count == len(test_expected)
# Assert the call arguments to `regex_group_verbs` are as expected
assert patch_regex_group_verbs.call_args_list == [mocker.call(a) for a in test_expected]
@pytest.mark.parametrize("test_input, test_expected", args_calls_regex_for_theme_correctly)
def test_calls_regex_for_theme_correctly(self, mocker, test_input, test_expected):
"""Test extract_phrase_mentions calls regex_for_theme correctly."""
# Patch the `regex_for_theme` function
patch_regex_for_theme = mocker.patch(
"src.make_feedback_tool_data.make_data_for_feedback_tool.regex_for_theme"
)
# Call the `extract_phrase_mentions` function; assumes the default grammar file is unchanged
_ = extract_phrase_mentions(test_input, None)
# Assert `regex_for_theme` is called the expected number of times
assert patch_regex_for_theme.call_count == len(test_expected)
# Assert the call arguments to `regex_for_theme` are as expected
assert patch_regex_for_theme.call_args_list == [mocker.call(a) for a in test_expected]
@pytest.mark.parametrize("test_input, test_expected", args_find_needle_called_correctly)
def test_find_needle_called_correctly(self, mocker, test_input, test_expected):
"""Test extract_phrase_mentions calls the PreProcess.find_needle method corrrectly."""
# Patch the `PreProcess.find_needle` method
patch_find_needle = mocker.patch(
"src.make_feedback_tool_data.make_data_for_feedback_tool.PreProcess.find_needle",
wraps=PreProcess.find_needle
)
# Call the `extract_phrase_mentions` function; assumes the default grammar file is unchanged
_ = extract_phrase_mentions(test_input, None)
# Assert that the `PreProcess.find_needle` method is called the correct number of times
assert patch_find_needle.call_count == len(test_expected)
# Assert the call arguments for the `PreProcess.find_needle` method are correct
assert patch_find_needle.call_args_list == [mocker.call(*e) for e in test_expected]
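# Note on the patching style used in `test_find_needle_called_correctly` above: passing
# `wraps=PreProcess.find_needle` turns the patch into a "spy" -- the real implementation still
# runs, while every call is recorded on the mock so `call_count` and `call_args_list` can be
# asserted. A generic sketch of the same idea (the names below are illustrative only):
#
#     spy = mocker.patch("some_module.SomeClass.method", wraps=SomeClass.method)
#     code_under_test()                  # real behaviour is preserved
#     assert spy.call_count == expected  # ...while the calls are still captured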
# Define the expected values of the `test_extract_phrase_mentions_returns_correctly` test
args_extract_phrase_mentions_returns_correctly_expected = [
([[{"chunked_phrase": ("test to see if", "this example"),
"exact_phrase": ("test to see if", "this example"),
"generic_phrase": ("find", "unknown"),
"key": ("verb", "noun")}]]),
([[{"chunked_phrase": ("to extract", "lemma"),
"exact_phrase": ("to extract", "lemma"),
"generic_phrase": ("unknown", "unknown"),
"key": ("verb", "noun")}]]),
([[{"chunked_phrase": ("test to see if", "this example"),
"exact_phrase": ("test to see if", "this example"),
"generic_phrase": ("find", "unknown"),
"key": ("verb", "noun")},
{"chunked_phrase": ("to extract", "lemma"),
"exact_phrase": ("to extract", "lemma"),
"generic_phrase": ("unknown", "unknown"),
"key": ("verb", "noun")}]]),
([[{"chunked_phrase": ("tried to signed up for", "advice"),
"exact_phrase": ("tried to signed up for", "advice"),
"generic_phrase": ("apply", "information"),
"key": ("verb", "noun")},
{"chunked_phrase": ("advice", "due to the ongoing covid 19 outbreak"),
"exact_phrase": ("advice", "due to the ongoing covid 19 outbreak"),
"generic_phrase": ("unknown", "covid-mention"),
"key": ("noun", "prep_noun")},
{"chunked_phrase": ("due to the ongoing covid 19 outbreak", "with specific concern"),
"exact_phrase": ("due to the ongoing covid 19 outbreak", "with specific concern"),
"generic_phrase": ("unknown", "unknown"), "key": ("prep_noun", "prep_noun")},
{"chunked_phrase": ("with specific concern", "about vulnerable people"),
"exact_phrase": ("with specific concern", "about vulnerable people"),
"generic_phrase": ("unknown", "vulnerable"),
"key": ("prep_noun", "prep_noun")}]])
]
# Define the test cases for the `test_extract_phrase_mentions_returns_correctly` test
args_extract_phrase_mentions_returns_correctly = [
(i.copy(deep=True), i.copy(deep=True).assign(themed_phrase_mentions=e)) for i, e in zip(
args_extract_phrase_mentions_integration, args_extract_phrase_mentions_returns_correctly_expected
)
]
# Define expected outputs for the `test_create_phrase_level_columns_returns_correctly` test
args_create_phrase_level_columns_returns_correctly_expected = [
("test to see if, this example", "find, unknown"),
("to extract, lemma", "unknown, unknown"),
("test to see if, this example\nto extract, lemma", "find, unknown\nunknown, unknown"),
("tried to signed up for, advice", "apply, information")
]
# Initialise a list to store the test cases for the `test_create_phrase_level_columns_returns_correctly` test
args_create_phrase_level_columns_returns_correctly = []
# Define the test cases for the `test_create_phrase_level_columns_returns_correctly` test
for i, e in zip(args_extract_phrase_mentions_returns_correctly_expected,
args_create_phrase_level_columns_returns_correctly_expected):
args_create_phrase_level_columns_returns_correctly.append((
| pd.DataFrame([i], columns=["themed_phrase_mentions"]) | pandas.DataFrame |
import copy
import datetime as dt
import logging
import os
import re
import warnings
from datetime import datetime
from unittest.mock import patch
import cftime
import numpy as np
import pandas as pd
import pytest
from numpy import testing as npt
from packaging.version import parse
from pandas.errors import UnsupportedFunctionCall
from pint.errors import DimensionalityError, UndefinedUnitError
from scmdata.errors import (
DuplicateTimesError,
MissingRequiredColumnError,
NonUniqueMetadataError,
)
from scmdata.run import BaseScmRun, ScmRun, run_append
from scmdata.testing import (
_check_pandas_less_110,
_check_pandas_less_120,
assert_scmdf_almost_equal,
)
@pytest.fixture
def scm_run_interpolated(scm_run):
return scm_run.interpolate(
[
dt.datetime(y, 1, 1)
for y in range(scm_run["year"].min(), scm_run["year"].max() + 1)
]
)
def test_init_df_year_converted_to_datetime(test_pd_df):
res = ScmRun(test_pd_df)
assert (res["year"].unique() == [2005, 2010, 2015]).all()
assert (
res["time"].unique()
== [dt.datetime(2005, 1, 1), dt.datetime(2010, 1, 1), dt.datetime(2015, 1, 1)]
).all()
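# The `test_pd_df` fixture is defined in the shared fixtures and is not shown in this file. Based
# on how it is used in the tests below, it is assumed to be a "wide" DataFrame with one set of
# metadata columns per row and the time axis spread across integer year columns, roughly like the
# sketch below (the numeric values are placeholders, not the fixture's actual data):
#
#     pd.DataFrame(
#         [
#             ["a_model", "a_iam", "a_scenario", "World", "Primary Energy", "EJ/yr", 1.0, 2.0, 3.0],
#             ["a_model", "a_iam", "a_scenario", "World", "Primary Energy|Coal", "EJ/yr", 1.0, 2.0, 3.0],
#             ["a_model", "a_iam", "a_scenario2", "World", "Primary Energy", "EJ/yr", 1.0, 2.0, 3.0],
#         ],
#         columns=["climate_model", "model", "scenario", "region", "variable", "unit", 2005, 2010, 2015],
#     )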
@pytest.mark.parametrize(
"in_format",
[
"pd.Series",
"year_col",
"year_col_index",
"time_col",
"time_col_index",
"time_col_str_simple",
"time_col_str_complex",
"time_col_reversed",
"str_times",
],
)
def test_init_df_formats(test_pd_run_df, in_format):
if in_format == "pd.Series":
idx = ["climate_model", "model", "scenario", "region", "variable", "unit"]
test_init = test_pd_run_df.melt(id_vars=idx, var_name="year").set_index(
idx + ["year"]
)["value"]
elif in_format == "year_col":
idx = ["climate_model", "model", "scenario", "region", "variable", "unit"]
test_init = test_pd_run_df.melt(id_vars=idx, var_name="year")
elif in_format == "year_col_index":
idx = ["climate_model", "model", "scenario", "region", "variable", "unit"]
test_init = test_pd_run_df.melt(id_vars=idx, var_name="year").set_index(
idx + ["year"]
)
elif in_format == "time_col":
idx = ["climate_model", "model", "scenario", "region", "variable", "unit"]
test_init = test_pd_run_df.melt(id_vars=idx, var_name="year")
test_init["time"] = test_init["year"].apply(lambda x: dt.datetime(x, 1, 1))
test_init = test_init.drop("year", axis="columns")
elif in_format == "time_col_index":
idx = ["climate_model", "model", "scenario", "region", "variable", "unit"]
test_init = test_pd_run_df.melt(id_vars=idx, var_name="year")
test_init["time"] = test_init["year"].apply(lambda x: dt.datetime(x, 1, 1))
test_init = test_init.drop("year", axis="columns")
test_init = test_init.set_index(idx + ["time"])
elif in_format == "time_col_str_simple":
idx = ["climate_model", "model", "scenario", "region", "variable", "unit"]
test_init = test_pd_run_df.melt(id_vars=idx, var_name="year")
test_init["time"] = test_init["year"].apply(
lambda x: "{}-1-1 00:00:00".format(x)
)
test_init = test_init.drop("year", axis="columns")
elif in_format == "time_col_str_complex":
idx = ["climate_model", "model", "scenario", "region", "variable", "unit"]
test_init = test_pd_run_df.melt(id_vars=idx, var_name="year")
test_init["time"] = test_init["year"].apply(lambda x: "{}/1/1".format(x))
test_init = test_init.drop("year", axis="columns")
elif in_format == "time_col_reversed":
test_init = test_pd_run_df[test_pd_run_df.columns[::-1]]
elif in_format == "str_times":
test_init = test_pd_run_df.copy()
test_init.columns = test_init.columns.map(
lambda x: "{}/1/1".format(x) if isinstance(x, int) else x
)
res = ScmRun(test_init)
assert (res["year"].unique() == [2005, 2010, 2015]).all()
assert (
res["time"].unique()
== [dt.datetime(2005, 1, 1), dt.datetime(2010, 1, 1), dt.datetime(2015, 1, 1)]
).all()
assert "Start: 2005" in res.__repr__()
assert "End: 2015" in res.__repr__()
res_df = res.timeseries()
res_df.columns = res_df.columns.map(lambda x: x.year)
res_df = res_df.reset_index()
pd.testing.assert_frame_equal(
res_df[test_pd_run_df.columns.tolist()], test_pd_run_df, check_like=True,
)
def test_init_df_missing_time_axis_error(test_pd_df):
idx = ["climate_model", "model", "scenario", "region", "variable", "unit"]
test_init = test_pd_df.melt(id_vars=idx, var_name="year")
test_init = test_init.drop("year", axis="columns")
error_msg = re.escape("invalid time format, must have either `year` or `time`!")
with pytest.raises(ValueError, match=error_msg):
ScmRun(test_init)
def test_init_df_missing_time_columns_error(test_pd_df):
test_init = test_pd_df.copy()
test_init = test_init.drop(
test_init.columns[test_init.columns.map(lambda x: isinstance(x, int))],
axis="columns",
)
error_msg = re.escape(
"invalid column format, must contain some time (int, float or datetime) "
"columns!"
)
with pytest.raises(ValueError, match=error_msg):
ScmRun(test_init)
def test_init_df_missing_col_error(test_pd_df):
test_pd_df = test_pd_df.drop("model", axis="columns")
error_msg = re.escape("Missing required columns `['model']`!")
with pytest.raises(MissingRequiredColumnError, match=error_msg):
ScmRun(test_pd_df)
def test_init_ts_missing_col_error(test_ts):
error_msg = re.escape("Missing required columns `['model']`!")
with pytest.raises(MissingRequiredColumnError, match=error_msg):
ScmRun(
test_ts,
columns={
"climate_model": ["a_model"],
"scenario": ["a_scenario", "a_scenario", "a_scenario2"],
"region": ["World"],
"variable": ["Primary Energy", "Primary Energy|Coal", "Primary Energy"],
"unit": ["EJ/yr"],
},
index=[2005, 2010, 2015],
)
def test_init_required_cols(test_pd_df):
class MyRun(BaseScmRun):
required_cols = ("climate_model", "variable", "unit")
del test_pd_df["model"]
assert all([c in test_pd_df.columns for c in MyRun.required_cols])
MyRun(test_pd_df)
del test_pd_df["climate_model"]
assert not all([c in test_pd_df.columns for c in MyRun.required_cols])
error_msg = re.escape("Missing required columns `['climate_model']`!")
with pytest.raises(
MissingRequiredColumnError, match=error_msg,
):
MyRun(test_pd_df)
def test_init_multiple_file_error():
error_msg = re.escape(
"Initialising from multiple files not supported, use "
"`scmdata.run.ScmRun.append()`"
)
with pytest.raises(ValueError, match=error_msg):
ScmRun(["file_1", "filepath_2"])
def test_init_unrecognised_type_error():
fail_type = {"dict": "key"}
error_msg = re.escape("Cannot load {} from {}".format(str(ScmRun), type(fail_type)))
with pytest.raises(TypeError, match=error_msg):
ScmRun(fail_type)
def test_init_ts_col_string(test_ts):
res = ScmRun(
test_ts,
columns={
"model": "an_iam",
"climate_model": "a_model",
"scenario": ["a_scenario", "a_scenario", "a_scenario2"],
"region": "World",
"variable": ["Primary Energy", "Primary Energy|Coal", "Primary Energy"],
"unit": "EJ/yr",
},
index=[2005, 2010, 2015],
)
npt.assert_array_equal(res["model"].unique(), "an_iam")
npt.assert_array_equal(res["climate_model"].unique(), "a_model")
npt.assert_array_equal(res["region"].unique(), "World")
npt.assert_array_equal(res["unit"].unique(), "EJ/yr")
@pytest.mark.parametrize("fail_setting", [["a_iam", "a_iam"]])
def test_init_ts_col_wrong_length_error(test_ts, fail_setting):
correct_scenarios = ["a_scenario", "a_scenario", "a_scenario2"]
error_msg = re.escape(
"Length of column 'model' is incorrect. It should be length 1 or {}".format(
len(correct_scenarios)
)
)
with pytest.raises(ValueError, match=error_msg):
ScmRun(
test_ts,
columns={
"model": fail_setting,
"climate_model": ["a_model"],
"scenario": correct_scenarios,
"region": ["World"],
"variable": ["Primary Energy", "Primary Energy|Coal", "Primary Energy"],
"unit": ["EJ/yr"],
},
index=[2005, 2010, 2015],
)
def get_test_pd_df_with_datetime_columns(tpdf):
return tpdf.rename(
{
2005.0: dt.datetime(2005, 1, 1),
2010.0: dt.datetime(2010, 1, 1),
2015.0: dt.datetime(2015, 1, 1),
},
axis="columns",
)
def test_init_with_ts(test_ts, test_pd_df):
df = ScmRun(
test_ts,
columns={
"model": ["a_iam"],
"climate_model": ["a_model"],
"scenario": ["a_scenario", "a_scenario", "a_scenario2"],
"region": ["World"],
"variable": ["Primary Energy", "Primary Energy|Coal", "Primary Energy"],
"unit": ["EJ/yr"],
},
index=[2005, 2010, 2015],
)
tdf = get_test_pd_df_with_datetime_columns(test_pd_df)
pd.testing.assert_frame_equal(df.timeseries().reset_index(), tdf, check_like=True)
b = ScmRun(test_pd_df)
assert_scmdf_almost_equal(df, b, check_ts_names=False)
def test_init_with_scmdf(test_scm_run_datetimes, test_scm_datetime_run):
df = ScmRun(test_scm_run_datetimes,)
assert_scmdf_almost_equal(df, test_scm_datetime_run, check_ts_names=False)
@pytest.mark.parametrize(
"years", [["2005.0", "2010.0", "2015.0"], ["2005", "2010", "2015"]]
)
def test_init_with_years_as_str(test_pd_df, years):
df = copy.deepcopy(
test_pd_df
) # This needs to be a deep copy so it doesn't break the other tests
cols = copy.deepcopy(test_pd_df.columns.values)
cols[-3:] = years
df.columns = cols
df = ScmRun(df)
obs = df.time_points.values
exp = np.array(
[dt.datetime(2005, 1, 1), dt.datetime(2010, 1, 1), dt.datetime(2015, 1, 1)],
dtype="datetime64[s]",
)
assert (obs == exp).all()
def test_init_with_year_columns(test_pd_df):
df = ScmRun(test_pd_df)
tdf = get_test_pd_df_with_datetime_columns(test_pd_df)
pd.testing.assert_frame_equal(df.timeseries().reset_index(), tdf, check_like=True)
def test_init_with_decimal_years():
inp_array = [2.0, 1.2, 7.9]
d = pd.Series(inp_array, index=[1765.0, 1765.083, 1765.167])
cols = {
"model": ["a_model"],
"scenario": ["a_scenario"],
"region": ["World"],
"variable": ["Primary Energy"],
"unit": ["EJ/yr"],
}
res = ScmRun(d, columns=cols)
assert (
res["time"].unique()
== [
dt.datetime(1765, 1, 1, 0, 0),
dt.datetime(1765, 1, 31, 7, 4, 48),
dt.datetime(1765, 3, 2, 22, 55, 11),
]
).all()
npt.assert_array_equal(res.values[0], inp_array)
def test_init_df_from_timeseries(test_scm_df_mulitple):
df = ScmRun(test_scm_df_mulitple.timeseries())
assert_scmdf_almost_equal(df, test_scm_df_mulitple, check_ts_names=False)
def test_init_df_with_extra_col(test_pd_df):
tdf = test_pd_df.copy()
extra_col = "test value"
extra_value = "scm_model"
tdf[extra_col] = extra_value
df = ScmRun(tdf)
tdf = get_test_pd_df_with_datetime_columns(tdf)
assert extra_col in df.meta
pd.testing.assert_frame_equal(df.timeseries().reset_index(), tdf, check_like=True)
def test_init_df_without_required_arguments(test_run_ts):
with pytest.raises(ValueError, match="`columns` argument is required"):
ScmRun(test_run_ts, index=[2000, 20005, 2010], columns=None)
with pytest.raises(ValueError, match="`index` argument is required"):
ScmRun(test_run_ts, index=None, columns={"variable": "test"})
def test_init_iam(test_iam_df, test_pd_df):
a = ScmRun(test_iam_df)
b = ScmRun(test_pd_df)
assert_scmdf_almost_equal(a, b, check_ts_names=False)
def test_init_self(test_iam_df):
a = ScmRun(test_iam_df)
b = ScmRun(a)
assert_scmdf_almost_equal(a, b)
def test_init_with_metadata(scm_run):
expected_metadata = {"test": "example"}
b = ScmRun(scm_run.timeseries(), metadata=expected_metadata)
# Data should be copied
assert id(b.metadata) != id(expected_metadata)
assert b.metadata == expected_metadata
def test_init_self_with_metadata(scm_run):
scm_run.metadata["test"] = "example"
b = ScmRun(scm_run)
assert id(scm_run.metadata) != id(b.metadata)
assert scm_run.metadata == b.metadata
c = ScmRun(scm_run, metadata={"test": "other"})
assert c.metadata == {"test": "other"}
def _check_copy(a, b, copy_data):
if copy_data:
assert id(a.values.base) != id(b.values.base)
else:
assert id(a.values.base) == id(b.values.base)
@pytest.mark.parametrize("copy_data", [True, False])
def test_init_with_copy_run(copy_data, scm_run):
res = ScmRun(scm_run, copy_data=copy_data)
assert id(res) != id(scm_run)
_check_copy(res._df, scm_run._df, copy_data)
@pytest.mark.parametrize("copy_data", [True, False])
def test_init_with_copy_dataframe(copy_data, test_pd_df):
res = ScmRun(test_pd_df, copy_data=copy_data)
# an incoming pandas DF no longer references the original
_check_copy(res._df, test_pd_df, True)
def test_init_duplicate_columns(test_pd_df):
exp_msg = (
"Duplicate times (numbers show how many times the given " "time is repeated)"
)
inp = pd.concat([test_pd_df, test_pd_df[2015]], axis=1)
with pytest.raises(DuplicateTimesError) as exc_info:
ScmRun(inp)
error_msg = exc_info.value.args[0]
assert error_msg.startswith(exp_msg)
pd.testing.assert_index_equal(
pd.Index([2005, 2010, 2015, 2015], dtype="object", name="time"),
exc_info.value.time_index,
)
def test_init_empty(scm_run):
empty_run = ScmRun()
assert empty_run.empty
assert empty_run.filter(model="*").empty
empty_run.append(scm_run, inplace=True)
assert not empty_run.empty
def test_repr_empty():
empty_run = ScmRun()
assert str(empty_run) == empty_run.__repr__()
repr = str(empty_run)
assert "Start: N/A" in repr
assert "End: N/A" in repr
assert "timeseries: 0, timepoints: 0" in repr
def test_as_iam(test_iam_df, test_pd_df, iamdf_type):
df = ScmRun(test_pd_df).to_iamdataframe()
# test is skipped by test_iam_df fixture if pyam isn't installed
assert isinstance(df, iamdf_type)
pd.testing.assert_frame_equal(test_iam_df.meta, df.meta)
# we switch to time so ensure sensible comparison of columns
tdf = df.data.copy()
tdf["year"] = tdf["time"].apply(lambda x: x.year)
tdf.drop("time", axis="columns", inplace=True)
pd.testing.assert_frame_equal(test_iam_df.data, tdf, check_like=True)
def test_get_item(scm_run):
assert scm_run["model"].unique() == ["a_iam"]
@pytest.mark.parametrize(
"value,output",
(
(1, [np.nan, np.nan, 1.0]),
(1.0, (np.nan, np.nan, 1.0)),
("test", ["nan", "nan", "test"]),
),
)
def test_get_item_with_nans(scm_run, value, output):
expected_values = [np.nan, np.nan, value]
scm_run["extra"] = expected_values
exp = pd.Series(output, name="extra")
pd.testing.assert_series_equal(scm_run["extra"], exp, check_exact=value != "test")
def test_get_item_not_in_meta(scm_run):
dud_key = 0
error_msg = re.escape("[{}] is not in metadata".format(dud_key))
with pytest.raises(KeyError, match=error_msg):
scm_run[dud_key]
def test_set_item(scm_run):
scm_run["model"] = ["a_iam", "b_iam", "c_iam"]
assert all(scm_run["model"] == ["a_iam", "b_iam", "c_iam"])
def test_set_item_not_in_meta(scm_run):
with pytest.raises(ValueError):
scm_run["junk"] = ["hi", "bye"]
scm_run["junk"] = ["hi", "bye", "ciao"]
assert all(scm_run["junk"] == ["hi", "bye", "ciao"])
def test_len(scm_run):
assert len(scm_run) == len(scm_run.timeseries())
def test_shape(scm_run):
assert scm_run.shape == scm_run.timeseries().shape
def test_head(scm_run):
pd.testing.assert_frame_equal(scm_run.head(2), scm_run.timeseries().head(2))
def test_tail(scm_run):
pd.testing.assert_frame_equal(scm_run.tail(1), scm_run.timeseries().tail(1))
def test_values(scm_run):
# implicitly checks that `.values` returns the data with each row being a
# timeseries and each column being a timepoint
npt.assert_array_equal(scm_run.values, scm_run.timeseries().values)
def test_variable_depth_0(scm_run):
obs = list(scm_run.filter(level=0)["variable"].unique())
exp = ["Primary Energy"]
assert obs == exp
def test_variable_depth_0_with_base():
tdf = ScmRun(
data=np.array([[1, 6.0, 7], [0.5, 3, 2], [2, 7, 0], [-1, -2, 3]]).T,
columns={
"model": ["a_iam"],
"climate_model": ["a_model"],
"scenario": ["a_scenario"],
"region": ["World"],
"variable": [
"Primary Energy",
"Primary Energy|Coal",
"Primary Energy|Coal|Electricity",
"Primary Energy|Gas|Heating",
],
"unit": ["EJ/yr"],
},
index=[
dt.datetime(2005, 1, 1),
dt.datetime(2010, 1, 1),
dt.datetime(2015, 6, 12),
],
)
obs = list(tdf.filter(variable="Primary Energy|*", level=1)["variable"].unique())
exp = ["Primary Energy|Coal|Electricity", "Primary Energy|Gas|Heating"]
assert all([e in obs for e in exp]) and len(obs) == len(exp)
def test_variable_depth_0_keep_false(scm_run):
obs = list(scm_run.filter(level=0, keep=False)["variable"].unique())
exp = ["Primary Energy|Coal"]
assert obs == exp
def test_variable_depth_0_minus(scm_run):
obs = list(scm_run.filter(level="0-")["variable"].unique())
exp = ["Primary Energy"]
assert obs == exp
def test_variable_depth_0_plus(scm_run):
obs = list(scm_run.filter(level="0+")["variable"].unique())
exp = ["Primary Energy", "Primary Energy|Coal"]
assert obs == exp
def test_variable_depth_1(scm_run):
obs = list(scm_run.filter(level=1)["variable"].unique())
exp = ["Primary Energy|Coal"]
assert obs == exp
def test_variable_depth_1_minus(scm_run):
obs = list(scm_run.filter(level="1-")["variable"].unique())
exp = ["Primary Energy", "Primary Energy|Coal"]
assert obs == exp
def test_variable_depth_1_plus(scm_run):
obs = list(scm_run.filter(level="1+")["variable"].unique())
exp = ["Primary Energy|Coal"]
assert obs == exp
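# Summary of the `level` filter notation exercised by the surrounding tests (inferred from the
# expected results here, not taken from the scmdata documentation): an integer keeps variables at
# exactly that depth, "N-" keeps depth <= N, and "N+" keeps depth >= N, where depth counts the
# "|"-separated levels of the variable name; when combined with a `variable` pattern the depth is
# measured relative to that pattern.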
def test_variable_depth_raises(scm_run):
pytest.raises(ValueError, scm_run.filter, level="1/")
def test_filter_error(scm_run):
pytest.raises(ValueError, scm_run.filter, foo="foo")
def test_filter_year(test_scm_run_datetimes):
obs = test_scm_run_datetimes.filter(year=2005)
expected = dt.datetime(2005, 6, 17, 12)
unique_time = obs["time"].unique()
assert len(unique_time) == 1
assert unique_time[0] == expected
def test_filter_year_error(test_scm_run_datetimes):
error_msg = re.escape("`year` can only be filtered with ints or lists of ints")
with pytest.raises(TypeError, match=error_msg):
test_scm_run_datetimes.filter(year=2005.0)
def test_filter_year_with_own_year(test_scm_run_datetimes):
res = test_scm_run_datetimes.filter(year=test_scm_run_datetimes["year"].values)
assert (res["year"].unique() == test_scm_run_datetimes["year"].unique()).all()
@pytest.mark.parametrize(
"year_list", ([2005, 2010], (2005, 2010), np.array([2005, 2010]).astype(int),)
)
def test_filter_year_list(year_list, test_scm_run_datetimes):
res = test_scm_run_datetimes.filter(year=year_list)
expected = [2005, 2010]
assert (res["year"].unique() == expected).all()
def test_filter_inplace(test_scm_run_datetimes):
test_scm_run_datetimes.filter(year=2005, inplace=True)
expected = dt.datetime(2005, 6, 17, 12)
unique_time = test_scm_run_datetimes["time"].unique()
assert len(unique_time) == 1
assert unique_time[0] == expected
@pytest.mark.parametrize("test_month", [6, "June", "Jun", "jun", ["Jun", "jun"]])
def test_filter_month(test_scm_run_datetimes, test_month):
obs = test_scm_run_datetimes.filter(month=test_month)
expected = dt.datetime(2005, 6, 17, 12)
unique_time = obs["time"].unique()
assert len(unique_time) == 1
assert unique_time[0] == expected
@pytest.mark.parametrize("test_month", [6, "Jun", "jun", ["Jun", "jun"]])
def test_filter_year_month(test_scm_run_datetimes, test_month):
obs = test_scm_run_datetimes.filter(year=2005, month=test_month)
expected = dt.datetime(2005, 6, 17, 12)
unique_time = obs["time"].unique()
assert len(unique_time) == 1
assert unique_time[0] == expected
@pytest.mark.parametrize("test_day", [17, "Fri", "Friday", "friday", ["Fri", "fri"]])
def test_filter_day(test_scm_run_datetimes, test_day):
obs = test_scm_run_datetimes.filter(day=test_day)
expected = dt.datetime(2005, 6, 17, 12)
unique_time = obs["time"].unique()
assert len(unique_time) == 1
assert unique_time[0] == expected
@pytest.mark.parametrize("test_hour", [12, [12, 13]])
def test_filter_hour(test_scm_run_datetimes, test_hour):
obs = test_scm_run_datetimes.filter(hour=test_hour)
test_hour = [test_hour] if isinstance(test_hour, int) else test_hour
expected_rows = (
test_scm_run_datetimes["time"].apply(lambda x: x.hour).isin(test_hour)
)
expected = test_scm_run_datetimes["time"].loc[expected_rows].unique()
unique_time = obs["time"].unique()
assert len(unique_time) == 1
assert unique_time[0] == expected[0]
def test_filter_hour_multiple(test_scm_run_datetimes):
obs = test_scm_run_datetimes.filter(hour=0)
expected_rows = test_scm_run_datetimes["time"].apply(lambda x: x.hour).isin([0])
expected = test_scm_run_datetimes["time"].loc[expected_rows].unique()
unique_time = obs["time"].unique()
assert len(unique_time) == 2
assert all([dt in unique_time for dt in expected])
def test_filter_time_exact_match(test_scm_run_datetimes):
obs = test_scm_run_datetimes.filter(time=dt.datetime(2005, 6, 17, 12))
expected = dt.datetime(2005, 6, 17, 12)
unique_time = obs["time"].unique()
assert len(unique_time) == 1
assert unique_time[0] == expected
def test_filter_time_range(test_scm_run_datetimes):
error_msg = r".*datetime.datetime.*"
with pytest.raises(TypeError, match=error_msg):
test_scm_run_datetimes.filter(
year=range(dt.datetime(2000, 6, 17), dt.datetime(2009, 6, 17))
)
def test_filter_time_range_year(test_scm_run_datetimes):
obs = test_scm_run_datetimes.filter(year=range(2000, 2008))
unique_time = obs["time"].unique()
expected = dt.datetime(2005, 6, 17, 12)
assert len(unique_time) == 1
assert unique_time[0] == expected
@pytest.mark.parametrize("month_range", [range(3, 7), "Mar-Jun"])
def test_filter_time_range_month(test_scm_run_datetimes, month_range):
obs = test_scm_run_datetimes.filter(month=month_range)
expected = dt.datetime(2005, 6, 17, 12)
unique_time = obs["time"].unique()
assert len(unique_time) == 1
assert unique_time[0] == expected
def test_filter_time_range_month_unrecognised_error(test_scm_run_datetimes):
fail_filter = "Marb-Jun"
error_msg = re.escape(
"Could not convert month '{}' to integer".format(
[m for m in fail_filter.split("-")]
)
)
with pytest.raises(ValueError, match=error_msg):
test_scm_run_datetimes.filter(month=fail_filter)
@pytest.mark.parametrize("month_range", [["Mar-Jun", "Nov-Feb"]])
def test_filter_time_range_round_the_clock_error(test_scm_run_datetimes, month_range):
error_msg = re.escape(
"string ranges must lead to increasing integer ranges, "
"Nov-Feb becomes [11, 2]"
)
with pytest.raises(ValueError, match=error_msg):
test_scm_run_datetimes.filter(month=month_range)
@pytest.mark.parametrize("day_range", [range(14, 20), "Thu-Sat"])
def test_filter_time_range_day(test_scm_run_datetimes, day_range):
obs = test_scm_run_datetimes.filter(day=day_range)
expected = dt.datetime(2005, 6, 17, 12)
unique_time = obs["time"].unique()
assert len(unique_time) == 1
assert unique_time[0] == expected
def test_filter_time_range_day_unrecognised_error(test_scm_run_datetimes):
fail_filter = "Thud-Sat"
error_msg = re.escape(
"Could not convert day '{}' to integer".format(
[m for m in fail_filter.split("-")]
)
)
with pytest.raises(ValueError, match=error_msg):
test_scm_run_datetimes.filter(day=fail_filter)
@pytest.mark.parametrize("hour_range", [range(10, 14)])
def test_filter_time_range_hour(test_scm_run_datetimes, hour_range):
obs = test_scm_run_datetimes.filter(hour=hour_range)
expected_rows = (
test_scm_run_datetimes["time"].apply(lambda x: x.hour).isin(hour_range)
)
expected = test_scm_run_datetimes["time"][expected_rows].unique()
unique_time = obs["time"].unique()
assert len(unique_time) == 1
assert unique_time[0] == expected[0]
def test_filter_time_no_match(test_scm_datetime_run):
obs = test_scm_datetime_run.filter(time=dt.datetime(2004, 6, 18))
assert len(obs.time_points) == 0
assert obs.shape[1] == 0
assert obs.values.shape[1] == 0
def test_filter_time_not_datetime_error(test_scm_run_datetimes):
error_msg = re.escape("`time` can only be filtered with datetimes")
with pytest.raises(TypeError, match=error_msg):
test_scm_run_datetimes.filter(time=2005)
def test_filter_time_not_datetime_range_error(test_scm_run_datetimes):
error_msg = re.escape("`time` can only be filtered with datetimes")
with pytest.raises(TypeError, match=error_msg):
test_scm_run_datetimes.filter(time=range(2000, 2008))
def test_filter_as_kwarg(scm_run):
obs = list(scm_run.filter(variable="Primary Energy|Coal")["scenario"].unique())
assert obs == ["a_scenario"]
def test_filter_keep_false_time(scm_run):
df = scm_run.filter(year=2005, keep=False)
assert 2005 not in df.time_points.years()
assert 2010 in df.time_points.years()
obs = df.filter(scenario="a_scenario").timeseries().values.ravel()
npt.assert_array_equal(obs, [6, 6, 3, 3])
def test_filter_keep_false_metadata(scm_run):
df = scm_run.filter(variable="Primary Energy|Coal", keep=False)
assert "Primary Energy|Coal" not in df["variable"].tolist()
assert "Primary Energy" in df["variable"].tolist()
obs = df.filter(scenario="a_scenario").timeseries().values.ravel()
npt.assert_array_equal(obs, [1, 6, 6])
def test_filter_keep_false_time_and_metadata(scm_run):
error_msg = (
"If keep==False, filtering cannot be performed on the temporal axis "
"and with metadata at the same time"
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
scm_run.filter(variable="Primary Energy|Coal", year=2005, keep=False)
def test_filter_keep_false_successive(scm_run):
df = scm_run.filter(variable="Primary Energy|Coal", keep=False).filter(
year=2005, keep=False
)
obs = df.filter(scenario="a_scenario").timeseries().values.ravel()
npt.assert_array_equal(obs, [6, 6])
def test_filter_by_regexp(scm_run):
obs = scm_run.filter(scenario="a_scenari.$", regexp=True)
assert obs["scenario"].unique() == "a_scenario"
@pytest.mark.parametrize(
"regexp,exp_units", ((True, []), (False, ["W/m^2"]),),
)
def test_filter_by_regexp_caret(scm_run, regexp, exp_units):
tunits = ["W/m2"] * scm_run.shape[1]
tunits[-1] = "W/m^2"
scm_run["unit"] = tunits
obs = scm_run.filter(unit="W/m^2", regexp=regexp)
if not exp_units:
assert obs.empty
else:
assert obs.get_unique_meta("unit") == exp_units
def test_filter_asterisk_edgecase(scm_run):
scm_run["extra"] = ["*", "*", "other"]
obs = scm_run.filter(scenario="*")
assert len(obs) == len(scm_run)
obs = scm_run.filter(scenario="*", level=0)
assert len(obs) == 2
obs = scm_run.filter(scenario="a_scenario", level=0)
assert len(obs) == 1
# Edge case: with glob matching (regexp=False), "*" acts as a wildcard that matches every row,
# rather than matching only the literal "*" values in the 'extra' column
obs = scm_run.filter(extra="*", regexp=False)
assert len(obs) == len(scm_run)
assert (obs["extra"] == ["*", "*", "other"]).all()
# Not valid regex
pytest.raises(re.error, scm_run.filter, extra="*", regexp=True)
def test_filter_timeseries_different_length():
# This is different to how `ScmDataFrame` deals with nans
# Nan and empty timeseries remain in the Run
df = ScmRun(
pd.DataFrame(
np.array([[1.0, 2.0, 3.0], [4.0, 5.0, np.nan]]).T, index=[2000, 2001, 2002]
),
columns={
"model": ["a_iam"],
"climate_model": ["a_model"],
"scenario": ["a_scenario", "a_scenario2"],
"region": ["World"],
"variable": ["Primary Energy"],
"unit": ["EJ/yr"],
},
)
npt.assert_array_equal(
df.filter(scenario="a_scenario2").timeseries().squeeze(), [4.0, 5.0, np.nan]
)
npt.assert_array_equal(df.filter(year=2002).timeseries().squeeze(), [3.0, np.nan])
exp = pd.Series(["a_scenario", "a_scenario2"], name="scenario")
obs = df.filter(year=2002)["scenario"]
pd.testing.assert_series_equal(exp, obs)
assert not df.filter(scenario="a_scenario2", year=2002).timeseries().empty
def test_filter_timeseries_nan_meta():
df = ScmRun(
pd.DataFrame(
np.array([[1.0, 2.0], [4.0, 5.0], [7.0, 8.0]]).T, index=[2000, 2001]
),
columns={
"model": ["a_iam"],
"climate_model": ["a_model"],
"scenario": ["a_scenario", "a_scenario2", np.nan],
"region": ["World"],
"variable": ["Primary Energy"],
"unit": ["EJ/yr"],
},
)
def with_nan_assertion(a, b):
assert len(a) == len(b)
assert all(
[(v == b[i]) or (np.isnan(v) and np.isnan(b[i])) for i, v in enumerate(a)]
)
res = df.filter(scenario="*")["scenario"].unique()
exp = ["a_scenario", "a_scenario2", np.nan]
with_nan_assertion(res, exp)
res = df.filter(scenario="")["scenario"].unique()
exp = [np.nan]
with_nan_assertion(res, exp)
res = df.filter(scenario=np.nan)["scenario"].unique()
exp = [np.nan]
with_nan_assertion(res, exp)
def test_filter_index(scm_run):
pd.testing.assert_index_equal(scm_run.meta.index, pd.Int64Index([0, 1, 2]))
run = scm_run.filter(variable="Primary Energy")
exp_index = | pd.Int64Index([0, 2]) | pandas.Int64Index |
#coding=utf-8
import pandas as pd
import numpy as np
import sys
import os
from sklearn import preprocessing
import datetime
import scipy as sc
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.externals import joblib  # NOTE: `sklearn.externals.joblib` has been removed in newer scikit-learn releases
#import joblib                        # on newer environments, import joblib directly instead
class FEbase(object):
"""description of class"""
def __init__(self, **kwargs):
pass
def create(self,*DataSetName):
#print (self.__class__.__name__)
(filepath, tempfilename) = os.path.split(DataSetName[0])
(filename, extension) = os.path.splitext(tempfilename)
#bufferstring='savetest2017.csv'
bufferstringoutput=filepath+'/'+filename+'_'+self.__class__.__name__+extension
if(os.path.exists(bufferstringoutput)==False):
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
df_all=self.core(DataSetName)
df_all.to_csv(bufferstringoutput)
return bufferstringoutput
def core(self,df_all,Data_adj_name=''):
return df_all
def real_FE():
return 0
class FEg30eom0110network(FEbase):
# This version changes to a 3-day-ahead prediction
def __init__(self):
pass
def core(self,DataSetName):
intflag=True
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
# Flag whether the stock is ST or otherwise abnormal (inferred from the down/up price-limit ratio)
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
## Exclude STAR Market (688-prefixed) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Price adjusted for corporate actions (close * adjustment factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
if(intflag):
df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
if(intflag):
df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
if(intflag):
df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max',True)
df_all,_=FEsingle.HighLowRange(df_all,8,True)
df_all,_=FEsingle.HighLowRange(df_all,25,True)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
# Flag whether the stock hit the daily limit (limit-up)
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
### Actual price range (to distinguish genuinely high- vs low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
# 1-day change features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
if(intflag):
df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
if(intflag):
df_all['pct_chg_abs_rank']=df_all['pct_chg_abs_rank']*10//2
df_all=FEsingle.PctChgAbsSumRank(df_all,6,True)
df_all=FEsingle.PctChgSumRank(df_all,3,True)
df_all=FEsingle.PctChgSumRank(df_all,6,True)
df_all=FEsingle.PctChgSumRank(df_all,12,True)
df_all=FEsingle.AmountChgRank(df_all,12,True)
#df_all=FEsingle.AmountChgRank(df_all,30)
# Compute percentile ranks for the three price ratios (open/high/low relative to the previous close)
dolist=['open','high','low']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
if(intflag):
df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
# Drop stocks whose share price is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
# Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
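# Illustrative usage sketch of the caching behaviour implemented in FEbase.create above: the
# engineered features are written next to the first input file as '<name>_<ClassName>.csv', and
# the expensive core() step is skipped on later runs if that file already exists. The file paths
# below are hypothetical placeholders, not paths used by this project.
def _example_feature_build():
    fe = FEg30eom0110network()
    out_path = fe.create(
        'data/daily.csv',        # DataSetName[0]: daily bars
        'data/adj_factor.csv',   # DataSetName[1]: adjustment factors
        'data/stk_limit.csv',    # DataSetName[2]: daily up/down price limits
        'data/moneyflow.csv',    # DataSetName[3]: money-flow data (not read by this class, but still expected)
        'data/daily_basic.csv',  # DataSetName[4]: daily valuation indicators (total_mv, pb, ps_ttm, ...)
    )
    return pd.read_csv(out_path, index_col=0, header=0)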
class FEg30eom0110onlinew6d(FEbase):
# This version changes to a 3-day-ahead prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all['sm_amount_pos']=df_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['lg_amount_pos']=df_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['net_mf_amount_pos']=df_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['sm_amount_pos']=df_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_all['lg_amount_pos']=df_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_all['net_mf_amount_pos']=df_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_all['sm_amount']=df_all.groupby('ts_code')['sm_amount'].shift(1)
df_all['lg_amount']=df_all.groupby('ts_code')['lg_amount'].shift(1)
df_all['net_mf_amount']=df_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
# Flag whether the stock is ST or otherwise abnormal (inferred from the down/up price-limit ratio)
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
## Exclude STAR Market (688-prefixed) stocks
#print(df_all)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Price adjusted for corporate actions (close * adjustment factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
# Flag whether the stock hit the daily limit (limit-up)
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
### Actual price range (to distinguish genuinely high- vs low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
# 1-day change features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
#df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
# Compute percentile ranks for the three price ratios (open/high/low relative to the previous close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
# Drop stocks whose share price is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
# Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
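# NOTE: rollingRankSciPyB, used in the rolling .apply(...) calls above, is not defined in this
# excerpt and is assumed to live elsewhere in the project. A minimal sketch of what such a
# rolling-rank helper typically looks like is given below purely as an assumption -- it returns
# the percentile position of the most recent value within its rolling window, which matches how
# it is applied via groupby('ts_code')[col].rolling(20).apply(...) above.
def _rolling_rank_sketch(window_values):
    from scipy.stats import rankdata  # local import keeps the sketch self-contained
    ranks = rankdata(window_values)        # ranks 1..N within the window (ties averaged)
    return ranks[-1] / len(window_values)  # percentile position of the latest value, in (0, 1]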
class FE_a23(FEbase):
# This version changes to a 3-day-ahead prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
# Flag whether the stock is ST or otherwise abnormal (inferred from the down/up price-limit ratio)
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
## Exclude STAR Market (688-prefixed) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Price adjusted for corporate actions (close * adjustment factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
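# real_price_pos: for each ts_code, the position of the latest adjusted price inside its
# own trailing 20-day window (rollingRankSciPyB is defined elsewhere; it presumably
# returns a percentile-style rank of the window's last value).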
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
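# Bucketize the cross-sectional market-cap percentile into 20 integer buckets:
# rank p in (0, 1] -> p*19.9 in (0, 19.9] -> floor gives 0..19 (e.g. p=0.50 -> 9, p=1.00 -> 19).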
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
# Limit-up flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
### Actual price range (distinguish genuinely high vs. low share prices)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
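# Multi-horizon momentum features: PctChgSumRank / PctChgSum are assumed to aggregate
# pct_chg over the last 3/6/12/24 trading days per ts_code (the *Rank variants ranked
# cross-sectionally); FEsingle's exact definitions are not shown here.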
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
# Compute cross-sectional ranks of the three ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
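# The loop above replaces open/high/low with their percentage gap from pre_close and then
# converts each to a per-trade_date percentile rank, so all three become comparable 0-1
# features rather than raw prices.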
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
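# PredictDaysTrend(df_all, 5) is assumed to append the training label from the forward
# 5-day price trend (the 'tomorrow_chg_rank'-style target hinted at in the commented
# line below); its exact construction lives in FEsingle.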
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
# Drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
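# Liquidity / price floor: keep names with close > 2 and daily amount > 15000
# (tushare quotes 'amount' in thousands of CNY, so this is roughly a 15M CNY turnover
# floor - treat the unit as an assumption).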
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
# Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
# Planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
# Question mark here: to be revisited
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor is required for string methods
print(df_all)
## Exclude STAR Market (688) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Price adjusted by adj_factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
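# Real-time variant: today's adj_factor may not be published yet, so the adjusted price
# is rebuilt from the previous day's adjusted close grown by today's pct_chg
# (prev_real_price * (1 + pct_chg/100)); rows whose adj_factor was zero-filled above
# end up with an effectively empty price rather than a usable one.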
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
# Limit-up flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
# Compute cross-sectional ranks of the three ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
# Drop low-priced / low-cap stocks (active filter: close > 3)
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
# Columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
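# ----------------------------------------------------------------------------------
# Illustrative sketch only (not used by the classes below): one plausible way to build
# the 20-day rolling "position" features computed via rollingRankSciPyB above. The
# helper name, the SciPy-based ranking and the 0-1 scaling are assumptions about what
# that project-specific function does, not its actual definition.
from scipy import stats as _stats_sketch
def _rolling_percentile_position_sketch(series, window=20):
    """Percentile position of each value within its trailing `window` observations."""
    return series.rolling(window).apply(
        lambda x: _stats_sketch.percentileofscore(x, x.iloc[-1]) / 100.0, raw=False
    )
# Hypothetical usage mirroring the real_price_pos lines above:
#   df['real_price_pos'] = (df.groupby('ts_code')['real_price']
#                             .apply(_rolling_percentile_position_sketch)
#                             .reset_index(0, drop=True))
# ----------------------------------------------------------------------------------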
class FE_a29(FEbase):
# This version switches to a 3-day prediction (note: PredictDaysTrend below is still called with a 5-day horizon)
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
# ST or otherwise abnormal (flagged via the down/up price-limit ratio)
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
## Exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Price adjusted by adj_factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
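# The *_diff columns above are window-slope features: the gap between the 25- vs 12-day
# and 12- vs 5-day min/max/range ranks, capturing whether the short-term position is
# improving or deteriorating relative to the longer window.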
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
# Limit-up flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
### Actual price range (distinguish genuinely high vs. low share prices)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
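# Same idea for momentum: differences between the 24/12/6/3-day cumulative-change ranks
# and sums describe whether recent momentum is accelerating or fading.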
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
# Compute cross-sectional ranks of the three ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
# Drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>15]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
# Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
# Planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
# Question mark here: to be revisited
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor is required for string methods
print(df_all)
## Exclude STAR Market (688) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Price adjusted by adj_factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
# Limit-up flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
# Compute cross-sectional ranks of the three ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
# Drop low-priced / low-cap stocks (active filter: close > 3)
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
# Columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a29_Volatility(FEbase):
# This version switches to 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
# ST or otherwise abnormal (flagged via the down/up price-limit ratio)
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
## Exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Price adjusted by adj_factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
# Limit-up flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
### Actual price range (distinguish genuinely high vs. low share prices)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
# Compute cross-sectional ranks of the three ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
# Drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>15]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
# Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
# Planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
# Question mark here: to be revisited
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor is required for string methods
print(df_all)
## Exclude STAR Market (688) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Price adjusted by adj_factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
# Limit-up flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
# Compute cross-sectional ranks of the three ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
# Drop low-priced / low-cap stocks (active filter: close > 3)
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
# Columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a31(FEbase):
# This version switches to 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
# ST or otherwise abnormal (flagged via the down/up price-limit ratio)
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
## Exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Price adjusted by adj_factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
# Limit-up flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
### Actual price range (distinguish genuinely high vs. low share prices)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
# Compute cross-sectional ranks of the three ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
# Drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
df_all=df_all[df_all['total_mv_rank']<6]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not needed for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
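# Note (added sketch, not part of the original source): rollingRankSciPyB is the
# rolling-rank helper defined elsewhere in this module and used throughout
# core()/real_FE(). A minimal version could look like the following (assumption;
# the actual implementation may normalize or break ties differently):
# from scipy.stats import rankdata
# def rollingRankSciPyB(window):
# # rank of the most recent value within the rolling window, scaled to (0, 1]
# return rankdata(window)[-1] / len(window)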
def real_FE(self):
#planned version of the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#question mark here: revisit this
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor must be used here
print(df_all)
##exclude STAR Market (688) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price (using adj_factor)
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#upper price limit (limit-up) flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#compute percentile ranks of the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop stocks whose share price is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not needed for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a31_full(FEbase):
#this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#ST or otherwise flagged stocks
#df_all['st_or_otherwrong']=0
#df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price (using adj_factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#upper price limit (limit-up) flag
#df_all['high_stop']=0
#df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###real price range (distinguishes actually high vs. low share prices)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#compute percentile ranks of the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose share price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
df_all=df_all[df_all['total_mv_rank']<6]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not needed for now
#df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['amount','close','real_price'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#planned version of the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#question mark here: revisit this
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor must be used here
print(df_all)
##exclude STAR Market (688) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price (using adj_factor)
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#upper price limit (limit-up) flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#compute percentile ranks of the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop stocks whose share price is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not needed for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a29_full(FEbase):
#this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#ST or otherwise flagged stocks
#df_all['st_or_otherwrong']=0
#df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price (using adj_factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#upper price limit (limit-up) flag
#df_all['high_stop']=0
#df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###real price range (distinguishes actually high vs. low share prices)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#compute percentile ranks of the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose share price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']<6]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not needed for now
#df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['amount','close','real_price'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#planned version of the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
"""
A sbatch wrapper for stampede2
See stampede2 doc:
https://portal.tacc.utexas.edu/user-guides/stampede2#running-sbatch
"""
import pathlib
import re
import shlex
import subprocess
import time
from collections import defaultdict
import pandas as pd
import random
import string
import cemba_data
PACKAGE_DIR = pathlib.Path(cemba_data.__path__[0])
# see stampede2 doc https://portal.tacc.utexas.edu/user-guides/stampede2#running-queues
# name: max_jobs
STAMPEDE2_QUEUES = {
'development': 1,
'normal': 50,
'large': 5,
'long': 2,
'flat-quadrant': 5,
'skx-dev': 1,
'skx-normal': 20,
'skx-large': 3
}
def judge_job_success(job_id, retry=3):
tried = 0
for i in range(retry):
try:
p = subprocess.run(['sacct', '-j', str(job_id), '--format=jobid,exitcode'],
check=True, stdout=subprocess.PIPE, encoding='utf8')
sacct_txt = p.stdout
pt = re.compile(rf'{job_id}\s+0:0')
if pt.search(sacct_txt):
return True
else:
return False
except subprocess.CalledProcessError:
tried += 1
print(f'sacct error, try again {tried}/{retry}')
time.sleep(10)
return False
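# Illustrative example (not from the original source): judge_job_success expects
# sacct output roughly of the form
#
# JobID ExitCode
# ------------ --------
# 4837261 0:0
# 4837261.bat+ 0:0
#
# and treats the job as successful only if a line matching r'<job_id>\s+0:0' is present.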
def get_job_id(sbatch_result):
"""
parse the sbatch output, check status, and get job id
Parameters
----------
sbatch_result
Returns
-------
sbatch job_id
"""
job_id = None
for line in sbatch_result.split('\n'):
line = line.strip()
if line.startswith('-->'):
# status line
if line.endswith("OK"):
continue
else:
print(sbatch_result)
raise ValueError("sbatch output is abnormal, see information above")
elif line.startswith('Submitted batch job '):
# job id line
job_id = line.split(' ')[-1]
else:
pass
if job_id is None:
print(sbatch_result)
raise ValueError('Can not get job id from sbatch output, see information above')
return job_id
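# Illustrative example (not from the original source): a TACC sbatch submission
# typically prints '-->' status lines followed by the job id line, e.g.
#
# --> Verifying valid submit host (login1)...OK
# --> Verifying valid jobname...OK
# Submitted batch job 4837261
#
# For the output above get_job_id() returns '4837261'; it raises if any '-->'
# status line does not end with 'OK'.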
def submit_sbatch(job_script_path):
"""
submit sbatch job and return job id
Parameters
----------
job_script_path
Returns
-------
"""
try:
p = subprocess.run(['sbatch', str(job_script_path)],
check=True,
encoding='utf8',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
job_id = get_job_id(p.stdout)
print(f'Submit job script: {job_script_path}. Job ID is {job_id}.')
except subprocess.CalledProcessError as e:
print('sbatch STDOUT')
print(e.stdout)
print('sbatch STDERR')
print(e.stderr)
print('sbatch submission failed, see information above.')
raise e
return job_id
def squeue(partition):
"""
check the user's currently running jobs
Returns
-------
squeue results in a pd.DataFrame
"""
user_name = subprocess.run(['whoami'],
check=True,
encoding='utf8',
stdout=subprocess.PIPE).stdout.strip()
for i in range(3):
try:
squeue_result = subprocess.run(['squeue', '-u', user_name],
check=True,
encoding='utf8',
stdout=subprocess.PIPE).stdout
break
except subprocess.CalledProcessError:
print(f'Squeue got an error, waiting 60s and trying again {i + 1}/3')
time.sleep(60)
continue
else:
raise SystemError('Squeue command failed')
print('Current squeue output:')
print(squeue_result, end='\n')
records = []
col_names = []
col_end_pos = []
for i, line in enumerate(squeue_result.rstrip().split('\n')):
if i == 0:
sep_pattern = re.compile(r' +')
col_names = sep_pattern.split(line.strip())
col_end_pos = [0] + [line.index(col_name) + len(col_name) for col_name in col_names]
if line == '':
continue
record = []
for j in range(len(col_end_pos) - 1):
if j != len(col_names) - 1:
col_data = line[col_end_pos[j]:col_end_pos[j + 1]]
else:
# for last column, take all the rest chr
col_data = line[col_end_pos[j]:]
record.append(col_data.strip())
records.append(record)
squeue_df = pd.DataFrame(records[1:],
columns=records[0]).set_index('JOBID')
total_job = squeue_df.shape[0]
try:
squeue_df = squeue_df[squeue_df['PARTITION'].str.lower() == partition.lower()].copy()
return squeue_df, total_job
except KeyError:
print(squeue_df)
return squeue_df, total_job
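# Parsing note (illustrative example, not from the original source): squeue prints
# fixed-width columns such as
#
# JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON)
# 4837261 skx-norm yap_0 user R 0:42 1 c506-001
#
# The header line is used to compute column boundaries, and every record line is
# sliced at those positions so values are recovered even when they contain spaces.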
def make_sbatch_script_files(commands, sbatch_dir, name_prefix, queue, time_str, email, email_type, template='yap'):
"""See stampede2 doc: https://portal.tacc.utexas.edu/user-guides/stampede2#running-sbatch"""
if template == 'yap':
with open(PACKAGE_DIR / 'files/sbatch_template_yap.txt') as f:
sbatch_template = f.read()
elif template == 'schicluster':
with open(PACKAGE_DIR / 'files/sbatch_template_schicluster.txt') as f:
sbatch_template = f.read()
else:
raise ValueError('Only support ["yap", "schicluster"] template')
sbatch_dir = pathlib.Path(sbatch_dir)
if email is not None:
email_str = f'#SBATCH --mail-user={email}'
email_type_str = f'#SBATCH --mail-type={email_type}'
else:
email_str = ''
email_type_str = ''
env_dir_random = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))
queue_job_path_list = []
for i, command in enumerate(commands):
job_name = f'{name_prefix}_{i}'
sbatch_script = sbatch_template.format(
job_name=job_name,
queue=queue,
time_str=time_str,
email_str=email_str,
email_type_str=email_type_str,
command=command,
log_dir=sbatch_dir,
env_dir_random=env_dir_random
)
job_script_path = sbatch_dir / f'{job_name}.sh'
with open(job_script_path, 'w') as f:
f.write(sbatch_script)
queue_job_path_list.append(job_script_path)
return queue_job_path_list
def sacct(jobs):
sep_pattern = re.compile(r' +')
sacct_cmd = f'sacct -j {",".join(jobs)} ' \
f'--format=jobid,jobname,partition,alloccpus,elapsed,state,exitcode'
sacct_result = subprocess.run(shlex.split(sacct_cmd),
check=True,
encoding='utf8',
stdout=subprocess.PIPE).stdout
lines = []
header = ''
col_starts = []
for i, line in enumerate(sacct_result.rstrip('\n').split('\n')):
if i == 0:
header = line
elif i == 1:
# the second row indicates col width, use it to determine col_starts
col_width = [len(s) for s in sep_pattern.split(line)]
cur_pos = 0
col_starts = [0]
for length in col_width:
cur_pos += (length + 1)
col_starts.append(cur_pos)
else:
lines.append(line)
columns = [header[col_starts[i]:col_starts[i + 1]].strip() for i in range(len(col_starts) - 1)]
data = []
for line in lines:
ll = [line[col_starts[i]:col_starts[i + 1]].strip() for i in range(len(col_starts) - 1)]
data.append(ll)
sacct_data = pd.DataFrame(data, columns=columns).set_index('JobID')
sacct_data = sacct_data[~sacct_data.index.str.endswith('bat+')].copy()
sacct_data['Success'] = sacct_data['ExitCode'] == '0:0'
return sacct_data
def sbatch_submitter(project_name, command_file_path, working_dir, time_str, queue='skx-normal',
email=None, email_type='fail', max_jobs=None, dry_run=False, retry=2,
template='yap'):
# read commands
with open(command_file_path) as f:
# I always assume the command is ordered with descending priority.
# But sbatch will submit last job first (list.pop), so reverse the order here.
commands = [line.rstrip('\n') for line in f if not line.startswith('#')][::-1]
# set name
project_name = project_name.replace(' ', '_')
# check queue
queue = queue.lower()
if queue not in STAMPEDE2_QUEUES:
raise KeyError(f'queue name {queue} not found in STAMPEDE2_QUEUES, '
f'available queues are {list(STAMPEDE2_QUEUES.keys())}')
# set max_jobs
_max_jobs = STAMPEDE2_QUEUES[queue]
if max_jobs is None:
max_jobs = _max_jobs
else:
max_jobs = min(max_jobs, _max_jobs)
print(f'Max concurrent sbatch jobs {max_jobs}, stampede2 allows {STAMPEDE2_QUEUES[queue]}.')
# make sbatch_dir
sbatch_dir = pathlib.Path(working_dir) / f'{project_name}_sbatch'
sbatch_dir.mkdir(exist_ok=True, parents=True)
# check if sacct file exists, which could from previous submission.
# I only keep successful items, and skip them in this submission.
sacct_path = sbatch_dir / 'sacct.csv.gz'
previous_sacct_df_success = None
successful_script_paths = set()
if sacct_path.exists():
print('Found previous submission records, successful jobs will not be submitted again.')
previous_sacct_df = pd.read_csv(sacct_path, index_col=0)
previous_sacct_df_success = previous_sacct_df[previous_sacct_df['Success']]
successful_script_paths = set(previous_sacct_df_success['ScriptPath'].astype(str).tolist())
print(f'Successful script paths: {", ".join(successful_script_paths)}')
# create job script files
queue_job_path_list = make_sbatch_script_files(
commands=commands,
sbatch_dir=sbatch_dir,
name_prefix=project_name,
queue=queue,
time_str=time_str,
email=email,
email_type=email_type,
template=template
)
# prepare submission
running_job_id_set = set() # sbatch_id
finished_job_id_set = set() # job_id
job_id_to_script_path = {}
# start submission
sleepy = 30
flag_path = sbatch_dir / 'RUNNING_SIGNAL'
if flag_path.exists():
raise FileExistsError(f'Running signal exists {flag_path}. '
f'Make sure you do not have sbatch submitter running and (if so) delete that flag file.')
with open(flag_path, 'w') as f:
f.write('')
script_path_to_tried_times = defaultdict(int)
if not dry_run:
squeue_fail = 0
while (len(queue_job_path_list) != 0) or (len(running_job_id_set) != 0):
if not flag_path.exists():
# break if flag missing
break
# squeue and update running job status
try:
squeue_df, total_job = squeue(partition=queue)
squeue_fail = 0
except Exception as e:
print('Squeue parser raised an error, will retry after 150s.')
squeue_fail += 1
if squeue_fail > 10:
raise e
time.sleep(150)
continue
# queue limit and total job limit both apply
remaining_slots = min((max_jobs - squeue_df.shape[0], 50 - total_job))
# the max_jobs is apply to user level, not to the current submitter level
if remaining_slots > 0:
# things are getting done, weak up
sleepy = 30
# check running jobs
new_running_job_id_set = set()
for job_id in running_job_id_set:
if job_id not in squeue_df.index:
# running job finished
if judge_job_success(job_id):
# job succeed
finished_job_id_set.add(job_id)
else:
# job failed
script_path = job_id_to_script_path[job_id]
script_path_to_tried_times[script_path] += 1
if script_path_to_tried_times[script_path] <= retry:
print(f'Job {job_id} failed, retry {script_path} '
f'{script_path_to_tried_times[script_path]}/{retry}.')
queue_job_path_list.append(script_path)
else:
# add the last job_id into finished
finished_job_id_set.add(job_id)
print(f'{script_path} failed after {retry + 1} attempts.')
# status will be judged in the end
else:
# still running
new_running_job_id_set.add(job_id)
pass
running_job_id_set = new_running_job_id_set
print(f'{len(running_job_id_set)} running job IDs: {", ".join(running_job_id_set)}')
# submit new jobs
while (remaining_slots > 0) and (len(queue_job_path_list) > 0):
print(f'Remaining slots: {remaining_slots}')
script_path = queue_job_path_list.pop()
# skip if job already submitted and are successful before
if str(script_path) in successful_script_paths:
print(f'Already successful in previous submission: {script_path}')
continue
job_id = submit_sbatch(script_path)
running_job_id_set.add(job_id)
job_id_to_script_path[job_id] = script_path
remaining_slots -= 1
# sleep
sleepy += 30
sleepy = min(300, sleepy)
time.sleep(sleepy)
# only check status if something has finished
if len(finished_job_id_set) > 0:
# check status
chunk_size = 50
stats = []
finished_job_ids = list(finished_job_id_set)
for i in range(0, len(finished_job_ids), chunk_size):
job_chunk = finished_job_ids[i: i + chunk_size]
stats.append(sacct(job_chunk))
sacct_df = pd.concat(stats)
import os
from datetime import datetime, date
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from fbprophet import Prophet
class Detector:
def __init__(
self,
min_time_points: int = 10,
none_zero_ratio: float = 0.0,
min_dataset_size: int = 0,
image_path: str = 'image.png'
) -> None:
self.ds_min_points = min_time_points
self.none_zero_ratio = none_zero_ratio
self.min_dataset_size = min_dataset_size
self.image_path = image_path
self.x_column_name = 'ds'
self.y_column_name = 'y'
def forecast_today(self, dataset: pd.DataFrame) -> pd.DataFrame:
"""
Forecast today's value from the historical dataset and mark today as an anomaly if it falls outside the forecasted range
Input should be a DataFrame of records having `time` & `value` fields
Output is a DataFrame holding today's forecast & anomaly flag
:param dataset:
pd.DataFrame([{"time": "2018-02-13", "value": 1069}, {"time": "2018-02-14", "value": 3000}, ...])
data should be aggregated per day, i.e. there should be only one entry (value) for each day
:return: pd.DataFrame of anomalies
each Series has "ds", "trend", "trend_lower", "trend_upper", "yhat_lower", "yhat_upper", "seasonal",
"seasonal_lower", "seasonal_upper", "seasonalities", "seasonalities_lower", "seasonalities_upper",
"weekly", "weekly_lower", "weekly_upper", "yhat", "std", "actual"
For more info check https://facebook.github.io/prophet/
"""
dataset = self._validate_input(dataset)
historical_data = dataset[:-1]
last_day_of_data = dataset[-1:]
todays_forecast = self._get_forecast(historical_data, last_day_of_data)
return self._compare(historical_data, last_day_of_data, todays_forecast)
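# Usage sketch (illustrative only; values are made up and the history must be
# aggregated to one row per day, with today's row last):
#
# detector = Detector(min_time_points=10, image_path='forecast.png')
# history = pd.DataFrame([
# {"time": "2018-02-13", "value": 1069},
# {"time": "2018-02-14", "value": 3000},
# # ... one row per day ...
# ])
# anomalies = detector.forecast_today(history)
# # an empty DataFrame means today is within the forecasted range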
def _validate_input(self, dataset: pd.DataFrame) -> pd.DataFrame:
x_column_name = 'time'
y_column_name = 'value'
if x_column_name not in dataset.columns or y_column_name not in dataset.columns:
raise ValueError('dataset should have [{}] & [{}] columns'.format(x_column_name, y_column_name))
dataset = dataset.rename(columns={x_column_name: self.x_column_name, y_column_name: self.y_column_name})
dataset[self.x_column_name] = dataset[self.x_column_name].apply(lambda t: t.strftime('%Y-%m-%d') if isinstance(t, date) else t)
return dataset
def _get_forecast(self, data: pd.DataFrame, actual: pd.DataFrame) -> pd.DataFrame:
actual_time_points = len(data)
actual_dataset_size = data[self.y_column_name].sum()
if actual_time_points < self.ds_min_points or (
len(data[data[self.y_column_name] == 0]) / len(data) > self.none_zero_ratio
) or actual_dataset_size < self.min_dataset_size:
return pd.DataFrame()
import configparser
import importlib
import numpy as np
import pandas as pd
###############################################################################
#Non-Standard Import
###############################################################################
try:
from . import model_handler as mh
from . import settings_handler as sh
from .utils_settings import *
except:
import model_handler as mh
import settings_handler as sh
from utils_settings import *
###############################################################################
#Interfacing with Configparser
###############################################################################
def from_config(filename):
'''Opens a config file and reads the fields/subfields required for setting up
the analysis while ignoring the irrelevant ones. Returns a dictionary of the
collected information.
:param filename: Name of file to read.
:type filename: str
'''
config = configparser.RawConfigParser()
config.optionxform = lambda option: option
config_data = {}
with open(filename, 'r') as file:
config.read_file(file)
n = 1
for section in config.sections():
if not is_analysis_settings(config, section):
continue
init = config[section]['init']
params = config[section]['parameter_values']
tspan = config[section]['tspan']
solver_args = config[section].get('solver_args')
init = eval_init_string(init)
tspan = eval_tspan_string(tspan)
params = eval_params_string(params)
solver_args = string_to_dict(solver_args) if solver_args else {}
config_data[n] = {'system_type': section, 'init': init, 'params': params, 'tspan': tspan, 'solver_args': solver_args}
n += 1
return config_data
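# Illustrative config layout (not from the original source; the exact value formats
# expected by eval_init_string/eval_tspan_string/eval_params_string are defined in
# utils_settings and may differ):
#
# [MyModel, v1] <- the section name is used as the system_type
# init = ...
# parameter_values = ...
# tspan = ...
# solver_args = ... (optional)
#
# Each qualifying section becomes one numbered entry of the returned dict with the
# keys 'system_type', 'init', 'params', 'tspan' and 'solver_args'.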
###############################################################################
#Main Set Up
###############################################################################
def get_models_and_params(filename, user_core_models={}):
'''Reads the config file and combines it with core_model data. If you are
using a core model that is not in the database, you must provide the core_model
using the user_core_models argument where the key is the system_type. Returns the
models, params and config_data.
:param filename: Name of file to read.
:type filename: str
:param user_core_models: A dictionary of core_models indexed by their system_type.
core_models already in the database do not need to be specified here.
:type user_core_models: dict, optional
'''
config_data, core_models = setup_helper(filename, from_config, user_core_models)
models, params = compile_models(core_models, config_data)
return models, params, config_data
def compile_models(core_models, config_data):
models = make_compiled_models_template(core_models)
params = {}
for key in config_data:
models[key]['init'] = config_data[key].get('init', {1: np.array([0]*len(models[key]['states'])) })
models[key]['tspan'] = config_data[key].get('tspan', [np.linspace(0, 600, 31)])
models[key]['int_args']['solver_args'] = config_data[key].get('solver_args', {})
try:
temp = {param + '_' + str(key): config_data[key]['params'][param].values for param in config_data[key]['params']}
except:
temp = {param + '_' + str(key): config_data[key]['guess'][param].values for param in config_data[key]['guess']}
params = {**params, **temp}
return models, pd.DataFrame(params)
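# Usage sketch (illustrative; 'settings.ini' and the custom core model name are
# hypothetical):
#
# models, params, config_data = get_models_and_params('settings.ini')
# # with a core model that is not in the database:
# # models, params, config_data = get_models_and_params(
# # 'settings.ini', user_core_models={'MyModel, v1': my_core_model})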
from datetime import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.base import _registry as ea_registry
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
PeriodIndex,
Series,
Timestamp,
cut,
date_range,
notna,
period_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.tseries.offsets import BDay
class TestDataFrameSetItem:
@pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
def test_setitem_dtype(self, dtype, float_frame):
arr = np.random.randn(len(float_frame))
float_frame[dtype] = np.array(arr, dtype=dtype)
assert float_frame[dtype].dtype.name == dtype
def test_setitem_list_not_dataframe(self, float_frame):
data = np.random.randn(len(float_frame), 2)
float_frame[["A", "B"]] = data
tm.assert_almost_equal(float_frame[["A", "B"]].values, data)
def test_setitem_error_msmgs(self):
# GH 7432
df = DataFrame(
{"bar": [1, 2, 3], "baz": ["d", "e", "f"]},
index=Index(["a", "b", "c"], name="foo"),
)
ser = Series(
["g", "h", "i", "j"],
index=Index(["a", "b", "c", "a"], name="foo"),
name="fiz",
)
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
df["newcol"] = ser
# GH 4107, more descriptive error message
df = DataFrame(np.random.randint(0, 2, (4, 4)), columns=["a", "b", "c", "d"])
msg = "incompatible index of inserted column with frame index"
with pytest.raises(TypeError, match=msg):
df["gr"] = df.groupby(["b", "c"]).count()
def test_setitem_benchmark(self):
# from the vb_suite/frame_methods/frame_insert_columns
N = 10
K = 5
df = DataFrame(index=range(N))
new_col = np.random.randn(N)
for i in range(K):
df[i] = new_col
expected = DataFrame(np.repeat(new_col, K).reshape(N, K), index=range(N))
tm.assert_frame_equal(df, expected)
def test_setitem_different_dtype(self):
df = DataFrame(
np.random.randn(5, 3), index=np.arange(5), columns=["c", "b", "a"]
)
df.insert(0, "foo", df["a"])
df.insert(2, "bar", df["c"])
# diff dtype
# new item
df["x"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 5 + [np.dtype("float32")],
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
# replacing current (in different block)
df["a"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2,
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
df["y"] = df["a"].astype("int32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2 + [np.dtype("int32")],
index=["foo", "c", "bar", "b", "a", "x", "y"],
)
tm.assert_series_equal(result, expected)
def test_setitem_empty_columns(self):
# GH 13522
df = DataFrame(index=["A", "B", "C"])
df["X"] = df.index
df["X"] = ["x", "y", "z"]
exp = DataFrame(data={"X": ["x", "y", "z"]}, index=["A", "B", "C"])
tm.assert_frame_equal(df, exp)
def test_setitem_dt64_index_empty_columns(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
df = DataFrame(index=np.arange(len(rng)))
df["A"] = rng
assert df["A"].dtype == np.dtype("M8[ns]")
def test_setitem_timestamp_empty_columns(self):
# GH#19843
df = DataFrame(index=range(3))
df["now"] = Timestamp("20130101", tz="UTC")
expected = DataFrame(
[[Timestamp("20130101", tz="UTC")]] * 3, index=[0, 1, 2], columns=["now"]
)
tm.assert_frame_equal(df, expected)
def test_setitem_wrong_length_categorical_dtype_raises(self):
# GH#29523
cat = Categorical.from_codes([0, 1, 1, 0, 1, 2], ["a", "b", "c"])
df = DataFrame(range(10), columns=["bar"])
msg = (
rf"Length of values \({len(cat)}\) "
rf"does not match length of index \({len(df)}\)"
)
with pytest.raises(ValueError, match=msg):
df["foo"] = cat
def test_setitem_with_sparse_value(self):
# GH#8131
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_array = SparseArray([0, 0, 1])
df["new_column"] = sp_array
expected = Series(sp_array, name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_with_unaligned_sparse_value(self):
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_series = Series(SparseArray([0, 0, 1]), index=[2, 1, 0])
df["new_column"] = sp_series
expected = Series(SparseArray([1, 0, 0]), name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_dict_preserves_dtypes(self):
# https://github.com/pandas-dev/pandas/issues/34573
expected = DataFrame(
{
"a": Series([0, 1, 2], dtype="int64"),
"b": Series([1, 2, 3], dtype=float),
"c": Series([1, 2, 3], dtype=float),
}
)
df = DataFrame(
{
"a": Series([], dtype="int64"),
"b": Series([], dtype=float),
"c": Series([], dtype=float),
}
)
for idx, b in enumerate([1, 2, 3]):
df.loc[df.shape[0]] = {"a": int(idx), "b": float(b), "c": float(b)}
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"obj,dtype",
[
(Period("2020-01"), PeriodDtype("M")),
(Interval(left=0, right=5), IntervalDtype("int64", "right")),
(
Timestamp("2011-01-01", tz="US/Eastern"),
DatetimeTZDtype(tz="US/Eastern"),
),
],
)
def test_setitem_extension_types(self, obj, dtype):
# GH: 34832
expected = DataFrame({"idx": [1, 2, 3], "obj": Series([obj] * 3, dtype=dtype)})
df = DataFrame({"idx": [1, 2, 3]})
df["obj"] = obj
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"ea_name",
[
dtype.name
for dtype in ea_registry.dtypes
# property would require instantiation
if not isinstance(dtype.name, property)
]
# mypy doesn't allow adding lists of different types
# https://github.com/python/mypy/issues/5492
+ ["datetime64[ns, UTC]", "period[D]"], # type: ignore[list-item]
)
def test_setitem_with_ea_name(self, ea_name):
# GH 38386
result = DataFrame([0])
result[ea_name] = [1]
expected = DataFrame({0: [0], ea_name: [1]})
tm.assert_frame_equal(result, expected)
def test_setitem_dt64_ndarray_with_NaT_and_diff_time_units(self):
# GH#7492
data_ns = np.array([1, "nat"], dtype="datetime64[ns]")
result = Series(data_ns).to_frame()
result["new"] = data_ns
expected = DataFrame({0: [1, None], "new": [1, None]}, dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
# OutOfBoundsDatetime error shouldn't occur
data_s = np.array([1, "nat"], dtype="datetime64[s]")
result["new"] = data_s
expected = DataFrame({0: [1, None], "new": [1e9, None]}, dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["h", "m", "s", "ms", "D", "M", "Y"])
def test_frame_setitem_datetime64_col_other_units(self, unit):
# Check that non-nano dt64 values get cast to dt64 on setitem
# into a not-yet-existing column
n = 100
dtype = np.dtype(f"M8[{unit}]")
vals = np.arange(n, dtype=np.int64).view(dtype)
ex_vals = vals.astype("datetime64[ns]")
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df[unit] = vals
assert df[unit].dtype == np.dtype("M8[ns]")
assert (df[unit].values == ex_vals).all()
@pytest.mark.parametrize("unit", ["h", "m", "s", "ms", "D", "M", "Y"])
def test_frame_setitem_existing_datetime64_col_other_units(self, unit):
# Check that non-nano dt64 values get cast to dt64 on setitem
# into an already-existing dt64 column
n = 100
dtype = np.dtype(f"M8[{unit}]")
vals = np.arange(n, dtype=np.int64).view(dtype)
ex_vals = vals.astype("datetime64[ns]")
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df["dates"] = np.arange(n, dtype=np.int64).view("M8[ns]")
# We overwrite existing dt64 column with new, non-nano dt64 vals
df["dates"] = vals
assert (df["dates"].values == ex_vals).all()
def test_setitem_dt64tz(self, timezone_frame):
df = timezone_frame
idx = df["B"].rename("foo")
# setitem
df["C"] = idx
tm.assert_series_equal(df["C"], Series(idx, name="C"))
df["D"] = "foo"
df["D"] = idx
tm.assert_series_equal(df["D"], Series(idx, name="D"))
del df["D"]
# assert that A & C are not sharing the same base (e.g. they
# are copies)
v1 = df._mgr.arrays[1]
v2 = df._mgr.arrays[2]
tm.assert_extension_array_equal(v1, v2)
v1base = v1._data.base
v2base = v2._data.base
assert v1base is None or (id(v1base) != id(v2base))
# with nan
df2 = df.copy()
df2.iloc[1, 1] = NaT
df2.iloc[1, 2] = NaT
result = df2["B"]
tm.assert_series_equal(notna(result), Series([True, False, True], name="B"))
tm.assert_series_equal(df2.dtypes, df.dtypes)
def test_setitem_periodindex(self):
rng = period_range("1/1/2000", periods=5, name="index")
df = DataFrame(np.random.randn(5, 3), index=rng)
df["Index"] = rng
rs = Index(df["Index"])
tm.assert_index_equal(rs, rng, check_names=False)
assert rs.name == "Index"
assert rng.name == "index"
rs = df.reset_index().set_index("index")
assert isinstance(rs.index, PeriodIndex)
tm.assert_index_equal(rs.index, rng)
def test_setitem_complete_column_with_array(self):
# GH#37954
df = DataFrame({"a": ["one", "two", "three"], "b": [1, 2, 3]})
arr = np.array([[1, 1], [3, 1], [5, 1]])
df[["c", "d"]] = arr
expected = DataFrame(
{
"a": ["one", "two", "three"],
"b": [1, 2, 3],
"c": [1, 3, 5],
"d": [1, 1, 1],
}
)
expected["c"] = expected["c"].astype(arr.dtype)
expected["d"] = expected["d"].astype(arr.dtype)
assert expected["c"].dtype == arr.dtype
assert expected["d"].dtype == arr.dtype
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("dtype", ["f8", "i8", "u8"])
def test_setitem_bool_with_numeric_index(self, dtype):
# GH#36319
cols = Index([1, 2, 3], dtype=dtype)
df = DataFrame(np.random.randn(3, 3), columns=cols)
df[False] = ["a", "b", "c"]
expected_cols = Index([1, 2, 3, False], dtype=object)
if dtype == "f8":
expected_cols = Index([1.0, 2.0, 3.0, False], dtype=object)
tm.assert_index_equal(df.columns, expected_cols)
@pytest.mark.parametrize("indexer", ["B", ["B"]])
def test_setitem_frame_length_0_str_key(self, indexer):
# GH#38831
df = DataFrame(columns=["A", "B"])
other = DataFrame({"B": [1, 2]})
df[indexer] = other
expected = DataFrame({"A": [np.nan] * 2, "B": [1, 2]})
expected["A"] = expected["A"].astype("object")
tm.assert_frame_equal(df, expected)
def test_setitem_frame_duplicate_columns(self, using_array_manager):
# GH#15695
cols = ["A", "B", "C"] * 2
df = DataFrame(index=range(3), columns=cols)
df.loc[0, "A"] = (0, 3)
df.loc[:, "B"] = (1, 4)
df["C"] = (2, 5)
expected = DataFrame(
[
[0, 1, 2, 3, 4, 5],
[np.nan, 1, 2, np.nan, 4, 5],
[np.nan, 1, 2, np.nan, 4, 5],
],
dtype="object",
)
if using_array_manager:
# setitem replaces column so changes dtype
expected.columns = cols
expected["C"] = expected["C"].astype("int64")
# TODO(ArrayManager) .loc still overwrites
expected["B"] = expected["B"].astype("int64")
else:
# set these with unique columns to be extra-unambiguous
expected[2] = expected[2].astype(np.int64)
expected[5] = expected[5].astype(np.int64)
expected.columns = cols
tm.assert_frame_equal(df, expected)
def test_setitem_frame_duplicate_columns_size_mismatch(self):
# GH#39510
cols = ["A", "B", "C"] * 2
df = DataFrame(index=range(3), columns=cols)
with pytest.raises(ValueError, match="Columns must be same length as key"):
df[["A"]] = (0, 3, 5)
df2 = df.iloc[:, :3] # unique columns
with pytest.raises(ValueError, match="Columns must be same length as key"):
df2[["A"]] = (0, 3, 5)
@pytest.mark.parametrize("cols", [["a", "b", "c"], ["a", "a", "a"]])
def test_setitem_df_wrong_column_number(self, cols):
# GH#38604
df = DataFrame([[1, 2, 3]], columns=cols)
rhs = DataFrame([[10, 11]], columns=["d", "e"])
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
df["a"] = rhs
def test_setitem_listlike_indexer_duplicate_columns(self):
# GH#38604
df = DataFrame([[1, 2, 3]], columns=["a", "b", "b"])
rhs = DataFrame([[10, 11, 12]], columns=["a", "b", "b"])
df[["a", "b"]] = rhs
expected = DataFrame([[10, 11, 12]], columns=["a", "b", "b"])
tm.assert_frame_equal(df, expected)
df[["c", "b"]] = rhs
expected = DataFrame([[10, 11, 12, 10]], columns=["a", "b", "b", "c"])
tm.assert_frame_equal(df, expected)
def test_setitem_listlike_indexer_duplicate_columns_not_equal_length(self):
# GH#39403
df = DataFrame([[1, 2, 3]], columns=["a", "b", "b"])
rhs = DataFrame([[10, 11]], columns=["a", "b"])
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
df[["a", "b"]] = rhs
def test_setitem_intervals(self):
df = DataFrame({"A": range(10)})
ser = cut(df["A"], 5)
assert isinstance(ser.cat.categories, IntervalIndex)
# B & D end up as Categoricals
# the remainder are converted to in-line objects
# containing an IntervalIndex.values
df["B"] = ser
df["C"] = np.array(ser)
df["D"] = ser.values
df["E"] = np.array(ser.values)
df["F"] = ser.astype(object)
assert is_categorical_dtype(df["B"].dtype)
assert is_interval_dtype(df["B"].cat.categories)
assert is_categorical_dtype(df["D"].dtype)
assert is_interval_dtype(df["D"].cat.categories)
# These go through the Series constructor and so get inferred back
# to IntervalDtype
assert is_interval_dtype(df["C"])
assert is_interval_dtype(df["E"])
# But the Series constructor doesn't do inference on Series objects,
# so setting df["F"] doesn't get cast back to IntervalDtype
assert is_object_dtype(df["F"])
# they compare equal as Index
# when converted to numpy objects
c = lambda x: Index(np.array(x))
tm.assert_index_equal(c(df.B), c(df.B))
tm.assert_index_equal(c(df.B), c(df.C), check_names=False)
tm.assert_index_equal(c(df.B), c(df.D), check_names=False)
tm.assert_index_equal(c(df.C), c(df.D), check_names=False)
# B & D are the same Series
tm.assert_series_equal(df["B"], df["B"])
tm.assert_series_equal(df["B"], df["D"], check_names=False)
# C & E are the same Series
tm.assert_series_equal(df["C"], df["C"])
tm.assert_series_equal(df["C"], df["E"], check_names=False)
def test_setitem_categorical(self):
# GH#35369
df = DataFrame({"h": Series(list("mn")).astype("category")})
df.h = df.h.cat.reorder_categories(["n", "m"])
expected = DataFrame(
{"h": Categorical(["m", "n"]).reorder_categories(["n", "m"])}
)
tm.assert_frame_equal(df, expected)
def test_setitem_with_empty_listlike(self):
# GH#17101
index = Index([], name="idx")
result = DataFrame(columns=["A"], index=index)
result["A"] = []
expected = DataFrame(columns=["A"], index=index)
tm.assert_index_equal(result.index, expected.index)
@pytest.mark.parametrize(
"cols, values, expected",
[
(["C", "D", "D", "a"], [1, 2, 3, 4], 4), # with duplicates
(["D", "C", "D", "a"], [1, 2, 3, 4], 4), # mixed order
(["C", "B", "B", "a"], [1, 2, 3, 4], 4), # other duplicate cols
(["C", "B", "a"], [1, 2, 3], 3), # no duplicates
(["B", "C", "a"], [3, 2, 1], 1), # alphabetical order
(["C", "a", "B"], [3, 2, 1], 2), # in the middle
],
)
def test_setitem_same_column(self, cols, values, expected):
# GH#23239
df = DataFrame([values], columns=cols)
df["a"] = df["a"]
result = df["a"].values[0]
assert result == expected
def test_setitem_multi_index(self):
# GH#7655, test that assigning to a sub-frame of a frame
# with multi-index columns aligns both rows and columns
it = ["jim", "joe", "jolie"], ["first", "last"], ["left", "center", "right"]
cols = MultiIndex.from_product(it)
index = date_range("20141006", periods=20)
vals = np.random.randint(1, 1000, (len(index), len(cols)))
df = DataFrame(vals, columns=cols, index=index)
i, j = df.index.values.copy(), it[-1][:]
np.random.shuffle(i)
df["jim"] = df["jolie"].loc[i, ::-1]
tm.assert_frame_equal(df["jim"], df["jolie"])
np.random.shuffle(j)
df[("joe", "first")] = df[("jolie", "last")].loc[i, j]
tm.assert_frame_equal(df[("joe", "first")], df[("jolie", "last")])
np.random.shuffle(j)
df[("joe", "last")] = df[("jolie", "first")].loc[i, j]
tm.assert_frame_equal(df[("joe", "last")], df[("jolie", "first")])
@pytest.mark.parametrize(
"columns,box,expected",
[
(
["A", "B", "C", "D"],
7,
DataFrame(
[[7, 7, 7, 7], [7, 7, 7, 7], [7, 7, 7, 7]],
columns=["A", "B", "C", "D"],
),
),
(
["C", "D"],
[7, 8],
DataFrame(
[[1, 2, 7, 8], [3, 4, 7, 8], [5, 6, 7, 8]],
columns=["A", "B", "C", "D"],
),
),
(
["A", "B", "C"],
np.array([7, 8, 9], dtype=np.int64),
DataFrame([[7, 8, 9], [7, 8, 9], [7, 8, 9]], columns=["A", "B", "C"]),
),
(
["B", "C", "D"],
[[7, 8, 9], [10, 11, 12], [13, 14, 15]],
DataFrame(
[[1, 7, 8, 9], [3, 10, 11, 12], [5, 13, 14, 15]],
columns=["A", "B", "C", "D"],
),
),
(
["C", "A", "D"],
np.array([[7, 8, 9], [10, 11, 12], [13, 14, 15]], dtype=np.int64),
DataFrame(
[[8, 2, 7, 9], [11, 4, 10, 12], [14, 6, 13, 15]],
columns=["A", "B", "C", "D"],
),
),
(
["A", "C"],
DataFrame([[7, 8], [9, 10], [11, 12]], columns=["A", "C"]),
DataFrame(
[[7, 2, 8], [9, 4, 10], [11, 6, 12]], columns=["A", "B", "C"]
),
),
],
)
def test_setitem_list_missing_columns(self, columns, box, expected):
# GH#29334
df = DataFrame([[1, 2], [3, 4], [5, 6]], columns=["A", "B"])
df[columns] = box
tm.assert_frame_equal(df, expected)
def test_setitem_list_of_tuples(self, float_frame):
tuples = list(zip(float_frame["A"], float_frame["B"]))
float_frame["tuples"] = tuples
result = float_frame["tuples"]
expected = Series(tuples, index=float_frame.index, name="tuples")
tm.assert_series_equal(result, expected)
def test_setitem_iloc_generator(self):
# GH#39614
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
indexer = (x for x in [1, 2])
df.iloc[indexer] = 1
expected = DataFrame({"a": [1, 1, 1], "b": [4, 1, 1]})
tm.assert_frame_equal(df, expected)
def test_setitem_iloc_two_dimensional_generator(self):
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
indexer = (x for x in [1, 2])
df.iloc[indexer, 1] = 1
expected = DataFrame({"a": [1, 2, 3], "b": [4, 1, 1]})
tm.assert_frame_equal(df, expected)
class TestSetitemTZAwareValues:
@pytest.fixture
def idx(self):
naive = DatetimeIndex(["2013-1-1 13:00", "2013-1-2 14:00"], name="B")
idx = naive.tz_localize("US/Pacific")
return idx
@pytest.fixture
def expected(self, idx):
expected = Series(np.array(idx.tolist(), dtype="object"), name="B")
assert expected.dtype == idx.dtype
return expected
def test_setitem_dt64series(self, idx, expected):
# convert to utc
df = DataFrame(np.random.randn(2, 1), columns=["A"])
df["B"] = idx
with tm.assert_produces_warning(FutureWarning) as m:
df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
msg = "do 'idx.tz_convert(None)' before calling"
assert msg in str(m[0].message)
result = df["B"]
comp = Series(idx.tz_convert("UTC").tz_localize(None), name="B")
tm.assert_series_equal(result, comp)
def test_setitem_datetimeindex(self, idx, expected):
# setting a DataFrame column with a tzaware DTI retains the dtype
df = DataFrame(np.random.randn(2, 1), columns=["A"])
# assign to frame
df["B"] = idx
result = df["B"]
tm.assert_series_equal(result, expected)
def test_setitem_object_array_of_tzaware_datetimes(self, idx, expected):
# setting a DataFrame column with a tzaware DTI retains the dtype
df = DataFrame(np.random.randn(2, 1), columns=["A"])
# object array of datetimes with a tz
df["B"] = idx.to_pydatetime()
result = df["B"]
tm.assert_series_equal(result, expected)
class TestDataFrameSetItemWithExpansion:
# TODO(ArrayManager) update parent (_maybe_update_cacher)
@td.skip_array_manager_not_yet_implemented
def test_setitem_listlike_views(self):
# GH#38148
df = DataFrame({"a": [1, 2, 3], "b": [4, 4, 6]})
# get one column as a view of df
ser = df["a"]
# add columns with list-like indexer
df[["c", "d"]] = np.array([[0.1, 0.2], [0.3, 0.4], [0.4, 0.5]])
# edit in place the first column to check view semantics
df.iloc[0, 0] = 100
expected = Series([100, 2, 3], name="a")
tm.assert_series_equal(ser, expected)
def test_setitem_string_column_numpy_dtype_raising(self):
# GH#39010
df = DataFrame([[1, 2], [3, 4]])
df["0 - Name"] = [5, 6]
expected = DataFrame([[1, 2, 5], [3, 4, 6]], columns=[0, 1, "0 - Name"])
tm.assert_frame_equal(df, expected)
def test_setitem_empty_df_duplicate_columns(self):
# GH#38521
df = DataFrame(columns=["a", "b", "b"], dtype="float64")
df.loc[:, "a"] = list(range(2))
expected = DataFrame(
[[0, np.nan, np.nan], [1, np.nan, np.nan]], columns=["a", "b", "b"]
)
tm.assert_frame_equal(df, expected)
def test_setitem_with_expansion_categorical_dtype(self):
# assignment
df = DataFrame(
{"value": np.array(np.random.randint(0, 10000, 100), dtype="int32")}
)
labels = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
df = df.sort_values(by=["value"], ascending=True)
ser = cut(df.value, range(0, 10500, 500), right=False, labels=labels)
cat = ser.values
# setting with a Categorical
df["D"] = cat
str(df)
result = df.dtypes
expected = Series(
[np.dtype("int32"), CategoricalDtype(categories=labels, ordered=False)],
index=["value", "D"],
)
tm.assert_series_equal(result, expected)
# setting with a Series
df["E"] = ser
str(df)
result = df.dtypes
expected = Series(
[
np.dtype("int32"),
CategoricalDtype(categories=labels, ordered=False),
CategoricalDtype(categories=labels, ordered=False),
],
index=["value", "D", "E"],
)
tm.assert_series_equal(result, expected)
result1 = df["D"]
result2 = df["E"]
tm.assert_categorical_equal(result1._mgr.array, cat)
# sorting
ser.name = "E"
tm.assert_series_equal(result2.sort_index(), ser.sort_index())
def test_setitem_scalars_no_index(self):
# GH#16823 / GH#17894
df = DataFrame()
df["foo"] = 1
expected = DataFrame(columns=["foo"]).astype(np.int64)
tm.assert_frame_equal(df, expected)
def test_setitem_newcol_tuple_key(self, float_frame):
assert (
"A",
"B",
) not in float_frame.columns
float_frame["A", "B"] = float_frame["A"]
assert ("A", "B") in float_frame.columns
result = float_frame["A", "B"]
expected = float_frame["A"]
tm.assert_series_equal(result, expected, check_names=False)
def test_frame_setitem_newcol_timestamp(self):
# GH#2155
columns = date_range(start="1/1/2012", end="2/1/2012", freq=BDay())
data = DataFrame(columns=columns, index=range(10))
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works, mostly a smoke-test
assert np.isnan(data[ts]).all()
class TestDataFrameSetItemSlicing:
def test_setitem_slice_position(self):
# GH#31469
df = DataFrame(np.zeros((100, 1)))
df[-4:] = 1
arr = np.zeros((100, 1))
arr[-4:] = 1
expected = DataFrame(arr)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("indexer", [tm.setitem, tm.iloc])
@pytest.mark.parametrize("box", [Series, np.array, list])
@pytest.mark.parametrize("n", [1, 2, 3])
def test_setitem_broadcasting_rhs(self, n, box, indexer):
# GH#40440
# TODO: Add pandas array as box after GH#40933 is fixed
df = DataFrame([[1, 3, 5]] + [[2, 4, 6]] * n, columns=["a", "b", "c"])
indexer(df)[1:] = box([10, 11, 12])
expected = DataFrame([[1, 3, 5]] + [[10, 11, 12]] * n, columns=["a", "b", "c"])
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("indexer", [tm.setitem, tm.iloc])
@pytest.mark.parametrize("box", [Series, np.array, list])
@pytest.mark.parametrize("n", [1, 2, 3])
def test_setitem_broadcasting_rhs_mixed_dtypes(self, n, box, indexer):
# GH#40440
# TODO: Add pandas array as box after GH#40933 is fixed
df = DataFrame(
[[1, 3, 5], ["x", "y", "z"]] + [[2, 4, 6]] * n, columns=["a", "b", "c"]
)
indexer(df)[1:] = box([10, 11, 12])
expected = DataFrame(
[[1, 3, 5]] + [[10, 11, 12]] * (n + 1),
columns=["a", "b", "c"],
dtype="object",
)
tm.assert_frame_equal(df, expected)
class TestDataFrameSetItemCallable:
def test_setitem_callable(self):
# GH#12533
df = DataFrame({"A": [1, 2, 3, 4], "B": [5, 6, 7, 8]})
df[lambda x: "A"] = [11, 12, 13, 14]
exp = DataFrame({"A": [11, 12, 13, 14], "B": [5, 6, 7, 8]})
tm.assert_frame_equal(df, exp)
def test_setitem_other_callable(self):
# GH#13299
def inc(x):
return x + 1
df = DataFrame([[-1, 1], [1, -1]])
df[df > 0] = inc
expected = DataFrame([[-1, inc], [inc, -1]])
tm.assert_frame_equal(df, expected)
class TestDataFrameSetItemBooleanMask:
@td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values
@pytest.mark.parametrize(
"mask_type",
[lambda df: df > np.abs(df) / 2, lambda df: (df > np.abs(df) / 2).values],
ids=["dataframe", "array"],
)
def test_setitem_boolean_mask(self, mask_type, float_frame):
# Test for issue #18582
df = float_frame.copy()
mask = mask_type(df)
# index with boolean mask
result = df.copy()
result[mask] = np.nan
expected = df.copy()
expected.values[np.array(mask)] = np.nan
tm.assert_frame_equal(result, expected)
import numpy as np
from scipy.io import loadmat
import pandas as pd
import datetime as date
from dateutil.relativedelta import relativedelta
cols = ['age', 'gender', 'path', 'face_score1', 'face_score2']
imdb_mat = 'imdb_crop/imdb.mat'
wiki_mat = 'wiki_crop/wiki.mat'
imdb_data = loadmat(imdb_mat)
wiki_data = loadmat(wiki_mat)
del imdb_mat, wiki_mat
imdb = imdb_data['imdb']
wiki = wiki_data['wiki']
imdb_photo_taken = imdb[0][0][1][0]
imdb_full_path = imdb[0][0][2][0]
imdb_gender = imdb[0][0][3][0]
imdb_face_score1 = imdb[0][0][6][0]
imdb_face_score2 = imdb[0][0][7][0]
wiki_photo_taken = wiki[0][0][1][0]
wiki_full_path = wiki[0][0][2][0]
wiki_gender = wiki[0][0][3][0]
wiki_face_score1 = wiki[0][0][6][0]
wiki_face_score2 = wiki[0][0][7][0]
imdb_path = []
wiki_path = []
for path in imdb_full_path:
imdb_path.append('imdb_crop/' + path[0])
for path in wiki_full_path:
wiki_path.append('wiki_crop/' + path[0])
imdb_genders = []
wiki_genders = []
for n in range(len(imdb_gender)):
if imdb_gender[n] == 1:
imdb_genders.append('male')
else:
imdb_genders.append('female')
for n in range(len(wiki_gender)):
if wiki_gender[n] == 1:
wiki_genders.append('male')
else:
wiki_genders.append('female')
imdb_dob = []
wiki_dob = []
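# Date of birth is parsed from the file names. For IMDB, the fourth
# underscore-separated token holds the date; single-digit months/days are
# zero-padded and "00" placeholders are clamped to "01" so the string parses.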
for file in imdb_path:
temp = file.split('_')[3]
temp = temp.split('-')
if len(temp[1]) == 1:
temp[1] = '0' + temp[1]
if len(temp[2]) == 1:
temp[2] = '0' + temp[2]
if temp[1] == '00':
temp[1] = '01'
if temp[2] == '00':
temp[2] = '01'
imdb_dob.append('-'.join(temp))
for file in wiki_path:
wiki_dob.append(file.split('_')[2])
imdb_age = []
wiki_age = []
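# Age is computed as the whole-year difference between the date of birth and
# the year the photo was taken; rows whose dates fail to parse get age -1.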
for i in range(len(imdb_dob)):
try:
d1 = date.datetime.strptime(imdb_dob[i][0:10], '%Y-%m-%d')
d2 = date.datetime.strptime(str(imdb_photo_taken[i]), '%Y')
rdelta = relativedelta(d2, d1)
diff = rdelta.years
except Exception as ex:
print(ex)
diff = -1
imdb_age.append(diff)
for i in range(len(wiki_dob)):
try:
d1 = date.datetime.strptime(wiki_dob[i][0:10], '%Y-%m-%d')
d2 = date.datetime.strptime(str(wiki_photo_taken[i]), '%Y')
rdelta = relativedelta(d2, d1)
diff = rdelta.years
except Exception as ex:
print(ex)
diff = -1
wiki_age.append(diff)
final_imdb = np.vstack((imdb_age, imdb_genders, imdb_path, imdb_face_score1, imdb_face_score2)).T
final_wiki = np.vstack((wiki_age, wiki_genders, wiki_path, wiki_face_score1, wiki_face_score2)).T
final_imdb_df = pd.DataFrame(final_imdb)
final_wiki_df = pd.DataFrame(final_wiki)
final_imdb_df.columns = cols
final_wiki_df.columns = cols
meta = pd.concat((final_imdb_df, final_wiki_df))
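# A possible continuation (not part of the original script, shown only as a
# hedged sketch): drop rows where no face was detected and save the combined
# metadata. Treating '-inf' as the "no face" sentinel and the 'meta.csv'
# output name are assumptions made for illustration.
meta = meta[meta['face_score1'] != '-inf']
meta = meta.sample(frac=1).reset_index(drop=True)  # shuffle rows
meta.to_csv('meta.csv', index=False)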
import numpy as np
import pandas as pd
import pytest
from rayml.data_checks import (
DataCheckActionCode,
DataCheckActionOption,
DataCheckMessageCode,
DataCheckWarning,
IDColumnsDataCheck,
)
id_data_check_name = IDColumnsDataCheck.name
def test_id_cols_data_check_init():
id_cols_check = IDColumnsDataCheck()
assert id_cols_check.id_threshold == 1.0
id_cols_check = IDColumnsDataCheck(id_threshold=0.0)
assert id_cols_check.id_threshold == 0
id_cols_check = IDColumnsDataCheck(id_threshold=0.5)
assert id_cols_check.id_threshold == 0.5
id_cols_check = IDColumnsDataCheck(id_threshold=1.0)
assert id_cols_check.id_threshold == 1.0
with pytest.raises(
ValueError, match="id_threshold must be a float between 0 and 1, inclusive."
):
IDColumnsDataCheck(id_threshold=-0.1)
with pytest.raises(
ValueError, match="id_threshold must be a float between 0 and 1, inclusive."
):
IDColumnsDataCheck(id_threshold=1.1)
def test_id_columns_warning():
X_dict = {
"col_1_id": [0, 1, 2, 3],
"col_2": [2, 3, 4, 5],
"col_3_id": [1, 1, 2, 3],
"Id": [3, 1, 2, 0],
"col_5": [0, 0, 1, 2],
"col_6": [0.1, 0.2, 0.3, 0.4],
}
X = pd.DataFrame.from_dict(X_dict)
id_cols_check = IDColumnsDataCheck(id_threshold=0.95)
assert id_cols_check.validate(X) == [
DataCheckWarning(
message="Columns 'Id', 'col_1_id', 'col_2', 'col_3_id' are 95.0% or more likely to be an ID column",
data_check_name=id_data_check_name,
message_code=DataCheckMessageCode.HAS_ID_COLUMN,
details={"columns": ["Id", "col_1_id", "col_2", "col_3_id"]},
action_options=[
DataCheckActionOption(
DataCheckActionCode.DROP_COL,
data_check_name=id_data_check_name,
metadata={"columns": ["Id", "col_1_id", "col_2", "col_3_id"]},
)
],
).to_dict(),
]
X = pd.DataFrame.from_dict(X_dict)
id_cols_check = IDColumnsDataCheck(id_threshold=1.0)
assert id_cols_check.validate(X) == [
DataCheckWarning(
message="Columns 'Id', 'col_1_id' are 100.0% or more likely to be an ID column",
data_check_name=id_data_check_name,
message_code=DataCheckMessageCode.HAS_ID_COLUMN,
details={"columns": ["Id", "col_1_id"]},
action_options=[
DataCheckActionOption(
DataCheckActionCode.DROP_COL,
data_check_name=id_data_check_name,
metadata={"columns": ["Id", "col_1_id"]},
)
],
).to_dict(),
]
def test_id_columns_strings():
X_dict = {
"col_1_id": ["a", "b", "c", "d"],
"col_2": ["w", "x", "y", "z"],
"col_3_id": [
"123456789012345",
"234567890123456",
"3456789012345678",
"45678901234567",
],
"Id": ["z", "y", "x", "a"],
"col_5": ["0", "0", "1", "2"],
"col_6": [0.1, 0.2, 0.3, 0.4],
}
X = pd.DataFrame.from_dict(X_dict)
X.ww.init(
logical_types={
"col_1_id": "categorical",
"col_2": "categorical",
"Id": "categorical",
"col_5": "categorical",
}
)
id_cols_check = IDColumnsDataCheck(id_threshold=0.95)
assert id_cols_check.validate(X) == [
DataCheckWarning(
message="Columns 'Id', 'col_1_id', 'col_2', 'col_3_id' are 95.0% or more likely to be an ID column",
data_check_name=id_data_check_name,
message_code=DataCheckMessageCode.HAS_ID_COLUMN,
details={"columns": ["Id", "col_1_id", "col_2", "col_3_id"]},
action_options=[
DataCheckActionOption(
DataCheckActionCode.DROP_COL,
data_check_name=id_data_check_name,
metadata={"columns": ["Id", "col_1_id", "col_2", "col_3_id"]},
)
],
).to_dict(),
]
id_cols_check = IDColumnsDataCheck(id_threshold=1.0)
assert id_cols_check.validate(X) == [
DataCheckWarning(
message="Columns 'Id', 'col_1_id' are 100.0% or more likely to be an ID column",
data_check_name=id_data_check_name,
message_code=DataCheckMessageCode.HAS_ID_COLUMN,
details={"columns": ["Id", "col_1_id"]},
action_options=[
DataCheckActionOption(
DataCheckActionCode.DROP_COL,
data_check_name=id_data_check_name,
metadata={"columns": ["Id", "col_1_id"]},
)
],
).to_dict(),
]
def test_id_cols_data_check_input_formats():
id_cols_check = IDColumnsDataCheck(id_threshold=0.8)
# test empty pd.DataFrame
assert id_cols_check.validate(pd.DataFrame())
from typing import Optional
import numpy as np
import pandas as pd
import pytest
from pytest import approx
from evidently.pipeline import column_mapping
from evidently.analyzers.classification_performance_analyzer import ClassificationPerformanceAnalyzer
from evidently.analyzers.classification_performance_analyzer import ClassificationPerformanceMetrics
from evidently.analyzers.classification_performance_analyzer import ConfusionMatrix
@pytest.fixture
def analyzer() -> ClassificationPerformanceAnalyzer:
return ClassificationPerformanceAnalyzer()
@pytest.mark.parametrize(
"reference_data, current_data, data_mapping",
(
# prediction dataset only, current dataset is missing
(
pd.DataFrame({"target": [1, 0, 1, 1, 0, 1], "prediction": [1, 1, 0, 1, 0, 1]}),
None,
column_mapping.ColumnMapping(),
),
# prediction dataset is missing
(
pd.DataFrame({"target": [1, 0, 1, 1, 0, 1], "prediction": [1, 1, 0, 1, 0, 1]}),
pd.DataFrame({"target": [1, 0, 0, 1, 1, 1], "prediction": [0, 1, 0, 1, 0, 0]})
import glob
import pandas as pd
files = glob.glob('Corpus_mda/*')
files.sort()
df_agg1 = pd.DataFrame()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 3 17:14:53 2019
@author: liuhongbing
"""
import pandas as pd
import numpy as np
from scipy import stats
from sklearn.metrics import precision_score, recall_score, f1_score, confusion_matrix, roc_curve, auc
import tensorflow as tf
from sklearn.model_selection import train_test_split
# Load the dataset
def read_data(file_path):
column_names = ['user-id', 'activity', 'timestamp', 'x-axis', 'y-axis', 'z-axis']
data = pd.read_csv(file_path, header=None, names=column_names)
data['z-axis'] = data['z-axis'].apply(lambda x : str(x).split(";")[0])
data['z-axis'] = data['z-axis'].astype('float32')
return data
# Standardize the data (z-score normalization)
def feature_normalize(dataset):
mu = np.mean(dataset, axis=0)
print('mu:',mu)
sigma = np.std(dataset, axis=0)
print('sigma:',sigma)
return (dataset - mu) / sigma
# Create time windows of 90 x 50 ms (about 4.5 seconds), advancing 45 records each step, i.e. half-overlapping windows.
def windows(data, size):
start = 0
while start < data.count():
yield start, start + size
start += (size / 2)
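# Note: with the step of size/2 the windows overlap by 50%, and `start` becomes
# a float after the first step; segment_signal() casts the bounds back to int.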
# Build the input data: each sample holds 90 consecutive records of the x, y, z axes.
# `stats.mode` returns the most frequent activity among those 90 records, which is
# used as the label for the whole group. This is debatable: one could instead build
# each input group entirely from records of a single activity.
def segment_signal(data, window_size=128):
segments = np.empty((0, window_size, 3))
labels = np.empty((0))
print (len(data['timestamp']))
count = 0
for (start, end) in windows(data['timestamp'], window_size):
print (count)
start = int(start)
end = int(end)
count += 1
x = data["x-axis"][start:end]
y = data["y-axis"][start:end]
z = data["z-axis"][start:end]
if (len(data['timestamp'][start:end]) == window_size):
segments = np.vstack([segments, np.dstack([x, y, z])])
labels = np.append(labels, stats.mode(data["activity"][start:end])[0][0])
else:
return segments, labels
return segments, labels
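# Worked example of the helpers above: 256 records with window_size=128 and a
# 64-record step yield three full windows, so segments.shape would be (3, 128, 3)
# and labels would hold one activity per window.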
def get_train_test():
root = "/Users/liuhongbing/Documents/tensorflow/data/WISDM_ar_v1.1/"
dataset2 = read_data(root +'WISDM_ar_v1.1_raw.txt')
dataset2.fillna(0, inplace=True)
print("dataset2:", len(dataset2))
dataset = dataset2[:300000]
print("dataset:", len(dataset))
dataset['x-axis'] = feature_normalize(dataset['x-axis'])
dataset['y-axis'] = feature_normalize(dataset['y-axis'])
dataset['z-axis'] = feature_normalize(dataset['z-axis'])
segments, labels = segment_signal(dataset)
labels = np.asarray(pd.get_dummies(labels))
import copy
import math
import sys
import numpy.random as rnd
from datetime import datetime
import pandas as pd
from datetime import timedelta
import traceback
from heuristic.construction.construction import ConstructionHeuristic
from config.construction_config import *
from heuristic.improvement.reopt.reopt_repair_generator import ReOptRepairGenerator
class ReOptOperators:
def __init__(self, alns, sim_clock, vehicle_clocks):
self.destruction_degree = alns.destruction_degree
self.constructor = alns.constructor
self.T_ij = self.constructor.T_ij
self.reopt_repair_generator = ReOptRepairGenerator(self.constructor)
self.sim_clock = sim_clock
self.vehicle_clocks = vehicle_clocks
# Find number of requests to remove based on degree of destruction
def nodes_to_remove(self, route_plan):
# Count number of requests in route_plan
total_requests = 0
for row in route_plan:
for col in row:
if col[0]:
total_requests += 0.5
# Calculate number of requests to remove
num_remove = math.ceil(total_requests * self.destruction_degree)
return num_remove
def random_removal(self, current_route_plan, current_infeasible_set):
destroyed_route_plan = copy.deepcopy(current_route_plan)
to_remove = []
removed_requests = []
index_removed_requests = []
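# Node layout (inferred from how nodes are used below): node[0] is the request
# id (an integer for pickups, id + 0.5 for drop-offs), node[2] the deviation,
# node[5] the request data and node[6] the node's index in the destroyed route plan.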
possible_removals = self.find_possible_removals(destroyed_route_plan)
empty = 0
for vehicle in possible_removals:
empty += len(vehicle)
if not empty:
return current_route_plan, removed_requests, index_removed_requests, False
# Number of requests to remove
num_remove = self.nodes_to_remove(possible_removals)
# Find the requests to remove
while len(to_remove)/2 < num_remove:
# Pick random node in route plan to remove and to compare other nodes to
rows = [i for i in range(0, len(possible_removals)) if len(possible_removals[i]) > 0]
rnd.shuffle(rows)
for row in rows:
if len(possible_removals[row]) == 2:
col = 0
break
else:
col = rnd.randint(
0, len(possible_removals[row]))
break
node = possible_removals[row][col]
destroy_node = destroyed_route_plan[row][node[6]]
# Find col-index of associated pickup/drop-off node
index, pickup = self.find_associated_node(
row, col, possible_removals)
associated_node = possible_removals[row][index]
destroy_associated_node = destroyed_route_plan[row][associated_node[6]]
# Skip already added nodes
if [node, row, destroy_node] in to_remove or [associated_node, row, destroy_associated_node] in to_remove:
continue
# Add both pickup and drop-off node to to_remove
to_remove.append([node, row, destroy_node])
to_remove.append([associated_node, row, destroy_associated_node])
# Remove nearest nodes from destroyed route plan and from possible_removals
for n in to_remove:
index_removed_requests.append(
(n[0][0], n[1], n[0][6]))
for n in to_remove:
possible_removals[n[1]].remove(n[0])
destroyed_route_plan[n[1]].remove(n[2])
# Add request id to removed_requests
if not n[0][0] % int(n[0][0]):
removed_requests.append((n[0][0], n[0][5]))
return destroyed_route_plan, removed_requests, index_removed_requests, True
def worst_deviation_removal(self, current_route_plan, current_infeasible_set):
destroyed_route_plan = copy.deepcopy(current_route_plan)
to_remove = []
removed_requests = []
index_removed_requests = []
possible_removals = self.find_possible_removals(destroyed_route_plan)
empty = 0
for vehicle in possible_removals:
empty += len(vehicle)
if not empty:
return current_route_plan, removed_requests, index_removed_requests, False
# Number of requests to remove
num_remove = self.nodes_to_remove(possible_removals)
# Find the requests to remove
for j in range(num_remove):
worst_deviation = timedelta(0)
worst_node = None
rows = [i for i in range(0, len(possible_removals)) if len(possible_removals[i]) > 0]
for row in rows:
for col in range(0, len(possible_removals[row])):
temp = possible_removals[row][col]
destroyed_temp = destroyed_route_plan[row][temp[6]]
# Skip already added nodes
if [temp, row, destroyed_temp] in to_remove:
continue
# Find associated drop off/pickup node
index, pickup = self.find_associated_node(
row, col, possible_removals)
associated_temp = possible_removals[row][index]
destroyed_associated_temp = destroyed_route_plan[row][associated_temp[6]]
temp_deviation = temp[2]
associated_temp_deviation = associated_temp[2]
if temp_deviation < timedelta(0):
temp_deviation = timedelta(
seconds=-temp_deviation.total_seconds())
if associated_temp_deviation < timedelta(0):
associated_temp_deviation = timedelta(
seconds=-associated_temp_deviation.total_seconds())
# Calculate total deviation for request
deviation = temp_deviation + associated_temp_deviation
# Update worst deviation so far
if deviation > worst_deviation and deviation > timedelta(0):
worst_deviation = deviation
worst_node = [temp, row, destroyed_temp]
worst_associated_node = [
associated_temp, row, destroyed_associated_temp]
# Add node with worst deviation to list of nodes to remove
if worst_node is not None and worst_node in to_remove:
continue
if worst_node is not None:
to_remove.append(worst_node)
to_remove.append(worst_associated_node)
# If not enough nodes have deviation > 0, remove the rest randomly
if len(to_remove)/2 < num_remove:
to_remove = self.worst_deviation_random_removal(
destroyed_route_plan, possible_removals, num_remove, to_remove)
# Remove nearest nodes from destroyed route plan and from possible_removals
for n in to_remove:
index_removed_requests.append(
(n[0][0], n[1], n[0][6]))
for n in to_remove:
possible_removals[n[1]].remove(n[0])
destroyed_route_plan[n[1]].remove(n[2])
# Add request id to removed_requests
if not n[0][0] % int(n[0][0]):
removed_requests.append((n[0][0], n[0][5]))
return destroyed_route_plan, removed_requests, index_removed_requests, True
# Related in travel time
def distance_related_removal(self, current_route_plan, current_infeasible_set):
destroyed_route_plan = copy.deepcopy(current_route_plan)
removed_requests = []
index_removed_requests = []
possible_removals = self.find_possible_removals(destroyed_route_plan)
empty = 0
for vehicle in possible_removals:
empty += len(vehicle)
if not empty:
return current_route_plan, removed_requests, index_removed_requests, False
# Number of requests to remove
num_remove = self.nodes_to_remove(possible_removals)
if len(current_infeasible_set) != 0:
# Pick random node in infeasible_set to compare other nodes to - always pickup nodes
initial_node = current_infeasible_set[rnd.randint(
0, len(current_infeasible_set))]
node = self.get_pickup(initial_node)
pickup = True
# Find associated node - dropoff node
associated_node = self.get_dropoff(initial_node)
to_remove = []
else:
# Pick random node in route plan to remove and to compare other nodes to
rows = [i for i in range(0, len(possible_removals)) if len(possible_removals[i]) > 0]
rnd.shuffle(rows)
for row_index in rows:
if len(possible_removals[row_index]) == 2:
col_index = 0
break
else:
col_index = rnd.randint(
0, len(possible_removals[row_index]))
break
node = possible_removals[row_index][col_index]
destroy_node = destroyed_route_plan[row_index][node[6]]
# Find associated node
index, pickup = self.find_associated_node(
row_index, col_index, possible_removals)
associated_node = possible_removals[row_index][index]
destroy_associated_node = destroyed_route_plan[row_index][associated_node[6]]
# List of nodes to remove
to_remove = [[node, row_index, destroy_node], [
associated_node, row_index, destroy_associated_node]]
# Remaining number of nodes to remove
num_remove -= 1
# Find the requests to remove
for j in range(num_remove):
# TODO: figure out what this value should initially be set to
best_diff = 48 * 60 * 60
rows = [i for i in range(0, len(possible_removals)) if len(possible_removals[i]) > 0]
for row in rows:
for col in range(0, len(possible_removals[row])):
# Drop off/pickup of request to compare
temp = possible_removals[row][col]
destroyed_temp = destroyed_route_plan[row][temp[6]]
# Skip already added nodes
if [temp, row, destroyed_temp] in to_remove:
continue
# Find associated drop off/pickup node of request to compare
temp_index, temp_pickup = self.find_associated_node(
row, col, possible_removals)
associated_temp = possible_removals[row][temp_index]
destroyed_associated_temp = destroyed_route_plan[row][associated_temp[6]]
# Find difference in distance between pickup and drop-off of requests
if (temp_pickup == pickup) & pickup:
diff = self.travel_time_difference(temp[0], node[0])
elif (temp_pickup == pickup) & (not pickup):
diff = self.travel_time_difference(
associated_temp[0], associated_node[0])
elif (temp_pickup != pickup) & pickup:
diff = self.travel_time_difference(
associated_temp[0], node[0])
else:
diff = self.travel_time_difference(
temp[0], associated_node[0])
# Compare with smallest difference in current iteration
if diff < best_diff:
best_diff = diff
nearest_node = [temp, row, destroyed_temp]
nearest_associated_node = [
associated_temp, row, destroyed_associated_temp]
to_remove.append(nearest_node)
to_remove.append(nearest_associated_node)
# Remove nearest nodes from destroyed route plan and from possible_removals
for n in to_remove:
index_removed_requests.append(
(n[0][0], n[1], n[0][6]))
for n in to_remove:
possible_removals[n[1]].remove(n[0])
destroyed_route_plan[n[1]].remove(n[2])
# Add request id to removed_requests
if not n[0][0] % int(n[0][0]):
removed_requests.append((n[0][0], n[0][5]))
return destroyed_route_plan, removed_requests, index_removed_requests, True
# Related in service time
def time_related_removal(self, current_route_plan, current_infeasible_set):
destroyed_route_plan = copy.deepcopy(current_route_plan)
removed_requests = []
index_removed_requests = []
possible_removals = self.find_possible_removals(destroyed_route_plan)
empty = 0
for vehicle in possible_removals:
empty += len(vehicle)
if not empty:
return current_route_plan, removed_requests, index_removed_requests, False
# Number of requests to remove
num_remove = self.nodes_to_remove(possible_removals)
if len(current_infeasible_set) != 0:
# Pick random node in infeasible_set to compare other nodes to - always pickup nodes
initial_node = current_infeasible_set[rnd.randint(
0, len(current_infeasible_set))]
node = self.get_pickup(initial_node)
pickup = True
# Find associated node - dropoff node
associated_node = self.get_dropoff(initial_node)
to_remove = []
else:
# Pick random node in route plan to remove and to compare other nodes to
rows = [i for i in range(0, len(possible_removals)) if len(possible_removals[i]) > 0]
rnd.shuffle(rows)
for row_index in rows:
if len(possible_removals[row_index]) == 2:
col_index = 0
break
else:
col_index = rnd.randint(
0, len(possible_removals[row_index]))
break
node = possible_removals[row_index][col_index]
destroy_node = destroyed_route_plan[row_index][node[6]]
# Find associated node
index, pickup = self.find_associated_node(
row_index, col_index, possible_removals)
associated_node = possible_removals[row_index][index]
destroy_associated_node = destroyed_route_plan[row_index][associated_node[6]]
# List of nodes to remove
to_remove = [[node, row_index, destroy_node], [
associated_node, row_index, destroy_associated_node]]
# Remaining number of nodes to remove
num_remove -= 1
# Find the requests to remove
for j in range(num_remove):
# TODO: figure out what this value should initially be set to
best_diff = 48 * 60 * 60
rows = [i for i in range(0, len(possible_removals)) if len(possible_removals[i]) > 0]
for row in rows:
for col in range(0, len(possible_removals[row])):
temp = possible_removals[row][col]
destroyed_temp = destroyed_route_plan[row][temp[6]]
# Skip already added nodes
if [temp, row, destroyed_temp] in to_remove:
continue
# Find associated drop off/pickup node
temp_index, temp_pickup = self.find_associated_node(
row, col, possible_removals)
associated_temp = possible_removals[row][temp_index]
destroyed_associated_temp = destroyed_route_plan[row][associated_temp[6]]
# Find difference between pickup-times and drop off-times of requests
if temp_pickup == pickup:
diff = self.time_difference(
temp, node, associated_temp, associated_node)
else:
diff = self.time_difference(
temp, associated_node, associated_temp, node)
# Compare with smallest difference in current iteration
if diff < best_diff:
best_diff = diff
nearest_node = [temp, row, destroyed_temp]
nearest_associated_node = [
associated_temp, row, destroyed_associated_temp]
to_remove.append(nearest_node)
to_remove.append(nearest_associated_node)
# Remove nearest nodes from destroyed route plan and from possible_removals
for n in to_remove:
index_removed_requests.append(
(n[0][0], n[1], n[0][6]))
for n in to_remove:
possible_removals[n[1]].remove(n[0])
destroyed_route_plan[n[1]].remove(n[2])
# Add request id to removed_requests
if not n[0][0] % int(n[0][0]):
removed_requests.append((n[0][0], n[0][5]))
return destroyed_route_plan, removed_requests, index_removed_requests, True
# Related in both service time and travel time
def related_removal(self, current_route_plan, current_infeasible_set):
destroyed_route_plan = copy.deepcopy(current_route_plan)
removed_requests = []
index_removed_requests = []
possible_removals = self.find_possible_removals(destroyed_route_plan)
empty = 0
for vehicle in possible_removals:
empty += len(vehicle)
if not empty:
return current_route_plan, removed_requests, index_removed_requests, False
# Number of requests to remove
num_remove = self.nodes_to_remove(possible_removals)
if len(current_infeasible_set) != 0:
# Pick random node in infeasible_set to compare other nodes to - always pickup nodes
initial_node = current_infeasible_set[rnd.randint(
0, len(current_infeasible_set))]
node = self.get_pickup(initial_node)
pickup = True
# Find associated node - dropoff node
associated_node = self.get_dropoff(initial_node)
to_remove = []
else:
# Pick random node in route plan to remove and to compare other nodes to
rows = [i for i in range(0, len(possible_removals)) if len(possible_removals[i]) > 0]
rnd.shuffle(rows)
for row_index in rows:
if len(possible_removals[row_index]) == 2:
col_index = 0
break
else:
col_index = rnd.randint(
0, len(possible_removals[row_index]))
break
node = possible_removals[row_index][col_index]
destroy_node = destroyed_route_plan[row_index][node[6]]
# Find associated node
index, pickup = self.find_associated_node(
row_index, col_index, possible_removals)
associated_node = possible_removals[row_index][index]
destroy_associated_node = destroyed_route_plan[row_index][associated_node[6]]
# List of nodes to remove
to_remove = [[node, row_index, destroy_node], [
associated_node, row_index, destroy_associated_node]]
# Remaining number of nodes to remove
num_remove -= 1
# Find the requests to remove
for j in range(num_remove):
# TODO: figure out what this value should initially be set to
best_diff = 48 * 60 * 60
rows = [i for i in range(0, len(possible_removals)) if len(possible_removals[i]) > 0]
for row in rows:
for col in range(0, len(possible_removals[row])):
temp = possible_removals[row][col]
destroyed_temp = destroyed_route_plan[row][temp[6]]
# Skip already added nodes
if [temp, row, destroyed_temp] in to_remove:
continue
# Find associated drop off/pickup node
temp_index, temp_pickup = self.find_associated_node(
row, col, possible_removals)
associated_temp = possible_removals[row][temp_index]
destroyed_associated_temp = destroyed_route_plan[row][associated_temp[6]]
# Find difference between requests
if (temp_pickup == pickup) & pickup:
diff_distance = self.travel_time_difference(
temp[0], node[0])
diff_time = self.time_difference(
temp, node, associated_temp, associated_node)
elif (temp_pickup == pickup) & (not pickup):
diff_distance = self.travel_time_difference(
associated_temp[0], associated_node[0])
diff_time = self.time_difference(
temp, node, associated_temp, associated_node)
elif (temp_pickup != pickup) & pickup:
diff_distance = self.travel_time_difference(
associated_temp[0], node[0])
diff_time = self.time_difference(
temp, associated_node, associated_temp, node)
else:
diff_distance = self.travel_time_difference(
temp[0], associated_node[0])
diff_time = self.time_difference(
temp, associated_node, associated_temp, node)
diff = diff_distance + diff_time
# Compare with smallest difference in current iteration
if diff < best_diff:
best_diff = diff
nearest_node = [temp, row, destroyed_temp]
nearest_associated_node = [
associated_temp, row, destroyed_associated_temp]
to_remove.append(nearest_node)
to_remove.append(nearest_associated_node)
# Remove nearest nodes from destroyed route plan and from possible_removals
for n in to_remove:
index_removed_requests.append(
(n[0][0], n[1], n[0][6]))
for n in to_remove:
possible_removals[n[1]].remove(n[0])
destroyed_route_plan[n[1]].remove(n[2])
# Add request id to removed_requests
if not n[0][0] % int(n[0][0]):
removed_requests.append((n[0][0], n[0][5]))
return destroyed_route_plan, removed_requests, index_removed_requests, True
# Repair operators
def greedy_repair(self, destroyed_route_plan, removed_requests, initial_infeasible_set, current_route_plan, index_removed_requests, delayed, still_delayed_nodes):
unassigned_requests = removed_requests.copy() + initial_infeasible_set.copy()
unassigned_requests.sort(key=lambda x: x[0])
route_plan = copy.deepcopy(destroyed_route_plan)
current_objective = timedelta(0)
infeasible_set = []
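# Unassigned requests (removed ones plus the initially infeasible set) are
# reinserted one at a time in ascending request-id order; the repair generator
# returns the updated plan, objective, infeasible set and vehicle clocks after
# each insertion.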
unassigned_requests = pd.DataFrame(unassigned_requests)
for i in range(unassigned_requests.shape[0]):
# while not unassigned_requests.empty:
rid = unassigned_requests.iloc[i][0]
request = unassigned_requests.iloc[i][1]
index_removal = [
i for i in index_removed_requests if i[0] == rid or i[0] == rid+0.5]
route_plan, new_objective, infeasible_set, vehicle_clocks = self.reopt_repair_generator.generate_insertions(
route_plan=route_plan, request=request, rid=rid, infeasible_set=infeasible_set,
initial_route_plan=current_route_plan, index_removed=index_removal, sim_clock=self.sim_clock, objectives=False, delayed=delayed, still_delayed_nodes=still_delayed_nodes,
vehicle_clocks=self.vehicle_clocks)
self.vehicle_clocks = vehicle_clocks
# update current objective
current_objective = new_objective
return route_plan, current_objective, infeasible_set
def regret_2_repair(self, destroyed_route_plan, removed_requests, initial_infeasible_set, current_route_plan, index_removed_requests, delayed, still_delayed_nodes):
unassigned_requests = removed_requests.copy() + initial_infeasible_set.copy()
unassigned_requests.sort(key=lambda x: x[0])
route_plan = copy.deepcopy(destroyed_route_plan)
current_objective = timedelta(0)
infeasible_set = []
unassigned_requests = pd.DataFrame(unassigned_requests)
"""This modules contains code to be executed after the anonymization kernel has been run"""
import logging
import datetime
import pandas as pd
from anytree import AnyNode
from tqdm import tqdm
logger = logging.getLogger(__name__)
class PostProcessor():
"""The postprocessor will actually recode sensitive terms and make a pretty version of the anonymized dataframe"""
def __init__(self, config, pp):
self.__config = config
self.__preprocessor = pp
def clean(self, df):
"""
Takes a dataframe and drops all helper attributes
Parameters
----------
df: DataFrame
DataFrame to clean.
Returns
-------
DataFrame
Cleaned DataFrame.
"""
df = df.drop(self.__preprocessor.get_non_redundant_entity_attributes(), axis=1)
df = df.drop(self.__preprocessor.get_redundant_entity_attributes(), axis=1)
return df
def uncompress(self, df):
"""
Takes a dataframe and uncompresses it using the first textual attribute available
Parameters
----------
df: DataFrame
DataFrame to uncompress.
Returns
-------
DataFrame
Uncompressed DataFrame.
"""
column_to_uncompress = None
if len(self.__config.get_textual_attributes()) > 0:
column_to_uncompress = self.__config.get_textual_attributes()[0] # Take first column to uncompress
logger.info("Uncompressing dataframe on attribute %s", column_to_uncompress)
if column_to_uncompress:
uncompressed_df = pd.DataFrame(columns=df.columns)
for index in tqdm(range(len(df)), total=len(df), desc="Uncompressing"):
if isinstance(df.loc[index, column_to_uncompress], list):
insensitive_attributes = self.__config.get_insensitive_attributes()
textual_attributes = self.__config.get_textual_attributes()
to_drop = textual_attributes + insensitive_attributes
raw_row = df.drop(to_drop, axis=1).loc[index]
for ii in range(len(df.loc[index, column_to_uncompress])):
row = raw_row
for insensitive_attribute in insensitive_attributes:
value_to_append = df.loc[index, insensitive_attribute][ii]
row = row.append(pd.Series([value_to_append], index=[insensitive_attribute]))
for textual_attribute in textual_attributes:
text_to_append = df.loc[index, textual_attribute][ii]
row = row.append(pd.Series([text_to_append], index=[textual_attribute]))
uncompressed_df = uncompressed_df.append(row, ignore_index=True)
else:
data_row = df.loc[index]
uncompressed_df = uncompressed_df.append(data_row, ignore_index=True)
return uncompressed_df
def pretty(self, df):
"""
Takes a dataframe and makes values in columns pretty
Parameters
----------
df: DataFrame
DataFrame to prettify.
Returns
-------
DataFrame
Prettified DataFrame.
"""
pretty_df = pd.DataFrame(columns=df.columns)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 9 15:00:37 2019
@author: <NAME>
@contact: <EMAIL>
"""
import numpy as np
import pandas as pd
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute
def print_array(A):
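# Render the array as a pandas DataFrame rounded to three decimals; the display
# precision option below keeps the printed output consistent.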
data_frame = pd.DataFrame(A).round(3)
pd.set_option('precision', 3)
import itertools
import os
import random
import tempfile
from unittest import mock
import pandas as pd
import pytest
import pickle
import numpy as np
import string
import multiprocessing as mp
from copy import copy
import dask
import dask.dataframe as dd
from dask.dataframe._compat import tm, assert_categorical_equal
from dask import delayed
from dask.base import compute_as_if_collection
from dask.optimization import cull
from dask.dataframe.shuffle import (
shuffle,
partitioning_index,
rearrange_by_column,
rearrange_by_divisions,
maybe_buffered_partd,
remove_nans,
)
from dask.dataframe.utils import assert_eq, make_meta
from dask.dataframe._compat import PANDAS_GT_120
dsk = {
("x", 0): pd.DataFrame({"a": [1, 2, 3], "b": [1, 4, 7]}, index=[0, 1, 3]),
("x", 1): pd.DataFrame({"a": [4, 5, 6], "b": [2, 5, 8]}, index=[5, 6, 8]),
("x", 2): pd.DataFrame({"a": [7, 8, 9], "b": [3, 6, 9]}, index=[9, 9, 9]),
}
meta = make_meta({"a": "i8", "b": "i8"}, index=pd.Index([], "i8"))
d = dd.DataFrame(dsk, "x", meta, [0, 4, 9, 9])
full = d.compute()
CHECK_FREQ = {}
if dd._compat.PANDAS_GT_110:
CHECK_FREQ["check_freq"] = False
shuffle_func = shuffle # conflicts with keyword argument
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_shuffle(shuffle):
s = shuffle_func(d, d.b, shuffle=shuffle)
assert isinstance(s, dd.DataFrame)
assert s.npartitions == d.npartitions
x = dask.get(s.dask, (s._name, 0))
y = dask.get(s.dask, (s._name, 1))
assert not (set(x.b) & set(y.b)) # disjoint
assert set(s.dask).issuperset(d.dask)
assert shuffle_func(d, d.b)._name == shuffle_func(d, d.b)._name
def test_default_partitions():
assert shuffle(d, d.b).npartitions == d.npartitions
def test_shuffle_npartitions_task():
df = pd.DataFrame({"x": np.random.random(100)})
ddf = dd.from_pandas(df, npartitions=10)
s = shuffle(ddf, ddf.x, shuffle="tasks", npartitions=17, max_branch=4)
sc = s.compute(scheduler="sync")
assert s.npartitions == 17
assert set(s.dask).issuperset(set(ddf.dask))
assert len(sc) == len(df)
assert list(s.columns) == list(df.columns)
assert set(map(tuple, sc.values.tolist())) == set(map(tuple, df.values.tolist()))
@pytest.mark.parametrize("method", ["disk", "tasks"])
def test_index_with_non_series(method):
from dask.dataframe.tests.test_multi import list_eq
list_eq(shuffle(d, d.b, shuffle=method), shuffle(d, "b", shuffle=method))
@pytest.mark.parametrize("method", ["disk", "tasks"])
def test_index_with_dataframe(method):
res1 = shuffle(d, d[["b"]], shuffle=method).compute()
res2 = shuffle(d, ["b"], shuffle=method).compute()
res3 = shuffle(d, "b", shuffle=method).compute()
assert sorted(res1.values.tolist()) == sorted(res2.values.tolist())
assert sorted(res1.values.tolist()) == sorted(res3.values.tolist())
@pytest.mark.parametrize("method", ["disk", "tasks"])
def test_shuffle_from_one_partition_to_one_other(method):
df = pd.DataFrame({"x": [1, 2, 3]})
a = dd.from_pandas(df, 1)
for i in [1, 2]:
b = shuffle(a, "x", npartitions=i, shuffle=method)
assert len(a.compute(scheduler="sync")) == len(b.compute(scheduler="sync"))
@pytest.mark.parametrize("method", ["disk", "tasks"])
def test_shuffle_empty_partitions(method):
df = pd.DataFrame({"x": [1, 2, 3] * 10})
ddf = dd.from_pandas(df, npartitions=3)
s = shuffle(ddf, ddf.x, npartitions=6, shuffle=method)
parts = compute_as_if_collection(dd.DataFrame, s.dask, s.__dask_keys__())
for p in parts:
assert s.columns == p.columns
df2 = pd.DataFrame(
{
"i32": np.array([1, 2, 3] * 3, dtype="int32"),
"f32": np.array([None, 2.5, 3.5] * 3, dtype="float32"),
"cat": pd.Series(["a", "b", "c"] * 3).astype("category"),
"obj": pd.Series(["d", "e", "f"] * 3),
"bool": np.array([True, False, True] * 3),
"dt": pd.Series(pd.date_range("20130101", periods=9)),
"dt_tz": pd.Series(pd.date_range("20130101", periods=9, tz="US/Eastern")),
"td": pd.Series(pd.timedelta_range("2000", periods=9)),
}
)
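# df2 mixes many dtypes (int32, float32 with a missing value, categorical, object,
# bool, datetimes with and without tz, timedeltas) so the partitioning_index tests
# below cover hashing of heterogeneous frames.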
def test_partitioning_index():
res = partitioning_index(df2.i32, 3)
assert ((res < 3) & (res >= 0)).all()
assert len(np.unique(res)) > 1
assert (partitioning_index(df2.i32, 3) == partitioning_index(df2.i32, 3)).all()
res = partitioning_index(df2[["i32"]], 3)
assert ((res < 3) & (res >= 0)).all()
assert len(np.unique(res)) > 1
res = partitioning_index(df2[["cat", "bool", "f32"]], 2)
assert ((0 <= res) & (res < 2)).all()
res = partitioning_index(df2.index, 4)
assert ((res < 4) & (res >= 0)).all()
assert len(np.unique(res)) > 1
def test_partitioning_index_categorical_on_values():
df = pd.DataFrame({"a": list(string.ascii_letters), "b": [1, 2, 3, 4] * 13})
df.a = df.a.astype("category")
df2 = df.copy()
df2.a = df2.a.cat.set_categories(list(reversed(df2.a.cat.categories)))
res = partitioning_index(df.a, 5)
res2 = partitioning_index(df2.a, 5)
assert (res == res2).all()
res = partitioning_index(df, 5)
res2 = partitioning_index(df2, 5)
assert (res == res2).all()
@pytest.mark.parametrize(
"npartitions", [1, 4, 7, pytest.param(23, marks=pytest.mark.slow)]
)
def test_set_index_tasks(npartitions):
df = pd.DataFrame(
{"x": np.random.random(100), "y": np.random.random(100) // 0.2},
index=np.random.random(100),
)
ddf = dd.from_pandas(df, npartitions=npartitions)
assert_eq(df.set_index("x"), ddf.set_index("x", shuffle="tasks"))
assert_eq(df.set_index("y"), ddf.set_index("y", shuffle="tasks"))
assert_eq(df.set_index(df.x), ddf.set_index(ddf.x, shuffle="tasks"))
assert_eq(df.set_index(df.x + df.y), ddf.set_index(ddf.x + ddf.y, shuffle="tasks"))
assert_eq(df.set_index(df.x + 1), ddf.set_index(ddf.x + 1, shuffle="tasks"))
assert_eq(df.set_index(df.index), ddf.set_index(ddf.index, shuffle="tasks"))
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_self_index(shuffle):
df = pd.DataFrame(
{"x": np.random.random(100), "y": np.random.random(100) // 0.2},
index=np.random.random(100),
)
a = dd.from_pandas(df, npartitions=4)
b = a.set_index(a.index, shuffle=shuffle)
assert a is b
assert_eq(b, df.set_index(df.index))
@pytest.mark.parametrize("shuffle", ["tasks"])
def test_set_index_names(shuffle):
df = pd.DataFrame(
{"x": np.random.random(100), "y": np.random.random(100) // 0.2},
index=np.random.random(100),
)
ddf = dd.from_pandas(df, npartitions=4)
assert set(ddf.set_index("x", shuffle=shuffle).dask) == set(
ddf.set_index("x", shuffle=shuffle).dask
)
assert set(ddf.set_index("x", shuffle=shuffle).dask) != set(
ddf.set_index("y", shuffle=shuffle).dask
)
assert set(ddf.set_index("x", max_branch=4, shuffle=shuffle).dask) != set(
ddf.set_index("x", max_branch=3, shuffle=shuffle).dask
)
assert set(ddf.set_index("x", drop=True, shuffle=shuffle).dask) != set(
ddf.set_index("x", drop=False, shuffle=shuffle).dask
)
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_tasks_2(shuffle):
df = dd.demo.make_timeseries(
"2000",
"2004",
{"value": float, "name": str, "id": int},
freq="2H",
partition_freq="1M",
seed=1,
)
df2 = df.set_index("name", shuffle=shuffle)
df2.value.sum().compute(scheduler="sync")
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_tasks_3(shuffle):
df = pd.DataFrame(np.random.random((10, 2)), columns=["x", "y"])
ddf = dd.from_pandas(df, npartitions=5)
ddf2 = ddf.set_index(
"x", shuffle=shuffle, max_branch=2, npartitions=ddf.npartitions
)
df2 = df.set_index("x")
assert_eq(df2, ddf2)
assert ddf2.npartitions == ddf.npartitions
@pytest.mark.parametrize("shuffle", ["tasks", "disk"])
def test_shuffle_sort(shuffle):
df = pd.DataFrame({"x": [1, 2, 3, 2, 1], "y": [9, 8, 7, 1, 5]})
ddf = dd.from_pandas(df, npartitions=3)
df2 = df.set_index("x").sort_index()
ddf2 = ddf.set_index("x", shuffle=shuffle)
assert_eq(ddf2.loc[2:3], df2.loc[2:3])
@pytest.mark.parametrize("shuffle", ["tasks", "disk"])
@pytest.mark.parametrize("scheduler", ["threads", "processes"])
def test_rearrange(shuffle, scheduler):
df = pd.DataFrame({"x": np.random.random(10)})
ddf = dd.from_pandas(df, npartitions=4)
ddf2 = ddf.assign(_partitions=ddf.x % 4)
result = rearrange_by_column(ddf2, "_partitions", max_branch=32, shuffle=shuffle)
assert result.npartitions == ddf.npartitions
assert set(ddf.dask).issubset(result.dask)
# Every value in exactly one partition
a = result.compute(scheduler=scheduler)
get = dask.base.get_scheduler(scheduler=scheduler)
parts = get(result.dask, result.__dask_keys__())
for i in a._partitions.drop_duplicates():
assert sum(i in set(part._partitions) for part in parts) == 1
def test_rearrange_cleanup():
df = pd.DataFrame({"x": np.random.random(10)})
ddf = dd.from_pandas(df, npartitions=4)
ddf2 = ddf.assign(_partitions=ddf.x % 4)
tmpdir = tempfile.mkdtemp()
with dask.config.set(temporary_directory=str(tmpdir)):
result = rearrange_by_column(ddf2, "_partitions", max_branch=32, shuffle="disk")
result.compute(scheduler="processes")
assert len(os.listdir(tmpdir)) == 0
def mock_shuffle_group_3(df, col, npartitions, p):
raise ValueError("Mock exception!")
def test_rearrange_disk_cleanup_with_exception():
# ensure temporary files are cleaned up when there's an internal exception.
with mock.patch("dask.dataframe.shuffle.shuffle_group_3", new=mock_shuffle_group_3):
df = pd.DataFrame({"x": np.random.random(10)})
ddf = dd.from_pandas(df, npartitions=4)
ddf2 = ddf.assign(_partitions=ddf.x % 4)
tmpdir = tempfile.mkdtemp()
with dask.config.set(temporary_directory=str(tmpdir)):
with pytest.raises(ValueError, match="Mock exception!"):
result = rearrange_by_column(
ddf2, "_partitions", max_branch=32, shuffle="disk"
)
result.compute(scheduler="processes")
assert len(os.listdir(tmpdir)) == 0
def test_rearrange_by_column_with_narrow_divisions():
from dask.dataframe.tests.test_multi import list_eq
A = pd.DataFrame({"x": [1, 2, 3, 4, 5, 6], "y": [1, 1, 2, 2, 3, 4]})
a = dd.repartition(A, [0, 4, 5])
df = rearrange_by_divisions(a, "x", (0, 2, 5))
list_eq(df, a)
def test_maybe_buffered_partd():
import partd
f = maybe_buffered_partd()
p1 = f()
assert isinstance(p1.partd, partd.Buffer)
f2 = pickle.loads(pickle.dumps(f))
assert not f2.buffer
p2 = f2()
assert isinstance(p2.partd, partd.File)
def test_set_index_with_explicit_divisions():
df = pd.DataFrame({"x": [4, 1, 2, 5]}, index=[10, 20, 30, 40])
ddf = dd.from_pandas(df, npartitions=2)
def throw(*args, **kwargs):
raise Exception()
with dask.config.set(get=throw):
ddf2 = ddf.set_index("x", divisions=[1, 3, 5])
assert ddf2.divisions == (1, 3, 5)
df2 = df.set_index("x")
assert_eq(ddf2, df2)
# Divisions must be sorted
with pytest.raises(ValueError):
ddf.set_index("x", divisions=[3, 1, 5])
def test_set_index_divisions_2():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5, 6], "y": list("abdabd")})
ddf = dd.from_pandas(df, 2)
result = ddf.set_index("y", divisions=["a", "c", "d"])
assert result.divisions == ("a", "c", "d")
assert list(result.compute(scheduler="sync").index[-2:]) == ["d", "d"]
def test_set_index_divisions_compute():
d2 = d.set_index("b", divisions=[0, 2, 9], compute=False)
d3 = d.set_index("b", divisions=[0, 2, 9], compute=True)
assert_eq(d2, d3)
assert_eq(d2, full.set_index("b"))
assert_eq(d3, full.set_index("b"))
assert len(d2.dask) > len(d3.dask)
d4 = d.set_index(d.b, divisions=[0, 2, 9], compute=False)
d5 = d.set_index(d.b, divisions=[0, 2, 9], compute=True)
exp = full.copy()
exp.index = exp.b
assert_eq(d4, d5)
assert_eq(d4, exp)
assert_eq(d5, exp)
assert len(d4.dask) > len(d5.dask)
def test_set_index_divisions_sorted():
p1 = pd.DataFrame({"x": [10, 11, 12], "y": ["a", "a", "a"]})
p2 = pd.DataFrame({"x": [13, 14, 15], "y": ["b", "b", "c"]})
p3 = pd.DataFrame({"x": [16, 17, 18], "y": ["d", "e", "e"]})
ddf = dd.DataFrame(
{("x", 0): p1, ("x", 1): p2, ("x", 2): p3}, "x", p1, [None, None, None, None]
)
df = ddf.compute()
def throw(*args, **kwargs):
raise Exception("Shouldn't have computed")
with dask.config.set(get=throw):
res = ddf.set_index("x", divisions=[10, 13, 16, 18], sorted=True)
assert_eq(res, df.set_index("x"))
with dask.config.set(get=throw):
res = ddf.set_index("y", divisions=["a", "b", "d", "e"], sorted=True)
assert_eq(res, df.set_index("y"))
# with sorted=True, divisions must be same length as df.divisions
with pytest.raises(ValueError):
ddf.set_index("y", divisions=["a", "b", "c", "d", "e"], sorted=True)
# Divisions must be sorted
with pytest.raises(ValueError):
ddf.set_index("y", divisions=["a", "b", "d", "c"], sorted=True)
@pytest.mark.slow
def test_set_index_consistent_divisions():
# See https://github.com/dask/dask/issues/3867
df = pd.DataFrame(
{"x": np.random.random(100), "y": np.random.random(100) // 0.2},
index=np.random.random(100),
)
ddf = dd.from_pandas(df, npartitions=4)
ddf = ddf.clear_divisions()
ctx = mp.get_context("spawn")
pool = ctx.Pool(processes=8)
with pool:
results = [pool.apply_async(_set_index, (ddf, "x")) for _ in range(100)]
divisions_set = set(result.get() for result in results)
assert len(divisions_set) == 1
def _set_index(df, *args, **kwargs):
return df.set_index(*args, **kwargs).divisions
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_reduces_partitions_small(shuffle):
df = pd.DataFrame({"x": np.random.random(100)})
ddf = dd.from_pandas(df, npartitions=50)
ddf2 = ddf.set_index("x", shuffle=shuffle, npartitions="auto")
assert ddf2.npartitions < 10
def make_part(n):
return pd.DataFrame({"x": np.random.random(n), "y": np.random.random(n)})
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_reduces_partitions_large(shuffle):
nbytes = 1e6
nparts = 50
n = int(nbytes / (nparts * 8))
ddf = dd.DataFrame(
{("x", i): (make_part, n) for i in range(nparts)},
"x",
make_part(1),
[None] * (nparts + 1),
)
ddf2 = ddf.set_index(
"x", shuffle=shuffle, npartitions="auto", partition_size=nbytes
)
assert 1 < ddf2.npartitions < 20
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_doesnt_increase_partitions(shuffle):
nparts = 2
nbytes = 1e6
n = int(nbytes / (nparts * 8))
ddf = dd.DataFrame(
{("x", i): (make_part, n) for i in range(nparts)},
"x",
make_part(1),
[None] * (nparts + 1),
)
ddf2 = ddf.set_index(
"x", shuffle=shuffle, npartitions="auto", partition_size=nbytes
)
assert ddf2.npartitions <= ddf.npartitions
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_detects_sorted_data(shuffle):
df = pd.DataFrame({"x": range(100), "y": range(100)})
ddf = dd.from_pandas(df, npartitions=10, name="x", sort=False)
ddf2 = ddf.set_index("x", shuffle=shuffle)
assert len(ddf2.dask) < ddf.npartitions * 4
def test_set_index_sorts():
# https://github.com/dask/dask/issues/2288
vals = np.array(
[
1348550149000000000,
1348550149000000000,
1348558142000000000,
1348558142000000000,
1348585928000000000,
1348585928000000000,
1348600739000000000,
1348601706000000000,
1348600739000000000,
1348601706000000000,
1348614789000000000,
1348614789000000000,
1348621037000000000,
1348621038000000000,
1348621040000000000,
1348621037000000000,
1348621038000000000,
1348621040000000000,
1348637628000000000,
1348638159000000000,
1348638160000000000,
1348638159000000000,
1348638160000000000,
1348637628000000000,
1348646354000000000,
1348646354000000000,
1348659107000000000,
1348657111000000000,
1348659107000000000,
1348657111000000000,
1348672876000000000,
1348672876000000000,
1348682787000000000,
1348681985000000000,
1348682787000000000,
1348681985000000000,
1348728167000000000,
1348728167000000000,
1348730745000000000,
1348730745000000000,
1348750198000000000,
1348750198000000000,
1348750198000000000,
1348753539000000000,
1348753539000000000,
1348753539000000000,
1348754449000000000,
1348754449000000000,
1348761333000000000,
1348761554000000000,
1348761610000000000,
1348761333000000000,
1348761554000000000,
1348761610000000000,
1348782624000000000,
1348782624000000000,
1348782624000000000,
1348782624000000000,
]
)
vals = pd.to_datetime(vals, unit="ns")
breaks = [10, 36, 58]
dfs = []
for i in range(len(breaks)):
lo = sum(breaks[:i])
hi = sum(breaks[i : i + 1])
dfs.append(pd.DataFrame({"timestamp": vals[lo:hi]}, index=range(lo, hi)))
ddf = dd.concat(dfs).clear_divisions()
assert ddf.set_index("timestamp").index.compute().is_monotonic is True
def test_set_index():
dsk = {
("x", 0): pd.DataFrame({"a": [1, 2, 3], "b": [4, 2, 6]}, index=[0, 1, 3]),
("x", 1): pd.DataFrame({"a": [4, 5, 6], "b": [3, 5, 8]}, index=[5, 6, 8]),
("x", 2): pd.DataFrame({"a": [7, 8, 9], "b": [9, 1, 8]}, index=[9, 9, 9]),
}
d = dd.DataFrame(dsk, "x", meta, [0, 4, 9, 9])
full = d.compute()
d2 = d.set_index("b", npartitions=3)
assert d2.npartitions == 3
assert d2.index.name == "b"
assert_eq(d2, full.set_index("b"))
d3 = d.set_index(d.b, npartitions=3)
assert d3.npartitions == 3
assert d3.index.name == "b"
assert_eq(d3, full.set_index(full.b))
d4 = d.set_index("b")
assert d4.index.name == "b"
assert_eq(d4, full.set_index("b"))
d5 = d.set_index(["b"])
assert d5.index.name == "b"
assert_eq(d5, full.set_index(["b"]))
@pytest.mark.parametrize("engine", ["pandas", "cudf"])
def test_set_index_interpolate(engine):
if engine == "cudf":
# NOTE: engine == "cudf" requires cudf/dask_cudf,
# will be skipped by non-GPU CI.
cudf = pytest.importorskip("cudf")
dask_cudf = pytest.importorskip("dask_cudf")
df = pd.DataFrame({"x": [4, 1, 1, 3, 3], "y": [1.0, 1, 1, 1, 2]})
if engine == "cudf":
gdf = cudf.from_pandas(df)
d = dask_cudf.from_cudf(gdf, npartitions=3)
else:
d = dd.from_pandas(df, 2)
d1 = d.set_index("x", npartitions=3)
assert d1.npartitions == 3
assert set(d1.divisions) == set([1, 2, 4])
d2 = d.set_index("y", npartitions=3)
assert d2.divisions[0] == 1.0
assert 1.0 < d2.divisions[1] < d2.divisions[2] < 2.0
assert d2.divisions[3] == 2.0
@pytest.mark.parametrize("engine", ["pandas", "cudf"])
def test_set_index_interpolate_int(engine):
if engine == "cudf":
# NOTE: engine == "cudf" requires cudf/dask_cudf,
# will be skipped by non-GPU CI.
cudf = pytest.importorskip("cudf")
dask_cudf = pytest.importorskip("dask_cudf")
L = sorted(list(range(0, 200, 10)) * 2)
df = pd.DataFrame({"x": 2 * L})
if engine == "cudf":
gdf = cudf.from_pandas(df)
d = dask_cudf.from_cudf(gdf, npartitions=2)
else:
d = dd.from_pandas(df, 2)
d1 = d.set_index("x", npartitions=10)
assert all(np.issubdtype(type(x), np.integer) for x in d1.divisions)
@pytest.mark.parametrize("engine", ["pandas", "cudf"])
def test_set_index_interpolate_large_uint(engine):
if engine == "cudf":
# NOTE: engine == "cudf" requires cudf/dask_cudf,
# will be skipped by non-GPU CI.
cudf = pytest.importorskip("cudf")
dask_cudf = pytest.importorskip("dask_cudf")
"""This test is for #7304"""
df = pd.DataFrame(
{"x": np.array([612509347682975743, 616762138058293247], dtype=np.uint64)}
)
if engine == "cudf":
gdf = cudf.from_pandas(df)
d = dask_cudf.from_cudf(gdf, npartitions=2)
else:
d = dd.from_pandas(df, 1)
d1 = d.set_index("x", npartitions=1)
assert d1.npartitions == 1
assert set(d1.divisions) == set([612509347682975743, 616762138058293247])
def test_set_index_timezone():
s_naive = pd.Series(pd.date_range("20130101", periods=3))
s_aware = pd.Series(pd.date_range("20130101", periods=3, tz="US/Eastern"))
df = pd.DataFrame({"tz": s_aware, "notz": s_naive})
d = dd.from_pandas(df, 2)
d1 = d.set_index("notz", npartitions=1)
s1 = pd.DatetimeIndex(s_naive.values, dtype=s_naive.dtype)
assert d1.divisions[0] == s_naive[0] == s1[0]
assert d1.divisions[-1] == s_naive[2] == s1[2]
# We currently lose "freq". Converting data with pandas-defined dtypes
# to numpy or pure Python can be lossy like this.
d2 = d.set_index("tz", npartitions=1)
s2 = pd.DatetimeIndex(s_aware, dtype=s_aware.dtype)
assert d2.divisions[0] == s2[0]
assert d2.divisions[-1] == s2[2]
assert d2.divisions[0].tz == s2[0].tz
assert d2.divisions[0].tz is not None
s2badtype = pd.DatetimeIndex(s_aware.values, dtype=s_naive.dtype)
if PANDAS_GT_120:
# starting with pandas 1.2.0, comparing equality of timestamps with different
# timezones returns False instead of raising an error
assert not d2.divisions[0] == s2badtype[0]
else:
with pytest.raises(TypeError):
d2.divisions[0] == s2badtype[0]
def test_set_index_npartitions():
# https://github.com/dask/dask/issues/6974
data = pd.DataFrame(
index=pd.Index(
["A", "A", "A", "A", "A", "A", "A", "A", "A", "B", "B", "B", "C"]
)
)
data = dd.from_pandas(data, npartitions=2)
output = data.reset_index().set_index("index", npartitions=1)
assert output.npartitions == 1
@pytest.mark.parametrize("unit", ["ns", "us"])
def test_set_index_datetime_precision(unit):
# https://github.com/dask/dask/issues/6864
df = pd.DataFrame(
[
[1567703791155681, 1],
[1567703792155681, 2],
[1567703790155681, 0],
[1567703793155681, 3],
],
columns=["ts", "rank"],
)
df.ts = pd.to_datetime(df.ts, unit=unit)
ddf = dd.from_pandas(df, npartitions=2)
ddf = ddf.set_index("ts")
assert_eq(ddf, df.set_index("ts"))
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_drop(drop):
pdf = pd.DataFrame(
{
"A": list("ABAABBABAA"),
"B": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
"C": [1, 2, 3, 2, 1, 3, 2, 4, 2, 3],
}
)
ddf = dd.from_pandas(pdf, 3)
assert_eq(ddf.set_index("A", drop=drop), pdf.set_index("A", drop=drop))
assert_eq(ddf.set_index("B", drop=drop), pdf.set_index("B", drop=drop))
assert_eq(ddf.set_index("C", drop=drop), pdf.set_index("C", drop=drop))
assert_eq(ddf.set_index(ddf.A, drop=drop), pdf.set_index(pdf.A, drop=drop))
assert_eq(ddf.set_index(ddf.B, drop=drop), pdf.set_index(pdf.B, drop=drop))
assert_eq(ddf.set_index(ddf.C, drop=drop), pdf.set_index(pdf.C, drop=drop))
# numeric columns
pdf = pd.DataFrame(
{
0: list("ABAABBABAA"),
1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
2: [1, 2, 3, 2, 1, 3, 2, 4, 2, 3],
}
)
ddf = dd.from_pandas(pdf, 3)
assert_eq(ddf.set_index(0, drop=drop), pdf.set_index(0, drop=drop))
assert_eq(ddf.set_index(2, drop=drop), pdf.set_index(2, drop=drop))
def test_set_index_raises_error_on_bad_input():
df = pd.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7], "b": [7, 6, 5, 4, 3, 2, 1]})
ddf = dd.from_pandas(df, 2)
msg = r"Dask dataframe does not yet support multi-indexes"
with pytest.raises(NotImplementedError) as err:
ddf.set_index(["a", "b"])
assert msg in str(err.value)
with pytest.raises(NotImplementedError) as err:
ddf.set_index([["a", "b"]])
assert msg in str(err.value)
with pytest.raises(NotImplementedError) as err:
ddf.set_index([["a"]])
assert msg in str(err.value)
def test_set_index_sorted_true():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 20, 40], "z": [4, 3, 2, 1]})
a = dd.from_pandas(df, 2, sort=False)
assert not a.known_divisions
b = a.set_index("x", sorted=True)
assert b.known_divisions
assert set(a.dask).issubset(set(b.dask))
for drop in [True, False]:
assert_eq(a.set_index("x", drop=drop), df.set_index("x", drop=drop))
assert_eq(
a.set_index(a.x, sorted=True, drop=drop), df.set_index(df.x, drop=drop)
)
assert_eq(
a.set_index(a.x + 1, sorted=True, drop=drop),
df.set_index(df.x + 1, drop=drop),
)
with pytest.raises(ValueError):
a.set_index(a.z, sorted=True)
def test_set_index_sorted_single_partition():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [1, 0, 1, 0]})
import dash # pip install dash
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Output, Input
from dash_extensions import Lottie # pip install dash-extensions
import dash_bootstrap_components as dbc # pip install dash-bootstrap-components
import plotly.express as px # pip install plotly
import pandas as pd # pip install pandas
from datetime import date
import calendar
from wordcloud import WordCloud # pip install wordcloud
# Lottie by Emil - https://github.com/thedirtyfew/dash-extensions
url_coonections = "https://assets9.lottiefiles.com/private_files/lf30_5ttqPi.json"
url_companies = "https://assets9.lottiefiles.com/packages/lf20_EzPrWM.json"
url_msg_in = "https://assets9.lottiefiles.com/packages/lf20_8wREpI.json"
url_msg_out = "https://assets2.lottiefiles.com/packages/lf20_Cc8Bpg.json"
url_reactions = "https://assets2.lottiefiles.com/packages/lf20_nKwET0.json"
options = dict(loop=True, autoplay=True, rendererSettings=dict(preserveAspectRatio='xMidYMid slice'))
# Import App data from csv sheets **************************************
df_cnt = pd.read_csv("https://raw.githubusercontent.com/Coding-with-Adam/Dash-by-Plotly/master/Analytic_Web_Apps/Linkedin_Analysis/Connections.csv")
df_cnt["Connected On"] = pd.to_datetime(df_cnt["Connected On"])
df_cnt["month"] = df_cnt["Connected On"].dt.month
df_cnt['month'] = df_cnt['month'].apply(lambda x: calendar.month_abbr[x])
df_invite = pd.read_csv("https://raw.githubusercontent.com/Coding-with-Adam/Dash-by-Plotly/master/Analytic_Web_Apps/Linkedin_Analysis/Invitations.csv")
df_invite["Sent At"] = pd.to_datetime(df_invite["Sent At"])
df_react = pd.read_csv("https://raw.githubusercontent.com/Coding-with-Adam/Dash-by-Plotly/master/Analytic_Web_Apps/Linkedin_Analysis/Reactions.csv")
df_react["Date"] = pd.to_datetime(df_react["Date"])
df_msg = pd.read_csv("https://raw.githubusercontent.com/Coding-with-Adam/Dash-by-Plotly/master/Analytic_Web_Apps/Linkedin_Analysis/messages.csv")
df_msg["DATE"] = | pd.to_datetime(df_msg["DATE"]) | pandas.to_datetime |
# vim: set fdm=indent:
'''
___
/ | ____ ___ ____ _____ ____ ____
/ /| | / __ `__ \/ __ `/_ / / __ \/ __ \
/ ___ |/ / / / / / /_/ / / /_/ /_/ / / / /
/_/ |_/_/ /_/ /_/\__,_/ /___/\____/_/ /_/
______ __
/ ____/___ ________ _________ ______/ /_
/ /_ / __ \/ ___/ _ \/ ___/ __ `/ ___/ __/
/ __/ / /_/ / / / __/ /__/ /_/ (__ ) /_
/_/ \____/_/ \___/\___/\__,_/____/\__/
___ __ __
/ | _____________ / /__ _________ _/ /_____ _____
/ /| |/ ___/ ___/ _ \/ / _ \/ ___/ __ `/ __/ __ \/ ___/
/ ___ / /__/ /__/ __/ / __/ / / /_/ / /_/ /_/ / /
/_/ |_\___/\___/\___/_/\___/_/ \__,_/\__/\____/_/
GITHUB:
https://github.com/aws-samples/simple-forecat-solution/
USAGE:
streamlit run -- ./app.py --local-dir LOCAL_DIR [--landing-page-url URL]
OPTIONS:
--local-dir LOCAL_DIR /path/to/ a local directory from which the UI
will look for files.
--landing-page-url URL URL of the AFA landing page
'''
import os
import sys
import io
import glob
import time
import datetime
import base64
import pathlib
import textwrap
import argparse
import re
import json
import logging
import gzip
import gc
import boto3
import numpy as np
import pandas as pd
import awswrangler as wr
import streamlit as st
import plotly.express as pex
import plotly.graph_objects as go
import cloudpickle
import gzip
from collections import OrderedDict, deque, namedtuple
from concurrent import futures
from urllib.parse import urlparse
from toolz.itertoolz import partition_all
from botocore.exceptions import ClientError
from sspipe import p, px
from streamlit import session_state as state
from textwrap import dedent
from stqdm import stqdm
from afa import (load_data, resample, run_pipeline, run_cv_select,
calc_smape, calc_wape,
make_demand_classification, process_forecasts, make_perf_summary,
make_health_summary, GROUP_COLS, EXP_COLS)
from lambdamap import LambdaExecutor, LambdaFunction
from awswrangler.exceptions import NoFilesFound
from streamlit import caching
from streamlit.uploaded_file_manager import UploadedFile
from streamlit.script_runner import RerunException
from st_aggrid import AgGrid, GridOptionsBuilder, JsCode
from joblib import Parallel, delayed
from humanfriendly import format_timespan
ST_STATIC_PATH = pathlib.Path(st.__path__[0]).joinpath("static")
ST_DOWNLOADS_PATH = ST_STATIC_PATH.joinpath("downloads")
LAMBDAMAP_FUNC = "AfaLambdaMapFunction"
LOCAL_DIR = "/home/ec2-user/SageMaker"
if not os.path.exists(ST_DOWNLOADS_PATH):
ST_DOWNLOADS_PATH.mkdir()
FREQ_MAP = OrderedDict(Daily="D", Weekly="W-MON", Monthly="MS")
FREQ_MAP_AFC = OrderedDict(Daily="D", Weekly="W", Monthly="M")
FREQ_MAP_LONG = {
"D": "Daily", "W-MON": "Weekly", "W": "Weekly", "M": "Monthly",
"MS": "Monthly"
}
FREQ_MAP_PD = {
"D": "D",
"W": "W-MON",
"W-SUN": "W-MON",
"W-MON": "W-MON",
"M": "MS",
"MS": "MS"
}
METRIC = "smape"
MAX_LAMBDAS = 1000
def validate(df):
"""Validate a dataset.
"""
err_msgs = []
warn_msgs = []
# check column names
for col in EXP_COLS:
if col not in df:
err_msgs.append(f"missing **{col}** column")
msgs = {
"errors": err_msgs,
"warnings": warn_msgs
}
is_valid_file = len(err_msgs) == 0
return df, msgs, is_valid_file
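# Hedged usage sketch (illustration only, not executed by the app). It assumes that
# EXP_COLS, imported from `afa` above, lists the required input columns such as the
# timestamp/channel/family/item_id/demand fields used elsewhere in this file.
#
#   df_ok = pd.DataFrame({"timestamp": ["2021-01-04"], "channel": ["web"],
#                         "family": ["widgets"], "item_id": ["A1"], "demand": [10]})
#   _, msgs, ok = validate(df_ok)                           # ok is True, msgs["errors"] == []
#   _, msgs, ok = validate(df_ok.drop(columns=["demand"]))  # ok is False if "demand" is required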
@st.cache
def load_file(path):
"""
"""
if path.endswith(".csv.gz"):
compression = "gzip"
elif path.endswith(".csv"):
compression = None
else:
raise NotImplementedError
return pd.read_csv(path, dtype={"timestamp": str}, compression=compression)
def _sum(y):
if np.all(pd.isnull(y)):
return np.nan
return np.nansum(y)
def _resample(df2, freq):
df2 = df2.groupby(["channel", "family", "item_id"]) \
.resample(freq) \
.demand \
.sum(min_count=1)
return df2
def process_data(df, freq, chunksize=None):
"""
"""
df["timestamp"] = pd.DatetimeIndex(df["timestamp"])
df.set_index("timestamp", inplace=True)
groups = df.groupby(["channel", "family", "item_id"], sort=False)
if chunksize is None:
chunksize = min(groups.ngroups, 1000)
total = int(np.ceil(groups.ngroups / chunksize))
all_results = []
for chunk in stqdm(partition_all(chunksize, groups), total=total, desc="Progress"):
results = Parallel(n_jobs=-1)(delayed(_resample)(dd, freq) for _, dd in chunk)
all_results.extend(results)
df = pd.concat(all_results) \
.reset_index(["channel", "family", "item_id"])
df.index.name = None
return df
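# Hedged example of the expected shape change (illustrative values; "W-MON" matches the
# weekly frequency used by FREQ_MAP above). Input rows hold raw-frequency demand; the
# output keeps one row per (channel, family, item_id) and resampled period, with the
# resampled timestamps on the index and demand summed within each period:
#
#   df_long = pd.DataFrame({
#       "timestamp": ["2021-01-05", "2021-01-07", "2021-01-12"],
#       "channel": ["web"] * 3, "family": ["widgets"] * 3, "item_id": ["A1"] * 3,
#       "demand": [5, 7, 3],
#   })
#   weekly = process_data(df_long, "W-MON")   # two weekly rows for A1 with demand 12 and 3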
class StreamlitExecutor(LambdaExecutor):
"""Custom LambdaExecutor to display a progress bar in the app.
"""
def map(self, func, payloads, local_mode=False):
"""
"""
if local_mode:
f = func
else:
f = LambdaFunction(func, self._client, self._lambda_arn)
ex = self._executor
wait_for = [ex.submit(f, *p["args"], **p["kwargs"]) for p in payloads]
return wait_for
def display_progress(wait_for, desc=None):
"""
"""
# display progress of the futures
pbar = stqdm(desc=desc, total=len(wait_for))
prev_n_done = 0
n_done = sum(f.done() for f in wait_for)
while n_done != len(wait_for):
diff = n_done - prev_n_done
pbar.update(diff)
prev_n_done = n_done
n_done = sum(f.done() for f in wait_for)
time.sleep(0.25)
diff = n_done - prev_n_done
pbar.update(diff)
return
def run_lambdamap(df, horiz, freq):
"""
"""
payloads = []
freq = FREQ_MAP_PD[freq]
if freq[0] == "W":
cv_periods = None
cv_stride = 2
elif freq[0] == "M":
cv_periods = None
cv_stride = 1
else:
raise NotImplementedError
from toolz.itertoolz import partition
from tqdm.auto import tqdm
#with st.spinner(f":rocket: Launching forecasts via AWS Lambda (λ)..."):
# resample the dataset to the forecast frequency before running
# lambdamap
start = time.time()
df2 = get_df_resampled(df, freq)
print(f"completed in {format_timespan(time.time()-start)}")
groups = df2.groupby(GROUP_COLS, as_index=False, sort=False)
# generate payload
for _, dd in groups:
payloads.append(
{"args": (dd, horiz, freq),
"kwargs": {"metric": "smape",
"cv_periods": cv_periods, "cv_stride": cv_stride}})
# launch jobs in chunks of 1000
executor = StreamlitExecutor(max_workers=min(MAX_LAMBDAS, len(payloads)),
lambda_arn=LAMBDAMAP_FUNC)
wait_for = executor.map(run_cv_select, payloads)
display_progress(wait_for, "🔥 Generating forecasts")
return wait_for
def get_df_resampled(df, freq):
groups = df.groupby(["channel", "family", "item_id"], sort=False)
chunksize = min(1000, groups.ngroups)
total = int(np.ceil(float(groups.ngroups) / chunksize))
all_results = []
for chunk in stqdm(partition_all(chunksize, groups), total=total,
desc="Batch Preparation Progress"):
results = Parallel(n_jobs=-1)(delayed(_resample)(dd, freq) for _, dd in chunk)
all_results.extend(results)
df2 = pd.concat(all_results) \
.reset_index(["channel", "family", "item_id"])
df2.index.name = None
state["report"]["data"]["df2"] = df2
return df2
def display_ag_grid(df, auto_height=False, paginate=False,
comma_cols=None, selection_mode=None, use_checkbox=False):
"""
Parameters
----------
df : pd.DataFrame
auto_height : bool
pagination : bool
comma_cols : tuple or list
Numeric columns to apply comma thousands separator.
"""
gb = GridOptionsBuilder.from_dataframe(df)
#gb.configure_selection("single")
gb.configure_auto_height(auto_height)
gb.configure_pagination(enabled=paginate)
if selection_mode is not None:
gb.configure_selection(selection_mode=selection_mode,
use_checkbox=use_checkbox)
comma_renderer = JsCode(textwrap.dedent("""
function(params) {
return params.value
.toString()
.split( /(?=(?:\d{3})+(?:\.|$))/g ).join( "," )
}
"""))
for col in (comma_cols or ()):  # tolerate the default comma_cols=None
gb.configure_column(col, cellRenderer=comma_renderer)
response = AgGrid(df, gridOptions=gb.build(), allow_unsafe_jscode=True)
return response
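# Hedged usage sketch for the helper above (illustrative values only). `comma_cols`
# applies the thousands-separator cell renderer to the named numeric columns:
#
#   df_demo = pd.DataFrame({"item_id": ["A1", "B2"], "demand": [12000, 340000]})
#   display_ag_grid(df_demo, paginate=True, comma_cols=("demand",),
#                   selection_mode="single", use_checkbox=True)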
def valid_launch_freqs():
data_freq = state.report["data"]["freq"]
valid_freqs = ["D", "W", "M"]
if data_freq in ("D",):
# don't allow daily forecasting yet
valid_freqs = valid_freqs[1:]
elif data_freq in ("W","W-MON",):
valid_freqs = valid_freqs[1:]
elif data_freq in ("M","MS",):
valid_freqs = valid_freqs[2:]
else:
raise NotImplementedError
return valid_freqs
def create_presigned_url(s3_path, expiration=3600):
"""Generate a presigned URL to share an S3 object
:param bucket_name: string
:param object_name: string
:param expiration: Time in seconds for the presigned URL to remain valid
:return: Presigned URL as string. If error, returns None.
"""
parsed_url = urlparse(s3_path, allow_fragments=False)
bucket_name = parsed_url.netloc
object_name = parsed_url.path.strip("/")
# Generate a presigned URL for the S3 object
s3_client = boto3.client('s3')
try:
response = s3_client.generate_presigned_url('get_object',
Params={'Bucket': bucket_name,
'Key': object_name},
ExpiresIn=expiration)
except ClientError as e:
logging.error(e)
return None
# The response contains the presigned URL
return response
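# Hedged usage sketch (the bucket and key below are placeholders, not real resources).
# The returned URL is only valid for `expiration` seconds and is None if signing failed:
#
#   url = create_presigned_url("s3://my-afa-bucket/afa/reports/AfaReport_20210101.pkl.gz",
#                              expiration=900)
#   if url is not None:
#       st.markdown(f"[Download report]({url})")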
def make_df_backtests(df_results, parallel=False):
"""Expand df_results to a "long" dataframe with the columns:
channel, family, item_id, timestamp, demand, backtest.
"""
def _expand(dd):
ts = np.hstack(dd["ts_cv"].apply(np.hstack))
ys = np.hstack(dd["y_cv"].apply(np.hstack))
yp = np.hstack(dd["yp_cv"].apply(np.hstack))
df = pd.DataFrame({"timestamp": ts, "demand": ys, "backtest": yp})
return df
groups = df_results.query("rank == 1") \
.groupby(["channel", "family", "item_id"],
as_index=True, sort=False)
if parallel:
df_backtests = groups.parallel_apply(_expand)
else:
df_backtests = groups.apply(_expand)
df_backtests["timestamp"] = pd.DatetimeIndex(df_backtests["timestamp"])
return df_backtests.reset_index(["channel", "family", "item_id"])
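# The result is one row per backtested timestamp of each rank-1 timeseries, e.g.
# (illustrative values):
#
#   channel  family   item_id  timestamp   demand  backtest
#   web      widgets  A1       2021-01-04    10.0      11.2
#   web      widgets  A1       2021-01-11    12.0      11.8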
def save_report(report_fn):
"""
"""
if "report" not in state or "name" not in state["report"]:
return
if "path" not in state["report"]["data"]:
st.warning(textwrap.dedent(f"""
Warning: unable to save report, no input data was loaded.
"""))
return
start = time.time()
with st.spinner(":hourglass_flowing_sand: Saving Report ..."):
now_str = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
local_path = f'/tmp/{report_fn}'
# save the report locally
cloudpickle.dump(state["report"], gzip.open(local_path, "wb"))
# upload the report to s3
s3_path = \
f'{state["report"]["afa"]["s3_afa_reports_path"]}/{report_fn}'
parsed_url = urlparse(s3_path, allow_fragments=False)
bucket = parsed_url.netloc
key = parsed_url.path.strip("/")
s3_client = boto3.client("s3")
try:
response = s3_client.upload_file(local_path, bucket, key)
signed_url = create_presigned_url(s3_path)
st.info(textwrap.dedent(f"""
The report can be downloaded [here]({signed_url}).
"""))
except ClientError as e:
logging.error(e)
st.text(f"(completed in {format_timespan(time.time() - start)})")
return
def make_df_reports(bucket, prefix):
s3 = boto3.client("s3")
df = pd.DataFrame()
df["filename"] = \
[e['Key'] for p in s3.get_paginator("list_objects_v2")
.paginate(Bucket=bucket, Prefix=prefix) for e in p['Contents']]
#df["s3_path"] = "s3://" + bucket + "/" + df["filename"]
df["filename"] = df["filename"].apply(os.path.basename)
return df
#
# Panels
#
def make_mask(df, channel, family, item_id):
mask = np.ones(len(df)).astype(bool)
# only mask when all three keys are non-empty
if channel == "" or family == "" or item_id == "":
return ~mask
mask &= df["channel"].str.upper() == channel.upper()
mask &= df["family"].str.upper() == family.upper()
mask &= df["item_id"].str.upper() == item_id.upper()
return mask
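# Hedged example of the masking rules above: matching is case-insensitive, and an empty
# key yields an all-False mask (nothing is selected until all three keys are provided).
# `df_pred` stands in for any frame with channel/family/item_id columns:
#
#   mask = make_mask(df_pred, "WEB", "widgets", "a1")   # rows of that single timeseries
#   mask = make_mask(df_pred, "", "widgets", "a1")      # all False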
@st.cache
def make_downloads(df_pred, df_results):
"""
"""
pred_fn = os.path.join(ST_DOWNLOADS_PATH,
f"{state.uploaded_file.name}_fcast.csv.gz")
results_fn = os.path.join(ST_DOWNLOADS_PATH,
f"{state.uploaded_file.name}_results.csv.gz")
state.df_pred.to_csv(pred_fn, index=False, compression="gzip")
state.df_results.to_csv(results_fn, index=False, compression="gzip")
return pred_fn, results_fn
def _info(s):
st.info(textwrap.dedent(s))
def _success(s):
st.success(textwrap.dedent(s))
def _write(s):
st.write(textwrap.dedent(s))
def panel_create_report(expanded=True):
"""Display the 'Load Data' panel.
"""
def _load_data(path):
if path.endswith(".csv"):
compression = None
elif path.endswith(".csv.gz"):
compression = "gzip"
else:
raise NotImplementedError
df = pd.read_csv(path,
dtype={"timestamp": str, "channel": str, "family": str,
"item_id": str}, compression=compression)
return df
default_name = state["report"].get("name", None)
file_path = state["report"]["data"].get("path", None)
freq = state["report"]["data"].get("freq", None)
st.markdown("## Create Report")
with st.beta_expander("⬆️ Load + Validate Data", expanded=expanded):
st.write(f"""Step 1 – Create a new forecast report by selecting an uploaded
file containing the demand history for your use-case. You must also specify
the frequency of the demand (e.g. _Daily_, _Weekly_, or _Monthly_). Demand
history files are uploaded using the [SageMaker Notebook interface]({state["landing_page_url"]})""")
now_str = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
btn_refresh_files = st.button("Refresh Files", help="Refresh the _File_ selector with recently uploaded files.")
with st.form("create_report_form"):
report_name = st.text_input("Report Name (optional)",
help="You may optionally give this report a name, otherwise one will be automatically generated.")
_cols = st.beta_columns([3,1])
with _cols[0]:
fn = file_selectbox(
"File (.csv or .csv.gz files)", args.local_dir,
help="This file contains the demand history as either a `.csv` or `.csv.gz` file.")
with _cols[1]:
freq = st.selectbox("Frequency", list(s for s in FREQ_MAP.values() if s != 'D'),
format_func=lambda s: FREQ_MAP_LONG[s],
help="This input file must contain demand history at a _daily_, _weekly_, or _monthly_ frequency.")
btn_validate = st.form_submit_button("Load & Validate")
if btn_validate:
start = time.time()
if fn is None:
st.error(textwrap.dedent("""
**Error**
No files were selected.
1. Upload your file(s).
2. Click the **Refresh Files** button.
3. Select the file from the dropdown box.
4. Select the **Frequency**.
5. Click the **Validate** button.
####
"""))
st.stop()
if report_name == "":
now_str = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
report_name = f"AfaReport_{now_str}"
if report_name != "" and re.match(r"^[A-Za-z0-9-_]*$", report_name) is None:
st.error(dedent("""
The report name may only contain:
- uppercase letters
- lowercase letters
- numbers
- dashes ('-')
- underscores ('_')
####
"""))
else:
# temporarily load the file for validation and store it in state
# iff the data is valid
with st.spinner(":hourglass_flowing_sand: Validating file ..."):
df, msgs, is_valid_file = validate(_load_data(fn))#.drop(["timestamp", "channel"], axis=1))
if is_valid_file:
with st.spinner(":hourglass_flowing_sand: Processing file ..."):
state.report["name"] = report_name
state.report["data"]["path"] = fn
state.report["data"]["sz_bytes"] = os.path.getsize(fn)
state.report["data"]["freq"] = freq
# impute missing dates from the validated dataframe, this
# will fill in the missing timestamps with null demand values
# state.report["data"]["df"] = \
# load_data(df, impute_freq=state.report["data"]["freq"])
state.report["data"]["df"] = \
process_data(df,state.report["data"]["freq"])
state.report["data"]["is_valid"] = True
# clear any existing data health check results, this forces
# a rechecking of data health
state.report["data"]["df_health"] = None
st.text(f"(completed in {format_timespan(time.time() - start)})")
else:
err_bullets = "\n".join("- " + s for s in msgs["errors"])
st.error(f"**Validation failed**\n\n{err_bullets}")
if state.report["data"].get("is_valid", False):
_success(f"""
`{os.path.basename(state.report["data"]["path"])}` is **valid**
""")
return
def panel_load_report(expanded=True):
"""
"""
def format_func(s):
if s == "local":
return "Local Filesystem"
elif s == "s3":
return "☁️ S3"
s3 = boto3.client("s3")
st.markdown("## Load Report")
with st.beta_expander("📂 Load Report", expanded=expanded):
st.write(f"""Optional – Alternatively, you can load a previously-generated
report. Report files must have the `.pkl.gz` file extension and can be uploaded
using the [SageMaker Notebook interface]({state["landing_page_url"]}).""")
report_source = st.radio("Source", ["local"], format_func=format_func)
_cols = st.beta_columns([3,1])
with _cols[0]:
if report_source == "local":
fn = file_selectbox("File", os.path.join(args.local_dir),
globs=("*.pkl.gz",))
elif report_source == "s3":
pass
else:
raise NotImplementedError
load_report_btn = st.button("Load", key="load_report_btn")
with _cols[1]:
st.write("##")
st.button("Refresh Files", key="refresh_report_files_btn")
if load_report_btn:
start = time.time()
with st.spinner(":hourglass_flowing_sand: Loading Report ..."):
state["report"] = cloudpickle.load(gzip.open(fn, "rb"))
st.text(f"(completed in {format_timespan(time.time() - start)})")
state["prev_state"] = "report_loaded"
return
def panel_data_health():
"""
"""
df = state.report["data"].get("df", None)
df_health = state.report["data"].get("df_health", None)
freq = state.report["data"].get("freq", None)
if df is None:
return
st.header("Data Health")
with st.beta_expander("❤️ Data Health", expanded=True):
st.write(f"""Step 2 – Inspect the characteristics of the dataset
for irregularities prior to generating any forecasts. For example,
missing channels, families, item IDs; or unusually short/long
timeseries lengths.""")
with st.spinner("Performing data health check ..."):
start = time.time()
# check iff required
if df_health is None:
df_health = make_health_summary(df, state.report["data"]["freq"])
# save the health check results
state.report["data"]["df_health"] = df_health
# calc. ranked series by demand
state.report["data"]["df_ranks"] = \
df.groupby(["channel", "family", "item_id"]) \
.agg({"demand": sum}) \
.sort_values(by="demand", ascending=False)
num_series = df_health.shape[0]
num_channels = df_health["channel"].nunique()
num_families = df_health["family"].nunique()
num_item_ids = df_health["item_id"].nunique()
first_date = df_health['timestamp_min'].dt.strftime('%Y-%m-%d').min()
last_date = df_health['timestamp_max'].dt.strftime('%Y-%m-%d').max()
if freq == 'D':
duration_unit = 'D'
duration_str = 'days'
elif freq in ("W", "W-MON",):
duration_unit = 'W'
duration_str = 'weeks'
elif freq in ("M", "MS",):
duration_unit = 'M'
duration_str = 'months'
else:
raise NotImplementedError
duration = pd.Timestamp(last_date).to_period(duration_unit) - \
pd.Timestamp(first_date).to_period(duration_unit)
pc_missing = \
df_health["demand_missing_dates"].sum() / df_health["demand_len"].sum()
with st.beta_container():
_cols = st.beta_columns(3)
with _cols[0]:
st.markdown("#### Summary")
st.text(textwrap.dedent(f"""
No. series:\t{num_series}
No. channels:\t{num_channels}
No. families:\t{num_families}
No. item IDs:\t{num_item_ids}
"""))
with _cols[1]:
st.markdown("#### Timespan")
st.text(f"Frequency:\t{FREQ_MAP_LONG[freq]}\n"
f"Duration:\t{duration.n} {duration_str}\n"
f"First date:\t{first_date}\n"
f"Last date:\t{last_date}\n")
#f"% missing:\t{int(np.round(pc_missing*100,0))}")
with _cols[2]:
st.markdown("#### Timeseries Lengths")
fig = pex.box(df_health, x="demand_nonnull_count", height=160)
fig.update_layout(
margin={"t": 5, "b": 0, "r": 0, "l": 0},
xaxis_title=duration_str,
height=100
)
st.plotly_chart(fig, use_container_width=True)
st.text(f"(completed in {format_timespan(time.time() - start)})")
return
def panel_launch():
"""
"""
def _format_func(short):
if short == "local":
s = " Local"
if short == "lambdamap":
s = "AWS Lambda"
return s
df = state.report["data"].get("df", None)
df_health = state.report["data"].get("df_health", None)
horiz = state.report["afa"].get("horiz", None)
freq = state.report["afa"].get("freq", None)
if df is None or df_health is None:
return
st.header("Statistical Forecasts")
with st.beta_expander("🚀 Launch", expanded=True):
st.write(f"""Step 3 – Generate forecasts by training and evaluating 75+
configurations of [statistical forecasting
models](https://otexts.com/fpp3/) for each timeseries in
parallel using AWS Lambda. A forecast at the desired _horizon length_ and
_frequency_ is then generated using the each individual timeseries' best model.
This process typically completes at a rate of 500–1,000 timeseries/min.
""")
with st.form("afa_form"):
with st.beta_container():
_cols = st.beta_columns(3)
with _cols[0]:
horiz = st.number_input("Horizon Length", value=1, min_value=1)
with _cols[1]:
freq = st.selectbox("Forecast Frequency", valid_launch_freqs(), 0,
format_func=lambda s: FREQ_MAP_LONG[s])
with _cols[2]:
backend = st.selectbox("Compute Backend",
["lambdamap"], 0, _format_func)
btn_launch = st.form_submit_button("Launch")
if btn_launch:
start = time.time()
# save form data
state.report["afa"]["freq"] = freq
state.report["afa"]["horiz"] = horiz
state.report["afa"]["backend"] = backend
df = state.report["data"]["df"]
freq_in = state.report["data"]["freq"]
freq_out = state.report["afa"]["freq"]
if backend == "local":
wait_for = \
run_pipeline(df, freq_in, freq_out, metric=METRIC,
cv_stride=2, backend="futures", horiz=horiz)
display_progress(wait_for, "🔥 Generating forecasts")
raw_results = [f.result() for f in futures.as_completed(wait_for)]
elif backend == "lambdamap":
with st.spinner(f":rocket: Launching forecasts via AWS Lambda (λ)..."):
all_raw_results = []
groups = df.groupby(["channel", "family", "item_id"], sort=False)
chunksize = min(5000, groups.ngroups)
# divide the dataset into chunks
df["grp"] = groups.ngroup() % int(np.ceil(groups.ngroups / chunksize))
groups = df.groupby("grp", sort=False)
total = df["grp"].nunique()
for _, dd in stqdm(groups, total=total, desc="Overall Progress"):
wait_for = run_lambdamap(dd, horiz, freq_out)
raw_results = [f.result() for f in futures.as_completed(wait_for)]
all_raw_results.extend(raw_results)
raw_results = all_raw_results
else:
raise NotImplementedError
with st.spinner("⏳ Calculating results ..."):
# generate the results and predictions as dataframes
df_results, df_preds, df_model_dist, best_err, naive_err = \
process_forecasts(wait_for, METRIC)
# generate the demand classifcation info
df_demand_cln = make_demand_classification(df, freq_in)
# save results and forecast data
state.report["afa"]["df_results"] = df_results
state.report["afa"]["df_preds"] = df_preds
state.report["afa"]["df_demand_cln"] = df_demand_cln
state.report["afa"]["df_model_dist"] = df_model_dist
state.report["afa"]["best_err"] = best_err
state.report["afa"]["naive_err"] = naive_err
state.report["afa"]["job_duration"] = time.time() - start
job_duration = state.report["afa"].get("job_duration", None)
if job_duration:
st.text(f"(completed in {format_timespan(job_duration)})")
return
def panel_accuracy():
"""
"""
df = state.report["data"].get("df", None)
df_demand_cln = state.report["afa"].get("df_demand_cln", None)
df_results = state.report["afa"].get("df_results", None)
df_model_dist = state["report"]["afa"].get("df_model_dist", None)
best_err = state["report"]["afa"].get("best_err", None)
naive_err = state["report"]["afa"].get("naive_err", None)
horiz = state.report["afa"].get("horiz", None)
freq_out = state.report["afa"].get("freq", None)
if df is None or df_results is None or df_model_dist is None:
return
def _calc_metrics(dd, metric="smape"):
if metric == "smape":
metric_func = calc_smape
elif metric == "wape":
metric_func = calc_wape
else:
raise NotImplementedError
ys = np.hstack(dd["y_cv"].apply(np.hstack))
yp = np.hstack(dd["yp_cv"].apply(np.hstack))
return metric_func(ys, yp)
df_acc = df_results.groupby(["channel", "family", "item_id"], as_index=False, sort=True) \
.apply(lambda dd: _calc_metrics(dd, METRIC)) \
.rename({None: METRIC}, axis=1)
with st.beta_expander("🎯 Forecast Summary", expanded=True):
_write(f"""
Step 4 – The forecast error is calculated as the [symmetric
mean absolute percentage error
(SMAPE)](https://en.wikipedia.org/wiki/Symmetric_mean_absolute_percentage_error)
via sliding window backtesting. Forecast _accuracy_ is calculated as
`100-SMAPE` and is averaged across all timeseries to give the _overall
accuracy_. The overall accuracy of the best naive models is used as a baseline.
The _classification_ distribution indicates the percentage timeseries
that have a _short_, _medium_, or _continuous_ lifecycle. The _Best Models_ chart
shows the distribution of each model type that were selected as the best model
across the dataset.
""")
df_cln = pd.DataFrame({"category": ["short", "medium", "continuous"]})
df_cln = df_cln.merge(
df_demand_cln["category"]
.value_counts(normalize=True)
.reset_index()
.rename({"index": "category", "category": "frac"}, axis=1),
on="category", how="left"
)
df_cln = df_cln.fillna(0.0)
df_cln["frac"] *= 100
df_cln["frac"] = df_cln["frac"].astype(int)
_cols = st.beta_columns(3)
with _cols[0]:
st.markdown("#### Parameters")
st.text(f"Horiz. Length:\t{horiz}\n"
f"Frequency:\t{FREQ_MAP_LONG[freq_out]}")
st.markdown("#### Classification")
st.text(f"Short:\t\t{df_cln.iloc[0]['frac']} %\n"
f"Medium:\t\t{df_cln.iloc[1]['frac']} %\n"
f"Continuous:\t{df_cln.iloc[2]['frac']} %")
with _cols[1]:
st.markdown("#### Best Models")
df_model_dist = df_model_dist.query("perc > 0")
labels = df_model_dist["model_type"].values
values = df_model_dist["perc"].values
fig = go.Figure(data=[go.Pie(labels=labels, values=values, hole=0.40)])
fig.update(layout_showlegend=False)
fig.update_layout(
margin={"t": 0, "b": 0, "r": 20, "l": 20},
width=200,
height=150,
)
#fig.update_traces(textinfo="percent+label", texttemplate="%{label} – %{percent:.1%f}")
fig.update_traces(textinfo="percent+label")
st.plotly_chart(fig)
acc_val = (1 - np.nanmean(df_acc[METRIC])) * 100.
acc_naive = (1 - naive_err.err_mean) * 100.
with _cols[2]:
st.markdown("#### Overall Accuracy")
st.markdown(
f"<div style='font-size:36pt;font-weight:bold'>{acc_val:.0f}%</div>"
f"({np.clip(acc_val - acc_naive, 0, None):.0f}% increase vs. naive)",
unsafe_allow_html=True)
return
@st.cache()
def make_df_top(df, df_results, groupby_cols, dt_start, dt_stop, cperc_thresh,
metric="smape"):
"""
"""
def calc_period_metrics(dd, dt_start, dt_stop):
"""
"""
dt_start = pd.Timestamp(dt_start)
dt_stop = pd.Timestamp(dt_stop)
ts = np.hstack(dd["ts_cv"].apply(np.hstack))
ix = (ts >= dt_start) & (ts <= dt_stop)
ys = np.hstack(dd["y_cv"].apply(np.hstack))[ix]
yp = np.hstack(dd["yp_cv"].apply(np.hstack))[ix]
if metric == "smape":
error = calc_smape(ys, yp)
elif metric == "wape":
error = calc_wape(ys, yp)
else:
raise NotImplementedError
return error
metric_name = f"{metric}_mean"
df.index.name = "timestamp"
dt_start = pd.Timestamp(dt_start).strftime("%Y-%m-%d")
dt_stop = pd.Timestamp(dt_stop)
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
import nose
import numpy as np
from numpy import nan
import pandas as pd
from distutils.version import LooseVersion
from pandas import (Index, Series, DataFrame, Panel, isnull,
date_range, period_range)
from pandas.core.index import MultiIndex
import pandas.core.common as com
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_panel_equal,
assert_equal)
import pandas.util.testing as tm
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
raise nose.SkipTest('scipy.interpolate.pchip missing')
# ----------------------------------------------------------------------
# Generic types test cases
class Generic(object):
_multiprocess_can_split_ = True
def setUp(self):
pass
@property
def _ndim(self):
return self._typ._AXIS_LEN
def _axes(self):
""" return the axes for my object typ """
return self._typ._AXIS_ORDERS
def _construct(self, shape, value=None, dtype=None, **kwargs):
""" construct an object for the given shape
if value is specified use that if its a scalar
if value is an array, repeat it as needed """
if isinstance(shape, int):
shape = tuple([shape] * self._ndim)
if value is not None:
if np.isscalar(value):
if value == 'empty':
arr = None
# remove the info axis
kwargs.pop(self._typ._info_axis_name, None)
else:
arr = np.empty(shape, dtype=dtype)
arr.fill(value)
else:
fshape = np.prod(shape)
arr = value.ravel()
new_shape = fshape // arr.shape[0]
if fshape % arr.shape[0] != 0:
raise Exception("invalid value passed in _construct")
arr = np.repeat(arr, new_shape).reshape(shape)
else:
arr = np.random.randn(*shape)
return self._typ(arr, dtype=dtype, **kwargs)
def _compare(self, result, expected):
self._comparator(result, expected)
def test_rename(self):
# single axis
for axis in self._axes():
kwargs = {axis: list('ABCD')}
obj = self._construct(4, **kwargs)
# no values passed
# self.assertRaises(Exception, o.rename(str.lower))
# rename a single axis
result = obj.rename(**{axis: str.lower})
expected = obj.copy()
setattr(expected, axis, list('abcd'))
self._compare(result, expected)
# multiple axes at once
def test_get_numeric_data(self):
n = 4
kwargs = {}
for i in range(self._ndim):
kwargs[self._typ._AXIS_NAMES[i]] = list(range(n))
# get the numeric data
o = self._construct(n, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# non-inclusion
result = o._get_bool_data()
expected = self._construct(n, value='empty', **kwargs)
self._compare(result, expected)
# get the bool data
arr = np.array([True, True, False, True])
o = self._construct(n, value=arr, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# _get_numeric_data is includes _get_bool_data, so can't test for
# non-inclusion
def test_get_default(self):
# GH 7725
d0 = "a", "b", "c", "d"
d1 = np.arange(4, dtype='int64')
others = "e", 10
for data, index in ((d0, d1), (d1, d0)):
s = Series(data, index=index)
for i, d in zip(index, data):
self.assertEqual(s.get(i), d)
self.assertEqual(s.get(i, d), d)
self.assertEqual(s.get(i, "z"), d)
for other in others:
self.assertEqual(s.get(other, "z"), "z")
self.assertEqual(s.get(other, other), other)
def test_nonzero(self):
# GH 4633
# look at the boolean/nonzero behavior for objects
obj = self._construct(shape=4)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=1)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=np.nan)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
# empty
obj = self._construct(shape=0)
self.assertRaises(ValueError, lambda: bool(obj))
# invalid behaviors
obj1 = self._construct(shape=4, value=1)
obj2 = self._construct(shape=4, value=1)
def f():
if obj1:
com.pprint_thing("this works and shouldn't")
self.assertRaises(ValueError, f)
self.assertRaises(ValueError, lambda: obj1 and obj2)
self.assertRaises(ValueError, lambda: obj1 or obj2)
self.assertRaises(ValueError, lambda: not obj1)
def test_numpy_1_7_compat_numeric_methods(self):
# GH 4435
# numpy in 1.7 tries to pass additional arguments to pandas functions
o = self._construct(shape=4)
for op in ['min', 'max', 'max', 'var', 'std', 'prod', 'sum', 'cumsum',
'cumprod', 'median', 'skew', 'kurt', 'compound', 'cummax',
'cummin', 'all', 'any']:
f = getattr(np, op, None)
if f is not None:
f(o)
def test_downcast(self):
# test close downcasting
o = self._construct(shape=4, value=9, dtype=np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
o = self._construct(shape=4, value=9.)
expected = o.astype(np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, expected)
o = self._construct(shape=4, value=9.5)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
# are close
o = self._construct(shape=4, value=9.000000000005)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
expected = o.astype(np.int64)
self._compare(result, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
# compound dtypes should raise NotImplementedError
def f(dtype):
return self._construct(shape=3, dtype=dtype)
self.assertRaises(NotImplementedError, f, [("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
f('M8[ns]')
def check_metadata(self, x, y=None):
for m in x._metadata:
v = getattr(x, m, None)
if y is None:
self.assertIsNone(v)
else:
self.assertEqual(v, getattr(y, m, None))
def test_metadata_propagation(self):
# check that the metadata matches up on the resulting ops
o = self._construct(shape=3)
o.name = 'foo'
o2 = self._construct(shape=3)
o2.name = 'bar'
# TODO
# Once Panel can do non-trivial combine operations
# (currently there is a raise in the Panel arith_ops to prevent
# this, though it actually does work),
# we can remove all of these try/except blocks on the actual operations
# ----------
# preserving
# ----------
# simple ops with scalars
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
result = getattr(o, op)(1)
self.check_metadata(o, result)
# ops with like
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
try:
result = getattr(o, op)(o)
self.check_metadata(o, result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
v1 = getattr(o, op)(o)
self.check_metadata(o, v1)
try:
self.check_metadata(o, v1 & v1)
except (ValueError):
pass
try:
self.check_metadata(o, v1 | v1)
except (ValueError):
pass
# combine_first
try:
result = o.combine_first(o2)
self.check_metadata(o, result)
except (AttributeError):
pass
# ---------------------------
# non-preserving (by default)
# ---------------------------
# add non-like
try:
result = o + o2
self.check_metadata(result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
# this is a name matching op
v1 = getattr(o, op)(o)
v2 = getattr(o, op)(o2)
self.check_metadata(v2)
try:
self.check_metadata(v1 & v2)
except (ValueError):
pass
try:
self.check_metadata(v1 | v2)
except (ValueError):
pass
def test_head_tail(self):
# GH5370
o = self._construct(shape=10)
# check all index types
for index in [tm.makeFloatIndex, tm.makeIntIndex, tm.makeStringIndex,
tm.makeUnicodeIndex, tm.makeDateIndex,
tm.makePeriodIndex]:
axis = o._get_axis_name(0)
setattr(o, axis, index(len(getattr(o, axis))))
# Panel + dims
try:
o.head()
except (NotImplementedError):
raise nose.SkipTest('not implemented on {0}'.format(
o.__class__.__name__))
self._compare(o.head(), o.iloc[:5])
self._compare(o.tail(), o.iloc[-5:])
# 0-len
self._compare(o.head(0), o.iloc[0:0])
self._compare(o.tail(0), o.iloc[0:0])
# bounded
self._compare(o.head(len(o) + 1), o)
self._compare(o.tail(len(o) + 1), o)
# neg index
self._compare(o.head(-3), o.head(7))
self._compare(o.tail(-3), o.tail(7))
def test_sample(self):
# Fixes issue: 2419
o = self._construct(shape=10)
###
# Check behavior of random_state argument
###
# Check for stability when receives seed or random state -- run 10
# times.
for test in range(10):
seed = np.random.randint(0, 100)
self._compare(
o.sample(n=4, random_state=seed), o.sample(n=4,
random_state=seed))
self._compare(
o.sample(frac=0.7, random_state=seed), o.sample(
frac=0.7, random_state=seed))
self._compare(
o.sample(n=4, random_state=np.random.RandomState(test)),
o.sample(n=4, random_state=np.random.RandomState(test)))
self._compare(
o.sample(frac=0.7, random_state=np.random.RandomState(test)),
o.sample(frac=0.7, random_state=np.random.RandomState(test)))
# Check for error when random_state argument invalid.
with tm.assertRaises(ValueError):
o.sample(random_state='astring!')
###
# Check behavior of `frac` and `N`
###
# Giving both frac and N throws error
with tm.assertRaises(ValueError):
o.sample(n=3, frac=0.3)
# Check that raises right error for negative lengths
with tm.assertRaises(ValueError):
o.sample(n=-3)
with tm.assertRaises(ValueError):
o.sample(frac=-0.3)
# Make sure float values of `n` give error
with tm.assertRaises(ValueError):
o.sample(n=3.2)
# Check lengths are right
self.assertTrue(len(o.sample(n=4) == 4))
self.assertTrue(len(o.sample(frac=0.34) == 3))
self.assertTrue(len(o.sample(frac=0.36) == 4))
###
# Check weights
###
# Weight length must be right
with tm.assertRaises(ValueError):
o.sample(n=3, weights=[0, 1])
with tm.assertRaises(ValueError):
bad_weights = [0.5] * 11
o.sample(n=3, weights=bad_weights)
with tm.assertRaises(ValueError):
bad_weight_series = Series([0, 0, 0.2])
o.sample(n=4, weights=bad_weight_series)
# Check won't accept negative weights
with tm.assertRaises(ValueError):
bad_weights = [-0.1] * 10
o.sample(n=3, weights=bad_weights)
# Check inf and -inf throw errors:
with tm.assertRaises(ValueError):
weights_with_inf = [0.1] * 10
weights_with_inf[0] = np.inf
o.sample(n=3, weights=weights_with_inf)
with tm.assertRaises(ValueError):
weights_with_ninf = [0.1] * 10
weights_with_ninf[0] = -np.inf
o.sample(n=3, weights=weights_with_ninf)
# All zeros raises errors
zero_weights = [0] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=zero_weights)
# All missing weights
nan_weights = [np.nan] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=nan_weights)
# A few dataframe test with degenerate weights.
easy_weight_list = [0] * 10
easy_weight_list[5] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10,
'easyweights': easy_weight_list})
sample1 = df.sample(n=1, weights='easyweights')
assert_frame_equal(sample1, df.iloc[5:6])
# Ensure proper error if string given as weight for Series, panel, or
# DataFrame with axis = 1.
s = Series(range(10))
with tm.assertRaises(ValueError):
s.sample(n=3, weights='weight_column')
panel = pd.Panel(items=[0, 1, 2], major_axis=[2, 3, 4],
minor_axis=[3, 4, 5])
with tm.assertRaises(ValueError):
panel.sample(n=1, weights='weight_column')
with tm.assertRaises(ValueError):
df.sample(n=1, weights='weight_column', axis=1)
# Check weighting key error
with tm.assertRaises(KeyError):
df.sample(n=3, weights='not_a_real_column_name')
# Check np.nan are replaced by zeros.
weights_with_nan = [np.nan] * 10
weights_with_nan[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6])
# Check None are also replaced by zeros.
weights_with_None = [None] * 10
weights_with_None[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6])
# Check that weights that don't sum to one are re-normalized.
weights_less_than_1 = [0] * 10
weights_less_than_1[0] = 0.5
tm.assert_frame_equal(
df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
###
# Test axis argument
###
# Test axis argument
df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10})
second_column_weight = [0, 1]
assert_frame_equal(
df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']])
# Different axis arg types
assert_frame_equal(df.sample(n=1, axis='columns',
weights=second_column_weight),
df[['col2']])
weight = [0] * 10
weight[5] = 0.5
assert_frame_equal(df.sample(n=1, axis='rows', weights=weight),
df.iloc[5:6])
assert_frame_equal(df.sample(n=1, axis='index', weights=weight),
df.iloc[5:6])
# Check out of range axis values
with tm.assertRaises(ValueError):
df.sample(n=1, axis=2)
with tm.assertRaises(ValueError):
df.sample(n=1, axis='not_a_name')
with tm.assertRaises(ValueError):
s = pd.Series(range(10))
s.sample(n=1, axis=1)
# Test weight length compared to correct axis
with | tm.assertRaises(ValueError) | pandas.util.testing.assertRaises |
"""This code is part of caerus and is not designed for usage of seperate parts."""
#--------------------------------------------------------------------------
# Name : caerus.py
# Author : E.Taskesen
# Contact : <EMAIL>
# Date : May. 2020
#--------------------------------------------------------------------------
from caerus.utils.ones2idx import ones2region, idx2region
import pandas as pd
import numpy as np
from tqdm import tqdm
import warnings
warnings.filterwarnings(action='ignore', message='Mean of empty slice')
# %% utils
def _check_input(X):
# Convert to dataframe
if isinstance(X, pd.DataFrame): raise Exception('[caerus] >Error: Input data must be of type numpy-array or list.')
if 'numpy' in str(type(X)) or 'list' in str(type(X)): X = pd.Series(X)
if X.shape[0]!=X.size: raise Exception('[caerus] >Error: Input data can only be a 1D vector.')
# reset index
X.reset_index(drop=True, inplace=True)
return X
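# Illustrative example (added note, not part of the original caerus code):
# _check_input accepts a list or 1-D numpy array and returns a pandas Series
# with a fresh RangeIndex, e.g.
#
# >>> _check_input([1.0, 2.5, 3.0])
# 0    1.0
# 1    2.5
# 2    3.0
# dtype: float64
#
# A DataFrame, or anything that is not strictly 1-D, raises an Exception instead.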
# %% Aggregation of scores over the windows and intervals
def _agg_scores(out, threshold=0):
outagg=np.nansum(out>0, axis=1)
# Normalize for the window size that is used. Towards the end only smaller windows are available, which would otherwise be unfair when applying the threshold.
windowCorrectionFactor = np.ones_like(outagg)*out.shape[1]
tmpvalue = np.arange(1, out.shape[1])[::-1]
windowCorrectionFactor[-len(tmpvalue):]=tmpvalue
outagg = outagg/windowCorrectionFactor
I=outagg>threshold
return(outagg, I)
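# Worked example of the window correction above (illustrative, not part of the
# original code): if out has 5 window columns, windowCorrectionFactor starts as
# [5, 5, ..., 5] and its last four entries are overwritten with [4, 3, 2, 1].
# The trailing positions can only be covered by that many windows, so dividing
# by the number of windows actually available keeps the threshold comparison fair.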
# %% Merge regions
def _get_locs_best(df, loc_start, loc_stop):
loc_start_best=np.zeros(len(loc_start)).astype(int)
loc_stop_best=np.zeros(len(loc_start)).astype(int)
for i in range(0,len(loc_start)):
loc_start_best[i]=df.iloc[loc_start[i][0]:loc_start[i][1]+1].idxmin()
tmpvalue=pd.DataFrame()
for k in range(0,len(loc_stop[i])):
idx_start=np.minimum(loc_stop[i][k][0], df.shape[0]-1)
idx_stop=np.minimum(loc_stop[i][k][1]+1, df.shape[0])
tmpvalue = | pd.concat((tmpvalue, df.iloc[idx_start:idx_stop])) | pandas.concat |
from context import dero
import pandas as pd
from pandas.util.testing import assert_frame_equal
from pandas import Timestamp
from numpy import nan
import numpy
class DataFrameTest:
df = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_duplicate_row = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/3/2000', 1.03), #this is a duplicated row
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_weight = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 1),
(10516, 'a', '1/4/2000', 1.04, 0),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 1),
(10516, 'b', '1/4/2000', 1.08, 1),
(10517, 'a', '1/1/2000', 1.09, 0),
(10517, 'a', '1/2/2000', 1.1, 0),
(10517, 'a', '1/3/2000', 1.11, 0),
(10517, 'a', '1/4/2000', 1.12, 1),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight'])
df_nan_byvar = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', 3),
('b', 4),
], columns = ['byvar', 'val'])
df_nan_byvar_and_val = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', nan),
('b', 4),
], columns = ['byvar', 'val'])
single_ticker_df = pd.DataFrame(data = [
('a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['byvar', 'Date', 'TICKER'])
df_datetime = df.copy()
df_datetime['Date'] = pd.to_datetime(df_datetime['Date'])
df_datetime_no_ret = df_datetime.copy()
df_datetime_no_ret.drop('RET', axis=1, inplace=True)
df_gvkey_str = pd.DataFrame([
('001076','3/1/1995'),
('001076','4/1/1995'),
('001722','1/1/2012'),
('001722','7/1/2012'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str['Date'] = pd.to_datetime(df_gvkey_str['Date'])
df_gvkey_num = df_gvkey_str.copy()
df_gvkey_num['GVKEY'] = df_gvkey_num['GVKEY'].astype('float64')
df_gvkey_str2 = pd.DataFrame([
('001076','2/1/1995'),
('001076','3/2/1995'),
('001722','11/1/2011'),
('001722','10/1/2011'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str2['Date'] = pd.to_datetime(df_gvkey_str2['Date'])
df_fill_data = pd.DataFrame(
data=[
(4, 'c', nan, 'a'),
(1, 'd', 3, 'a'),
(10, 'e', 100, 'a'),
(2, nan, 6, 'b'),
(5, 'f', 8, 'b'),
(11, 'g', 150, 'b'),
],
columns=['y', 'x1', 'x2', 'group']
)
class TestCumulate(DataFrameTest):
expect_between_1_3 = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.1, 1.1),
(10517, 'a', '1/3/2000', 1.11, 1.2210000000000003),
(10517, 'a', '1/4/2000', 1.12, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'cum_RET'])
expect_first = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.092624),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.224936),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.10, 1.10),
(10517, 'a', '1/3/2000', 1.11, 1.221),
(10517, 'a', '1/4/2000', 1.12, 1.36752),
], columns = ['PERMNO','byvar','Date', 'RET', 'cum_RET'])
def test_method_between_1_3(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[1,3])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_between_m2_0(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
#Actually same result as [1,3]
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_shifted_index(self):
df = self.df.copy()
df.index = df.index + 10
cum_df = dero.pandas.cumulate(df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_first(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'])
assert_frame_equal(self.expect_first, cum_df, check_dtype=False)
def test_grossify(self):
df = self.df.copy() #don't overwrite original
df['RET'] -= 1 #ungrossify
expect_first_grossify = self.expect_first.copy()
expect_first_grossify['cum_RET'] -= 1
expect_first_grossify['RET'] -= 1
cum_df = dero.pandas.cumulate(df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'], grossify=True)
assert_frame_equal(expect_first_grossify, cum_df, check_dtype=False)
class TestGroupbyMerge(DataFrameTest):
def test_subset_max(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'max', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 1.04),
(10516, 'a', '1/2/2000', 1.02, 1.04),
(10516, 'a', '1/3/2000', 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.08),
(10516, 'b', '1/2/2000', 1.06, 1.08),
(10516, 'b', '1/3/2000', 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.12),
(10517, 'a', '1/2/2000', 1.10, 1.12),
(10517, 'a', '1/3/2000', 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.12, 1.12)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_max'])
assert_frame_equal(expect_df, out)
def test_subset_std(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'std', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 0.012909944487358068),
(10516, 'a', '1/2/2000', 1.02, 0.012909944487358068),
(10516, 'a', '1/3/2000', 1.03, 0.012909944487358068),
(10516, 'a', '1/4/2000', 1.04, 0.012909944487358068),
(10516, 'b', '1/1/2000', 1.05, 0.012909944487358068),
(10516, 'b', '1/2/2000', 1.06, 0.012909944487358068),
(10516, 'b', '1/3/2000', 1.07, 0.012909944487358068),
(10516, 'b', '1/4/2000', 1.08, 0.012909944487358068),
(10517, 'a', '1/1/2000', 1.09, 0.012909944487358068),
(10517, 'a', '1/2/2000', 1.10, 0.012909944487358068),
(10517, 'a', '1/3/2000', 1.11, 0.012909944487358068),
(10517, 'a', '1/4/2000', 1.12, 0.012909944487358068)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_std'])
assert_frame_equal(expect_df, out)
def test_nan_byvar_transform(self):
expect_df = self.df_nan_byvar.copy()
expect_df['val_transform'] = expect_df['val']
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'transform', (lambda x: x))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_transform_numeric(self):
non_standard_index = self.df_nan_byvar_and_val.copy()
non_standard_index.index = [5,6,7,8]
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
expect_df.index = [5,6,7,8]
out = dero.pandas.groupby_merge(non_standard_index, 'byvar', 'transform', (lambda x: x + 1))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_and_nonstandard_index_transform_numeric(self):
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
def test_nan_byvar_sum(self):
expect_df = pd.DataFrame(data = [
('a', 1, 1.0),
(nan, 2, nan),
('b', 3, 7.0),
('b', 4, 7.0),
], columns = ['byvar', 'val', 'val_sum'])
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'sum')
assert_frame_equal(expect_df, out)
class TestLongToWide:
expect_df_with_colindex = pd.DataFrame(data = [
(10516, 'a', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar',
'RET1/1/2000', 'RET1/2/2000',
'RET1/3/2000', 'RET1/4/2000'])
expect_df_no_colindex = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/2/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/3/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/2/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/3/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/2/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/3/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET0',
'RET1', 'RET2', 'RET3'])
input_data = DataFrameTest()
ltw_no_dup_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_dup_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_no_dup_no_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET')
ltw_dup_no_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET')
df_list = [ltw_no_dup_colindex, ltw_dup_colindex,
ltw_no_dup_no_colindex, ltw_dup_no_colindex]
def test_no_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_no_dup_colindex)
def test_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_dup_colindex)
def test_no_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_no_dup_no_colindex)
def test_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_dup_no_colindex)
def test_no_extra_vars(self):
for df in self.df_list:
assert ('__idx__','__key__') not in df.columns
class TestPortfolioAverages:
input_data = DataFrameTest()
expect_avgs_no_wt = pd.DataFrame(data = [
(1, 'a', 1.0250000000000001),
(1, 'b', 1.0550000000000002),
(2, 'a', 1.1050000000000002),
(2, 'b', 1.0750000000000002),
], columns = ['portfolio', 'byvar', 'RET'])
expect_avgs_wt = pd.DataFrame(data = [
(1, 'a', 1.0250000000000001, 1.025),
(1, 'b', 1.0550000000000002, 1.0550000000000002),
(2, 'a', 1.1050000000000002, 1.12),
(2, 'b', 1.0750000000000002, 1.0750000000000002),
], columns = ['portfolio', 'byvar', 'RET', 'RET_wavg'])
expect_ports = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0, 1),
(10516, 'a', '1/2/2000', 1.02, 1, 1),
(10516, 'a', '1/3/2000', 1.03, 1, 1),
(10516, 'a', '1/4/2000', 1.04, 0, 1),
(10516, 'b', '1/1/2000', 1.05, 1, 1),
(10516, 'b', '1/2/2000', 1.06, 1, 1),
(10516, 'b', '1/3/2000', 1.07, 1, 2),
(10516, 'b', '1/4/2000', 1.08, 1, 2),
(10517, 'a', '1/1/2000', 1.09, 0, 2),
(10517, 'a', '1/2/2000', 1.1, 0, 2),
(10517, 'a', '1/3/2000', 1.11, 0, 2),
(10517, 'a', '1/4/2000', 1.12, 1, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight', 'portfolio'])
avgs, ports = dero.pandas.portfolio_averages(input_data.df_weight, 'RET', 'RET', ngroups=2,
byvars='byvar')
w_avgs, w_ports = dero.pandas.portfolio_averages(input_data.df_weight, 'RET', 'RET', ngroups=2,
byvars='byvar', wtvar='weight')
def test_simple_averages(self):
assert_frame_equal(self.expect_avgs_no_wt, self.avgs, check_dtype=False)
def test_weighted_averages(self):
assert_frame_equal(self.expect_avgs_wt, self.w_avgs, check_dtype=False)
def test_portfolio_construction(self):
print(self.ports)
assert_frame_equal(self.expect_ports, self.ports, check_dtype=False)
assert_frame_equal(self.expect_ports, self.w_ports, check_dtype=False)
class TestWinsorize(DataFrameTest):
def test_winsor_40_subset_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.022624),
(10516, 'a', '1/2/2000', 1.022624),
(10516, 'a', '1/3/2000', 1.02672),
(10516, 'a', '1/4/2000', 1.02672),
(10516, 'b', '1/1/2000', 1.062624),
(10516, 'b', '1/2/2000', 1.062624),
(10516, 'b', '1/3/2000', 1.06672),
(10516, 'b', '1/4/2000', 1.06672),
(10517, 'a', '1/1/2000', 1.102624),
(10517, 'a', '1/2/2000', 1.102624),
(10517, 'a', '1/3/2000', 1.10672),
(10517, 'a', '1/4/2000', 1.10672),
], columns = ['PERMNO', 'byvar', 'Date', 'RET'])
wins = dero.pandas.winsorize(self.df, .4, subset='RET', byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, wins, check_less_precise=True)
class TestRegBy(DataFrameTest):
def create_indf(self):
indf = self.df_weight.copy()
indf['key'] = indf['PERMNO'].astype(str) + '_' + indf['byvar']
return indf
def test_regby_nocons(self):
indf = self.create_indf()
expect_df = pd.DataFrame(data = [
(0.48774684748988806, '10516_a'),
(0.9388636664168903, '10516_b'),
(0.22929206076239614, '10517_a'),
], columns = ['coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key', cons=False)
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
def test_regby_cons(self):
indf = self.create_indf()
expect_df = pd.DataFrame(data = [
(0.49999999999999645, 5.329070518200751e-15, '10516_a'),
(0.9999999999999893, 1.0658141036401503e-14, '10516_b'),
(-32.89999999999997, 29.999999999999982, '10517_a'),
], columns = ['const', 'coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key')
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
def test_regby_cons_low_obs(self):
indf = self.create_indf().loc[:8,:] #makes it so that one byvar only has one obs
expect_df = pd.DataFrame(data = [
(0.49999999999999645, 5.329070518200751e-15, '10516_a'),
(0.9999999999999893, 1.0658141036401503e-14, '10516_b'),
(nan, nan, '10517_a'),
], columns = ['const', 'coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key')
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
class TestExpandMonths(DataFrameTest):
def test_expand_months_tradedays(self):
expect_df = pd.DataFrame(data = [
(Timestamp('2000-01-03 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-04 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-05 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-06 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-07 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-10 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-11 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-12 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-13 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-14 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-18 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-19 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-20 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-21 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-24 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-25 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-26 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-27 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-28 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-31 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['Daily Date', 'byvar', 'Date', 'TICKER'])
em = dero.pandas.expand_months(self.single_ticker_df)
assert_frame_equal(expect_df.sort_index(axis=1), em.sort_index(axis=1))
def test_expand_months_calendardays(self):
expect_df = pd.DataFrame(data = [
(Timestamp('2000-01-01 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-02 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-03 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-04 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-05 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-06 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-07 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-08 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-09 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-10 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-11 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-12 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-13 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-14 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-15 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-16 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-17 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-18 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-19 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-20 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-21 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-22 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-23 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-24 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-25 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-26 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-27 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-28 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-29 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-30 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-31 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['Daily Date', 'byvar', 'Date', 'TICKER'])
em = dero.pandas.expand_months(self.single_ticker_df, trade_days=False)
assert_frame_equal(expect_df.sort_index(axis=1), em.sort_index(axis=1))
class TestPortfolio(DataFrameTest):
def test_portfolio_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 2),
(10516, 'a', '1/4/2000', 1.04, 2),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 2),
(10516, 'b', '1/4/2000', 1.08, 2),
(10517, 'a', '1/1/2000', 1.09, 1),
(10517, 'a', '1/2/2000', 1.1, 1),
(10517, 'a', '1/3/2000', 1.11, 2),
(10517, 'a', '1/4/2000', 1.12, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'portfolio'])
p = dero.pandas.portfolio(self.df, 'RET', ngroups=2, byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, p, check_dtype=False)
def test_portfolio_with_nan_and_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', nan, 0),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 1), #changed from 2 to 1 when updated nan handling
(10516, 'a', '1/4/2000', 1.04, 2),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 2),
(10516, 'b', '1/4/2000', 1.08, 2),
(10517, 'a', '1/1/2000', 1.09, 1),
(10517, 'a', '1/2/2000', 1.1, 1),
(10517, 'a', '1/3/2000', 1.11, 2),
(10517, 'a', '1/4/2000', 1.12, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'portfolio'])
indf = self.df.copy()
indf.loc[0, 'RET'] = nan
p = dero.pandas.portfolio(indf, 'RET', ngroups=2, byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, p, check_dtype=False)
class TestConvertSASDateToPandasDate:
df_sasdate = pd.DataFrame(data = [
('011508', 16114.0),
('011508', 16482.0),
('011508', 17178.0),
('011508', 17197.0),
('011508', 17212.0),
], columns = ['gvkey', 'datadate'])
df_sasdate_nan = pd.DataFrame(data = [
('011508', 16114.0),
('011508', 16482.0),
('011508', 17178.0),
('011508', 17197.0),
('011508', nan),
('011508', 17212.0),
], columns = ['gvkey', 'datadate'])
def test_convert(self):
expect_df = pd.DataFrame(data = [
(numpy.datetime64('2004-02-13T00:00:00.000000000'),),
(numpy.datetime64('2005-02-15T00:00:00.000000000'),),
(numpy.datetime64('2007-01-12T00:00:00.000000000'),),
(numpy.datetime64('2007-01-31T00:00:00.000000000'),),
(numpy.datetime64('2007-02-15T00:00:00.000000000'),),
], columns = [0])
converted = pd.DataFrame(dero.pandas.convert_sas_date_to_pandas_date(self.df_sasdate['datadate']))
assert_frame_equal(expect_df, converted)
def test_convert_nan(self):
expect_df = pd.DataFrame(data = [
(numpy.datetime64('2004-02-13T00:00:00.000000000'),),
(numpy.datetime64('2005-02-15T00:00:00.000000000'),),
(numpy.datetime64('2007-01-12T00:00:00.000000000'),),
(numpy.datetime64('2007-01-31T00:00:00.000000000'),),
(numpy.datetime64('NaT'),),
(numpy.datetime64('2007-02-15T00:00:00.000000000'),),
], columns = [0])
converted = pd.DataFrame(dero.pandas.convert_sas_date_to_pandas_date(self.df_sasdate_nan['datadate']))
assert_frame_equal(expect_df, converted)
class TestMapWindows(DataFrameTest):
times = [
[-4, -2, 0],
[-3, 1, 2],
[4, 5, 6],
[0, 1, 2],
[-1, 0, 1]
]
df_period_str = pd.DataFrame([
(10516, '1/1/2000', 1.01),
(10516, '1/2/2000', 1.02),
(10516, '1/3/2000', 1.03),
(10516, '1/4/2000', 1.04),
(10516, '1/5/2000', 1.05),
(10516, '1/6/2000', 1.06),
(10516, '1/7/2000', 1.07),
(10516, '1/8/2000', 1.08),
(10517, '1/1/2000', 1.09),
(10517, '1/2/2000', 1.10),
(10517, '1/3/2000', 1.11),
(10517, '1/4/2000', 1.12),
(10517, '1/5/2000', 1.05),
(10517, '1/6/2000', 1.06),
(10517, '1/7/2000', 1.07),
(10517, '1/8/2000', 1.08),
], columns = ['PERMNO','Date', 'RET'])
df_period = df_period_str.copy()
df_period['Date'] = pd.to_datetime(df_period['Date'])
expect_dfs = [
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 1),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 2),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 2),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 1),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 2),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 2),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__']),
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 1),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 1),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 1),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 2),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 1),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 1),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 1),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 2),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__']),
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 2),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 3),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 2),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 3),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__']),
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 2),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 3),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 2),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 3),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__']),
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 2),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 3),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 2),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 3),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__'])
]
expect_df_first = pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 1),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 1),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 1),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 1),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 1),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 1),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 1),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 1),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 1),
(10517, | Timestamp('2000-01-06 00:00:00') | pandas.Timestamp |
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import glob
import json
import collections
TIMEFRAMES = [
"2017-06-12_2017-07-09_organic",
"2017-07-10_2017-08-06_organic",
"2017-08-07_2017-09-03_organic",
"2017-12-03_2017-12-30_organic",
"2018-01-01_2018-01-28_organic",
"2018-01-29_2018-02-25_organic",
"2018-02-26_2018-03-25_organic"
]
DATA_TYPES_REFERENCE = [
"reference_metadata/only_derived",
"reference_metadata/only_reference_node",
"reference_metadata/only_reference_property",
"reference_metadata/derived_+_reference_node",
"reference_metadata/derived_+_reference_property",
"reference_metadata/reference_node_+_reference_property",
"reference_metadata/all_three"
]
DATA_TYPES_QUALIFIER = [
"qualifier_metadata/property_qualifier"
]
DATA_TYPES_RANK = [
"rank_metadata/rank_property",
"rank_metadata/preferred_rank_+_rank_property",
"rank_metadata/normal_rank_+_rank_property",
"rank_metadata/deprecated_rank_+_rank_property",
"rank_metadata/preferred_+_normal_rank_+_rank_property",
"rank_metadata/preferred_+_deprecated_rank_+_rank_property",
"rank_metadata/normal_+_deprecated_rank_+_rank_property",
"rank_metadata/all_ranks_+_rank_property",
"rank_metadata/normal_rank",
"rank_metadata/deprecated_rank",
"rank_metadata/preferred_rank",
"rank_metadata/preferred_+_normal_rank",
"rank_metadata/preferred_+_deprecated_rank",
"rank_metadata/normal_+_deprecated_rank",
"rank_metadata/all_ranks",
"rank_metadata/best_rank_property",
"rank_metadata/rank_property_+_best_rank_property",
"rank_metadata/preferred_rank_+_rank_property_+_best_rank_property",
"rank_metadata/normal_rank_+_rank_property_+_best_rank_property",
"rank_metadata/deprecated_rank_+_rank_property_+_best_rank_property",
"rank_metadata/preferred_+_normal_rank_+_rank_property_+_best_rank_property",
"rank_metadata/preferred_+_deprecated_rank_+_rank_property_+_best_rank_property",
"rank_metadata/normal_+_deprecated_rank_+_rank_property_+_best_rank_property",
"rank_metadata/all_ranks_+_rank_property_+_best_rank_property",
"rank_metadata/normal_rank_+_best_rank_property",
"rank_metadata/deprecated_rank_+_best_rank_property",
"rank_metadata/preferred_rank_+_best_rank_property",
"rank_metadata/preferred_+_normal_rank_+_best_rank_property",
"rank_metadata/preferred_+_deprecated_rank_+_best_rank_property",
"rank_metadata/normal_+_deprecated_rank_+_best_rank_property",
"rank_metadata/all_ranks_+_best_rank_property"
]
def plot_redundant_detection_data_exact():
for types in [DATA_TYPES_REFERENCE, DATA_TYPES_QUALIFIER, DATA_TYPES_RANK]:
# the overall count of metadata queries
total_queries = 0
marked_queries = 0
# generate a .csv-ready dataframe through a formatted dictionary for ALL timeframes overall
csv_ready_dict_overall = {}
csv_ready_dict_overall["queries"] = []
csv_ready_dict_overall["total amount or marked"] = []
csv_ready_dict_overall["datatype"] = []
csv_ready_dict_overall["metadata"] = []
csv_ready_timeframe_heatmap_dict = {}
csv_ready_timeframe_heatmap_dict["metadata queries"] = []
csv_ready_timeframe_heatmap_dict["percentage on total metadata queries"] = []
csv_ready_timeframe_heatmap_dict["total amount or marked"] = []
csv_ready_timeframe_heatmap_dict["datatype"] = []
csv_ready_timeframe_heatmap_dict["metadata"] = []
csv_ready_timeframe_heatmap_dict["timeframe"] = []
# to summarize the values across the multiple reference and qualifier datatypes
csv_ready_dict_overall_one_type = {}
csv_ready_dict_overall_one_type["queries"] = []
csv_ready_dict_overall_one_type["total amount or marked"] = []
csv_ready_dict_overall_one_type["datatype"] = []
csv_ready_dict_overall_one_type["metadata"] = []
for location in TIMEFRAMES:
# generate a .csv-ready dataframe through a formatted dictionary for every timeframe
csv_ready_dict_timeframe = {}
csv_ready_dict_timeframe["queries"] = []
csv_ready_dict_timeframe["total amount or marked"] = []
csv_ready_dict_timeframe["datatype"] = []
csv_ready_dict_timeframe["metadata"] = []
csv_ready_dict_timeframe_one_type = {}
csv_ready_dict_timeframe_one_type["queries"] = []
csv_ready_dict_timeframe_one_type["total amount or marked"] = []
csv_ready_dict_timeframe_one_type["datatype"] = []
csv_ready_dict_timeframe_one_type["metadata"] = []
i = 0
for type in types:
# Retrieve the generated redundant_detection data
information_path = "data/statistical_information/redundant_detection/" + location[:21] + \
"/" + type + "_renaming_information.json"
with open(information_path) as information_data:
information_dict = json.load(information_data)
# only include datatypes with at least one query
if information_dict["Total queries: "] > 0:
total_queries += information_dict["Total queries: "]
marked_queries += information_dict["Queries marked: "]
csv_ready_dict_timeframe["queries"].append(information_dict["Total queries: "])
csv_ready_dict_timeframe["total amount or marked"].append("Total Queries")
csv_ready_dict_timeframe["queries"].append(information_dict["Queries marked: "])
csv_ready_dict_timeframe["total amount or marked"].append("Queries marked")
csv_ready_timeframe_heatmap_dict["metadata queries"].\
append( information_dict["Total queries: "] )
csv_ready_timeframe_heatmap_dict["total amount or marked"].append("Total Queries")
csv_ready_timeframe_heatmap_dict["metadata queries"].\
append( information_dict["Queries marked: "] )
csv_ready_timeframe_heatmap_dict["total amount or marked"].append("Queries marked")
csv_ready_timeframe_heatmap_dict["timeframe"].append(location[:21].replace("_", " - "))
csv_ready_timeframe_heatmap_dict["timeframe"].append(location[:21].replace("_", " - "))
# format the type a bit nicer
# e.g., rank_metadata/deprecated_rank_+_rank_property -> deprecated rank & rank property
nice_type = type.split("/")[1].replace("_", " ").replace("+", "&")
csv_ready_dict_timeframe["datatype"].append(nice_type)
csv_ready_dict_timeframe["datatype"].append(nice_type)
csv_ready_timeframe_heatmap_dict["datatype"].append(nice_type)
csv_ready_timeframe_heatmap_dict["datatype"].append(nice_type)
# extract the metadata
# e.g., rank_metadata/deprecated_rank_+_rank_property -> rank metadata
metadata = type.split("/")[0].replace(" ", "")
csv_ready_dict_timeframe["metadata"].append(metadata)
csv_ready_dict_timeframe["metadata"].append(metadata)
csv_ready_timeframe_heatmap_dict["metadata"].append(metadata)
csv_ready_timeframe_heatmap_dict["metadata"].append(metadata)
# update the overall dict and summarize the timeframe values
if nice_type not in csv_ready_dict_overall["datatype"]:
csv_ready_dict_overall["queries"].append(information_dict["Total queries: "])
csv_ready_dict_overall["total amount or marked"].append("Total Queries")
csv_ready_dict_overall["queries"].append(information_dict["Queries marked: "])
csv_ready_dict_overall["total amount or marked"].append("Queries marked")
csv_ready_dict_overall["datatype"].append(nice_type)
csv_ready_dict_overall["datatype"].append(nice_type)
csv_ready_dict_overall["metadata"].append(metadata)
csv_ready_dict_overall["metadata"].append(metadata)
else:
i = csv_ready_dict_overall["datatype"].index(nice_type)
csv_ready_dict_overall["queries"][i] += information_dict["Total queries: "]
csv_ready_dict_overall["total amount or marked"][i] = "Total Queries"
csv_ready_dict_overall["queries"][i+1] += information_dict["Queries marked: "]
csv_ready_dict_overall["total amount or marked"][i+1] = "Queries marked"
csv_ready_dict_overall["datatype"][i] = nice_type
csv_ready_dict_overall["datatype"][i+1] = nice_type
csv_ready_dict_overall["metadata"][i] = metadata
csv_ready_dict_overall["metadata"][i+1] = metadata
# update the overall dict to narrow it down to one type
if len(csv_ready_dict_overall_one_type["queries"]) == 0:
csv_ready_dict_overall_one_type["queries"].append(information_dict["Total queries: "])
csv_ready_dict_overall_one_type["total amount or marked"].append("Total Queries")
csv_ready_dict_overall_one_type["queries"].append(information_dict["Queries marked: "])
csv_ready_dict_overall_one_type["total amount or marked"].append("Queries marked")
csv_ready_dict_overall_one_type["datatype"].append(nice_type)
csv_ready_dict_overall_one_type["datatype"].append(nice_type)
csv_ready_dict_overall_one_type["metadata"].append(metadata)
csv_ready_dict_overall_one_type["metadata"].append(metadata)
else:
csv_ready_dict_overall_one_type["queries"][0] += information_dict["Total queries: "]
csv_ready_dict_overall_one_type["total amount or marked"][0] = "Total Queries"
csv_ready_dict_overall_one_type["queries"][1] += information_dict["Queries marked: "]
csv_ready_dict_overall_one_type["total amount or marked"][1] = "Queries marked"
csv_ready_dict_overall_one_type["datatype"][0] = metadata
csv_ready_dict_overall_one_type["datatype"][1] = metadata
csv_ready_dict_overall_one_type["metadata"][0] = metadata
csv_ready_dict_overall_one_type["metadata"][1] = metadata
# update the overall dict to narrow it down to one type per timeframe
if len(csv_ready_dict_timeframe_one_type["queries"]) == 0:
csv_ready_dict_timeframe_one_type["queries"].append(information_dict["Total queries: "])
csv_ready_dict_timeframe_one_type["total amount or marked"].append("Total Queries")
csv_ready_dict_timeframe_one_type["queries"].append(information_dict["Queries marked: "])
csv_ready_dict_timeframe_one_type["total amount or marked"].append("Queries marked")
csv_ready_dict_timeframe_one_type["datatype"].append(nice_type)
csv_ready_dict_timeframe_one_type["datatype"].append(nice_type)
csv_ready_dict_timeframe_one_type["metadata"].append(metadata)
csv_ready_dict_timeframe_one_type["metadata"].append(metadata)
else:
csv_ready_dict_timeframe_one_type["queries"][0] += information_dict["Total queries: "]
csv_ready_dict_timeframe_one_type["total amount or marked"][0] = "Total Queries"
csv_ready_dict_timeframe_one_type["queries"][1] += information_dict["Queries marked: "]
csv_ready_dict_timeframe_one_type["total amount or marked"][1] = "Queries marked"
csv_ready_dict_timeframe_one_type["datatype"][0] = metadata
csv_ready_dict_timeframe_one_type["datatype"][1] = metadata
csv_ready_dict_timeframe_one_type["metadata"][0] = metadata
csv_ready_dict_timeframe_one_type["metadata"][1] = metadata
# insert the percentage information into the heatmap dict with the help of the per-metadata, per-timeframe dict
for index in range(len(csv_ready_timeframe_heatmap_dict["timeframe"])):
if csv_ready_timeframe_heatmap_dict["timeframe"][index] == location[:21].replace("_", " - "):
if index % 2 == 0:
csv_ready_timeframe_heatmap_dict["percentage on total metadata queries"]. \
append(csv_ready_timeframe_heatmap_dict["metadata queries"][index] / csv_ready_dict_timeframe_one_type["queries"][0])
else:
csv_ready_timeframe_heatmap_dict["percentage on total metadata queries"]. \
append((csv_ready_timeframe_heatmap_dict["metadata queries"][index-1]
- csv_ready_timeframe_heatmap_dict["metadata queries"][index] )
/ (csv_ready_dict_timeframe_one_type["queries"][0] -
csv_ready_dict_timeframe_one_type["queries"][1]))
# save the dict for the timeframe
timeframe_path = "data/statistical_information/redundant_detection/" + location[:21] \
+ "/" + type.split("/")[0] + "/overall_redundant_information_exact.json"
with open(timeframe_path, "w") as overall_data:
json.dump(csv_ready_dict_timeframe_one_type, overall_data)
# plot the timeframe overall data
df = | pd.DataFrame(csv_ready_dict_timeframe_one_type) | pandas.DataFrame |
# Scheduled DAG for working with the ENADE data
from airflow import DAG
# Operator imports
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator, BranchPythonOperator
from datetime import datetime, timedelta
import pandas as pd
import zipfile
import pyodbc
import sqlalchemy
data_path = '/root/download'
# Default arguments
default_args = {
'owner': 'diego.rech', # DAG owner
'depends_on_past': False, # Whether the DAG depends on a previous run finishing before it starts
'start_date': datetime(2020, 11, 30, 23), # Start date of the DAG's schedule
'email': '<EMAIL>', # Email to be notified, if configured
'email_on_failure': False, # Whether to send emails on failure
'email_on_retry': False, # Whether to send emails when a retry happens
'retries': 1, # How many retries should be attempted
'retry_delay': timedelta(minutes=1) # How long to wait before retrying
}
# DAG definition
dag = DAG(
'treino-04', # DAG name
description='Uses the ENADE data to demonstrate parallelism', # Description that makes the DAG easier to identify
default_args=default_args,
schedule_interval='*/10 * * * *' # Execution interval in cron syntax
)
# Task that marks the start of the process
start_processing = BashOperator(
task_id='start_processing',
bash_command='echo "Starting Preprocessing! Vai!"',
dag=dag
)
# Downloads the 2019 ENADE data from the official site
task_get_data = BashOperator(
task_id='get_data',
bash_command=f'wget -P /root/download http://download.inep.gov.br/microdados/Enade_Microdados/microdados_enade_2019.zip -O {data_path}/enade_2019.zip', # -O (capital) saves the archive where unzip_data expects it
dag=dag
)
def unzip_data():
with zipfile.ZipFile(f'{data_path}/enade_2019.zip', 'r') as zipped:
zipped.extractall(f'{data_path}')
# Task responsible for unzipping the file
task_unzip_data = PythonOperator(
task_id = 'unzip_data',
python_callable = unzip_data,
dag=dag
)
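# Note (illustrative; the dependency wiring is not visible in this excerpt): the
# tasks defined so far would typically be chained at the end of the DAG file with
# the bitshift operators, e.g.:
#
# start_processing >> task_get_data >> task_unzip_data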
def apply_filter():
cols = ['CO_GRUPO', 'TP_SEXO', 'NU_IDADE', 'NT_GER', 'NT_FG', 'NT_CE', 'QE_I01', 'QE_I02', 'QE_I04', 'QE_I05', 'QE_I08']
enade = | pd.read_csv(f'{data_path}/microdados_enade_2019/2019/3.DADOS/microdados_enade_2019.txt', sep=';', decimal=',', usecols=cols) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import os
import re
import matplotlib.pyplot as plt
import numpy as np
import json
import plotly.io as pio
import plotly.offline as pl
import plotly.graph_objs as go
import plotly.express as px
from plotly.offline import download_plotlyjs,init_notebook_mode,plot,iplot
init_notebook_mode(connected=True)
import shapefile as shp
pl.init_notebook_mode()
pio.renderers.default = "browser"
| pd.set_option('display.max_rows', None) | pandas.set_option |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from pandas.core.base import PandasObject
from scipy.optimize import minimize
from decorator import decorator
from sklearn.covariance import ledoit_wolf
@decorator
def mean_var_weights(func_covar, *args, **kwargs):
"""
Calculates the mean-variance weights given a DataFrame of returns.
Args:
* args[0]: returns (DataFrame): Returns for multiple securities.
* args[1]: weight_bounds ((low, high)): Weigh limits for optimization.
* args[2]: rf (float): `Risk-free rate <https://www.investopedia.com/terms/r/risk-freerate.asp>`_ used in utility calculation
* args[3]: options (dict): options for minimizing, e.g. {'maxiter': 10000 }
Returns:
Series {col_name: weight}
"""
if len(args)<4:
raise Exception("Not Enough Parameters")
returns = args[0]
weight_bounds = args[1]
rf = args[2]
options = args[3]
def fitness(weights, exp_rets, covar, rf):
# portfolio mean
mean = sum(exp_rets * weights)
# portfolio var
var = np.dot(np.dot(weights, covar), weights)
# utility - i.e. sharpe ratio
util = (mean - rf) / np.sqrt(var)
# negative because we want to maximize and optimizer
# minimizes metric
return -util
n = len(returns.columns)
# expected return defaults to mean return by default
exp_rets = returns.mean()
# calc covariance matrix
covar = func_covar(returns)
weights = np.ones([n]) / n
bounds = [weight_bounds for i in range(n)]
# sum of weights must be equal to 1
constraints = ({'type': 'eq', 'fun': lambda W: sum(W) - 1.})
optimized = minimize(fitness, weights, (exp_rets, covar, rf),
method='SLSQP', constraints=constraints,
bounds=bounds, options=options)
# check if success
if not optimized.success:
raise Exception(optimized.message)
# return weight vector
return pd.Series({returns.columns[i]: optimized.x[i] for i in range(n)})
@mean_var_weights
def mvw_standard(prices,
weight_bounds=(0.,1.),
rf = 0.,
options = None):
"""
Calculates the mean-variance weights given a DataFrame of prices.
Wraps mean_var_weights with standard covariance calculation method
Args:
* prices (DataFrame): Prices for multiple securities.
* weight_bounds ((low, high)): Weigh limits for optimization.
* rf (float): `Risk-free rate <https://www.investopedia.com/terms/r/risk-freerate.asp>`_ used in utility calculation
* options (dict): options for minimizing, e.g. {'maxiter': 10000 }
Returns:
Series {col_name: weight}
"""
r = prices.to_returns().dropna()
covar = r.cov()
return covar
@mean_var_weights
def mvw_ledoit_wolf(prices,
weight_bounds=(0.,1.),
rf = 0.,
options = None):
"""
Calculates the mean-variance weights given a DataFrame of prices.
Wraps mean_var_weights with ledoit_wolf covariance calculation method
Args:
* prices (DataFrame): Prices for multiple securities.
* weight_bounds ((low, high)): Weigh limits for optimization.
* rf (float): `Risk-free rate <https://www.investopedia.com/terms/r/risk-freerate.asp>`_ used in utility calculation
* options (dict): options for minimizing, e.g. {'maxiter': 10000 }
Returns:
Series {col_name: weight}
"""
r = prices.to_returns().dropna()
covar = ledoit_wolf(r)[0]
return covar
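# Illustrative usage sketch (added for clarity, not part of the original module).
# It assumes the `to_returns` accessor used above is available on DataFrames
# (e.g. via ffn's PandasObject extensions) and builds a synthetic price frame.
# Compared to the sample covariance in mvw_standard, mvw_ledoit_wolf shrinks the
# covariance toward a structured target, which tends to be better conditioned
# when the number of assets is large relative to the number of observations.
def _example_mean_variance_usage(): # illustrative only; never called
    rng = np.random.default_rng(0)
    prices = pd.DataFrame(
        100 * np.cumprod(1 + rng.normal(0.0, 0.01, size=(250, 3)), axis=0),
        columns=['asset_a', 'asset_b', 'asset_c'])
    w_sample = mvw_standard(prices) # weights from the sample covariance
    w_shrunk = mvw_ledoit_wolf(prices) # weights from the Ledoit-Wolf covariance
    return w_sample, w_shrunk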
def _erc_weights_ccd(x0,
cov,
b,
maximum_iterations,
tolerance):
"""
Calculates the equal risk contribution / risk parity weights given
a DataFrame of returns.
Args:
* x0 (np.array): Starting asset weights.
* cov (np.array): covariance matrix.
* b (np.array): Risk target weights.
* maximum_iterations (int): Maximum iterations in iterative solutions.
* tolerance (float): Tolerance level in iterative solutions.
Returns:
np.array {weight}
Reference:
Griveau-Billion, Theophile and Richard, Jean-Charles and Roncalli,
Thierry, A Fast Algorithm for Computing High-Dimensional Risk Parity
Portfolios (2013).
Available at SSRN: https://ssrn.com/abstract=2325255
"""
n = len(x0)
x = x0.copy()
var = np.diagonal(cov)
ctr = cov.dot(x)
sigma_x = np.sqrt(x.T.dot(ctr))
for iteration in range(maximum_iterations):
for i in range(n):
alpha = var[i]
beta = ctr[i] - x[i] * alpha
gamma = -b[i] * sigma_x
x_tilde = (-beta + np.sqrt(
beta * beta - 4 * alpha * gamma)) / (2 * alpha)
x_i = x[i]
ctr = ctr - cov[i] * x_i + cov[i] * x_tilde
sigma_x = sigma_x * sigma_x - 2 * x_i * cov[i].dot(
x) + x_i * x_i * var[i]
x[i] = x_tilde
sigma_x = np.sqrt(sigma_x + 2 * x_tilde * cov[i].dot(
x) - x_tilde * x_tilde * var[i])
# check convergence
if np.power((x - x0) / x.sum(), 2).sum() < tolerance:
return x / x.sum()
x0 = x.copy()
# no solution found
raise ValueError('No solution found after {0} iterations.'.format(
maximum_iterations))
@decorator
def risk_parity_weights(func_covar, *args, **kwargs):
"""
Calculates the equal risk contribution / risk parity weights given a
DataFrame of returns.
Args:
* args[0]: returns (DataFrame): Returns or Prices for multiple securities.
* args[1]: initial_weights (list): Starting asset weights [default inverse vol].
* args[2]: risk_weights (list): Risk target weights [default equal weight].
* args[3]: risk_parity_method (str): Risk parity estimation method.
Currently supported:
- ccd (cyclical coordinate descent)[default]
* args[4]: maximum_iterations (int): Maximum iterations in iterative solutions.
* args[5]: tolerance (float): Tolerance level in iterative solutions.
* args[6]: min_n (int): Number of leading assets that keep their supplied risk weights.
* args[7]: max_n (int): Upper bound on the assets receiving risk budget; the leftover budget is split equally across the lowest-variance assets beyond min_n.
Returns:
Series {col_name: weight}
"""
if len(args)<8:
raise Exception("Not Enough Parameters")
returns = args[0]
initial_weights = args[1]
risk_weights = args[2]
risk_parity_method = args[3]
maximum_iterations = args[4]
tolerance = args[5]
min_n = args[6]
max_n = args[7]
n = len(returns.columns)
# calc covariance matrix
covar = func_covar(returns)
# initial weights (default to inverse vol)
if initial_weights is None:
inv_vol = 1. / np.sqrt(np.diagonal(covar))
initial_weights = inv_vol / inv_vol.sum()
# default to equal risk weight
if risk_weights is None:
risk_weights = np.ones(n) / n
if risk_weights is not None:
min_n = min(n, min_n)
max_n = min(n, max_n)
if max_n>min_n:
#
if len(risk_weights)<n:
for i in range(min_n, n):
risk_weights.append(0.0)
else:
for i in range(min_n, n):
risk_weights[i] = 0.0
#
left_risk = 1-sum(risk_weights)
distribute_risk = left_risk/(max_n-min_n)
#
min_idx = np.argsort([covar[i,i] for i in range(min_n, len(covar))])[:max_n-min_n] + min_n
for i in min_idx:
risk_weights[i] = distribute_risk
# calc risk parity weights matrix
if risk_parity_method == 'ccd':
# cyclical coordinate descent implementation
erc_weights = _erc_weights_ccd(
initial_weights,
covar,
risk_weights,
maximum_iterations,
tolerance
)
else:
raise NotImplementedError('risk_parity_method not implemented')
# return erc weights vector
return | pd.Series(erc_weights, index=returns.columns, name='erc') | pandas.Series |
from flask import Blueprint, jsonify
from numpy import minimum
from datetime import datetime
import requests
import psycopg2
import warnings
import pandas as pd
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error, mean_absolute_error
warnings.filterwarnings("ignore")
btcData = Blueprint('btc', __name__)
conn = psycopg2.connect(database="postgres", user="postgres",
password="password", host="127.0.0.1", port="5432")
@btcData.route("/btc/profit/deals", methods=["GET"])
def get_btc_deals():
res = {}
cur = conn.cursor()
cur.execute(
'''SELECT COUNT(*)/3 FROM Recommendations''')
rows = cur.fetchall()
conn.commit()
res["count"] = int(rows[0][0])
result = jsonify(res)
result.headers.add('Access-Control-Allow-Origin', '*')
return result
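# Example call (illustrative; assumes the blueprint is registered on an app
# served at localhost:9000, which is what the ACTUAL_URL constants below use):
#
# curl http://localhost:9000/btc/profit/deals
# {"count": 4}
#
# The count is COUNT(*)/3 over the Recommendations table, cast to int.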
@btcData.route("/btc/recommendation/sell", methods=["GET"])
def post_btc_recommendation_sell():
ACTUAL_URL = "http://localhost:9000/btc/price"
actual_res = requests.get(url=ACTUAL_URL)
actual_data = actual_res.json()[-1:]
current_time_str = actual_data[0]["datetime"].split(":")
price = actual_data[0]["price"]
current_time = datetime.now()
current_time = current_time.replace(hour=int(current_time_str[0]), minute=int(
current_time_str[1]), second=int(current_time_str[2]), microsecond=0)
cur = conn.cursor()
cur.execute(
f"INSERT INTO Recommendations (Created_at,recommendation,price) VALUES ('{str(current_time)}', 'SELL', {price})")
conn.commit()
res = {}
res["status"] = "Success"
result = jsonify(res)
result.headers.add('Access-Control-Allow-Origin', '*')
return result
@btcData.route("/btc/recommendation/buy", methods=["GET"])
def post_btc_recommendation_buy():
ACTUAL_URL = "http://localhost:9000/btc/price"
actual_res = requests.get(url=ACTUAL_URL)
actual_data = actual_res.json()[-1:]
current_time_str = actual_data[0]["datetime"].split(":")
price = actual_data[0]["price"]
current_time = datetime.now()
current_time = current_time.replace(hour=int(current_time_str[0]), minute=int(
current_time_str[1]), second=int(current_time_str[2]), microsecond=0)
cur = conn.cursor()
cur.execute(
f"INSERT INTO Recommendations (Created_at,recommendation,price) VALUES ('{str(current_time)}', 'BUY', {price})")
conn.commit()
res = {}
res["status"] = "Success"
result = jsonify(res)
result.headers.add('Access-Control-Allow-Origin', '*')
return result
@btcData.route("/btc/profit/details/lastOperation", methods=["GET"])
def get_btc_profit_details_last_operation():
res = {}
cur = conn.cursor()
cur.execute(
'''SELECT created_at, recommendation, price FROM Recommendations ORDER BY created_at DESC LIMIT 1''')
rows = cur.fetchall()
conn.commit()
if rows[0][1] == "BUY":
res["price"] = rows[0][2]
else:
res["price"] = 0.0
res["operation"] = rows[0][1]
res["datetime"] = str(rows[0][0]).split(" ")[1]
result = jsonify(res)
result.headers.add('Access-Control-Allow-Origin', '*')
return result
@btcData.route("/btc/profit/details", methods=["GET"])
def get_btc_profit_details():
res = []
cur = conn.cursor()
cur.execute(
'''SELECT created_at, recommendation, price FROM Recommendations ORDER BY created_at DESC LIMIT 14''')
rows = cur.fetchall()
conn.commit()
rows.reverse()
if len(rows) > 2:
if rows[0][1] == "SELL":
rows = rows[1:]
previous_buy = 0
for i in range(len(rows)):
current_res = {}
if str(rows[i][1]) == "BUY":
previous_buy = rows[i][2]
current_res["profit/loss"] = 0.0
else:
current_res["profit/loss"] = rows[i][2] - previous_buy
current_res["datetime"] = str(rows[i][0])
current_res["recommendation"] = str(rows[i][1])
current_res["price"] = str(rows[i][2])
if str(rows[i][1]) != "BUY":
res.append(current_res)
result = jsonify(res)
result.headers.add('Access-Control-Allow-Origin', '*')
return result
@btcData.route("/btc/profit", methods=["GET"])
def get_btc_profit():
res = {}
cur = conn.cursor()
cur.execute(
'''SELECT created_at, recommendation, price FROM Recommendations''')
rows = cur.fetchall()
conn.commit()
# if the 1st operation is sell remove it
if rows[0][1] == "SELL":
rows = rows[1:]
# if the last operation is buy remove it
if rows[len(rows) - 1][1] == "BUY":
rows = rows[:-1]
df = | pd.DataFrame(rows, columns=['time', 'recommendation', 'price']) | pandas.DataFrame |
import pandas as pd
import STRING
import numpy as np
import datetime
from sklearn.cluster import AgglomerativeClustering
from models.cluster_model import cluster_analysis
pd.options.display.max_columns = 500
# SOURCE FILE
offer_df = | pd.read_csv(STRING.path_db + STRING.file_offer, sep=',', encoding='utf-8', quotechar='"') | pandas.read_csv |
"""Test the DropTokensByList pipeline stage."""
import pandas as pd
import pdpipe as pdp
def test_drop_tokens_by_list_short():
data = [[4, ["a", "bad", "cat"]], [5, ["bad", "not", "good"]]]
df = pd.DataFrame(data, [1, 2], ["age", "text"])
filter_tokens = pdp.DropTokensByList('text', ['bad'])
res_df = filter_tokens(df)
assert 'age' in res_df.columns
assert 'text' in res_df.columns
assert 'bad' not in res_df.loc[1]['text']
assert 'a' in res_df.loc[1]['text']
assert 'cat' in res_df.loc[1]['text']
assert 'bad' not in res_df.loc[2]['text']
assert 'not' in res_df.loc[2]['text']
assert 'good' in res_df.loc[2]['text']
def test_drop_tokens_by_list_short_no_drop():
data = [[4, ["a", "bad", "cat"]], [5, ["bad", "not", "good"]]]
df = | pd.DataFrame(data, [1, 2], ["age", "text"]) | pandas.DataFrame |
import os
import sys
from pandas.core.indexes import base
sys.path.append('..')
import argparse
import datetime as dt
import pickle
import yaml
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.utils.class_weight import compute_class_weight
from src.data.imgproc import tf_read_image
from src.data.dataset import ImageDataset
from sklearn.decomposition import IncrementalPCA
from sklearn.preprocessing import MinMaxScaler
from sklearn.multioutput import MultiOutputClassifier
from src.models.sklearn_models import models, param_grids
from sklearn.metrics import roc_auc_score, roc_curve, f1_score, accuracy_score
import logging
from datetime import datetime
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
logger = logging.getLogger(__file__)
def parse_args():
default_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
parser = argparse.ArgumentParser()
parser.add_argument(
"--pca_pretrained",
type=str,
default='IncrementalPCA_200_500_Random_2145_27072021.sav',
help=".sav file path for pretrained pca model.")
parser.add_argument("--preprocessing",
type=str,
default="config/default_preprocessing.yaml",
help="File path for image preprocessing.")
parser.add_argument("--file",
type=str,
default='chexpert',
help="Filename prefix. "
"You should give a meaningful name for easy tracking.")
parser.add_argument("--map",
type=str,
default='Random',
choices=['U-zero', 'U-one', 'Random'],
help="Option for mapping uncertain labels.")
parser.add_argument("--limit",
type=int,
default=5000,
help="Maximum dataset size capped.")
parser.add_argument("--path",
type=str,
default=default_dir,
help="Base path.")
parser.add_argument("--ylabels",
nargs='+',
default=[
'Atelectasis', 'Cardiomegaly', 'Consolidation',
'Edema', 'Pleural Effusion'
],
choices=[
'No Finding', 'Enlarged Cardiomediastinum',
'Cardiomegaly', 'Lung Opacity', 'Lung Lesion',
'Edema', 'Consolidation', 'Pneumonia',
'Atelectasis', 'Pneumothorax', 'Pleural Effusion',
'Pleural Other', 'Fracture', 'Support Devices'
],
help="Labels to predict.")
parser.add_argument("--model",
type=str,
default='RandomForestClassifier',
choices=[m for m in models],
help="Choice of model.")
parser.add_argument("--n_jobs",
type=int,
default=-1,
help="Number of cores for multi-processing.")
parser.add_argument("--random",
type=bool,
default=False,
help="Whether to use RandomizedSearchCV.")
args = parser.parse_args()
return args
def load_pca(args):
try:
pca_f_path = os.path.join(args.path, "models", args.pca_pretrained)
with open(pca_f_path, 'rb') as file:
pca = pickle.load(file)
logger.info(f'Pretrained pca {pca_f_path} .sav file loaded.')
logger.info(f'Pretrained pca: {pca}')
except:
logger.error(
f'Pretrained pca {pca_f_path} .sav file cannot be loaded!')
return pca
def load_dataset(args):
base_path = args.path
preprocessing_path = os.path.join(base_path, args.preprocessing)
image_path = os.path.join(base_path, "data", "raw")
train_csv_path = os.path.join(base_path, "data", "raw",
"CheXpert-v1.0-small", "train.csv")
with open(preprocessing_path, 'r') as file:
preprocessing_config = yaml.full_load(file)
train_dataset = ImageDataset(
label_csv_path=train_csv_path,
image_path_base=image_path,
limit=args.limit,
transformations=preprocessing_config["transformations"],
map_option=args.map)
return train_dataset
def search_cv(random, model, param_grid, X, y, scoring, n_jobs):
if random:
search = RandomizedSearchCV(model,
param_grid,
cv=5,
scoring=scoring,
return_train_score=True,
n_jobs=n_jobs)
else:
search = GridSearchCV(model,
param_grid,
cv=5,
scoring=scoring,
return_train_score=True,
n_jobs=n_jobs)
search.fit(X, y)
return search
def main():
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s.%(msecs)03d %(levelname)s %(module)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
args = parse_args()
logger.info(f'==============================================')
logger.info('Loading dataset')
train_dataset = load_dataset(args)
limit = args.limit
return_labels = args.ylabels
logger.info(f'==============================================')
logger.info(f'Running search cv on data size limit: {limit}')
logger.info(f'Labels to predict: {return_labels}')
pca = None
if args.pca_pretrained:
pca = load_pca(args)
base_model = models[args.model]
logger.info(f'model: {base_model}')
param_grid = param_grids[args.model]
logger.info(f'param_grid: {param_grid}')
logger.info(f'RandomizedSearchCV: {args.random}')
x_features_train, x_image_train, y_train_multi = train_dataset.load(
return_labels)
if pca:
x_image_train = MinMaxScaler().fit_transform(
pca.transform(x_image_train))
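        # project the image features onto the pretrained PCA components, then rescale them to [0, 1]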
X_train = pd.concat(
[pd.DataFrame(x_features_train),
| pd.DataFrame(x_image_train) | pandas.DataFrame |
import pandas as pd
import numpy as np
import warnings
from numpy import cumsum, log, polyfit, sqrt, std, subtract
from datetime import datetime, timedelta
import scipy.stats as st
import statsmodels.api as sm
import math
import matplotlib
import matplotlib.pyplot as plt
from tqdm import tqdm
from scipy.stats import norm
from scipy import poly1d
warnings.simplefilter(action='ignore', category=Warning)
import plotly.express as px
import plotly.graph_objects as go
import scipy.stats as stats
from pandas.tseries.offsets import BDay
from plotly.subplots import make_subplots
matplotlib.rcParams['figure.figsize'] = (25.0, 15.0)
matplotlib.style.use('ggplot')
pd.set_option('display.float_format', lambda x: '%.4f' % x)
import plotly.io as pio
from numpy import median, mean
pio.templates.default = "plotly_white"
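# SampleStrategy: a simple moving-average crossover -- go long when the fast MA ('MA NEAR') crosses above
# the slow MA ('MA FAR'), go short on the opposite cross, and exit an open position on the reverse signal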
class SampleStrategy():
def shortEntry(self, prices_df):
short_entry_filter_1 = prices_df['MA NEAR'][-1] < prices_df['MA FAR'][-1]
short_entry_filter_2 = prices_df['MA NEAR'][-2] > prices_df['MA FAR'][-2]
enter_trade = short_entry_filter_1 and short_entry_filter_2
if enter_trade:
return True
else:
return False
def longEntry(self, prices_df):
long_entry_filter_1 = prices_df['MA NEAR'][-1] > prices_df['MA FAR'][-1]
long_entry_filter_2 = prices_df['MA NEAR'][-2] < prices_df['MA FAR'][-2]
enter_trade = long_entry_filter_1 and long_entry_filter_2
if enter_trade:
return True
else:
return False
def longExit(self, prices_df):
long_exit_filter_1 = prices_df['MA NEAR'][-1] < prices_df['MA FAR'][-1]
long_exit_filter_2 = prices_df['MA NEAR'][-2] > prices_df['MA FAR'][-2]
exit_trade = long_exit_filter_1 and long_exit_filter_2
if exit_trade:
return True
else:
return False
def shortExit(self, prices_df):
short_exit_filter_1 = prices_df['MA NEAR'][-1] > prices_df['MA FAR'][-1]
short_exit_filter_2 = prices_df['MA NEAR'][-2] < prices_df['MA FAR'][-2]
exit_trade = short_exit_filter_1 and short_exit_filter_2
if exit_trade:
return True
else:
return False
from functools import reduce
class Broker():
def __init__(self,
price_data=None,
MA_period_slow=200,
MA_period_fast=50):
assert price_data is not None
self.data = price_data
self.pass_history = 20
self.strategy_obj = SampleStrategy()
self.entry_price = None
self.exit_price = None
self.position = 0
self.pnl = 0
self.MA_period_slow = MA_period_slow
self.MA_period_fast = MA_period_fast
self.trade_id = -1
self.trade_type = None
self.entry_time = None
self.exit_time = None
self.exit_type = None
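        # precompute the fast and slow moving averages consumed by the crossover rules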
self.data['MA NEAR'] = self.data['Close'].rolling(self.MA_period_fast).mean()
self.data['MA FAR'] = self.data['Close'].rolling(self.MA_period_slow).mean()
self.tradeLog = pd.DataFrame(columns=['Trade ID',
'Trade Type',
'Entry Time',
'Entry Price',
'Exit Time',
'Exit Price',
'PNL',
])
def tradeExit(self):
self.tradeLog.loc[self.trade_id, 'Trade ID'] = self.trade_id
self.tradeLog.loc[self.trade_id, 'Trade Type'] = self.trade_type
self.tradeLog.loc[self.trade_id, 'Entry Time'] = pd.to_datetime(self.entry_time, infer_datetime_format= True)
self.tradeLog.loc[self.trade_id, 'Entry Price'] = self.entry_price
self.tradeLog.loc[self.trade_id, 'Exit Time'] = pd.to_datetime(self.exit_time, infer_datetime_format= True)
self.tradeLog.loc[self.trade_id, 'Exit Price'] = self.exit_price
self.tradeLog.loc[self.trade_id, 'PNL'] = self.pnl*1000
def testerAlgo(self):
def takeEntry():
assert self.pass_history%1==0
enterShortSignal = self.strategy_obj.shortEntry(self.data.iloc[i-self.pass_history:i+1])
enterLongSignal = self.strategy_obj.longEntry(self.data.iloc[i-self.pass_history:i+1])
if enterShortSignal == True:
self.position = -1
self.trade_id = self.trade_id + 1
self.trade_type = 'Short'
self.entry_time = self.data.index[i]
self.entry_price = self.data['Close'][i]
elif enterLongSignal == True:
self.position = 1
self.trade_id = self.trade_id + 1
self.trade_type = 'Long'
self.entry_time = self.data.index[i]
self.entry_price = self.data['Close'][i]
for i in tqdm(range(self.pass_history, len(self.data)-1)):
if self.position in [1, -1]:
if self.position == -1:
assert self.pass_history%1==0
exitShortSignal = self.strategy_obj.shortExit(self.data.iloc[i-self.pass_history:i+1])
if exitShortSignal == True:
self.position = 0
self.exit_price = self.data['Close'][i]
self.pnl = (self.entry_price - self.exit_price)
self.exit_time = self.data.index[i]
self.tradeExit()
takeEntry()
if self.position == 1:
exitLongSignal = self.strategy_obj.longExit(self.data.iloc[i-self.pass_history:i+1])
if exitLongSignal == True:
self.position = 0
self.exit_price = self.data['Close'][i]
self.pnl = (self.exit_price - self.entry_price)
self.exit_time = self.data.index[i]
self.tradeExit()
takeEntry()
elif self.position == 0:
takeEntry()
class TestBroker():
def __init__(self,
MA_period_slow=200,
MA_period_fast=50):
url='https://drive.google.com/file/d/1pdzeR8bYD7G_pj7XvWhcJrxnFyzmmqps/view?usp=sharing'
url2='https://drive.google.com/uc?id=' + url.split('/')[-2]
self.data = pd.read_csv(url2 ,
parse_dates=['Timestamp'],
infer_datetime_format=True,
memory_map=True,
index_col='Timestamp',
low_memory=False)
self.pass_history = 20
self.strategy_obj = SampleStrategy()
self.entry_price = None
self.exit_price = None
self.position = 0
self.pnl = 0
self.MA_period_slow = MA_period_slow
self.MA_period_fast = MA_period_fast
self.trade_id = -1
self.trade_type = None
self.entry_time = None
self.exit_time = None
self.exit_type = None
self.data['MA NEAR'] = self.data['Close'].rolling(self.MA_period_fast).mean()
self.data['MA FAR'] = self.data['Close'].rolling(self.MA_period_slow).mean()
self.tradeLog = pd.DataFrame(columns=['Trade ID',
'Trade Type',
'Entry Time',
'Entry Price',
'Exit Time',
'Exit Price',
'PNL',
])
def tradeExit(self):
self.tradeLog.loc[self.trade_id, 'Trade ID'] = self.trade_id
self.tradeLog.loc[self.trade_id, 'Trade Type'] = self.trade_type
self.tradeLog.loc[self.trade_id, 'Entry Time'] = pd.to_datetime(self.entry_time, infer_datetime_format= True)
self.tradeLog.loc[self.trade_id, 'Entry Price'] = self.entry_price
self.tradeLog.loc[self.trade_id, 'Exit Time'] = pd.to_datetime(self.exit_time, infer_datetime_format= True)
self.tradeLog.loc[self.trade_id, 'Exit Price'] = self.exit_price
self.tradeLog.loc[self.trade_id, 'PNL'] = self.pnl*1000
def testerAlgo(self):
def takeEntry():
assert self.pass_history%1==0
enterShortSignal = self.strategy_obj.shortEntry(self.data.iloc[i-self.pass_history:i+1])
enterLongSignal = self.strategy_obj.longEntry(self.data.iloc[i-self.pass_history:i+1])
if enterShortSignal == True:
self.position = -1
self.trade_id = self.trade_id + 1
self.trade_type = -1
self.entry_time = self.data.index[i]
self.entry_price = self.data['Close'][i]
elif enterLongSignal == True:
self.position = 1
self.trade_id = self.trade_id + 1
self.trade_type = 1
self.entry_time = self.data.index[i]
self.entry_price = self.data['Close'][i]
for i in tqdm(range(self.pass_history, len(self.data)-1)):
if self.position in [1, -1]:
if self.position == -1:
assert self.pass_history%1==0
exitShortSignal = self.strategy_obj.shortExit(self.data.iloc[i-self.pass_history:i+1])
if exitShortSignal == True:
self.position = 0
self.exit_price = self.data['Close'][i]
self.pnl = (self.entry_price - self.exit_price)
self.exit_time = self.data.index[i]
self.tradeExit()
takeEntry()
if self.position == 1:
exitLongSignal = self.strategy_obj.longExit(self.data.iloc[i-self.pass_history:i+1])
if exitLongSignal == True:
self.position = 0
self.exit_price = self.data['Close'][i]
self.pnl = (self.exit_price - self.entry_price)
self.exit_time = self.data.index[i]
self.tradeExit()
takeEntry()
elif self.position == 0:
takeEntry()
class Metrics():
def __init__(self,
trade_logs):
self.trade_logs = trade_logs
self.trade_logs['Entry Time'] = pd.to_datetime(self.trade_logs['Entry Time'], infer_datetime_format= True)
self.trade_logs['Exit Time'] = pd.to_datetime(self.trade_logs['Exit Time'], infer_datetime_format= True)
self.performance_metrics = pd.DataFrame(index=[
'Total Trades',
'Winning Trades',
'Losing Trades',
'Net P/L',
'Gross Profit',
'Gross Loss',
'P/L Per Trade',
'Max Drawdown',
'Win Percentage',
'Profit Factor'])
self.monthly_performance = pd.DataFrame()
self.yearly_performance = | pd.DataFrame() | pandas.DataFrame |
# Not yet tested
#Import Libraries:
from __future__ import print_function
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers import Dense, GlobalAveragePooling2D
from keras.optimizers import adam
from keras.models import Model
from keras.applications.vgg19 import VGG19
import numpy as np
import pandas as pd
import random
import math
if __name__ == '__main__':
colormode = 'rgb'
channels = 3 #color images have 3 channels. grayscale images have 1 channel
batchsize = 1 #Number of images to be used in each processing batch. Larger batches have a greater impact on training accuracy but that isn't always a good thing
trainingsamples = 25 #Number of images to be used for training set
validationsamples = 25 #Number of images to be used for validation set
model_name = 'KovalModel2' #Any name for saving and keeping track of this model
numclasses = 2
root_dir = 'C:\\Users\\Aadi\\Documents\\GitHub\\KovalCNN\\'
# create the base pre-trained model
base_model = VGG19(weights='imagenet', include_top=False)
# add a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D()(x)
# let's add a fully-connected layer
x = Dense(1024, activation='relu')(x)
# and a logistic layer
predictions = Dense(numclasses, activation='softmax')(x)
# this is the model we will train
model = Model(input=base_model.input, output=predictions)
# note: despite the usual fine-tuning recipe, no layers are frozen here --
# every VGG19 base layer (and the newly added top layers) is left trainable below
for layer in base_model.layers:
layer.trainable = True
for layer in model.layers:
layer.trainable = True
# compile the model (should be done *after* setting layers to non-trainable)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) #create model with for binary output with the adam optimization algorithm
train_datagen = ImageDataGenerator(horizontal_flip=True, vertical_flip=True) # use ImageDataGenerator to enhance the size of our dataset by randomly flipping images. There are many more transformations that are possible
test_datagen = ImageDataGenerator()
#the following code reads images, trains the model, and saves the training history to a csv file:
train_generator = train_datagen.flow_from_directory(
root_dir+"data\\train",
target_size=(150, 150),
batch_size=batchsize,
color_mode=colormode)
validation_generator = test_datagen.flow_from_directory(
root_dir+"data\\val",
target_size=(150, 150),
batch_size=batchsize,
color_mode=colormode)
history = model.fit_generator(
train_generator,
steps_per_epoch=trainingsamples/batchsize,
epochs=100,
validation_data=validation_generator,
validation_steps=validationsamples/batchsize)
hist = history.history
hist = | pd.DataFrame(hist) | pandas.DataFrame |
import typing
import datetime
import pandas as pd
from .make_df import ComicDataFrame
from lib.aws_util.s3.upload import upload_to_s3
from lib.aws_util.s3.download import download_from_s3
def store(df: ComicDataFrame) -> typing.NoReturn:
dt = datetime.datetime.now()
bucket = 'av-adam-store'
save_dir = '/tmp/'
upload_dir = f'ruijianime/comic/'
meta_path = f'{save_dir}meta.csv'
meta_obj = f'{upload_dir}meta.csv'
tag_path = f'{save_dir}tag.csv'
tag_obj = f'{upload_dir}tag.csv'
author_path = f'{save_dir}author.csv'
author_obj = f'{upload_dir}author.csv'
def add_timestamp() -> typing.NoReturn:
df.meta['updated_at'] = dt
df.tag['updated_at'] = dt
df.author['updated_at'] = dt
def download() -> typing.NoReturn:
download_from_s3(bucket, meta_obj, meta_path)
download_from_s3(bucket, tag_obj, tag_path)
download_from_s3(bucket, author_obj, author_path)
def merge() -> typing.NoReturn:
meta_old = pd.read_csv(meta_path)
meta = pd.concat((meta_old, df.meta), ignore_index=True)
meta.drop_duplicates(
subset=['comic_id'],
keep='last',
inplace=True,
)
print(meta)
meta.to_csv(meta_path, index=False)
tag_old = pd.read_csv(tag_path)
tag = pd.concat((tag_old, df.tag), ignore_index=True)
tag.drop_duplicates(
subset=['comic_id', 'tag_id'],
keep='last',
inplace=True,
)
print(tag)
tag.to_csv(tag_path, index=False)
author_old = | pd.read_csv(author_path) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created in February 2018
@author: <NAME>
file: Method to filter Termium data from the CSV files and represent it as a dataframe
"""
import pandas as pd
import glob
INPUT = "path to CSV files"
OUTPUT = "path to output file that should then be loaded to a MySQL table"
outputFile = open("outputFile path and name", 'w')
def loadTermiumToDB():
counter = 0
outputFile.write("entryID"+"\t"+"domain"+"\t"+"term"+"\t"+"termLang"+"\t"+"termType"+"\t"+"definition"+"\n")
for filename in glob.iglob(INPUT, recursive=True):
print(filename)
df = pd.read_csv(filename)
for index, row in df.iterrows():
counter += 1
entryID = "TERMIUM"+str(counter)
domain = row[0]
writeToResults(row, entryID, 'TERM_EN', 'ABBREVIATION_EN', 'SYNONYMS_EN', domain, "en", 'TEXTUAL_SUPPORT_1_EN')
writeToResults(row, entryID, 'TERME_FR', 'ABBREVIATION_FR', 'SYNONYMES_FR', domain, "fr", 'JUSTIFICATION_1_FR')
writeToResults(row, entryID, 'TERME_TERM_ES', 'ABBR_ES', 'SYNO_ES', domain, "es", 'JUST_TEXTSUPP_1_ES')
outputFile.close()
def writeToResults(row, entryID, term, abbreviation, synonyms, domain, language, definition):
if not pd.isnull(row[term]):
outputFile.write(entryID+"\t"+domain+"\t"+str(row[term])+"\t"+language+"\t"+"fullForm"+"\t"+str(row[definition])+"\n")
if not pd.isnull(row[abbreviation]):
outputFile.write(entryID+"\t"+domain+"\t"+str(row[abbreviation])+"\t"+language+"\t"+"abbreviation"+"\t"+str(row[definition])+"\n")
if not | pd.isnull(row[synonyms]) | pandas.isnull |
import os
from collections import defaultdict
import luigi
import ujson
import numpy as np
from numpy.random import RandomState
import pandas as pd
from .config import INPUT_DIR, OUTPUT_DIR
from .input_data import OrdersInput, OrderProductsInput
class _InputCSV(luigi.ExternalTask):
filename = None
@classmethod
def count(cls):
df = cls.read()
return df.shape[0]
@classmethod
def output(cls):
path = os.path.join(INPUT_DIR, cls.filename)
return luigi.LocalTarget(path)
@classmethod
def read(cls):
df = pd.read_csv(cls.output().path)
return df
class Products(_InputCSV):
filename = 'products.csv'
class Departments(_InputCSV):
filename = 'departments.csv'
class Aisles(_InputCSV):
filename = 'aisles.csv'
class _OrdersTask(luigi.Task):
def requires(self):
return {
'orders': OrdersInput(),
'order_products': [OrderProductsInput(eval_set=s) for s in ('prior', 'train')],
}
def _read_orders_input(self):
dtype = {
'order_id': np.uint32,
'user_id': np.uint32,
'eval_set': str,
'order_number': np.uint8,
'order_dow': np.uint8,
'order_hour_of_day': np.uint8,
'days_since_prior_order': np.float16,
}
df = pd.read_csv(self.input()['orders'].path, dtype=dtype)
return df
def _read_order_products_input(self):
dtype = {
'order_id': np.uint32,
'product_id': np.uint32,
'add_to_cart_order': np.uint8,
'reordered': np.uint8,
}
df_parts = []
for task in self.input()['order_products']:
df_part = pd.read_csv(task.path, dtype=dtype)
df_parts.append(df_part)
df = | pd.concat(df_parts) | pandas.concat |
import os
import pandas as pd
import numpy as np
from scipy.fftpack import fft
from scipy import integrate
from scipy.stats import kurtosis
from notebook.pca_reduction import PCAReduction
from notebook.utils import general_normalization, universal_normalization, trim_or_pad_data, feature_matrix_extractor
from notebook.utils import modelAndSave
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
TRIM_DATA_SIZE_REALLY = 30
GESTURE = 'really'
def feature_vector_really(data, isReally=False, test=False):
trimmed_data = trim_or_pad_data(data, TRIM_DATA_SIZE_REALLY)
rY = trimmed_data['rightWrist_y']
lY = trimmed_data['leftWrist_y']
normRawColumn = universal_normalization(rY, trimmed_data, x_norm=False)
normRawColumn = general_normalization(normRawColumn)
diffNormRawData = np.diff(normRawColumn)
#Fast Fourier Transform
fftArray = np.array([])
fftVal = []
fft_coefficients = fft(normRawColumn, n=6)[1:]
fft_coefficients_real = [value.real for value in fft_coefficients]
fftVal += fft_coefficients_real
fftArray = np.append(fftArray, fftVal)
#Area under curve
auc = np.array([])
auc = np.append(auc, abs(integrate.simps(normRawColumn, dx=5)))
#Kurtosis
kur = np.array([])
kur = np.append(kur, kurtosis(normRawColumn))
zeroCrossingArray = np.array([])
maxDiffArray = np.array([])
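    # collect sign changes (zero crossings) of the first derivative and its local max-min range within a +/-5 sample window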
if diffNormRawData[0] > 0:
initSign = 1
else:
initSign = 0
windowSize = 5
for x in range(1, len(diffNormRawData)):
if diffNormRawData[x] > 0:
newSign = 1
else:
newSign = 0
if initSign != newSign:
zeroCrossingArray = np.append(zeroCrossingArray, x)
initSign = newSign
maxIndex = np.minimum(len(diffNormRawData), x + windowSize)
minIndex = np.maximum(0, x - windowSize)
maxVal = np.amax(diffNormRawData[minIndex:maxIndex])
minVal = np.amin(diffNormRawData[minIndex:maxIndex])
maxDiffArray = np.append(maxDiffArray, (maxVal - minVal))
index = np.argsort(-maxDiffArray)
featureVector = np.array([])
featureVector = np.append(featureVector, fftArray)
featureVector = np.append(featureVector, auc)
featureVector = np.append(featureVector, kur)
featureVector = np.append(featureVector, zeroCrossingArray[index[0:5]])
featureVector = np.append(featureVector, maxDiffArray[index[0:5]])
    if TRIM_DATA_SIZE_REALLY - 1 > featureVector.shape[0]:
featureVector = np.pad(featureVector, (0, TRIM_DATA_SIZE_REALLY - featureVector.shape[0] - 1), 'constant')
featureVector = featureVector[:TRIM_DATA_SIZE_REALLY-1]
if not test:
if isReally:
featureVector = np.append(featureVector, 1)
else:
featureVector = np.append(featureVector, 0)
return featureVector
def modeling_really(dirPath):
listDir = ['really']
featureMatrixReally = feature_matrix_extractor(dirPath, listDir, feature_vector_really, pos_sample=True)
really_df = | pd.DataFrame(featureMatrixReally) | pandas.DataFrame |
# Preppin' Data 2021 Week 26
import pandas as pd
import numpy as np
from datetime import date, timedelta, datetime
# Load data
rolling = pd.read_csv('unprepped_data\\PD 2021 Wk 26 Input - Sheet1.csv')
# Create a data set that gives 7 rows per date (unless those dates aren't included in the data set).
# - ie 1st Jan only has 4 rows of data (1st, 2nd, 3rd & 4th)
dates = list(set(list(rolling['Date'])))
number_of_dates = len(dates)
# loop through each date, find the 3 days before, 3 after
# create a dataframe and concat together
for i in range(number_of_dates):
a = dates[i]
b = datetime.strptime(a, '%d/%m/%Y')
sdate = b-timedelta(days=3)
edate = b+timedelta(days=3)
rolling_days = | pd.date_range(sdate,edate,freq='d') | pandas.date_range |
import os
import numpy as np
import pandas as pd
import pytest
from conceptnet5.uri import is_term
from conceptnet5.vectors import get_vector
from conceptnet5.vectors.transforms import (
l1_normalize_columns,
l2_normalize_rows,
make_big_frame,
make_small_frame,
shrink_and_sort,
standardize_row_labels,
)
from conceptnet5.vectors.query import VectorSpaceWrapper
@pytest.fixture
def simple_frame():
data = [
[4, 4, 4],
[1, 1, 1],
[1, 2, 10],
[3, 3, 4],
[2, 3, 4],
[2, 3, 5],
[7, 2, 7],
[3, 8, 2],
]
index = [
'island',
'Island',
'cat',
'figure',
'figure skating',
'figure skater',
'thing',
'17',
]
return pd.DataFrame(data=data, index=index)
@pytest.fixture
def multi_ling_frame():
data = [[8, 10, 3], [4, 5, 6], [4, 4, 5], [10, 6, 12], [10, 7, 11], [20, 20, 7]]
index = [
'/c/pl/kombinacja',
'/c/en/ski_jumping',
'/c/en/nordic_combined',
'/c/en/present',
'/c/en/gift',
'/c/en/quiz',
]
return | pd.DataFrame(data=data, index=index) | pandas.DataFrame |
from datetime import datetime, timedelta
from dateutil import parser
from typing import Any, Dict, Iterable, List
import pandas as pd
from sgqlc.operation import Operation
from ..models.iot import (
MetricField,
MetricWindow,
)
from ..utils import make_logger
from ..utils.config import ContxtEnvironmentConfig
from .base_graph_service import BaseGraphService, SchemaMissingException
try:
import contxt.schemas.nionic.nionic_schema as schema
from contxt.schemas.nionic.nionic_schema import MetricData
except ImportError:
raise SchemaMissingException('[ERROR] Schema is not generated for GraphQL -- run `contxt init` to '
'initialize then re-run the command')
logger = make_logger(__name__)
class IOTRequestException(Exception):
pass
class IotNionicHelper(BaseGraphService):
def __init__(self, contxt_env: ContxtEnvironmentConfig):
super().__init__(contxt_env)
def get_latest_states(self, fields: List[MetricField]) -> Dict[str, schema.MetricData]:
op = Operation(schema.Query)
field_aliases = []
for field in fields:
field_alias = field.label if field.alias is None else field.alias.replace('-', '_')
field_aliases.append(field_alias)
metric_data = op.metric_data(label=field.label, source_id=field.sourceId, window='1min',
order_by=[schema.MetricDataOrderBy.TIME_DESC], first=1,
to=str(datetime.utcnow()), from_=str(datetime.utcnow() - timedelta(days=1)),
__alias__=field_alias)
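            # fetch only the most recent sample per field: first=1, ordered by time descending, restricted to the last 24 hours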
metric_data.nodes().time()
metric_data.nodes().data()
print(op)
data = self.run(op)
metric_data = (op + data)
result_data = {}
for field in field_aliases:
res = metric_data[field]
if len(res.nodes):
result_data[field] = res.nodes[0]
else:
result_data[field] = None
return result_data
def get_metric_data(self, field: MetricField, start_time: datetime, end_time: datetime,
window: MetricWindow = MetricWindow.MINUTELY, order_by=[schema.MetricDataOrderBy.TIME_ASC],
aggregation: schema.MetricDataAggregationMethod = 'AVG'
) -> schema.MetricData:
op = Operation(schema.Query)
if window is not MetricWindow.MINUTELY:
metric_data = op.metric_data(label=field.label, source_id=field.sourceId, window=window.value,
order_by=order_by, from_=str(start_time), to=str(end_time),
aggregation=aggregation)
else:
metric_data = op.metric_data(label=field.label, source_id=field.sourceId, window=window.value,
order_by=order_by, from_=str(start_time), to=str(end_time))
metric_data.nodes().time()
metric_data.nodes().data()
# page info
metric_data.page_info().has_next_page()
print(op)
data = self.run(op)
return (op + data).metric_data
def get_metric_data_series(self, field: MetricField, start_time: datetime, end_time: datetime,
window: MetricWindow = MetricWindow.MINUTELY, order_by=[schema.MetricDataOrderBy.TIME_ASC],
aggregation: str = 'AVG'
) -> pd.Series:
if aggregation not in schema.MetricDataAggregationMethod.__choices__:
raise IOTRequestException(f'Aggregation method {aggregation} not a valid aggregation method')
agg_method = schema.MetricDataAggregationMethod(aggregation)
parsed_data = []
time_index = []
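        # page through the metric data until the API reports there are no further pages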
while True:
data = self.get_metric_data(field, start_time, end_time, window, order_by, aggregation=agg_method)
has_another_page = data.page_info.has_next_page
for d in data.nodes:
time_index.append(parser.parse(d.time))
try:
parsed_data.append(float(d.data))
except ValueError as e:
parsed_data.append(d.data)
if not has_another_page:
break
start_time = time_index[-1]
df = | pd.Series(parsed_data, time_index) | pandas.Series |
"""Tests."""
from math import ceil # type: ignore
import datetime # type: ignore
import pytest # type: ignore
import pandas as pd # type: ignore
import numpy as np # type: ignore
import altair as alt # type: ignore
from src.penn_chime.charts import new_admissions_chart, admitted_patients_chart, chart_descriptions
from src.penn_chime.models import SimSirModel, sir, sim_sir_df, build_admits_df
from src.penn_chime.parameters import Parameters
from src.penn_chime.presentation import display_header
from src.penn_chime.settings import DEFAULTS
from src.penn_chime.defaults import RateLos
PARAM = Parameters(
current_hospitalized=100,
doubling_time=6.0,
known_infected=5000,
market_share=0.05,
relative_contact_rate=0.15,
susceptible=500000,
hospitalized=RateLos(0.05, 7),
icu=RateLos(0.02, 9),
ventilated=RateLos(0.01, 10),
n_days=60,
)
MODEL = SimSirModel(PARAM)
# set up
# we just want to verify that st _attempted_ to render the right stuff
# so we store the input, and make sure that it matches what we expect
class MockStreamlit:
def __init__(self):
self.render_store = []
self.markdown = self.just_store_instead_of_rendering
self.latex = self.just_store_instead_of_rendering
self.subheader = self.just_store_instead_of_rendering
def just_store_instead_of_rendering(self, inp, *args, **kwargs):
self.render_store.append(inp)
return None
def cleanup(self):
"""
Call this after every test, unless you intentionally want to accumulate stuff-to-render
"""
self.render_store = []
st = MockStreamlit()
# test presentation
def test_penn_logo_in_header():
penn_css = '<link rel="stylesheet" href="https://www1.pennmedicine.org/styles/shared/penn-medicine-header.css">'
display_header(st, MODEL, PARAM)
assert len(
list(filter(lambda s: penn_css in s, st.render_store))
), "The Penn Medicine header should be printed"
def test_the_rest_of_header_shows_up():
random_part_of_header = "implying an effective $R_t$ of"
assert len(
list(filter(lambda s: random_part_of_header in s, st.render_store))
), "The whole header should render"
def test_mitigation_statement():
st.cleanup()
expected_doubling = "outbreak **reduces the doubling time to 7.8** days"
display_header(st, MODEL, PARAM)
assert [s for s in st.render_store if expected_doubling in s]
# assert len((list(filter(lambda s: expected_doubling in s, st.render_store))))
st.cleanup()
expected_halving = "outbreak **halves the infections every 51.9** days"
halving_params = Parameters(
current_hospitalized=100,
doubling_time=6.0,
known_infected=5000,
market_share=0.05,
relative_contact_rate=0.7,
susceptible=500000,
hospitalized=RateLos(0.05, 7),
icu=RateLos(0.02, 9),
ventilated=RateLos(0.01, 10),
n_days=60,
)
halving_model = SimSirModel(halving_params)
display_header(st, halving_model, halving_params)
assert [s for s in st.render_store if expected_halving in s]
#assert len((list(filter(lambda s: expected_halving in s, st.render_store))))
st.cleanup()
st.cleanup()
@pytest.mark.xfail()
def test_header_fail():
"""
Just proving to myself that these tests work
"""
some_garbage = "ajskhlaeHFPIQONOI8QH34TRNAOP8ESYAW4"
display_header(st, PARAM)
assert len(
list(filter(lambda s: some_garbage in s, st.render_store))
), "This should fail"
st.cleanup()
def test_defaults_repr():
"""
Test DEFAULTS.repr
"""
repr(DEFAULTS)
# Test the math
def test_sir():
"""
Someone who is good at testing, help
"""
sir_test = sir(100, 1, 0, 0.2, 0.5, 1)
assert sir_test == (
0.7920792079207921,
0.20297029702970298,
0.0049504950495049506,
), "This contrived example should work"
assert isinstance(sir_test, tuple)
for v in sir_test:
assert isinstance(v, float)
assert v >= 0
# Certain things should *not* work
with pytest.raises(TypeError) as error:
sir("S", 1, 0, 0.2, 0.5, 1)
assert str(error.value) == "can't multiply sequence by non-int of type 'float'"
with pytest.raises(TypeError) as error:
sir(100, "I", 0, 0.2, 0.5, 1)
assert str(error.value) == "can't multiply sequence by non-int of type 'float'"
with pytest.raises(TypeError) as error:
sir(100, 1, "R", 0.2, 0.5, 1)
assert str(error.value) == "unsupported operand type(s) for +: 'float' and 'str'"
with pytest.raises(TypeError) as error:
sir(100, 1, 0, "beta", 0.5, 1)
assert str(error.value) == "bad operand type for unary -: 'str'"
with pytest.raises(TypeError) as error:
sir(100, 1, 0, 0.2, "gamma", 1)
assert str(error.value) == "unsupported operand type(s) for -: 'float' and 'str'"
with pytest.raises(TypeError) as error:
sir(100, 1, 0, 0.2, 0.5, "N")
assert str(error.value) == "unsupported operand type(s) for /: 'str' and 'float'"
# Zeros across the board should fail
with pytest.raises(ZeroDivisionError):
sir(0, 0, 0, 0, 0, 0)
def test_sim_sir():
"""
Rounding to move fast past decimal place issues
"""
raw_df = sim_sir_df(5, 6, 7, 0.1, 0.1, 40)
first = raw_df.iloc[0, :]
last = raw_df.iloc[-1, :]
assert round(first.susceptible, 0) == 5
assert round(first.infected, 2) == 6
assert round(first.recovered, 0) == 7
assert round(last.susceptible, 2) == 0
assert round(last.infected, 2) == 0.18
assert round(last.recovered, 2) == 17.82
assert isinstance(raw_df, pd.DataFrame)
def test_new_admissions_chart():
projection_admits = pd.read_csv("tests/projection_admits.csv")
chart = new_admissions_chart(alt, projection_admits, PARAM)
assert isinstance(chart, alt.Chart)
assert chart.data.iloc[1].hospitalized < 1
assert round(chart.data.iloc[40].icu, 0) == 25
# test fx call with no params
with pytest.raises(TypeError):
new_admissions_chart()
empty_chart = new_admissions_chart(alt, pd.DataFrame(), PARAM)
assert empty_chart.data.empty
def test_admitted_patients_chart():
census_df = pd.read_csv("tests/census_df.csv")
chart = admitted_patients_chart(alt, census_df, PARAM)
assert isinstance(chart, alt.Chart)
assert chart.data.iloc[1].hospitalized == 1
assert chart.data.iloc[49].ventilated == 203
# test fx call with no params
with pytest.raises(TypeError):
admitted_patients_chart()
empty_chart = admitted_patients_chart(alt, pd.DataFrame(), PARAM)
assert empty_chart.data.empty
def test_model(model=MODEL, param=PARAM):
# test the Model
assert model.infected == 40000.0
assert isinstance(model.infected, float) # based off note in models.py
# test the class-calculated attributes
assert model.detection_probability == 0.125
assert model.intrinsic_growth_rate == 0.12246204830937302
assert model.beta == 3.2961405355450555e-07
assert model.r_t == 2.307298374881539
assert model.r_naught == 2.7144686763312222
assert model.doubling_time_t == 7.764405988534983
# test the things n_days creates, which in turn tests sim_sir, sir, and get_dispositions
assert len(model.raw_df) == param.n_days + 1 == 61
raw_df = model.raw_df
first = raw_df.iloc[0, :]
second = raw_df.iloc[1, :]
last = raw_df.iloc[-1, :]
assert first.susceptible == 500000.0
assert round(second.infected, 0) == 43735
assert round(last.susceptible, 0) == 67202
assert round(raw_df.recovered[30], 0) == 224048
assert [d[0] for d in model.dispositions.values()] == [100.0, 40.0, 20.0]
assert [round(d[60], 0) for d in model.dispositions.values()] == [1182.0, 473.0, 236.0]
# test that admissions are being properly calculated (thanks @PhilMiller)
admissions = build_admits_df(param.n_days, model.dispositions)
cumulative_admissions = admissions.cumsum()
diff = cumulative_admissions["hospitalized"][1:-1] - (
0.05 * 0.05 * (raw_df.infected[1:-1] + raw_df.recovered[1:-1]) - 100
)
assert (diff.abs() < 0.1).all()
def test_chart_descriptions(p=PARAM):
# new admissions chart
projection_admits = | pd.read_csv('tests/projection_admits.csv') | pandas.read_csv |
import pandas as pd
import numpy as np
from .QCBase import VarNames
class Exporter(object):
""" Export class which writes parsed data to a certain format"""
valid_formats = ["pdf", "xlsx", "txt", "csv", "dataframe"]
def __init__(self, data=None):
self.data = data
# for later: add pandas independent functions to export arrays to file
def arrays_to_dframe(self, **kwargs):
""" Using keyworded arguments, expects arrays """
try:
df = pd.DataFrame(kwargs)
except ValueError: #if arrays do not have the same length
d = {}
for key, value in kwargs.items():
d[key] = pd.Series(value)
df = | pd.DataFrame(d) | pandas.DataFrame |
import itertools
import pandas as pd
from pandas.testing import assert_series_equal
import pytest
from solarforecastarbiter.reference_forecasts import forecast
def assert_none_or_series(out, expected):
assert len(out) == len(expected)
for o, e in zip(out, expected):
if e is None:
assert o is None
else:
assert_series_equal(o, e)
def test_resample():
index = pd.date_range(start='20190101', freq='15min', periods=5)
arg = pd.Series([1, 0, 0, 0, 2], index=index)
idx_exp = | pd.date_range(start='20190101', freq='1h', periods=2) | pandas.date_range |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import xlrd
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import roc_curve, auc, accuracy_score
import matplotlib.pyplot as plt
import xgboost as xgb
import pandas as pd
from xgboost import XGBClassifier
from pandas.core.frame import DataFrame
from sklearn.model_selection import GridSearchCV
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import ExtraTreesClassifier
import numpy as np
from sklearn import tree
from sklearn.tree import export_graphviz
import pydotplus
import random
import requests
import json
import base64
import urllib
import sys
import ssl
import graphviz
import time
# In[ ]:
def readxls(root):
data_list=[]
data=xlrd.open_workbook(root)
table=data.sheets()[0]
nrows=table.nrows
ncols=table.ncols
for i in range(1,nrows):
data_list.append(table.row_values(i))
rowname=table.row_values(0)
return data_list,rowname
# In[ ]:
healthy,rowname=readxls("negative_data.xls")
unhealthy,rowname=readxls("positive_data.xls")
total_data=healthy+unhealthy
total_data=DataFrame(total_data)
total_data.columns=rowname
#print(total_data)
target=[0]*len(healthy)+[1]*len(unhealthy)
X_train, X_test, y_train, y_test =train_test_split(total_data, target, test_size=0.25, random_state=99999)
# In[ ]:
start = time.perf_counter()
clf = ExtraTreesClassifier()
clf = clf.fit(X_train, y_train)
print(clf.feature_importances_)
model = SelectFromModel(clf, prefit=True,threshold=0.03)
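# keep only the features whose ExtraTrees importance is at least 0.03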
X_train = model.transform(X_train)
X_test=model.transform(X_test)
print(len(X_train[0]))
end = time.perf_counter()
print(str(end-start))
# In[ ]:
select_result=DataFrame(clf.feature_importances_).sort_values(by=0).T
select_result.columns=rowname
select_result.to_csv('feature selection.csv')
print(select_result)
# In[ ]:
sel_rows=np.array(rowname)[clf.feature_importances_>=0.03]
# In[ ]:
X_train=DataFrame(X_train)
X_train.columns=np.array(rowname)[clf.feature_importances_>=0.03]
X_test=DataFrame(X_test)
X_test.columns=np.array(rowname)[clf.feature_importances_>=0.03]
# In[ ]:
start=time.perf_counter()
clf = tree.DecisionTreeClassifier(max_depth=6,min_samples_split=12)
clf.fit(X_train,y_train)
y_pred = clf.predict(X_test)
print(confusion_matrix(y_test,y_pred))
print(classification_report(y_test,y_pred))
end = time.perf_counter()
print(str(end-start))
# In[ ]:
dot_tree = tree.export_graphviz(clf,out_file=None,feature_names=sel_rows,class_names=['No kidney disease','Kidney disease'],filled=True, rounded=True,special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_tree)
graph.write_png("tree6.png")
# In[ ]:
y_score = clf.fit(X_train, y_train).predict_proba(X_test)
y_score=[a[1] for a in y_score]
#y_pred=y_score>=threshold
fpr,tpr,threshold = roc_curve(y_test, y_score) ### compute the true positive rate and false positive rate
roc_auc = auc(fpr,tpr) ### compute the AUC value
plt.figure()
lw = 2
plt.figure(figsize=(10,10))
plt.plot(fpr, tpr, color='darkorange',
         lw=lw, label='ROC curve (area = %0.2f)' % roc_auc) ### plot with the false positive rate on the x-axis and the true positive rate on the y-axis
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC CURVE')
plt.legend(loc="lower right")
plt.show()
# In[ ]:
clf.feature_importances_
# In[ ]:
start=time.perf_counter()
svclassifier = SVC(kernel='poly',degree=3,class_weight={1:len(unhealthy)/len(target),0:len(healthy)/len(target)},probability=True)
svclassifier.fit(X_train, y_train)
y_pred = svclassifier.predict(X_test)
print(confusion_matrix(y_test,y_pred))
print(classification_report(y_test,y_pred))
end = time.perf_counter()
print(str(end-start))
# In[ ]:
print(svclassifier)
# In[ ]:
y_score=svclassifier.fit(X_train, y_train).decision_function(X_test)
fpr,tpr,threshold = roc_curve(y_test, y_score) ### compute the true positive rate and false positive rate
roc_auc = auc(fpr,tpr) ### compute the AUC value
plt.figure()
lw = 2
plt.figure(figsize=(10,10))
plt.plot(fpr, tpr, color='darkorange',
         lw=lw, label='ROC curve (area = %0.2f)' % roc_auc) ### plot with the false positive rate on the x-axis and the true positive rate on the y-axis
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC CURVE')
plt.legend(loc="lower right")
plt.show()
# In[ ]:
start = time.perf_counter()
model = XGBClassifier(booster='gbtree',max_depth=5,eval_metric='auc',learning_rate=0.7,min_child_weight= 0.9,verbose_eval=True)
model.fit( | DataFrame(X_train,dtype='float') | pandas.core.frame.DataFrame |
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
)
import pandas._testing as tm
dt_data = [
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
]
tz_data = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03", tz="US/Eastern"),
]
td_data = [
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
]
period_data = [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2011-03", freq="M"),
]
data_dict = {
"bool": [True, False, True],
"int64": [1, 2, 3],
"float64": [1.1, np.nan, 3.3],
"category": Categorical(["X", "Y", "Z"]),
"object": ["a", "b", "c"],
"datetime64[ns]": dt_data,
"datetime64[ns, US/Eastern]": tz_data,
"timedelta64[ns]": td_data,
"period[M]": period_data,
}
class TestConcatAppendCommon:
"""
Test common dtype coercion rules between concat and append.
"""
@pytest.fixture(params=sorted(data_dict.keys()))
def item(self, request):
key = request.param
return key, data_dict[key]
item2 = item
def _check_expected_dtype(self, obj, label):
"""
Check whether obj has expected dtype depending on label
considering not-supported dtypes
"""
if isinstance(obj, Index):
assert obj.dtype == label
elif isinstance(obj, Series):
if label.startswith("period"):
assert obj.dtype == "Period[M]"
else:
assert obj.dtype == label
else:
raise ValueError
def test_dtypes(self, item):
# to confirm test case covers intended dtypes
typ, vals = item
self._check_expected_dtype(Index(vals), typ)
self._check_expected_dtype(Series(vals), typ)
def test_concatlike_same_dtypes(self, item):
# GH 13660
typ1, vals1 = item
vals2 = vals1
vals3 = vals1
if typ1 == "category":
exp_data = Categorical(list(vals1) + list(vals2))
exp_data3 = Categorical(list(vals1) + list(vals2) + list(vals3))
else:
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3)
tm.assert_index_equal(res, exp)
# index.append name mismatch
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="y")
res = i1.append(i2)
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# index.append name match
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="x")
res = i1.append(i2)
exp = Index(exp_data, name="x")
tm.assert_index_equal(res, exp)
# cannot append non-index
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append(vals2)
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append([Index(vals2), vals3])
# ----- Series ----- #
# series.append
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True)
exp = Series(exp_data3)
tm.assert_series_equal(res, exp)
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
# name mismatch
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="y")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# name match
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="x")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data, name="x")
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# cannot append non-index
msg = (
r"cannot concatenate object of type '.+'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append(vals2)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append([Series(vals2), vals3])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), vals2])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), Series(vals2), vals3])
def test_concatlike_dtypes_coercion(self, item, item2, request):
# GH 13660
typ1, vals1 = item
typ2, vals2 = item2
vals3 = vals2
# basically infer
exp_index_dtype = None
exp_series_dtype = None
if typ1 == typ2:
# same dtype is tested in test_concatlike_same_dtypes
return
elif typ1 == "category" or typ2 == "category":
# The `vals1 + vals2` below fails bc one of these is a Categorical
# instead of a list; we have separate dedicated tests for categorical
return
warn = None
# specify expected dtype
if typ1 == "bool" and typ2 in ("int64", "float64"):
# series coerces to numeric based on numpy rule
# index doesn't because bool is object dtype
exp_series_dtype = typ2
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif typ2 == "bool" and typ1 in ("int64", "float64"):
exp_series_dtype = typ1
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif (
typ1 == "datetime64[ns, US/Eastern]"
or typ2 == "datetime64[ns, US/Eastern]"
or typ1 == "timedelta64[ns]"
or typ2 == "timedelta64[ns]"
):
exp_index_dtype = object
exp_series_dtype = object
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# ----- Series ----- #
# series._append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append(
[Series(vals2), Series(vals3)], ignore_index=True
)
exp = Series(exp_data3, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp)
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
def test_concatlike_common_coerce_to_pandas_object(self):
# GH 13626
# result must be Timestamp/Timedelta, not datetime.datetime/timedelta
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"])
tdi = pd.TimedeltaIndex(["1 days", "2 days"])
exp = Index(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
]
)
res = dti.append(tdi)
tm.assert_index_equal(res, exp)
assert isinstance(res[0], pd.Timestamp)
assert isinstance(res[-1], pd.Timedelta)
dts = Series(dti)
tds = Series(tdi)
res = dts._append(tds)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
res = pd.concat([dts, tds])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
def test_concatlike_datetimetz(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH 7795
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz=tz)
exp = pd.DatetimeIndex(
["2011-01-01", "2011-01-02", "2012-01-01", "2012-01-02"], tz=tz
)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts2 = Series(dti2)
res = dts1._append(dts2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
@pytest.mark.parametrize("tz", ["UTC", "US/Eastern", "Asia/Tokyo", "EST5EDT"])
def test_concatlike_datetimetz_short(self, tz):
# GH#7795
ix1 = pd.date_range(start="2014-07-15", end="2014-07-17", freq="D", tz=tz)
ix2 = pd.DatetimeIndex(["2014-07-11", "2014-07-21"], tz=tz)
df1 = DataFrame(0, index=ix1, columns=["A", "B"])
df2 = DataFrame(0, index=ix2, columns=["A", "B"])
exp_idx = pd.DatetimeIndex(
["2014-07-15", "2014-07-16", "2014-07-17", "2014-07-11", "2014-07-21"],
tz=tz,
)
exp = DataFrame(0, index=exp_idx, columns=["A", "B"])
tm.assert_frame_equal(df1._append(df2), exp)
tm.assert_frame_equal(pd.concat([df1, df2]), exp)
def test_concatlike_datetimetz_to_object(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH 13660
# different tz coerces to object
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"])
exp = Index(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2012-01-01"),
pd.Timestamp("2012-01-02"),
],
dtype=object,
)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts2 = Series(dti2)
res = dts1._append(dts2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
# different tz
dti3 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz="US/Pacific")
exp = Index(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2012-01-01", tz="US/Pacific"),
pd.Timestamp("2012-01-02", tz="US/Pacific"),
],
dtype=object,
)
res = dti1.append(dti3)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts3 = Series(dti3)
res = dts1._append(dts3)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts3])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period(self):
# GH 13660
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
pi2 = pd.PeriodIndex(["2012-01", "2012-02"], freq="M")
exp = pd.PeriodIndex(["2011-01", "2011-02", "2012-01", "2012-02"], freq="M")
res = pi1.append(pi2)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
ps2 = Series(pi2)
res = ps1._append(ps2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, ps2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period_diff_freq_to_object(self):
# GH 13221
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
pi2 = pd.PeriodIndex(["2012-01-01", "2012-02-01"], freq="D")
exp = Index(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
| pd.Period("2012-01-01", freq="D") | pandas.Period |
#!/usr/bin/env python
# -*-coding:utf-8 -*-
'''
@File : Stress_detection_script.py
@Time : 2022/03/17 09:45:59
@Author : <NAME>
@Contact : <EMAIL>
'''
import os
import logging
import plotly.express as px
import numpy as np
import pandas as pd
import zipfile
import fnmatch
import flirt.reader.empatica
import matplotlib.pyplot as plt
from tqdm import tqdm
from datetime import datetime, timedelta
import cvxopt as cv
from neurokit2 import eda_phasic
from matplotlib.font_manager import FontProperties
import matplotlib.dates as mdates
# rootPath = r"./"
# pattern = '*.zip'
rootPath = input("Enter Folder Path : ")
pattern = input("Enter File Name : ")
for root, dirs, files in os.walk(rootPath):
for filename in fnmatch.filter(files, pattern):
print(os.path.join(root, filename))
zipfile.ZipFile(os.path.join(root, filename)).extractall(
os.path.join(root, os.path.splitext(filename)[0]))
dir = os.path.splitext(pattern)[0]
# os.listdir(dir)
class process:
def moving_avarage_smoothing(X, k, description_str):
S = np.zeros(X.shape[0])
for t in tqdm(range(X.shape[0]), desc=description_str):
if t < k:
S[t] = np.mean(X[:t+1])
else:
S[t] = np.sum(X[t-k:t])/k
return S
def deviation_above_mean(unit, mean_unit, std_unit):
'''
Function takes 3 arguments
unit : number of Standard deviations above the mean
mean_unit : mean value of each signal
std_unit : standard deviation of each signal
'''
if unit == 0:
return (mean_unit)
else:
return (mean_unit + (unit*std_unit))
def Starting_timeStamp(column, time_frames, deviation_metric):
'''
        Function takes a signal, its timestamps and a threshold, and returns the timestamps at which the signal rises above the threshold value
'''
starting_time_index = []
for i in range(len(column)-1): #iterating till the end of the array
            if column[i] < deviation_metric and column[i+1] > deviation_metric: # the signal crosses from below to above the threshold between samples i and i+1 (rising edge)
starting_time_index.append(time_frames[i]) #appending the timestamp's index to the declared empty array
return starting_time_index
def Ending_timeStamp(column, time_frames, deviation_metric):
'''
        Function takes a signal, its timestamps and a threshold, and returns the timestamps at which the signal falls back below the threshold value
'''
time_index = []
for i in range(len(column)-1):
            if column[i] > deviation_metric and column[i+1] < deviation_metric: # the signal crosses from above to below the threshold between samples i and i+1 (falling edge)
time_index.append(time_frames[i])
if column[len(column) - 1] > deviation_metric: # checking for hanging ends, where the signal stops abruptly
time_index.insert(
                len(time_index), time_frames[len(time_frames) - 1]) # append the final timestamp so the last above-threshold episode still gets an ending time
else:
pass
return time_index
def Extract_HRV_Information():
global hrv_features # declaring global to get access them for combined plot function
global hrv_events_df # declaring global to get access them for combined plot function
        ibi = pd.read_csv(rootPath+'/'+dir+'/IBI.csv')
mean_ibi = ibi[' IBI'].mean()
average_heart_rate = 60/mean_ibi
print('mean ibi is :', mean_ibi)
print('mean heart rate :', average_heart_rate.round())
ibis = flirt.reader.empatica.read_ibi_file_into_df(
            rootPath+'/'+dir + '/IBI.csv')
hrv_features = flirt.get_hrv_features(
ibis['ibi'], 128, 1, ["td", "fd"], 0.2)
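        # sliding-window time-domain ("td") and frequency-domain ("fd") HRV features computed from the IBI series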
hrv_features = hrv_features.dropna(how='any', axis=0)
hrv_features.reset_index(inplace=True)
hrv_features['datetime'] = hrv_features['datetime'].dt.tz_convert('US/Eastern')
hrv_features['datetime'] = pd.to_datetime(hrv_features['datetime'])
hrv_features['datetime'] = hrv_features['datetime'].apply(lambda x: datetime.replace(x, tzinfo=None))
# smoothing the curve
print('\n', '******************** Smoothing The Curve ********************', '\n')
MAG_K500 = process.moving_avarage_smoothing(
hrv_features['hrv_rmssd'], 500, "Processing HRV Data")
hrv_features['MAG_K500'] = MAG_K500
# hrv_features.to_csv("./Metadata/"+ dir+"_HRV.csv")
# hrv_features.to_csv(os.path.join('./Metadata'+dir+'_HRV.csv'))
mean_rmssd = hrv_features['hrv_rmssd'].mean()
std_rmssd = hrv_features['hrv_rmssd'].std()
        # getting the starting and ending times of the signal
starting_timestamp = process.Starting_timeStamp(hrv_features['MAG_K500'], hrv_features['datetime'],
process.deviation_above_mean(1, mean_rmssd, std_rmssd))
ending_timestamp = process.Ending_timeStamp(hrv_features['MAG_K500'], hrv_features['datetime'],
process.deviation_above_mean(1, mean_rmssd, std_rmssd))
        # the if branch below assumes that no event crossed the threshold
if len(starting_timestamp) < 1:
fig, ax1 = plt.subplots(figsize=(30, 10))
ax1.plot(hrv_features['datetime'],
hrv_features['MAG_K500'], color='red')
# fig.savefig('./Plots/HRV_figure.png')
else:
            # if the signal starts above the threshold, the first ending timestamp
            # precedes the first starting timestamp, so drop it to keep both lists aligned
            if starting_timestamp > ending_timestamp:
                ending_timestamp.pop(0)
difference = [] # empty array to see how long the event lasts in seconds
time_delta_minutes = []
desired_time_index = []
zip_object = zip(ending_timestamp, starting_timestamp)
for list1_i, list2_i in zip_object:
# append each difference to list
difference.append(list1_i-list2_i) #subtracting ending timestamp - starting timestamp to get difference in seconds
            for i in difference:
                time_delta_minutes.append(i.total_seconds()/60)  # converting the difference in seconds to minutes
            for i in range(len(time_delta_minutes)):
                if time_delta_minutes[i] > 5.00:  # checking if each episode lasts more than 5 minutes
                    desired_time_index.append(i)
starting_timestamp_df = pd.DataFrame(starting_timestamp)
ending_timestamp_df = pd.DataFrame(ending_timestamp)
frames = (starting_timestamp_df, ending_timestamp_df)
hrv_events_df = pd.concat(frames, axis=1)
hrv_events_df.columns = ['Starting Timestamp', 'Ending Timestamp']
hrv_events_df['Starting Timestamp'] = hrv_events_df['Starting Timestamp'].dt.strftime("%Y-%m-%d %H:%M:%S") #converting it to Y:M:D H:M:S to ignore nanoseconds in timestamp dataframe
hrv_events_df['Ending Timestamp'] = hrv_events_df['Ending Timestamp'].dt.strftime("%Y-%m-%d %H:%M:%S")
hrv_events_df = hrv_events_df.loc[desired_time_index, :] # selecting only the timestamps which crosses the time threshold limit
fig, ax = plt.subplots(figsize=(20, 6))
ax.plot(hrv_features['datetime'],
hrv_features['MAG_K500'], color='red')
for d in hrv_events_df.index:
ax.axvspan(hrv_events_df['Starting Timestamp'][d], hrv_events_df['Ending Timestamp']
[d], facecolor="g", edgecolor="none", alpha=0.5)
ax.relim()
ax.autoscale_view()
# fig.savefig('./Plots/HRV_figure.png')
return hrv_features, hrv_events_df
def Extract_ACC_Infromation():
global acc_df
global acc_events_df
acc_df = pd.read_csv(rootPath+'/'+dir + '/ACC.csv')
acc_df = flirt.reader.empatica.read_acc_file_into_df(
rootPath+'/'+dir + '/ACC.csv')
acc_df['Magnitude'] = np.sqrt(
acc_df['acc_x']**2 + acc_df['acc_y']**2 + acc_df['acc_z']**2)
print("Magnitude Mean : ", acc_df['Magnitude'].mean())
acc_df.reset_index(inplace=True)
acc_df['datetime'] = acc_df['datetime'].dt.tz_convert('US/Eastern')
acc_df['datetime'] = pd.to_datetime(acc_df['datetime'])
acc_df['datetime'] = acc_df['datetime'].apply(lambda x: datetime.replace(x, tzinfo=None))
print('\n', '******************** Smoothing The ACC Curve ********************', '\n')
MAG_K500 = process.moving_avarage_smoothing(
acc_df['Magnitude'], 15000, "Processing ACC Data")
acc_df['MAG_K500'] = MAG_K500
# acc_df.to_csv("./Metadata/"+ dir+"_ACC.csv")
mean_acc_magnitude = acc_df['Magnitude'].mean()
std_acc_magnitude = acc_df['Magnitude'].std()
print("Average Magnitude of the Acc Data : ", mean_acc_magnitude)
starting_timestamp = process.Starting_timeStamp(acc_df['MAG_K500'], acc_df['datetime'],
process.deviation_above_mean(0.20, mean_acc_magnitude, std_acc_magnitude))
ending_timestamp = process.Ending_timeStamp(acc_df['MAG_K500'], acc_df['datetime'],
process.deviation_above_mean(0.20, mean_acc_magnitude, std_acc_magnitude))
if len(starting_timestamp) < 1:
fig, ax2 = plt.subplots(figsize=(30, 10))
ax2.plot(acc_df['datetime'], acc_df['MAG_K500'], color='red')
fig.savefig('./Plots/ACC_figure.png')
else:
if starting_timestamp > ending_timestamp:
ending_timestamp.pop(0)
difference = [] # initialization of result list
time_delta_minutes = []
desired_time_index = []
zip_object = zip(ending_timestamp, starting_timestamp)
for list1_i, list2_i in zip_object:
# append each difference to list
difference.append(list1_i-list2_i)
for i in difference:
time_delta_minutes.append(i.total_seconds()/60)
for i in range(len(time_delta_minutes)):
if time_delta_minutes[i] > 2.00:
desired_time_index.append(i)
starting_timestamp_df = pd.DataFrame(starting_timestamp)
ending_timestamp_df = pd.DataFrame(ending_timestamp)
frames = (starting_timestamp_df, ending_timestamp_df)
acc_events_df = pd.concat(frames, axis=1)
acc_events_df.columns = ['Starting Timestamp', 'Ending Timestamp']
acc_events_df['Starting Timestamp'] = acc_events_df['Starting Timestamp'].dt.strftime("%Y-%m-%d %H:%M:%S")
acc_events_df['Ending Timestamp'] = acc_events_df['Ending Timestamp'].dt.strftime("%Y-%m-%d %H:%M:%S")
acc_events_df = acc_events_df.loc[desired_time_index, :]
# acc_events_df.to_csv(rootPath+"timestamp_" +dir+ "_ACC.csv")
fig, ax2 = plt.subplots(figsize=(30, 10))
ax2.plot(acc_df['datetime'], acc_df['MAG_K500'], color='red')
for d in acc_events_df.index:
ax2.axvspan(acc_events_df['Starting Timestamp'][d], acc_events_df['Ending Timestamp']
[d], facecolor="g", edgecolor="none", alpha=0.5)
ax2.relim()
ax2.autoscale_view()
fig.savefig('./Plots/ACC_figure.png')
def Extract_GSR_Phasic_Information():
global eda_df
global eda_phasic_df
global eda_phasic_events_df
eda_df = pd.read_csv(rootPath+'/'+dir+'/EDA.csv')
eda_df = flirt.reader.empatica.read_eda_file_into_df(
rootPath+'/' + dir + '/EDA.csv')
eda_df.reset_index(inplace=True)
eda_df['datetime'] = eda_df['datetime'].dt.tz_convert('US/Eastern')
eda_df['datetime'] = pd.to_datetime(eda_df['datetime'])
eda_df['datetime'] = eda_df['datetime'].apply(lambda x: datetime.replace(x, tzinfo=None))
eda = np.array(eda_df['eda'])
Phasic_Tonic_DF = eda_phasic(eda, 4, method='cvxEDA')
eda_df['tonic'] = Phasic_Tonic_DF['EDA_Tonic']
eda_df['phasic'] = Phasic_Tonic_DF['EDA_Phasic']
eda_phasic_df = eda_df.copy()
print('\n', '******************** Smoothing The EDA Phasic Curve ********************', '\n')
MAG_K500 = process.moving_avarage_smoothing(
eda_phasic_df['phasic'], 2000, "Processing EDA Phasic Data")
eda_phasic_df['MAG_K500'] = MAG_K500
# hrv_features.to_csv('hrv_features.csv')
mean_eda_phasic = eda_phasic_df['phasic'].mean()
std_eda_phasic = eda_phasic_df['phasic'].std()
starting_timestamp = process.Starting_timeStamp(eda_phasic_df['MAG_K500'], eda_phasic_df['datetime'],
process.deviation_above_mean(1, mean_eda_phasic, std_eda_phasic))
ending_timestamp = process.Ending_timeStamp(eda_phasic_df['MAG_K500'], eda_phasic_df['datetime'],
process.deviation_above_mean(1, mean_eda_phasic, std_eda_phasic))
if len(starting_timestamp) < 1:
fig, ax2 = plt.subplots(figsize=(30, 10))
ax2.plot(eda_phasic_df['datetime'],
eda_phasic_df['MAG_K500'], color='red')
fig.savefig('./Plots/EDA_Phasic_figure.png')
else:
if starting_timestamp > ending_timestamp:
ending_timestamp.pop(0)
difference = [] # initialization of result list
time_delta_minutes = []
desired_time_index = []
zip_object = zip(ending_timestamp, starting_timestamp)
for list1_i, list2_i in zip_object:
# append each difference to list
difference.append(list1_i-list2_i)
for i in difference:
time_delta_minutes.append(i.total_seconds()/60)
for i in range(len(time_delta_minutes)):
if time_delta_minutes[i] > 2.00:
desired_time_index.append(i)
starting_timestamp_df = pd.DataFrame(starting_timestamp)
ending_timestamp_df = pd.DataFrame(ending_timestamp)
frames = (starting_timestamp_df, ending_timestamp_df)
eda_phasic_events_df = pd.concat(frames, axis=1)
eda_phasic_events_df.columns = [
'Starting Timestamp', 'Ending Timestamp']
eda_phasic_events_df['Starting Timestamp'] = eda_phasic_events_df['Starting Timestamp'].dt.strftime("%Y-%m-%d %H:%M:%S")
eda_phasic_events_df['Ending Timestamp'] = eda_phasic_events_df['Ending Timestamp'].dt.strftime("%Y-%m-%d %H:%M:%S")
eda_phasic_events_df = eda_phasic_events_df.loc[desired_time_index, :]
# eda_phasic_events_df.to_csv(rootPath+"timestamp_" + dir + "_EDA.csv")
fig, ax3 = plt.subplots(figsize=(30, 10))
ax3.plot(eda_phasic_df['datetime'],
eda_phasic_df['MAG_K500'], color='red')
for d in eda_phasic_events_df.index:
ax3.axvspan(eda_phasic_events_df['Starting Timestamp'][d],
eda_phasic_events_df['Ending Timestamp'][d], facecolor="g", edgecolor="none", alpha=0.5)
ax3.relim()
ax3.autoscale_view()
fig.savefig('./Plots/EDA_Phasic_figure.png')
return eda_df
def Extract_GSR_Tonic_Information():
global eda_tonic_df
global eda_tonic_events_df
eda_df = pd.read_csv(rootPath+'/'+dir+'/EDA.csv')
eda_df = flirt.reader.empatica.read_eda_file_into_df(
rootPath+'/' + dir + '/EDA.csv')
eda_df.reset_index(inplace=True)
eda_df['datetime'] = eda_df['datetime'].dt.tz_convert('US/Eastern')
        eda_df['datetime'] = pd.to_datetime(eda_df['datetime'])
import numpy as np
import pytest
from pandas import (
DataFrame,
MultiIndex,
)
import pandas._testing as tm
class TestReorderLevels:
def test_reorder_levels(self, frame_or_series):
index = MultiIndex(
levels=[["bar"], ["one", "two", "three"], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
names=["L0", "L1", "L2"],
)
df = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=index)
obj = tm.get_obj(df, frame_or_series)
# no change, position
result = obj.reorder_levels([0, 1, 2])
tm.assert_equal(obj, result)
# no change, labels
result = obj.reorder_levels(["L0", "L1", "L2"])
tm.assert_equal(obj, result)
# rotate, position
result = obj.reorder_levels([1, 2, 0])
e_idx = MultiIndex(
levels=[["one", "two", "three"], [0, 1], ["bar"]],
codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0]],
names=["L1", "L2", "L0"],
)
expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
        expected = tm.get_obj(expected, frame_or_series)
        tm.assert_equal(result, expected)
"""
General purpose parser for the output of the MAP operations of GMQL
"""
import pandas as pd
import os
import xml.etree.ElementTree
class OutputGenerator:
def __init__(self,path):
self.path = path
self.data = None
self.meta_data = None
return
def get_sample_name(self, path):
sp = path.split('/')
file_name = sp[-1]
return file_name.split('.')[0]
def _get_files(self, extension, path):
# retrieves the files sharing the same extension
files = []
for file in os.listdir(path):
if file.endswith(extension):
files.append(os.path.join(path, file))
return sorted(files)
def _get_file(self, extension):
for file in os.listdir(self.path):
if file.endswith(extension):
return os.path.join(self.path, file)
def parse_schema(self, schema_file):
# parses the schema and returns its columns
e = xml.etree.ElementTree.parse(schema_file)
root = e.getroot()
cols = []
for elem in root.findall(".//{http://genomic.elet.polimi.it/entities}field"): # XPATH
cols.append(elem.text)
return cols
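    # Illustrative example (hypothetical field names): for a schema declaring
    # the fields "seqname", "start" and "stop", parse_schema returns
    # ['seqname', 'start', 'stop'], which read_one later uses as column names.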
def read_meta_data(self, fname):
# reads a meta data file into a dictionary
d = {}
with open(fname) as f:
for line in f:
(key, val) = line.split('\t')
d[key] = val
return d
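    # Illustrative example: a meta file line such as "antibody_target<TAB>CTCF"
    # (hypothetical attribute) becomes the entry {'antibody_target': 'CTCF\n'};
    # note the value keeps its trailing newline because it is not stripped here.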
def read_all_meta_data(self):
# reads all meta data files
files = self._get_files("meta", self.path)
meta_data = []
for f in files:
var = self.read_meta_data(f)
meta_data.append(var)
self.meta_data = meta_data
def read_one(self, path, cols, desired_col):
# reads a sample file
df = pd.read_table(path, sep="\t|;", lineterminator="\n")
df = df.drop(df.columns[[-1]],axis=1) # the last column is null
df.columns = cols # column names from schema
df = df.drop(df.columns[[1, 2, 5, 7]], axis=1)
df['region'] = df['seqname'].map(str) + ',' + df['start'].map(str) + '-' + df['end'].map(str) + ',' + df[
'strand'].map(str)
sample = self.get_sample_name(path)
df['sample'] = sample
desired_cols = ['sample', 'region', desired_col]
df = df[desired_cols]
df[desired_col] = df[desired_col].apply(lambda x: x.split('"')[-2] if isinstance(x, str) and (x.find('"') != -1) else x) #if it is a string and contains "
return df
def select_columns(self, desired_cols):
self.data = self.data[desired_cols]
def read_all(self, path, schema_file,desired_col):
# reads all sample files
files = self._get_files("gtf", path)
        df = pd.DataFrame()
"""
Create by: apenasrr
Source: https://github.com/apenasrr/mass_videojoin
"""
import os
import pandas as pd
import datetime
import logging
from video_tools import change_width_height_mp4, get_video_details, \
join_mp4, split_mp4
from config_handler import handle_config_file
import unidecode
import natsort
import glob
def logging_config():
logfilename = 'log-' + 'mass_videojoin' + '.txt'
logging.basicConfig(filename=logfilename, level=logging.DEBUG,
format=' %(asctime)s-%(levelname)s-%(message)s')
# set up logging to console
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter(' %(asctime)s-%(levelname)s-%(message)s')
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
logger = logging.getLogger(__name__)
def clean_cmd():
clear = lambda: os.system('cls')
clear()
def df_sort_human(df):
"""
Sort files and folders in human way.
So after folder/file '1' comes '2', instead of '10' and '11'.
Simple yet flexible natural sorting in Python.
When you try to sort a list of strings that contain numbers,
the normal python sort algorithm sorts lexicographically,
so you might not get the results that you expect:
More info: https://github.com/SethMMorton/natsort
:input: DataFrame. With columns [file_folder, file_name]
:return: DataFrame. Sort in a human way by [file_folder, file_name]
"""
def sort_human(list_):
list_ = natsort.natsorted(list_)
return list_
def sort_df_column_from_list(df, column_name, sorter):
"""
:input: df: DataFrame
:input: column_name: String
:input: sorter: List
:return: DataFrame
"""
sorterIndex = dict(zip(sorter, range(len(sorter))))
df['order'] = df[column_name].map(sorterIndex)
        df.sort_values(['order'], ascending=[True], inplace=True)
        df.drop(columns=['order', column_name], inplace=True)
return df
column_name = 'path_file'
df[column_name] = df['file_folder'] + '\\' + df['file_name']
list_path_file = df[column_name].tolist()
sorter = sort_human(list_path_file)
df = sort_df_column_from_list(df, column_name, sorter)
return df
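# Illustrative example of the natural ordering used above:
#   natsort.natsorted(['clip10.mp4', 'clip2.mp4']) -> ['clip2.mp4', 'clip10.mp4']
# whereas the built-in sorted() would place 'clip10.mp4' first.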
def gen_report(path_dir):
    # TODO: support more video file extensions:
## https://dotwhat.net/type/video-movie-files
l=[]
for root, dirs, files in os.walk(path_dir):
for file in files:
file_lower = file.lower()
if file_lower.endswith((".mp4", ".avi", ".webm", '.ts', '.vob',
'.mov')):
print(file)
path_file = os.path.join(root, file)
dict_inf = get_video_details(path_file)
(mode, ino, dev, nlink, uid,
gid, size, atime, mtime, ctime) = os.stat(path_file)
mtime = datetime.datetime.fromtimestamp(mtime)
d={}
d['mtime']=mtime
d['file_folder'] = root
d['file_name'] = file
d['file_size'] = os.path.getsize(path_file)
try:
d['duration'] = dict_inf['duration']
except:
                    logging.error(f'video without duration: {path_file}')
d['duration'] = ''
d['bitrate'] = dict_inf['bitrate']
d['video_codec'] = dict_inf['video']['codec']
d['video_profile'] = dict_inf['video']['profile']
d['video_resolution'] = dict_inf['video']['resolution']
d['video_bitrate'] = dict_inf['video']['bitrate']
# some videos dont have audio
try:
d['audio_codec'] = dict_inf['audio']['codec']
d['audio_frequency'] = dict_inf['audio']['frequency']
d['audio_bitrate'] = dict_inf['audio']['bitrate']
except:
d['audio_codec'] = ''
d['audio_frequency'] = ''
d['audio_bitrate'] = ''
d['video_resolution_to_change'] = ''
l.append(d)
df = pd.DataFrame(l)
return df
def get_video_details_with_group(df):
df['key_join_checker'] = df['audio_codec'] + '-' + \
df['video_codec'] + '-' + \
df['video_resolution']
# set group_encode
df['group_encode'] = 1
for index, row in df.iterrows():
if index>0:
group_encode_value_prev = df.loc[index-1, 'group_encode']
if row['key_join_checker'] != df.loc[index-1, 'key_join_checker']:
df.loc[index, 'group_encode'] = group_encode_value_prev + 1
else:
df.loc[index, 'group_encode'] = group_encode_value_prev
return df
def get_list_chunk_videos_from_group(df, group_no, max_size_mb):
max_size_bytes = max_size_mb * 1024**2
mask = df['group_encode'].isin([group_no])
df['file_path'] = df['file_folder'] + '\\' + \
df['file_name']
df_group = df.loc[mask, :]
list_chunk_videos = []
chunk_size = 0
list_videos = []
for index, row in df_group.iterrows():
if chunk_size + row['file_size'] > max_size_bytes:
logging.info(f'join video from {len(list_videos)} files')
list_chunk_videos.append(list_videos)
list_videos = []
chunk_size = 0
list_videos.append(row['file_path'])
chunk_size += row['file_size']
if len(list_videos) > 0:
logging.info(f'join video from {len(list_videos)} files')
list_chunk_videos.append(list_videos)
list_videos = []
logging.info(f'group {group_no} will generate ' + \
f'{len(list_chunk_videos)} videos')
return list_chunk_videos
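# Illustrative example (hypothetical sizes): with max_size_mb=1000 and a group
# holding three 400 MB files, the loop above yields two chunks,
# [[file1, file2], [file3]], because adding the third file would exceed the cap.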
def get_list_chunk_videos(df, max_size_mb):
list_group = df['group_encode'].unique().tolist()
list_final = []
for group_no in list_group:
group_no = str(group_no)
list_chunk_videos = get_list_chunk_videos_from_group(df, group_no,
max_size_mb)
list_final += list_chunk_videos
print('')
return list_final
def get_name_dir_origin():
name_file_folder_name = get_txt_folder_origin()
dir_name_saved = get_txt_content(name_file_folder_name)
return dir_name_saved
def get_path_folder_output_video():
path_folder_output = get_name_dir_origin()
folder_name = 'output_' + path_folder_output
ensure_folder_existence([folder_name])
path_folder_output_video = os.path.join(folder_name, 'output_videos')
ensure_folder_existence([path_folder_output_video])
return path_folder_output_video
def join_videos(df, max_size_mb, start_index_output):
# path_folder_output = userpref_folderoutput()
path_folder_output = get_path_folder_output_video()
# default_filename_output = input('Enter a default name for the joined ' +\
# 'videos: ')
default_filename_output = get_name_dir_origin()
df['file_path'] = df['file_folder'] + '\\' + df['file_name']
list_chunk_videos = get_list_chunk_videos(df, max_size_mb)
df['file_output'] = ''
for index, list_file_path in enumerate(list_chunk_videos):
file_count = index + start_index_output
        file_name_output = f'{default_filename_output}-{file_count:03d}.mp4'
file_path_output = os.path.join(path_folder_output, file_name_output)
join_mp4(list_file_path, file_path_output)
# register file_output in dataframe
mask_files_joined = df['file_path'].isin(list_file_path)
df.loc[mask_files_joined, 'file_output'] = file_name_output
print(f'total: {len(list_chunk_videos)} videos')
return df
def exclude_all_files_from_folder(path_folder):
path_folder_regex = os.path.join(path_folder, '*')
r = glob.glob(path_folder_regex)
for i in r:
os.remove(i)
def make_reencode(df):
folder_script_path = get_folder_script_path()
path_folder_encoded = os.path.join(folder_script_path, 'videos_encoded')
exclude_all_files_from_folder(path_folder_encoded)
# exclude_folder_videos_encoded()
df['file_folder_origin'] = df['file_folder']
df['file_name_origin'] = df['file_name']
df['file_size_origin'] = df['file_size']
df['video_resolution_origin'] = df['video_resolution']
mask_df_to_reencode = ~df['video_resolution_to_change'].isna()
df_to_reencode = df.loc[mask_df_to_reencode, :]
folder_script_path = get_folder_script_path()
path_folder_dest = os.path.join(folder_script_path, 'videos_encoded')
for index, row in df_to_reencode.iterrows():
size_width, size_height = row['video_resolution_to_change'].split('x')
path_file_origin = os.path.join(row['file_folder_origin'],
row['file_name_origin'])
print(path_file_origin)
# path_folder_dest = r'videos_encoded'
path_file_name_dest = str(index) + '.mp4'
path_file_dest = os.path.join(path_folder_dest,
path_file_name_dest)
# todo reencode
# input path_folder_dest in column file_folder
df.loc[index, 'file_folder'] = os.path.abspath(path_folder_dest)
# input path_file_name_dest in column file_name
df.loc[index, 'file_name'] = path_file_name_dest
change_width_height_mp4(path_file_origin, size_height,
size_width, path_file_dest)
file_size = os.stat(path_file_dest).st_size
df.loc[index, 'file_size'] = file_size
df.loc[index, 'video_resolution'] = row['video_resolution_to_change']
# from encoded video get video metadata
metadata = get_video_details(path_file_dest)
# register video metadata
df.loc[index, 'bitrate'] = metadata['bitrate']
df.loc[index, 'video_bitrate'] = metadata['video']['bitrate']
df.loc[index, 'video_codec'] = metadata['video']['codec']
df.loc[index, 'audio_codec'] = metadata['audio']['codec']
df.loc[index, 'audio_bitrate'] = metadata['audio']['bitrate']
df.loc[index, 'duration'] = metadata['duration']
return df
def menu_ask():
# ptbr
# print('1-Gerar planilha listando os arquivos')
# print('2-Processar reencode dos vídeos marcados na coluna '+
# '"video_resolution_to_change"')
# print('3-Agrupar vídeos em grupos de até 1 gb com mesmo codec e resolução')
# eng
print('1-Generate worksheet listing the files')
print('2-Process reencode of videos marked in column ' +
'"video_resolution_to_change"')
print('3-Group videos into groups up to 1 gb with the same codec ' + \
'and resolution')
# ptbr
# msg_type_answer = 'Digite sua resposta: '
# eng
msg_type_answer = 'Type your answer: '
make_report = int(input(f'\n{msg_type_answer}'))
if make_report == 1:
return 1
elif make_report == 2:
return 2
elif make_report == 3:
return 3
else:
# ptbr
# msg_invalid_option = "Opção não disponível"
# eng
msg_invalid_option = "Invalid option"
raise MyValidationError(msg_invalid_option)
def df_insert_row(row_number, df, row_value):
"""
A customized function to insert a row at any given position in the
dataframe.
source: https://www.geeksforgeeks.org/insert-row-at-given-position-in-pandas-dataframe/
:input: row_number: Int.
:input: df: Dataframe.
:input: row_value: Int.
:return: Dataframe. df_result |
Boolean. False. If the row_number was invalid.
"""
if row_number > df.index.max()+1:
print("df_insert_row: Invalid row_number")
return False
# Slice the upper half of the dataframe
df1 = df[0:row_number]
# Store the result of lower half of the dataframe
df2 = df[row_number:]
    # Insert the row into the upper half dataframe
    df1.loc[row_number] = row_value
# Concat the two dataframes
    df_result = pd.concat([df1, df2])
    return df_result
"""
The ``pvsystem`` module contains functions for modeling the output and
performance of PV modules and inverters.
"""
from collections import OrderedDict
import io
import os
from urllib.request import urlopen
import warnings
import numpy as np
import pandas as pd
from pvlib._deprecation import deprecated
from pvlib import (atmosphere, iam, irradiance, singlediode as _singlediode,
temperature)
from pvlib.tools import _build_kwargs
from pvlib.location import Location
from pvlib._deprecation import pvlibDeprecationWarning
# a dict of required parameter names for each DC power model
_DC_MODEL_PARAMS = {
'sapm': set([
'A0', 'A1', 'A2', 'A3', 'A4', 'B0', 'B1', 'B2', 'B3',
'B4', 'B5', 'C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6',
'C7', 'Isco', 'Impo', 'Voco', 'Vmpo', 'Aisc', 'Aimp', 'Bvoco',
'Mbvoc', 'Bvmpo', 'Mbvmp', 'N', 'Cells_in_Series',
'IXO', 'IXXO', 'FD']),
'desoto': set([
'alpha_sc', 'a_ref', 'I_L_ref', 'I_o_ref',
'R_sh_ref', 'R_s']),
'cec': set([
'alpha_sc', 'a_ref', 'I_L_ref', 'I_o_ref',
'R_sh_ref', 'R_s', 'Adjust']),
'pvsyst': set([
'gamma_ref', 'mu_gamma', 'I_L_ref', 'I_o_ref',
'R_sh_ref', 'R_sh_0', 'R_s', 'alpha_sc', 'EgRef',
'cells_in_series']),
'singlediode': set([
'alpha_sc', 'a_ref', 'I_L_ref', 'I_o_ref',
'R_sh_ref', 'R_s']),
'pvwatts': set(['pdc0', 'gamma_pdc'])
}
def _combine_localized_attributes(pvsystem=None, location=None, **kwargs):
"""
Get and combine attributes from the pvsystem and/or location
with the rest of the kwargs.
"""
if pvsystem is not None:
pv_dict = pvsystem.__dict__
else:
pv_dict = {}
if location is not None:
loc_dict = location.__dict__
else:
loc_dict = {}
new_kwargs = dict(
list(pv_dict.items()) + list(loc_dict.items()) + list(kwargs.items())
)
return new_kwargs
# not sure if this belongs in the pvsystem module.
# maybe something more like core.py? It may eventually grow to
# import a lot more functionality from other modules.
class PVSystem(object):
"""
The PVSystem class defines a standard set of PV system attributes
and modeling functions. This class describes the collection and
interactions of PV system components rather than an installed system
on the ground. It is typically used in combination with
:py:class:`~pvlib.location.Location` and
:py:class:`~pvlib.modelchain.ModelChain`
objects.
See the :py:class:`LocalizedPVSystem` class for an object model that
describes an installed PV system.
The class supports basic system topologies consisting of:
* `N` total modules arranged in series
(`modules_per_string=N`, `strings_per_inverter=1`).
* `M` total modules arranged in parallel
(`modules_per_string=1`, `strings_per_inverter=M`).
* `NxM` total modules arranged in `M` strings of `N` modules each
(`modules_per_string=N`, `strings_per_inverter=M`).
The class is complementary to the module-level functions.
The attributes should generally be things that don't change about
the system, such the type of module and the inverter. The instance
methods accept arguments for things that do change, such as
irradiance and temperature.
Parameters
----------
surface_tilt: float or array-like, default 0
Surface tilt angles in decimal degrees.
The tilt angle is defined as degrees from horizontal
(e.g. surface facing up = 0, surface facing horizon = 90)
surface_azimuth: float or array-like, default 180
Azimuth angle of the module surface.
North=0, East=90, South=180, West=270.
albedo : None or float, default None
The ground albedo. If ``None``, will attempt to use
``surface_type`` and ``irradiance.SURFACE_ALBEDOS``
to lookup albedo.
surface_type : None or string, default None
The ground surface type. See ``irradiance.SURFACE_ALBEDOS``
for valid values.
module : None or string, default None
The model name of the modules.
May be used to look up the module_parameters dictionary
via some other method.
module_type : None or string, default 'glass_polymer'
Describes the module's construction. Valid strings are 'glass_polymer'
and 'glass_glass'. Used for cell and module temperature calculations.
module_parameters : None, dict or Series, default None
Module parameters as defined by the SAPM, CEC, or other.
temperature_model_parameters : None, dict or Series, default None.
Temperature model parameters as defined by the SAPM, Pvsyst, or other.
modules_per_string: int or float, default 1
See system topology discussion above.
strings_per_inverter: int or float, default 1
See system topology discussion above.
inverter : None or string, default None
The model name of the inverters.
May be used to look up the inverter_parameters dictionary
via some other method.
inverter_parameters : None, dict or Series, default None
Inverter parameters as defined by the SAPM, CEC, or other.
racking_model : None or string, default 'open_rack'
Valid strings are 'open_rack', 'close_mount', and 'insulated_back'.
Used to identify a parameter set for the SAPM cell temperature model.
losses_parameters : None, dict or Series, default None
Losses parameters as defined by PVWatts or other.
name : None or string, default None
**kwargs
Arbitrary keyword arguments.
Included for compatibility, but not used.
See also
--------
pvlib.location.Location
pvlib.tracking.SingleAxisTracker
pvlib.pvsystem.LocalizedPVSystem
"""
def __init__(self,
surface_tilt=0, surface_azimuth=180,
albedo=None, surface_type=None,
module=None, module_type='glass_polymer',
module_parameters=None,
temperature_model_parameters=None,
modules_per_string=1, strings_per_inverter=1,
inverter=None, inverter_parameters=None,
racking_model='open_rack', losses_parameters=None, name=None,
**kwargs):
self.surface_tilt = surface_tilt
self.surface_azimuth = surface_azimuth
# could tie these together with @property
self.surface_type = surface_type
if albedo is None:
self.albedo = irradiance.SURFACE_ALBEDOS.get(surface_type, 0.25)
else:
self.albedo = albedo
# could tie these together with @property
self.module = module
if module_parameters is None:
self.module_parameters = {}
else:
self.module_parameters = module_parameters
self.module_type = module_type
self.racking_model = racking_model
if temperature_model_parameters is None:
self.temperature_model_parameters = \
self._infer_temperature_model_params()
# TODO: in v0.8 check if an empty dict is returned and raise error
else:
self.temperature_model_parameters = temperature_model_parameters
# TODO: deprecated behavior if PVSystem.temperature_model_parameters
# are not specified. Remove in v0.8
if not any(self.temperature_model_parameters):
warnings.warn(
'Required temperature_model_parameters is not specified '
'and parameters are not inferred from racking_model and '
'module_type. Reverting to deprecated default: SAPM cell '
'temperature model parameters for a glass/glass module in '
'open racking. In the future '
'PVSystem.temperature_model_parameters will be required',
pvlibDeprecationWarning)
params = temperature._temperature_model_params(
'sapm', 'open_rack_glass_glass')
self.temperature_model_parameters = params
self.modules_per_string = modules_per_string
self.strings_per_inverter = strings_per_inverter
self.inverter = inverter
if inverter_parameters is None:
self.inverter_parameters = {}
else:
self.inverter_parameters = inverter_parameters
if losses_parameters is None:
self.losses_parameters = {}
else:
self.losses_parameters = losses_parameters
self.name = name
def __repr__(self):
attrs = ['name', 'surface_tilt', 'surface_azimuth', 'module',
'inverter', 'albedo', 'racking_model']
return ('PVSystem: \n ' + '\n '.join(
('{}: {}'.format(attr, getattr(self, attr)) for attr in attrs)))
def get_aoi(self, solar_zenith, solar_azimuth):
"""Get the angle of incidence on the system.
Parameters
----------
solar_zenith : float or Series.
Solar zenith angle.
solar_azimuth : float or Series.
Solar azimuth angle.
Returns
-------
aoi : Series
The angle of incidence
"""
aoi = irradiance.aoi(self.surface_tilt, self.surface_azimuth,
solar_zenith, solar_azimuth)
return aoi
def get_irradiance(self, solar_zenith, solar_azimuth, dni, ghi, dhi,
dni_extra=None, airmass=None, model='haydavies',
**kwargs):
"""
Uses the :py:func:`irradiance.get_total_irradiance` function to
calculate the plane of array irradiance components on a tilted
surface defined by ``self.surface_tilt``,
``self.surface_azimuth``, and ``self.albedo``.
Parameters
----------
solar_zenith : float or Series.
Solar zenith angle.
solar_azimuth : float or Series.
Solar azimuth angle.
dni : float or Series
Direct Normal Irradiance
ghi : float or Series
Global horizontal irradiance
dhi : float or Series
Diffuse horizontal irradiance
dni_extra : None, float or Series, default None
Extraterrestrial direct normal irradiance
airmass : None, float or Series, default None
Airmass
model : String, default 'haydavies'
Irradiance model.
kwargs
Extra parameters passed to :func:`irradiance.get_total_irradiance`.
Returns
-------
poa_irradiance : DataFrame
Column names are: ``total, beam, sky, ground``.
"""
# not needed for all models, but this is easier
if dni_extra is None:
dni_extra = irradiance.get_extra_radiation(solar_zenith.index)
if airmass is None:
airmass = atmosphere.get_relative_airmass(solar_zenith)
return irradiance.get_total_irradiance(self.surface_tilt,
self.surface_azimuth,
solar_zenith, solar_azimuth,
dni, ghi, dhi,
dni_extra=dni_extra,
airmass=airmass,
model=model,
albedo=self.albedo,
**kwargs)
def get_iam(self, aoi, iam_model='physical'):
"""
Determine the incidence angle modifier using the method specified by
``iam_model``.
Parameters for the selected IAM model are expected to be in
``PVSystem.module_parameters``. Default parameters are available for
the 'physical', 'ashrae' and 'martin_ruiz' models.
Parameters
----------
aoi : numeric
The angle of incidence in degrees.
        iam_model : string, default 'physical'
The IAM model to be used. Valid strings are 'physical', 'ashrae',
'martin_ruiz' and 'sapm'.
Returns
-------
iam : numeric
The AOI modifier.
Raises
------
ValueError if `iam_model` is not a valid model name.
"""
model = iam_model.lower()
if model in ['ashrae', 'physical', 'martin_ruiz']:
param_names = iam._IAM_MODEL_PARAMS[model]
kwargs = _build_kwargs(param_names, self.module_parameters)
func = getattr(iam, model)
return func(aoi, **kwargs)
elif model == 'sapm':
return iam.sapm(aoi, self.module_parameters)
elif model == 'interp':
            raise ValueError(model + ' is not implemented as an IAM model '
                             'option for PVSystem')
else:
raise ValueError(model + ' is not a valid IAM model')
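    # Minimal usage sketch for a PVSystem instance ``system`` (the default
    # parameters mentioned above are used when module_parameters does not
    # override them):
    #   modifier = system.get_iam(aoi=30, iam_model='physical')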
def ashraeiam(self, aoi):
"""
Deprecated. Use ``PVSystem.get_iam`` instead.
"""
import warnings
        warnings.warn('PVSystem.ashraeiam is deprecated and will be removed '
                      'in v0.8, use PVSystem.get_iam instead',
pvlibDeprecationWarning)
return PVSystem.get_iam(self, aoi, iam_model='ashrae')
def physicaliam(self, aoi):
"""
Deprecated. Use ``PVSystem.get_iam`` instead.
"""
import warnings
warnings.warn('PVSystem.physicaliam is deprecated and will be removed'
' in v0.8, use PVSystem.get_iam instead',
pvlibDeprecationWarning)
return PVSystem.get_iam(self, aoi, iam_model='physical')
def calcparams_desoto(self, effective_irradiance, temp_cell, **kwargs):
"""
Use the :py:func:`calcparams_desoto` function, the input
parameters and ``self.module_parameters`` to calculate the
module currents and resistances.
Parameters
----------
effective_irradiance : numeric
The irradiance (W/m2) that is converted to photocurrent.
temp_cell : float or Series
The average cell temperature of cells within a module in C.
**kwargs
See pvsystem.calcparams_desoto for details
Returns
-------
See pvsystem.calcparams_desoto for details
"""
kwargs = _build_kwargs(['a_ref', 'I_L_ref', 'I_o_ref', 'R_sh_ref',
'R_s', 'alpha_sc', 'EgRef', 'dEgdT',
'irrad_ref', 'temp_ref'],
self.module_parameters)
return calcparams_desoto(effective_irradiance, temp_cell, **kwargs)
def calcparams_cec(self, effective_irradiance, temp_cell, **kwargs):
"""
Use the :py:func:`calcparams_cec` function, the input
parameters and ``self.module_parameters`` to calculate the
module currents and resistances.
Parameters
----------
effective_irradiance : numeric
The irradiance (W/m2) that is converted to photocurrent.
temp_cell : float or Series
The average cell temperature of cells within a module in C.
**kwargs
See pvsystem.calcparams_cec for details
Returns
-------
See pvsystem.calcparams_cec for details
"""
kwargs = _build_kwargs(['a_ref', 'I_L_ref', 'I_o_ref', 'R_sh_ref',
'R_s', 'alpha_sc', 'Adjust', 'EgRef', 'dEgdT',
'irrad_ref', 'temp_ref'],
self.module_parameters)
return calcparams_cec(effective_irradiance, temp_cell, **kwargs)
def calcparams_pvsyst(self, effective_irradiance, temp_cell):
"""
Use the :py:func:`calcparams_pvsyst` function, the input
parameters and ``self.module_parameters`` to calculate the
module currents and resistances.
Parameters
----------
effective_irradiance : numeric
The irradiance (W/m2) that is converted to photocurrent.
temp_cell : float or Series
The average cell temperature of cells within a module in C.
Returns
-------
See pvsystem.calcparams_pvsyst for details
"""
kwargs = _build_kwargs(['gamma_ref', 'mu_gamma', 'I_L_ref', 'I_o_ref',
'R_sh_ref', 'R_sh_0', 'R_sh_exp',
'R_s', 'alpha_sc', 'EgRef',
'irrad_ref', 'temp_ref',
'cells_in_series'],
self.module_parameters)
return calcparams_pvsyst(effective_irradiance, temp_cell, **kwargs)
def sapm(self, effective_irradiance, temp_cell, **kwargs):
"""
Use the :py:func:`sapm` function, the input parameters,
and ``self.module_parameters`` to calculate
Voc, Isc, Ix, Ixx, Vmp, and Imp.
Parameters
----------
effective_irradiance : numeric
The irradiance (W/m2) that is converted to photocurrent.
temp_cell : float or Series
The average cell temperature of cells within a module in C.
kwargs
See pvsystem.sapm for details
Returns
-------
See pvsystem.sapm for details
"""
return sapm(effective_irradiance, temp_cell, self.module_parameters)
def sapm_celltemp(self, poa_global, temp_air, wind_speed):
"""Uses :py:func:`temperature.sapm_cell` to calculate cell
temperatures.
Parameters
----------
poa_global : numeric
Total incident irradiance in W/m^2.
temp_air : numeric
Ambient dry bulb temperature in degrees C.
wind_speed : numeric
Wind speed in m/s at a height of 10 meters.
Returns
-------
numeric, values in degrees C.
"""
kwargs = _build_kwargs(['a', 'b', 'deltaT'],
self.temperature_model_parameters)
return temperature.sapm_cell(poa_global, temp_air, wind_speed,
**kwargs)
def _infer_temperature_model_params(self):
# try to infer temperature model parameters from from racking_model
# and module_type
param_set = self.racking_model + '_' + self.module_type
if param_set in temperature.TEMPERATURE_MODEL_PARAMETERS['sapm']:
return temperature._temperature_model_params('sapm', param_set)
elif 'freestanding' in param_set:
return temperature._temperature_model_params('pvsyst',
'freestanding')
elif 'insulated' in param_set: # after SAPM to avoid confusing keys
return temperature._temperature_model_params('pvsyst',
'insulated')
else:
return {}
def sapm_spectral_loss(self, airmass_absolute):
"""
Use the :py:func:`sapm_spectral_loss` function, the input
parameters, and ``self.module_parameters`` to calculate F1.
Parameters
----------
airmass_absolute : numeric
Absolute airmass.
Returns
-------
F1 : numeric
The SAPM spectral loss coefficient.
"""
return sapm_spectral_loss(airmass_absolute, self.module_parameters)
def sapm_aoi_loss(self, aoi):
"""
Deprecated. Use ``PVSystem.get_iam`` instead.
"""
import warnings
warnings.warn('PVSystem.sapm_aoi_loss is deprecated and will be'
' removed in v0.8, use PVSystem.get_iam instead',
pvlibDeprecationWarning)
return PVSystem.get_iam(self, aoi, iam_model='sapm')
def sapm_effective_irradiance(self, poa_direct, poa_diffuse,
airmass_absolute, aoi,
reference_irradiance=1000):
"""
Use the :py:func:`sapm_effective_irradiance` function, the input
parameters, and ``self.module_parameters`` to calculate
effective irradiance.
Parameters
----------
poa_direct : numeric
The direct irradiance incident upon the module. [W/m2]
poa_diffuse : numeric
The diffuse irradiance incident on module. [W/m2]
airmass_absolute : numeric
Absolute airmass. [unitless]
aoi : numeric
Angle of incidence. [degrees]
Returns
-------
effective_irradiance : numeric
The SAPM effective irradiance. [W/m2]
"""
return sapm_effective_irradiance(
poa_direct, poa_diffuse, airmass_absolute, aoi,
self.module_parameters)
def pvsyst_celltemp(self, poa_global, temp_air, wind_speed=1.0):
"""Uses :py:func:`temperature.pvsyst_cell` to calculate cell
temperature.
Parameters
----------
poa_global : numeric
Total incident irradiance in W/m^2.
temp_air : numeric
Ambient dry bulb temperature in degrees C.
wind_speed : numeric, default 1.0
Wind speed in m/s measured at the same height for which the wind
loss factor was determined. The default value is 1.0, which is
the wind speed at module height used to determine NOCT.
eta_m : numeric, default 0.1
Module external efficiency as a fraction, i.e.,
DC power / poa_global.
alpha_absorption : numeric, default 0.9
Absorption coefficient
Returns
-------
numeric, values in degrees C.
"""
kwargs = _build_kwargs(['eta_m', 'alpha_absorption'],
self.module_parameters)
kwargs.update(_build_kwargs(['u_c', 'u_v'],
self.temperature_model_parameters))
return temperature.pvsyst_cell(poa_global, temp_air, wind_speed,
**kwargs)
def first_solar_spectral_loss(self, pw, airmass_absolute):
"""
Use the :py:func:`first_solar_spectral_correction` function to
calculate the spectral loss modifier. The model coefficients are
specific to the module's cell type, and are determined by searching
for one of the following keys in self.module_parameters (in order):
'first_solar_spectral_coefficients' (user-supplied coefficients)
'Technology' - a string describing the cell type, can be read from
the CEC module parameter database
'Material' - a string describing the cell type, can be read from
the Sandia module database.
Parameters
----------
pw : array-like
atmospheric precipitable water (cm).
airmass_absolute : array-like
absolute (pressure corrected) airmass.
Returns
-------
modifier: array-like
spectral mismatch factor (unitless) which can be multiplied
with broadband irradiance reaching a module's cells to estimate
effective irradiance, i.e., the irradiance that is converted to
electrical current.
"""
if 'first_solar_spectral_coefficients' in \
self.module_parameters.keys():
coefficients = \
self.module_parameters['first_solar_spectral_coefficients']
module_type = None
else:
module_type = self._infer_cell_type()
coefficients = None
return atmosphere.first_solar_spectral_correction(pw,
airmass_absolute,
module_type,
coefficients)
def _infer_cell_type(self):
"""
Examines module_parameters and maps the Technology key for the CEC
database and the Material key for the Sandia database to a common
list of strings for cell type.
Returns
-------
cell_type: str
"""
_cell_type_dict = {'Multi-c-Si': 'multisi',
'Mono-c-Si': 'monosi',
'Thin Film': 'cigs',
'a-Si/nc': 'asi',
'CIS': 'cigs',
'CIGS': 'cigs',
'1-a-Si': 'asi',
'CdTe': 'cdte',
'a-Si': 'asi',
'2-a-Si': None,
'3-a-Si': None,
'HIT-Si': 'monosi',
'mc-Si': 'multisi',
'c-Si': 'multisi',
'Si-Film': 'asi',
'EFG mc-Si': 'multisi',
'GaAs': None,
'a-Si / mono-Si': 'monosi'}
if 'Technology' in self.module_parameters.keys():
# CEC module parameter set
cell_type = _cell_type_dict[self.module_parameters['Technology']]
elif 'Material' in self.module_parameters.keys():
# Sandia module parameter set
cell_type = _cell_type_dict[self.module_parameters['Material']]
else:
cell_type = None
return cell_type
def singlediode(self, photocurrent, saturation_current,
resistance_series, resistance_shunt, nNsVth,
ivcurve_pnts=None):
"""Wrapper around the :py:func:`singlediode` function.
Parameters
----------
See pvsystem.singlediode for details
Returns
-------
See pvsystem.singlediode for details
"""
return singlediode(photocurrent, saturation_current,
resistance_series, resistance_shunt, nNsVth,
ivcurve_pnts=ivcurve_pnts)
def i_from_v(self, resistance_shunt, resistance_series, nNsVth, voltage,
saturation_current, photocurrent):
"""Wrapper around the :py:func:`i_from_v` function.
Parameters
----------
See pvsystem.i_from_v for details
Returns
-------
See pvsystem.i_from_v for details
"""
return i_from_v(resistance_shunt, resistance_series, nNsVth, voltage,
saturation_current, photocurrent)
# inverter now specified by self.inverter_parameters
def snlinverter(self, v_dc, p_dc):
"""Uses :func:`snlinverter` to calculate AC power based on
``self.inverter_parameters`` and the input parameters.
Parameters
----------
See pvsystem.snlinverter for details
Returns
-------
See pvsystem.snlinverter for details
"""
return snlinverter(v_dc, p_dc, self.inverter_parameters)
def adrinverter(self, v_dc, p_dc):
return adrinverter(v_dc, p_dc, self.inverter_parameters)
def scale_voltage_current_power(self, data):
"""
Scales the voltage, current, and power of the DataFrames
returned by :py:func:`singlediode` and :py:func:`sapm`
by `self.modules_per_string` and `self.strings_per_inverter`.
Parameters
----------
data: DataFrame
Must contain columns `'v_mp', 'v_oc', 'i_mp' ,'i_x', 'i_xx',
'i_sc', 'p_mp'`.
Returns
-------
scaled_data: DataFrame
A scaled copy of the input data.
"""
return scale_voltage_current_power(data,
voltage=self.modules_per_string,
current=self.strings_per_inverter)
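    # Illustrative use, for a PVSystem instance ``system`` and the DataFrame
    # returned by sapm() or singlediode():
    #   scaled = system.scale_voltage_current_power(dc_output)
    # Voltages are multiplied by modules_per_string, currents by
    # strings_per_inverter, and power by their product.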
def pvwatts_dc(self, g_poa_effective, temp_cell):
"""
        Calculates DC power according to the PVWatts model using
:py:func:`pvwatts_dc`, `self.module_parameters['pdc0']`, and
`self.module_parameters['gamma_pdc']`.
See :py:func:`pvwatts_dc` for details.
"""
kwargs = _build_kwargs(['temp_ref'], self.module_parameters)
return pvwatts_dc(g_poa_effective, temp_cell,
self.module_parameters['pdc0'],
self.module_parameters['gamma_pdc'],
**kwargs)
def pvwatts_losses(self):
"""
        Calculates DC power losses according to the PVWatts model using
        :py:func:`pvwatts_losses` and ``self.losses_parameters``.
See :py:func:`pvwatts_losses` for details.
"""
kwargs = _build_kwargs(['soiling', 'shading', 'snow', 'mismatch',
'wiring', 'connections', 'lid',
'nameplate_rating', 'age', 'availability'],
self.losses_parameters)
return pvwatts_losses(**kwargs)
def pvwatts_ac(self, pdc):
"""
Calculates AC power according to the PVWatts model using
:py:func:`pvwatts_ac`, `self.module_parameters['pdc0']`, and
`eta_inv_nom=self.inverter_parameters['eta_inv_nom']`.
See :py:func:`pvwatts_ac` for details.
"""
kwargs = _build_kwargs(['eta_inv_nom', 'eta_inv_ref'],
self.inverter_parameters)
return pvwatts_ac(pdc, self.inverter_parameters['pdc0'], **kwargs)
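    # Minimal sketch of chaining the PVWatts methods for a PVSystem instance
    # ``system`` (assumes 'pdc0'/'gamma_pdc' in module_parameters and 'pdc0' in
    # inverter_parameters):
    #   pdc = system.pvwatts_dc(g_poa_effective, temp_cell)
    #   pdc = pdc * (1 - system.pvwatts_losses() / 100)  # losses are in percent
    #   pac = system.pvwatts_ac(pdc)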
def localize(self, location=None, latitude=None, longitude=None,
**kwargs):
"""Creates a LocalizedPVSystem object using this object
and location data. Must supply either location object or
latitude, longitude, and any location kwargs
Parameters
----------
location : None or Location, default None
latitude : None or float, default None
longitude : None or float, default None
**kwargs : see Location
Returns
-------
localized_system : LocalizedPVSystem
"""
if location is None:
location = Location(latitude, longitude, **kwargs)
return LocalizedPVSystem(pvsystem=self, location=location)
class LocalizedPVSystem(PVSystem, Location):
"""
The LocalizedPVSystem class defines a standard set of installed PV
system attributes and modeling functions. This class combines the
attributes and methods of the PVSystem and Location classes.
The LocalizedPVSystem may have bugs due to the difficulty of
robustly implementing multiple inheritance. See
:py:class:`~pvlib.modelchain.ModelChain` for an alternative paradigm
for modeling PV systems at specific locations.
"""
def __init__(self, pvsystem=None, location=None, **kwargs):
new_kwargs = _combine_localized_attributes(
pvsystem=pvsystem,
location=location,
**kwargs,
)
PVSystem.__init__(self, **new_kwargs)
Location.__init__(self, **new_kwargs)
def __repr__(self):
attrs = ['name', 'latitude', 'longitude', 'altitude', 'tz',
'surface_tilt', 'surface_azimuth', 'module', 'inverter',
'albedo', 'racking_model']
return ('LocalizedPVSystem: \n ' + '\n '.join(
('{}: {}'.format(attr, getattr(self, attr)) for attr in attrs)))
def systemdef(meta, surface_tilt, surface_azimuth, albedo, modules_per_string,
strings_per_inverter):
'''
Generates a dict of system parameters used throughout a simulation.
Parameters
----------
meta : dict
meta dict either generated from a TMY file using readtmy2 or
readtmy3, or a dict containing at least the following fields:
=============== ====== ====================
meta field format description
=============== ====== ====================
meta.altitude Float site elevation
meta.latitude Float site latitude
meta.longitude Float site longitude
meta.Name String site name
meta.State String state
meta.TZ Float timezone
=============== ====== ====================
surface_tilt : float or Series
Surface tilt angles in decimal degrees.
The tilt angle is defined as degrees from horizontal
(e.g. surface facing up = 0, surface facing horizon = 90)
surface_azimuth : float or Series
Surface azimuth angles in decimal degrees.
The azimuth convention is defined
as degrees east of north
(North=0, South=180, East=90, West=270).
albedo : float or Series
Ground reflectance, typically 0.1-0.4 for surfaces on Earth
(land), may increase over snow, ice, etc. May also be known as
the reflection coefficient. Must be >=0 and <=1.
modules_per_string : int
Number of modules connected in series in a string.
strings_per_inverter : int
Number of strings connected in parallel.
Returns
-------
Result : dict
A dict with the following fields.
* 'surface_tilt'
* 'surface_azimuth'
* 'albedo'
* 'modules_per_string'
* 'strings_per_inverter'
* 'latitude'
* 'longitude'
* 'tz'
* 'name'
* 'altitude'
See also
--------
pvlib.tmy.readtmy3
pvlib.tmy.readtmy2
'''
try:
name = meta['Name']
except KeyError:
name = meta['City']
system = {'surface_tilt': surface_tilt,
'surface_azimuth': surface_azimuth,
'albedo': albedo,
'modules_per_string': modules_per_string,
'strings_per_inverter': strings_per_inverter,
'latitude': meta['latitude'],
'longitude': meta['longitude'],
'tz': meta['TZ'],
'name': name,
'altitude': meta['altitude']}
return system
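# Minimal usage sketch for systemdef (hypothetical metadata values):
#   meta = {'altitude': 100, 'latitude': 35.0, 'longitude': -106.0,
#           'Name': 'Example site', 'State': 'NM', 'TZ': -7}
#   sysdef = systemdef(meta, surface_tilt=30, surface_azimuth=180, albedo=0.2,
#                      modules_per_string=10, strings_per_inverter=2)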
def calcparams_desoto(effective_irradiance, temp_cell,
alpha_sc, a_ref, I_L_ref, I_o_ref, R_sh_ref, R_s,
EgRef=1.121, dEgdT=-0.0002677,
irrad_ref=1000, temp_ref=25):
'''
Calculates five parameter values for the single diode equation at
effective irradiance and cell temperature using the De Soto et al.
model described in [1]_. The five values returned by calcparams_desoto
can be used by singlediode to calculate an IV curve.
Parameters
----------
effective_irradiance : numeric
The irradiance (W/m2) that is converted to photocurrent.
temp_cell : numeric
The average cell temperature of cells within a module in C.
alpha_sc : float
The short-circuit current temperature coefficient of the
module in units of A/C.
a_ref : float
The product of the usual diode ideality factor (n, unitless),
number of cells in series (Ns), and cell thermal voltage at reference
conditions, in units of V.
I_L_ref : float
The light-generated current (or photocurrent) at reference conditions,
in amperes.
I_o_ref : float
The dark or diode reverse saturation current at reference conditions,
in amperes.
R_sh_ref : float
The shunt resistance at reference conditions, in ohms.
R_s : float
The series resistance at reference conditions, in ohms.
EgRef : float
The energy bandgap at reference temperature in units of eV.
1.121 eV for crystalline silicon. EgRef must be >0. For parameters
from the SAM CEC module database, EgRef=1.121 is implicit for all
cell types in the parameter estimation algorithm used by NREL.
dEgdT : float
The temperature dependence of the energy bandgap at reference
conditions in units of 1/K. May be either a scalar value
(e.g. -0.0002677 as in [1]_) or a DataFrame (this may be useful if
dEgdT is a modeled as a function of temperature). For parameters from
the SAM CEC module database, dEgdT=-0.0002677 is implicit for all cell
types in the parameter estimation algorithm used by NREL.
irrad_ref : float (optional, default=1000)
Reference irradiance in W/m^2.
temp_ref : float (optional, default=25)
Reference cell temperature in C.
Returns
-------
Tuple of the following results:
photocurrent : numeric
Light-generated current in amperes
saturation_current : numeric
Diode saturation curent in amperes
resistance_series : float
Series resistance in ohms
resistance_shunt : numeric
Shunt resistance in ohms
nNsVth : numeric
The product of the usual diode ideality factor (n, unitless),
number of cells in series (Ns), and cell thermal voltage at
specified effective irradiance and cell temperature.
References
----------
    .. [1] W. De Soto et al., "Improvement and validation of a model for
photovoltaic array performance", Solar Energy, vol 80, pp. 78-88,
2006.
.. [2] System Advisor Model web page. https://sam.nrel.gov.
    .. [3] A. Dobos, "An Improved Coefficient Calculator for the California
Energy Commission 6 Parameter Photovoltaic Module Model", Journal of
Solar Energy Engineering, vol 134, 2012.
    .. [4] O. Madelung, "Semiconductors: Data Handbook, 3rd ed." ISBN
3-540-40488-0
See Also
--------
singlediode
retrieve_sam
Notes
-----
If the reference parameters in the ModuleParameters struct are read
from a database or library of parameters (e.g. System Advisor
Model), it is important to use the same EgRef and dEgdT values that
were used to generate the reference parameters, regardless of the
actual bandgap characteristics of the semiconductor. For example, in
the case of the System Advisor Model library, created as described
in [3], EgRef and dEgdT for all modules were 1.121 and -0.0002677,
respectively.
This table of reference bandgap energies (EgRef), bandgap energy
temperature dependence (dEgdT), and "typical" airmass response (M)
is provided purely as reference to those who may generate their own
reference module parameters (a_ref, IL_ref, I0_ref, etc.) based upon
the various PV semiconductors. Again, we stress the importance of
using identical EgRef and dEgdT when generation reference parameters
and modifying the reference parameters (for irradiance, temperature,
and airmass) per DeSoto's equations.
Crystalline Silicon (Si):
* EgRef = 1.121
* dEgdT = -0.0002677
>>> M = np.polyval([-1.26E-4, 2.816E-3, -0.024459, 0.086257, 0.9181],
... AMa) # doctest: +SKIP
Source: [1]
Cadmium Telluride (CdTe):
* EgRef = 1.475
* dEgdT = -0.0003
>>> M = np.polyval([-2.46E-5, 9.607E-4, -0.0134, 0.0716, 0.9196],
... AMa) # doctest: +SKIP
Source: [4]
Copper Indium diSelenide (CIS):
* EgRef = 1.010
* dEgdT = -0.00011
>>> M = np.polyval([-3.74E-5, 0.00125, -0.01462, 0.0718, 0.9210],
... AMa) # doctest: +SKIP
Source: [4]
Copper Indium Gallium diSelenide (CIGS):
* EgRef = 1.15
* dEgdT = ????
>>> M = np.polyval([-9.07E-5, 0.0022, -0.0202, 0.0652, 0.9417],
... AMa) # doctest: +SKIP
Source: Wikipedia
Gallium Arsenide (GaAs):
* EgRef = 1.424
* dEgdT = -0.000433
* M = unknown
Source: [4]
'''
# test for use of function pre-v0.6.0 API change
if isinstance(a_ref, dict) or \
(isinstance(a_ref, pd.Series) and ('a_ref' in a_ref.keys())):
import warnings
warnings.warn('module_parameters detected as fourth positional'
+ ' argument of calcparams_desoto. calcparams_desoto'
+ ' will require one argument for each module model'
+ ' parameter in v0.7.0 and later', DeprecationWarning)
try:
module_parameters = a_ref
a_ref = module_parameters['a_ref']
I_L_ref = module_parameters['I_L_ref']
I_o_ref = module_parameters['I_o_ref']
R_sh_ref = module_parameters['R_sh_ref']
R_s = module_parameters['R_s']
except Exception as e:
            raise ValueError('Module parameters could not be extracted from'
                             ' fourth positional argument of calcparams_desoto.'
                             ' Check that parameters are from the CEC database'
                             ' and/or update your code for the new API for'
                             ' calcparams_desoto') from e
# Boltzmann constant in eV/K
k = 8.617332478e-05
# reference temperature
Tref_K = temp_ref + 273.15
Tcell_K = temp_cell + 273.15
E_g = EgRef * (1 + dEgdT*(Tcell_K - Tref_K))
nNsVth = a_ref * (Tcell_K / Tref_K)
# In the equation for IL, the single factor effective_irradiance is
# used, in place of the product S*M in [1]. effective_irradiance is
# equivalent to the product of S (irradiance reaching a module's cells) *
# M (spectral adjustment factor) as described in [1].
IL = effective_irradiance / irrad_ref * \
(I_L_ref + alpha_sc * (Tcell_K - Tref_K))
I0 = (I_o_ref * ((Tcell_K / Tref_K) ** 3) *
(np.exp(EgRef / (k*(Tref_K)) - (E_g / (k*(Tcell_K))))))
# Note that the equation for Rsh differs from [1]. In [1] Rsh is given as
# Rsh = Rsh_ref * (S_ref / S) where S is broadband irradiance reaching
# the module's cells. If desired this model behavior can be duplicated
# by applying reflection and soiling losses to broadband plane of array
# irradiance and not applying a spectral loss modifier, i.e.,
# spectral_modifier = 1.0.
# use errstate to silence divide by warning
with np.errstate(divide='ignore'):
Rsh = R_sh_ref * (irrad_ref / effective_irradiance)
Rs = R_s
return IL, I0, Rs, Rsh, nNsVth
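# Minimal sketch of chaining calcparams_desoto into singlediode (hypothetical
# module parameters; real values would normally come from retrieve_sam):
#   IL, I0, Rs, Rsh, nNsVth = calcparams_desoto(
#       effective_irradiance=800, temp_cell=45, alpha_sc=0.005, a_ref=1.8,
#       I_L_ref=6.0, I_o_ref=3e-10, R_sh_ref=300, R_s=0.4)
#   curve_info = singlediode(IL, I0, Rs, Rsh, nNsVth)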
def calcparams_cec(effective_irradiance, temp_cell,
alpha_sc, a_ref, I_L_ref, I_o_ref, R_sh_ref, R_s,
Adjust, EgRef=1.121, dEgdT=-0.0002677,
irrad_ref=1000, temp_ref=25):
'''
Calculates five parameter values for the single diode equation at
effective irradiance and cell temperature using the CEC
model described in [1]_. The CEC model differs from the De soto et al.
model [3]_ by the parameter Adjust. The five values returned by
calcparams_cec can be used by singlediode to calculate an IV curve.
Parameters
----------
effective_irradiance : numeric
The irradiance (W/m2) that is converted to photocurrent.
temp_cell : numeric
The average cell temperature of cells within a module in C.
alpha_sc : float
The short-circuit current temperature coefficient of the
module in units of A/C.
a_ref : float
The product of the usual diode ideality factor (n, unitless),
number of cells in series (Ns), and cell thermal voltage at reference
conditions, in units of V.
I_L_ref : float
The light-generated current (or photocurrent) at reference conditions,
in amperes.
I_o_ref : float
The dark or diode reverse saturation current at reference conditions,
in amperes.
R_sh_ref : float
The shunt resistance at reference conditions, in ohms.
R_s : float
The series resistance at reference conditions, in ohms.
Adjust : float
The adjustment to the temperature coefficient for short circuit
current, in percent
EgRef : float
The energy bandgap at reference temperature in units of eV.
1.121 eV for crystalline silicon. EgRef must be >0. For parameters
from the SAM CEC module database, EgRef=1.121 is implicit for all
cell types in the parameter estimation algorithm used by NREL.
dEgdT : float
The temperature dependence of the energy bandgap at reference
conditions in units of 1/K. May be either a scalar value
(e.g. -0.0002677 as in [3]) or a DataFrame (this may be useful if
        dEgdT is modeled as a function of temperature). For parameters from
the SAM CEC module database, dEgdT=-0.0002677 is implicit for all cell
types in the parameter estimation algorithm used by NREL.
irrad_ref : float (optional, default=1000)
Reference irradiance in W/m^2.
temp_ref : float (optional, default=25)
Reference cell temperature in C.
Returns
-------
Tuple of the following results:
photocurrent : numeric
Light-generated current in amperes
saturation_current : numeric
        Diode saturation current in amperes
resistance_series : float
Series resistance in ohms
resistance_shunt : numeric
Shunt resistance in ohms
nNsVth : numeric
The product of the usual diode ideality factor (n, unitless),
number of cells in series (Ns), and cell thermal voltage at
specified effective irradiance and cell temperature.
References
----------
.. [1] <NAME>, "An Improved Coefficient Calculator for the California
Energy Commission 6 Parameter Photovoltaic Module Model", Journal of
Solar Energy Engineering, vol 134, 2012.
.. [2] System Advisor Model web page. https://sam.nrel.gov.
.. [3] <NAME> et al., "Improvement and validation of a model for
photovoltaic array performance", Solar Energy, vol 80, pp. 78-88,
2006.
See Also
--------
calcparams_desoto
singlediode
retrieve_sam
'''
# pass adjusted temperature coefficient to desoto
return calcparams_desoto(effective_irradiance, temp_cell,
alpha_sc*(1.0 - Adjust/100),
a_ref, I_L_ref, I_o_ref,
R_sh_ref, R_s,
                             EgRef=EgRef, dEgdT=dEgdT,
                             irrad_ref=irrad_ref, temp_ref=temp_ref)
def calcparams_pvsyst(effective_irradiance, temp_cell,
alpha_sc, gamma_ref, mu_gamma,
I_L_ref, I_o_ref,
R_sh_ref, R_sh_0, R_s,
cells_in_series,
R_sh_exp=5.5,
EgRef=1.121,
irrad_ref=1000, temp_ref=25):
'''
Calculates five parameter values for the single diode equation at
effective irradiance and cell temperature using the PVsyst v6
model described in [1]_, [2]_, [3]_. The five values returned by
calcparams_pvsyst can be used by singlediode to calculate an IV curve.
Parameters
----------
effective_irradiance : numeric
The irradiance (W/m2) that is converted to photocurrent.
temp_cell : numeric
The average cell temperature of cells within a module in C.
alpha_sc : float
The short-circuit current temperature coefficient of the
module in units of A/C.
gamma_ref : float
The diode ideality factor
mu_gamma : float
The temperature coefficient for the diode ideality factor, 1/K
I_L_ref : float
The light-generated current (or photocurrent) at reference conditions,
in amperes.
I_o_ref : float
The dark or diode reverse saturation current at reference conditions,
in amperes.
R_sh_ref : float
The shunt resistance at reference conditions, in ohms.
R_sh_0 : float
The shunt resistance at zero irradiance conditions, in ohms.
R_s : float
The series resistance at reference conditions, in ohms.
cells_in_series : integer
The number of cells connected in series.
R_sh_exp : float
The exponent in the equation for shunt resistance, unitless. Defaults
to 5.5.
EgRef : float
The energy bandgap at reference temperature in units of eV.
1.121 eV for crystalline silicon. EgRef must be >0.
irrad_ref : float (optional, default=1000)
Reference irradiance in W/m^2.
temp_ref : float (optional, default=25)
Reference cell temperature in C.
Returns
-------
Tuple of the following results:
photocurrent : numeric
Light-generated current in amperes
saturation_current : numeric
Diode saturation current in amperes
resistance_series : float
Series resistance in ohms
resistance_shunt : numeric
Shunt resistance in ohms
nNsVth : numeric
The product of the usual diode ideality factor (n, unitless),
number of cells in series (Ns), and cell thermal voltage at
specified effective irradiance and cell temperature.
References
----------
.. [1] <NAME>, <NAME>, <NAME>, Modeling the Irradiance and
Temperature Dependence of Photovoltaic Modules in PVsyst,
IEEE Journal of Photovoltaics v5(1), January 2015.
.. [2] <NAME>, PV modules modelling, Presentation at the 2nd PV
Performance Modeling Workshop, Santa Clara, CA, May 2013
.. [3] <NAME>, <NAME>, Performance Assessment of a Simulation Model
for PV modules of any available technology, 25th European Photovoltaic
Solar Energy Conference, Valencia, Spain, Sept. 2010
See Also
--------
calcparams_desoto
singlediode
'''
# Boltzmann constant in J/K
k = 1.38064852e-23
# elementary charge in coulomb
q = 1.6021766e-19
# reference temperature
Tref_K = temp_ref + 273.15
Tcell_K = temp_cell + 273.15
gamma = gamma_ref + mu_gamma * (Tcell_K - Tref_K)
nNsVth = gamma * k / q * cells_in_series * Tcell_K
IL = effective_irradiance / irrad_ref * \
(I_L_ref + alpha_sc * (Tcell_K - Tref_K))
I0 = I_o_ref * ((Tcell_K / Tref_K) ** 3) * \
(np.exp((q * EgRef) / (k * gamma) * (1 / Tref_K - 1 / Tcell_K)))
Rsh_tmp = \
(R_sh_ref - R_sh_0 * np.exp(-R_sh_exp)) / (1.0 - np.exp(-R_sh_exp))
Rsh_base = np.maximum(0.0, Rsh_tmp)
Rsh = Rsh_base + (R_sh_0 - Rsh_base) * \
np.exp(-R_sh_exp * effective_irradiance / irrad_ref)
Rs = R_s
return IL, I0, Rs, Rsh, nNsVth
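# Minimal sketch (not part of the original module): in the PVsyst model the
# shunt resistance grows toward R_sh_0 as irradiance drops. All parameter
# values below are placeholders chosen for illustration only.
def _example_calcparams_pvsyst_rsh():
    params = dict(alpha_sc=0.0045, gamma_ref=1.1, mu_gamma=-0.0003,
                  I_L_ref=6.0, I_o_ref=5e-10, R_sh_ref=300.0, R_sh_0=1200.0,
                  R_s=0.4, cells_in_series=60)
    rsh_low = calcparams_pvsyst(200.0, 25.0, **params)[3]
    rsh_stc = calcparams_pvsyst(1000.0, 25.0, **params)[3]
    return rsh_low, rsh_stc  # Rsh at 200 W/m2 exceeds Rsh at 1000 W/m2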
def retrieve_sam(name=None, path=None):
'''
Retrieve latest module and inverter info from a local file or the
SAM website.
This function will retrieve either:
* CEC module database
* Sandia Module database
* CEC Inverter database
* Anton Driesse Inverter database
and return it as a pandas DataFrame.
Parameters
----------
name : None or string, default None
Name can be one of:
* 'CECMod' - returns the CEC module database
* 'CECInverter' - returns the CEC Inverter database
* 'SandiaInverter' - returns the CEC Inverter database
(CEC is only current inverter db available; tag kept for
backwards compatibility)
* 'SandiaMod' - returns the Sandia Module database
* 'ADRInverter' - returns the ADR Inverter database
path : None or string, default None
Path to the SAM file. May also be a URL.
Returns
-------
samfile : DataFrame
A DataFrame containing all the elements of the desired database.
Each column represents a module or inverter, and a specific
        dataset can be retrieved by selecting the corresponding column,
        as shown in the Examples section below.
Raises
------
ValueError
If no name or path is provided.
Notes
-----
Files available at
https://github.com/NREL/SAM/tree/develop/deploy/libraries
Documentation for module and inverter data sets:
https://sam.nrel.gov/photovoltaic/pv-sub-page-2.html
Examples
--------
>>> from pvlib import pvsystem
>>> invdb = pvsystem.retrieve_sam('CECInverter')
>>> inverter = invdb.AE_Solar_Energy__AE6_0__277V__277V__CEC_2012_
>>> inverter
Vac 277.000000
Paco 6000.000000
Pdco 6165.670000
Vdco 361.123000
Pso 36.792300
C0 -0.000002
C1 -0.000047
C2 -0.001861
C3 0.000721
Pnt 0.070000
Vdcmax 600.000000
Idcmax 32.000000
Mppt_low 200.000000
Mppt_high 500.000000
Name: AE_Solar_Energy__AE6_0__277V__277V__CEC_2012_, dtype: float64
'''
if name is not None:
name = name.lower()
data_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'data')
if name == 'cecmod':
csvdata = os.path.join(
data_path, 'sam-library-cec-modules-2019-03-05.csv')
elif name == 'sandiamod':
csvdata = os.path.join(
data_path, 'sam-library-sandia-modules-2015-6-30.csv')
elif name == 'adrinverter':
csvdata = os.path.join(data_path, 'adr-library-2013-10-01.csv')
elif name in ['cecinverter', 'sandiainverter']:
# Allowing either, to provide for old code,
# while aligning with current expectations
csvdata = os.path.join(
data_path, 'sam-library-cec-inverters-2019-03-05.csv')
else:
raise ValueError('invalid name {}'.format(name))
elif path is not None:
if path.startswith('http'):
response = urlopen(path)
csvdata = io.StringIO(response.read().decode(errors='ignore'))
else:
csvdata = path
elif name is None and path is None:
raise ValueError("A name or path must be provided!")
return _parse_raw_sam_df(csvdata)
def _normalize_sam_product_names(names):
'''
Replace special characters within the product names to make them more
suitable for use as Dataframe column names.
'''
# Contributed by <NAME> (@adriesse), PV Performance Labs. July, 2019
import warnings
BAD_CHARS = ' -.()[]:+/",'
GOOD_CHARS = '____________'
mapping = str.maketrans(BAD_CHARS, GOOD_CHARS)
names = pd.Series(data=names)
norm_names = names.str.translate(mapping)
n_duplicates = names.duplicated().sum()
if n_duplicates > 0:
warnings.warn('Original names contain %d duplicate(s).' % n_duplicates)
n_duplicates = norm_names.duplicated().sum()
if n_duplicates > 0:
warnings.warn('Normalized names contain %d duplicate(s).' % n_duplicates)
return norm_names.values
def _parse_raw_sam_df(csvdata):
df = pd.read_csv(csvdata, index_col=0, skiprows=[1, 2])
df.columns = df.columns.str.replace(' ', '_')
df.index = _normalize_sam_product_names(df.index)
df = df.transpose()
if 'ADRCoefficients' in df.index:
ad_ce = 'ADRCoefficients'
        # for each inverter, parses a bracketed, whitespace-separated string
        # of coefficients like '[ 1.33 2.11 3.12 ]' into a list of floats:
        # [1.33, 2.11, 3.12]
df.loc[ad_ce] = df.loc[ad_ce].map(lambda x: list(
map(float, x.strip(' []').split())))
return df
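# Tiny sketch of the ADRCoefficients parsing above: a bracketed,
# whitespace-separated string of numbers becomes a list of floats. The
# example string is made up, not taken from the ADR library file.
def _example_parse_adr_coefficients():
    raw = '[ 1.33 2.11 3.12 ]'
    return list(map(float, raw.strip(' []').split()))  # [1.33, 2.11, 3.12]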
def sapm(effective_irradiance, temp_cell, module):
'''
The Sandia PV Array Performance Model (SAPM) generates 5 points on a
PV module's I-V curve (Voc, Isc, Ix, Ixx, Vmp/Imp) according to
SAND2004-3535. Assumes a reference cell temperature of 25 C.
Parameters
----------
effective_irradiance : numeric
Irradiance reaching the module's cells, after reflections and
adjustment for spectrum. [W/m2]
temp_cell : numeric
Cell temperature [C].
module : dict-like
A dict or Series defining the SAPM parameters. See the notes section
for more details.
Returns
-------
A DataFrame with the columns:
* i_sc : Short-circuit current (A)
* i_mp : Current at the maximum-power point (A)
* v_oc : Open-circuit voltage (V)
* v_mp : Voltage at maximum-power point (V)
* p_mp : Power at maximum-power point (W)
* i_x : Current at module V = 0.5Voc, defines 4th point on I-V
curve for modeling curve shape
* i_xx : Current at module V = 0.5(Voc+Vmp), defines 5th point on
I-V curve for modeling curve shape
Notes
-----
The SAPM parameters which are required in ``module`` are
listed in the following table.
The Sandia module database contains parameter values for a limited set
of modules. The CEC module database does not contain these parameters.
Both databases can be accessed using :py:func:`retrieve_sam`.
================ ========================================================
Key Description
================ ========================================================
A0-A4 The airmass coefficients used in calculating
effective irradiance
B0-B5 The angle of incidence coefficients used in calculating
effective irradiance
C0-C7 The empirically determined coefficients relating
Imp, Vmp, Ix, and Ixx to effective irradiance
Isco Short circuit current at reference condition (amps)
Impo Maximum power current at reference condition (amps)
    Voco             Open circuit voltage at reference condition (volts)
    Vmpo             Maximum power voltage at reference condition (volts)
Aisc Short circuit current temperature coefficient at
reference condition (1/C)
Aimp Maximum power current temperature coefficient at
reference condition (1/C)
Bvoco Open circuit voltage temperature coefficient at
reference condition (V/C)
Mbvoc Coefficient providing the irradiance dependence for the
BetaVoc temperature coefficient at reference irradiance
(V/C)
Bvmpo Maximum power voltage temperature coefficient at
reference condition
Mbvmp Coefficient providing the irradiance dependence for the
BetaVmp temperature coefficient at reference irradiance
(V/C)
N Empirically determined "diode factor" (dimensionless)
Cells_in_Series Number of cells in series in a module's cell string(s)
IXO Ix at reference conditions
IXXO Ixx at reference conditions
FD Fraction of diffuse irradiance used by module
================ ========================================================
References
----------
.. [1] <NAME> al, 2004, "Sandia Photovoltaic Array Performance
Model", SAND Report 3535, Sandia National Laboratories, Albuquerque,
NM.
See Also
--------
retrieve_sam
temperature.sapm_cell
temperature.sapm_module
'''
# TODO: someday, change temp_ref and irrad_ref to reference_temperature and
# reference_irradiance and expose
temp_ref = 25
irrad_ref = 1000
# TODO: remove this warning in v0.8 after deprecation period for change in
# effective irradiance units, made in v0.7
with np.errstate(invalid='ignore'): # turn off warning for NaN
ee = np.asarray(effective_irradiance)
ee_gt0 = ee[ee > 0.0]
if ee_gt0.size > 0 and np.all(ee_gt0 < 2.0):
import warnings
msg = 'effective_irradiance inputs appear to be in suns. Units ' \
'changed in v0.7 from suns to W/m2'
warnings.warn(msg, RuntimeWarning)
q = 1.60218e-19 # Elementary charge in units of coulombs
kb = 1.38066e-23 # Boltzmann's constant in units of J/K
# avoid problem with integer input
Ee = np.array(effective_irradiance, dtype='float64') / irrad_ref
# set up masking for 0, positive, and nan inputs
Ee_gt_0 = np.full_like(Ee, False, dtype='bool')
Ee_eq_0 = np.full_like(Ee, False, dtype='bool')
notnan = ~np.isnan(Ee)
np.greater(Ee, 0, where=notnan, out=Ee_gt_0)
np.equal(Ee, 0, where=notnan, out=Ee_eq_0)
Bvmpo = module['Bvmpo'] + module['Mbvmp']*(1 - Ee)
Bvoco = module['Bvoco'] + module['Mbvoc']*(1 - Ee)
delta = module['N'] * kb * (temp_cell + 273.15) / q
# avoid repeated computation
logEe = np.full_like(Ee, np.nan)
np.log(Ee, where=Ee_gt_0, out=logEe)
logEe = np.where(Ee_eq_0, -np.inf, logEe)
# avoid repeated __getitem__
cells_in_series = module['Cells_in_Series']
out = OrderedDict()
out['i_sc'] = (
module['Isco'] * Ee * (1 + module['Aisc']*(temp_cell - temp_ref)))
out['i_mp'] = (
module['Impo'] * (module['C0']*Ee + module['C1']*(Ee**2)) *
(1 + module['Aimp']*(temp_cell - temp_ref)))
out['v_oc'] = np.maximum(0, (
module['Voco'] + cells_in_series * delta * logEe +
Bvoco*(temp_cell - temp_ref)))
out['v_mp'] = np.maximum(0, (
module['Vmpo'] +
module['C2'] * cells_in_series * delta * logEe +
module['C3'] * cells_in_series * ((delta * logEe) ** 2) +
Bvmpo*(temp_cell - temp_ref)))
out['p_mp'] = out['i_mp'] * out['v_mp']
out['i_x'] = (
module['IXO'] * (module['C4']*Ee + module['C5']*(Ee**2)) *
(1 + module['Aisc']*(temp_cell - temp_ref)))
# the Ixx calculation in King 2004 has a typo (mixes up Aisc and Aimp)
out['i_xx'] = (
module['IXXO'] * (module['C6']*Ee + module['C7']*(Ee**2)) *
(1 + module['Aisc']*(temp_cell - temp_ref)))
if isinstance(out['i_sc'], pd.Series):
out = pd.DataFrame(out)
return out
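# Illustrative sketch (not a prescribed workflow): run the SAPM with
# parameters pulled from the Sandia module database via retrieve_sam. The
# first column is used only so the example does not depend on a specific
# module name.
def _example_sapm_from_database():
    sandia_modules = retrieve_sam('SandiaMod')
    module = sandia_modules.iloc[:, 0]
    return sapm(effective_irradiance=800.0, temp_cell=45.0, module=module)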
def _sapm_celltemp_translator(*args, **kwargs):
# TODO: remove this function after deprecation period for sapm_celltemp
new_kwargs = {}
# convert position arguments to kwargs
old_arg_list = ['poa_global', 'wind_speed', 'temp_air', 'model']
for pos in range(len(args)):
new_kwargs[old_arg_list[pos]] = args[pos]
# determine value for new kwarg 'model'
try:
param_set = new_kwargs['model']
new_kwargs.pop('model') # model is not a new kwarg
except KeyError:
# 'model' not in positional arguments, check kwargs
try:
param_set = kwargs['model']
kwargs.pop('model')
except KeyError:
# 'model' not in kwargs, use old default value
param_set = 'open_rack_glass_glass'
if type(param_set) is list:
new_kwargs.update({'a': param_set[0],
'b': param_set[1],
'deltaT': param_set[2]})
elif type(param_set) is dict:
new_kwargs.update(param_set)
else: # string
params = temperature._temperature_model_params('sapm', param_set)
new_kwargs.update(params)
new_kwargs.update(kwargs) # kwargs with unchanged names
new_kwargs['irrad_ref'] = 1000 # default for new kwarg
# convert old positional arguments to named kwargs
return temperature.sapm_cell(**new_kwargs)
sapm_celltemp = deprecated('0.7', alternative='temperature.sapm_cell',
name='sapm_celltemp', removal='0.8',
addendum='Note that the arguments and argument '
'order for temperature.sapm_cell are different '
'than for sapm_celltemp')(_sapm_celltemp_translator)
def _pvsyst_celltemp_translator(*args, **kwargs):
# TODO: remove this function after deprecation period for pvsyst_celltemp
new_kwargs = {}
# convert position arguments to kwargs
old_arg_list = ['poa_global', 'temp_air', 'wind_speed', 'eta_m',
'alpha_absorption', 'model_params']
for pos in range(len(args)):
new_kwargs[old_arg_list[pos]] = args[pos]
# determine value for new kwarg 'model'
try:
param_set = new_kwargs['model_params']
new_kwargs.pop('model_params') # model_params is not a new kwarg
except KeyError:
# 'model_params' not in positional arguments, check kwargs
try:
param_set = kwargs['model_params']
kwargs.pop('model_params')
except KeyError:
# 'model_params' not in kwargs, use old default value
param_set = 'freestanding'
if type(param_set) in (list, tuple):
new_kwargs.update({'u_c': param_set[0],
'u_v': param_set[1]})
else: # string
params = temperature._temperature_model_params('pvsyst', param_set)
new_kwargs.update(params)
new_kwargs.update(kwargs) # kwargs with unchanged names
# convert old positional arguments to named kwargs
return temperature.pvsyst_cell(**new_kwargs)
pvsyst_celltemp = deprecated(
'0.7', alternative='temperature.pvsyst_cell', name='pvsyst_celltemp',
removal='0.8', addendum='Note that the argument names for '
'temperature.pvsyst_cell are different than '
'for pvsyst_celltemp')(_pvsyst_celltemp_translator)
def sapm_spectral_loss(airmass_absolute, module):
"""
Calculates the SAPM spectral loss coefficient, F1.
Parameters
----------
airmass_absolute : numeric
Absolute airmass
module : dict-like
A dict, Series, or DataFrame defining the SAPM performance
parameters. See the :py:func:`sapm` notes section for more
details.
Returns
-------
F1 : numeric
The SAPM spectral loss coefficient.
Notes
-----
nan airmass values will result in 0 output.
"""
am_coeff = [module['A4'], module['A3'], module['A2'], module['A1'],
module['A0']]
spectral_loss = np.polyval(am_coeff, airmass_absolute)
spectral_loss = np.where(np.isnan(spectral_loss), 0, spectral_loss)
spectral_loss = np.maximum(0, spectral_loss)
if isinstance(airmass_absolute, pd.Series):
spectral_loss = pd.Series(spectral_loss, airmass_absolute.index)
return spectral_loss
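# Minimal sketch: F1 is a fourth-degree polynomial in absolute airmass. The
# A0-A4 values below are placeholders of plausible magnitude, not the
# coefficients of any real module.
def _example_sapm_spectral_loss():
    module = {'A0': 0.93, 'A1': 0.054, 'A2': -0.0086, 'A3': 7.0e-4,
              'A4': -2.2e-5}
    return sapm_spectral_loss(airmass_absolute=1.5, module=module)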
def sapm_effective_irradiance(poa_direct, poa_diffuse, airmass_absolute, aoi,
module):
r"""
Calculates the SAPM effective irradiance using the SAPM spectral
loss and SAPM angle of incidence loss functions.
Parameters
----------
poa_direct : numeric
The direct irradiance incident upon the module. [W/m2]
poa_diffuse : numeric
The diffuse irradiance incident on module. [W/m2]
airmass_absolute : numeric
Absolute airmass. [unitless]
aoi : numeric
Angle of incidence. [degrees]
module : dict-like
A dict, Series, or DataFrame defining the SAPM performance
parameters. See the :py:func:`sapm` notes section for more
details.
Returns
-------
effective_irradiance : numeric
Effective irradiance accounting for reflections and spectral content.
[W/m2]
Notes
-----
The SAPM model for effective irradiance [1]_ translates broadband direct
and diffuse irradiance on the plane of array to the irradiance absorbed by
a module's cells.
The model is
.. math::
        Ee = f_1(AM_a) (E_b f_2(AOI) + f_d E_d)
where :math:`Ee` is effective irradiance (W/m2), :math:`f_1` is a fourth
degree polynomial in air mass :math:`AM_a`, :math:`E_b` is beam (direct)
irradiance on the plane of array, :math:`E_d` is diffuse irradiance on the
plane of array, :math:`f_2` is a fifth degree polynomial in the angle of
incidence :math:`AOI`, and :math:`f_d` is the fraction of diffuse
irradiance on the plane of array that is not reflected away.
References
----------
.. [1] <NAME> et al, "Sandia Photovoltaic Array Performance Model",
SAND2004-3535, Sandia National Laboratories, Albuquerque, NM
See also
--------
pvlib.iam.sapm
pvlib.pvsystem.sapm_spectral_loss
pvlib.pvsystem.sapm
"""
F1 = sapm_spectral_loss(airmass_absolute, module)
F2 = iam.sapm(aoi, module)
Ee = F1 * (poa_direct * F2 + module['FD'] * poa_diffuse)
return Ee
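# Short sketch of the effective-irradiance formula above. The A0-A4, B0-B5
# and FD values are placeholders chosen for illustration only, not the
# parameters of any real module.
def _example_sapm_effective_irradiance():
    module = {'A0': 0.93, 'A1': 0.054, 'A2': -0.0086, 'A3': 7.0e-4,
              'A4': -2.2e-5, 'B0': 1.0, 'B1': -0.002, 'B2': 3.0e-4,
              'B3': -1.0e-5, 'B4': 1.0e-7, 'B5': -4.0e-10, 'FD': 1.0}
    return sapm_effective_irradiance(poa_direct=700.0, poa_diffuse=100.0,
                                     airmass_absolute=1.5, aoi=30.0,
                                     module=module)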
def singlediode(photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts=None,
method='lambertw'):
"""
Solve the single-diode model to obtain a photovoltaic IV curve.
Singlediode solves the single diode equation [1]_
.. math::
        I = I_L - I_0 \left[ \exp \left( \frac{V + I R_s}{n N_s V_{th}} \right) - 1 \right] - \frac{V + I R_s}{R_{sh}}
for ``I`` and ``V`` when given ``IL, I0, Rs, Rsh,`` and ``nNsVth
(nNsVth = n*Ns*Vth)`` which are described later. Returns a DataFrame
which contains the 5 points on the I-V curve specified in
SAND2004-3535 [3]_. If all IL, I0, Rs, Rsh, and nNsVth are scalar, a
    single curve will be returned; if any are Series (of the same
length), multiple IV curves will be calculated.
The input parameters can be calculated using calcparams_desoto from
meteorological data.
Parameters
----------
photocurrent : numeric
Light-generated current (photocurrent) in amperes under desired
IV curve conditions. Often abbreviated ``I_L``.
0 <= photocurrent
saturation_current : numeric
Diode saturation current in amperes under desired IV curve
conditions. Often abbreviated ``I_0``.
0 < saturation_current
resistance_series : numeric
Series resistance in ohms under desired IV curve conditions.
Often abbreviated ``Rs``.
0 <= resistance_series < numpy.inf
resistance_shunt : numeric
Shunt resistance in ohms under desired IV curve conditions.
Often abbreviated ``Rsh``.
0 < resistance_shunt <= numpy.inf
nNsVth : numeric
The product of three components. 1) The usual diode ideal factor
(n), 2) the number of cells in series (Ns), and 3) the cell
thermal voltage under the desired IV curve conditions (Vth). The
thermal voltage of the cell (in volts) may be calculated as
``k*temp_cell/q``, where k is Boltzmann's constant (J/K),
temp_cell is the temperature of the p-n junction in Kelvin, and
q is the charge of an electron (coulombs).
0 < nNsVth
ivcurve_pnts : None or int, default None
Number of points in the desired IV curve. If None or 0, no
IV curves will be produced.
method : str, default 'lambertw'
Determines the method used to calculate points on the IV curve. The
options are ``'lambertw'``, ``'newton'``, or ``'brentq'``.
Returns
-------
OrderedDict or DataFrame
The returned dict-like object always contains the keys/columns:
* i_sc - short circuit current in amperes.
* v_oc - open circuit voltage in volts.
* i_mp - current at maximum power point in amperes.
* v_mp - voltage at maximum power point in volts.
* p_mp - power at maximum power point in watts.
* i_x - current, in amperes, at ``v = 0.5*v_oc``.
* i_xx - current, in amperes, at ``V = 0.5*(v_oc+v_mp)``.
If ivcurve_pnts is greater than 0, the output dictionary will also
include the keys:
* i - IV curve current in amperes.
* v - IV curve voltage in volts.
The output will be an OrderedDict if photocurrent is a scalar,
array, or ivcurve_pnts is not None.
The output will be a DataFrame if photocurrent is a Series and
ivcurve_pnts is None.
Notes
-----
If the method is ``'lambertw'`` then the solution employed to solve the
implicit diode equation utilizes the Lambert W function to obtain an
explicit function of :math:`V=f(I)` and :math:`I=f(V)` as shown in [2]_.
If the method is ``'newton'`` then the root-finding Newton-Raphson method
is used. It should be safe for well behaved IV-curves, but the ``'brentq'``
method is recommended for reliability.
If the method is ``'brentq'`` then Brent's bisection search method is used
that guarantees convergence by bounding the voltage between zero and
open-circuit.
If the method is either ``'newton'`` or ``'brentq'`` and ``ivcurve_pnts``
are indicated, then :func:`pvlib.singlediode.bishop88` [4]_ is used to
    calculate the points on the IV curve at diode voltages from zero to
open-circuit voltage with a log spacing that gets closer as voltage
increases. If the method is ``'lambertw'`` then the calculated points on
the IV curve are linearly spaced.
References
----------
.. [1] <NAME>, <NAME>, <NAME>, "Applied Photovoltaics" ISBN
0 86758 909 4
.. [2] <NAME>, <NAME>, "Exact analytical solutions of the
parameters of real solar cells using Lambert W-function", Solar
Energy Materials and Solar Cells, 81 (2004) 269-277.
.. [3] <NAME> et al, "Sandia Photovoltaic Array Performance Model",
SAND2004-3535, Sandia National Laboratories, Albuquerque, NM
.. [4] "Computer simulation of the effects of electrical mismatches in
photovoltaic cell interconnection circuits" JW Bishop, Solar Cell (1988)
https://doi.org/10.1016/0379-6787(88)90059-2
See also
--------
sapm
calcparams_desoto
pvlib.singlediode.bishop88
"""
# Calculate points on the IV curve using the LambertW solution to the
# single diode equation
if method.lower() == 'lambertw':
out = _singlediode._lambertw(
photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts
)
i_sc, v_oc, i_mp, v_mp, p_mp, i_x, i_xx = out[:7]
if ivcurve_pnts:
ivcurve_i, ivcurve_v = out[7:]
else:
# Calculate points on the IV curve using either 'newton' or 'brentq'
# methods. Voltages are determined by first solving the single diode
# equation for the diode voltage V_d then backing out voltage
args = (photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth) # collect args
v_oc = _singlediode.bishop88_v_from_i(
0.0, *args, method=method.lower()
)
i_mp, v_mp, p_mp = _singlediode.bishop88_mpp(
*args, method=method.lower()
)
i_sc = _singlediode.bishop88_i_from_v(
0.0, *args, method=method.lower()
)
i_x = _singlediode.bishop88_i_from_v(
v_oc / 2.0, *args, method=method.lower()
)
i_xx = _singlediode.bishop88_i_from_v(
(v_oc + v_mp) / 2.0, *args, method=method.lower()
)
# calculate the IV curve if requested using bishop88
if ivcurve_pnts:
vd = v_oc * (
(11.0 - np.logspace(np.log10(11.0), 0.0,
ivcurve_pnts)) / 10.0
)
ivcurve_i, ivcurve_v, _ = _singlediode.bishop88(vd, *args)
out = OrderedDict()
out['i_sc'] = i_sc
out['v_oc'] = v_oc
out['i_mp'] = i_mp
out['v_mp'] = v_mp
out['p_mp'] = p_mp
out['i_x'] = i_x
out['i_xx'] = i_xx
if ivcurve_pnts:
out['v'] = ivcurve_v
out['i'] = ivcurve_i
if isinstance(photocurrent, pd.Series) and not ivcurve_pnts:
        out = pd.DataFrame(out, index=photocurrent.index)
    return out
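# Hedged sketch tying the docstring together: build nNsVth from the cell
# thermal voltage k*T/q and call singlediode directly. All numeric values
# are placeholders chosen for illustration only.
def _example_singlediode_direct():
    k = 1.38066e-23   # Boltzmann constant, J/K
    q = 1.60218e-19   # elementary charge, C
    temp_cell = 25.0  # cell temperature, C
    n = 1.1           # diode ideality factor
    cells_in_series = 60
    nNsVth = n * cells_in_series * k * (temp_cell + 273.15) / q
    return singlediode(photocurrent=6.0, saturation_current=5e-10,
                       resistance_series=0.4, resistance_shunt=300.0,
                       nNsVth=nNsVth)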
from datetime import datetime, timedelta
import itertools
import netCDF4
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.model_selection import train_test_split
def valid_maxz(maxz, threshold=0.7):
num_nan = np.isnan(maxz).sum(axis=1)
valid = (num_nan/maxz.shape[1] < threshold)
return valid
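# Quick sketch of valid_maxz on a toy array: rows where the NaN fraction
# reaches the threshold are rejected. Shapes and values are arbitrary.
def _example_valid_maxz():
    maxz = np.array([[1.0, 2.0, np.nan, 4.0],
                     [np.nan, np.nan, np.nan, 4.0]])
    return valid_maxz(maxz, threshold=0.7)  # array([ True, False])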
def prepare_scalar_dataset(features_fn, valid_frac=0.1, test_frac=0.1, random_seed=1234):
with netCDF4.Dataset(features_fn, 'r') as ds:
maxz = ds["past_features/nexrad::MAXZ::nbagg-mean-25-circular"][:]
valid = valid_maxz(maxz)
# load time data and group by day
time = np.array(ds.variables["time"][:], copy=False)
n_total = len(time)
epoch = datetime(1970,1,1)
time = [epoch+timedelta(seconds=int(t)) for t in time]
indices_by_day = {}
for (i,t) in enumerate(time):
date = t.date()
if valid[i]:
if date not in indices_by_day:
indices_by_day[date] = []
indices_by_day[date].append(i)
rng = np.random.RandomState(seed=random_seed)
# select at least valid_frac fraction of data for validation
# using entire days
valid_indices = []
while len(valid_indices) / n_total < valid_frac:
day = rng.choice(list(indices_by_day), 1)[0]
valid_indices += indices_by_day[day]
del indices_by_day[day]
# then select test_frac for testing
test_indices = []
while len(test_indices) / n_total < test_frac:
day = rng.choice(list(indices_by_day), 1)[0]
test_indices += indices_by_day[day]
del indices_by_day[day]
# the rest of the data go to the training set
train_indices = list(
itertools.chain.from_iterable(indices_by_day.values())
)
indices = {
"train": np.array(train_indices),
"valid": np.array(valid_indices),
"test": np.array(test_indices)
}
for dataset in ["train", "valid", "test"]:
rng.shuffle(indices[dataset])
print("Training N={}, validation N={}, test N={}".format(
len(train_indices), len(valid_indices), len(test_indices)
))
# load data from file
past_features = {"train": {}, "valid": {}, "test": {}}
future_features = {"train": {}, "valid": {}, "test": {}}
for (features, group) in [
(past_features, "past_features"),
(future_features, "future_features")
]:
for feat in sorted(ds[group].variables.keys()):
(data_source, var, feat_type) = feat.split("::")
if feat_type.startswith("nbagg"):
print(group+"/"+feat)
n = ds[group][feat].shape[1]
data = np.array(ds[group][feat][:], copy=False)
for dataset in indices:
for k in range(n):
time_ind = n-k if group == "past_features" else k
scalar_name = feat + "::{}".format(time_ind)
features[dataset][scalar_name] = data[indices[dataset],k]
for dataset in ["train", "valid", "test"]:
            past_features[dataset] = pd.DataFrame.from_dict(
                past_features[dataset])
import pandas as pd
import pandas.testing as pdt
import pytest
from pyspark.sql import functions
from cape_privacy.spark import utils
from cape_privacy.spark.transformations import tokenizer as tkn
def _apply_tokenizer(sess, df, tokenizer, col_to_rename):
df = sess.createDataFrame(df, schema=["name"])
result_df = df.select(tokenizer(functions.col("name")))
return result_df.withColumnRenamed(col_to_rename, "name").toPandas()
def test_tokenizer_simple():
sess = utils.make_session("test.tokenizer.simple")
test_df = pd.DataFrame({"name": ["Alice", "Bob"]})
expected = pd.DataFrame(
{
"name": [
"<KEY>",
"<KEY>",
]
}
)
key = "secret_key"
df = _apply_tokenizer(
sess,
test_df,
tkn.Tokenizer(max_token_len=None, key=key),
col_to_rename="to_token(name)",
)
pdt.assert_frame_equal(df, expected)
def test_tokenizer_is_linkable():
sess = utils.make_session("test.tokenizer.isLinkable")
test_df = pd.DataFrame({"name": ["Alice", "Bob"]})
key1 = "secret_key"
key2 = "secret_key"
df1 = _apply_tokenizer(
sess,
test_df,
tkn.Tokenizer(max_token_len=None, key=key1),
col_to_rename="to_token(name)",
)
df2 = _apply_tokenizer(
sess,
test_df,
tkn.Tokenizer(max_token_len=None, key=key2),
col_to_rename="to_token(name)",
)
pdt.assert_frame_equal(df1, df2)
def test_tokenizer_is_not_linkable():
sess = utils.make_session("test.tokenizer.isNotLinkable")
test_df = pd.DataFrame({"name": ["Alice", "Bob"]})
key1 = "secret_key"
key2 = "not_your_secret_key"
df1 = _apply_tokenizer(
sess,
test_df,
tkn.Tokenizer(max_token_len=None, key=key1),
col_to_rename="to_token(name)",
)
df2 = _apply_tokenizer(
sess,
test_df,
tkn.Tokenizer(max_token_len=None, key=key2),
col_to_rename="to_token(name)",
)
    with pytest.raises(AssertionError):
        pdt.assert_frame_equal(df1, df2)
def test_tokenizer_with_max_token_len():
sess = utils.make_session("test.tokenizer.maxTokenLen")
test_df = pd.DataFrame({"name": ["Alice", "Bob"]})
expected = pd.DataFrame({"name": ["70a4b1a987", "dd4532a296"]})
max_token_len = 10
key = "secret_key"
df = _apply_tokenizer(
sess,
test_df,
tkn.Tokenizer(max_token_len=max_token_len, key=key),
col_to_rename="to_token(name)",
)
pdt.assert_frame_equal(df, expected)
def test_tokenizer_no_key():
sess = utils.make_session("test.tokenizer.maxTokenLen")
test_df = pd.DataFrame({"name": ["Alice", "Bob"]})
_apply_tokenizer(
sess,
test_df,
tkn.Tokenizer(max_token_len=None, key=None),
col_to_rename="to_token(name)",
)
def test_reversible_tokenizer():
sess = utils.make_session("test.tokenizer.reversibleTokenizer")
key = b"5" * 32
plaintext = pd.DataFrame({"name": ["Alice", "Bob"]})
tokenized = _apply_tokenizer(
sess,
plaintext,
tkn.ReversibleTokenizer(key=key),
col_to_rename="to_token(name)",
)
tokenized_expected = pd.DataFrame(
{
"name": [
"<KEY>",
"e0f40aea0d5c21b35967c4231b98b5b3e5338e",
]
}
)
    pdt.assert_frame_equal(tokenized, tokenized_expected)
import datetime
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import Timedelta, merge_asof, read_csv, to_datetime
import pandas._testing as tm
from pandas.core.reshape.merge import MergeError
class TestAsOfMerge:
def read_data(self, datapath, name, dedupe=False):
path = datapath("reshape", "merge", "data", name)
x = read_csv(path)
if dedupe:
x = x.drop_duplicates(["time", "ticker"], keep="last").reset_index(
drop=True
)
x.time = to_datetime(x.time)
return x
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
self.trades = self.read_data(datapath, "trades.csv")
self.quotes = self.read_data(datapath, "quotes.csv", dedupe=True)
self.asof = self.read_data(datapath, "asof.csv")
self.tolerance = self.read_data(datapath, "tolerance.csv")
self.allow_exact_matches = self.read_data(datapath, "allow_exact_matches.csv")
self.allow_exact_matches_and_tolerance = self.read_data(
datapath, "allow_exact_matches_and_tolerance.csv"
)
def test_examples1(self):
""" doc-string examples """
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 3, 7]}
)
result = pd.merge_asof(left, right, on="a")
tm.assert_frame_equal(result, expected)
def test_examples2(self):
""" doc-string examples """
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.038",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.048",
"20160525 13:30:00.049",
"20160525 13:30:00.072",
"20160525 13:30:00.075",
]
),
"ticker": [
"GOOG",
"MSFT",
"MSFT",
"MSFT",
"GOOG",
"AAPL",
"GOOG",
"MSFT",
],
"bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
"ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03],
},
columns=["time", "ticker", "bid", "ask"],
)
pd.merge_asof(trades, quotes, on="time", by="ticker")
pd.merge_asof(
trades, quotes, on="time", by="ticker", tolerance=pd.Timedelta("2ms")
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.038",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.97, np.nan, np.nan, np.nan],
"ask": [np.nan, 51.98, np.nan, np.nan, np.nan],
},
columns=["time", "ticker", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(
trades,
quotes,
on="time",
by="ticker",
tolerance=pd.Timedelta("10ms"),
allow_exact_matches=False,
)
tm.assert_frame_equal(result, expected)
def test_examples3(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, np.nan]}
)
result = pd.merge_asof(left, right, on="a", direction="forward")
tm.assert_frame_equal(result, expected)
def test_examples4(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, 7]}
)
result = pd.merge_asof(left, right, on="a", direction="nearest")
tm.assert_frame_equal(result, expected)
def test_basic(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_categorical(self):
expected = self.asof
trades = self.trades.copy()
trades.ticker = trades.ticker.astype("category")
quotes = self.quotes.copy()
quotes.ticker = quotes.ticker.astype("category")
expected.ticker = expected.ticker.astype("category")
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_left_index(self):
# GH14253
expected = self.asof
trades = self.trades.set_index("time")
quotes = self.quotes
result = merge_asof(
trades, quotes, left_index=True, right_on="time", by="ticker"
)
        # left-only index uses right's index, oddly
expected.index = result.index
        # time column appears after left's columns
expected = expected[result.columns]
tm.assert_frame_equal(result, expected)
def test_basic_right_index(self):
expected = self.asof
trades = self.trades
quotes = self.quotes.set_index("time")
result = merge_asof(
trades, quotes, left_on="time", right_index=True, by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_basic_left_index_right_index(self):
expected = self.asof.set_index("time")
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
result = merge_asof(
trades, quotes, left_index=True, right_index=True, by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_multi_index_on(self):
def index_by_time_then_arbitrary_new_level(df):
df = df.set_index("time")
df = pd.concat([df, df], keys=["f1", "f2"], names=["f", "time"])
return df.reorder_levels([1, 0]).sort_index()
trades = index_by_time_then_arbitrary_new_level(self.trades)
quotes = index_by_time_then_arbitrary_new_level(self.quotes)
expected = index_by_time_then_arbitrary_new_level(self.asof)
result = merge_asof(trades, quotes, on="time", by=["ticker"])
tm.assert_frame_equal(result, expected)
def test_on_and_index(self):
# "on" parameter and index together is prohibited
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
with pytest.raises(MergeError):
merge_asof(
trades, quotes, left_on="price", left_index=True, right_index=True
)
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
with pytest.raises(MergeError):
merge_asof(
trades, quotes, right_on="bid", left_index=True, right_index=True
)
def test_basic_left_by_right_by(self):
# GH14253
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(
trades, quotes, on="time", left_by="ticker", right_by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_missing_right_by(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
q = quotes[quotes.ticker != "MSFT"]
result = merge_asof(trades, q, on="time", by="ticker")
expected.loc[expected.ticker == "MSFT", ["bid", "ask"]] = np.nan
tm.assert_frame_equal(result, expected)
def test_multiby(self):
# GH13936
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "exch", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.045",
"20160525 13:30:00.049",
]
),
"ticker": ["GOOG", "MSFT", "MSFT", "MSFT", "GOOG", "AAPL"],
"exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
"bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
"ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
},
columns=["time", "ticker", "exch", "bid", "ask"],
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
"ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
},
columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
tm.assert_frame_equal(result, expected)
def test_multiby_heterogeneous_types(self):
# GH13936
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": [0, 0, 1, 1, 2],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "exch", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.045",
"20160525 13:30:00.049",
]
),
"ticker": [1, 0, 0, 0, 1, 2],
"exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
"bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
"ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
},
columns=["time", "ticker", "exch", "bid", "ask"],
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": [0, 0, 1, 1, 2],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
"ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
},
columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
tm.assert_frame_equal(result, expected)
def test_multiby_indexed(self):
# GH15676
left = pd.DataFrame(
[
[pd.to_datetime("20160602"), 1, "a"],
[pd.to_datetime("20160602"), 2, "a"],
[pd.to_datetime("20160603"), 1, "b"],
[pd.to_datetime("20160603"), 2, "b"],
],
columns=["time", "k1", "k2"],
).set_index("time")
right = pd.DataFrame(
[
[pd.to_datetime("20160502"), 1, "a", 1.0],
[pd.to_datetime("20160502"), 2, "a", 2.0],
[pd.to_datetime("20160503"), 1, "b", 3.0],
[pd.to_datetime("20160503"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
expected = pd.DataFrame(
[
[pd.to_datetime("20160602"), 1, "a", 1.0],
[pd.to_datetime("20160602"), 2, "a", 2.0],
[pd.to_datetime("20160603"), 1, "b", 3.0],
[pd.to_datetime("20160603"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
result = pd.merge_asof(
left, right, left_index=True, right_index=True, by=["k1", "k2"]
)
tm.assert_frame_equal(expected, result)
with pytest.raises(MergeError):
pd.merge_asof(
left,
right,
left_index=True,
right_index=True,
left_by=["k1", "k2"],
right_by=["k1"],
)
def test_basic2(self, datapath):
expected = self.read_data(datapath, "asof2.csv")
trades = self.read_data(datapath, "trades2.csv")
quotes = self.read_data(datapath, "quotes2.csv", dedupe=True)
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_no_by(self):
f = (
lambda x: x[x.ticker == "MSFT"]
.drop("ticker", axis=1)
.reset_index(drop=True)
)
# just use a single ticker
expected = f(self.asof)
trades = f(self.trades)
quotes = f(self.quotes)
result = merge_asof(trades, quotes, on="time")
tm.assert_frame_equal(result, expected)
def test_valid_join_keys(self):
trades = self.trades
quotes = self.quotes
with pytest.raises(MergeError):
merge_asof(trades, quotes, left_on="time", right_on="bid", by="ticker")
with pytest.raises(MergeError):
merge_asof(trades, quotes, on=["time", "ticker"], by="ticker")
with pytest.raises(MergeError):
merge_asof(trades, quotes, by="ticker")
def test_with_duplicates(self, datapath):
q = (
pd.concat([self.quotes, self.quotes])
.sort_values(["time", "ticker"])
.reset_index(drop=True)
)
result = merge_asof(self.trades, q, on="time", by="ticker")
expected = self.read_data(datapath, "asof.csv")
tm.assert_frame_equal(result, expected)
def test_with_duplicates_no_on(self):
df1 = pd.DataFrame({"key": [1, 1, 3], "left_val": [1, 2, 3]})
df2 = pd.DataFrame({"key": [1, 2, 2], "right_val": [1, 2, 3]})
result = merge_asof(df1, df2, on="key")
expected = pd.DataFrame(
{"key": [1, 1, 3], "left_val": [1, 2, 3], "right_val": [1, 1, 3]}
)
tm.assert_frame_equal(result, expected)
def test_valid_allow_exact_matches(self):
trades = self.trades
quotes = self.quotes
with pytest.raises(MergeError):
merge_asof(
trades, quotes, on="time", by="ticker", allow_exact_matches="foo"
)
def test_valid_tolerance(self):
trades = self.trades
quotes = self.quotes
# dti
merge_asof(trades, quotes, on="time", by="ticker", tolerance=Timedelta("1s"))
# integer
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=1,
)
# incompat
with pytest.raises(MergeError):
merge_asof(trades, quotes, on="time", by="ticker", tolerance=1)
# invalid
with pytest.raises(MergeError):
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=1.0,
)
# invalid negative
with pytest.raises(MergeError):
merge_asof(
trades, quotes, on="time", by="ticker", tolerance=-Timedelta("1s")
)
with pytest.raises(MergeError):
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=-1,
)
def test_non_sorted(self):
trades = self.trades.sort_values("time", ascending=False)
quotes = self.quotes.sort_values("time", ascending=False)
# we require that we are already sorted on time & quotes
assert not trades.time.is_monotonic
assert not quotes.time.is_monotonic
with pytest.raises(ValueError):
merge_asof(trades, quotes, on="time", by="ticker")
trades = self.trades.sort_values("time")
assert trades.time.is_monotonic
assert not quotes.time.is_monotonic
with pytest.raises(ValueError):
merge_asof(trades, quotes, on="time", by="ticker")
quotes = self.quotes.sort_values("time")
assert trades.time.is_monotonic
assert quotes.time.is_monotonic
# ok, though has dupes
merge_asof(trades, self.quotes, on="time", by="ticker")
@pytest.mark.parametrize(
"tolerance",
[Timedelta("1day"), datetime.timedelta(days=1)],
ids=["pd.Timedelta", "datetime.timedelta"],
)
def test_tolerance(self, tolerance):
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes, on="time", by="ticker", tolerance=tolerance)
expected = self.tolerance
tm.assert_frame_equal(result, expected)
def test_tolerance_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]}
)
result = pd.merge_asof(left, right, on="a", direction="forward", tolerance=1)
tm.assert_frame_equal(result, expected)
def test_tolerance_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]}
)
result = pd.merge_asof(left, right, on="a", direction="nearest", tolerance=1)
tm.assert_frame_equal(result, expected)
def test_tolerance_tz(self):
# GH 14844
left = pd.DataFrame(
{
"date": pd.date_range(
start=pd.to_datetime("2016-01-02"),
freq="D",
periods=5,
tz=pytz.timezone("UTC"),
),
"value1": np.arange(5),
}
)
right = pd.DataFrame(
{
"date": pd.date_range(
start=pd.to_datetime("2016-01-01"),
freq="D",
periods=5,
tz=pytz.timezone("UTC"),
),
"value2": list("ABCDE"),
}
)
result = pd.merge_asof(left, right, on="date", tolerance=pd.Timedelta("1 day"))
expected = pd.DataFrame(
{
"date": pd.date_range(
start=pd.to_datetime("2016-01-02"),
freq="D",
periods=5,
tz=pytz.timezone("UTC"),
),
"value1": np.arange(5),
"value2": list("BCDEE"),
}
)
tm.assert_frame_equal(result, expected)
def test_tolerance_float(self):
# GH22981
left = pd.DataFrame({"a": [1.1, 3.5, 10.9], "left_val": ["a", "b", "c"]})
right = pd.DataFrame(
{"a": [1.0, 2.5, 3.3, 7.5, 11.5], "right_val": [1.0, 2.5, 3.3, 7.5, 11.5]}
)
expected = pd.DataFrame(
{
"a": [1.1, 3.5, 10.9],
"left_val": ["a", "b", "c"],
"right_val": [1, 3.3, np.nan],
}
)
result = pd.merge_asof(left, right, on="a", direction="nearest", tolerance=0.5)
tm.assert_frame_equal(result, expected)
def test_index_tolerance(self):
# GH 15135
expected = self.tolerance.set_index("time")
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
result = pd.merge_asof(
trades,
quotes,
left_index=True,
right_index=True,
by="ticker",
tolerance=pd.Timedelta("1day"),
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches(self):
result = merge_asof(
self.trades, self.quotes, on="time", by="ticker", allow_exact_matches=False
)
expected = self.allow_exact_matches
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [2, 7, 11]}
)
result = pd.merge_asof(
left, right, on="a", direction="forward", allow_exact_matches=False
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [2, 3, 11]}
)
result = pd.merge_asof(
left, right, on="a", direction="nearest", allow_exact_matches=False
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance(self):
result = merge_asof(
self.trades,
self.quotes,
on="time",
by="ticker",
tolerance=Timedelta("100ms"),
allow_exact_matches=False,
)
expected = self.allow_exact_matches_and_tolerance
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance2(self):
# GH 13695
df1 = pd.DataFrame(
{"time": pd.to_datetime(["2016-07-15 13:30:00.030"]), "username": ["bob"]}
)
df2 = pd.DataFrame(
{
"time": pd.to_datetime(
["2016-07-15 13:30:00.000", "2016-07-15 13:30:00.030"]
),
"version": [1, 2],
}
)
result = pd.merge_asof(df1, df2, on="time")
expected = pd.DataFrame(
{
"time": pd.to_datetime(["2016-07-15 13:30:00.030"]),
"username": ["bob"],
"version": [2],
}
)
tm.assert_frame_equal(result, expected)
result = pd.merge_asof(df1, df2, on="time", allow_exact_matches=False)
expected = pd.DataFrame(
{
"time": pd.to_datetime(["2016-07-15 13:30:00.030"]),
"username": ["bob"],
"version": [1],
}
)
tm.assert_frame_equal(result, expected)
result = pd.merge_asof(
df1,
df2,
on="time",
allow_exact_matches=False,
tolerance=pd.Timedelta("10ms"),
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(["2016-07-15 13:30:00.030"]),
"username": ["bob"],
"version": [np.nan],
}
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance3(self):
# GH 13709
df1 = pd.DataFrame(
{
"time": pd.to_datetime(
["2016-07-15 13:30:00.030", "2016-07-15 13:30:00.030"]
),
"username": ["bob", "charlie"],
}
)
df2 = pd.DataFrame(
{
"time": pd.to_datetime(
["2016-07-15 13:30:00.000", "2016-07-15 13:30:00.030"]
),
"version": [1, 2],
}
)
result = pd.merge_asof(
df1,
df2,
on="time",
allow_exact_matches=False,
tolerance=pd.Timedelta("10ms"),
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
["2016-07-15 13:30:00.030", "2016-07-15 13:30:00.030"]
),
"username": ["bob", "charlie"],
"version": [np.nan, np.nan],
}
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 3, 4, 6, 11], "right_val": [1, 3, 4, 6, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [np.nan, 6, 11]}
)
result = pd.merge_asof(
left,
right,
on="a",
direction="forward",
allow_exact_matches=False,
tolerance=1,
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 3, 4, 6, 11], "right_val": [1, 3, 4, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [np.nan, 4, 11]}
)
result = pd.merge_asof(
left,
right,
on="a",
direction="nearest",
allow_exact_matches=False,
tolerance=1,
)
tm.assert_frame_equal(result, expected)
def test_forward_by(self):
# GH14887
left = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Y", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
}
)
right = pd.DataFrame(
{
"a": [1, 6, 11, 15, 16],
"b": ["X", "Z", "Y", "Z", "Y"],
"right_val": [1, 6, 11, 15, 16],
}
)
expected = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Y", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
"right_val": [1, np.nan, 11, 15, 16],
}
)
result = pd.merge_asof(left, right, on="a", by="b", direction="forward")
tm.assert_frame_equal(result, expected)
def test_nearest_by(self):
# GH14887
left = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Z", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
}
)
right = pd.DataFrame(
{
"a": [1, 6, 11, 15, 16],
"b": ["X", "Z", "Z", "Z", "Y"],
"right_val": [1, 6, 11, 15, 16],
}
)
expected = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Z", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
"right_val": [1, 1, 11, 11, 16],
}
)
result = pd.merge_asof(left, right, on="a", by="b", direction="nearest")
tm.assert_frame_equal(result, expected)
def test_by_int(self):
# we specialize by type, so test that this is correct
df1 = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.020",
"20160525 13:30:00.030",
"20160525 13:30:00.040",
"20160525 13:30:00.050",
"20160525 13:30:00.060",
]
),
"key": [1, 2, 1, 3, 2],
"value1": [1.1, 1.2, 1.3, 1.4, 1.5],
},
columns=["time", "key", "value1"],
)
df2 = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.015",
"20160525 13:30:00.020",
"20160525 13:30:00.025",
"20160525 13:30:00.035",
"20160525 13:30:00.040",
"20160525 13:30:00.055",
"20160525 13:30:00.060",
"20160525 13:30:00.065",
]
),
"key": [2, 1, 1, 3, 2, 1, 2, 3],
"value2": [2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8],
},
columns=["time", "key", "value2"],
)
result = pd.merge_asof(df1, df2, on="time", by="key")
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.020",
"20160525 13:30:00.030",
"20160525 13:30:00.040",
"20160525 13:30:00.050",
"20160525 13:30:00.060",
]
),
"key": [1, 2, 1, 3, 2],
"value1": [1.1, 1.2, 1.3, 1.4, 1.5],
"value2": [2.2, 2.1, 2.3, 2.4, 2.7],
},
columns=["time", "key", "value1", "value2"],
)
tm.assert_frame_equal(result, expected)
def test_on_float(self):
# mimics how to determine the minimum-price variation
df1 = pd.DataFrame(
{
"price": [5.01, 0.0023, 25.13, 340.05, 30.78, 1040.90, 0.0078],
"symbol": list("ABCDEFG"),
},
columns=["symbol", "price"],
)
df2 = pd.DataFrame(
{"price": [0.0, 1.0, 100.0], "mpv": [0.0001, 0.01, 0.05]},
columns=["price", "mpv"],
)
df1 = df1.sort_values("price").reset_index(drop=True)
result = pd.merge_asof(df1, df2, on="price")
expected = pd.DataFrame(
{
"symbol": list("BGACEDF"),
"price": [0.0023, 0.0078, 5.01, 25.13, 30.78, 340.05, 1040.90],
"mpv": [0.0001, 0.0001, 0.01, 0.01, 0.01, 0.05, 0.05],
},
columns=["symbol", "price", "mpv"],
)
tm.assert_frame_equal(result, expected)
def test_on_specialized_type(self, any_real_dtype):
# see gh-13936
dtype = np.dtype(any_real_dtype).type
df1 = pd.DataFrame(
{"value": [5, 2, 25, 100, 78, 120, 79], "symbol": list("ABCDEFG")},
columns=["symbol", "value"],
)
df1.value = dtype(df1.value)
df2 = pd.DataFrame(
{"value": [0, 80, 120, 125], "result": list("xyzw")},
columns=["value", "result"],
)
df2.value = dtype(df2.value)
df1 = df1.sort_values("value").reset_index(drop=True)
result = pd.merge_asof(df1, df2, on="value")
expected = pd.DataFrame(
{
"symbol": list("BACEGDF"),
"value": [2, 5, 25, 78, 79, 100, 120],
"result": list("xxxxxyz"),
},
columns=["symbol", "value", "result"],
)
expected.value = dtype(expected.value)
tm.assert_frame_equal(result, expected)
def test_on_specialized_type_by_int(self, any_real_dtype):
# see gh-13936
dtype = np.dtype(any_real_dtype).type
df1 = pd.DataFrame(
{
"value": [5, 2, 25, 100, 78, 120, 79],
"key": [1, 2, 3, 2, 3, 1, 2],
"symbol": list("ABCDEFG"),
},
columns=["symbol", "key", "value"],
)
df1.value = dtype(df1.value)
df2 = pd.DataFrame(
{"value": [0, 80, 120, 125], "key": [1, 2, 2, 3], "result": list("xyzw")},
columns=["value", "key", "result"],
)
df2.value = dtype(df2.value)
df1 = df1.sort_values("value").reset_index(drop=True)
result = pd.merge_asof(df1, df2, on="value", by="key")
expected = pd.DataFrame(
{
"symbol": list("BACEGDF"),
"key": [2, 1, 3, 3, 2, 2, 1],
"value": [2, 5, 25, 78, 79, 100, 120],
"result": [np.nan, "x", np.nan, np.nan, np.nan, "y", "x"],
},
columns=["symbol", "key", "value", "result"],
)
expected.value = dtype(expected.value)
tm.assert_frame_equal(result, expected)
def test_on_float_by_int(self):
# type specialize both "by" and "on" parameters
df1 = pd.DataFrame(
{
"symbol": list("AAABBBCCC"),
"exch": [1, 2, 3, 1, 2, 3, 1, 2, 3],
"price": [
3.26,
3.2599,
3.2598,
12.58,
12.59,
12.5,
378.15,
378.2,
378.25,
],
},
columns=["symbol", "exch", "price"],
)
df2 = pd.DataFrame(
{
"exch": [1, 1, 1, 2, 2, 2, 3, 3, 3],
"price": [0.0, 1.0, 100.0, 0.0, 5.0, 100.0, 0.0, 5.0, 1000.0],
"mpv": [0.0001, 0.01, 0.05, 0.0001, 0.01, 0.1, 0.0001, 0.25, 1.0],
},
columns=["exch", "price", "mpv"],
)
df1 = df1.sort_values("price").reset_index(drop=True)
df2 = df2.sort_values("price").reset_index(drop=True)
result = pd.merge_asof(df1, df2, on="price", by="exch")
expected = pd.DataFrame(
{
"symbol": list("AAABBBCCC"),
"exch": [3, 2, 1, 3, 1, 2, 1, 2, 3],
"price": [
3.2598,
3.2599,
3.26,
12.5,
12.58,
12.59,
378.15,
378.2,
378.25,
],
"mpv": [0.0001, 0.0001, 0.01, 0.25, 0.01, 0.01, 0.05, 0.1, 0.25],
},
columns=["symbol", "exch", "price", "mpv"],
)
tm.assert_frame_equal(result, expected)
def test_merge_datatype_error_raises(self):
msg = r"incompatible merge keys \[0\] .*, must be the same type"
left = pd.DataFrame({"left_val": [1, 5, 10], "a": ["a", "b", "c"]})
right = pd.DataFrame({"right_val": [1, 2, 3, 6, 7], "a": [1, 2, 3, 6, 7]})
with pytest.raises(MergeError, match=msg):
merge_asof(left, right, on="a")
def test_merge_datatype_categorical_error_raises(self):
msg = (
r"incompatible merge keys \[0\] .* both sides category, "
"but not equal ones"
)
left = pd.DataFrame(
{"left_val": [1, 5, 10], "a": pd.Categorical(["a", "b", "c"])}
)
right = pd.DataFrame(
{
"right_val": [1, 2, 3, 6, 7],
"a": pd.Categorical(["a", "X", "c", "X", "b"]),
}
)
with pytest.raises(MergeError, match=msg):
merge_asof(left, right, on="a")
def test_merge_groupby_multiple_column_with_categorical_column(self):
# GH 16454
df = pd.DataFrame({"x": [0], "y": [0], "z": pd.Categorical([0])})
result = merge_asof(df, df, on="x", by=["y", "z"])
expected = pd.DataFrame({"x": [0], "y": [0], "z": pd.Categorical([0])})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"func", [lambda x: x, lambda x: to_datetime(x)], ids=["numeric", "datetime"]
)
@pytest.mark.parametrize("side", ["left", "right"])
def test_merge_on_nans(self, func, side):
# GH 23189
msg = f"Merge keys contain null values on {side} side"
nulls = func([1.0, 5.0, np.nan])
non_nulls = func([1.0, 5.0, 10.0])
df_null = pd.DataFrame({"a": nulls, "left_val": ["a", "b", "c"]})
df = pd.DataFrame({"a": non_nulls, "right_val": [1, 6, 11]})
with pytest.raises(ValueError, match=msg):
if side == "left":
merge_asof(df_null, df, on="a")
else:
merge_asof(df, df_null, on="a")
def test_merge_by_col_tz_aware(self):
# GH 21184
left = pd.DataFrame(
{
"by_col": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
"on_col": [2],
"values": ["a"],
}
)
right = pd.DataFrame(
{
"by_col": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
"on_col": [1],
"values": ["b"],
}
)
result = pd.merge_asof(left, right, by="by_col", on="on_col")
expected = pd.DataFrame(
[[pd.Timestamp("2018-01-01", tz="UTC"), 2, "a", "b"]],
columns=["by_col", "on_col", "values_x", "values_y"],
)
tm.assert_frame_equal(result, expected)
def test_by_mixed_tz_aware(self):
# GH 26649
left = pd.DataFrame(
{
"by_col1": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
"by_col2": ["HELLO"],
"on_col": [2],
"value": ["a"],
}
)
right = pd.DataFrame(
{
"by_col1": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
"by_col2": ["WORLD"],
"on_col": [1],
"value": ["b"],
}
)
result = pd.merge_asof(left, right, by=["by_col1", "by_col2"], on="on_col")
expected = pd.DataFrame(
[[pd.Timestamp("2018-01-01", tz="UTC"), "HELLO", 2, "a"]],
columns=["by_col1", "by_col2", "on_col", "value_x"],
)
expected["value_y"] = np.array([np.nan], dtype=object)
tm.assert_frame_equal(result, expected)
def test_timedelta_tolerance_nearest(self):
# GH 27642
left = pd.DataFrame(
list(zip([0, 5, 10, 15, 20, 25], [0, 1, 2, 3, 4, 5])),
columns=["time", "left"],
)
left["time"] = pd.to_timedelta(left["time"], "ms")
right = pd.DataFrame(
list(zip([0, 3, 9, 12, 15, 18], [0, 1, 2, 3, 4, 5])),
columns=["time", "right"],
)
right["time"] = pd.to_timedelta(right["time"], "ms")
expected = pd.DataFrame(
list(
zip(
[0, 5, 10, 15, 20, 25],
[0, 1, 2, 3, 4, 5],
[0, np.nan, 2, 4, np.nan, np.nan],
)
),
columns=["time", "left", "right"],
)
expected["time"] = pd.to_timedelta(expected["time"], "ms")
result = pd.merge_asof(
left, right, on="time", tolerance=Timedelta("1ms"), direction="nearest"
)
tm.assert_frame_equal(result, expected)
def test_int_type_tolerance(self, any_int_dtype):
# GH #28870
left = pd.DataFrame({"a": [0, 10, 20], "left_val": [1, 2, 3]})
right = pd.DataFrame({"a": [5, 15, 25], "right_val": [1, 2, 3]})
left["a"] = left["a"].astype(any_int_dtype)
right["a"] = right["a"].astype(any_int_dtype)
expected = pd.DataFrame(
{"a": [0, 10, 20], "left_val": [1, 2, 3], "right_val": [np.nan, 1.0, 2.0]}
)
expected["a"] = expected["a"].astype(any_int_dtype)
result = pd.merge_asof(left, right, on="a", tolerance=10)
tm.assert_frame_equal(result, expected)
def test_merge_index_column_tz(self):
# GH 29864
index = | pd.date_range("2019-10-01", freq="30min", periods=5, tz="UTC") | pandas.date_range |
#!/usr/bin/env python
# coding: utf-8
import requests as req
import json
import pandas as pd
import warnings
from IPython.display import clear_output
from time import sleep
from abc import *
warnings.filterwarnings("ignore")
class BigwingAPIProcessor(metaclass=ABCMeta) :
    ''' Bigwing abstract base class '''
def run(self, limit=True):
pass
def __fetch(self, address) :
pass
def insert(self, data, col) :
'''
        Register the dataset that will be processed.
        :param data: dataset (type: DataFrame)
        :param col: name of the column that holds the search keywords (type: str)
        :return: None
'''
        self._check("url") # Verify that the API key is valid
        # Validate the input data and insert it
if data.__class__ != | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
===============================================================================
FINANCIAL IMPACT FILE
===============================================================================
Most recent update:
21 January 2019
===============================================================================
Made by:
<NAME>
Copyright:
<NAME>, 2018
For more information, please email:
<EMAIL>
===============================================================================
"""
import numpy as np
import pandas as pd
import sys
sys.path.insert(0, '/***YOUR LOCAL FILE PATH***/CLOVER 4.0/Scripts/Conversion scripts')
from Conversion import Conversion
class Finance():
def __init__(self):
self.location = 'Bahraich'
self.CLOVER_filepath = '/***YOUR LOCAL FILE PATH***/CLOVER 4.0'
self.location_filepath = self.CLOVER_filepath + '/Locations/' + self.location
self.location_inputs = pd.read_csv(self.location_filepath + '/Location Data/Location inputs.csv',header=None,index_col=0)[1]
self.finance_filepath = self.location_filepath + '/Impact/Finance inputs.csv'
self.finance_inputs = pd.read_csv(self.finance_filepath,header=None,index_col=0).round(decimals=3)[1]
self.inverter_inputs = pd.read_csv(self.location_filepath + '/Load/Device load/yearly_load_statistics.csv',index_col=0)
#%%
#==============================================================================
# EQUIPMENT EXPENDITURE (NOT DISCOUNTED)
# Installation costs (not discounted) for new equipment installations
#==============================================================================
# PV array costs
def get_PV_cost(self,PV_array_size,year=0):
'''
Function:
Calculates cost of PV
Inputs:
PV_array_size Capacity of PV being installed
year Installation year
Outputs:
Undiscounted cost
'''
PV_cost = PV_array_size * self.finance_inputs.loc['PV cost']
annual_reduction = 0.01 * self.finance_inputs.loc['PV cost decrease']
return PV_cost * (1.0 - annual_reduction)**year
# PV balance of systems costs
def get_BOS_cost(self,PV_array_size,year=0):
'''
Function:
Calculates cost of PV BOS
Inputs:
PV_array_size Capacity of PV being installed
year Installation year
Outputs:
Undiscounted cost
'''
BOS_cost = PV_array_size * self.finance_inputs.loc['BOS cost']
annual_reduction = 0.01 * self.finance_inputs.loc['BOS cost decrease']
return BOS_cost * (1.0 - annual_reduction)**year
# Battery storage costs
def get_storage_cost(self,storage_size,year=0):
'''
Function:
Calculates cost of battery storage
Inputs:
storage_size Capacity of battery storage being installed
year Installation year
Outputs:
Undiscounted cost
'''
storage_cost = storage_size * self.finance_inputs.loc['Storage cost']
annual_reduction = 0.01 * self.finance_inputs.loc['Storage cost decrease']
return storage_cost * (1.0 - annual_reduction)**year
# Diesel generator costs
def get_diesel_cost(self,diesel_size,year=0):
'''
Function:
Calculates cost of diesel generator
Inputs:
diesel_size Capacity of diesel generator being installed
year Installation year
Outputs:
Undiscounted cost
'''
diesel_cost = diesel_size * self.finance_inputs.loc['Diesel generator cost']
annual_reduction = 0.01 * self.finance_inputs.loc['Diesel generator cost decrease']
return diesel_cost * (1.0 - annual_reduction)**year
# Installation costs
def get_installation_cost(self,PV_array_size,diesel_size,year=0):
'''
Function:
Calculates cost of installation
Inputs:
PV_array_size Capacity of PV being installed
diesel_size Capacity of diesel generator being installed
year Installation year
Outputs:
Undiscounted cost
'''
PV_installation = PV_array_size * self.finance_inputs.loc['PV installation cost']
annual_reduction_PV = 0.01 * self.finance_inputs.loc['PV installation cost decrease']
diesel_installation = diesel_size * self.finance_inputs.loc['Diesel installation cost']
annual_reduction_diesel = 0.01 * self.finance_inputs.loc['Diesel installation cost decrease']
return PV_installation * (1.0 - annual_reduction_PV)**year + diesel_installation * (1.0 - annual_reduction_diesel)**year
# Miscellaneous costs
def get_misc_costs(self,PV_array_size,diesel_size):
'''
Function:
Calculates cost of miscellaneous capacity-related costs
Inputs:
PV_array_size Capacity of PV being installed
diesel_size Capacity of diesel generator being installed
Outputs:
Undiscounted cost
'''
misc_costs = (PV_array_size + diesel_size) * self.finance_inputs.loc['Misc. costs']
return misc_costs
# Total cost of newly installed equipment
def get_total_equipment_cost(self,PV_array_size,storage_size,diesel_size,year=0):
'''
Function:
Calculates cost of all equipment costs
Inputs:
PV_array_size Capacity of PV being installed
storage_size Capacity of battery storage being installed
diesel_size Capacity of diesel generator being installed
year Installation year
Outputs:
Undiscounted cost
'''
PV_cost = self.get_PV_cost(PV_array_size,year)
BOS_cost = self.get_BOS_cost(PV_array_size,year)
storage_cost = self.get_storage_cost(storage_size,year)
diesel_cost = self.get_diesel_cost(diesel_size,year)
installation_cost = self.get_installation_cost(PV_array_size,diesel_size,year)
misc_costs = self.get_misc_costs(PV_array_size,diesel_size)
return PV_cost + BOS_cost + storage_cost + diesel_cost + installation_cost + misc_costs
#%%
#==============================================================================
# EQUIPMENT EXPENDITURE (DISCOUNTED)
# Find system equipment capital expenditure (discounted) for new equipment
#==============================================================================
def discounted_equipment_cost(self,PV_array_size,storage_size,diesel_size,year=0):
'''
Function:
Calculates cost of all equipment costs
Inputs:
PV_array_size Capacity of PV being installed
storage_size Capacity of battery storage being installed
diesel_size Capacity of diesel generator being installed
year Installation year
Outputs:
Discounted cost
'''
undiscounted_cost = self.get_total_equipment_cost(PV_array_size,storage_size,diesel_size,year)
discount_fraction = (1.0 - self.finance_inputs.loc['Discount rate'])**year
return undiscounted_cost * discount_fraction
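    # Illustrative note (not from the original model inputs): with a discount rate of
    # 0.1 and an installation in year 5, the discount fraction is (1 - 0.1)**5 ~= 0.59,
    # so e.g. a $10,000 undiscounted equipment cost contributes roughly $5,905 here.
    # Hypothetical call: Finance().discounted_equipment_cost(5, 20, 2, year=5)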
def get_connections_expenditure(self,households,year=0):
'''
Function:
Calculates cost of connecting households to the system
Inputs:
households DataFrame of households from Energy_System().simulation(...)
year Installation year
Outputs:
Discounted cost
'''
households = pd.DataFrame(households)
connection_cost = self.finance_inputs.loc['Connection cost']
new_connections = np.max(households) - np.min(households)
undiscounted_cost = float(connection_cost * new_connections)
discount_fraction = (1.0 - self.finance_inputs.loc['Discount rate'])**year
total_discounted_cost = undiscounted_cost * discount_fraction
# Section in comments allows a more accurate consideration of the discounted
# cost for new connections, but substantially increases the processing time.
# new_connections = [0]
# for t in range(int(households.shape[0])-1):
# new_connections.append(households['Households'][t+1] - households['Households'][t])
# new_connections = pd.DataFrame(new_connections)
# new_connections_daily = Conversion().hourly_profile_to_daily_sum(new_connections)
# total_daily_cost = connection_cost * new_connections_daily
# total_discounted_cost = self.discounted_cost_total(total_daily_cost,start_year,end_year)
return total_discounted_cost
# Grid extension components
def get_grid_extension_cost(self,grid_extension_distance,year):
'''
Function:
Calculates cost of extending the grid network to a community
Inputs:
grid_extension_distance Distance to the existing grid network
year Installation year
Outputs:
Discounted cost
'''
grid_extension_cost = self.finance_inputs.loc['Grid extension cost'] # per km
grid_infrastructure_cost = self.finance_inputs.loc['Grid infrastructure cost']
discount_fraction = (1.0 - self.finance_inputs.loc['Discount rate'])**year
return grid_extension_distance * grid_extension_cost * discount_fraction + grid_infrastructure_cost
#%%
# =============================================================================
# EQUIPMENT EXPENDITURE (DISCOUNTED) ON INDEPENDENT EXPENDITURE
# Find expenditure (discounted) on items independent of simulation periods
# =============================================================================
def get_independent_expenditure(self,start_year,end_year):
'''
Function:
Calculates cost of equipment which is independent of simulation periods
Inputs:
start_year Start year of simulation period
end_year End year of simulation period
Outputs:
Discounted cost
'''
inverter_expenditure = self.get_inverter_expenditure(start_year,end_year)
total_expenditure = inverter_expenditure # ... + other components as required
return total_expenditure
def get_inverter_expenditure(self,start_year,end_year):
'''
Function:
Calculates cost of inverters based on load calculations
Inputs:
start_year Start year of simulation period
end_year End year of simulation period
Outputs:
Discounted cost
'''
# Initialise inverter replacement periods
replacement_period = int(self.finance_inputs.loc['Inverter lifetime'])
system_lifetime = int(self.location_inputs['Years'])
replacement_intervals = pd.DataFrame(np.arange(0,system_lifetime,replacement_period))
replacement_intervals.columns = ['Installation year']
# Check if inverter should be replaced in the specified time interval
if replacement_intervals.loc[replacement_intervals['Installation year'].isin(
range(start_year,end_year))].empty == True:
inverter_discounted_cost = float(0.0)
return inverter_discounted_cost
# Initialise inverter sizing calculation
max_power = []
inverter_step = float(self.finance_inputs.loc['Inverter size increment'])
inverter_size = []
for i in range(len(replacement_intervals)):
# Calculate maximum power in interval years
start = replacement_intervals['Installation year'].iloc[i]
end = start + replacement_period
max_power_interval = self.inverter_inputs['Maximum'].iloc[start:end].max()
max_power.append(max_power_interval)
# Calculate resulting inverter size
inverter_size_interval = np.ceil(0.001*max_power_interval / inverter_step) * inverter_step
inverter_size.append(inverter_size_interval)
inverter_size = pd.DataFrame(inverter_size)
inverter_size.columns = ['Inverter size (kW)']
inverter_info = pd.concat([replacement_intervals,inverter_size],axis=1)
# Calculate
inverter_info['Discount rate'] = [(1 - self.finance_inputs.loc['Discount rate']) **
inverter_info['Installation year'].iloc[i] for i in range(len(inverter_info))]
inverter_info['Inverter cost ($/kW)'] = [self.finance_inputs.loc['Inverter cost'] *
(1 - 0.01*self.finance_inputs.loc['Inverter cost decrease'])
**inverter_info['Installation year'].iloc[i] for i in range(len(inverter_info))]
inverter_info['Discounted expenditure ($)'] = [inverter_info['Discount rate'].iloc[i] *
inverter_info['Inverter size (kW)'].iloc[i] * inverter_info['Inverter cost ($/kW)'].iloc[i]
for i in range(len(inverter_info))]
inverter_discounted_cost = np.sum(inverter_info.loc[inverter_info['Installation year'].
isin(np.array(range(start_year,end_year)))
]['Discounted expenditure ($)']).round(2)
return inverter_discounted_cost
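    # Worked sketch (added; numbers are assumptions, not file inputs): with an inverter
    # size increment of 0.5 kW and a maximum load of 3,200 W in an interval, the sizing
    # step above gives np.ceil(0.001 * 3200 / 0.5) * 0.5 = 3.5 kW, which is then costed
    # at that interval's installation year and discounted accordingly.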
#%%
#==============================================================================
# EXPENDITURE (DISCOUNTED) ON RUNNING COSTS
# Find expenditure (discounted) incurred during the simulation period
#==============================================================================
def get_kerosene_expenditure(self,kerosene_lamps_in_use_hourly,start_year=0,end_year=20):
'''
Function:
Calculates cost of kerosene usage
Inputs:
kerosene_lamps_in_use_hourly Output from Energy_System().simulation(...)
start_year Start year of simulation period
end_year End year of simulation period
Outputs:
Discounted cost
'''
kerosene_cost = kerosene_lamps_in_use_hourly * self.finance_inputs.loc['Kerosene cost']
total_daily_cost = Conversion().hourly_profile_to_daily_sum(kerosene_cost)
total_discounted_cost = self.discounted_cost_total(total_daily_cost,start_year,end_year)
return total_discounted_cost
def get_kerosene_expenditure_mitigated(self,kerosene_lamps_mitigated_hourly,start_year=0,end_year=20):
'''
Function:
Calculates cost of kerosene usage that has been avoided by using the system
Inputs:
kerosene_lamps_mitigated_hourly Output from Energy_System().simulation(...)
start_year Start year of simulation period
end_year End year of simulation period
Outputs:
Discounted cost
'''
kerosene_cost = kerosene_lamps_mitigated_hourly * self.finance_inputs.loc['Kerosene cost']
total_daily_cost = Conversion().hourly_profile_to_daily_sum(kerosene_cost)
total_discounted_cost = self.discounted_cost_total(total_daily_cost,start_year,end_year)
return total_discounted_cost
def get_grid_expenditure(self,grid_energy_hourly,start_year=0,end_year=20):
'''
Function:
Calculates cost of grid electricity used by the system
Inputs:
grid_energy_hourly Output from Energy_System().simulation(...)
start_year Start year of simulation period
end_year End year of simulation period
Outputs:
Discounted cost
'''
grid_cost = grid_energy_hourly * self.finance_inputs.loc['Grid cost']
total_daily_cost = Conversion().hourly_profile_to_daily_sum(grid_cost)
total_discounted_cost = self.discounted_cost_total(total_daily_cost,start_year,end_year)
return total_discounted_cost
def get_diesel_fuel_expenditure(self,diesel_fuel_usage_hourly,start_year=0,end_year=20):
'''
Function:
Calculates cost of diesel fuel used by the system
Inputs:
diesel_fuel_usage_hourly Output from Energy_System().simulation(...)
start_year Start year of simulation period
end_year End year of simulation period
Outputs:
Discounted cost
'''
diesel_fuel_usage_daily = Conversion().hourly_profile_to_daily_sum(diesel_fuel_usage_hourly)
start_day = start_year * 365
end_day = end_year * 365
diesel_price_daily = []
original_diesel_price = self.finance_inputs.loc['Diesel fuel cost']
r_y = 0.01 * self.finance_inputs.loc['Diesel fuel cost decrease']
r_d = ((1.0 + r_y) ** (1.0/365.0)) - 1.0
for t in range(start_day,end_day):
diesel_price = original_diesel_price * (1.0 - r_d)**t
diesel_price_daily.append(diesel_price)
diesel_price_daily = pd.DataFrame(diesel_price_daily)
total_daily_cost = pd.DataFrame(diesel_fuel_usage_daily.values * diesel_price_daily.values)
total_discounted_cost = self.discounted_cost_total(total_daily_cost,start_year,end_year)
return total_discounted_cost
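    # Note (added): the annual fuel price decrease r_y is converted to an equivalent
    # daily rate via r_d = (1 + r_y)**(1/365) - 1; e.g. r_y = 0.02 gives r_d ~= 5.4e-5,
    # so the diesel price decays smoothly day by day rather than in yearly steps.
    # The 2% figure is illustrative only, not a value from the finance inputs.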
#%%
#==============================================================================
# OPERATION AND MAINTENANCE EXPENDITURE (DISCOUNTED)
# Find O&M costs (discounted) incurred during simulation
#==============================================================================
# PV O&M for entire PV array
def get_PV_OM(self,PV_array_size,start_year=0,end_year=20):
'''
Function:
        Calculates O&M cost of PV over the simulation period
Inputs:
PV_array_size Capacity of PV installed
start_year Start year of simulation period
end_year End year of simulation period
Outputs:
Discounted cost
'''
PV_OM_cost = PV_array_size * self.finance_inputs.loc['PV O&M'] # $ per year
PV_OM_cost_daily = PV_OM_cost / 365.0 # $ per day
total_daily_cost = pd.DataFrame([PV_OM_cost_daily]*(end_year-start_year)*365)
return self.discounted_cost_total(total_daily_cost,start_year,end_year)
# Storage O&M for entire storage system
def get_storage_OM(self,storage_size,start_year=0,end_year=20):
'''
Function:
        Calculates O&M cost of storage over the simulation period
Inputs:
storage_size Capacity of battery storage installed
start_year Start year of simulation period
end_year End year of simulation period
Outputs:
Discounted cost
'''
storage_OM_cost = storage_size * self.finance_inputs.loc['Storage O&M'] # $ per year
storage_OM_cost_daily = storage_OM_cost / 365.0 # $ per day
total_daily_cost = pd.DataFrame([storage_OM_cost_daily]*(end_year-start_year)*365)
return self.discounted_cost_total(total_daily_cost,start_year,end_year)
# Diesel O&M for entire diesel genset
def get_diesel_OM(self,diesel_size,start_year=0,end_year=20):
'''
Function:
        Calculates O&M cost of diesel generation over the simulation period
Inputs:
diesel_size Capacity of diesel generator installed
start_year Start year of simulation period
end_year End year of simulation period
Outputs:
Discounted cost
'''
diesel_OM_cost = diesel_size * self.finance_inputs.loc['Diesel O&M'] # $ per year
diesel_OM_cost_daily = diesel_OM_cost / 365.0 # $ per day
total_daily_cost = pd.DataFrame([diesel_OM_cost_daily]*(end_year-start_year)*365)
return self.discounted_cost_total(total_daily_cost,start_year,end_year)
# General O&M for entire energy system (e.g. for staff, land hire etc.)
def get_general_OM(self,start_year=0,end_year=20):
'''
Function:
        Calculates O&M cost of general components over the simulation period
Inputs:
start_year Start year of simulation period
end_year End year of simulation period
Outputs:
Discounted cost
'''
general_OM_cost = self.finance_inputs.loc['General O&M'] # $ per year
general_OM_cost_daily = general_OM_cost / 365.0 # $ per day
total_daily_cost = | pd.DataFrame([general_OM_cost_daily]*(end_year-start_year)*365) | pandas.DataFrame |
from datetime import datetime
import itertools
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from utils.matrix_convert import MatrixConversion
from calculations.AllMetrics import Metrics
from utils.constants import TYPES
from utils.helpers import remove_offset_from_julian_date, read_csv_to_arrays  # assumption: read_csv_to_arrays also comes from utils.helpers
from params import summer_params
from params import fall_params
from params import spring_params
from params import winter_params
def upload_files(start_date, files, flow_class):
output_files = 'user_output_files'
for file in files:
file_name = output_files + '/' + file.split('/')[1].split('.csv')[0]
dataset = read_csv_to_arrays(file)
matrix = MatrixConversion(
dataset['date'], dataset['flow'], start_date)
julian_start_date = datetime.strptime(
"{}/2001".format(start_date), "%m/%d/%Y").timetuple().tm_yday
result = get_result(matrix, julian_start_date, int(flow_class))
write_to_csv(file_name, result, 'annual_flow_matrix')
write_to_csv(file_name, result, 'drh')
write_to_csv(file_name, result, 'annual_flow_result')
write_to_csv(file_name, result, 'parameters', flow_class)
# draw_plots(file_name, result)
return True
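# Illustrative call (paths, start date and flow class are assumptions, not part of the
# original script); each CSV must provide the date and flow series expected by
# read_csv_to_arrays and sit one directory level deep because of file.split('/')[1]:
# upload_files('10/1', ['user_input_files/example_gage.csv'], flow_class=3)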
def get_result(matrix, julian_start_date, flow_class):
result = {}
result["year_ranges"] = [int(i) + 1 for i in matrix.year_array]
result["flow_matrix"] = np.where(
pd.isnull(matrix.flow_matrix), None, matrix.flow_matrix).tolist()
result["start_date"] = matrix.start_date
calculated_metrics = Metrics(
matrix.flow_matrix, matrix.years_array, None, None, None, flow_class)
result["DRH"] = calculated_metrics.drh
result["all_year"] = {}
result["all_year"]["average_annual_flows"] = calculated_metrics.average_annual_flows
result["all_year"]["standard_deviations"] = calculated_metrics.standard_deviations
result["all_year"]["coefficient_variations"] = calculated_metrics.coefficient_variations
result["winter"] = {}
# Convert key from number to names
key_maps = {50: "fifty", 20: "twenty", 10: "ten", 5: "five", 2: "two"}
# key_maps = {2: "two", 5: "five", 10: "ten", 20: "twenty", 12: "_two", 15: "_five", 110: "_ten", 120: "_twenty"}
winter_timings = {}
winter_durations = {}
winter_magnitudes = {}
winter_frequencys = {}
for key, value in key_maps.items():
winter_timings[value] = list(map(
remove_offset_from_julian_date, calculated_metrics.winter_timings[key], itertools.repeat(julian_start_date)))
winter_timings[value +
'_water'] = calculated_metrics.winter_timings[key]
winter_durations[value] = calculated_metrics.winter_durations[key]
winter_magnitudes[value] = calculated_metrics.winter_magnitudes[key]
winter_frequencys[value] = calculated_metrics.winter_frequencys[key]
result["winter"]["timings"] = winter_timings
result["winter"]["magnitudes"] = winter_magnitudes
result["winter"]["durations"] = winter_durations
result["winter"]["frequencys"] = winter_frequencys
result["fall"] = {}
# result["fall"]["timings_julian"] = list(map(
# remove_offset_from_julian_date, calculated_metrics.fall_timings, itertools.repeat(julian_start_date)))
result["fall"]["magnitudes"] = calculated_metrics.fall_magnitudes
result["fall"]["timings_water"] = calculated_metrics.fall_timings
result["fall"]["durations"] = calculated_metrics.fall_durations
result["summer"] = {}
result["summer"]["magnitudes_fifty"] = calculated_metrics.summer_50_magnitudes
result["summer"]["magnitudes_ninety"] = calculated_metrics.summer_90_magnitudes
result["summer"]["timings_water"] = calculated_metrics.summer_timings
# result["summer"]["timings_julian"] = list(map(
# remove_offset_from_julian_date, calculated_metrics.summer_timings, itertools.repeat(julian_start_date)))
result["summer"]["durations_wet"] = calculated_metrics.summer_wet_durations
# result["summer"]["durations_flush"] = calculated_metrics.summer_flush_durations
result["summer"]["no_flow_counts"] = calculated_metrics.summer_no_flow_counts
result["spring"] = {}
result["spring"]["magnitudes"] = calculated_metrics.spring_magnitudes
# result["spring"]["timings_julian"] = list(map(
# remove_offset_from_julian_date, calculated_metrics.spring_timings, itertools.repeat(julian_start_date)))
result["spring"]["timings_water"] = calculated_metrics.spring_timings
result["spring"]["durations"] = calculated_metrics.spring_durations
result["spring"]["rocs"] = calculated_metrics.spring_rocs
result["wet"] = {}
result["wet"]["baseflows_10"] = calculated_metrics.wet_baseflows_10
result["wet"]["baseflows_50"] = calculated_metrics.wet_baseflows_50
# result["fall"]["wet_timings_julian"] = list(map(
# remove_offset_from_julian_date, calculated_metrics.fall_wet_timings, itertools.repeat(julian_start_date)))
result["wet"]["wet_timings_water"] = calculated_metrics.fall_wet_timings
result["wet"]["bfl_durs"] = calculated_metrics.wet_bfl_durs
return result
def write_to_csv(file_name, result, file_type, *args):
year_ranges = ",".join(str(year) for year in result['year_ranges'])
if file_type == 'annual_flow_matrix':
a = np.array(result['flow_matrix'])
np.savetxt(file_name + '_' + file_type + '.csv', a, delimiter=',',
header=year_ranges, fmt='%s', comments='')
if file_type == 'drh':
dataset = []
for key, value in result['DRH'].items():
data = value
data.insert(0, key)
dataset.append(data)
a = np.array(dataset)
np.savetxt(file_name + '_' + file_type +
'.csv', a, delimiter=',', fmt='%s', comments='')
if file_type == 'annual_flow_result':
# remove summer no_flow from main output but save it for supplementary outputs
summer_no_flow = result['summer']['no_flow_counts']
del result['summer']['no_flow_counts']
dataset = []
# dict_to_array(result['all_year'], 'all_year', dataset)
dict_to_array(result['fall'], 'fall', dataset)
dict_to_array(result['wet'], 'wet', dataset)
dict_to_array(result['winter'], 'winter', dataset)
dict_to_array(result['spring'], 'spring', dataset)
dict_to_array(result['summer'], 'summer', dataset)
a = np.array(dataset)
np.savetxt(file_name + '_' + file_type + '.csv', a, delimiter=',',
fmt='%s', header='Year, ' + year_ranges, comments='')
"""Create supplementary metrics file"""
supplementary = []
supplementary.append(['Avg'] + result['all_year']
['average_annual_flows'])
supplementary.append(['Std'] + result['all_year']
['standard_deviations'])
supplementary.append(['CV'] + result['all_year']
['coefficient_variations'])
supplementary.append(['DS_No_Flow'] + summer_no_flow)
np.savetxt(file_name + '_supplementary_metrics.csv', supplementary, delimiter=',',
fmt='%s', header='Year, ' + year_ranges, comments='')
if file_type == 'parameters':
now = datetime.now()
timestamp = now.strftime("%m/%d/%Y, %H:%M")
flow_class = args
cols = {'Date_time': timestamp, 'Stream_class': flow_class[0]}
df = | pd.DataFrame(cols, index=[0]) | pandas.DataFrame |
""" ### Utilities
A rather bare script, just for labeling new images if you have them.
"""
import os
from skimage import io
import pandas as pd
import config  # assumed: local settings module that defines DATA_DIR (not shown in this snippet)
def label():
"A simple function for adding new data"
files = sorted(os.listdir(config.DATA_DIR))
tot = len(files)
y = []
for i, f in enumerate(files):
        file_path = os.path.join(config.DATA_DIR, f)
io.imshow(file_path)
inp = input(f"{i} of {tot}. Number of squares in image: ")
try:
n = int(inp)
y.append(n)
        except Exception:
            # Re-raise the original error rather than masking it with a bare Exception
            raise
df_out = | pd.DataFrame({"filenames": files, "target": y}) | pandas.DataFrame |
import csv
import json
from glob import glob
from pprint import pprint
import pandas
from numpy import mean
files = glob('*.json')
results = {}
for file in files:
name = file.split(".")[0].split("_")
name = name[1] + " " + name[2]
data = json.load(open(file))
accuracy = mean([max(run["acc"]) for run in data])
uw_accuracy = mean([max(run["un_acc"]) for run in data])
f1 = mean([max(run["f1"]) for run in data])
results[name] = {
"accuracy": accuracy,
"uw_accuracy": uw_accuracy,
"f1": f1,
}
data = | pandas.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
from Bio import PDB
repository = PDB.PDBList()
parser = PDB.PDBParser()
repository.retrieve_pdb_file('1TUP', pdir='.', file_format='pdb')
p53_1tup = parser.get_structure('P 53', 'pdb1tup.ent')
my_residues = set()
for residue in p53_1tup.get_residues():
my_residues.add(residue.id[0])
print(my_residues)
def get_mass(atoms, accept_fun=lambda atom: atom.parent.id[0] != 'W'):
return sum([atom.mass for atom in atoms if accept_fun(atom)])
chain_names = [chain.id for chain in p53_1tup.get_chains()]
my_mass = np.ndarray((len(chain_names), 3))
for i, chain in enumerate(p53_1tup.get_chains()):
my_mass[i, 0] = get_mass(chain.get_atoms())
my_mass[i, 1] = get_mass(chain.get_atoms(), accept_fun=lambda atom: atom.parent.id[0] not in [' ', 'W'])
my_mass[i, 2] = get_mass(chain.get_atoms(), accept_fun=lambda atom: atom.parent.id[0] == 'W')
masses = | pd.DataFrame(my_mass, index=chain_names, columns=['No Water', 'Zincs', 'Water']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import pandas as pd
import numpy as np
import pickle
import os.path
import dateutil.parser
import calendar
from datetime import datetime
# =============================================================================
# Scheduling tool
# =============================================================================
def autoallocate(file, allocate_type='single', filename='', export_to='xlsx'):
"""Read and parse a downloaded doode poll (in '.xls' or '.xlsx') where participants are
able to choose as many timeslots as possible. Automatically allocate participants to a
slot based on their chosen availabilities. Returns dataframe containing the participants'
allocated slots.
Parameters
----------
file : str
Path containing doodle poll file
allocate_type : str
The type of allocation. If 'single', allocates one unique slot to each participant. If
'multiple', allocates multiple slots to each participant.
filename : str
Name of the file containing the participants' allocations.
    export_to : str or bool
        Exported file type. Can be 'xlsx' or 'csv'. Can also be set to False, which will
simply return the dataframe of allocations.
Examples
--------
>>> import autocalendar
>>> file = 'doodle_poll.xls' # from the data folder in the repo
>>> autocalendar.autoallocate(file, allocate_type='multiple', export_to=False)
"""
# Read and parse doodle poll
poll = pd.read_excel(file)
poll.loc[2:4,].fillna(method='ffill', axis=1, inplace=True)
poll = poll.set_index(poll.columns[0])
# Extract all possible datetimes
datetimes = []
dates = []
times = []
for month, date, time in zip(poll.iloc[2], poll.iloc[3], poll.iloc[4]):
exact_date = month + ' ' + date
parsed_date = dateutil.parser.parse(exact_date)
# day = calendar.day_name[parsed_date.weekday()]
dt = parsed_date.strftime("%d/%m/%y") + ', ' + time
datetimes.append(dt)
dates.append(parsed_date)
# days.append(day)
times.append(time)
poll.columns = datetimes
poll = poll.iloc[5:]
# Create empty df for appending assigned dates
empty_df = pd.DataFrame(index=['Timeslots', 'Participant'], columns=datetimes)
empty_df.iloc[0] = times
# Allocate slots
assignments = []
# single allocations
if allocate_type == 'single':
for assigned_date in poll.columns:
# Number of subjects who chose the slot
n_selections = poll[assigned_date].astype(str).str.contains('OK').sum()
if n_selections == 0:
empty_df[assigned_date].Participant = 'No One Assigned'
elif n_selections == 1:
single_name = poll[poll[assigned_date] == 'OK'].index.values[0]
if single_name not in assignments:
# If subject has not been assigned yet
empty_df[assigned_date].Participant = single_name
assignments.append(single_name)
else:
empty_df[assigned_date].Participant = 'No One Assigned'
elif n_selections > 1:
multiple_names = poll[poll[assigned_date] == 'OK'].index.values
chosen_name = np.random.choice(multiple_names)
if chosen_name not in assignments:
empty_df[assigned_date].Participant = chosen_name
assignments.append(chosen_name)
else:
chosen_name_2 = np.random.choice(multiple_names[multiple_names != chosen_name])
if chosen_name_2 not in assignments:
empty_df[assigned_date].Participant = chosen_name_2
assignments.append(chosen_name_2)
else:
empty_df[assigned_date].Participant = 'No One Assigned'
# multiple allocations
elif allocate_type == 'multiple':
for assigned_date in poll.columns:
n_selections = poll[assigned_date].astype(str).str.contains('OK').sum()
if n_selections == 0:
empty_df[assigned_date].Participant = 'No One Assigned'
elif n_selections == 1:
single_name = poll[poll[assigned_date] == 'OK'].index.values[0]
empty_df[assigned_date].Participant = single_name
assignments.append(single_name)
elif n_selections > 1:
multiple_names = poll[poll[assigned_date] == 'OK'].index.values
chosen_name = np.random.choice(multiple_names[multiple_names != max(enumerate(assignments))[1]])
empty_df[assigned_date].Participant = chosen_name
assignments.append(chosen_name)
# prepare output
allocations = empty_df.copy()
allocations.columns = allocations.columns.str.split(', ').str[0] # Only dates as header row
allocations = pd.DataFrame.transpose(allocations)
allocations = allocations.rename(columns={allocations.columns[0]: 'Timeslots' })
allocations.index.names = ['Date']
allocations = allocations.reset_index(level='Date')
# Export
if export_to == 'csv':
allocations.to_csv(filename + '.csv', index=False)
elif export_to == 'xlsx':
allocations.to_excel(filename + '.xlsx', index=False)
elif export_to is False:
return allocations
# Feedback
participants = poll.index[poll.index != 'Count'].tolist()
for participant in participants:
if participant not in assignments:
print(f'{participant}' + ' could not be allocated.')
if len(np.intersect1d(participants, assignments)) == len(participants):
print('All participants successfully allocated.')
# =============================================================================
# Initialize Google API Console Credentials
# =============================================================================
def setup_oath(token_path, client_path):
"""
    token_path and client_path are the paths to the saved token.pkl and the client_secret.json credentials file, respectively.
"""
# Set up credentials
scopes = ['https://www.googleapis.com/auth/calendar']
# Token generated after first time code is run.
if os.path.exists(token_path):
with open(token_path, 'rb') as token:
credentials = pickle.load(token)
else:
credentials = None
# If there are no (valid) credentials available, log in and enter authorization code manually
if not credentials or not credentials.valid:
if credentials and credentials.expired and credentials.refresh_token:
credentials.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(client_path, scopes=scopes)
credentials = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(token_path, 'wb') as token:
pickle.dump(credentials, token)
service = build("calendar", "v3", credentials=credentials)
return service
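# Minimal usage sketch (file names are assumptions):
# service = setup_oath('token.pkl', 'client_secret.json')
# calendars = service.calendarList().list().execute()  # list calendars visible to the account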
# =============================================================================
# Get event information
# =============================================================================
def preprocess_file(file, header_row=1):
"""Tidy excel sheet containing participants' particulars.
    If there are multiple header rows, denote the header row to be selected in `header_row`
    (defaults to 1). For example, `header_row=2` specifies the second row as the main column
    names of interest, which will be needed in `extract_info()`.
"""
participants = | pd.read_excel(file) | pandas.read_excel |
'''
Created on 14.01.2022
@author: <NAME> @UOL/OFFIS
@review: <NAME> @OFFIS
This set of functions model the different power plants and their outputs
in the different markets
#1 Define Power Plants Scenarios
#2 Define Market Scenarios
#3 Build Energy Systems which consider the different Scenarios
#4 Combine Scenarios and present results in a nice dataframe/csv
#5 Dump the scenarios as .oemof files
#6 Save results as graphics
'''
from enum import Enum
from oemof.solph import (EnergySystem, Bus, Sink, Source, Flow)
import pandas as pd
from examples.common import EXAMPLES_DATA_DIR, EXAMPLES_PLOTS_DIR
import matplotlib.pyplot as plt
from examples.district_model_4_markets import get_district_dataframe,\
solve_model, post_process_results
from os.path import join
import logging
try:
from electricity_markets.market_price_generator import create_markets_info
from electricity_markets.electricity_market_constraints import build_model_and_constraints
except Exception:
from src.electricity_markets.market_price_generator import create_markets_info
from src.electricity_markets.electricity_market_constraints import build_model_and_constraints
class PowerPlants(Enum):
'''
Listing of the different power plants available
'''
COAL = 1
GAS = 2
BIOGAS = 3
PV = 4
WIND = 5
def get_boundary_data(year=2020, days=366):
'''
Constructs dataframes with the information for modelling
:param year: Year under consideration
:param days: Days to model. Default is 366 for a leap year
'''
district_df = get_district_dataframe(year=year).head(24 * 4 * days)
# Create Energy System with the dataframe time series
market_data = create_markets_info(
year=year, save_csv=False).head(
days * 24 * 4)
return district_df, market_data
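# Example (illustrative): pull one week of 15-minute district and market data
# district_df, market_data = get_boundary_data(year=2020, days=7)
# assert len(market_data) == 7 * 24 * 4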
def create_energy_system(scenario, district_df, market_data):
'''
Creates an oemof energy system for the input scenario
:param scenario: One of the PowerPlants Scenario
:param district_df: Dataframe with the district information
:param market_data: Dataframe with market prices for each market
'''
meta_data = {}
# Variable costs information, EUR/MWh
meta_data["cv"] = {"coal": 43.92,
"gas": 46.17,
"biogas": 68.15,
"pv": 0,
"wind": 0}
# Max energy values for Renewables based on Installed capacity of 1MW and
# real production as a fraction of 1MW
meta_data["max_energy"] = {
"coal": 1, # MW
"gas": 1, # MW
"biogas": 1, # MW
"wind": district_df["Wind_pu"].values, # MW
"pv": district_df["PV_pu"].values, # MW
}
energy_system = EnergySystem(timeindex=district_df.index)
label = scenario.name.lower()
# create Bus
b_el = Bus(label="b_el_out")
# create Source
source = Source(label="source", outputs={b_el: Flow(
nominal_value=1,
max=meta_data["max_energy"][label],
variable_costs=meta_data["cv"][label])})
# The markets each are modelled as a sink
s_day_ahead = Sink(
label="s_da",
inputs={b_el: Flow(variable_costs=-market_data["day_ahead"].values)})
s_intraday = Sink(
label="s_id",
inputs={b_el: Flow(variable_costs=-market_data["intra_day"].values)})
s_future_base = Sink(
label="s_fb",
inputs={b_el: Flow(variable_costs=-market_data["future_base"].values)})
s_future_peak = Sink(
label="s_fp",
inputs={b_el: Flow(variable_costs=-market_data["future_peak"].values)})
energy_system.add(
b_el,
source,
s_day_ahead,
s_future_base,
s_future_peak,
s_intraday)
return energy_system
def calculate_kpis(results, market_data):
'''
Calculate a set of KPIs and return them as dataframe
:param results: Results dataframe
:param market_data: Market dataframe
'''
total_energy = results.sum() / 4 # Since it it was in 15min intervals
income = {
"income, da": results["b_el_out, s_da"].values *
market_data["day_ahead"].values,
"income, id": results["b_el_out, s_id"].values *
market_data["intra_day"].values,
"income, fb": results["b_el_out, s_fb"].values *
market_data["future_base"].values,
"income, fp": results["b_el_out, s_fp"].values *
market_data["future_peak"].values}
income["income, total"] = income["income, da"] + \
income["income, id"] + income["income, fb"] + income["income, fp"]
income_total = {k: round(v.sum() / 4, 1) for k, v in income.items()}
income_total["average_price EUR/MWh"] = income_total["income, total"] / \
total_energy["source, b_el_out"]
income_total = pd.Series(income_total)
kpis = total_energy.append(income_total)
return kpis
def model_power_plant_scenario(scenario, district_df, market_data, days=365):
'''
    Model a scenario and calculate KPIs based on the given boundary data
:param scenario: Scenario from PowerPlants
:param district_df: Dataframe with information of the District
:param market_data: Market Data with electricity price information
:param days: Number of days to model, starting on 01/01
'''
es = create_energy_system(scenario, district_df, market_data)
model = build_model_and_constraints(es)
solved_model = solve_model(model)
results = post_process_results(solved_model)
kpis = calculate_kpis(results, market_data)
return results, kpis
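# Sketch of how the pieces fit together (scenario choice and horizon are assumptions):
# district_df, market_data = get_boundary_data(year=2020, days=7)
# results, kpis = model_power_plant_scenario(PowerPlants.WIND, district_df, market_data)
# print(kpis["income, total"], kpis["average_price EUR/MWh"])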
def solve_and_write_data(year=2020, days=365):
'''
    Solve the different scenarios and write the data to an XLSX file
:param year: Year of data
:param days: Number of days to model, starting on 01/01
'''
data_path = join(EXAMPLES_DATA_DIR, 'PowerPlantsModels.xlsx')
writer = | pd.ExcelWriter(data_path, engine='xlsxwriter') | pandas.ExcelWriter |
import sqlite3
import datetime
import os
import pandas as pd
import numpy as np
# --------------------------------------------------------------------------------------------
# DATA QUERY FUNCTIONS
def retrieve_accounts(gnucash_file, build_fullname=False) -> pd.DataFrame:
# get all account data from the sqlite3 db
conn = sqlite3.connect(gnucash_file)
df = pd.read_sql_query("SELECT * FROM accounts", conn)
# rename "name" to "account_name", as the former might cause problems
# with the pd.Series.name attribute...
df.rename(columns={"name": "account_name"}, inplace=True)
if build_fullname:
# create full names for each account
# for each account, trace the route to the root account and add all intermediate accounts
full_names = []
for _, row in df.iterrows():
# if this is already a root, do nothing
if row.account_type == "ROOT":
full_names.append("")
continue
# trace path up to root
full_account_name = row["account_name"]
cur_parent_row = df[df.guid == row["parent_guid"]].iloc[0]
while cur_parent_row["account_type"] != "ROOT":
# prepend parent name
full_account_name = cur_parent_row["account_name"] + "/" + full_account_name
# go up to parent
cur_parent_row = df[df.guid == cur_parent_row["parent_guid"]].iloc[0]
# append name
full_names.append(full_account_name)
# add unique names to df
df["full_name"] = full_names
return df
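# Example (illustrative; the file name is an assumption):
# accounts = retrieve_accounts("books.gnucash", build_fullname=True)
# expense_accounts = accounts.loc[accounts.account_type == "EXPENSE", ["account_name", "full_name"]]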
def retrieve_income_expense_transactions(gnucash_filename) -> pd.DataFrame:
conn = sqlite3.connect(gnucash_filename)
query = """
SELECT
transactions.post_date AS tx_date,
splits.quantity_num AS quantity_num,
splits.quantity_denom AS quantity_denom,
accounts.name as account_name,
accounts.account_type as account_type,
accounts.guid as account_guid,
parent_acc.guid as parent_guid,
transactions.description as desc
FROM transactions
INNER JOIN splits
ON splits.tx_guid=transactions.guid
INNER JOIN accounts
ON splits.account_guid=accounts.guid
LEFT JOIN accounts AS parent_acc
ON accounts.parent_guid=parent_acc.guid
WHERE
accounts.account_type IN ('INCOME', 'EXPENSE')
"""
df = pd.read_sql_query(query, conn)
# now fix us a small issues with the dating and amounts:
df.tx_date = pd.to_datetime(df.tx_date)
df["ym"] = df.tx_date.dt.to_period("M")
df["amount"] = df.quantity_num / df.quantity_denom
del df["quantity_num"]
del df["quantity_denom"]
# amount in income transactions is always negative. Flip those.
df.amount = np.where(df.account_type == "INCOME", -df.amount, df.amount)
return df
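# Example (illustrative): monthly totals per account type
# tx = retrieve_income_expense_transactions("books.gnucash")
# monthly = tx.groupby(["ym", "account_type"])["amount"].sum().unstack()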
def retrieve_account_transactions(gnucash_filename, account_guid):
conn = sqlite3.connect(gnucash_filename)
query = """
SELECT
transactions.post_date AS tx_date,
splits.quantity_num AS quantity_num,
splits.quantity_denom AS quantity_denom,
accounts.name as account_name,
accounts.account_type as account_type,
accounts.guid as account_guid,
transactions.description as desc
FROM transactions
INNER JOIN splits
ON splits.tx_guid=transactions.guid
INNER JOIN accounts
ON splits.account_guid=accounts.guid
WHERE
accounts.guid='{}'
""".format(account_guid)
df = pd.read_sql_query(query, conn)
# now fix us a small issues with the dating and amounts:
df.tx_date = | pd.to_datetime(df.tx_date) | pandas.to_datetime |
import numpy as np
import pandas as pd
from gmm_model_fit import gmm_model_fit
def get_fish_info(df):
fishes_IDs = df.index.get_level_values('fish_ID').unique().values
df["distance_to_center"] = np.sqrt(df["bout_x"]**2 + df["bout_y"]**2)
df["correct"] = df["heading_angle_change"].values > 0
extracted_features = []
extracted_binned_features = []
extracted_binned_features_same_direction = []
extracted_binned_features_heading_angle_change_histograms = []
extracted_binned_features_inter_bout_interval_histograms = []
for fish_ID in fishes_IDs:
# Each fish only has a single genotype, so take the first value from the other index level
genotype = df.loc[fish_ID, :, :, :].index.get_level_values(1).values[0]
for stim_ID in range(4):
extracted_features.append([fish_ID,
genotype,
stim_ID,
df.loc[fish_ID, :, :, stim_ID].query("bout_time >= 10 and bout_time < 20 and distance_to_center < 0.95")["correct"].mean()*100,
df.loc[fish_ID, :, :, stim_ID].query("bout_time >= 10 and bout_time < 20 and distance_to_center < 0.95")["inter_bout_interval"].mean()])
for bin in [7, 9, 11, 13, 15, 17, 19, 21, 23]:
values_in_bin = df.loc[fish_ID, :, :, stim_ID].query("bout_time >= @bin - 1 and bout_time < @bin + 1 and distance_to_center < 0.95")["correct"]
extracted_binned_features.append([fish_ID,
genotype,
stim_ID,
bin,
values_in_bin.mean()*100 if len(values_in_bin) > 6 else np.nan])
if stim_ID == 0:
for bin in [0.125, 0.375, 0.625, 0.875, 1.125, 1.375]:
values_in_bin = df.loc[fish_ID, :, :, stim_ID].query("bout_time >= 5 and inter_bout_interval >= @bin - 0.125 and inter_bout_interval < @bin + 0.125 and distance_to_center < 0.95")["same_as_previous"]
extracted_binned_features_same_direction.append([fish_ID,
genotype,
bin,
values_in_bin.mean()*100 if len(values_in_bin) > 6 else np.nan])
# Histogram of angle change
hist, bin_edges = np.histogram(df.loc[fish_ID, :, :, stim_ID].query("bout_time >= 10 and bout_time < 20 and distance_to_center < 0.95")["heading_angle_change"],
bins=np.linspace(-120, 120, 40), density=False)
hist = hist / hist.sum() # Make it a probability histogram
for i in range(len(hist)):
extracted_binned_features_heading_angle_change_histograms.append([fish_ID, genotype, stim_ID, (bin_edges[i] + bin_edges[i + 1]) / 2, hist[i]])
# Histogram of inter-bout interval
hist, bin_edges = np.histogram(df.loc[fish_ID, :, :, stim_ID].query("bout_time >= 10 and bout_time < 20 and distance_to_center < 0.95")["inter_bout_interval"],
bins=np.linspace(0, 5, 40), density=False)
hist = hist / hist.sum() # Make it a probability histogram
for i in range(len(hist)):
extracted_binned_features_inter_bout_interval_histograms.append([fish_ID, genotype, stim_ID, (bin_edges[i] + bin_edges[i + 1]) / 2, hist[i]])
df_extracted_features = pd.DataFrame(extracted_features, columns=["fish_ID",
"genotype",
"stim",
"correctness",
"inter_bout_interval"])
df_extracted_features.set_index(['fish_ID', "genotype", 'stim'], inplace=True)
df_extracted_features.sort_index(inplace=True)
###############
df_extracted_binned_features = pd.DataFrame(extracted_binned_features, columns=["fish_ID",
"genotype",
"stim",
"bin",
"correctness"])
df_extracted_binned_features.set_index(['fish_ID', "genotype", 'stim', "bin"], inplace=True)
df_extracted_binned_features.sort_index(inplace=True)
###############
df_extracted_binned_features_same_direction = pd.DataFrame(extracted_binned_features_same_direction, columns=["fish_ID",
"genotype",
"bin",
"same_direction"])
df_extracted_binned_features_same_direction.set_index(["fish_ID", "genotype", "bin"], inplace=True)
df_extracted_binned_features_same_direction.sort_index(inplace=True)
###############
df_extracted_binned_features_heading_angle_change_histograms = pd.DataFrame(extracted_binned_features_heading_angle_change_histograms,
columns=["fish_ID",
"genotype",
"stim",
"bin",
"probability"])
df_extracted_binned_features_heading_angle_change_histograms.set_index(["fish_ID", "genotype", "stim", "bin"], inplace=True)
df_extracted_binned_features_heading_angle_change_histograms.sort_index(inplace=True)
###############
df_extracted_binned_features_inter_bout_interval_histograms = pd.DataFrame(extracted_binned_features_inter_bout_interval_histograms,
columns=["fish_ID",
"genotype",
"stim",
"bin",
"probability"])
df_extracted_binned_features_inter_bout_interval_histograms.set_index(["fish_ID", "genotype", "stim", "bin"], inplace=True)
df_extracted_binned_features_inter_bout_interval_histograms.sort_index(inplace=True)
df = df_extracted_binned_features_heading_angle_change_histograms.groupby(["stim", "bin"]).mean()
gmm_fitting_results = []
for stim_ID in range(4):
bins = df.loc[stim_ID, :]["probability"].index.values
probabilities = df.loc[stim_ID, :]["probability"].values
fit_w, fit_m, fit_s = gmm_model_fit(bins, probabilities)
gmm_fitting_results.append([stim_ID] + list(fit_w) + list(fit_m) + list(fit_s))
df_gmm_fitting_results = | pd.DataFrame(gmm_fitting_results, columns=["stim", "w_left", "w_center", "w_right", "m_left", "m_center", "m_right", "s_left", "s_center", "s_right"]) | pandas.DataFrame |
from sklearn.model_selection import StratifiedKFold
import pandas as pd
skf = StratifiedKFold(n_splits=10, random_state=48, shuffle=True)
def CV(predictors,target):
for fold, (train_index, test_index) in enumerate(skf.split(predictors, target)):
x_train, x_valid = pd.DataFrame(predictors.iloc[train_index]), | pd.DataFrame(predictors.iloc[test_index]) | pandas.DataFrame |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: pd.Timestamp("2013-04-12 00:00:00"),
346: pd.Timestamp("2013-04-13 00:00:00"),
347: pd.Timestamp("2013-04-14 00:00:00"),
348: pd.Timestamp("2013-04-15 00:00:00"),
349: pd.Timestamp("2013-04-16 00:00:00"),
350: pd.Timestamp("2013-04-17 00:00:00"),
351: pd.Timestamp("2013-04-18 00:00:00"),
352: pd.Timestamp("2013-04-19 00:00:00"),
353: pd.Timestamp("2013-04-20 00:00:00"),
354: pd.Timestamp("2013-04-21 00:00:00"),
355: pd.Timestamp("2013-04-22 00:00:00"),
356: pd.Timestamp("2013-04-23 00:00:00"),
357: pd.Timestamp("2013-04-24 00:00:00"),
358: pd.Timestamp("2013-04-25 00:00:00"),
359: pd.Timestamp("2013-04-26 00:00:00"),
360: pd.Timestamp("2013-04-27 00:00:00"),
361: pd.Timestamp("2013-04-28 00:00:00"),
362: pd.Timestamp("2013-04-29 00:00:00"),
363: pd.Timestamp("2013-04-30 00:00:00"),
364: pd.Timestamp("2013-05-01 00:00:00"),
365: pd.Timestamp("2013-05-02 00:00:00"),
366: pd.Timestamp("2013-05-03 00:00:00"),
367: pd.Timestamp("2013-05-04 00:00:00"),
368: pd.Timestamp("2013-05-05 00:00:00"),
369: pd.Timestamp("2013-05-06 00:00:00"),
370: pd.Timestamp("2013-05-07 00:00:00"),
371: pd.Timestamp("2013-05-08 00:00:00"),
372: pd.Timestamp("2013-05-09 00:00:00"),
373: pd.Timestamp("2013-05-10 00:00:00"),
374: pd.Timestamp("2013-05-11 00:00:00"),
375: pd.Timestamp("2013-05-12 00:00:00"),
376: pd.Timestamp("2013-05-13 00:00:00"),
377: pd.Timestamp("2013-05-14 00:00:00"),
378: pd.Timestamp("2013-05-15 00:00:00"),
379: pd.Timestamp("2013-05-16 00:00:00"),
380: pd.Timestamp("2013-05-17 00:00:00"),
381: pd.Timestamp("2013-05-18 00:00:00"),
382: pd.Timestamp("2013-05-19 00:00:00"),
383: pd.Timestamp("2013-05-20 00:00:00"),
384: pd.Timestamp("2013-05-21 00:00:00"),
385: pd.Timestamp("2013-05-22 00:00:00"),
386: pd.Timestamp("2013-05-23 00:00:00"),
387: pd.Timestamp("2013-05-24 00:00:00"),
388: pd.Timestamp("2013-05-25 00:00:00"),
389: pd.Timestamp("2013-05-26 00:00:00"),
390: pd.Timestamp("2013-05-27 00:00:00"),
391: pd.Timestamp("2013-05-28 00:00:00"),
392: pd.Timestamp("2013-05-29 00:00:00"),
393: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.348604308646497,
1: 8.348964254851197,
2: 8.349324201055898,
3: 8.349684147260598,
4: 8.350044093465298,
5: 8.350404039669998,
6: 8.3507639858747,
7: 8.3511239320794,
8: 8.3514838782841,
9: 8.351843824488801,
10: 8.352203770693501,
11: 8.352563716898201,
12: 8.352923663102903,
13: 8.353283609307603,
14: 8.353643555512303,
15: 8.354003501717003,
16: 8.354363447921704,
17: 8.354723394126404,
18: 8.355083340331104,
19: 8.355443286535806,
20: 8.355803232740506,
21: 8.356163178945206,
22: 8.356523125149906,
23: 8.356883071354607,
24: 8.357243017559307,
25: 8.357602963764007,
26: 8.357962909968709,
27: 8.358322856173409,
28: 8.358682802378109,
29: 8.35904274858281,
30: 8.35940269478751,
31: 8.35976264099221,
32: 8.36012258719691,
33: 8.360482533401612,
34: 8.360842479606312,
35: 8.361202425811012,
36: 8.361562372015714,
37: 8.361922318220413,
38: 8.362282264425113,
39: 8.362642210629813,
40: 8.363002156834515,
41: 8.363362103039215,
42: 8.363722049243915,
43: 8.364081995448617,
44: 8.364441941653316,
45: 8.364801887858016,
46: 8.365161834062716,
47: 8.365521780267418,
48: 8.365881726472118,
49: 8.366241672676818,
50: 8.36660161888152,
51: 8.36696156508622,
52: 8.36732151129092,
53: 8.367681457495621,
54: 8.368041403700321,
55: 8.368401349905021,
56: 8.36876129610972,
57: 8.369121242314423,
58: 8.369481188519122,
59: 8.369841134723822,
60: 8.370201080928524,
61: 8.370561027133224,
62: 8.370920973337924,
63: 8.371280919542624,
64: 8.371640865747326,
65: 8.372000811952026,
66: 8.372360758156725,
67: 8.372720704361427,
68: 8.373080650566127,
69: 8.373440596770827,
70: 8.373800542975529,
71: 8.374160489180229,
72: 8.374520435384929,
73: 8.374880381589628,
74: 8.37524032779433,
75: 8.37560027399903,
76: 8.37596022020373,
77: 8.376320166408432,
78: 8.376680112613132,
79: 8.377040058817832,
80: 8.377400005022531,
81: 8.377759951227233,
82: 8.378119897431933,
83: 8.378479843636633,
84: 8.378839789841335,
85: 8.379199736046035,
86: 8.379559682250735,
87: 8.379919628455436,
88: 8.380279574660136,
89: 8.380639520864836,
90: 8.380999467069536,
91: 8.381359413274238,
92: 8.381719359478938,
93: 8.382079305683638,
94: 8.38243925188834,
95: 8.38279919809304,
96: 8.38315914429774,
97: 8.383519090502439,
98: 8.38387903670714,
99: 8.38423898291184,
100: 8.38459892911654,
101: 8.384958875321242,
102: 8.385318821525942,
103: 8.385678767730642,
104: 8.386038713935344,
105: 8.386398660140044,
106: 8.386758606344744,
107: 8.387118552549444,
108: 8.387478498754145,
109: 8.387838444958845,
110: 8.388198391163545,
111: 8.388558337368247,
112: 8.388918283572947,
113: 8.389278229777647,
114: 8.389638175982347,
115: 8.389998122187048,
116: 8.390358068391748,
117: 8.390718014596448,
118: 8.39107796080115,
119: 8.39143790700585,
120: 8.39179785321055,
121: 8.392157799415251,
122: 8.392517745619951,
123: 8.392877691824651,
124: 8.393237638029351,
125: 8.393597584234053,
126: 8.393957530438753,
127: 8.394317476643453,
128: 8.394677422848154,
129: 8.395037369052854,
130: 8.395397315257554,
131: 8.395757261462254,
132: 8.396117207666956,
133: 8.396477153871656,
134: 8.396837100076356,
135: 8.397197046281057,
136: 8.397556992485757,
137: 8.397916938690457,
138: 8.398276884895157,
139: 8.398636831099859,
140: 8.398996777304559,
141: 8.399356723509259,
142: 8.39971666971396,
143: 8.40007661591866,
144: 8.40043656212336,
145: 8.400796508328062,
146: 8.401156454532762,
147: 8.401516400737462,
148: 8.401876346942162,
149: 8.402236293146863,
150: 8.402596239351563,
151: 8.402956185556263,
152: 8.403316131760965,
153: 8.403676077965665,
154: 8.404036024170365,
155: 8.404395970375065,
156: 8.404755916579767,
157: 8.405115862784466,
158: 8.405475808989166,
159: 8.405835755193868,
160: 8.406195701398568,
161: 8.406555647603268,
162: 8.40691559380797,
163: 8.40727554001267,
164: 8.40763548621737,
165: 8.40799543242207,
166: 8.408355378626771,
167: 8.408715324831471,
168: 8.409075271036171,
169: 8.409435217240873,
170: 8.409795163445573,
171: 8.410155109650272,
172: 8.410515055854972,
173: 8.410875002059674,
174: 8.411234948264374,
175: 8.411594894469074,
176: 8.411954840673776,
177: 8.412314786878476,
178: 8.412674733083175,
179: 8.413034679287877,
180: 8.413394625492577,
181: 8.413754571697277,
182: 8.414114517901977,
183: 8.414474464106679,
184: 8.414834410311379,
185: 8.415194356516078,
186: 8.41555430272078,
187: 8.41591424892548,
188: 8.41627419513018,
189: 8.41663414133488,
190: 8.416994087539582,
191: 8.417354033744282,
192: 8.417713979948982,
193: 8.418073926153683,
194: 8.418433872358383,
195: 8.418793818563083,
196: 8.419153764767785,
197: 8.419513710972485,
198: 8.419873657177185,
199: 8.420233603381885,
200: 8.420593549586586,
201: 8.420953495791286,
202: 8.421313441995986,
203: 8.421673388200688,
204: 8.422033334405388,
205: 8.422393280610088,
206: 8.422753226814788,
207: 8.42311317301949,
208: 8.42347311922419,
209: 8.423833065428889,
210: 8.42419301163359,
211: 8.42455295783829,
212: 8.42491290404299,
213: 8.42527285024769,
214: 8.425632796452392,
215: 8.425992742657092,
216: 8.426352688861792,
217: 8.426712635066494,
218: 8.427072581271194,
219: 8.427432527475894,
220: 8.427792473680595,
221: 8.428152419885295,
222: 8.428512366089995,
223: 8.428872312294695,
224: 8.429232258499397,
225: 8.429592204704097,
226: 8.429952150908797,
227: 8.430312097113498,
228: 8.430672043318198,
229: 8.431031989522898,
230: 8.431391935727598,
231: 8.4317518819323,
232: 8.432111828137,
233: 8.4324717743417,
234: 8.432831720546401,
235: 8.433191666751101,
236: 8.433551612955801,
237: 8.433911559160503,
238: 8.434271505365203,
239: 8.434631451569903,
240: 8.434991397774603,
241: 8.435351343979304,
242: 8.435711290184004,
243: 8.436071236388704,
244: 8.436431182593406,
245: 8.436791128798106,
246: 8.437151075002806,
247: 8.437511021207506,
248: 8.437870967412207,
249: 8.438230913616907,
250: 8.438590859821607,
251: 8.438950806026309,
252: 8.439310752231009,
253: 8.439670698435709,
254: 8.44003064464041,
255: 8.44039059084511,
256: 8.44075053704981,
257: 8.44111048325451,
258: 8.441470429459212,
259: 8.441830375663912,
260: 8.442190321868612,
261: 8.442550268073314,
262: 8.442910214278013,
263: 8.443270160482713,
264: 8.443630106687413,
265: 8.443990052892115,
266: 8.444349999096815,
267: 8.444709945301515,
268: 8.445069891506217,
269: 8.445429837710916,
270: 8.445789783915616,
271: 8.446149730120318,
272: 8.446509676325018,
273: 8.446869622529718,
274: 8.447229568734418,
275: 8.44758951493912,
276: 8.44794946114382,
277: 8.44830940734852,
278: 8.448669353553221,
279: 8.449029299757921,
280: 8.449389245962621,
281: 8.449749192167321,
282: 8.450109138372023,
283: 8.450469084576723,
284: 8.450829030781422,
285: 8.451188976986124,
286: 8.451548923190824,
287: 8.451908869395524,
288: 8.452268815600226,
289: 8.452628761804926,
290: 8.452988708009626,
291: 8.453348654214325,
292: 8.453708600419027,
293: 8.454068546623727,
294: 8.454428492828427,
295: 8.454788439033129,
296: 8.455148385237829,
297: 8.455508331442529,
298: 8.455868277647228,
299: 8.45622822385193,
300: 8.45658817005663,
301: 8.45694811626133,
302: 8.457308062466032,
303: 8.457668008670732,
304: 8.458027954875432,
305: 8.458387901080131,
306: 8.458747847284833,
307: 8.459107793489533,
308: 8.459467739694233,
309: 8.459827685898935,
310: 8.460187632103635,
311: 8.460547578308335,
312: 8.460907524513036,
313: 8.461267470717736,
314: 8.461627416922436,
315: 8.461987363127136,
316: 8.462347309331838,
317: 8.462707255536538,
318: 8.463067201741238,
319: 8.46342714794594,
320: 8.46378709415064,
321: 8.46414704035534,
322: 8.464506986560039,
323: 8.46486693276474,
324: 8.46522687896944,
325: 8.46558682517414,
326: 8.465946771378842,
327: 8.466306717583542,
328: 8.466666663788242,
329: 8.467026609992944,
330: 8.467386556197644,
331: 8.467746502402344,
332: 8.468106448607044,
333: 8.468466394811745,
334: 8.468826341016445,
335: 8.469186287221145,
336: 8.469546233425847,
337: 8.469906179630547,
338: 8.470266125835247,
339: 8.470626072039947,
340: 8.470986018244648,
341: 8.471345964449348,
342: 8.471705910654048,
343: 8.47206585685875,
344: 8.47242580306345,
345: 8.47278574926815,
346: 8.473145695472851,
347: 8.473505641677551,
348: 8.473865587882251,
349: 8.474225534086951,
350: 8.474585480291653,
351: 8.474945426496353,
352: 8.475305372701053,
353: 8.475665318905754,
354: 8.476025265110454,
355: 8.476385211315154,
356: 8.476745157519854,
357: 8.477105103724556,
358: 8.477465049929256,
359: 8.477824996133956,
360: 8.478184942338657,
361: 8.478544888543357,
362: 8.478904834748057,
363: 8.479264780952759,
364: 8.479624727157459,
365: 8.479984673362159,
366: 8.480344619566859,
367: 8.48070456577156,
368: 8.48106451197626,
369: 8.48142445818096,
370: 8.481784404385662,
371: 8.482144350590362,
372: 8.482504296795062,
373: 8.482864242999762,
374: 8.483224189204464,
375: 8.483584135409163,
376: 8.483944081613863,
377: 8.484304027818565,
378: 8.484663974023265,
379: 8.485023920227965,
380: 8.485383866432667,
381: 8.485743812637367,
382: 8.486103758842066,
383: 8.486463705046766,
384: 8.486823651251468,
385: 8.487183597456168,
386: 8.487543543660868,
387: 8.48790348986557,
388: 8.48826343607027,
389: 8.48862338227497,
390: 8.48898332847967,
391: 8.489343274684371,
392: 8.489703220889071,
393: 8.490063167093771,
},
"fcst_lower": {
0: -np.inf,
1: -np.inf,
2: -np.inf,
3: -np.inf,
4: -np.inf,
5: -np.inf,
6: -np.inf,
7: -np.inf,
8: -np.inf,
9: -np.inf,
10: -np.inf,
11: -np.inf,
12: -np.inf,
13: -np.inf,
14: -np.inf,
15: -np.inf,
16: -np.inf,
17: -np.inf,
18: -np.inf,
19: -np.inf,
20: -np.inf,
21: -np.inf,
22: -np.inf,
23: -np.inf,
24: -np.inf,
25: -np.inf,
26: -np.inf,
27: -np.inf,
28: -np.inf,
29: -np.inf,
30: -np.inf,
31: -np.inf,
32: -np.inf,
33: -np.inf,
34: -np.inf,
35: -np.inf,
36: -np.inf,
37: -np.inf,
38: -np.inf,
39: -np.inf,
40: -np.inf,
41: -np.inf,
42: -np.inf,
43: -np.inf,
44: -np.inf,
45: -np.inf,
46: -np.inf,
47: -np.inf,
48: -np.inf,
49: -np.inf,
50: -np.inf,
51: -np.inf,
52: -np.inf,
53: -np.inf,
54: -np.inf,
55: -np.inf,
56: -np.inf,
57: -np.inf,
58: -np.inf,
59: -np.inf,
60: -np.inf,
61: -np.inf,
62: -np.inf,
63: -np.inf,
64: -np.inf,
65: -np.inf,
66: -np.inf,
67: -np.inf,
68: -np.inf,
69: -np.inf,
70: -np.inf,
71: -np.inf,
72: -np.inf,
73: -np.inf,
74: -np.inf,
75: -np.inf,
76: -np.inf,
77: -np.inf,
78: -np.inf,
79: -np.inf,
80: -np.inf,
81: -np.inf,
82: -np.inf,
83: -np.inf,
84: -np.inf,
85: -np.inf,
86: -np.inf,
87: -np.inf,
88: -np.inf,
89: -np.inf,
90: -np.inf,
91: -np.inf,
92: -np.inf,
93: -np.inf,
94: -np.inf,
95: -np.inf,
96: -np.inf,
97: -np.inf,
98: -np.inf,
99: -np.inf,
100: -np.inf,
101: -np.inf,
102: -np.inf,
103: -np.inf,
104: -np.inf,
105: -np.inf,
106: -np.inf,
107: -np.inf,
108: -np.inf,
109: -np.inf,
110: -np.inf,
111: -np.inf,
112: -np.inf,
113: -np.inf,
114: -np.inf,
115: -np.inf,
116: -np.inf,
117: -np.inf,
118: -np.inf,
119: -np.inf,
120: -np.inf,
121: -np.inf,
122: -np.inf,
123: -np.inf,
124: -np.inf,
125: -np.inf,
126: -np.inf,
127: -np.inf,
128: -np.inf,
129: -np.inf,
130: -np.inf,
131: -np.inf,
132: -np.inf,
133: -np.inf,
134: -np.inf,
135: -np.inf,
136: -np.inf,
137: -np.inf,
138: -np.inf,
139: -np.inf,
140: -np.inf,
141: -np.inf,
142: -np.inf,
143: -np.inf,
144: -np.inf,
145: -np.inf,
146: -np.inf,
147: -np.inf,
148: -np.inf,
149: -np.inf,
150: -np.inf,
151: -np.inf,
152: -np.inf,
153: -np.inf,
154: -np.inf,
155: -np.inf,
156: -np.inf,
157: -np.inf,
158: -np.inf,
159: -np.inf,
160: -np.inf,
161: -np.inf,
162: -np.inf,
163: -np.inf,
164: -np.inf,
165: -np.inf,
166: -np.inf,
167: -np.inf,
168: -np.inf,
169: -np.inf,
170: -np.inf,
171: -np.inf,
172: -np.inf,
173: -np.inf,
174: -np.inf,
175: -np.inf,
176: -np.inf,
177: -np.inf,
178: -np.inf,
179: -np.inf,
180: -np.inf,
181: -np.inf,
182: -np.inf,
183: -np.inf,
184: -np.inf,
185: -np.inf,
186: -np.inf,
187: -np.inf,
188: -np.inf,
189: -np.inf,
190: -np.inf,
191: -np.inf,
192: -np.inf,
193: -np.inf,
194: -np.inf,
195: -np.inf,
196: -np.inf,
197: -np.inf,
198: -np.inf,
199: -np.inf,
200: -np.inf,
201: -np.inf,
202: -np.inf,
203: -np.inf,
204: -np.inf,
205: -np.inf,
206: -np.inf,
207: -np.inf,
208: -np.inf,
209: -np.inf,
210: -np.inf,
211: -np.inf,
212: -np.inf,
213: -np.inf,
214: -np.inf,
215: -np.inf,
216: -np.inf,
217: -np.inf,
218: -np.inf,
219: -np.inf,
220: -np.inf,
221: -np.inf,
222: -np.inf,
223: -np.inf,
224: -np.inf,
225: -np.inf,
226: -np.inf,
227: -np.inf,
228: -np.inf,
229: -np.inf,
230: -np.inf,
231: -np.inf,
232: -np.inf,
233: -np.inf,
234: -np.inf,
235: -np.inf,
236: -np.inf,
237: -np.inf,
238: -np.inf,
239: -np.inf,
240: -np.inf,
241: -np.inf,
242: -np.inf,
243: -np.inf,
244: -np.inf,
245: -np.inf,
246: -np.inf,
247: -np.inf,
248: -np.inf,
249: -np.inf,
250: -np.inf,
251: -np.inf,
252: -np.inf,
253: -np.inf,
254: -np.inf,
255: -np.inf,
256: -np.inf,
257: -np.inf,
258: -np.inf,
259: -np.inf,
260: -np.inf,
261: -np.inf,
262: -np.inf,
263: -np.inf,
264: -np.inf,
265: -np.inf,
266: -np.inf,
267: -np.inf,
268: -np.inf,
269: -np.inf,
270: -np.inf,
271: -np.inf,
272: -np.inf,
273: -np.inf,
274: -np.inf,
275: -np.inf,
276: -np.inf,
277: -np.inf,
278: -np.inf,
279: -np.inf,
280: -np.inf,
281: -np.inf,
282: -np.inf,
283: -np.inf,
284: -np.inf,
285: -np.inf,
286: -np.inf,
287: -np.inf,
288: -np.inf,
289: -np.inf,
290: -np.inf,
291: -np.inf,
292: -np.inf,
293: -np.inf,
294: -np.inf,
295: -np.inf,
296: -np.inf,
297: -np.inf,
298: -np.inf,
299: -np.inf,
300: -np.inf,
301: -np.inf,
302: -np.inf,
303: -np.inf,
304: -np.inf,
305: -np.inf,
306: -np.inf,
307: -np.inf,
308: -np.inf,
309: -np.inf,
310: -np.inf,
311: -np.inf,
312: -np.inf,
313: -np.inf,
314: -np.inf,
315: -np.inf,
316: -np.inf,
317: -np.inf,
318: -np.inf,
319: -np.inf,
320: -np.inf,
321: -np.inf,
322: -np.inf,
323: -np.inf,
324: -np.inf,
325: -np.inf,
326: -np.inf,
327: -np.inf,
328: -np.inf,
329: -np.inf,
330: -np.inf,
331: -np.inf,
332: -np.inf,
333: -np.inf,
334: -np.inf,
335: -np.inf,
336: -np.inf,
337: -np.inf,
338: -np.inf,
339: -np.inf,
340: -np.inf,
341: -np.inf,
342: -np.inf,
343: -np.inf,
344: -np.inf,
345: -np.inf,
346: -np.inf,
347: -np.inf,
348: -np.inf,
349: -np.inf,
350: -np.inf,
351: -np.inf,
352: -np.inf,
353: -np.inf,
354: -np.inf,
355: -np.inf,
356: -np.inf,
357: -np.inf,
358: -np.inf,
359: -np.inf,
360: -np.inf,
361: -np.inf,
362: -np.inf,
363: -np.inf,
364: -np.inf,
365: -np.inf,
366: -np.inf,
367: -np.inf,
368: -np.inf,
369: -np.inf,
370: -np.inf,
371: -np.inf,
372: -np.inf,
373: -np.inf,
374: -np.inf,
375: -np.inf,
376: -np.inf,
377: -np.inf,
378: -np.inf,
379: -np.inf,
380: -np.inf,
381: -np.inf,
382: -np.inf,
383: -np.inf,
384: -np.inf,
385: -np.inf,
386: -np.inf,
387: -np.inf,
388: -np.inf,
389: -np.inf,
390: -np.inf,
391: -np.inf,
392: -np.inf,
393: -np.inf,
},
"fcst_upper": {
0: np.inf,
1: np.inf,
2: np.inf,
3: np.inf,
4: np.inf,
5: np.inf,
6: np.inf,
7: np.inf,
8: np.inf,
9: np.inf,
10: np.inf,
11: np.inf,
12: np.inf,
13: np.inf,
14: np.inf,
15: np.inf,
16: np.inf,
17: np.inf,
18: np.inf,
19: np.inf,
20: np.inf,
21: np.inf,
22: np.inf,
23: np.inf,
24: np.inf,
25: np.inf,
26: np.inf,
27: np.inf,
28: np.inf,
29: np.inf,
30: np.inf,
31: np.inf,
32: np.inf,
33: np.inf,
34: np.inf,
35: np.inf,
36: np.inf,
37: np.inf,
38: np.inf,
39: np.inf,
40: np.inf,
41: np.inf,
42: np.inf,
43: np.inf,
44: np.inf,
45: np.inf,
46: np.inf,
47: np.inf,
48: np.inf,
49: np.inf,
50: np.inf,
51: np.inf,
52: np.inf,
53: np.inf,
54: np.inf,
55: np.inf,
56: np.inf,
57: np.inf,
58: np.inf,
59: np.inf,
60: np.inf,
61: np.inf,
62: np.inf,
63: np.inf,
64: np.inf,
65: np.inf,
66: np.inf,
67: np.inf,
68: np.inf,
69: np.inf,
70: np.inf,
71: np.inf,
72: np.inf,
73: np.inf,
74: np.inf,
75: np.inf,
76: np.inf,
77: np.inf,
78: np.inf,
79: np.inf,
80: np.inf,
81: np.inf,
82: np.inf,
83: np.inf,
84: np.inf,
85: np.inf,
86: np.inf,
87: np.inf,
88: np.inf,
89: np.inf,
90: np.inf,
91: np.inf,
92: np.inf,
93: np.inf,
94: np.inf,
95: np.inf,
96: np.inf,
97: np.inf,
98: np.inf,
99: np.inf,
100: np.inf,
101: np.inf,
102: np.inf,
103: np.inf,
104: np.inf,
105: np.inf,
106: np.inf,
107: np.inf,
108: np.inf,
109: np.inf,
110: np.inf,
111: np.inf,
112: np.inf,
113: np.inf,
114: np.inf,
115: np.inf,
116: np.inf,
117: np.inf,
118: np.inf,
119: np.inf,
120: np.inf,
121: np.inf,
122: np.inf,
123: np.inf,
124: np.inf,
125: np.inf,
126: np.inf,
127: np.inf,
128: np.inf,
129: np.inf,
130: np.inf,
131: np.inf,
132: np.inf,
133: np.inf,
134: np.inf,
135: np.inf,
136: np.inf,
137: np.inf,
138: np.inf,
139: np.inf,
140: np.inf,
141: np.inf,
142: np.inf,
143: np.inf,
144: np.inf,
145: np.inf,
146: np.inf,
147: np.inf,
148: np.inf,
149: np.inf,
150: np.inf,
151: np.inf,
152: np.inf,
153: np.inf,
154: np.inf,
155: np.inf,
156: np.inf,
157: np.inf,
158: np.inf,
159: np.inf,
160: np.inf,
161: np.inf,
162: np.inf,
163: np.inf,
164: np.inf,
165: np.inf,
166: np.inf,
167: np.inf,
168: np.inf,
169: np.inf,
170: np.inf,
171: np.inf,
172: np.inf,
173: np.inf,
174: np.inf,
175: np.inf,
176: np.inf,
177: np.inf,
178: np.inf,
179: np.inf,
180: np.inf,
181: np.inf,
182: np.inf,
183: np.inf,
184: np.inf,
185: np.inf,
186: np.inf,
187: np.inf,
188: np.inf,
189: np.inf,
190: np.inf,
191: np.inf,
192: np.inf,
193: np.inf,
194: np.inf,
195: np.inf,
196: np.inf,
197: np.inf,
198: np.inf,
199: np.inf,
200: np.inf,
201: np.inf,
202: np.inf,
203: np.inf,
204: np.inf,
205: np.inf,
206: np.inf,
207: np.inf,
208: np.inf,
209: np.inf,
210: np.inf,
211: np.inf,
212: np.inf,
213: np.inf,
214: np.inf,
215: np.inf,
216: np.inf,
217: np.inf,
218: np.inf,
219: np.inf,
220: np.inf,
221: np.inf,
222: np.inf,
223: np.inf,
224: np.inf,
225: np.inf,
226: np.inf,
227: np.inf,
228: np.inf,
229: np.inf,
230: np.inf,
231: np.inf,
232: np.inf,
233: np.inf,
234: np.inf,
235: np.inf,
236: np.inf,
237: np.inf,
238: np.inf,
239: np.inf,
240: np.inf,
241: np.inf,
242: np.inf,
243: np.inf,
244: np.inf,
245: np.inf,
246: np.inf,
247: np.inf,
248: np.inf,
249: np.inf,
250: np.inf,
251: np.inf,
252: np.inf,
253: np.inf,
254: np.inf,
255: np.inf,
256: np.inf,
257: np.inf,
258: np.inf,
259: np.inf,
260: np.inf,
261: np.inf,
262: np.inf,
263: np.inf,
264: np.inf,
265: np.inf,
266: np.inf,
267: np.inf,
268: np.inf,
269: np.inf,
270: np.inf,
271: np.inf,
272: np.inf,
273: np.inf,
274: np.inf,
275: np.inf,
276: np.inf,
277: np.inf,
278: np.inf,
279: np.inf,
280: np.inf,
281: np.inf,
282: np.inf,
283: np.inf,
284: np.inf,
285: np.inf,
286: np.inf,
287: np.inf,
288: np.inf,
289: np.inf,
290: np.inf,
291: np.inf,
292: np.inf,
293: np.inf,
294: np.inf,
295: np.inf,
296: np.inf,
297: np.inf,
298: np.inf,
299: np.inf,
300: np.inf,
301: np.inf,
302: np.inf,
303: np.inf,
304: np.inf,
305: np.inf,
306: np.inf,
307: np.inf,
308: np.inf,
309: np.inf,
310: np.inf,
311: np.inf,
312: np.inf,
313: np.inf,
314: np.inf,
315: np.inf,
316: np.inf,
317: np.inf,
318: np.inf,
319: np.inf,
320: np.inf,
321: np.inf,
322: np.inf,
323: np.inf,
324: np.inf,
325: np.inf,
326: np.inf,
327: np.inf,
328: np.inf,
329: np.inf,
330: np.inf,
331: np.inf,
332: np.inf,
333: np.inf,
334: np.inf,
335: np.inf,
336: np.inf,
337: np.inf,
338: np.inf,
339: np.inf,
340: np.inf,
341: np.inf,
342: np.inf,
343: np.inf,
344: np.inf,
345: np.inf,
346: np.inf,
347: np.inf,
348: np.inf,
349: np.inf,
350: np.inf,
351: np.inf,
352: np.inf,
353: np.inf,
354: np.inf,
355: np.inf,
356: np.inf,
357: np.inf,
358: np.inf,
359: np.inf,
360: np.inf,
361: np.inf,
362: np.inf,
363: np.inf,
364: np.inf,
365: np.inf,
366: np.inf,
367: np.inf,
368: np.inf,
369: np.inf,
370: np.inf,
371: np.inf,
372: np.inf,
373: np.inf,
374: np.inf,
375: np.inf,
376: np.inf,
377: np.inf,
378: np.inf,
379: np.inf,
380: np.inf,
381: np.inf,
382: np.inf,
383: np.inf,
384: np.inf,
385: np.inf,
386: np.inf,
387: np.inf,
388: np.inf,
389: np.inf,
390: np.inf,
391: np.inf,
392: np.inf,
393: np.inf,
},
}
)
PEYTON_FCST_LINEAR_INVALID_NEG_ONE = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
            234: pd.Timestamp("2012-12-22 00:00:00"),
# import start
import ast
import asyncio
import calendar
import platform
import subprocess as sp
import time
import traceback
import xml.etree.ElementTree as Et
from collections import defaultdict
from datetime import datetime
import math
import numpy as np
import pandas as pd
from Utility.CDPConfigValues import CDPConfigValues
from Utility.Utilities import Utilities
from Utility.WebConstants import WebConstants
from WebConnection.WebConnection import WebConnection
# import end
## Function to reverse a string
#def reverse(string):
# string = string[::-1]
# return string
class Preprocessor:
""" Preprocessor class is used for preparing the extracted data to be fed to the training algorithm
for further processing.
"""
def __init__(self, project, previous_preprocessed_df=None, preprocessed=None):
"""
:param timestamp_column: Contains the committer timestamp
:type timestamp_column: str
:param email_column: Contains the committer timestamp
:type email_column: str
:param project: project key to be processed
:type project: str
:param project_name: project name to be processed
:type project_name: str
:param web_constants: Constants loaded from file
:type web_constants: class WebConstants
:param base_timestamp: Instantiating committer timestamp
:type base_timestamp: str
:param developer_stats_df: creating dataframe variable for developer stats
:type developer_stats_df: pandas dataframe
:param developer_sub_module_stats_df: creating dataframe variable for developer sub module stats
:type developer_sub_module_stats_df: pandas dataframe
"""
self.timestamp_column = "COMMITTER_TIMESTAMP"
self.email_column = "COMMITTER_EMAIL"
self.project = project
self.project_name = CDPConfigValues.configFetcher.get('name', project)
self.web_constants = WebConstants(project)
self.base_timestamp = ""
self.developer_stats_df = ""
self.developer_sub_module_stats_df = ""
if preprocessed is None:
if previous_preprocessed_df is None:
self.file_path = f"{CDPConfigValues.preprocessed_file_path}/{self.project_name}"
self.github_data_dump_df = pd.read_csv(
f"{CDPConfigValues.cdp_dump_path}/{self.project_name}/{CDPConfigValues.commit_details_file_name}")
self.pre_processed_file_path = f"{CDPConfigValues.preprocessed_file_path}/{self.project_name}"
CDPConfigValues.create_directory(self.pre_processed_file_path)
self.stats_dataframe = pd.DataFrame()
self.sub_module_list = list()
else:
self.file_path = f"{CDPConfigValues.schedule_file_path}/{self.project_name}"
self.github_data_dump_df = pd.DataFrame(previous_preprocessed_df)
"""This module contains auxiliary functions for RD predictions used in the main notebook."""
import json
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import statsmodels as sm
import statsmodels.regression.linear_model  # ensure sm.regression.linear_model is importable
from auxiliary.auxiliary_predictions import *
from auxiliary.auxiliary_plots import *
from auxiliary.auxiliary_tables import *
def prepare_data(data):
"""
Adds variables needed for analysis to data.
"""
# Add constant to data to use in regressions later.
data.loc[:, "const"] = 1
# Add dummy for being above the cutoff in next GPA
data["nextGPA_above_cutoff"] = np.NaN
data.loc[data.nextGPA >= 0, "nextGPA_above_cutoff"] = 1
data.loc[data.nextGPA < 0, "nextGPA_above_cutoff"] = 0
# Add dummy for cumulative GPA being above the cutoff
data["nextCGPA_above_cutoff"] = np.NaN
data.loc[data.nextCGPA >= 0, "nextCGPA_above_cutoff"] = 1
data.loc[data.nextCGPA < 0, "nextCGPA_above_cutoff"] = 0
# Remove zeros from total credits for people whose next GPA is missing
data["total_credits_year2"] = data["totcredits_year2"]
data.loc[np.isnan(data.nextGPA) == True, "total_credits_year2"] = np.NaN
# Add variable for campus specific cutoff
data["cutoff"] = 1.5
data.loc[data.loc_campus3 == 1, "cutoff"] = 1.6
return data
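# Illustrative usage sketch (not part of the original module); the column values
# below are invented solely to show the input shape prepare_data expects.
def _example_prepare_data():
    example = pd.DataFrame({
        "nextGPA": [0.3, -0.2, np.nan],
        "nextCGPA": [0.1, -0.5, 0.7],
        "totcredits_year2": [4.0, 5.0, 4.5],
        "loc_campus3": [0, 1, 0],
    })
    prepared = prepare_data(example)
    # prepared now carries "const", the two cutoff dummies, the cleaned
    # "total_credits_year2" column and the campus-specific "cutoff".
    return prepared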
def calculate_bin_frequency(data, bins):
"""
Calculates the frequency of different bins in a dataframe.
Args:
------
data(pd.DataFrame): Dataframe that contains the raw data.
bins(column): Name of column that contains the variable that should be assessed.
Returns:
---------
bin_frequency(pd.DataFrame): Dataframe that contains the frequency of each bin in data and a constant column.
"""
bin_frequency = pd.DataFrame(data[bins].value_counts())
bin_frequency.reset_index(level=0, inplace=True)
bin_frequency.rename(columns={"index": "bins", bins: "freq"}, inplace=True)
bin_frequency = bin_frequency.sort_values(by=["bins"])
bin_frequency["const"] = 1
return bin_frequency
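# Illustrative usage sketch (not part of the original module); "dist_bins" is a
# hypothetical column name used only for this example.
def _example_calculate_bin_frequency():
    example = pd.DataFrame({"dist_bins": [-0.1, -0.1, 0.0, 0.1, 0.1, 0.1]})
    freq = calculate_bin_frequency(example, "dist_bins")
    # Per the renaming above, freq exposes "bins", "freq" and "const", sorted by "bins".
    return freq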
def create_groups_dict(data, keys, columns):
"""
Function creates a dictionary containing different subsets of a dataset. Subsets are created using dummies.
Args:
------
data(pd.DataFrame): Dataset that should be split into subsets.
keys(list): List of keys that should be used in the dataframe.
columns(list): List of dummy variables in dataset that are used for creating subsets.
Returns:
---------
groups_dict(dictionary)
"""
groups_dict = {}
for i in range(len(keys)):
groups_dict[keys[i]] = data[data[columns[i]] == 1]
return groups_dict
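# Illustrative usage sketch (not part of the original module); the dummy column
# names below are hypothetical and only demonstrate the expected inputs.
def _example_create_groups_dict():
    example = pd.DataFrame({
        "male": [1, 0, 1, 0],
        "female": [0, 1, 0, 1],
        "outcome": [0.1, 0.4, 0.2, 0.3],
    })
    groups = create_groups_dict(example, keys=["men", "women"], columns=["male", "female"])
    # groups["men"] holds the rows with male == 1, groups["women"] those with female == 1.
    return groups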
def create_predictions(data, outcome, regressors, bandwidth):
    """
    Run a local linear regression around each distance-from-cutoff step within
    the given bandwidth and collect the predicted outcome at that step.
    """
    steps = np.arange(-1.2, 1.25, 0.05)
predictions_df = pd.DataFrame([])
# Ensure there are no missing values in the outcome variable.
data = data.dropna(subset=[outcome])
# Loop through bins or 'steps'.
for step in steps:
df = data[(data.dist_from_cut >= (step - bandwidth)) &
(data.dist_from_cut <= (step + bandwidth))]
# Run a regression with all values in the range specified above.
model = sm.regression.linear_model.OLS(
df[outcome], df[regressors], hasconst=True)
result = model.fit(cov_type='cluster', cov_kwds={
'groups': df['clustervar']})
# Fill in a row for each step in the prediction dataframe.
predictions_df.loc[step, 'dist_from_cut'] = step
if step < 0:
predictions_df.loc[step, 'gpalscutoff'] = 1
else:
predictions_df.loc[step, 'gpalscutoff'] = 0
predictions_df.loc[step, 'gpaXgpalscutoff'] = (
predictions_df.loc[step, 'dist_from_cut']) * predictions_df.loc[step, 'gpalscutoff']
predictions_df.loc[step, 'gpaXgpagrcutoff'] = (predictions_df.loc[
step, 'dist_from_cut']) * (1 - predictions_df.loc[step, 'gpalscutoff'])
predictions_df.loc[step, 'const'] = 1
# Make prediction for each step based on regression of each step and
# save value in the prediction dataframe.
predictions_df.loc[step, 'prediction'] = result.predict(exog=[[
predictions_df.loc[step, 'const'],
predictions_df.loc[step, 'gpalscutoff'],
predictions_df.loc[step, 'gpaXgpalscutoff'],
predictions_df.loc[step, 'gpaXgpagrcutoff']
]])
predictions_df = predictions_df.round(4)
return predictions_df
def create_bin_frequency_predictions(data, steps, bandwidth):
    """
    Run a local linear regression of bin frequencies on bin values within the
    given bandwidth and collect the predicted frequency at each step.
    """
predictions_df = pd.DataFrame([])
# Loop through bins or 'steps'.
for step in steps:
df = data[(data.bins >= (step - bandwidth)) &
(data.bins <= (step + bandwidth))]
# Run a regression with all values in the range specified above.
model = sm.regression.linear_model.OLS(
df['freq'], df[['const', 'bins']], hasconst=True)
result = model.fit()
# Fill in a row for each step in the prediction dataframe.
predictions_df.loc[step, 'bins'] = step
predictions_df.loc[step, 'const'] = 1
predictions_df.loc[step, 'prediction'] = result.predict(exog=[[predictions_df.loc[step, 'const'],
predictions_df.loc[
step, 'bins'],
]])
predictions_df = predictions_df.round(4)
return predictions_df
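# Illustrative sketch (not part of the original module) chaining the two bin helpers;
# it assumes `data` has a column literally named "bins" and covers the step range used here.
def _example_bin_frequency_predictions(data):
    freq = calculate_bin_frequency(data, "bins")
    steps = np.arange(-1.2, 1.25, 0.05)
    return create_bin_frequency_predictions(freq, steps, bandwidth=0.5)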
def create_fig3_predictions(groups_dict, regressors, bandwidth):
"""
Compute predicted outcomes for figure 3.
"""
predictions_groups_dict = {}
# Loop through groups:
for group in groups_dict:
steps = np.arange(-1.2, 1.25, 0.05)
predictions_df = pd.DataFrame([])
# Loop through bins or 'steps'.
for step in steps:
# Select dataframe from the dictionary.
df = groups_dict[group][(groups_dict[group].dist_from_cut >= (step - bandwidth)) &
(groups_dict[group].dist_from_cut <= (step + bandwidth))]
# Run a regression with all values in the range specified above.
model = sm.regression.linear_model.OLS(
df['left_school'], df[regressors], hasconst=True)
result = model.fit(cov_type='cluster', cov_kwds={
'groups': df['clustervar']})
# Fill in a row for each step in the prediction dataframe.
predictions_df.loc[step, 'dist_from_cut'] = step
if step < 0:
predictions_df.loc[step, 'gpalscutoff'] = 1
else:
predictions_df.loc[step, 'gpalscutoff'] = 0
predictions_df.loc[step, 'gpaXgpalscutoff'] = (
predictions_df.loc[step, 'dist_from_cut']) * predictions_df.loc[step, 'gpalscutoff']
predictions_df.loc[step, 'gpaXgpagrcutoff'] = (
predictions_df.loc[step, 'dist_from_cut']) * (1 - predictions_df.loc[step, 'gpalscutoff'])
predictions_df.loc[step, 'const'] = 1
# Make prediction for each step based on regression of each step
# and save value in the prediction dataframe.
predictions_df.loc[step, 'prediction'] = result.predict(exog=[[
predictions_df.loc[step, 'const'],
predictions_df.loc[step, 'gpalscutoff'],
predictions_df.loc[step, 'gpaXgpalscutoff'],
predictions_df.loc[step, 'gpaXgpagrcutoff']
]])
predictions_df = predictions_df.round(4)
# Save the predictions for all groups in a dictionary.
predictions_groups_dict[group] = predictions_df
return predictions_groups_dict
def bootstrap_predictions(n, data, outcome, regressors, bandwidth):
"""
Compute predicted outcome from bootstrap with replacement.
"""
bootstrap_pred = pd.DataFrame({})
for i in range(0, n):
bootstrap = data.sample(n=len(data), replace=True)
pred = create_predictions(
data=bootstrap, outcome=outcome, regressors=regressors, bandwidth=bandwidth)
bootstrap_pred['pred_' + str(i)] = pred.prediction
return bootstrap_pred
def get_confidence_interval(data, lbound, ubound, index_var):
"""
Compute confidence interval from data of bootstrapped predictions.
"""
confidence_interval = pd.DataFrame({})
for i in data.index:
confidence_interval.loc[i, "lower_bound"] = np.percentile(data.loc[
i, :], lbound)
confidence_interval.loc[i, "upper_bound"] = np.percentile(data.loc[
i, :], ubound)
confidence_interval[index_var] = confidence_interval.index
return confidence_interval
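# Illustrative sketch (not part of the original module) of how the two helpers above
# chain together; the outcome and regressor names are taken from the rest of this file
# and `sample_data` must provide them plus "dist_from_cut" and "clustervar".
def _example_bootstrap_confidence_interval(sample_data):
    boot = bootstrap_predictions(
        n=100,
        data=sample_data,
        outcome="nextGPA",
        regressors=["const", "gpalscutoff", "gpaXgpalscutoff", "gpaXgpagrcutoff"],
        bandwidth=0.5,
    )
    # 95% interval per step, indexed by distance from the cutoff.
    return get_confidence_interval(boot, lbound=2.5, ubound=97.5, index_var="dist_from_cut")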
def bandwidth_sensitivity_summary(
data, outcome, groups_dict_keys, groups_dict_columns, regressors
):
"""
Creates table that summarizes the results for the analysis of bandwidth sensitivity.
"""
bandwidths = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.1, 1.2]
arrays = [
np.array([0.1, 0.1, 0.2, 0.2, 0.3, 0.3, 0.4, 0.4,
0.5, 0.5, 0.6, 0.6, 0.7, 0.7, 0.8, 0.8,
0.9, 0.9, 1, 1, 1.1, 1.1, 1.2, 1.2, ]
),
np.array(["probation", "p-value"] * 12),
]
summary = pd.DataFrame(index=arrays, columns=groups_dict_keys)
for val in bandwidths:
sample = data[abs(data["dist_from_cut"]) < val]
groups_dict = create_groups_dict(
sample, groups_dict_keys, groups_dict_columns)
table = estimate_RDD_multiple_datasets(
groups_dict, groups_dict_keys, outcome, regressors
)
summary.loc[(val, "probation"), :] = table["GPA below cutoff (1)"]
summary.loc[(val, "p-value"), :] = table["P-Value (1)"]
for i in summary.columns:
if not (summary.loc[(val, "p-value"), i] < 0.1):
summary.loc[(val, "p-value"), i] = "."
summary.loc[(val, "probation"), i] = "x"
return summary
def trim_data(groups_dict, trim_perc, case1, case2):
""" Creates trimmed data for upper and lower bound analysis by trimming the top and bottom percent of
students from control or treatment group. This can be used for the upper bound and lower bound.
* For lower bound use `case1 = True` and `case2 = False`
* For upper bound use `case1 = False` and `case2 = True`.
Args:
--------
groups_dict(dictionary): Dictionary that holds all datasets that should be trimmed.
trim_perc(pd.Series/pd.DataFrame): Series or dataframe that specifies, for each dataset in groups_dict,
how much should be trimmed.
case1(True or False): Specifies whether the lower or upper bound should be trimmed in the case where the
trim amount is positive and the control group is trimmed.
case2(True or False): Specifies whether the lower or upper bound should be trimmed in the case where the
trim amount is negative and the treatment group is trimmed.
Returns:
---------
trimmed_dict(dictionary): Dictionary holding the trimmed datasets.
"""
trimmed_dict = {}
for key in groups_dict.keys():
# Create data to be trimmed
data = groups_dict[key].copy()
control = data[data.dist_from_cut >= 0].copy()
treat = data[data.dist_from_cut < 0].copy()
trimamount = float(trim_perc[key])
# Trim control group
if trimamount > 0:
n = round(len(control[control.left_school == 1]) * trimamount)
control.sort_values("nextGPA", inplace=True, ascending=case1)
trimmed_students = control.iloc[0:n]
trimmed_students_ids = list(trimmed_students.identifier)
trimmed_control = control[
control.identifier.isin(trimmed_students_ids) == False
]
df = pd.concat([trimmed_control, treat], axis=0)
# If the trim amount is negative, we need to trim the treatment instead
# of the control group.
elif trimamount < 0:
trimamount = abs(trimamount)
n = round(len(treat[treat.left_school == 1]) * trimamount)
treat.sort_values("nextGPA", inplace=True, ascending=case2)
trimmed_students = treat.iloc[0:n]
trimmed_students_ids = list(trimmed_students.identifier)
trimmed_treat = treat[treat.identifier.isin(
trimmed_students_ids) == False]
df = pd.concat([trimmed_treat, control], axis=0)
#Implementing random forests with feature engineering
# importing numpy, pandas, and matplotlib
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from skbio.stats.composition import clr
import sys
# importing sklearn
from sklearn.model_selection import train_test_split
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.decomposition import PCA
from sklearn.random_projection import GaussianRandomProjection
from sklearn import cluster
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.svm import OneClassSVM
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
# importing keras
import keras
import keras.backend as K
from keras.wrappers.scikit_learn import KerasClassifier
from keras.callbacks import EarlyStopping, ModelCheckpoint, LambdaCallback
from keras.models import Model, load_model
# importing util libraries
import datetime
import time
import math
import os
import importlib
# importing custom library
import DNN_models
import exception_handle
#args
args=sys.argv
#Reading the data - training
df=pd.read_csv("./../Data_21Dec20/species_data.csv",index_col=0)
y_train=pd.read_csv("./../METADATA/data_194.csv",index_col=0)
y_test=pd.read_csv("./../METADATA/data_test.csv",index_col=0)
# Feature-selection LEFSe
f = pd.read_csv("./data/feature_sel_LEFSe/selected_microbes.csv", index_col=0)
from __future__ import print_function
import random
import yfinance as yf
import os
import requests
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
from IPython.display import clear_output
from tqdm import tqdm
import pandas_datareader.data as web
import datetime
import argparse
import multiprocessing as mp
from multiprocessing.pool import ThreadPool
import Stock
input_data_path = 'ticker.txt'
ticker_cik = pd.read_csv(input_data_path, delimiter=',')
sym_cik = ticker_cik.copy(deep=True)
sym_cik.set_index('Ticker', inplace=True)
cik_sym = ticker_cik.copy(deep=True)
cik_sym.set_index('CIK', inplace=True)
#Settings of the algorithm
MARKET_CAP_TR = 5
DAY_CLOSE_TR = 30
VOLUME_TR = 2e6
# Looks up Edgar CIK Number
def symbol_to_cik(symbols):
new_symbols = [i.lower() for i in symbols]
cik = [sym_cik.loc[i, 'CIK'] for i in new_symbols]
return cik
# Looks up Symbol from CIK Number:
def cik_to_symbol(ciks):
tickers = [cik_sym.loc[i, 'Ticker'] for i in ciks]
new_tickers = [i.upper() for i in tickers]
return new_tickers
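# Illustrative usage sketch (not part of the original script); assumes the tickers
# below are present in ticker.txt, which may not hold for every copy of that file.
def _example_cik_lookup():
    ciks = symbol_to_cik(["AAPL", "MSFT"])
    symbols = cik_to_symbol(ciks)
    # Round-tripping through the CIK table should return the upper-case tickers.
    return ciks, symbols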
# Turns URL into Soup object
def to_soup(url):
# Request/urlopen were never imported; use the requests library imported above.
webpage = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'}).content
soup = BeautifulSoup(webpage, 'html.parser')
return soup
def security_size(market_cap):
thresholds = [0, 20e6, 50e6, 100e6, 150e6, 200e6, 500e6, 1e9, 2e9, 5e9, 10e9, 20e9, 1e14]
labels = ['20M-', '50M', '100M', '150M', '200M', '500M', '1B', '2B', '5B', '10B', '20B', '20B+']
for i in range(len(thresholds) - 1):
if (market_cap >= thresholds[i]) & (market_cap < thresholds[i + 1]):
security_category = labels[i]
return security_category
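# Illustrative usage sketch (not part of the original script): buckets a raw market
# cap (in dollars) into the size labels defined above.
def _example_security_size():
    assert security_size(30e6) == '50M'   # falls in the [20M, 50M) bucket
    assert security_size(3e9) == '5B'     # falls in the [2B, 5B) bucket
    return security_size(750e6)           # -> '1B'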
def partition_to_sublists(list_in, partition_size):
# looping till length list_in
for i in range(0, len(list_in), partition_size):
yield list_in[i:i + partition_size]
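# Illustrative usage sketch (not part of the original script): the generator yields
# consecutive chunks of at most partition_size elements.
def _example_partition_to_sublists():
    chunks = list(partition_to_sublists(list(range(7)), 3))
    # -> [[0, 1, 2], [3, 4, 5], [6]]
    return chunks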
def contents_compiler(contents_):
for row in range(len(contents_)):
contents_[row][0] = int(contents_[row][0])
if contents_[row][6][-1] == 'M':
scalar = 1  # value already quoted in millions
else:
scalar = 1000  # presumably billions; convert to millions
contents_[row][6] = int(round(float(contents_[row][6][:-1])) * scalar) # in million
contents_[row][8] = float(contents_[row][8])
contents_[row][9] = float(contents_[row][9][:-1])
contents_[row][10] = int(contents_[row][10].replace(',', ''))
def scan_market(symbols, start_date, end_date=datetime.date.today(),
volume_threshold=1.5, price_change_threshold=0.1, export_file=1):
print('SCAN START for period beginning: ' + start_date)
# if rand_set is True the algorithm checks rand_size random samples
end_yahoo = end_date
start_yahoo = start_date
dataset = pd.DataFrame()
import pandas
import numpy as np
import requests
from sklearn.model_selection import train_test_split
# my_lambdata/my_script.py
from my_mod import enlarge
print("HELLO WORLD")
df = pandas.DataFrame({"State": ['CT', "CO", "CA", "TX"]})
print(df.head())
print("--------")
x = 5
print("NUMBER", x)
print("ENLARGED NUMBER", enlarge(x)) # invoking our function!!
df1 = pandas.read_csv('https://raw.githubusercontent.com/ryanleeallred/datasets/master/Ames%20Housing%20Data/train.csv')
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# x13.py
# @Author : wanhanwan (<EMAIL>)
# @Link : ~
# @Date : 2019/11/24 上午9:53:41
"""
X13 seasonal adjustment.
Notes:
cny.csv records the dates of the Chinese Lunar New Year for past years, currently up to the 2020 Spring Festival.
x13as.exe is the X13 main program.
"""
import os
import pandas as pd
import numpy as np
from pathlib import Path
from statsmodels.tsa.x13 import x13_arima_analysis
from pandas import DataFrame, Series, Timestamp
from pandas.tseries.frequencies import to_offset
from functools import lru_cache
from QuantLib.tools import RollingResultWrapper
curr_path = Path(__file__).parent
@lru_cache()
def get_spring_val(before=10, after=7):  # generates the moving-holiday parameters; the moving holidays are stored in cny.dat
data = pd.read_csv(curr_path/'cny.csv', index_col='rank')
x1 = Series()
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
# TODO: Modify DB to fix 1084
from unittest import TestCase, main
from datetime import datetime
from os import close, remove
from os.path import join, basename, exists
from tempfile import mkstemp
import pandas as pd
from qiita_core.util import qiita_test_checker
from qiita_core.exceptions import IncompetentQiitaDeveloperError
from qiita_db.exceptions import (QiitaDBError, QiitaDBUnknownIDError,
QiitaDBStatusError, QiitaDBLookupError)
from qiita_db.study import Study, StudyPerson
from qiita_db.user import User
from qiita_db.util import get_mountpoint, get_count
from qiita_db.data import BaseData, RawData, PreprocessedData, ProcessedData
from qiita_db.metadata_template import PrepTemplate
@qiita_test_checker()
class BaseDataTests(TestCase):
"""Tests the BaseData class"""
def test_init(self):
"""Raises an error if trying to instantiate the base data"""
with self.assertRaises(IncompetentQiitaDeveloperError):
BaseData(1)
@qiita_test_checker()
class RawDataTests(TestCase):
"""Tests the RawData class"""
def setUp(self):
fd, self.seqs_fp = mkstemp(suffix='_seqs.fastq')
close(fd)
fd, self.barcodes_fp = mkstemp(suffix='_barcodes.fastq')
close(fd)
self.filetype = 2
self.filepaths = [(self.seqs_fp, 1), (self.barcodes_fp, 2)]
_, self.db_test_raw_dir = get_mountpoint('raw_data')[0]
with open(self.seqs_fp, "w") as f:
f.write("\n")
with open(self.barcodes_fp, "w") as f:
f.write("\n")
self._clean_up_files = []
# Create some new PrepTemplates
metadata_dict = {
'SKB8.640193': {'center_name': 'ANL',
'primer': 'GTGCCAGCMGCCGCGGTAA',
'barcode': 'GTCCGCAAGTTA',
'run_prefix': "s_G1_L001_sequences",
'platform': 'ILLUMINA',
'library_construction_protocol': 'AAAA',
'experiment_design_description': 'BBBB'}}
metadata = pd.DataFrame.from_dict(metadata_dict, orient='index')
self.pt1 = PrepTemplate.create(metadata, Study(1), "16S")
self.pt2 = PrepTemplate.create(metadata, Study(1), "18S")
self.prep_templates = [self.pt1, self.pt2]
def tearDown(self):
for f in self._clean_up_files:
remove(f)
def test_create(self):
"""Correctly creates all the rows in the DB for the raw data"""
# Check that the returned object has the correct id
exp_id = get_count("qiita.raw_data") + 1
obs = RawData.create(self.filetype, self.prep_templates,
self.filepaths)
self.assertEqual(obs.id, exp_id)
# Check that the raw data have been correctly added to the DB
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.raw_data WHERE raw_data_id=%d" % exp_id)
# raw_data_id, filetype, link_filepaths_status
self.assertEqual(obs, [[exp_id, 2, 'idle']])
# Check that the raw data has been correctly linked with the prep
# templates
sql = """SELECT prep_template_id
FROM qiita.prep_template
WHERE raw_data_id = %s
ORDER BY prep_template_id"""
obs = self.conn_handler.execute_fetchall(sql, (exp_id,))
self.assertEqual(obs, [[self.pt1.id], [self.pt2.id]])
# Check that the files have been copied to right location
exp_seqs_fp = join(self.db_test_raw_dir,
"%d_%s" % (exp_id, basename(self.seqs_fp)))
self.assertTrue(exists(exp_seqs_fp))
self._clean_up_files.append(exp_seqs_fp)
exp_bc_fp = join(self.db_test_raw_dir,
"%d_%s" % (exp_id, basename(self.barcodes_fp)))
self.assertTrue(exists(exp_bc_fp))
self._clean_up_files.append(exp_bc_fp)
# Check that the filepaths have been correctly added to the DB
top_id = self.conn_handler.execute_fetchone(
"SELECT count(1) FROM qiita.filepath")[0]
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.filepath WHERE filepath_id=%d or "
"filepath_id=%d" % (top_id - 1, top_id))
exp_seqs_fp = "%d_%s" % (exp_id, basename(self.seqs_fp))
exp_bc_fp = "%d_%s" % (exp_id, basename(self.barcodes_fp))
# filepath_id, path, filepath_type_id
exp = [[top_id - 1, exp_seqs_fp, 1, '852952723', 1, 5],
[top_id, exp_bc_fp, 2, '852952723', 1, 5]]
self.assertEqual(obs, exp)
# Check that the raw data have been correctly linked with the filepaths
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.raw_filepath WHERE raw_data_id=%d" % exp_id)
# raw_data_id, filepath_id
self.assertEqual(obs, [[exp_id, top_id - 1], [exp_id, top_id]])
def test_create_error(self):
with self.assertRaises(QiitaDBError):
RawData.create(self.filetype, [PrepTemplate(1)], self.filepaths)
def test_get_filepaths(self):
"""Correctly returns the filepaths to the raw files"""
rd = RawData(1)
obs = rd.get_filepaths()
exp = [
(1, join(self.db_test_raw_dir, '1_s_G1_L001_sequences.fastq.gz'),
"raw_forward_seqs"),
(2, join(self.db_test_raw_dir,
'1_s_G1_L001_sequences_barcodes.fastq.gz'), "raw_barcodes")]
self.assertEqual(obs, exp)
def test_studies(self):
"""Correctly returns the study ids"""
rd = RawData(1)
self.assertEqual(rd.studies, [1])
def test_data_types(self):
"""Correctly returns the data_types of raw_data"""
rd = RawData(1)
self.assertEqual(rd.data_types(), ["18S"])
def test_data_types_id(self):
"""Correctly returns the data_types of raw_data"""
rd = RawData(1)
self.assertEqual(rd.data_types(ret_id=True), [2])
def test_filetype(self):
rd = RawData(1)
self.assertEqual(rd.filetype, "FASTQ")
def test_prep_templates(self):
rd = RawData(1)
self.assertEqual(rd.prep_templates, [1])
def test_link_filepaths_status(self):
rd = RawData(1)
self.assertEqual(rd.link_filepaths_status, 'idle')
def test_link_filepaths_status_setter(self):
rd = RawData(1)
self.assertEqual(rd.link_filepaths_status, 'idle')
rd._set_link_filepaths_status('linking')
self.assertEqual(rd.link_filepaths_status, 'linking')
rd._set_link_filepaths_status('unlinking')
self.assertEqual(rd.link_filepaths_status, 'unlinking')
rd._set_link_filepaths_status('failed: error')
self.assertEqual(rd.link_filepaths_status, 'failed: error')
def test_link_filepaths_status_setter_error(self):
rd = RawData(1)
with self.assertRaises(ValueError):
rd._set_link_filepaths_status('not a valid status')
def test_is_preprocessed(self):
self.assertTrue(RawData(1)._is_preprocessed())
rd = RawData.create(self.filetype, self.prep_templates, self.filepaths)
self.assertFalse(rd._is_preprocessed())
def test_clear_filepaths(self):
rd = RawData.create(self.filetype, [self.pt1], self.filepaths)
self.assertTrue(self.conn_handler.execute_fetchone(
"SELECT EXISTS(SELECT * FROM qiita.raw_filepath "
"WHERE raw_data_id=%s)", (rd.id,))[0])
# add files to clean before cleaning the filepaths
study_id = rd.studies[0]
path_for_removal = join(get_mountpoint("uploads")[0][1], str(study_id))
self._clean_up_files = [join(path_for_removal,
basename(f).split('_', 1)[1])
for _, f, _ in rd.get_filepaths()]
# cleaning the filepaths
rd.clear_filepaths()
self.assertFalse(self.conn_handler.execute_fetchone(
"SELECT EXISTS(SELECT * FROM qiita.raw_filepath "
"WHERE raw_data_id=%s)", (rd.id,))[0])
def test_clear_filepaths_error(self):
with self.assertRaises(QiitaDBError):
RawData(1).clear_filepaths()
def test_exists(self):
self.assertTrue(RawData.exists(1))
self.assertFalse(RawData.exists(1000))
def test_delete_error_no_exists(self):
# the raw data doesn't exist
with self.assertRaises(QiitaDBUnknownIDError):
RawData.delete(1000, 0)
def test_delete_error_raw_data_not_linked(self):
# the raw data and the prep template id are not linked
with self.assertRaises(QiitaDBError):
RawData.delete(1, self.pt2.id)
def test_delete_error_prep_template_no_exists(self):
# the prep template does not exist
with self.assertRaises(QiitaDBError):
RawData.delete(1, 1000)
def test_delete_error_linked_files(self):
# the raw data has linked files
with self.assertRaises(QiitaDBError):
RawData.delete(1, 1)
def test_delete(self):
rd = RawData.create(self.filetype, self.prep_templates,
self.filepaths)
sql_pt = """SELECT prep_template_id
FROM qiita.prep_template
WHERE raw_data_id = %s
ORDER BY prep_template_id"""
obs = self.conn_handler.execute_fetchall(sql_pt, (rd.id,))
self.assertEqual(obs, [[self.pt1.id], [self.pt2.id]])
# This delete call will only unlink the raw data from the prep template
RawData.delete(rd.id, self.pt2.id)
# Check that it successfully unlinks the raw data from pt2
obs = self.conn_handler.execute_fetchall(sql_pt, (rd.id,))
self.assertEqual(obs, [[self.pt1.id]])
self.assertEqual(self.pt2.raw_data, None)
# If we try to remove the RawData now, it should raise an error
# because it still has files attached to it
with self.assertRaises(QiitaDBError):
RawData.delete(rd.id, self.pt1.id)
# Clear the files so we can actually remove the RawData
study_id = rd.studies[0]
path_for_removal = join(get_mountpoint("uploads")[0][1], str(study_id))
self._clean_up_files.extend([join(path_for_removal,
basename(f).split('_', 1)[1])
for _, f, _ in rd.get_filepaths()])
rd.clear_filepaths()
RawData.delete(rd.id, self.pt1.id)
obs = self.conn_handler.execute_fetchall(sql_pt, (rd.id,))
self.assertEqual(obs, [])
# Check that all expected rows have been deleted
sql = """SELECT EXISTS(
SELECT * FROM qiita.raw_filepath
WHERE raw_data_id = %s)"""
self.assertFalse(self.conn_handler.execute_fetchone(sql, (rd.id,))[0])
sql = """SELECT EXISTS(
SELECT * FROM qiita.raw_data
WHERE raw_data_id=%s)"""
self.assertFalse(self.conn_handler.execute_fetchone(sql, (rd.id,))[0])
def test_status(self):
rd = RawData(1)
s = Study(1)
self.assertEqual(rd.status(s), 'private')
# Since the status is inferred from the processed data, change the
# status of the processed data so we can check how it changes in the
# preprocessed data
pd = ProcessedData(1)
pd.status = 'public'
self.assertEqual(rd.status(s), 'public')
# Check that new raw data has sandbox as status since no
# processed data exists for them
rd = RawData.create(self.filetype, self.prep_templates, self.filepaths)
self.assertEqual(rd.status(s), 'sandbox')
def test_status_error(self):
# Let's create a new study, so we can check that the error is raised
# because the new study does not have access to the raw data
info = {
"timeseries_type_id": 1,
"metadata_complete": True,
"mixs_compliant": True,
"number_samples_collected": 25,
"number_samples_promised": 28,
"study_alias": "FCM",
"study_description": "Microbiome of people who eat nothing but "
"fried chicken",
"study_abstract": "Exploring how a high fat diet changes the "
"gut microbiome",
"emp_person_id": StudyPerson(2),
"principal_investigator_id": StudyPerson(3),
"lab_person_id": StudyPerson(1)
}
s = Study.create(User('<EMAIL>'), "Fried chicken microbiome",
[1], info)
rd = RawData(1)
with self.assertRaises(QiitaDBStatusError):
rd.status(s)
@qiita_test_checker()
class PreprocessedDataTests(TestCase):
"""Tests the PreprocessedData class"""
def setUp(self):
self.prep_template = PrepTemplate(1)
self.study = Study(1)
self.params_table = "preprocessed_sequence_illumina_params"
self.params_id = 1
fd, self.fna_fp = mkstemp(suffix='_seqs.fna')
close(fd)
fd, self.qual_fp = mkstemp(suffix='_seqs.qual')
close(fd)
self.filepaths = [(self.fna_fp, 4), (self.qual_fp, 5)]
_, self.db_test_ppd_dir = get_mountpoint(
'preprocessed_data')[0]
self.ebi_submission_accession = "EBI123456-A"
self.ebi_study_accession = "EBI123456-B"
with open(self.fna_fp, "w") as f:
f.write("\n")
with open(self.qual_fp, "w") as f:
f.write("\n")
self._clean_up_files = []
def tearDown(self):
for f in self._clean_up_files:
remove(f)
def test_create(self):
"""Correctly creates all the rows in the DB for preprocessed data"""
# Check that the returned object has the correct id
obs = PreprocessedData.create(
self.study, self.params_table,
self.params_id, self.filepaths, prep_template=self.prep_template,
ebi_submission_accession=self.ebi_submission_accession,
ebi_study_accession=self.ebi_study_accession)
self.assertEqual(obs.id, 3)
# Check that the preprocessed data have been correctly added to the DB
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.preprocessed_data WHERE "
"preprocessed_data_id=3")
# preprocessed_data_id, preprocessed_params_table,
# preprocessed_params_id, submitted_to_insdc_status,
# ebi_submission_accession, ebi_study_accession, data_type_id,
# link_filepaths_status, vamps_status, processing_status
exp = [[3, "preprocessed_sequence_illumina_params", 1,
'not submitted', "EBI123456-A", "EBI123456-B", 2, 'idle',
'not submitted', 'not_processed']]
self.assertEqual(obs, exp)
# Check that the preprocessed data has been linked with its study
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.study_preprocessed_data WHERE "
"preprocessed_data_id=3")
exp = [[1, 3]]
self.assertEqual(obs, exp)
# Check that the files have been copied to right location
exp_fna_fp = join(self.db_test_ppd_dir,
"3_%s" % basename(self.fna_fp))
self.assertTrue(exists(exp_fna_fp))
self._clean_up_files.append(exp_fna_fp)
exp_qual_fp = join(self.db_test_ppd_dir,
"3_%s" % basename(self.qual_fp))
self.assertTrue(exists(exp_qual_fp))
self._clean_up_files.append(exp_qual_fp)
# Check that the filepaths have been correctly added to the DB
obs_id = self.conn_handler.execute_fetchone(
"SELECT count(1) from qiita.filepath")[0]
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.filepath WHERE filepath_id=%d or "
"filepath_id=%d" % (obs_id - 1, obs_id))
exp_fna_fp = "3_%s" % basename(self.fna_fp)
exp_qual_fp = "3_%s" % basename(self.qual_fp)
# filepath_id, path, filepath_type_id
exp = [[obs_id - 1, exp_fna_fp, 4, '852952723', 1, 3],
[obs_id, exp_qual_fp, 5, '852952723', 1, 3]]
self.assertEqual(obs, exp)
def test_create_data_type_only(self):
# Check that the returned object has the correct id
obs = PreprocessedData.create(self.study, self.params_table,
self.params_id, self.filepaths,
data_type="18S")
self.assertEqual(obs.id, 3)
# Check that the preprocessed data have been correctly added to the DB
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.preprocessed_data WHERE "
"preprocessed_data_id=3")
# preprocessed_data_id, preprocessed_params_table,
# preprocessed_params_id, submitted_to_insdc_status,
# ebi_submission_accession, ebi_study_accession, data_type_id,
# link_filepaths_status, vamps_status, processing_status
exp = [[3, "preprocessed_sequence_illumina_params", 1,
'not submitted', None, None, 2, 'idle', 'not submitted',
'not_processed']]
self.assertEqual(obs, exp)
# Check that the preprocessed data has been linked with its study
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.study_preprocessed_data WHERE "
"preprocessed_data_id=3")
exp = [[1, 3]]
self.assertEqual(obs, exp)
# Check that the files have been copied to right location
exp_fna_fp = join(self.db_test_ppd_dir,
"3_%s" % basename(self.fna_fp))
self.assertTrue(exists(exp_fna_fp))
self._clean_up_files.append(exp_fna_fp)
exp_qual_fp = join(self.db_test_ppd_dir,
"3_%s" % basename(self.qual_fp))
self.assertTrue(exists(exp_qual_fp))
self._clean_up_files.append(exp_qual_fp)
# Check that the filepaths have been correctly added to the DB
obs_id = self.conn_handler.execute_fetchone(
"SELECT count(1) from qiita.filepath")[0]
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.filepath WHERE filepath_id=%d or "
"filepath_id=%d" % (obs_id - 1, obs_id))
exp_fna_fp = "3_%s" % basename(self.fna_fp)
exp_qual_fp = "3_%s" % basename(self.qual_fp)
# filepath_id, path, filepath_type_id
exp = [[obs_id - 1, exp_fna_fp, 4, '852952723', 1, 3],
[obs_id, exp_qual_fp, 5, '852952723', 1, 3]]
self.assertEqual(obs, exp)
# Check that the preprocessed data have been correctly
# linked with the filepaths
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.preprocessed_filepath WHERE "
"preprocessed_data_id=3")
# preprocessed_data_id, filepath_id
self.assertEqual(obs, [[3, obs_id - 1], [3, obs_id]])
def test_delete_basic(self):
"""Correctly deletes a preprocessed data"""
# testing regular delete
ppd = PreprocessedData.create(
self.study, self.params_table,
self.params_id, self.filepaths, prep_template=self.prep_template,
ebi_submission_accession=self.ebi_submission_accession,
ebi_study_accession=self.ebi_study_accession)
PreprocessedData.delete(ppd.id)
# testing that the deleted preprocessed data can't be instantiated
with self.assertRaises(QiitaDBUnknownIDError):
PreprocessedData(ppd.id)
# and for completeness testing that it raises an error if ID
# doesn't exist
with self.assertRaises(QiitaDBUnknownIDError):
PreprocessedData.delete(ppd.id)
# testing that we can not remove because the preprocessed data status != sandbox
with self.assertRaises(QiitaDBStatusError):
PreprocessedData.delete(1)
def test_delete_advanced(self):
# testing that we can not remove because the preprocessed data has been
# submitted to EBI or VAMPS
ppd = PreprocessedData.create(
self.study, self.params_table,
self.params_id, self.filepaths, prep_template=self.prep_template,
ebi_submission_accession=self.ebi_submission_accession,
ebi_study_accession=self.ebi_study_accession)
# fails due to VAMPS submission
ppd.update_vamps_status('success')
with self.assertRaises(QiitaDBStatusError):
PreprocessedData.delete(ppd.id)
ppd.update_vamps_status('failed')
# fails due to EBI submission
ppd.update_insdc_status('success', 'AAAA', 'AAAA')
with self.assertRaises(QiitaDBStatusError):
PreprocessedData.delete(ppd.id)
def test_create_error_dynamic_table(self):
"""Raises an error if the preprocessed_params_table does not exist"""
with self.assertRaises(IncompetentQiitaDeveloperError):
PreprocessedData.create(self.study, "foo", self.params_id,
self.filepaths, data_type="18S")
with self.assertRaises(IncompetentQiitaDeveloperError):
PreprocessedData.create(self.study, "preprocessed_foo",
self.params_id, self.filepaths,
data_type="18S")
with self.assertRaises(IncompetentQiitaDeveloperError):
PreprocessedData.create(self.study, "foo_params", self.params_id,
self.filepaths, data_type="18S")
with self.assertRaises(IncompetentQiitaDeveloperError):
PreprocessedData.create(self.study, "preprocessed_foo_params",
self.params_id, self.filepaths,
data_type="18S")
def test_create_error_data_type(self):
with self.assertRaises(QiitaDBLookupError):
PreprocessedData.create(self.study,
"preprocessed_sequence_illumina_params",
self.params_id, self.filepaths,
data_type="Metabolomics")
with self.assertRaises(IncompetentQiitaDeveloperError):
PreprocessedData.create(self.study,
"preprocessed_sequence_illumina_params",
self.params_id, self.filepaths,
data_type="Metabolomics",
prep_template=self.prep_template)
def test_get_filepaths(self):
"""Correctly returns the filepaths to the preprocessed files"""
ppd = PreprocessedData(1)
obs = ppd.get_filepaths()
exp = [(3, join(self.db_test_ppd_dir, '1_seqs.fna'),
"preprocessed_fasta"),
(4, join(self.db_test_ppd_dir, '1_seqs.qual'),
"preprocessed_fastq"),
(5, join(self.db_test_ppd_dir, '1_seqs.demux'),
"preprocessed_demux")]
self.assertItemsEqual(obs, exp)
def test_processed_data(self):
"""Correctly returns the processed data id"""
ppd = PreprocessedData(1)
self.assertEqual(ppd.processed_data, [1])
def test_prep_template(self):
"""Correctly returns the prep template"""
ppd = PreprocessedData(1)
self.assertEqual(ppd.prep_template, 1)
def test_study(self):
"""Correctly returns the study"""
ppd = PreprocessedData(1)
self.assertEqual(ppd.study, 1)
def test_ebi_submission_accession(self):
"""Correctly returns the ebi_submission_accession"""
ppd = PreprocessedData(1)
self.assertEqual(ppd.ebi_submission_accession, 'EBI123456-AA')
def test_ebi_ebi_study_accession(self):
"""Correctly returns the ebi_study_accession"""
ppd = PreprocessedData(1)
self.assertEqual(ppd.ebi_study_accession, 'EBI123456-BB')
def test_set_ebi_submission_accession(self):
new = PreprocessedData.create(
self.study, self.params_table, self.params_id, self.filepaths,
prep_template=self.prep_template,
ebi_submission_accession=self.ebi_submission_accession,
ebi_study_accession=self.ebi_study_accession)
new.ebi_submission_accession = 'EBI12345-CC'
self.assertEqual(new.ebi_submission_accession, 'EBI12345-CC')
def test_ebi_study_accession(self):
new = PreprocessedData.create(
self.study, self.params_table,
self.params_id, self.filepaths, prep_template=self.prep_template,
ebi_submission_accession=self.ebi_submission_accession,
ebi_study_accession=self.ebi_study_accession)
new.ebi_study_accession = 'EBI12345-DD'
self.assertEqual(new.ebi_study_accession, 'EBI12345-DD')
def test_submitted_to_insdc_status(self):
"""submitted_to_insdc_status works correctly"""
# False case
pd = PreprocessedData(1)
self.assertEqual(pd.submitted_to_insdc_status(), 'submitting')
# True case
pd = PreprocessedData(2)
self.assertEqual(pd.submitted_to_insdc_status(), 'not submitted')
def test_update_insdc_status(self):
"""Able to update insdc status"""
pd = PreprocessedData(1)
self.assertEqual(pd.submitted_to_insdc_status(), 'submitting')
pd.update_insdc_status('failed')
self.assertEqual(pd.submitted_to_insdc_status(), 'failed')
pd.update_insdc_status('success', 'foo', 'bar')
self.assertEqual(pd.submitted_to_insdc_status(), 'success')
self.assertEqual(pd.ebi_study_accession, 'foo')
self.assertEqual(pd.ebi_submission_accession, 'bar')
with self.assertRaises(ValueError):
pd.update_insdc_status('not valid state')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from finquant.moving_average import compute_ma, sma, ema, sma_std, ema_std
from finquant.moving_average import plot_bollinger_band
def test_sma():
orig = np.array(
[
[np.nan, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5],
[np.nan, 0.5, 2.5, 6.5, 12.5, 20.5, 30.5, 42.5, 56.5, 72.5],
]
)
dforig = pd.DataFrame({"0": orig[0], "1": orig[1]}).dropna()
l1 = range(10)
l2 = [i ** 2 for i in range(10)]
df = pd.DataFrame({"0": l1, "1": l2})
res = sma(df, span=2).dropna()
assert all((dforig == res).all())
def test_ema():
orig = np.array(
[
[
np.nan,
0.6666666666666666,
1.5555555555555556,
2.5185185185185186,
3.506172839506173,
4.502057613168724,
5.500685871056241,
6.500228623685413,
7.5000762078951375,
8.500025402631714,
],
[
np.nan,
0.6666666666666666,
2.888888888888889,
6.962962962962963,
12.987654320987653,
20.99588477366255,
30.998628257887518,
42.99954275262917,
56.99984758420972,
72.99994919473657,
],
]
)
dforig = pd.DataFrame({"0": orig[0], "1": orig[1]}).dropna()
l1 = range(10)
l2 = [i ** 2 for i in range(10)]
df = pd.DataFrame({"0": l1, "1": l2})
res = ema(df, span=2).dropna()
assert all((abs(dforig - res) <= 1e-15).all())
def test_sma_std():
orig = np.array(
[
[
np.nan,
0.7071067811865476,
0.7071067811865476,
0.7071067811865476,
0.7071067811865476,
0.7071067811865476,
0.7071067811865476,
0.7071067811865476,
0.7071067811865476,
0.7071067811865476,
],
[
np.nan,
0.7071067811865476,
2.1213203435596424,
3.5355339059327378,
4.949747468305833,
6.363961030678928,
7.7781745930520225,
9.192388155425117,
10.606601717798213,
12.020815280171307,
],
]
)
dforig = pd.DataFrame({"0": orig[0], "1": orig[1]}).dropna()
l1 = range(10)
l2 = [i ** 2 for i in range(10)]
df = pd.DataFrame({"0": l1, "1": l2})
res = sma_std(df, span=2).dropna()
assert all((abs(dforig - res) <= 1e-15).all())
def test_ema_std():
orig = np.array(
[
[
np.nan,
0.7071067811865476,
0.9746794344808964,
1.1143420667632726,
1.1785687889316867,
1.20612962779329,
1.217443715603457,
1.2219416913579804,
1.2236866244000921,
1.2243507269461653,
],
[
np.nan,
0.7071067811865476,
2.2693611435820435,
4.280032864205755,
6.511621880314852,
8.846731940915395,
11.231335395956103,
13.640730921938678,
16.063365414263,
18.493615652686387,
],
]
)
dforig = pd.DataFrame({"0": orig[0], "1": orig[1]}).dropna()
l1 = range(10)
l2 = [i ** 2 for i in range(10)]
df = | pd.DataFrame({"0": l1, "1": l2}) | pandas.DataFrame |
"""
Module that can determine different metrics to evaluate an algorithm's success.
"""
import logging
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from datetime import date
logger = logging.getLogger()
sns.set(style="darkgrid")
class ConfusionMatrix:
"""
Class with method for each algorithm that defines
"""
@staticmethod
def active_companies(all_companies, true_companies, selected_companies):
"""
This function labels all the companies present as active [1] or not active [0].
With this function we obtain all the elements of a confusion matrix.
:param true_companies: companies actually answering surveys last week
:param all_companies: the list with ids of all companies present; depending on the problem this can be the
companies present in that week or over the whole period, depending on the design of the validation step
:param selected_companies: companies identified by the algorithm
:return: labels, true_positives, true_negatives, false_negatives, false_positives
"""
# Each company present in the database will be considered.
# Note: all_companies contains all the companies present in that time frame (in practice those with
# more than 5 surveys answered; no more complex analysis is applied).
# Every input dataframe should have ['id', 'name'] as columns.
# Label according to true_companies
all_companies['label_truth'] = 0
if true_companies.shape[0] >= 1:
all_companies.loc[all_companies.id.isin(true_companies['id']), 'label_truth'] = 1
all_companies['label_algorithm'] = 0
all_companies.loc[all_companies.id.isin(selected_companies['id']), 'label_algorithm'] = 1
true_positives = all_companies[(all_companies['label_truth'] == 1) & (all_companies['label_algorithm'] == 1)]
true_negatives = all_companies[(all_companies['label_truth'] == 0) & (all_companies['label_algorithm'] == 0)]
false_negatives = all_companies[(all_companies['label_truth'] == 1) & (all_companies['label_algorithm'] == 0)]
false_positives = all_companies[(all_companies['label_truth'] == 0) & (all_companies['label_algorithm'] == 1)]
return all_companies[
['label_truth', 'label_algorithm']], true_positives, true_negatives, false_negatives, false_positives
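# Illustrative usage sketch (not part of the original module); the ids below are
# invented and only show the expected ['id', 'name'] input shape.
def _example_active_companies():
    all_companies = pd.DataFrame({"id": [1, 2, 3, 4], "name": ["a", "b", "c", "d"]})
    true_companies = pd.DataFrame({"id": [1, 2], "name": ["a", "b"]})
    selected_companies = pd.DataFrame({"id": [2, 3], "name": ["b", "c"]})
    labels, tp, tn, fn, fp = ConfusionMatrix.active_companies(
        all_companies.copy(), true_companies, selected_companies)
    # Company 2 is a true positive, 4 a true negative, 1 a false negative, 3 a false positive.
    return labels, tp, tn, fn, fp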
class StatsLastWeek:
@staticmethod
def companies(companies_last_week, surveys_last_week, users_last_week):
"""
Stats computed from companies, surveys and users for last week.
:param companies_last_week:
:param surveys_last_week:
:param users_last_week:
:return: stats dataframe with days_of_existence, nr_of_users and surveys_weights per company
"""
# Compute the number of users per company
# Clean users first
# Filter the enabled users
users_last_week = users_last_week[users_last_week['is_enabled'] == 1]
# Filter the nan users
users_last_week = users_last_week[~users_last_week.user_id.isna()]
# Filter the deleted users
users_last_week = users_last_week[users_last_week.deleted_at.isna()]
# Keep stats only for the given companies
# Determine days_of_existence
days_of_existence = pd.to_datetime(date.today().strftime("%Y-%m-%d 00:00:00")) - pd.to_datetime(
companies_last_week['created_at'])
# Determine number of users per company
nr_users_per_company = users_last_week.groupby('company_id')['user_id'].count()
viable_surveys = surveys_last_week[surveys_last_week.user_id.isin(users_last_week.user_id)]
viable_surveys_grouped = pd.merge(viable_surveys, users_last_week[['user_id', 'company_id']], on='user_id')
viable_surveys_grouped = viable_surveys_grouped.groupby('company_id')['user_id'].count()
# Determine the normal rate; for that we need the total number of weeks
survey_weights = 100 * viable_surveys_grouped / viable_surveys_grouped.sum()
stats = companies_last_week[['id', 'name']]
stats = pd.concat([stats, days_of_existence], axis=1)
stats.columns = ['company_id', 'company_name', 'days_of_existence']
stats2 = pd.concat([nr_users_per_company, survey_weights], axis=1)
stats2 = stats2.reset_index()
stats2.columns = ['company_id', 'nr_of_users', 'surveys_weights']
stats = pd.merge(stats, stats2, on='company_id')
return stats
class Stats:
@staticmethod
def companies_survey_rates(companies, surveys, company_users, total_weeks=None):
# Keep only enabled, non-deleted users for the given companies
company_users = company_users[company_users['is_enabled'] == 1]
# Filter the nan users
company_users = company_users[~company_users.user_id.isna()]
# Filter the deleted users
company_users = company_users[company_users.deleted_at.isna()]
# Select users from companies
selected_users = company_users[company_users.company_id.isin(companies['id'])]
selected_surveys = surveys[surveys.user_id.isin(selected_users['user_id'])]
# Answering Rate
selected_surveys = pd.merge(selected_surveys, selected_users[['user_id', 'company_id']], on="user_id")
import numpy as np
import os
import csv
import requests
import pandas as pd
import time
import datetime
from stockstats import StockDataFrame as Sdf
from ta import add_all_ta_features
from ta.utils import dropna
from config import config
def load_dataset(*, file_name: str) -> pd.DataFrame:
"""
load csv dataset from path
:return: (df) pandas dataframe
"""
#_data = pd.read_csv(f"{config.DATASET_DIR}/{file_name}")
_data = pd.read_csv(file_name)
return _data
def data_split(df,start,end):
"""
split the dataset into training or testing using date
:param df: (df) pandas dataframe; start, end: date boundaries used for the split
:return: (df) pandas dataframe
"""
data = df[(df.datadate >= start) & (df.datadate < end)]
data=data.sort_values(['datadate','tic'],ignore_index=True)
#data = data[final_columns]
data.index = data.datadate.factorize()[0]
return data
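# Illustrative usage sketch (not part of the original module); assumes "datadate" is
# stored as an integer like 20160101, so it is comparable to the bounds used below.
def _example_data_split(df):
    train = data_split(df, start=20090101, end=20160101)
    trade = data_split(df, start=20160101, end=20210101)
    # Both frames come back sorted by (datadate, tic) and indexed by trading day.
    return train, trade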
def calcualte_price(df):
"""
calculate adjusted close price, open-high-low prices and volume
:param data: (df) pandas dataframe
:return: (df) pandas dataframe
"""
data = df.copy()
data = data[['datadate', 'tic', 'prccd', 'ajexdi', 'prcod', 'prchd', 'prcld', 'cshtrd']]
data['ajexdi'] = data['ajexdi'].apply(lambda x: 1 if x == 0 else x)
data['adjcp'] = data['prccd'] / data['ajexdi']
data['open'] = data['prcod'] / data['ajexdi']
data['high'] = data['prchd'] / data['ajexdi']
data['low'] = data['prcld'] / data['ajexdi']
data['volume'] = data['cshtrd']
data = data[['datadate', 'tic', 'adjcp', 'open', 'high', 'low', 'volume']]
data = data.sort_values(['tic', 'datadate'], ignore_index=True)
return data
def add_technical_indicator(df):
"""
calculate technical indicators
use the stockstats package to add technical indicators
:param data: (df) pandas dataframe
:return: (df) pandas dataframe
"""
stock = Sdf.retype(df.copy())
stock['close'] = stock['adjcp']
unique_ticker = stock.tic.unique()
macd = pd.DataFrame()
rsi = pd.DataFrame()
cci = pd.DataFrame()
dx = pd.DataFrame()
#temp = stock[stock.tic == unique_ticker[0]]['macd']
for i in range(len(unique_ticker)):
## macd
temp_macd = stock[stock.tic == unique_ticker[i]]['macd']
temp_macd = pd.DataFrame(temp_macd)
macd = macd.append(temp_macd, ignore_index=True)
## rsi
temp_rsi = stock[stock.tic == unique_ticker[i]]['rsi_30']
temp_rsi = pd.DataFrame(temp_rsi)