import warnings
import logging
warnings.filterwarnings('ignore', category=FutureWarning)
from .index import build as build_index
from .index import build_from_matrix, LookUpBySurface, LookUpBySurfaceAndContext
from .embeddings.base import load_embeddings, EmbedWithContext
from .ground_truth.data_processor import WikipediaDataset, InputExample, convert_examples_to_features
import click
import numpy as np
import pandas as pd
import dask.dataframe as dd
from tqdm import tqdm as tqdm
import pyarrow as pa
import pyarrow.parquet as pq
from pathlib import Path
from qurator.utils.parallel import run as prun
from numpy.linalg import norm
from numpy.matlib import repmat
import json
import sqlite3
from sklearn.utils import shuffle
from qurator.sbb_ner.models.tokenization import BertTokenizer
from multiprocessing import Semaphore
logger = logging.getLogger(__name__)
@click.command()
@click.argument('all-entities-file', type=click.Path(exists=True), required=True, nargs=1)
@click.argument('embedding-type', type=click.Choice(['fasttext', 'bert', 'flair']), required=True, nargs=1)
@click.argument('entity-type', type=str, required=True, nargs=1)
@click.argument('n-trees', type=int, required=True, nargs=1)
@click.argument('output-path', type=click.Path(exists=True), required=True, nargs=1)
@click.option('--n-processes', type=int, default=6, help='Number of parallel processes. default: 6.')
@click.option('--distance-measure', type=click.Choice(['angular', 'euclidean']), default='angular',
help="default: angular")
@click.option('--split-parts', type=bool, is_flag=True, help="Process entity surfaces in parts.")
@click.option('--model-path', type=click.Path(exists=True),
default=None, help="From where to load the embedding model.")
@click.option('--layers', type=str, default="-1,-2,-3,-4", help="Which layers to use. default -1,-2,-3,-4")
@click.option('--pooling', type=str, default="first", help="How to pool the output for different tokens/words. "
"default: first.")
@click.option('--scalar-mix', type=bool, is_flag=True, help="Use scalar mix of layers.")
@click.option('--max-iter', type=int, default=None, help='Perform only max-iter iterations (for testing purposes). '
'default: process everything.')
def build(all_entities_file, embedding_type, entity_type, n_trees, output_path,
n_processes, distance_measure, split_parts, model_path, layers, pooling, scalar_mix=False, max_iter=None):
"""
Create an approximate nearest neighbour index, based on the surface strings of entities, that enables a fast
lookup of NE candidates.
ALL_ENTITIES_FILE: Pandas DataFrame pickle that contains all entities.
EMBEDDING_TYPE: Type of embedding [ fasttext, bert, flair ]
ENTITY_TYPE: Type of entities, for instance ORG, LOC, PER ...
N_TREES: Number of trees in the approximative nearest neighbour index
OUTPUT_PATH: Where to write the result files.
"""
embeddings = load_embeddings(embedding_type, model_path=model_path, layers=layers, pooling_operation=pooling,
use_scalar_mix=scalar_mix)
build_index(all_entities_file, embeddings, entity_type, n_trees, n_processes, distance_measure, split_parts,
output_path, max_iter)
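# Example invocation (hypothetical console-script name and file paths; the actual
# entry point name depends on this package's setup.py), e.g.:
#   build-index all_entities.pkl fasttext PER 100 ./index-out --n-processes 8 --split-parts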
@click.command()
@click.argument('tagged-parquet', type=click.Path(exists=True), required=True, nargs=1)
@click.argument('embedding-type', type=click.Choice(['fasttext', 'bert']), required=True, nargs=1)
@click.argument('entities_file', type=str, required=True, nargs=1)
@click.argument('ent-type', type=str, required=True, nargs=1)
@click.argument('n-trees', type=int, required=True, nargs=1)
@click.argument('distance-measure', type=click.Choice(['angular', 'euclidean']), required=True, nargs=1)
@click.argument('output-path', type=click.Path(exists=True), required=True, nargs=1)
@click.option('--search-k', type=int, default=50, help="Number of NN to be considered. default: 50.")
@click.option('--max-dist', type=float, default=0.25, help="Maximum permitted NN distance. default: 0.25")
@click.option('--processes', type=int, default=6, help='Number of parallel processes. default: 6.')
@click.option('--save-interval', type=int, default=10000, help='Save result every N steps. default: 10000.')
@click.option('--split-parts', type=bool, is_flag=True, help="Process entity surfaces in parts.")
@click.option('--max-iter', type=float, default=np.inf, help="Number of evaluation iterations. "
"default: evaluate everything.")
@click.option('--model-path', type=click.Path(exists=True),
default=None, help="from where to load the embedding model.")
def evaluate(tagged_parquet, embedding_type, entities_file, ent_type, n_trees,
distance_measure, output_path, search_k, max_dist, processes, save_interval,
split_parts, max_iter, model_path):
"""
Evaluate the NE-lookup performance of an approximate nearest neighbour index.
Runs through many Wikipedia texts where the occurrences of named entities have been marked.
Determines how often the ANN-index manages to provide the correct candidate among the nearest neighbours.
TAGGED_PARQUET: A Parquet file that contains the pre-processed Wikipedia text (see tag_entities2sqlite for details).
EMBEDDING_TYPE: 'fasttext' or 'bert'
ENTITIES_FILE: The entity table as pickled Pandas DataFrame.
ENT_TYPE: What type of entities should be considered, for instance: 'PER', 'LOC' or 'ORG'.
N_TREES: Number of trees in the approximate nearest neighbour index.
DISTANCE_MEASURE: Distance measure of the approximate nearest neighbour index, i.e. 'angular' or 'euclidean'.
OUTPUT_PATH: Where to store the result.
"""
embeddings = load_embeddings(embedding_type, model_path=model_path)
print("Reading entity linking ground-truth file: {}".format(tagged_parquet))
df = dd.read_parquet(tagged_parquet)
print("done.")
data_sequence = tqdm(df.iterrows(), total=len(df))
result_path = '{}/nedstat-embt_{}-entt_{}-nt_{}-dm_{}-sk_{}-md_{}.parquet'.\
format(output_path, embedding_type, ent_type, n_trees, distance_measure, search_k, max_dist)
print("Write result statistics to: {} .".format(result_path))
total_successes = mean_rank = mean_len_rank = 0
results = []
def write_results():
nonlocal results
if len(results) == 0:
return
res = pd.concat(results)
# noinspection PyArgumentList
table = pa.Table.from_pandas(res)
pq.write_to_dataset(table, root_path=result_path)
results = []
for total_processed, (entity_title, ranking) in \
enumerate(LookUpBySurface.run(entities_file, {ent_type: embeddings}, data_sequence, split_parts, processes,
n_trees, distance_measure, output_path, search_k, max_dist)):
# noinspection PyBroadException
try:
mean_len_rank += len(ranking)
ranking['true_title'] = entity_title
hits = ranking.loc[ranking.guessed_title == entity_title].copy()
if len(hits) > 0:
hits['success'] = True
result = hits
total_successes += 1
mean_rank += result['rank'].min()
else:
result = ranking.iloc[[0]].copy()
result['success'] = False
results.append(result)
if len(results) >= save_interval:
write_results()
data_sequence.\
set_description('Total processed: {:.3f}. Success rate: {:.3f}. Mean rank: {:.3f}. '
'Mean len rank: {:.3f}.'. format(total_processed, total_successes / total_processed,
mean_rank / (total_successes + 1e-15),
mean_len_rank / total_processed))
if total_processed > max_iter:
break
except:
print("Error: ", ranking, 'page_tile: ', entity_title)
# raise
write_results()
return result_path
@click.command()
@click.argument('all-entities-file', type=click.Path(exists=True), required=True, nargs=1)
@click.argument('tagged_parquet', type=click.Path(exists=True), required=True, nargs=1)
@click.argument('embedding_type', type=click.Choice(['flair']), required=True, nargs=1)
@click.argument('ent_type', type=str, required=True, nargs=1)
@click.argument('output_path', type=click.Path(exists=True), required=True, nargs=1)
@click.option('--max-iter', type=float, default=np.inf)
@click.option('--processes', type=int, default=6)
@click.option('--w-size', type=int, default=10)
@click.option('--batch-size', type=int, default=100)
@click.option('--start-iteration', type=int, default=100)
def build_context_matrix(all_entities_file, tagged_parquet, embedding_type, ent_type, output_path,
processes=6, save_interval=100000, max_iter=np.inf, w_size=10, batch_size=100,
start_iteration=0):
embeddings = load_embeddings(embedding_type)
print("Reading entity linking ground-truth file: {}.".format(tagged_parquet))
df = dd.read_parquet(tagged_parquet)
print("done.")
data_sequence = tqdm(df.iterrows(), total=len(df))
result_file = '{}/context-embeddings-embt_{}-entt_{}-wsize_{}.pkl'.\
format(output_path, embedding_type, ent_type, w_size)
all_entities = pd.read_pickle(all_entities_file)
all_entities = all_entities.loc[all_entities.TYPE == ent_type]
all_entities = all_entities.reset_index().reset_index().set_index('page_title').sort_index()
context_emb = None # lazy creation
for it, link_result in \
enumerate(
EmbedWithContext.run(embeddings, data_sequence, ent_type, w_size, batch_size,
processes, start_iteration=start_iteration)):
try:
if context_emb is None:
dims = len(link_result.drop(['entity_title', 'count']).astype(np.float32).values)
context_emb = np.zeros([len(all_entities), dims + 1], dtype=np.float32)
if it % save_interval == 0:
print('Saving ...')
pd.DataFrame(context_emb, index=all_entities.index).to_pickle(result_file)
idx = all_entities.loc[link_result.entity_title]['index']
context_emb[idx, 1:] += link_result.drop(['entity_title', 'count']).astype(np.float32).values
context_emb[idx, 0] += float(link_result['count'])
data_sequence.set_description('#entity links processed: {}'.format(it))
except:
print("Error: ", link_result)
raise
if it >= max_iter:
break
pd.DataFrame(context_emb, index=all_entities.index).to_pickle(result_file)
return result_file
@click.command()
@click.argument('context-matrix-file', type=click.Path(exists=True), required=True, nargs=1)
@click.argument('n-trees', type=int, required=True, nargs=1)
@click.argument('distance-measure', type=click.Choice(['angular', 'euclidean']), required=True, nargs=1)
def build_from_context_matrix(context_matrix_file, n_trees, distance_measure):
build_from_matrix(context_matrix_file, distance_measure, n_trees)
def links_per_entity(context_matrix_file):
df = pd.read_pickle(context_matrix_file)
import numpy as np
import nibabel as nib
import os.path as op
import pandas as pd
from glob import glob
from scipy import ndimage
from nilearn.datasets import fetch_atlas_harvard_oxford, load_mni152_template
from nilearn.image import coord_transform
def extract_roi_info(statfile, stat_name=None, out_dir=None, unilateral=True,
minimum_nr_of_vox=20, stat_threshold=None):
"""
Extracts information per ROI for a given statistics-file.
Reads in a thresholded (!) statistics-file (such as a thresholded z- or
t-stat from a FSL first-level directory) and calculates for a set of ROIs
the number of significant voxels included and its maximum value
(+ coordinates). Saves a csv-file in the same directory as the
statistics-file. Assumes that the statistics file is in MNI152 2mm space.
Parameters
----------
statfile : str
Absolute path to statistics-file (nifti) that needs to be evaluated.
stat_name : str
Name for the contrast/stat-file that is being analyzed
out_dir : str
Path to output-directory
unilateral : bool
Whether to use unilateral masks
minimum_nr_of_vox : int
Minimum cluster size (i.e. clusters with fewer voxels than this number
are discarded; also, ROIs containing fewer voxels than this will not
be listed in the CSV).
stat_threshold : int or float
If the stat-file contains uncorrected data, stat_threshold can be used
to set a lower bound.
Returns
-------
df : Dataframe
Dataframe corresponding to the written csv-file.
"""
data = nib.load(statfile).get_data()
if stat_threshold is not None:
data[data < stat_threshold] = 0
if stat_name is None:
stat_name = op.basename(statfile).split('.')[0]
mni_affine = load_mni152_template().affine
sign_mask = np.ones(shape=data.shape)
sign_mask[data < 0] = -1
if unilateral:
cort_rois = fetch_atlas_harvard_oxford('cort-maxprob-thr0-2mm', symmetric_split=True)
subc_rois = fetch_atlas_harvard_oxford('sub-maxprob-thr0-2mm', symmetric_split=True)
else:
cort_rois = fetch_atlas_harvard_oxford('cort-maxprob-thr0-2mm', symmetric_split=False)
subc_rois = fetch_atlas_harvard_oxford('sub-maxprob-thr0-2mm', symmetric_split=False)
IGNORE_ROIS = ['Cerebral White Matter', 'Cerebral Cortex', 'Background', 'Ventricle',
'Ventrical']
# Start clustering of data
clustered_data, _ = ndimage.label(data > 0, structure=np.ones((3, 3, 3)))
cluster_ids, cluster_sizes = np.unique(clustered_data.ravel(), return_counts=True)
cluster_ids = cluster_ids[cluster_sizes.argsort()[::-1]][1:]
if len(cluster_ids) == 0:
print("Found 0 clusters!")
return
stats_dfs = []
for i, cluster_id in enumerate(cluster_ids): # largest to smallest
cluster_idx = clustered_data == cluster_id
cluster_max = data[cluster_idx].max()
cluster_size = cluster_idx.sum()
tmp = np.zeros(data.shape)
tmp[cluster_idx] = data[cluster_idx] == cluster_max
# in case of multiple voxels with same stat / weight
if np.sum(tmp == 1) > 1:
X, Y, Z = [coord[0] for coord in np.where(tmp == 1)]
else:
X, Y, Z = np.where(tmp == 1)
# convert to MNI-coordinates
X, Y, Z = coord_transform(X, Y, Z, mni_affine)
stats_dict = {
'Region': [],
'K': [],
'Max.': [],
'Division': []
}
for atlas, a_name in [(cort_rois, 'cort'), (subc_rois, 'scort')]:
atlas_map = atlas['maps'].get_data()
labels = atlas['labels']
for ii, roi in enumerate(labels):
if any(roi2ignore in roi for roi2ignore in IGNORE_ROIS):
continue
roi_idx = atlas_map == ii
overlap_idx = np.logical_and(cluster_idx, roi_idx)
n_vox_per_roi = overlap_idx.sum()
if n_vox_per_roi > minimum_nr_of_vox:
max_stat = data[overlap_idx].max()
stats_dict['Region'].append(roi)
stats_dict['K'].append(n_vox_per_roi)
stats_dict['Max.'].append(max_stat)
stats_dict['Division'].append(a_name)
stats_df = pd.DataFrame(stats_dict)
stats_df = stats_df.sort_values(by=['Division', 'K'], ascending=[True, False], axis=0)
if stats_df.shape[0] == 0:
continue
for col in ['Cluster nr.', 'Cluster size', 'Cluster max.', 'X', 'Y', 'Z']:
stats_df[col] = np.nan
stats_df.loc[stats_df.index[0], 'Cluster nr.'] = i+1
stats_df.loc[stats_df.index[0], 'Cluster size'] = cluster_size
stats_df.loc[stats_df.index[0], 'Cluster max.'] = cluster_max
stats_df.loc[stats_df.index[0], 'X'] = X
stats_df.loc[stats_df.index[0], 'Y'] = Y
stats_df.loc[stats_df.index[0], 'Z'] = Z
stats_df = stats_df.append(pd.Series([np.nan]), ignore_index=True)
import pandas as pd
import numpy as np
#modify file name you want to split into train and test set
data = pd.read_csv('./gravel_clay_class3_total.csv')
import time  # import the time module
import pandas as pd
import re
import sqlparse
attributeNameArray = ['tableName', 'createTime', 'lastModifyTime', 'owner', 'rowNumber', 'columnNumber',
'primaryKey', 'uniqueKey', 'foreignKey', 'notNullColumn', 'indexColumn', 'columnDataType']
remarksList = ['表名', '创建时间', '最后修改时间', '所有者', '数据行数', '字段数', '主键',
'唯一键', '外键', '不能为空字段', '索引字段', '数据类型']
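# (remarksList holds the Chinese display names of the attributes above: table name, creation time,
# last modified time, owner, row count, column count, primary key, unique key, foreign key,
# NOT NULL columns, index columns, column data types)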
# This is a custom concatenation helper; it is called inside str2TableClass
def myConcat(array: list, separator: str):
temp = ""
for i in range(0, len(array)):
temp += array[i] + separator
temp = temp[:-1]
return temp
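# Note: the loop above is equivalent to str.join for lists of strings; assuming every
# element is a string, separator.join(array) would produce the same result.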
# This function parses the incoming "create table" statement with regexes and breaks the data apart; called by tableInit
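# Example (hypothetical input): a statement such as
#   create table student (id int, name varchar, primary key (id), unique key (name))
# yields primaryKey="id", uniqueKey="name" and columnDataType "id#int,name#varchar".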
def str2TableClass(tempStr: str, tableName: str):
tempStr = re.search(r"[(](.*)[)]", tempStr).group(1)  # grab the content inside the outer parentheses
primaryKey = ""
uniqueKey = ""
foreignKey = ""
# primary key part
p1 = re.search(r"primary key(.*?)[(](.*?)[)]", tempStr)
# print(p1.group(0))
# print(p1.group(2) + " primary key value")
if p1 is not None:
primaryKey = p1.group(2).strip()
primaryKeyList = primaryKey.split(",")
for index, ele in enumerate(primaryKeyList):
primaryKeyList[index] = ele.strip()
primaryKey = myConcat(primaryKeyList, ",")
tempStr = re.sub(r"primary key(.*?)[(](.*?)[)]", "", tempStr) # 删除primary key 防止影响到后边内容
# unique key部分
p2 = re.search(r"unique key(.*?)[(](.*?)[)]", tempStr)
# print(p2.group(0))
# print(p2.group(2) + " unique key value")
if p2 is not None:
uniqueKey = p2.group(2)
tempStr = re.sub(r"unique key(.*?)[(](.*?)[)]", "", tempStr)
# foreign key part; this actually has a bug: there can be multiple foreign keys, but re.search here only finds one
p3 = re.search(r"foreign key(.*?)[(](.*?)[)](.*?)references(.*?)[(](.*?)[)]", tempStr)
# print(p2.group(0))
# print(p2.group(2) + " key columns in the current table")
# print(p2.group(4).strip() + " name of the referenced table")
# print(p2.group(5).strip() + " key in the foreign table")
if p3 is not None:
foreignKey = p3.group(2) + "|" + p3.group(4).strip() + "|" + p3.group(5).strip()
tempStr = re.sub(r"foreign key(.*?)[(](.*?)[)](.*?)references(.*?)[(](.*?)[)]", "", tempStr)
# split the rest; the remaining pieces are strings like "school varchar not null", "age int", or pure whitespace
array = tempStr.split(",")
tempArray = []  # temporarily holds trimmed entries such as "school varchar not null"
columnCount = 0  # counts how many columns there are, since some entries are all whitespace
for ele in array:
if not ele.isspace():  # built-in method: True when the string is all whitespace
columnCount += 1  # count the columns, since some entries are all whitespace
tempArray.append(ele.strip())  # strip leading and trailing whitespace
columnNameArray = []  # column name array
columnDataTypeArray = []  # column data type array
notNullColumn = []  # columns that were declared NOT NULL
for ele in tempArray:
p = re.search(r"(.*?)not( +)null", ele)
if p is None:
arrayAA = re.split(r" +", ele.strip())
else:
arrayAA = re.split(r" +", p.group(1).strip())
notNullColumn.append(arrayAA[0])
# append the extracted column name and column type
columnNameArray.append(arrayAA[0])
columnDataTypeArray.append(arrayAA[1])
uniqueKeyList = uniqueKey.strip().split(",")
uniqueKey = myConcat(uniqueKeyList, ",")
# myConcat is the helper defined above; it joins the NOT NULL columns into a string such as "school,home"
notNullColumnStr = myConcat(notNullColumn, ",")
notNullColumnStr += "," + primaryKey + "," +uniqueKey # 加上主键也不能为空
# 拼接成形如 id#int,name#varchar,age#int,school#varchar,home#varchar,aad#varchar 的字符串
# 前边是 字段名称 后边是字段类型 两者用#分割 不同字段之间用, 分割
temp = ""
for i in range(0, len(columnNameArray)):
temp += columnNameArray[i] + "#" + columnDataTypeArray[i] + ","
columnDataTypeArrayStr = temp[:-1]
# build a Table object; very handy to work with
print(tempStr)
tableTemp = Table(tableName=tableName,
createTime=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
lastModifyTime=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
owner="root", rowNumber=0, columnNumber=columnCount,
primaryKey=primaryKey, uniqueKey=uniqueKey, foreignKey=foreignKey,
notNullColumn=notNullColumnStr, indexColumn="", columnDataType=columnDataTypeArrayStr)
# store some extra information on the object; it will be used later
tableTemp.columnNameArray = columnNameArray
tableTemp.columnDataTypeArray = columnDataTypeArray
return tableTemp
# Initializes a table: mainly extracts the data and then writes the related information into an Excel workbook
def tableInit(databaseLocation: str, databaseName: str, currentIndex: int, tokens):
for index in range(currentIndex, len(tokens)):
while str(tokens[index].ttype) != "None":
index += 1
tableName = str(tokens[index].tokens[0])
tempStr = str(tokens[index])
break
# use an ExcelWriter so existing content is not overwritten and two worksheets (sheets) can be written
src = databaseLocation + "\\" + databaseName.upper() + "\\" + tableName + ".xlsx"
writer = pd.ExcelWriter(src, engine='openpyxl')
initTableAttributeObject = str2TableClass(tempStr, tableName)
tempArray = list(range(1, len(attributeNameArray) + 1))  # needed for the index column
s1 = pd.Series(tempArray, index=tempArray, name="index")  # the index column; 12 attributes in total
s2 = pd.Series(attributeNameArray, index=tempArray, name="attribute")
# ----------------------------------------------------------------------------
# Copyright (c) 2020, <NAME>.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import pandas as pd
import pkg_resources
from pandas.testing import assert_frame_equal
from Xclusion_criteria.xclusion_crits import (
check_in_md,
check_factors,
check_index,
check_islist,
check_key,
check_numeric_indicator,
check_var_in_md,
get_criteria,
do_filtering
)
ROOT = pkg_resources.resource_filename('Xclusion_criteria', 'tests')
class TestCrits(unittest.TestCase):
def setUp(self):
self.messages = []
self.criteria = {}
self.md = pd.DataFrame({
'antibiotic_history': ['Yes', 'No', 'No'],
'col2': [1., 2., 3.],
'col3': [1.3, 1, 'missing']
})
self.nulls = ['missing', 'not applicable']
def test_check_in_md(self):
check_in_md(
['var_1', 'var_2', 'var_3'],
['var_1', 'var_2'],
self.criteria,
self.messages,
['missing', 'not applicable'],
'init'
)
criteria = {'init': {
('var_1', '0'): ['missing', 'not applicable'],
('var_2', '0'): ['missing', 'not applicable']
}
}
messages = ['Variable var_3 not in metadata (skipped)']
self.assertEqual(self.criteria, criteria)
self.assertEqual(self.messages, messages)
def test_check_factors(self):
test_message = []
test_boolean, test_values = check_factors('col', '2', ['what', 'ever'], self.nulls, pd.DataFrame({'col': ['anything', 'else']}), test_message)
self.assertEqual(test_boolean, False)
self.assertEqual(test_values, ['what', 'ever'])
self.assertEqual(test_message, [])
test_message = []
test_boolean, test_values = check_factors('col', '1', ['f4', 'f5'], self.nulls, pd.DataFrame({'col': ['f1', 'f2']}), test_message)
self.assertEqual(test_boolean, True)
self.assertEqual(test_values, [])
self.assertEqual(test_message, ['Subset values for variable col not in table (skipped)'])
test_message = []
test_boolean, test_values = check_factors('col', '1', ['f1', 'f2', 'f3'], self.nulls, pd.DataFrame({'col': ['f1', 'f2']}), test_message)
self.assertEqual(test_boolean, False)
self.assertEqual(test_values, ['f1', 'f2'])
self.assertEqual(test_message, ['[Warning] Subset values for variable col not in table\n - f3'])
def test_check_index(self):
test_boolean = check_index('0', ['f1', 'f2', 'f3'], [])
self.assertEqual(test_boolean, False)
test_boolean = check_index('1', ['f1', 'f2', 'f3'], [])
self.assertEqual(test_boolean, False)
test_message = []
test_boolean = check_index('2', [None], test_message)
self.assertEqual(test_boolean, True)
test_message = []
test_boolean = check_index('2', ['f1', 'f2', 'f3'], test_message)
self.assertEqual(test_boolean, True)
self.assertEqual(test_message,
['For min-max subsetting, two-items list need: no min (or no max) should be "None"'])
def check_is_list(self):
test_messages = []
test_boolean = check_islist('var', ['f1', 'f2', 'f3'], test_messages)
self.assertEqual(test_boolean, False)
self.assertEqual(test_messages, [])
test_messages = []
test_boolean = check_islist('var', 'f1', test_messages)
self.assertEqual(test_boolean, True)
self.assertEqual(test_messages, ['Values to subset for must be in a list format (var skipped)'])
test_messages = []
test_boolean = check_islist('var', False, test_messages)
self.assertEqual(test_boolean, True)
self.assertEqual(test_messages, ['Values to subset for must be in a list format (var skipped)'])
test_messages = []
test_boolean = check_islist('var', {'f1'}, test_messages)
self.assertEqual(test_boolean, True)
self.assertEqual(test_messages, ['Values to subset for must be in a list format (var skipped)'])
def check_numeric_indicator(self):
test_boolean = check_numeric_indicator('col', '0', [])
self.assertEqual(test_boolean, False)
test_boolean = check_numeric_indicator('col', '1', [])
self.assertEqual(test_boolean, False)
test_boolean = check_numeric_indicator('col', '2', [])
self.assertEqual(test_boolean, False)
test_messages = []
test_boolean = check_numeric_indicator('col', 'x', test_messages)
self.assertEqual(test_boolean, False)
self.assertEqual(test_messages, ['Numeric indicator not "0", "1" or "2" (x) (col skipped)'])
def check_var_in_md(self):
test_messages = []
test_boolean = check_var_in_md('col1', ['col1', 'col2'], test_messages)
self.assertEqual(test_boolean, False)
self.assertEqual(test_messages, [])
test_boolean = check_var_in_md('col1', ['col2', 'col3'], test_messages)
self.assertEqual(test_boolean, True)
self.assertEqual(test_messages, ['Variable col1 not in metadata (skipped)'])
def check_in_md(self):
test_messages = []
test_criteria = {}
check_in_md(['col1', 'col2'], ['col1', 'col2', 'col3'],
test_criteria, test_messages, ['missing'], 'init')
self.assertEqual(test_messages, [])
self.assertEqual(test_criteria, {('col1', '0'): ['missing'], ('col2', '0'): ['missing']})
test_messages = []
test_criteria = {}
check_in_md(['col1', 'col2'], ['col2', 'col3'],
test_criteria, test_messages, ['missing'], 'init')
self.assertEqual(test_messages, ['Variable col1 not in metadata (skipped)'])
self.assertEqual(test_criteria, {('col1', '0'): ['missing']})
test_messages = []
test_criteria = {}
check_in_md(['col1', 'col2'], ['col3'],
test_criteria, test_messages, ['missing'], 'init')
self.assertEqual(test_messages, ['Variable col1 not in metadata (skipped)',
'Variable col2 not in metadata (skipped)'])
self.assertEqual(test_criteria, {})
def test_check_key(self):
test_messages = []
test_boolean = check_key('col,0', test_messages)
self.assertEqual(test_boolean, False)
self.assertEqual(test_messages, [])
test_messages = []
test_boolean = check_key('col+0', test_messages)
self.assertEqual(test_boolean, True)
self.assertEqual(test_messages, ['Must have a metadata variable and a numeric separated by a comma (",")'])
test_messages = []
test_boolean = check_key('col,0,', test_messages)
self.assertEqual(test_boolean, True)
self.assertEqual(test_messages, ['Must have a metadata variable and a numeric separated by a comma (",")'])
def test_get_criteria(self):
no_comma = '%s/criteria/criteria_no_comma.yml' % ROOT
test_messages = []
test_criteria = get_criteria(no_comma, self.md, self.nulls, test_messages)
self.assertEqual(test_criteria, {})
self.assertEqual(test_messages, ['Must have a metadata variable and a numeric separated by a comma (",")'])
no_correct_index = '%s/criteria/criteria_no_correct_index.yml' % ROOT
test_messages = []
test_criteria = get_criteria(no_correct_index, self.md, self.nulls, test_messages)
self.assertEqual(test_criteria, {})
self.assertEqual(test_messages, ['Numeric indicator not "0", "1" or "2" (9) (antibiotic_history skipped)'])
no_index = '%s/criteria/criteria_no_index.yml' % ROOT
test_messages = []
test_criteria = get_criteria(no_index, self.md, self.nulls, test_messages)
self.assertEqual(test_criteria, {})
self.assertEqual(test_messages, ['Must have a metadata variable and a numeric separated by a comma (",")'])
var_not_in_md = '%s/criteria/criteria_var_not_in_md.yml' % ROOT
test_messages = []
test_criteria = get_criteria(var_not_in_md, self.md, self.nulls, test_messages)
self.assertEqual(test_criteria, {})
self.assertEqual(test_messages, ['Variable not_in_md not in metadata (skipped)'])
is_not_list = '%s/criteria/criteria_is_not_list.yml' % ROOT
test_messages = []
test_criteria = get_criteria(is_not_list, self.md, self.nulls, test_messages)
self.assertEqual(test_criteria, {})
self.assertEqual(test_messages, ['Values to subset for must be in a list format (antibiotic_history skipped)'])
wrong_minmax = '%s/criteria/criteria_wrong_minmax.yml' % ROOT
test_messages = []
test_criteria = get_criteria(wrong_minmax, self.md, self.nulls, test_messages)
self.assertEqual(test_criteria, {})
self.assertEqual(
test_messages,
['For min-max subsetting, two-items list need: no min (or no max) should be "None"'])
def test_do_filtering(self):
md_abx_filt_y = pd.DataFrame({'antibiotic_history': ['Yes'], 'col2': [1.], 'col3': ['1.3']})
test_name, test_boolean, test_md_abx_y = do_filtering(self.md, 'antibiotic_history', '1', ['Yes'], [], [])
md_abx_filt_y.col3 = md_abx_filt_y.col3.astype('object')
test_md_abx_y.col3 = md_abx_filt_y.col3.astype('object')
self.assertEqual(test_name, 'antibiotic_history')
self.assertEqual(test_boolean, False)
assert_frame_equal(md_abx_filt_y, test_md_abx_y)
md_abx_filt_n = pd.DataFrame({'antibiotic_history': ['No', 'No'], 'col2': [2., 3.], 'col3': [1, 'missing']})
test_name, test_boolean, test_md_abx_x = do_filtering(self.md, 'antibiotic_history', '1', ['No'], [], [])
md_abx_filt_n.index = range(md_abx_filt_n.shape[0])
test_md_abx_x.index = range(test_md_abx_x.shape[0])
self.assertEqual(test_name, 'antibiotic_history')
self.assertEqual(test_boolean, False)
assert_frame_equal(md_abx_filt_n, test_md_abx_x)
md_abx_filt_n = pd.DataFrame({'antibiotic_history': ['No', 'No'], 'col2': [2., 3.], 'col3': [1, 'missing']})
test_name, test_boolean, test_md_abx_x = do_filtering(self.md, 'antibiotic_history', '0', ['Yes'], [], [])
md_abx_filt_n.index = range(md_abx_filt_n.shape[0])
test_md_abx_x.index = range(test_md_abx_x.shape[0])
self.assertEqual(test_name, 'No_antibiotic_history')
self.assertEqual(test_boolean, False)
assert_frame_equal(md_abx_filt_n, test_md_abx_x)
md_abx_filt_mm = pd.DataFrame({'antibiotic_history': ['Yes', 'No'], 'col2': [1., 2.], 'col3': ['1.3', '1']})
test_name, test_boolean, test_md_abx_mm = do_filtering(self.md, 'col2', '2', ['None', 3], ['col2'], [])
# weirdly, if the two col3 contents are "object" and identical, the test fails, hence:
md_abx_filt_mm.col3 = md_abx_filt_mm.col3.astype('float')
test_md_abx_mm.col3 = test_md_abx_mm.col3.astype('float')
md_abx_filt_mm.index = range(md_abx_filt_mm.shape[0])
test_md_abx_mm.index = range(test_md_abx_mm.shape[0])
self.assertEqual(test_name, 'Range_col2')
self.assertEqual(test_boolean, False)
assert_frame_equal(md_abx_filt_mm, test_md_abx_mm)
import glob
import pandas as pd
import os
def merge():
if os.path.isfile('data.csv'):
os.remove('data.csv')
files = glob.glob("*.csv")
columns = ['Bus Body','Date','Packet','Slot','Latitude','Longitude','Place']
df = []
for file in files:
data = pd.read_csv(file)
from pymongo import MongoClient
import pandas as pd
from collections import Counter
# NLP libraries
from nltk.tokenize import TweetTokenizer
from nltk.corpus import stopwords
import string
import csv
import json
# from datetime import datetime
import datetime
from collections import deque
import pymongo
"""TIME SERIES DESCRIPTIVE ANALYSIS SECTION"""
"""TIME SERIES DESCRIPTIVE ANALYSIS - RANSOMWARE HASHTAGS"""
# Function for Data Analysis and CSV file creation
def findHashtagsTimeSeriesRansomware():
print("Finding tweets with #ransomware hashtag from Database.")
print('Querying database and retrieving the data.')
# Mongo Shell query
# db.twitterQuery2.find({'entities.hashtags.text': {$regex:"ransomware",$options:"$i"}}, {'created_at': 1, '_id':0})
# creating query + projection for MongoDB
query = {'entities.hashtags.text': {'$regex':'ransomware','$options': 'i'}}
projection = {'created_at': 1, '_id': 0}
# running query
try:
cursor = twitterOutput2.find(query, projection)
# cursor = cursor.limit(2)
except Exception as e:
print("Unexpected error:", type(e), e)
# Listing dates coming from tweets for storing later the corresponding query in a CSV file
datesQuery = []
for doc in cursor:
# print(doc['created_at'])
datesQuery.append(doc['created_at'])
"""
TIME SERIES ANALYSIS PANDAS SECTION
"""
print('Starting data analysis with Pandas.')
print('Creating Time Series:')
# a list of "1" to count the hashtags
ones = [1] * len(datesQuery)
# the index of the series
idx = pd.DatetimeIndex(datesQuery)
# print('idx:')
# print(idx)
# the actual series (at series of 1s for the moment)
timeSeries01 = pd.Series(ones, index=idx)
print(timeSeries01.head())
print("Counting tweets per day - executing descriptive analysis - Re-sampling / Bucketing..")
# Resampling / bucketing
per_day = timeSeries01.resample('1D').sum().fillna(0)
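# (a Series of 1s indexed by tweet timestamps, resampled to '1D' and summed, is a
# standard pandas idiom for counting events per calendar day)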
print('Time Series created:')
print(per_day.head())
print('Creating data frame..')
s = pd.DataFrame(per_day)
print('Data frame:')
print(s.head())
print('Writing CSV file for time series analysis of tweets with Ransomware hashtags')
s.to_csv('/var/www/html/saint/twitterSNA-Aug17/perdayTimeSeriesRansomware.csv')
print('Writing Ransomware Time Series Descriptive Analysis CSV file completed!')
# function for converting CSV to JSON
def csvToJsonRansomware():
print('Starting CSV to JSON conversion.')
print('Data file processing..')
jsonTimeSeries = []
with open('/var/www/html/saint/twitterSNA-Aug17/perdayTimeSeriesRansomware.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
next(readCSV)
for row in readCSV:
row[0] = row[0] + ' 14:00:00.000'
datetimeObject = datetime.datetime.strptime(row[0], '%Y-%m-%d %H:%M:%S.%f')
millisec = datetimeObject.timestamp() * 1000
row[0] = millisec
row[1] = int(float(row[1]))
# print(row)
jsonTimeSeries.append(row)
# removing the head (first object) with not useful data - Data cleaning
del jsonTimeSeries[0]
# print('New file --> Time Series:')
# print(jsonTimeSeries)
print('Writing JSON file..')
with open('/var/www/html/saint/twitterSNA-Aug17/perdayTimeSeriesRansomware.json', 'w') as file:
json.dump(jsonTimeSeries, file, indent=4)
print('Writing Time Series Ransomware JSON file completed!')
print()
print('Next:')
"""TIME SERIES DESCRIPTIVE ANALYSIS - MALWARE HASHTAGS"""
# Function for Data Analysis and CSV file creation
def findHashtagsTimeSeriesMalware():
print("Finding tweets with #malware hashtag from Database.")
print('Querying database and retrieving the data.')
# Mongo Shell query
# db.twitterQuery2.find({'entities.hashtags.text': {$regex:"malware",$options:"$i"}}, {'created_at': 1, '_id':0})
# creating query + projection for MongoDB
query = {'entities.hashtags.text': {'$regex': 'malware', '$options': 'i'}}
projection = {'created_at': 1, '_id': 0}
# running query
try:
cursor = twitterOutput2.find(query, projection)
# cursor = cursor.limit(2)
except Exception as e:
print("Unexpected error:", type(e), e)
# Listing dates coming from tweets for storing later the corresponding query in a CSV file
datesQuery = []
for doc in cursor:
# print(doc['created_at'])
datesQuery.append(doc['created_at'])
"""
TIME SERIES ANALYSIS PANDAS SECTION
"""
print('Starting data analysis with Pandas.')
print('Creating Time Series:')
# a list of "1" to count the hashtags
ones = [1] * len(datesQuery)
# the index of the series
idx = pd.DatetimeIndex(datesQuery)
# print('idx:')
# print(idx)
# the actual series (at series of 1s for the moment)
timeSeries01 = pd.Series(ones, index=idx)
print(timeSeries01.head())
print("Counting tweets per day - executing descriptive analysis - Re-sampling / Bucketing..")
# Resampling / bucketing
per_day = timeSeries01.resample('1D').sum().fillna(0)
print('Time Series created:')
print(per_day.head())
print('Creating data frame..')
s = pd.DataFrame(per_day)
print('Data frame:')
print(s.head())
print('Writing CSV file for time series analysis of tweets with Malware hashtags')
s.to_csv('/var/www/html/saint/twitterSNA-Aug17/perdayTimeSeriesMalware.csv')
print('Writing Malware Time Series Descriptive Analysis CSV file completed!')
# function for converting CSV to JSON
def csvToJsonMalware():
print('Starting CSV to JSON conversion.')
print('Data file processing..')
jsonTimeSeries = []
with open('/var/www/html/saint/twitterSNA-Aug17/perdayTimeSeriesMalware.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
next(readCSV)
for row in readCSV:
row[0] = row[0] + ' 14:00:00.000'
datetimeObject = datetime.datetime.strptime(row[0], '%Y-%m-%d %H:%M:%S.%f')
millisec = datetimeObject.timestamp() * 1000
row[0] = millisec
row[1] = int(float(row[1]))
# print(row)
jsonTimeSeries.append(row)
# removing the head (first object) with not useful data - Data cleaning
del jsonTimeSeries[0]
# print('New file --> Time Series:')
# print(jsonTimeSeries)
print('Writing JSON file..')
with open('/var/www/html/saint/twitterSNA-Aug17/perdayTimeSeriesMalware.json', 'w') as file:
json.dump(jsonTimeSeries, file, indent=4)
print('Writing Time Series Malware JSON file completed!')
print()
print('Next:')
"""TIME SERIES DESCRIPTIVE ANALYSIS - TROJAN HASHTAGS"""
# Function for Data Analysis and CSV file creation
def findHashtagsTimeSeriesTrojan():
print("Finding tweets with #trojan hashtag from Database.")
print('Querying database and retrieving the data.')
# Mongo Shell query
# db.twitterQuery2.find({'entities.hashtags.text': {$regex:"trojan",$options:"$i"}}, {'created_at': 1, '_id':0})
# creating query + projection for MongoDB
query = {'entities.hashtags.text': {'$regex': 'trojan', '$options': 'i'}}
projection = {'created_at': 1, '_id': 0}
# running query
try:
cursor = twitterOutput2.find(query, projection)
# cursor = cursor.limit(2)
except Exception as e:
print("Unexpected error:", type(e), e)
# Listing dates coming from tweets for storing later the corresponding query in a CSV file
datesQuery = []
for doc in cursor:
# print(doc['created_at'])
datesQuery.append(doc['created_at'])
"""
TIME SERIES ANALYSIS PANDAS SECTION
"""
print('Starting data analysis with Pandas.')
print('Creating Time Series:')
# a list of "1" to count the hashtags
ones = [1] * len(datesQuery)
# the index of the series
idx = pd.DatetimeIndex(datesQuery)
# print('idx:')
# print(idx)
# the actual series (at series of 1s for the moment)
timeSeries01 = pd.Series(ones, index=idx)
print(timeSeries01.head())
print("Counting tweets per day - executing descriptive analysis - Re-sampling / Bucketing..")
# Resampling / bucketing
per_day = timeSeries01.resample('1D').sum().fillna(0)
print('Time Series created:')
print(per_day.head())
print('Creating data frame..')
s = pd.DataFrame(per_day)
print('Data frame:')
print(s.head())
print('Writing CSV file..')
s.to_csv('/var/www/html/saint/twitterSNA-Aug17/perdayTimeSeriesTrojan.csv')
print('Writing Trojan Time Series Descriptive Analysis CSV file completed!')
# function for converting CSV to JSON
def csvToJsonTrojan():
print('Starting CSV to JSON conversion.')
print('Data file processing..')
jsonTimeSeries = []
with open('/var/www/html/saint/twitterSNA-Aug17/perdayTimeSeriesTrojan.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
next(readCSV)
for row in readCSV:
row[0] = row[0] + ' 14:00:00.000'
datetimeObject = datetime.datetime.strptime(row[0], '%Y-%m-%d %H:%M:%S.%f')
millisec = datetimeObject.timestamp() * 1000
row[0] = millisec
row[1] = int(float(row[1]))
# print(row)
jsonTimeSeries.append(row)
# removing the head (first object) with not useful data - Data cleaning
del jsonTimeSeries[0]
# print('New file --> Time Series:')
# print(jsonTimeSeries)
print('Writing JSON file..')
with open('/var/www/html/saint/twitterSNA-Aug17/perdayTimeSeriesTrojan.json', 'w') as file:
json.dump(jsonTimeSeries, file, indent=4)
print('Writing Time Series Trojan JSON file completed!')
print()
print('Next:')
"""TIME SERIES DESCRIPTIVE ANALYSIS - BOTNET HASHTAGS"""
# Function for Data Analysis and CSV file creation
def findHashtagsTimeSeriesBotnet():
print("Finding tweets with #botnet hashtag from Database.")
print('Querying database and retrieving the data.')
# Mongo Shell query
# db.twitterQuery2.find({'entities.hashtags.text': {$regex:"botnet",$options:"$i"}}, {'created_at': 1, '_id':0})
# creating query + projection for MongoDB
query = {'entities.hashtags.text': {'$regex': 'botnet', '$options': 'i'}}
projection = {'created_at': 1, '_id': 0}
# running query
try:
cursor = twitterOutput2.find(query, projection)
# cursor = cursor.limit(2)
except Exception as e:
print("Unexpected error:", type(e), e)
# Listing dates coming from tweets for storing later the corresponding query in a CSV file
datesQuery = []
for doc in cursor:
# print(doc['created_at'])
datesQuery.append(doc['created_at'])
"""
TIME SERIES ANALYSIS PANDAS SECTION
"""
print('Starting data analysis with Pandas.')
print('Creating Time Series:')
# a list of "1" to count the hashtags
ones = [1] * len(datesQuery)
# the index of the series
idx = pd.DatetimeIndex(datesQuery)
# print('idx:')
# print(idx)
# the actual series (at series of 1s for the moment)
timeSeries01 = pd.Series(ones, index=idx)
print(timeSeries01.head())
print("Counting tweets per day - executing descriptive analysis - Re-sampling / Bucketing..")
# Resampling / bucketing
per_day = timeSeries01.resample('1D').sum().fillna(0)
print('Time Series created:')
print(per_day.head())
print('Creating data frame..')
s = pd.DataFrame(per_day)
print('Data frame:')
print(s.head())
print('Writing CSV file..')
s.to_csv('/var/www/html/saint/twitterSNA-Aug17/perdayTimeSeriesBotnet.csv')
print('Writing Botnet Time Series Descriptive Analysis CSV file completed!')
# function for converting CSV to JSON
def csvToJsonBotnet():
print('Starting CSV to JSON conversion.')
print('Data file processing..')
jsonTimeSeries = []
with open('/var/www/html/saint/twitterSNA-Aug17/perdayTimeSeriesBotnet.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
next(readCSV)
for row in readCSV:
row[0] = row[0] + ' 14:00:00.000'
datetimeObject = datetime.datetime.strptime(row[0], '%Y-%m-%d %H:%M:%S.%f')
millisec = datetimeObject.timestamp() * 1000
row[0] = millisec
row[1] = int(float(row[1]))
# print(row)
jsonTimeSeries.append(row)
# removing the head (first object) with not useful data - Data cleaning
del jsonTimeSeries[0]
# print('New file --> Time Series:')
# print(jsonTimeSeries)
print('Writing JSON file..')
with open('/var/www/html/saint/twitterSNA-Aug17/perdayTimeSeriesBotnet.json', 'w') as file:
json.dump(jsonTimeSeries, file, indent=4)
print('Writing Time Series Botnet JSON file completed!')
print()
print('Next:')
"""TIME SERIES DESCRIPTIVE ANALYSIS - PHISHING HASHTAGS"""
# Function for Data Analysis and CSV file creation
def findHashtagsTimeSeriesPhishing():
print("Finding tweets with #phishing hashtag from Database.")
print('Querying database and retrieving the data.')
# Mongo Shell query
# db.twitterQuery2.find({'entities.hashtags.text': {$regex:"phishing",$options:"$i"}}, {'created_at': 1, '_id':0})
# creating query + projection for MongoDB
query = {'entities.hashtags.text': {'$regex': 'phishing', '$options': 'i'}}
projection = {'created_at': 1, '_id': 0}
# running query
try:
cursor = twitterOutput2.find(query, projection)
# cursor = cursor.limit(2)
except Exception as e:
print("Unexpected error:", type(e), e)
# Listing dates coming from tweets for storing later the corresponding query in a CSV file
datesQuery = []
for doc in cursor:
# print(doc['created_at'])
datesQuery.append(doc['created_at'])
"""
TIME SERIES ANALYSIS PANDAS SECTION
"""
print('Starting data analysis with Pandas.')
print('Creating Time Series:')
# a list of "1" to count the hashtags
ones = [1] * len(datesQuery)
# the index of the series
idx = pd.DatetimeIndex(datesQuery)
"""
Authors: <NAME> @dshemetov, <NAME> @jsharpna
"""
from io import BytesIO
from os.path import join, isfile
from zipfile import ZipFile
import requests
import pandas as pd
import numpy as np
# Source files
INPUT_DIR = "./old_source_files"
OUTPUT_DIR = "../../delphi_utils/data"
FIPS_BY_ZIP_POP_URL = (
"https://www2.census.gov/geo/docs/maps-data/data/rel/zcta_county_rel_10.txt?#"
)
ZIP_HSA_HRR_URL = (
"https://atlasdata.dartmouth.edu/downloads/geography/ZipHsaHrr18.csv.zip"
)
ZIP_HSA_HRR_FILENAME = "ZipHsaHrr18.csv"
FIPS_MSA_URL = "https://www2.census.gov/programs-surveys/metro-micro/geographies/reference-files/2018/delineation-files/list1_Sep_2018.xls"
JHU_FIPS_URL = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/UID_ISO_FIPS_LookUp_Table.csv"
STATE_CODES_URL = "http://www2.census.gov/geo/docs/reference/state.txt?#"
FIPS_POPULATION_URL = "https://www2.census.gov/programs-surveys/popest/datasets/2010-2019/counties/totals/co-est2019-alldata.csv"
FIPS_PUERTO_RICO_POPULATION_URL = (
"https://www2.census.gov/geo/docs/maps-data/data/rel/zcta_county_rel_10.txt?"
)
STATE_HHS_FILE = "hhs.txt"
# Out files
FIPS_STATE_OUT_FILENAME = "fips_state_table.csv"
FIPS_MSA_OUT_FILENAME = "fips_msa_table.csv"
FIPS_HRR_OUT_FILENAME = "fips_hrr_table.csv"
FIPS_ZIP_OUT_FILENAME = "fips_zip_table.csv"
FIPS_HHS_FILENAME = "fips_hhs_table.csv"
FIPS_POPULATION_OUT_FILENAME = "fips_pop.csv"
ZIP_HSA_OUT_FILENAME = "zip_hsa_table.csv"
ZIP_HRR_OUT_FILENAME = "zip_hrr_table.csv"
ZIP_FIPS_OUT_FILENAME = "zip_fips_table.csv"
ZIP_MSA_OUT_FILENAME = "zip_msa_table.csv"
ZIP_POPULATION_OUT_FILENAME = "zip_pop.csv"
ZIP_STATE_CODE_OUT_FILENAME = "zip_state_code_table.csv"
ZIP_HHS_FILENAME = "zip_hhs_table.csv"
STATE_OUT_FILENAME = "state_codes_table.csv"
STATE_HHS_OUT_FILENAME = "state_code_hhs_table.csv"
STATE_POPULATION_OUT_FILENAME = "state_pop.csv"
HHS_POPULATION_OUT_FILENAME = "hhs_pop.csv"
NATION_POPULATION_OUT_FILENAME = "nation_pop.csv"
JHU_FIPS_OUT_FILENAME = "jhu_uid_fips_table.csv"
def create_fips_zip_crosswalk():
"""
Creates the (weighted) crosswalk tables from FIPS to ZIP and from ZIP to FIPS
from the source population data.
"""
pop_df = pd.read_csv(FIPS_BY_ZIP_POP_URL)
# Create the FIPS column by combining the state and county codes
state_codes = pop_df["STATE"].astype(str).str.zfill(2)
county_codes = pop_df["COUNTY"].astype(str).str.zfill(3)
pop_df["fips"] = state_codes + county_codes
# Create the ZIP column by adding leading zeros to the ZIP
pop_df["zip"] = pop_df["ZCTA5"].astype(str).str.zfill(5)
# Pare down the dataframe to just the relevant columns: zip, fips, and population
pop_df = pop_df[["zip", "fips", "POPPT"]].rename(columns={"POPPT": "pop"})
# Find the population fractions (the heaviest computation, takes about a minute)
# Note that the denominator in the fractions is the source population
pop_df.set_index(["fips", "zip"], inplace=True)
fips_zip = pop_df.groupby("fips", as_index=False).apply(
lambda g: g["pop"] / g["pop"].sum()
)
zip_fips = pop_df.groupby("zip", as_index=False).apply(
lambda g: g["pop"] / g["pop"].sum()
)
# Rename and write to file
fips_zip = fips_zip.reset_index(level=["fips", "zip"]).rename(
columns={"pop": "weight"}
)
fips_zip = fips_zip[fips_zip["weight"] > 0.0]
fips_zip.to_csv(join(OUTPUT_DIR, FIPS_ZIP_OUT_FILENAME), index=False)
zip_fips = zip_fips.reset_index(level=["fips", "zip"]).rename(
columns={"pop": "weight"}
)
zip_fips = zip_fips[zip_fips["weight"] > 0.0]
zip_fips.to_csv(join(OUTPUT_DIR, ZIP_FIPS_OUT_FILENAME), index=False)
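# Interpretation: in zip_fips each weight is the fraction of a ZIP's population that falls
# in a given county (the weights for one ZIP sum to 1); fips_zip is the mirror table with
# fractions of a county's population per ZIP.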
def create_zip_hsa_hrr_crosswalk():
"""Creates the crosswalk table from ZIP to HSA and from ZIP to HRR from source."""
zipped_csv = ZipFile(BytesIO(requests.get(ZIP_HSA_HRR_URL).content))
zip_df = pd.read_csv(zipped_csv.open(ZIP_HSA_HRR_FILENAME))
# Build the HSA table
hsa_df = zip_df[["zipcode18", "hsanum"]].rename(
columns={"zipcode18": "zip", "hsanum": "hsa"}
)
# Build the HRR table
hrr_df = zip_df[["zipcode18", "hrrnum"]].rename(
columns={"zipcode18": "zip", "hrrnum": "hrr"}
)
# Convert to zero-padded strings
hrr_df["zip"] = hrr_df["zip"].astype(str).str.zfill(5)
hrr_df["hrr"] = hrr_df["hrr"].astype(str)
hsa_df["zip"] = hsa_df["zip"].astype(str).str.zfill(5)
hsa_df["hsa"] = hsa_df["hsa"].astype(str)
hsa_df.to_csv(join(OUTPUT_DIR, ZIP_HSA_OUT_FILENAME), index=False)
hrr_df.to_csv(join(OUTPUT_DIR, ZIP_HRR_OUT_FILENAME), index=False)
def create_fips_msa_crosswalk():
"""Creates the crosswalk table from FIPS to MSA from source."""
msa_cols = {
"CBSA Code": int,
"Metropolitan/Micropolitan Statistical Area": str,
"FIPS State Code": str,
"FIPS County Code": str,
}
# The following line requires the xlrd package.
msa_df = pd.read_excel(
FIPS_MSA_URL,
skiprows=2,
skipfooter=4,
usecols=msa_cols.keys(),
dtype=msa_cols,
)
metro_bool = (
msa_df["Metropolitan/Micropolitan Statistical Area"]
== "Metropolitan Statistical Area"
)
msa_df = msa_df[metro_bool]
# Combine state and county codes into a single FIPS code
msa_df["fips"] = msa_df["FIPS State Code"].str.cat(msa_df["FIPS County Code"])
msa_df.rename(columns={"CBSA Code": "msa"})[["fips", "msa"]].to_csv(
join(OUTPUT_DIR, FIPS_MSA_OUT_FILENAME), index=False
)
def create_jhu_uid_fips_crosswalk():
"""Creates the crosswalk table from JHU UID to FIPS from source."""
# These are hand modifications that need to be made to the translation
# between JHU UID and FIPS. See below for the special cases information
# https://cmu-delphi.github.io/delphi-epidata/api/covidcast-signals/jhu-csse.html#geographical-exceptions
hand_additions = pd.DataFrame(
[
# Split aggregation of Dukes and Nantucket, Massachusetts
{
"jhu_uid": "84070002",
"fips": "25007",
"weight": 16535 / (16535 + 10172),
}, # Population: 16535
{
"jhu_uid": "84070002",
"fips": "25019",
"weight": 10172 / (16535 + 10172),
}, # 10172
# Kansas City, Missouri
{
"jhu_uid": "84070003",
"fips": "29095",
"weight": 674158 / 1084897,
}, # Population: 674158
{
"jhu_uid": "84070003",
"fips": "29165",
"weight": 89322 / 1084897,
}, # 89322
{
"jhu_uid": "84070003",
"fips": "29037",
"weight": 99478 / 1084897,
}, # 99478
{
"jhu_uid": "84070003",
"fips": "29047",
"weight": 221939 / 1084897,
}, # 221939
# Kusilvak, Alaska
{"jhu_uid": "84002158", "fips": "02270", "weight": 1.0},
# Oglala Lakota
{"jhu_uid": "84046102", "fips": "46113", "weight": 1.0},
# Aggregate Utah territories into a "State FIPS"
{"jhu_uid": "84070015", "fips": "49000", "weight": 1.0},
{"jhu_uid": "84070016", "fips": "49000", "weight": 1.0},
{"jhu_uid": "84070017", "fips": "49000", "weight": 1.0},
{"jhu_uid": "84070018", "fips": "49000", "weight": 1.0},
{"jhu_uid": "84070019", "fips": "49000", "weight": 1.0},
{"jhu_uid": "84070020", "fips": "49000", "weight": 1.0},
]
)
unassigned_states = pd.DataFrame(
[
# Map the Unassigned category to a custom megaFIPS XX000
{"jhu_uid": str(x), "fips": str(x)[-2:].ljust(5, "0"), "weight": 1.0}
for x in range(84090001, 84090057)
]
)
out_of_state = pd.DataFrame(
[
# Map the Out of State category to a custom megaFIPS XX000
{"jhu_uid": str(x), "fips": str(x)[-2:].ljust(5, "0"), "weight": 1.0}
for x in range(84080001, 84080057)
]
)
puerto_rico_unassigned = pd.DataFrame(
[
# Map the Unassigned and Out of State categories to the cusom megaFIPS 72000
{"jhu_uid": "63072888", "fips": "72000", "weight": 1.0},
{"jhu_uid": "63072999", "fips": "72000", "weight": 1.0},
]
)
cruise_ships = pd.DataFrame(
[
{"jhu_uid": "84088888", "fips": "88888", "weight": 1.0},
{"jhu_uid": "84099999", "fips": "99999", "weight": 1.0},
]
)
jhu_df = (
pd.read_csv(JHU_FIPS_URL, dtype={"UID": str, "FIPS": str})
.query("Country_Region == 'US'")[["UID", "FIPS"]]
.rename(columns={"UID": "jhu_uid", "FIPS": "fips"})
.dropna(subset=["fips"])
)
# FIPS Codes that are just two digits long should be zero filled on the right.
# These are US state codes (XX) and the territories Guam (66), Northern Mariana Islands (69),
# Virgin Islands (78), and Puerto Rico (72).
fips_st = jhu_df["fips"].str.len() <= 2
jhu_df.loc[fips_st, "fips"] = jhu_df.loc[fips_st, "fips"].str.ljust(5, "0")
# Drop the JHU UIDs that were hand-modified
dup_ind = jhu_df["jhu_uid"].isin(
pd.concat(
[hand_additions, unassigned_states, out_of_state, puerto_rico_unassigned, cruise_ships]
)["jhu_uid"].values
)
jhu_df.drop(jhu_df.index[dup_ind], inplace=True)
# Add weights of 1.0 to everything not in hand additions, then merge in hand-additions
# Finally, zero fill FIPS
jhu_df["weight"] = 1.0
jhu_df = pd.concat(
(
jhu_df,
hand_additions,
unassigned_states,
out_of_state,
puerto_rico_unassigned,
)
)
jhu_df["fips"] = jhu_df["fips"].astype(int).astype(str).str.zfill(5)
jhu_df.to_csv(join(OUTPUT_DIR, JHU_FIPS_OUT_FILENAME), index=False)
def create_state_codes_crosswalk():
"""Create the State ID -> State Name -> State code crosswalk file."""
df = (
pd.read_csv(STATE_CODES_URL, delimiter="|")
.drop(columns="STATENS")
.rename(
columns={
"STATE": "state_code",
"STUSAB": "state_id",
"STATE_NAME": "state_name",
}
)
)
df["state_code"] = df["state_code"].astype(str).str.zfill(2)
df["state_id"] = df["state_id"].astype(str).str.lower()
# Add a few extra US state territories manually
territories = pd.DataFrame(
[
{
"state_code": 70,
"state_name": "Republic of Palau",
"state_id": "pw",
},
{
"state_code": 68,
"state_name": "Marshall Islands",
"state_id": "mh",
},
{
"state_code": 64,
"state_name": "Federated States of Micronesia",
"state_id": "fm",
},
]
)
df = pd.concat((df, territories))
df.to_csv(join(OUTPUT_DIR, STATE_OUT_FILENAME), index=False)
def create_state_hhs_crosswalk():
"""
Create the state to hhs crosswalk.
"""
if not isfile(join(OUTPUT_DIR, STATE_OUT_FILENAME)):
create_state_codes_crosswalk()
ss_df = pd.read_csv(
join(OUTPUT_DIR, STATE_OUT_FILENAME),
dtype={"state_code": str, "state_name": str, "state_id": str},
)
with open(STATE_HHS_FILE) as temp_file:
temp = temp_file.readlines()
# Process text from https://www.hhs.gov/about/agencies/iea/regional-offices/index.html
temp = [int(s[7:9]) if "Region" in s else s for s in temp]
temp = [s.strip().split(", ") if isinstance(s, str) else s for s in temp]
temp = {temp[i]: temp[i + 1] for i in range(0, len(temp), 2)}
temp = {key: [x.lstrip(" and") for x in temp[key]] for key in temp}
temp = [[(key, x) for x in temp[key]] for key in temp]
hhs_state_pairs = [x for y in temp for x in y]
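# hhs_state_pairs is now a flat list of (region number, state name) tuples parsed from the
# copied HHS regional-office text, e.g. (1, 'Connecticut').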
# Make naming adjustments
hhs_state_pairs.remove((2, "the Virgin Islands"))
hhs_state_pairs.append((2, "U.S. Virgin Islands"))
hhs_state_pairs.remove((9, "Commonwealth of the Northern Mariana Islands"))
hhs_state_pairs.append((9, "Northern Mariana Islands"))
# Make dataframe
hhs_df = pd.DataFrame(hhs_state_pairs, columns=["hhs", "state_name"])
hhs_df["hhs"] = hhs_df["hhs"].astype(str)
(
ss_df.merge(hhs_df, on="state_name", how="left")
.dropna()[["state_code", "hhs"]]
.to_csv(join(OUTPUT_DIR, STATE_HHS_OUT_FILENAME), index=False)
)
def create_fips_population_table():
"""
Build a table of populations by FIPS county codes. Uses US Census Bureau population
data from 2019, supplemented with 2010 population data for Puerto Rico, and a few
small counties.
"""
census_pop = pd.read_csv(FIPS_POPULATION_URL, encoding="ISO-8859-1")
census_pop["fips"] = census_pop.apply(
lambda x: f"{x['STATE']:02d}{x['COUNTY']:03d}", axis=1
)
census_pop["pop"] = census_pop["POPESTIMATE2019"]
census_pop = census_pop[["fips", "pop"]]
census_pop = pd.concat(
[
census_pop,
pd.DataFrame(
{
"fips": ["70002", "70003"],
"pop": [0, 0],
}
),
]
)
census_pop = census_pop.reset_index(drop=True)
# Set population for Dukes and Nantucket
dn_fips = "70002"
dukes_fips = "25007"
nantu_fips = "25019"
census_pop.loc[census_pop["fips"] == dn_fips, "pop"] = (
census_pop.loc[census_pop["fips"] == dukes_fips, "pop"].values
+ census_pop.loc[census_pop["fips"] == nantu_fips, "pop"].values
)
# Set population for Kansas City
census_pop.loc[census_pop["fips"] == "70003", "pop"] = 491918 # via Google
# Get the file with Puerto Rico populations
df_pr = pd.read_csv(FIPS_PUERTO_RICO_POPULATION_URL)
df_pr["fips"] = df_pr["STATE"].astype(str).str.zfill(2) + df_pr["COUNTY"].astype(
str
).str.zfill(3)
df_pr["pop"] = df_pr["POPPT"]
df_pr = df_pr[["fips", "pop"]]
# Create the Puerto Rico megaFIPS
df_pr = df_pr[df_pr["fips"].isin([str(x) for x in range(72000, 72999)])]
df_pr = pd.concat(
[df_pr, pd.DataFrame([{"fips": "72000", "pop": df_pr["pop"].sum()}])]
)
# Fill the missing Puerto Rico data with 2010 information
df_pr = df_pr.groupby("fips").sum().reset_index()
df_pr = df_pr[~df_pr["fips"].isin(census_pop["fips"])]
census_pop_pr = pd.concat([census_pop, df_pr])
# Filled from https://www.census.gov/data/tables/2010/dec/2010-island-areas.html
territories_pop = pd.DataFrame({
"fips": ["60010", "60020", "60030", "60040", "60050", "66010", "78010", "78020", "78030", "69085", "69100", "69110", "69120"],
"pop": [23030, 1143, 0, 17, 31329, 159358, 50601, 4170, 51634, 0, 2527, 48220, 3136]
})
census_pop_territories = pd.concat([census_pop_pr, territories_pop])
#Cntrl+C #Cntrl+V #PYCODING
#1
dict = {"country": ["Brazil", "Russia", "India", "China", "South Africa"],
"capital": ["Brasilia", "Moscow", "New Dehli", "Beijing", "Pretoria"],
"area": [8.516, 17.10, 3.286, 9.597, 1.221],
"population": [200.4, 143.5, 1252, 1357, 52.98] }
import pandas as pd
import numpy as np
brics = pd.DataFrame(dict)
print(brics)
#2
s = pd.Series([1, 3, 5, np.nan, 6, 8])
print(s)
#3
import pandas as pd
import numpy as np
dates = pd.date_range("20210101", periods=12)
from contextlib import nullcontext as does_not_raise
from functools import partial
import pandas as pd
from pandas.testing import assert_series_equal
from solarforecastarbiter import datamodel
from solarforecastarbiter.reference_forecasts import persistence
from solarforecastarbiter.conftest import default_observation
import pytest
def load_data_base(data, observation, data_start, data_end):
# slice doesn't care about closed or interval label
# so here we manually adjust start and end times
if 'instant' in observation.interval_label:
pass
elif observation.interval_label == 'ending':
data_start += pd.Timedelta('1s')
elif observation.interval_label == 'beginning':
data_end -= pd.Timedelta('1s')
return data[data_start:data_end]
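# Note: with 'beginning'-labeled data the sample stamped exactly at data_end opens the next
# interval, and with 'ending'-labeled data the sample stamped at data_start closes the
# previous one, hence the one-second adjustments above.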
@pytest.fixture
def powerplant_metadata():
"""1:1 AC:DC"""
modeling_params = datamodel.FixedTiltModelingParameters(
ac_capacity=200, dc_capacity=200, temperature_coefficient=-0.3,
dc_loss_factor=3, ac_loss_factor=0,
surface_tilt=30, surface_azimuth=180)
metadata = datamodel.SolarPowerPlant(
name='Albuquerque Baseline', latitude=35.05, longitude=-106.54,
elevation=1657.0, timezone='America/Denver',
modeling_parameters=modeling_params)
return metadata
@pytest.mark.parametrize('interval_label,closed,end', [
('beginning', 'left', '20190404 1400'),
('ending', 'right', '20190404 1400'),
('instant', None, '20190404 1359')
])
def test_persistence_scalar(site_metadata, interval_label, closed, end):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
data = pd.Series(100., index=data_index)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp(end, tz=tz)
interval_length = pd.Timedelta('5min')
load_data = partial(load_data_base, data)
fx = persistence.persistence_scalar(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data=load_data)
expected_index = pd.date_range(
start='20190404 1300', end=end, freq='5min', tz=tz,
closed=closed)
expected = pd.Series(100., index=expected_index)
assert_series_equal(fx, expected)
@pytest.mark.parametrize('obs_interval_label', ('beginning', 'ending',
'instant'))
@pytest.mark.parametrize('interval_label,closed,end', [
('beginning', 'left', '20190406 0000'),
('ending', 'right', '20190406 0000'),
('instant', None, '20190405 2359')
])
def test_persistence_interval(site_metadata, obs_interval_label,
interval_label, closed, end):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min',
interval_label=obs_interval_label)
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
# each element of data is equal to the hour value of its label
data = pd.Series(data_index.hour, index=data_index, dtype=float)
if obs_interval_label == 'ending':
# e.g. timestamp 12:00:00 should be equal to 11
data = data.shift(1).fillna(0)
data_start = pd.Timestamp('20190404 0000', tz=tz)
data_end = pd.Timestamp(end, tz=tz) - pd.Timedelta('1d')
forecast_start = pd.Timestamp('20190405 0000', tz=tz)
interval_length = pd.Timedelta('60min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(
start='20190405 0000', end=end, freq='60min', tz=tz, closed=closed)
expected_vals = list(range(0, 24))
expected = pd.Series(expected_vals, index=expected_index, dtype=float)
# handle permutations of parameters that should fail
if data_end.minute == 59 and obs_interval_label != 'instant':
expectation = pytest.raises(ValueError)
elif data_end.minute == 0 and obs_interval_label == 'instant':
expectation = pytest.raises(ValueError)
else:
expectation = does_not_raise()
with expectation:
fx = persistence.persistence_interval(
observation, data_start, data_end, forecast_start,
interval_length, interval_label, load_data)
assert_series_equal(fx, expected)
def test_persistence_interval_missing_data(site_metadata):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min',
interval_label='ending')
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404T1200', end='20190406', freq='5min', tz=tz)
# each element of data is equal to the hour value of its label
end = '20190406 0000'
data = pd.Series(data_index.hour, index=data_index, dtype=float)
data = data.shift(1)
data_start = pd.Timestamp('20190404 0000', tz=tz)
data_end = pd.Timestamp(end, tz=tz) - pd.Timedelta('1d')
forecast_start = pd.Timestamp('20190405 0000', tz=tz)
interval_length = pd.Timedelta('60min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(
start='20190405 0000', end=end, freq='60min', tz=tz, closed='right')
expected_vals = [None] * 12 + list(range(12, 24))
expected = pd.Series(expected_vals, index=expected_index, dtype=float)
fx = persistence.persistence_interval(
observation, data_start, data_end, forecast_start,
interval_length, 'ending', load_data)
assert_series_equal(fx, expected)
@pytest.fixture
def uniform_data():
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
data = pd.Series(100., index=data_index)
return data
@pytest.mark.parametrize(
'interval_label,expected_index,expected_ghi,expected_ac,obsscale', (
('beginning',
['20190404 1300', '20190404 1330'],
[96.41150694741889, 91.6991546408236],
[96.60171202566896, 92.074796727846],
1),
('ending',
['20190404 1330', '20190404 1400'],
[96.2818141290749, 91.5132934827808],
[96.47816752344607, 91.89460837042301],
1),
# test clipped at 2x clearsky
('beginning',
['20190404 1300', '20190404 1330'],
[1926.5828549018618, 1832.4163238767312],
[383.1524464326973, 365.19729186262526],
50)
)
)
def test_persistence_scalar_index(
powerplant_metadata, uniform_data, interval_label,
expected_index, expected_ghi, expected_ac, obsscale):
# ac_capacity is 200 from above
observation = default_observation(
powerplant_metadata, interval_length='5min',
interval_label='beginning')
observation_ac = default_observation(
powerplant_metadata, interval_length='5min',
interval_label='beginning', variable='ac_power')
data = uniform_data * obsscale
tz = data.index.tzinfo
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
interval_length = pd.Timedelta('30min')
load_data = partial(load_data_base, data)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
expected_index, tz=tz, freq=interval_length)
expected = pd.Series(expected_ghi, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
fx = persistence.persistence_scalar_index(
observation_ac, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected = pd.Series(expected_ac, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
def test_persistence_scalar_index_instant_obs_fx(
site_metadata, powerplant_metadata, uniform_data):
# instantaneous obs and fx
interval_length = pd.Timedelta('30min')
interval_label = 'instant'
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
observation_ac = default_observation(
powerplant_metadata, interval_length='5min',
interval_label=interval_label, variable='ac_power')
data = uniform_data
tz = data.index.tzinfo
load_data = partial(load_data_base, data)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1259', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1359', tz=tz)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
['20190404 1300', '20190404 1330'], tz=tz, freq=interval_length)
expected_values = [96.59022431746838, 91.99405501672328]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
fx = persistence.persistence_scalar_index(
observation_ac, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_values = [96.77231379880752, 92.36198028963426]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
# instant obs and fx, but with offset added to starts instead of ends
data_start = pd.Timestamp('20190404 1201', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1301', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
['20190404 1300', '20190404 1330'], tz=tz, freq=interval_length)
expected_values = [96.55340033645147, 91.89662922267517]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
def test_persistence_scalar_index_invalid_times_instant(site_metadata):
data = pd.Series(100., index=[0])
load_data = partial(load_data_base, data)
tz = 'America/Phoenix'
interval_label = 'instant'
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
# instant obs that cover the whole interval - not allowed!
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
interval_length = pd.Timedelta('30min')
with pytest.raises(ValueError):
persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
@pytest.mark.parametrize('data_start,data_end,forecast_start,forecast_end', (
('20190404 1201', '20190404 1300', '20190404 1300', '20190404 1400'),
('20190404 1200', '20190404 1259', '20190404 1300', '20190404 1400'),
('20190404 1200', '20190404 1300', '20190404 1301', '20190404 1400'),
('20190404 1200', '20190404 1300', '20190404 1300', '20190404 1359'),
))
def test_persistence_scalar_index_invalid_times_interval(
site_metadata, interval_label, data_start, data_end, forecast_start,
forecast_end):
data = pd.Series(100., index=[0])
load_data = partial(load_data_base, data)
tz = 'America/Phoenix'
interval_length = pd.Timedelta('30min')
# base times to mess with
data_start = pd.Timestamp(data_start, tz=tz)
data_end = pd.Timestamp(data_end, tz=tz)
forecast_start = pd.Timestamp(forecast_start, tz=tz)
forecast_end = pd.Timestamp(forecast_end, tz=tz)
# interval average obs with invalid starts/ends
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
errtext = "with interval_label beginning or ending"
with pytest.raises(ValueError) as excinfo:
persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert errtext in str(excinfo.value)
def test_persistence_scalar_index_invalid_times_invalid_label(site_metadata):
data = pd.Series(100., index=[0])
load_data = partial(load_data_base, data)
tz = 'America/Phoenix'
interval_length = pd.Timedelta('30min')
interval_label = 'invalid'
observation = default_observation(
site_metadata, interval_length='5min')
object.__setattr__(observation, 'interval_label', interval_label)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
with pytest.raises(ValueError) as excinfo:
persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert "invalid interval_label" in str(excinfo.value)
def test_persistence_scalar_index_low_solar_elevation(
site_metadata, powerplant_metadata):
interval_label = 'beginning'
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
observation_ac = default_observation(
powerplant_metadata, interval_length='5min',
interval_label=interval_label, variable='ac_power')
# at ABQ Baseline, solar apparent zenith for these points is
# 2019-05-13 12:00:00+00:00 91.62
# 2019-05-13 12:05:00+00:00 90.09
# 2019-05-13 12:10:00+00:00 89.29
# 2019-05-13 12:15:00+00:00 88.45
# 2019-05-13 12:20:00+00:00 87.57
# 2019-05-13 12:25:00+00:00 86.66
tz = 'UTC'
data_start = pd.Timestamp('20190513 1200', tz=tz)
data_end = pd.Timestamp('20190513 1230', tz=tz)
index = pd.date_range(start=data_start, end=data_end,
freq='5min', closed='left')
# clear sky 5 min avg (from 1 min avg) GHI is
# [0., 0.10932908, 1.29732454, 4.67585122, 10.86548521, 19.83487399]
# create data series that could produce obs / clear of
# 0/0, 1/0.1, -1/1.3, 5/5, 10/10, 20/20
# average without limits is (10 - 1 + 1 + 1 + 1) / 5 = 2.4
# average with element limits of [0, 2] = (2 + 0 + 1 + 1 + 1) / 5 = 1
data = pd.Series([0, 1, -1, 5, 10, 20.], index=index)
forecast_start = pd.Timestamp('20190513 1230', tz=tz)
forecast_end = pd.Timestamp('20190513 1300', tz=tz)
interval_length = pd.Timedelta('5min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(
start=forecast_start, end=forecast_end, freq='5min', closed='left')
# clear sky 5 min avg GHI is
# [31.2, 44.5, 59.4, 75.4, 92.4, 110.1]
expected_vals = [31.2, 44.5, 59.4, 75.4, 92.4, 110.1]
expected = pd.Series(expected_vals, index=expected_index)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert_series_equal(fx, expected, check_less_precise=1, check_names=False)
expected = pd.Series([0.2, 0.7, 1.2, 1.6, 2., 2.5], index=expected_index)
fx = persistence.persistence_scalar_index(
observation_ac, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert_series_equal(fx, expected, check_less_precise=1, check_names=False)
@pytest.mark.parametrize("interval_label", [
'beginning', 'ending'
])
@pytest.mark.parametrize("obs_values,axis,constant_values,expected_values", [
# constant_values = variable values
# forecasts = percentiles [%]
([0, 0, 0, 20, 20, 20], 'x', [10, 20], [50, 100]),
# constant_values = percentiles [%]
# forecasts = variable values
([0, 0, 0, 4, 4, 4], 'y', [50], [2]),
# invalid axis
pytest.param([0, 0, 0, 4, 4, 4], 'percentile', [-1], None,
marks=pytest.mark.xfail(raises=ValueError, strict=True)),
])
def test_persistence_probabilistic(site_metadata, interval_label, obs_values,
axis, constant_values, expected_values):
tz = 'UTC'
interval_length = '5min'
observation = default_observation(
site_metadata,
interval_length=interval_length,
interval_label=interval_label
)
    data_start = pd.Timestamp('20190513 1200', tz=tz)
import plotly.graph_objects as go
import pandas as pd
import plotly.express as px
from datetime import datetime, timedelta
import requests
import json
import time
def read():
df1 = pd.read_csv("CSV/ETH_BTC_USD_2015-08-09_2020-04-04-CoinDesk.csv")
df1.columns = ['date', 'ETH', 'BTC']
df1.date = pd.to_datetime(df1.date, dayfirst=True)
df1.set_index('date', inplace=True)
EOS = pd.read_csv("ICO_coins/EOS_USD_2018-06-06_2020-04-02-CoinDesk.csv")
IOTA = pd.read_csv("ICO_coins/IOTA_USD_2018-06-06_2020-04-02-CoinDesk.csv")
LSK = pd.read_csv("ICO_coins/LSK_USD_2018-06-06_2020-04-02-CoinDesk.csv")
NEO = pd.read_csv("ICO_coins/NEO_USD_2018-06-06_2020-04-02-CoinDesk.csv")
TRX = pd.read_csv("ICO_coins/tron/TRX_USD_2018-06-06_2020-04-02-CoinDesk.csv")
ADA = pd.read_csv("ICO_coins/cardano/ADA_USD_2018-06-06_2020-04-02-CoinDesk.csv")
    GOLD = pd.read_csv("CSV/XAU-GOLD_USD_Historical Data_2018-06-06--2020-04-04.csv")
import nose
import warnings
import os
import datetime
import numpy as np
import sys
from distutils.version import LooseVersion
from pandas import compat
from pandas.compat import u, PY3
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, period_range, Index, Categorical)
from pandas.core.common import PerformanceWarning
from pandas.io.packers import to_msgpack, read_msgpack
import pandas.util.testing as tm
from pandas.util.testing import (ensure_clean,
assert_categorical_equal,
assert_frame_equal,
assert_index_equal,
assert_series_equal,
patch)
from pandas.tests.test_panel import assert_panel_equal
import pandas
from pandas import Timestamp, NaT, tslib
nan = np.nan
try:
import blosc # NOQA
except ImportError:
_BLOSC_INSTALLED = False
else:
_BLOSC_INSTALLED = True
try:
import zlib # NOQA
except ImportError:
_ZLIB_INSTALLED = False
else:
_ZLIB_INSTALLED = True
_multiprocess_can_split_ = False
def check_arbitrary(a, b):
if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
assert(len(a) == len(b))
for a_, b_ in zip(a, b):
check_arbitrary(a_, b_)
elif isinstance(a, Panel):
assert_panel_equal(a, b)
elif isinstance(a, DataFrame):
assert_frame_equal(a, b)
elif isinstance(a, Series):
assert_series_equal(a, b)
elif isinstance(a, Index):
assert_index_equal(a, b)
elif isinstance(a, Categorical):
# Temp,
# Categorical.categories is changed from str to bytes in PY3
# maybe the same as GH 13591
if PY3 and b.categories.inferred_type == 'string':
pass
else:
tm.assert_categorical_equal(a, b)
elif a is NaT:
assert b is NaT
elif isinstance(a, Timestamp):
assert a == b
assert a.freq == b.freq
else:
assert(a == b)
class TestPackers(tm.TestCase):
def setUp(self):
self.path = '__%s__.msg' % tm.rands(10)
def tearDown(self):
pass
def encode_decode(self, x, compress=None, **kwargs):
with ensure_clean(self.path) as p:
to_msgpack(p, x, compress=compress, **kwargs)
return read_msgpack(p, **kwargs)
class TestAPI(TestPackers):
def test_string_io(self):
df = DataFrame(np.random.randn(10, 2))
s = df.to_msgpack(None)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(compat.BytesIO(s))
tm.assert_frame_equal(result, df)
s = to_msgpack(None, df)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
with ensure_clean(self.path) as p:
s = df.to_msgpack()
fh = open(p, 'wb')
fh.write(s)
fh.close()
result = read_msgpack(p)
tm.assert_frame_equal(result, df)
def test_iterator_with_string_io(self):
dfs = [DataFrame(np.random.randn(10, 2)) for i in range(5)]
s = to_msgpack(None, *dfs)
for i, result in enumerate(read_msgpack(s, iterator=True)):
tm.assert_frame_equal(result, dfs[i])
def test_invalid_arg(self):
# GH10369
class A(object):
def __init__(self):
self.read = 0
tm.assertRaises(ValueError, read_msgpack, path_or_buf=None)
tm.assertRaises(ValueError, read_msgpack, path_or_buf={})
tm.assertRaises(ValueError, read_msgpack, path_or_buf=A())
class TestNumpy(TestPackers):
def test_numpy_scalar_float(self):
x = np.float32(np.random.rand())
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_scalar_complex(self):
x = np.complex64(np.random.rand() + 1j * np.random.rand())
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_scalar_float(self):
x = np.random.rand()
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_scalar_complex(self):
x = np.random.rand() + 1j * np.random.rand()
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_list_numpy_float(self):
x = [np.float32(np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
def test_list_numpy_float_complex(self):
if not hasattr(np, 'complex128'):
raise nose.SkipTest('numpy cant handle complex128')
x = [np.float32(np.random.rand()) for i in range(5)] + \
[np.complex128(np.random.rand() + 1j * np.random.rand())
for i in range(5)]
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_list_float(self):
x = [np.random.rand() for i in range(5)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
def test_list_float_complex(self):
x = [np.random.rand() for i in range(5)] + \
[(np.random.rand() + 1j * np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_dict_float(self):
x = {'foo': 1.0, 'bar': 2.0}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_complex(self):
x = {'foo': 1.0 + 1.0j, 'bar': 2.0 + 2.0j}
x_rec = self.encode_decode(x)
self.assertEqual(x, x_rec)
for key in x:
self.assertEqual(type(x[key]), type(x_rec[key]))
def test_dict_numpy_float(self):
x = {'foo': np.float32(1.0), 'bar': np.float32(2.0)}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_numpy_complex(self):
x = {'foo': np.complex128(1.0 + 1.0j),
'bar': np.complex128(2.0 + 2.0j)}
x_rec = self.encode_decode(x)
self.assertEqual(x, x_rec)
for key in x:
self.assertEqual(type(x[key]), type(x_rec[key]))
def test_numpy_array_float(self):
# run multiple times
for n in range(10):
x = np.random.rand(10)
for dtype in ['float32', 'float64']:
x = x.astype(dtype)
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_array_complex(self):
x = (np.random.rand(5) + 1j * np.random.rand(5)).astype(np.complex128)
x_rec = self.encode_decode(x)
self.assertTrue(all(map(lambda x, y: x == y, x, x_rec)) and
x.dtype == x_rec.dtype)
def test_list_mixed(self):
x = [1.0, np.float32(3.5), np.complex128(4.25), u('foo')]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
class TestBasic(TestPackers):
def test_timestamp(self):
for i in [Timestamp(
'20130101'), Timestamp('20130101', tz='US/Eastern'),
Timestamp('201301010501')]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
def test_nat(self):
nat_rec = self.encode_decode(NaT)
self.assertIs(NaT, nat_rec)
def test_datetimes(self):
# fails under 2.6/win32 (np.datetime64 seems broken)
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('2.6 with np.datetime64 is broken')
for i in [datetime.datetime(2013, 1, 1),
datetime.datetime(2013, 1, 1, 5, 1),
datetime.date(2013, 1, 1),
np.datetime64(datetime.datetime(2013, 1, 5, 2, 15))]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
def test_timedeltas(self):
for i in [datetime.timedelta(days=1),
datetime.timedelta(days=1, seconds=10),
np.timedelta64(1000000)]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
class TestIndex(TestPackers):
def setUp(self):
super(TestIndex, self).setUp()
self.d = {
'string': tm.makeStringIndex(100),
'date': tm.makeDateIndex(100),
'int': tm.makeIntIndex(100),
'rng': tm.makeRangeIndex(100),
'float': tm.makeFloatIndex(100),
'empty': Index([]),
'tuple': Index(zip(['foo', 'bar', 'baz'], [1, 2, 3])),
'period': Index(period_range('2012-1-1', freq='M', periods=3)),
'date2': Index(date_range('2013-01-1', periods=10)),
'bdate': Index(bdate_range('2013-01-02', periods=10)),
}
self.mi = {
'reg': MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'),
('foo', 'two'),
('qux', 'one'), ('qux', 'two')],
names=['first', 'second']),
}
def test_basic_index(self):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
# datetime with no freq (GH5506)
i = Index([Timestamp('20130101'), Timestamp('20130103')])
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
# datetime with timezone
i = Index([Timestamp('20130101 9:00:00'), Timestamp(
'20130103 11:00:00')]).tz_localize('US/Eastern')
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
def test_multi_index(self):
for s, i in self.mi.items():
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
def test_unicode(self):
i = tm.makeUnicodeIndex(100)
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
class TestSeries(TestPackers):
def setUp(self):
super(TestSeries, self).setUp()
self.d = {}
s = tm.makeStringSeries()
s.name = 'string'
self.d['string'] = s
s = tm.makeObjectSeries()
s.name = 'object'
self.d['object'] = s
s = Series(tslib.iNaT, dtype='M8[ns]', index=range(5))
self.d['date'] = s
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
'F': [Timestamp('20130102', tz='US/Eastern')] * 2 +
[Timestamp('20130603', tz='CET')] * 3,
'G': [Timestamp('20130102', tz='US/Eastern')] * 5,
}
self.d['float'] = Series(data['A'])
self.d['int'] = Series(data['B'])
self.d['mixed'] = Series(data['E'])
self.d['dt_tz_mixed'] = Series(data['F'])
self.d['dt_tz'] = Series(data['G'])
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
                assert_series_equal(i, i_rec)
import os
import re
import sys
import time
import math
from collections import Counter
from functools import partial
from tempfile import mkdtemp, NamedTemporaryFile
import logging
import multiprocessing as mp
# "hidden" features, in development
try:
import MOODS.tools
import MOODS.parsers
import MOODS.scan
except ImportError:
pass
from genomepy import Genome
from diskcache import Cache
import numpy as np
from sklearn.preprocessing import scale
import pandas as pd
import sqlite3
from gimmemotifs import __version__
from gimmemotifs.background import RandomGenomicFasta, gc_bin_bedfile
from gimmemotifs.config import MotifConfig, CACHE_DIR
from gimmemotifs.fasta import Fasta
from gimmemotifs.c_metrics import pwmscan
from gimmemotifs.motif import read_motifs
from gimmemotifs.utils import parse_cutoff, as_fasta, file_checksum, rc
try:
import copy_reg
import types
def _pickle_method(m):
if m.im_self is None:
return getattr, (m.im_class, m.im_func.func_name)
else:
return getattr, (m.im_self, m.im_func.func_name)
copy_reg.pickle(types.MethodType, _pickle_method)
except Exception:
pass
# only used when using cache, should not be a requirement
try:
from dogpile.cache import make_region
import xxhash
except ImportError:
pass
logger = logging.getLogger("gimme.scanner")
config = MotifConfig()
FPR = 0.01
lock = mp.Lock()
def print_cluster_error_message():
logger.error("Cache is corrupted.")
logger.error(
"This can happen when you try to run a GimmeMotifs tool in parallel on a cluster."
)
logger.error(f"To solve this, delete the GimmeMotifs cache directory: {CACHE_DIR}")
logger.error("and then see here for a workaround:")
logger.error(
"https://gimmemotifs.readthedocs.io/en/master/faq.html#sqlite-error-when-running-on-a-cluster"
)
def _format_line(
seq, seq_id, motif, score, pos, strand, bed=False, seq_p=None, strandmap=None
):
if seq_p is None:
seq_p = re.compile(r"([^\s:]+):(\d+)-(\d+)")
if strandmap is None:
strandmap = {-1: "-", 1: "+"}
if bed:
m = seq_p.search(seq_id)
if m:
chrom = m.group(1)
start = int(m.group(2))
return "{}\t{}\t{}\t{}\t{}\t{}".format(
chrom,
start + pos,
start + pos + len(motif),
motif.id,
score,
strandmap[strand],
)
else:
return "{}\t{}\t{}\t{}\t{}\t{}".format(
seq_id, pos, pos + len(motif), motif.id, score, strandmap[strand]
)
else:
return '{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\tmotif_name "{}" ; motif_instance "{}"'.format(
seq_id,
"pfmscan",
"misc_feature",
pos + 1, # GFF is 1-based
pos + len(motif),
score,
strandmap[strand],
".",
motif.id,
seq[pos : pos + len(motif)],
)
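# Illustrative example of the formatting above (added comment, values made up):
# with bed=True and seq_id "chr1:100-300", a 10 bp motif hit at pos=50 on the +
# strand becomes "chr1<TAB>150<TAB>160<TAB><motif.id><TAB><score><TAB>+"; with
# bed=False the same hit is written as a single GFF-like misc_feature line.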
def scan_regionfile_to_table(
input_table, genome, scoring, pfmfile=None, ncpus=None, zscore=True, gc=True
):
"""Scan regions in input table with motifs.
Parameters
----------
input_table : str
        Filename of the input table. Can be either a tab-separated text file
        or a feather file.
genome : str
Genome name. Can be either the name of a FASTA-formatted file or a
genomepy genome name.
scoring : str
"count" or "score"
pfmfile : str, optional
Specify a PFM file for scanning.
ncpus : int, optional
        If defined this specifies the number of cores to use.
    zscore : bool, optional
        If True, return z-score normalized motif scores (only used when
        scoring is "score").
    gc : bool, optional
        If True, use GC%-binned genomic background sequences for the z-score
        normalization.
    Returns
    -------
    table : pandas.DataFrame
        DataFrame with motif ids as column names and regions as index. Values
        are either counts or scores depending on the 'scoring' parameter.
"""
config = MotifConfig()
if pfmfile is None:
pfmfile = config.get_default_params().get("motif_db", None)
if pfmfile is not None:
pfmfile = os.path.join(config.get_motif_dir(), pfmfile)
if pfmfile is None:
raise ValueError("no pfmfile given and no default database specified")
logger.info("reading table")
if input_table.endswith("feather"):
df = pd.read_feather(input_table)
idx = df.iloc[:, 0].values
else:
df = pd.read_table(input_table, index_col=0, comment="#")
idx = df.index
regions = list(idx)
if len(regions) >= 1000:
check_regions = np.random.choice(regions, size=1000, replace=False)
else:
check_regions = regions
size = int(
np.median([len(seq) for seq in as_fasta(check_regions, genome=genome).seqs])
)
s = Scanner(ncpus=ncpus)
s.set_motifs(pfmfile)
s.set_genome(genome)
s.set_background(genome=genome, gc=gc, size=size)
scores = []
if scoring == "count":
logger.info("setting threshold")
s.set_threshold(fpr=FPR)
logger.info("creating count table")
for row in s.count(regions):
scores.append(row)
logger.info("done")
else:
s.set_threshold(threshold=0.0)
msg = "creating score table"
if zscore:
msg += " (z-score"
if gc:
msg += ", GC%"
msg += ")"
else:
msg += " (logodds)"
logger.info(msg)
for row in s.best_score(regions, zscore=zscore, gc=gc):
scores.append(row)
logger.info("done")
motif_names = [m.id for m in read_motifs(pfmfile)]
logger.info("creating dataframe")
dtype = "float16"
if scoring == "count":
dtype = int
df = pd.DataFrame(scores, index=idx, columns=motif_names, dtype=dtype)
return df
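# Illustrative usage sketch (file and genome names are hypothetical; this is an
# added comment, not part of the original module):
#
#   df = scan_regionfile_to_table(
#       "regions.txt",   # table with regions as the index
#       "hg38",          # genomepy genome name or FASTA file
#       "score",         # or "count" for an FPR-thresholded count table
#       ncpus=4,
#   )
#   print(df.head())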
def scan_table(
s, inputfile, fa, motifs, cutoff, bgfile, nreport, scan_rc, pvalue, moods
):
# header
yield "\t{}".format("\t".join([m.id for m in motifs]))
table = True
if moods:
result_it = scan_it_moods(
inputfile, motifs, cutoff, bgfile, nreport, scan_rc, pvalue, table
)
for seq_id, counts in result_it:
yield "{}\t{}".format(seq_id, "\t".join([str(x) for x in counts]))
else:
# get iterator
result_it = s.count(fa, nreport, scan_rc)
# counts table
for i, counts in enumerate(result_it):
yield "{}\t{}".format(fa.ids[i], "\t".join([str(x) for x in counts]))
def scan_score_table(s, fa, motifs, scan_rc, zscore=False, gcnorm=False):
s.set_threshold(threshold=0.0, gc=gcnorm)
# get iterator
result_it = s.best_score(fa, scan_rc, zscore=zscore, gc=gcnorm)
# header
yield "\t{}".format("\t".join([m.id for m in motifs]))
# score table
for i, scores in enumerate(result_it):
yield "{}\t{}".format(fa.ids[i], "\t".join(["{:4f}".format(x) for x in scores]))
def scan_normal(
s,
inputfile,
fa,
motifs,
cutoff,
bgfile,
nreport,
scan_rc,
pvalue,
moods,
bed,
zscore,
gcnorm,
):
table = False
if moods:
result_it = scan_it_moods(
inputfile, motifs, cutoff, bgfile, nreport, scan_rc, pvalue, table
)
for motif, d in result_it:
for seq_id, matches in d.items():
for pos, score, strand in matches:
yield _format_line(
fa[seq_id], seq_id, motif, score, pos, strand, bed=bed
)
else:
result_it = s.scan(fa, nreport, scan_rc, zscore, gc=gcnorm)
for i, result in enumerate(result_it):
seq_id = fa.ids[i]
seq = fa[seq_id]
for motif, matches in zip(motifs, result):
for (score, pos, strand) in matches:
yield _format_line(seq, seq_id, motif, score, pos, strand, bed=bed)
def command_scan(
inputfile,
pfmfile,
nreport=1,
fpr=0.01,
cutoff=None,
bed=False,
scan_rc=True,
table=False,
score_table=False,
moods=False,
pvalue=None,
bgfile=None,
genome=None,
ncpus=None,
zscore=False,
gcnorm=False,
):
motifs = read_motifs(pfmfile)
fa = as_fasta(inputfile, genome)
# initialize scanner
s = Scanner(ncpus=ncpus)
s.set_motifs(pfmfile)
if genome:
s.set_genome(genome=genome)
if genome:
s.set_background(
genome=genome, fname=bgfile, size=fa.median_length(), gc=gcnorm
)
if bgfile:
s.set_background(genome=genome, fname=bgfile, size=fa.median_length())
if not score_table:
s.set_threshold(fpr=fpr, threshold=cutoff)
if table:
it = scan_table(
s, inputfile, fa, motifs, cutoff, bgfile, nreport, scan_rc, pvalue, moods
)
elif score_table:
it = scan_score_table(s, fa, motifs, scan_rc, zscore=zscore, gcnorm=gcnorm)
else:
it = scan_normal(
s,
inputfile,
fa,
motifs,
cutoff,
bgfile,
nreport,
scan_rc,
pvalue,
moods,
bed,
zscore=zscore,
gcnorm=gcnorm,
)
for row in it:
yield row
def scan_to_file(
inputfile,
pfmfile,
filepath_or_buffer=None,
nreport=1,
fpr=0.01,
cutoff=None,
bed=False,
scan_rc=True,
table=False,
score_table=False,
moods=False,
pvalue=False,
bgfile=None,
genome=None,
ncpus=None,
zscore=True,
gcnorm=True,
):
"""Scan an inputfile with motifs."""
should_close = False
if filepath_or_buffer is None:
fo = sys.stdout
else:
if hasattr(filepath_or_buffer, "write"):
fo = filepath_or_buffer
else:
try:
fo = open(os.path.expanduser(filepath_or_buffer), "w")
should_close = True
except Exception:
logger.error(f"Could not open {filepath_or_buffer} for writing")
sys.exit(1)
if fpr is None and cutoff is None:
fpr = 0.01
print("# GimmeMotifs version {}".format(__version__), file=fo)
print("# Input: {}".format(inputfile), file=fo)
print("# Motifs: {}".format(pfmfile), file=fo)
if fpr and not score_table:
if genome is not None:
print("# FPR: {} ({})".format(fpr, genome), file=fo)
elif bgfile:
print("# FPR: {} ({})".format(fpr, bgfile), file=fo)
if cutoff is not None:
print("# Threshold: {}".format(cutoff), file=fo)
if zscore:
if gcnorm:
print("# Scoring: GC frequency normalized z-score", file=fo)
else:
print("# Scoring: normalized z-score", file=fo)
else:
print("# Scoring: logodds score", file=fo)
for line in command_scan(
inputfile,
pfmfile,
nreport=nreport,
fpr=fpr,
cutoff=cutoff,
bed=bed,
scan_rc=scan_rc,
table=table,
score_table=score_table,
moods=moods,
pvalue=pvalue,
bgfile=bgfile,
genome=genome,
ncpus=ncpus,
zscore=zscore,
gcnorm=gcnorm,
):
print(line, file=fo)
if should_close:
try:
fo.close()
except Exception:
pass
def scan_to_best_match(
fname, motifs, ncpus=None, genome=None, score=False, zscore=False, gc=False
):
"""Scan a FASTA file with motifs.
Scan a FASTA file and return a dictionary with the best match per motif.
Parameters
----------
fname : str
Filename of a sequence file in FASTA format.
motifs : list
List of motif instances.
Returns
-------
result : dict
Dictionary with motif scanning results.
"""
# Initialize scanner
s = Scanner(ncpus=ncpus)
s.set_motifs(motifs)
s.set_threshold(threshold=0.0)
if genome:
s.set_genome(genome)
if isinstance(motifs, str):
motifs = read_motifs(motifs)
logger.debug("scanning %s...", fname)
result = dict([(m.id, []) for m in motifs])
if score:
it = s.best_score(fname, zscore=zscore, gc=gc)
else:
it = s.best_match(fname, zscore=zscore, gc=gc)
for scores in it:
for motif, score in zip(motifs, scores):
result[motif.id].append(score)
# Close the pool and reclaim memory
del s
return result
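# Illustrative usage sketch (hypothetical file names; an added comment, not part
# of the original module): best score per sequence for every motif in a FASTA.
#
#   motifs = read_motifs("motifs.pfm")
#   best = scan_to_best_match("sequences.fa", motifs, score=True)
#   # best maps motif.id -> one best score per input sequence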
def parse_threshold_values(motif_file, cutoff):
motifs = read_motifs(motif_file)
d = parse_cutoff(motifs, cutoff)
threshold = {}
for m in motifs:
c = m.pwm_min_score() + (m.pwm_max_score() - m.pwm_min_score()) * d[m.id]
threshold[m.id] = c
return threshold
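# Worked example of the scaling above (added for illustration): with a cutoff
# fraction of 0.95 and a motif whose log-odds scores range from -10 to 10, the
# threshold is -10 + (10 - (-10)) * 0.95 = 9.0.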
def scan_sequence(
seq, seq_gc_bin, motifs, nreport, scan_rc, motifs_meanstd=None, zscore=False
):
ret = []
# scan for motifs
for motif, cutoff in motifs:
if cutoff is None:
ret.append([])
else:
if zscore:
m_mean, m_std = motifs_meanstd[seq_gc_bin][motif.id]
result = pwmscan(
seq, motif.logodds, motif.pwm_min_score(), nreport, scan_rc
)
result = [[(row[0] - m_mean) / m_std, row[1], row[2]] for row in result]
result = [row for row in result if row[0] >= cutoff]
else:
result = pwmscan(seq, motif.logodds, cutoff, nreport, scan_rc)
if cutoff <= motif.pwm_min_score() and len(result) == 0:
result = [[motif.pwm_min_score(), 0, 1]] * nreport
ret.append(result)
return ret
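# Shape of the return value above (illustrative numbers): one entry per motif,
# each a list of up to `nreport` hits of the form [score, position, strand],
# e.g. [[[9.1, 120, 1]], [], [[5.4, 33, -1]]] for three motifs.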
def scan_seq_mult(
seqs, seq_gc_bins, motifs, nreport, scan_rc, motifs_meanstd=None, zscore=False
):
ret = []
for seq, seq_gc_bin in zip(seqs, seq_gc_bins):
result = scan_sequence(
seq.upper(),
seq_gc_bin,
motifs,
nreport,
scan_rc,
motifs_meanstd=motifs_meanstd,
zscore=zscore,
)
ret.append(result)
return ret
def scan_fa_with_motif_moods(
fo, motifs, matrices, bg, thresholds, nreport, scan_rc=True
):
scanner = MOODS.scan.Scanner(7)
scanner.set_motifs(matrices, bg, thresholds)
ret = []
for name, seq in fo.items():
length = len(seq)
scan_seq = seq.upper()
if scan_rc:
scan_seq = "".join((scan_seq, "N" * 50, rc(scan_seq)))
results = scanner.scan_max_hits(scan_seq, nreport)
for motif, result in zip(motifs, results):
matches = []
for match in result:
strand = 1
pos = match.pos
if scan_rc:
if pos > length:
pos = length - (pos - length - 50) - len(motif)
strand = -1
matches.append((pos, match.score, strand))
ret.append((motif, {name: matches}))
return ret
def scan_fa_with_motif_moods_count(
fo, motifs, matrices, bg, thresholds, nreport, scan_rc=True
):
scanner = MOODS.scan.Scanner(7)
scanner.set_motifs(matrices, bg, thresholds)
ret = []
for name, seq in fo.items():
scan_seq = seq.upper()
if scan_rc:
scan_seq = "".join((scan_seq, "N" * 50, rc(scan_seq)))
results = scanner.counts_max_hits(scan_seq, nreport)
ret.append((name, results))
return ret
def calc_threshold_moods(m, c):
m_min = MOODS.tools.min_score(m)
m_max = MOODS.tools.max_score(m)
return m_min + (m_max - m_min) * c
def scan_it_moods(
infile, motifs, cutoff, bgfile, nreport=1, scan_rc=True, pvalue=None, count=False
):
tmpdir = mkdtemp()
matrices = []
pseudocount = 1e-3
# sys.stderr.write("bgfile: {}\n".format(bgfile))
bg = MOODS.tools.bg_from_sequence_dna("".join(Fasta(bgfile).seqs), 1)
for motif in motifs:
pfmname = os.path.join(tmpdir, "{}.pfm".format(motif.id))
with open(pfmname, "w") as f:
matrix = np.array(motif.pwm).transpose()
for line in [" ".join([str(x) for x in row]) for row in matrix]:
f.write("{}\n".format(line))
matrices.append(MOODS.parsers.pfm_log_odds(pfmname, bg, pseudocount))
thresholds = []
if pvalue is not None:
thresholds = [
MOODS.tools.threshold_from_p(m, bg, float(pvalue)) for m in matrices
]
# sys.stderr.write("{}\n".format(thresholds))
else:
thresholds = [calc_threshold_moods(m, float(cutoff)) for m in matrices]
scanner = MOODS.scan.Scanner(7)
scanner.set_motifs(matrices, bg, thresholds)
config = MotifConfig()
ncpus = int(config.get_default_params()["ncpus"])
fa = Fasta(infile)
chunk = 500
if (len(fa) / chunk) < ncpus:
        chunk = len(fa) // (ncpus + 1)  # integer division: chunk is used as a range() step
jobs = []
func = scan_fa_with_motif_moods
if count:
func = scan_fa_with_motif_moods_count
pool = mp.Pool()
for i in range(0, len(fa), chunk):
jobs.append(
pool.apply_async(
func,
(fa[i : i + chunk], motifs, matrices, bg, thresholds, nreport, scan_rc),
)
)
for job in jobs:
for ret in job.get():
yield ret
class Scanner(object):
"""
scan sequences with motifs
"""
def __init__(self, ncpus=None):
self.config = MotifConfig()
self._threshold = None
self.genome = None
self.background = None
self.meanstd = {}
self.gc_bins = [(0, 1)]
if ncpus is None:
self.ncpus = int(MotifConfig().get_default_params()["ncpus"])
else:
self.ncpus = ncpus
if self.ncpus > 1:
# try:
# ctx = mp.get_context('spawn')
# self.pool = ctx.Pool(processes=self.ncpus)
# except AttributeError:
self.pool = mp.Pool(processes=self.ncpus)
self.use_cache = False
if self.config.get_default_params().get("use_cache", False):
self._init_cache()
def __del__(self):
# Close the pool because of memory leak
if hasattr(self, "pool"):
self.pool.close()
def _init_cache(self):
try:
self.cache = make_region().configure(
"dogpile.cache.pylibmc",
expiration_time=3600,
arguments={"url": ["127.0.0.1"], "binary": True}
# 'dogpile.cache.dbm',
# expiration_time = 3600,
# arguments = {
# 'filename': 'cache.dbm'
# }
)
self.use_cache = True
except Exception as e:
sys.stderr.write("failed to initialize cache\n")
sys.stderr.write("{}\n".format(e))
def set_motifs(self, motifs):
try:
# Check if motifs is a list of Motif instances
motifs[0].to_pwm()
tmp = NamedTemporaryFile(mode="w", delete=False)
for m in motifs:
tmp.write("{}\n".format(m.to_pwm()))
tmp.close()
motif_file = tmp.name
except AttributeError:
motif_file = motifs
self.motifs = motif_file
self.motif_ids = [m.id for m in read_motifs(motif_file)]
self.checksum = {}
if self.use_cache:
chksum = xxhash.xxh64("\n".join(sorted(self.motif_ids))).digest()
self.checksum[self.motifs] = chksum
def _meanstd_from_seqs(self, motifs, seqs):
scan_motifs = [(m, m.pwm_min_score()) for m in motifs]
table = []
for x in self._scan_sequences_with_motif(scan_motifs, seqs, 1, True):
table.append([row[0][0] for row in x])
for (motif, _), scores in zip(scan_motifs, np.array(table).transpose()):
yield motif, np.mean(scores), np.std(scores) # cutoff
def _threshold_from_seqs(self, motifs, seqs, fpr):
scan_motifs = [(m, m.pwm_min_score()) for m in motifs]
table = []
seq_gc_bins = [self.get_seq_bin(seq) for seq in seqs]
for gc_bin, result in zip(
seq_gc_bins, self._scan_sequences_with_motif(scan_motifs, seqs, 1, True)
):
table.append([gc_bin] + [row[0][0] for row in result])
df = pd.DataFrame(table, columns=["gc_bin"] + [m.id for m in motifs])
return df
def set_meanstd(self, gc=False):
if not self.background:
self.set_background(gc=gc)
self.meanstd = {}
seqs = self.background.seqs
if gc:
seq_bins = [s.split(" ")[-1] for s in self.background.ids]
else:
seq_bins = ["0.00-1.00"] * len(seqs)
if gc:
bins = list(set(seq_bins))
else:
bins = ["0.00-1.00"]
motifs = read_motifs(self.motifs)
lock.acquire()
try:
with Cache(CACHE_DIR) as cache:
scan_motifs = []
for bin in bins:
if bin not in self.meanstd:
self.meanstd[bin] = {}
bin_seqs = [s for s, b in zip(seqs, seq_bins) if b == bin]
for motif in motifs:
k = "e{}|{}|{}".format(motif.hash(), self.background_hash, bin)
results = cache.get(k)
if results is None:
scan_motifs.append(motif)
else:
self.meanstd[bin][motif.id] = results
if len(scan_motifs) > 0:
logger.debug("Determining mean and stddev for motifs.")
for motif, mean, std in self._meanstd_from_seqs(
scan_motifs, bin_seqs
):
k = "e{}|{}|{}".format(
motif.hash(), self.background_hash, bin
)
cache.set(k, [mean, std])
self.meanstd[bin][motif.id] = mean, std
# Prevent std of 0
# This should only happen in testing
for motif in motifs:
stds = np.array(
[self.meanstd[gcbin][motif.id][1] for gcbin in bins]
)
idx = stds == 0
if True in idx:
std = np.mean(stds[~idx])
for gcbin in np.array(bins)[idx]:
k = "e{}|{}|{}".format(
motif.hash(), self.background_hash, gcbin
)
mean = self.meanstd[gcbin][motif.id][0]
cache.set(k, [mean, std])
self.meanstd[gcbin][motif.id] = mean, std
except sqlite3.DatabaseError:
print_cluster_error_message()
sys.exit(1)
lock.release()
for gc_bin in self.gc_bins:
gc_bin = "{:.2f}-{:.2f}".format(*gc_bin)
if gc_bin not in self.meanstd:
valid_bins = []
for b in self.gc_bins:
bstr = "{:.2f}-{:.2f}".format(b[0], b[1])
if bstr in self.meanstd:
valid_bins.append(((b[0] + b[1]) / 2, bstr))
v = float(gc_bin.split("-")[1])
_, bstr = sorted(valid_bins, key=lambda x: abs(x[0] - v))[0]
# logger.warn(f"Using {bstr}")
self.meanstd[gc_bin] = self.meanstd[bstr]
def set_background(
self, fname=None, genome=None, size=200, nseq=None, gc=False, gc_bins=None
):
"""Set the background to use for FPR and z-score calculations.
Background can be specified either as a genome name or as the
name of a FASTA file.
Parameters
----------
fname : str, optional
Name of FASTA file to use as background.
genome : str, optional
Name of genome to use to retrieve random sequences.
size : int, optional
Size of genomic sequences to retrieve. The default
is 200.
nseq : int, optional
Number of genomic sequences to retrieve.
"""
if self.background:
return
size = int(size)
if gc_bins is None:
if gc:
gc_bins = [(0.0, 0.2), (0.8, 1)]
for b in np.arange(0.2, 0.799, 0.05):
gc_bins.append((b, b + 0.05))
else:
gc_bins = [(0, 1)]
if nseq is None:
nseq = max(10000, len(gc_bins) * 1000)
if genome and fname:
logger.debug("using genome for background")
fname = None
if fname:
if not os.path.exists(fname):
raise IOError("Background file {} does not exist!".format(fname))
self.background = Fasta(fname)
self.background_hash = file_checksum(fname)
return
if not genome:
if self.genome:
genome = self.genome
else:
raise ValueError("Need either genome or filename for background.")
logger.debug("using background: genome {} with size {}".format(genome, size))
lock.acquire()
try:
with Cache(CACHE_DIR) as cache:
self.background_hash = "d{}:{}:{}:{}".format(
genome, int(size), gc, str(gc_bins)
)
c = cache.get(self.background_hash)
if c:
fa, gc_bins = c
else:
fa = None
if not fa:
if gc:
with NamedTemporaryFile() as tmp:
logger.info("using {} sequences".format(nseq))
gc_bin_bedfile(
tmp.name, genome, number=nseq, length=size, bins=gc_bins
)
fa = as_fasta(tmp.name, genome=genome)
else:
fa = RandomGenomicFasta(genome, size, nseq)
cache.set(self.background_hash, (fa, gc_bins))
except sqlite3.DatabaseError:
print_cluster_error_message()
sys.exit(1)
lock.release()
self.background = fa
if gc_bins:
self.gc_bins = gc_bins
@property
def threshold(self):
if self._threshold is None:
self.set_threshold()
return self._threshold
def set_threshold(self, fpr=None, threshold=None, gc=False):
"""Set motif scanning threshold based on background sequences.
Parameters
----------
fpr : float, optional
Desired FPR, between 0.0 and 1.0.
threshold : float or str, optional
Desired motif threshold, expressed as the fraction of the
difference between minimum and maximum score of the PWM.
Should either be a float between 0.0 and 1.0 or a filename
with thresholds as created by 'gimme threshold'.
"""
if threshold and fpr:
            raise ValueError("Need either fpr or threshold, not both.")
if threshold is None and fpr is None:
if self.genome:
fpr = 0.01
logger.info(f"Using default FPR of {fpr}")
else:
threshold = 0.95
logger.info(
f"Genome not specified, using default threshold of {threshold}."
)
logger.info("This is likely not ideal.")
if fpr:
fpr = float(fpr)
if not (0.0 < fpr < 1.0):
raise ValueError("Parameter fpr should be between 0 and 1")
if not self.motifs:
raise ValueError("please run set_motifs() first")
motifs = read_motifs(self.motifs)
gc_bins = ["{:.2f}-{:.2f}".format(*gc_bin) for gc_bin in self.gc_bins]
if threshold is not None:
d = parse_threshold_values(self.motifs, threshold)
self._threshold = pd.DataFrame(d, index=[0])
self._threshold = self._threshold.join(
pd.DataFrame(gc_bins, index=[0] * len(gc_bins), columns=["gc_bin"])
)
self._threshold = self._threshold.set_index("gc_bin")
return
if not self.background:
try:
self.set_background(gc=gc)
except Exception:
raise ValueError("please run set_background() first")
seqs = self.background.seqs
lock.acquire()
try:
with Cache(CACHE_DIR) as cache:
scan_motifs = []
self._threshold = None
for motif in motifs:
k = "{}|{}|{:.4f}|{}".format(
motif.hash(),
self.background_hash,
fpr,
",".join(sorted(gc_bins)),
)
vals = cache.get(k)
if vals is None:
scan_motifs.append(motif)
else:
if self._threshold is None:
self._threshold = vals.to_frame()
else:
self._threshold[motif.id] = vals
if len(scan_motifs) > 0:
logger.info("determining FPR-based threshold")
df = self._threshold_from_seqs(scan_motifs, seqs, fpr).set_index(
"gc_bin"
)
if self._threshold is None:
self._threshold = df
else:
                        self._threshold = pd.concat((self._threshold, df), axis=1)
import itertools as itt
import pathlib as pl
from configparser import ConfigParser
import joblib as jl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as sst
import seaborn as sns
from statannot import add_stat_annotation
from src.visualization import fancy_plots as fplt
from src.data.cache import set_name
"""
2020-05-??
Used an exponential decay to model the evolution of contextual effects over time. Here the fitted parameters (tau and
y intercept r0) are compared across different treatments (probes, transition pairs), between single cell and population
analysis (dPCA, LDA), and finally between fitting the dprime or its profile of significance.
tau is selected from the fitted significance profile, and r0 from the fitted dprime.
2020-06-30
A further, finer selection is done considering the goodness of the fit;
outlier values tend to correspond with poor fits.
Also compares the R2 goodness of fit with the standard error of the fitted parameters
"""
config = ConfigParser()
config.read_file(open(pl.Path(__file__).parents[2] / 'config' / 'settings.ini'))
# analysis should be created and cached with trp_batch_dprime.py beforehand, using the same meta parameters
meta = {'reliability': 0.1, # r value
'smoothing_window': 0, # ms
'raster_fs': 30,
'transitions': ['silence', 'continuous', 'similar', 'sharp'],
'montecarlo': 1000,
'zscore': True,
'dprime_absolute': None}
# transferable plotting parameters
plt.rcParams['svg.fonttype'] = 'none'
sup_title_size = 30
sub_title_size = 20
ax_lab_size = 15
ax_val_size = 11
full_screen = [19.2, 9.83]
sns.set_style("ticks")
########################################################################################################################
########################################################################################################################
# data frame containing all the important summary data, i.e. exponential decay fits for dprime and significance, for
# all combinations of transition pairs, and probes, for the means across probes, transitions pairs or for both, and
# for the single cell analysis or the dPCA projections
summary_DF_file = pl.Path(config['paths']['analysis_cache']) / 'DF_summary' / set_name(meta)
print('loading cached summary DataFrame')
DF = jl.load(summary_DF_file)
########################################################################################################################
# SC
########################################################################################################################
# compare parameters between different probes or transitions pairs
analyses = ['SC', 'dPCA']
sources = ['dprime', 'significance']
parameters = ['tau', 'r0']
comparisons = ['probe', 'transition_pair']
good_thresh = 0.1
for analysis, source, parameter, compare in itt.product(analyses, sources, parameters, comparisons):
# # for single plot
# analysis = 'SC'
# source = 'dprime'
# parameter = 'tau'
# compare = 'transition_pair'
if compare == 'probe':
ff_probe = DF.probe != 'mean'
ff_trans = DF.transition_pair == 'mean'
elif compare == 'transition_pair':
ff_probe = DF.probe == 'mean'
ff_trans = DF.transition_pair != 'mean'
ff_anal = DF.analysis == analysis
ff_param = DF.parameter == parameter
ff_source = DF.source == source
ff_good = DF.goodness > good_thresh
if analysis == 'SC':
index = 'cellid'
elif analysis in ('dPCA', 'LDA'):
index = 'siteid'
filtered = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source & ff_good,
[index, compare, 'goodness', 'value']]
pivoted = filtered.pivot(index=index, columns=compare, values='value').dropna().reset_index()
molten = pivoted.melt(id_vars=index, var_name=compare)
fig, ax = plt.subplots()
    _ = fplt.paired_comparisons(ax, data=molten, x=compare, y='value', color='gray', alpha=0.3)
ax = sns.boxplot(x=compare, y='value', data=molten, ax=ax, color='gray', width=0.5)
sns.despine(ax=ax)
# no significant comparisons
box_pairs = list(itt.combinations(filtered[compare].unique(), 2))
stat_resutls = fplt.add_stat_annotation(ax, data=molten, x=compare, y='value', test='Wilcoxon',
box_pairs=box_pairs, width=0.5, comparisons_correction=None)
ax.set_ylabel(f'tau (ms)', fontsize=ax_lab_size)
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, horizontalalignment='right')
ax.tick_params(labelsize=ax_val_size)
ax.set_xlabel('', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
fig = ax.figure
fig.set_size_inches((6, 6))
title = f'{analysis} {source}-{parameter} between {compare} goodness {good_thresh}'
fig.suptitle(title)
fig.tight_layout(rect=(0, 0, 1, 0.95))
fplt.savefig(fig, 'SFN20_figures', title)
########################################################################################################################
# Distribution of cells in r0 tau space.
good_thresh = 0.1
r0_source = 'dprime'
tau_source = 'dprime'
ff_anal = DF.analysis == 'SC'
ff_probe = DF.probe == 'mean'
ff_trans = DF.transition_pair == 'mean'
ff_param = DF.parameter == 'r0'
ff_source = DF.source == r0_source
ff_good = DF.goodness > good_thresh
R0 = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source & ff_good,
['region', 'siteid', 'cellid', 'parameter', 'value']]
ff_param = DF.parameter == 'tau'
ff_source = DF.source == tau_source
Tau = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source & ff_good,
['region', 'siteid', 'cellid', 'parameter', 'value']]
filtered = pd.concat([R0, Tau])
pivoted = filtered.pivot_table(index=['region', 'siteid', 'cellid'],
columns='parameter', values='value').dropna().reset_index()
fig, ax = plt.subplots()
ax = sns.regplot(x='r0', y='tau', data=pivoted, color='black')
sns.despine(ax=ax)
# ax.set_ylabel(f'tau (ms)', fontsize=ax_lab_size)
# ax.set_xlabel('amplitude (z-score)', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
_, _, r2, _, _ = sst.linregress(pivoted.r0, pivoted.tau)
fig = ax.figure
fig.set_size_inches((6, 6))
title = f'all cell summary param space {r0_source}_r0 {tau_source}_tau r={r2:.3f} goodness {good_thresh}'
fig.suptitle(title)
fig.tight_layout(rect=(0, 0, 1, 0.95))
fplt.savefig(fig, 'SFN20_figures', title)
#########################################################
# cells in parameter space colored by site
fig, ax = plt.subplots()
# ax = sns.scatterplot(x='r0', y='tau', data=pivoted, color='black')
ax = sns.scatterplot(x='r0', y='tau', hue='siteid', data=pivoted, legend='full')
ax.legend(loc='upper right', fontsize='large', markerscale=1, frameon=False)
sns.despine(ax=ax)
ax.set_ylabel(f'tau (ms)', fontsize=ax_lab_size)
ax.set_xlabel('amplitude (z-score)', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
fig = ax.figure
fig.set_size_inches((6, 6))
title = f'cells in parameter space by site'
fig.suptitle(title, fontsize=sub_title_size)
fig.tight_layout(rect=(0, 0, 1, 0.95))
fplt.savefig(fig, 'SFN20_figures', title)
#########################################################
# cells in parameter space colored by region
fig, ax = plt.subplots()
# ax = sns.scatterplot(x='r0', y='tau', data=pivoted, color='black')
ax = sns.scatterplot(x='r0', y='tau', hue='region', data=pivoted, legend='full')
ax.legend(loc='upper right', fontsize='large', markerscale=1, frameon=False)
sns.despine(ax=ax)
ax.set_ylabel(f'tau (ms)', fontsize=ax_lab_size)
ax.set_xlabel('amplitude (z-score)', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
fig = ax.figure
fig.set_size_inches((6, 6))
title = f'cells in parameter space by region'
fig.suptitle(title, fontsize=sub_title_size)
fig.tight_layout(rect=(0, 0, 1, 0.95))
fplt.savefig(fig, 'SFN20_figures', title)
########################################################################################################################
# single cell comparison between regions and parameters
ff_anal = DF.analysis == 'SC'
ff_probe = DF.probe == 'mean'
ff_trans = DF.transition_pair == 'mean'
ff_param = DF.parameter == 'r0'
ff_source = DF.source == 'dprime'
ff_good = DF.goodness > 0.01
R0 = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source & ff_good,
['region', 'siteid', 'cellid', 'parameter', 'value']]
ff_param = DF.parameter == 'tau'
ff_source = DF.source == 'significance'
Tau = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source & ff_good,
['region', 'siteid', 'cellid', 'parameter', 'value']]
filtered = pd.concat([R0, Tau])
# molten = pivoted.melt(id_vars='cellid', var_name='transition_pair')
g = sns.catplot(x='region', y='value', col='parameter', data=filtered, kind="swarm",
sharex=True, sharey=False)
sns.despine()
# add significnace
for ax, param in zip(np.ravel(g.axes), filtered.parameter.unique()):
sub_filtered = filtered.loc[filtered.parameter == param, :]
box_pairs = [('PEG', 'A1')]
stat_resutls = add_stat_annotation(ax, data=sub_filtered, x='region', y='value', test='Mann-Whitney',
box_pairs=box_pairs, comparisons_correction=None)
if param == 'r0':
param = 'z-score'
elif param == 'tau':
param = 'ms'
ax.set_ylabel(f'{param}', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
ax.set_xlabel('', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
fig = ax.figure
fig.set_size_inches((6, 6))
title = f'SC parameter comparison between regions'
fig.suptitle(title, fontsize=sub_title_size)
fig.tight_layout(rect=(0, 0, 1, 0.95))
fplt.savefig(fig, 'SFN20_figures', title)
########################################################################################################################
# Compares tau between dprime and significance
ff_anal = DF.analysis == 'SC'
ff_probe = DF.probe == 'mean'
ff_trans = DF.transition_pair == 'mean'
ff_param = DF.parameter.isin(['tau', 'r0'])
ff_good = DF.goodness > 0.01
filtered = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_good,
['cellid', 'source', 'parameter', 'value']]
pivoted = filtered.pivot_table(index=['cellid', 'parameter'], columns='source', values='value').dropna().reset_index()
facet_grid = sns.lmplot(x='dprime', y='significance', col='parameter', data=pivoted,
sharex=False, sharey=False, scatter_kws={'color': 'black'}, line_kws={'color': 'black'})
# draws unit line, formats ax
for ax in np.ravel(facet_grid.axes):
_ = fplt.unit_line(ax)
ax.xaxis.label.set_size(ax_lab_size)
ax.yaxis.label.set_size(ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
fig = ax.figure
fig.set_size_inches((16, 8))
title = f'significance vs dprime fitted params comparison'
fig.suptitle(title, fontsize=20)
fig.tight_layout(rect=(0, 0, 1, 0.95))
fplt.savefig(fig, 'SFN20_figures', title)
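# Note: fplt.unit_line and fplt.savefig are project-specific helpers that are not defined in this
# snippet. A minimal sketch of what a unit-line helper might look like, assuming it only overlays
# the identity line y = x across the current axis limits (the real helper may differ):
def _unit_line_sketch(ax, **line_kwargs):
    lims = [min(ax.get_xlim()[0], ax.get_ylim()[0]),
            max(ax.get_xlim()[1], ax.get_ylim()[1])]
    line = ax.plot(lims, lims, linestyle='--', color='gray', **line_kwargs)
    ax.set_xlim(lims)
    ax.set_ylim(lims)
    return line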
########################################################################################################################
# dPCA
########################################################################################################################
# dPCA comparison between regions and parameters
ff_anal = DF.analysis == 'dPCA'
ff_probe = DF.probe == 'mean'
ff_trans = DF.transition_pair == 'mean'
ff_param = DF.parameter == 'r0'
ff_source = DF.source == 'dprime'
R0 = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source,
['region', 'siteid', 'cellid', 'parameter', 'value']]
ff_param = DF.parameter == 'tau'
ff_source = DF.source == 'significance'
ff_good = DF.goodness > 0.01
Tau = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source & ff_good,
['region', 'siteid', 'cellid', 'parameter', 'value']]
filtered = pd.concat([R0, Tau])
# g = sns.catplot(x='region', y='value', col='parameter', data=filtered, kind="violin", cut=0,
# sharex=True, sharey=False)
g = sns.catplot(x='region', y='value', col='parameter', data=filtered, kind="swarm",
sharex=True, sharey=False)
sns.despine()
# add significance
for ax, param in zip(np.ravel(g.axes), filtered.parameter.unique()):
sub_filtered = filtered.loc[filtered.parameter == param, :]
box_pairs = [('PEG', 'A1')]
    stat_results = add_stat_annotation(ax, data=sub_filtered, x='region', y='value', test='Mann-Whitney',
                                       box_pairs=box_pairs, comparisons_correction=None)
if param == 'r0':
param = 'z-score'
elif param == 'tau':
param = 'ms'
ax.set_ylabel(f'{param}', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
ax.set_xlabel('', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
fig = ax.figure
fig.set_size_inches((6, 6))
title = f'dPCA parameter comparison between regions'
fig.suptitle(title, fontsize=sub_title_size)
fig.tight_layout(rect=(0, 0, 1, 0.95))
fplt.savefig(fig, 'SFN20_figures', title)
########################################################################################################################
# SC vs dPCA taus, filtering SC with r0 of dPCA
ff_anal = DF.analysis == 'SC'
ff_probe = DF.probe == 'mean'
ff_trans = DF.transition_pair == 'mean'
ff_param = DF.parameter == 'tau'
ff_source = DF.source == 'significance'
ff_good = DF.goodness > 0.01
sing = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source & ff_good,
['region', 'siteid', 'cellid', 'parameter', 'value']]
sing_pivot = sing.pivot(index='siteid', columns='cellid', values='value')
sing_pivot['max'] = sing_pivot.mean(axis=1)  # note: despite the column name, this stores the per-site mean across cells
ff_anal = DF.analysis == 'dPCA'
pops = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source & ff_good,
['region', 'siteid', 'cellid', 'parameter', 'value']]
pops = pops.set_index('siteid')
toplot = | pd.concat((pops.loc[:, ['region', 'value']], sing_pivot.loc[:, 'max']), axis=1) | pandas.concat |
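# The concat above relies on pandas aligning rows on the shared 'siteid' index rather than on
# position. A toy illustration of that alignment (made-up values, not analysis data):
import pandas as pd
_site_level = pd.DataFrame({'region': ['A1', 'PEG'], 'value': [120.0, 95.0]},
                           index=pd.Index(['site1', 'site2'], name='siteid'))
_cell_summary = pd.Series([110.0, 90.0], index=pd.Index(['site2', 'site1'], name='siteid'), name='max')
_aligned = pd.concat((_site_level, _cell_summary), axis=1)  # rows are matched by index, not by order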
import pandas as pd
import numpy as np
from scipy.special import boxcox1p
from scipy.stats import boxcox_normmax
from scipy.stats import skew
from sklearn.preprocessing import RobustScaler
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import Ridge, Lasso, ElasticNet
from mlxtend.regressor import StackingCVRegressor
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
# Load The Data
TRAIN_FILE_PATH = 'C:/Users/ahlaw/Downloads/train.csv'
TEST_FILE_PATH = 'C:/Users/ahlaw/Downloads/test.csv'
training_data = pd.read_csv(TRAIN_FILE_PATH)
test_data = pd.read_csv(TEST_FILE_PATH)
training_rows = training_data.shape[0]
test_data['SalePrice'] = 0
all_data = pd.concat([training_data, test_data]).reset_index(drop = True)
#remove outliers, 2 rows have very large GrLivArea but very small SalePrice
all_data = all_data.drop(all_data[(all_data['GrLivArea']>4000) & (all_data['SalePrice']<300000) & (all_data['SalePrice'] != 0)].index)
training_rows = training_rows - 2
#Handle missing values
#We impute categorical features with their mode and numerical features with their median, unless the missing values
#have some special meaning or can be handled by observing the other features
#We impute the categorical features below with their mode
misc_features = ['Functional', 'Electrical', 'KitchenQual', 'Exterior1st', 'Exterior2nd', 'SaleType', 'Utilities']
for feature in misc_features:
all_data[feature].fillna(all_data[feature].mode()[0], inplace = True)
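#A minimal sketch of the default strategy described above (mode for categorical columns, median for
#numerical ones), assuming simple global statistics are an acceptable fallback whenever no
#domain-specific rule applies:
from pandas.api.types import is_numeric_dtype
def simple_impute(frame, columns):
    for col in columns:
        if is_numeric_dtype(frame[col]):
            frame[col].fillna(frame[col].median(), inplace=True)
        else:
            frame[col].fillna(frame[col].mode()[0], inplace=True)
    return frame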
#Missing values of PoolQC indicate the absence of a pool. We can check whether any values are missing even though a pool exists.
# all_data[(all_data['PoolArea'] != 0) & (all_data['PoolQC'].isnull())]
#After executing the above statement we can see that 3 rows are missing PoolQC despite having a pool. We can estimate PoolQC from the
#OverallQual
all_data.loc[2420, 'PoolQC'] = 'Fa'
all_data.loc[2504, 'PoolQC'] = 'Gd'
all_data.loc[2600, 'PoolQC'] = 'Fa'
#Missing values of GarageArea indicate no garage
all_data['GarageArea'].fillna(0, inplace = True)
#For rows where there is a garage we simply impute Garage categorical features with their mode and numerical
#features with their median
garage_feats = ['GarageQual', 'GarageCond', 'GarageFinish']
for feat in garage_feats:
all_data.loc[(all_data['GarageArea'] != 0) & (all_data[feat].isnull()), feat] = all_data[feat].mode()[0]
all_data.loc[(all_data['GarageArea'] != 0) &
(all_data['GarageYrBlt'].isnull()), 'GarageYrBlt'] = all_data['GarageYrBlt'].median()
all_data.loc[(all_data['GarageArea'] != 0) &
(all_data['GarageCars'].isnull()), 'GarageCars'] = all_data['GarageCars'].median()
#Missing TotalBsmtSF indicates absence of a Basement
all_data['TotalBsmtSF'].fillna(0, inplace = True)
#Some rows have missing basement features even though a basement exists; we can check this by executing the statement below
# all_data[(all_data['TotalBsmtSF'] != 0) & (all_data['BsmtQual'].isnull())]
#For BsmtQual we can get an estimate using the size of the basement and BsmtCond
all_data.loc[2217, 'BsmtQual'] = 'Fa'
all_data.loc[2218, 'BsmtQual'] = 'TA'
#Again, we can check for rows with a basement but no BsmtCond Values using below statement
# all_data[(all_data['TotalBsmtSF'] != 0) & (all_data['BsmtCond'].isnull())]
#We can estimate BsmtCond using BsmtFinType1 and BsmtFinType2
all_data.loc[2040, 'BsmtCond'] = 'Gd'
all_data.loc[2185, 'BsmtCond'] = 'Fa'
all_data.loc[2524, 'BsmtCond'] = 'TA'
#For the rows that have a basement but no BsmtExposure value, we can observe they are unfinished, so it is safe to assume
#they won't have any exposure
all_data.loc[(all_data['TotalBsmtSF'] != 0) &
(all_data['BsmtExposure'].isnull()), 'BsmtExposure'] = 'No'
#Use the below statement to check missing BsmtFinType2 values in houses with basements
# all_data[(all_data['TotalBsmtSF'] != 0) & (all_data['BsmtFinType2'].isnull())]
#The house below has a high OverallQual and a moderate price, so we can get a good estimate
all_data.loc[332, 'BsmtFinType2'] = 'ALQ'
#A missing MasVnrArea indicates the absence of a veneer
all_data['MasVnrArea'].fillna(0, inplace = True)
#We can find houses with veneer but missing MasVnrType using below statement
# all_data[(all_data['MasVnrArea'] != 0) & (all_data['MasVnrType'].isnull())]
#This house has a very low OverallQual, so it is safe to assume it won't have any MasVnrType
all_data.loc[2610, 'MasVnrType'] = 'None'
#Houses in the same MSSubClass should have the same MSZoning
all_data['MSZoning'] = all_data.groupby('MSSubClass')['MSZoning'].transform(lambda x: x.fillna(x.mode()[0]))
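#groupby(...).transform(...) fills each missing value with a statistic computed within that row's
#own group. A toy illustration of the pattern (made-up values, not pipeline data):
_demo = pd.DataFrame({'grp': ['a', 'a', 'b', 'b'], 'val': ['x', None, 'y', None]})
_demo['val'] = _demo.groupby('grp')['val'].transform(lambda x: x.fillna(x.mode()[0]))
#The missing value in group 'a' becomes 'x' and the one in group 'b' becomes 'y'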
#For the categorical features below, missing values indicate their absence
feats = ['PoolQC', 'MiscFeature', 'Alley', 'FireplaceQu', 'Fence', 'GarageQual', 'GarageCond', 'GarageFinish', 'GarageType',
'BsmtQual', 'BsmtCond', 'BsmtFinType1', 'BsmtFinType2', 'BsmtExposure', 'MasVnrType']
for feat in feats:
all_data[feat].fillna('None', inplace = True)
#Lotfrontage should be similar for houses in the same neighborhood
all_data['LotFrontage'] = all_data.groupby('Neighborhood')['LotFrontage'].transform(lambda x: x.fillna(x.median()))
#For the numerical features below, missing values indicate their absence
feats = ['GarageYrBlt', 'BsmtHalfBath', 'BsmtFullBath', 'GarageCars', 'BsmtUnfSF', 'BsmtFinSF2', 'BsmtFinSF1']
for feat in feats:
all_data[feat].fillna(0, inplace = True)
#This takes care of any rows with an implausible GarageYrBlt (later than the year the house was sold)
all_data.loc[all_data['GarageYrBlt'] > all_data['YrSold'], 'GarageYrBlt'] = all_data['YrSold']
#MSSubClass is stored as a numerical feature but is actually categorical, so we convert it
all_data['MSSubClass'] = all_data['MSSubClass'].astype('str')
#Linear models behave well with centred data, so we will try to centre some of the skewed features
numeric_feats = list(all_data.select_dtypes(include = np.number).columns)
numeric_feats = [e for e in numeric_feats if e not in ('Id', 'SalePrice')]
skewness = all_data[numeric_feats].apply(lambda x: skew(x)).sort_values(ascending=False)
high_skewness = skewness[(skewness) > 0.75]
skewed_feats = list(high_skewness.index)
for feat in skewed_feats:
all_data[feat]= boxcox1p(all_data[feat], boxcox_normmax(all_data[feat]+1))
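#Optional sanity check (illustrative, not required by the pipeline): re-compute the skewness after
#the Box-Cox transform to confirm that it has dropped for the transformed features
_skew_after = all_data[skewed_feats].apply(lambda x: skew(x)).sort_values(ascending=False)
#print(_skew_after.head()) should now show values well below the 0.75 threshold for most features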
#Let's centre SalePrice as it is also skewed
all_data["SalePrice"] = np.log1p(all_data["SalePrice"])
#Let's create new features from pre-existing features
all_data['Total_sqr_footage'] = (all_data['BsmtFinSF1'] + all_data['BsmtFinSF2'] +
all_data['1stFlrSF'] + all_data['2ndFlrSF'])
all_data['Total_Bathrooms'] = (all_data['FullBath'] + (0.5*all_data['HalfBath']) +
all_data['BsmtFullBath'] + (0.5*all_data['BsmtHalfBath']))
all_data['Total_porch_sf'] = (all_data['OpenPorchSF'] + all_data['3SsnPorch'] +
all_data['EnclosedPorch'] + all_data['ScreenPorch'] + all_data['WoodDeckSF'])
all_data['HasPool'] = all_data['PoolArea'].apply(lambda x: 1 if x > 0 else 0)
all_data['Has2ndFloor'] = all_data['2ndFlrSF'].apply(lambda x: 1 if x > 0 else 0)
all_data['HasGarage'] = all_data['GarageArea'].apply(lambda x: 1 if x > 0 else 0)
all_data['HasBsmt'] = all_data['TotalBsmtSF'].apply(lambda x: 1 if x > 0 else 0)
all_data['HasFireplace'] = all_data['Fireplaces'].apply(lambda x: 1 if x > 0 else 0)
all_data['Age'] = all_data['YrSold'] - all_data['YearBuilt']
all_data['RemodAge'] = all_data['YrSold'] - all_data['YearRemodAdd']
#Utilities and Street have very little variance and do not provide any valuable information to our model
#YearRemodAdd and YearBuilt are actually categorical features but have too many distinct values, making one-hot encoding
#them impractical, so we created Age and RemodAge features in their stead.
all_data = all_data.drop(['Utilities', 'Street', 'YearRemodAdd', 'YearBuilt'], axis=1)
#One Hot encoding the categorical features
final_data = | pd.get_dummies(all_data) | pandas.get_dummies |
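#The estimators imported at the top of this script (Ridge, Lasso, ElasticNet, StackingCVRegressor,
#XGBRegressor, LGBMRegressor) are not used in the snippet shown here. A minimal sketch of how they
#are often combined for this kind of problem; the hyperparameters below are illustrative
#assumptions, not tuned values:
ridge = make_pipeline(RobustScaler(), Ridge(alpha=10.0))
lasso = make_pipeline(RobustScaler(), Lasso(alpha=0.0005, max_iter=10000))
elastic = make_pipeline(RobustScaler(), ElasticNet(alpha=0.0005, l1_ratio=0.9, max_iter=10000))
xgb = XGBRegressor(n_estimators=1000, learning_rate=0.05)
lgbm = LGBMRegressor(n_estimators=1000, learning_rate=0.05)
stack = StackingCVRegressor(regressors=(ridge, lasso, elastic, xgb, lgbm),
                            meta_regressor=xgb, use_features_in_secondary=True)
#Typical usage (hypothetical): split final_data back into train and test rows using training_rows,
#fit the stack on the training features and the logged SalePrice, then apply np.expm1 to the
#predictions before writing a submission file.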
import pandas as pd
import pytest
pd.set_option("display.max_rows", 500)
pd.set_option("display.max_columns", 500)
| pd.set_option("display.width", 1000) | pandas.set_option |
from datetime import (
datetime,
timedelta,
timezone,
)
import numpy as np
import pytest
import pytz
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
class TestSeriesFillNA:
def test_fillna_nat(self):
series = Series([0, 1, 2, NaT.value], dtype="M8[ns]")
filled = series.fillna(method="pad")
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="pad")
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
series = Series([NaT.value, 0, 1, 2], dtype="M8[ns]")
filled = series.fillna(method="bfill")
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="bfill")
filled2 = df.fillna(value=series[1])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
def test_fillna_value_or_method(self, datetime_series):
msg = "Cannot specify both 'value' and 'method'"
with pytest.raises(ValueError, match=msg):
datetime_series.fillna(value=0, method="ffill")
def test_fillna(self):
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method="ffill"))
ts[2] = np.NaN
exp = Series([0.0, 1.0, 1.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="ffill"), exp)
exp = Series([0.0, 1.0, 3.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="backfill"), exp)
exp = Series([0.0, 1.0, 5.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
msg = "Must specify a fill 'value' or 'method'"
with pytest.raises(ValueError, match=msg):
ts.fillna()
def test_fillna_nonscalar(self):
# GH#5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.0])
tm.assert_series_equal(result, expected)
result = s1.fillna({})
tm.assert_series_equal(result, s1)
result = s1.fillna(Series((), dtype=object))
tm.assert_series_equal(result, s1)
result = s2.fillna(s1)
tm.assert_series_equal(result, s2)
result = s1.fillna({0: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna({1: 1})
tm.assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
tm.assert_series_equal(result, s1)
def test_fillna_aligns(self):
s1 = Series([0, 1, 2], list("abc"))
s2 = Series([0, np.nan, 2], list("bac"))
result = s2.fillna(s1)
expected = Series([0, 0, 2.0], list("bac"))
tm.assert_series_equal(result, expected)
def test_fillna_limit(self):
ser = Series(np.nan, index=[0, 1, 2])
result = ser.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
result = ser.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
def test_fillna_dont_cast_strings(self):
# GH#9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ["0", "1.5", "-0.3"]
for val in vals:
ser = Series([0, 1, np.nan, np.nan, 4], dtype="float64")
result = ser.fillna(val)
expected = Series([0, 1, val, val, 4], dtype="object")
tm.assert_series_equal(result, expected)
def test_fillna_consistency(self):
# GH#16402
# fillna with a tz aware to a tz-naive, should result in object
ser = Series([Timestamp("20130101"), NaT])
result = ser.fillna(Timestamp("20130101", tz="US/Eastern"))
expected = Series(
[Timestamp("20130101"), Timestamp("2013-01-01", tz="US/Eastern")],
dtype="object",
)
tm.assert_series_equal(result, expected)
msg = "The 'errors' keyword in "
with tm.assert_produces_warning(FutureWarning, match=msg):
# where (we ignore the errors=)
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
# with a non-datetime
result = ser.fillna("foo")
expected = Series([Timestamp("20130101"), "foo"])
tm.assert_series_equal(result, expected)
# assignment
ser2 = ser.copy()
ser2[1] = "foo"
tm.assert_series_equal(ser2, expected)
def test_fillna_downcast(self):
# GH#15277
# infer int64 from float64
ser = Series([1.0, np.nan])
result = ser.fillna(0, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
ser = Series([1.0, np.nan])
result = ser.fillna({1: 0}, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
def test_timedelta_fillna(self, frame_or_series):
# GH#3371
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
td = ser.diff()
obj = frame_or_series(td)
# reg fillna
result = obj.fillna(Timedelta(seconds=0))
expected = Series(
[
timedelta(0),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# interpreted as seconds, no longer supported
msg = "value should be a 'Timedelta', 'NaT', or array of those. Got 'int'"
with pytest.raises(TypeError, match=msg):
obj.fillna(1)
result = obj.fillna(Timedelta(seconds=1))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(timedelta(days=1, seconds=1))
expected = Series(
[
timedelta(days=1, seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(np.timedelta64(10 ** 9))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(NaT)
expected = Series(
[
NaT,
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
],
dtype="m8[ns]",
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# ffill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.ffill()
expected = td.fillna(Timedelta(seconds=0))
expected[0] = np.nan
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# bfill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.bfill()
expected = td.fillna(Timedelta(seconds=0))
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
def test_datetime64_fillna(self):
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
ser[2] = np.nan
# ffill
result = ser.ffill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
# bfill
result = ser.bfill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
def test_datetime64_fillna_backfill(self):
# GH#6587
# make sure that we are treating as integer when filling
msg = "containing strings is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
# this also tests inference of a datetime-like with NaT's
ser = Series([NaT, NaT, "2013-08-05 15:30:00.000001"])
expected = Series(
[
"2013-08-05 15:30:00.000001",
"2013-08-05 15:30:00.000001",
"2013-08-05 15:30:00.000001",
],
dtype="M8[ns]",
)
result = ser.fillna(method="backfill")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", ["US/Eastern", "Asia/Tokyo"])
def test_datetime64_tz_fillna(self, tz):
# DatetimeLikeBlock
ser = Series(
[
Timestamp("2011-01-01 10:00"),
NaT,
Timestamp("2011-01-03 10:00"),
NaT,
]
)
null_loc = Series([False, True, False, True])
result = ser.fillna(Timestamp("2011-01-02 10:00"))
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-02 10:00"),
]
)
tm.assert_series_equal(expected, result)
# check s is not changed
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz))
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna("AAA")
expected = Series(
[
Timestamp("2011-01-01 10:00"),
"AAA",
Timestamp("2011-01-03 10:00"),
"AAA",
],
dtype=object,
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{
1: Timestamp("2011-01-02 10:00", tz=tz),
3: Timestamp("2011-01-04 10:00"),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{1: Timestamp("2011-01-02 10:00"), 3: Timestamp("2011-01-04 10:00")}
)
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
# DatetimeTZBlock
idx = DatetimeIndex(["2011-01-01 10:00", NaT, "2011-01-03 10:00", NaT], tz=tz)
ser = Series(idx)
assert ser.dtype == f"datetime64[ns, {tz}]"
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-02 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna( | Timestamp("2011-01-02 10:00", tz=tz) | pandas.Timestamp |
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 1 18:10:18 2019
@author: <NAME>
This code plots the keypoint coordinates vs. time in order to assign the maximum
value from each plot to the corresponding real-world distance measurement. This will be
the label.
Coding Improvement Note: Make use of functions for things like this.
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import find_peaks
from scipy.signal import medfilt
from scipy.signal import peak_prominences
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression
#Edit data within file.
#Open file and set to a certain variable
ankle_df = pd.read_csv('jointTracker (16).csv', header=None) #This file has me pretty clearly tracked
rknee1_df = pd.read_csv('20191002_rknee_pos_1st.csv', header=None)
rknee2_df = pd.read_csv('20191002_rknee_pos_2nd.csv', header=None)
rknee3_df = pd.read_csv('20191002_rknee_pos_3rd.csv', header=None)
rknee4_df = pd.read_csv('20191002_rknee_pos_4th.csv', header=None)
rknee5_df = pd.read_csv('20191002_rknee_pos_5th.csv', header=None)
rknee6_df = pd.read_csv('20191002_rknee_pos_6th.csv', header=None)
rknee7_df = pd.read_csv('20191002_rknee_pos_7th.csv', header=None)
rknee8_df = pd.read_csv('20191002_rknee_pos_8th.csv', header=None)
real_measures = np.array([32,33,32,35,35,
32,32,32,33,35,
34,36,35,35,34,34,35,35,34,35,
31,33,37,34,33,33,33,35,35,35,
30,31,33,23,25,28,28,29,31,42,
32,31.5,24,29,37,36,31,34,28,33.5,
38,38,42,42,42,41,43,38,39,40,
32,34,41,36,36,35,37,36,38,40]) #Document real measures
real_measures_df = pd.DataFrame(data=real_measures[0:]) #Convert to a DataFrame
#Tabulate height and weight columns
heights_df = | pd.DataFrame({"Height": [69]*50 + [69.5]*10 + [67]*10}) | pandas.DataFrame |
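#The scipy helpers imported above (medfilt, find_peaks, peak_prominences) are not used in the
#snippet shown here. A minimal sketch of the peak-extraction step the docstring describes,
#assuming column 1 of a tracked-joint file holds the vertical keypoint coordinate over time
#(the real column layout and units may differ):
_trace = medfilt(rknee1_df.iloc[:, 1].values, kernel_size=5)  # smooth out tracking jitter
_peaks, _ = find_peaks(_trace, distance=10)                   # candidate maxima
_prominences = peak_prominences(_trace, _peaks)[0]            # how pronounced each peak is
#The most prominent peaks would then be paired with the entries of real_measures_df to serve as labels.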
"""
Tests encoding functionality during parsing
for all of the parsers defined in parsers.py
"""
from io import BytesIO
import os
import tempfile
import numpy as np
import pytest
from pandas import DataFrame
import pandas._testing as tm
def test_bytes_io_input(all_parsers):
encoding = "cp1255"
parser = all_parsers
data = BytesIO("שלום:1234\n562:123".encode(encoding))
result = parser.read_csv(data, sep=":", encoding=encoding)
expected = DataFrame([[562, 123]], columns=["שלום", "1234"])
tm.assert_frame_equal(result, expected)
def test_read_csv_unicode(all_parsers):
parser = all_parsers
data = BytesIO("\u0141aski, Jan;1".encode("utf-8"))
result = parser.read_csv(data, sep=";", encoding="utf-8", header=None)
expected = DataFrame([["\u0141aski, Jan", 1]])
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
import base64
import io
import textwrap
import dash
import dash_core_components as dcc
import dash_html_components as html
import gunicorn
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
import flask
import pandas as pd
import urllib.parse
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
import numpy as np
import math
import scipy.stats
import dash_table
from dash_table.Format import Format, Scheme
from colour import Color
import dash_bootstrap_components as dbc
# from waitress import serve
external_stylesheets = [dbc.themes.BOOTSTRAP, 'https://codepen.io/chriddyp/pen/bWLwgP.css',
"https://codepen.io/sutharson/pen/dyYzEGZ.css",
"https://fonts.googleapis.com/css2?family=Raleway&display=swap",
"https://codepen.io/chriddyp/pen/brPBPO.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
# "external_url": "https://codepen.io/chriddyp/pen/brPBPO.css"
# https://raw.githubusercontent.com/aaml-analytics/pca-explorer/master/LoadingStatusStyleSheet.css
styles = {
'pre': {
'border': 'thin lightgrey solid',
'overflowX': 'scroll'
}
}
tabs_styles = {'height': '40px', 'font-family': 'Raleway', 'fontSize': 14}
tab_style = {
'borderBottom': '1px solid #d6d6d6',
'padding': '6px',
'Weight': 'bold'
}
tab_selected_style = {
'borderTop': '3px solid #333333',
'borderBottom': '1px solid #d6d6d6 ',
'backgroundColor': '#f6f6f6',
'color': '#333333',
# 'fontColor': '#004a4a',
'fontWeight': 'bold',
'padding': '6px'
}
# APP ABOUT DESCRIPTION
MOF_tool_about = textwrap.wrap(' These tools aim to provide a reproducible and consistent data visualisation platform '
'where experimental and computational researchers can use big data and statistical '
'analysis to find the best materials for specific applications. Principal Component '
'Analysis (PCA) is a dimension reduction technique that can be used to reduce a large '
'set of observable variables to a smaller set of latent variables that still contain '
'most of the information in the large set (feature extraction). This is done by '
'transforming a number of (possibly) correlated variables into some number of orthogonal '
'(uncorrelated) variables called principal components to find the directions of maximal '
'variance. PCA can be used to ease data visualisation by having fewer dimensions to plot '
'or be used as a pre-processing step before using another Machine Learning (ML)'
' algorithm for regression '
'and classification tasks. PCA can be used to improve an ML algorithm performance, '
'reduce overfitting and reduce noise in data.',
width=50)
Scree_plot_about = textwrap.wrap(' The Principal Component Analysis Visualisation Tools runs PCA for the user and '
'populates a Scree plot. This plot allows the user to determine if PCA is suitable '
'for '
                                 'their dataset and whether they can accept an X% drop in explained variance in order to '
                                 'have fewer dimensions.', width=50)
Feature_correlation_filter = textwrap.wrap("Feature correlation heatmaps provide users with feature analysis and "
"feature principal component analysis. This tool will allow users to see the"
" correlation between variables and the"
" covariances/correlations between original variables and the "
"principal components (loadings)."
, width=50)
plots_analysis = textwrap.wrap('Users can keep all variables as features or drop certain variables to produce a '
'Biplot, cos2 plot and contribution plot. The score plot is used to look for clusters, '
'trends, and outliers in the first two principal components. The loading plot is used to'
' visually interpret the first two principal components. The biplot overlays the score '
'plot and the loading plot on the same graph. The squared cosine (cos2) plot shows '
'the importance of a component for a given observation i.e. measures '
'how much a variable is represented in a component. The contribution plot contains the '
'contributions (%) of the variables to the principal components', width=50, )
data_table_download = textwrap.wrap("The user's inputs from the 'Plots' tab will provide the output of the data tables."
" The user can download the scores, eigenvalues, explained variance, "
"cumulative explained variance, loadings, "
"cos2 and contributions from the populated data tables. "
"Note: Wait for user inputs to be"
" computed (faded tab app will return to the original colour) before downloading the"
" data tables. ", width=50)
MOF_GH = textwrap.wrap(" to explore AAML's sample data and read more on"
" AAML's Principal Component Analysis Visualisation Tool Manual, FAQ's & Troubleshooting"
" on GitHub... ", width=50)
####################
# APP LAYOUT #
####################
fig = go.Figure()
fig1 = go.Figure()
app.layout = html.Div([
html.Div([
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/UOC.png',
height='35', width='140', style={'display': 'inline-block', 'padding-left': '1%'}),
html.Img(src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/A2ML-logo.png',
height='50', width='125', style={'float': 'right', 'display': 'inline-block', 'padding-right': '2%'}),
html.H1("Principal Component Analysis Visualisation Tools",
style={'display': 'inline-block', 'padding-left': '11%', 'text-align': 'center', 'fontSize': 36,
'color': 'white', 'font-family': 'Raleway'}),
html.H1("...", style={'fontColor': '#3c3c3c', 'fontSize': 6})
], style={'backgroundColor': '#333333'}),
html.Div([html.A('Refresh', href='/')], style={}),
html.Div([
html.H2("Upload Data", style={'fontSize': 24, 'font-family': 'Raleway', 'color': '#333333'}, ),
html.H3("Upload .txt, .csv or .xls files to starting exploring data...", style={'fontSize': 16,
'font-family': 'Raleway'}),
dcc.Store(id='csv-data', storage_type='session', data=None),
html.Div([dcc.Upload(
id='data-table-upload',
children=html.Div([html.Button('Upload File')],
style={'height': "60px", 'borderWidth': '1px',
'borderRadius': '5px',
'textAlign': 'center',
}),
multiple=False
),
html.Div(id='output-data-upload'),
]), ], style={'display': 'inline-block', 'padding-left': '1%', }),
html.Div([dcc.Tabs([
dcc.Tab(label='About', style=tab_style, selected_style=tab_selected_style,
children=[html.Div([html.H2(" What are AAML's Principal Component Analysis Visualisation Tools?",
style={'fontSize': 18, 'font-family': 'Raleway', 'font-weight': 'bold'
}),
html.Div([' '.join(MOF_tool_about)]
, style={'font-family': 'Raleway'}),
html.H2(["Scree Plot"],
style={'fontSize': 18,
'font-family': 'Raleway', 'font-weight': 'bold'}),
html.Div([' '.join(Scree_plot_about)], style={'font-family': 'Raleway'}),
html.H2(["Feature Correlation"], style={'fontSize': 18,
'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(Feature_correlation_filter)], style={'font-family': 'Raleway', }),
html.H2(["Plots"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(plots_analysis)], style={'font-family': 'Raleway'}),
html.H2(["Data tables"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(data_table_download)], style={'font-family': 'Raleway'}),
# ADD LINK
html.Div([html.Plaintext(
[' Click ', html.A('here ',
href='https://github.com/aaml-analytics/pca-explorer')],
style={'display': 'inline-block',
'fontSize': 14, 'font-family': 'Raleway'}),
html.Div([' '.join(MOF_GH)], style={'display': 'inline-block',
'fontSize': 14,
'font-family': 'Raleway'}),
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof'
'-explorer/master/github.png',
height='40', width='40',
style={'display': 'inline-block', 'float': "right"
})
]
, style={'display': 'inline-block'})
], style={'backgroundColor': '#ffffff', 'padding-left': '1%'}
)]),
dcc.Tab(label='Scree Plot', style=tab_style, selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='PC-Eigen-plot')
],
style={'display': 'inline-block',
'width': '49%'}),
html.Div([dcc.Graph(id='PC-Var-plot')
], style={'display': 'inline-block', 'float': 'right',
'width': '49%'}),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:", dcc.RadioItems(
id='outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '49%', 'padding-left': '1%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-scree',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Label(["You should attempt to use at least..."
, html.Div(id='var-output-container-filter')])
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["As a rule of thumb for the Scree Plot"
" Eigenvalues, the point where the slope of the curve "
"is clearly "
"leveling off (the elbow), indicates the number of "
"components that "
"should be retained as significant."])
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Feature correlation', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([html.Div([dcc.Graph(id='PC-feature-heatmap')
], style={'width': '47%',
'display': 'inline-block',
'float': 'right'}),
html.Div([dcc.Graph(id='feature-heatmap')
], style={'width': '51%',
'display': 'inline-block',
'float': 'left'}),
html.Div([html.Label(["Loading colour bar range:"
, html.Div(
id='color-range-container')])
], style={
'fontSize': 12,
'float': 'right',
'width': '100%',
'padding-left': '85%'}
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='PC-feature-outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label(
["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-heatmap',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([html.Label(["Select color scale:",
dcc.RadioItems(
id='colorscale',
options=[{'label': i, 'value': i}
for i in
['Viridis', 'Plasma']],
value='Plasma'
)]),
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("There are usually two ways multicollinearity, "
"which is when there are a number of variables "
"that are highly correlated, is dealt with:"),
html.P("1) Use PCA to obtain a set of orthogonal ("
"not correlated) variables to analyse."),
html.P("2) Use correlation of determination (R²) to "
"determine which variables are highly "
"correlated and use only 1 in analysis. "
"Cut off for highly correlated variables "
"is ~0.7."),
html.P(
"In any case, it depends on the machine learning algorithm you may apply later. For correlation robust algorithms,"
" such as Random Forest, correlation of features will not be a concern. For non-correlation robust algorithms such as Linear Discriminant Analysis, "
"all high correlation variables should be removed.")
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["Note: Data has been standardised (scale)"])
], style={'padding-left': '1%'})
])
]),
dcc.Tab(label='Plots', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([html.P("Selecting Features")], style={'padding-left': '1%',
'font-weight': 'bold'}),
html.Div([
html.P("Input here affects all plots, datatables and downloadable data output"),
html.Label([
"Would you like to analyse all variables or choose custom variables to "
"analyse:",
dcc.RadioItems(
id='all-custom-choice',
options=[{'label': 'All',
'value': 'All'},
{'label': 'Custom',
'value': 'Custom'}],
value='All'
)])
], style={'padding-left': '1%'}),
html.Div([
html.P("For custom variables input variables you would not like as features in your PCA:"),
html.Label(
[
"Note: Only input numerical variables (non-numerical variables have already "
"been removed from your dataframe)",
dcc.Dropdown(id='feature-input',
multi=True,
)])
], style={'padding': 10, 'padding-left': '1%'}),
]), dcc.Tabs(id='sub-tabs1', style=tabs_styles,
children=[
dcc.Tab(label='Biplot (Scores + loadings)', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='biplot', figure=fig)
], style={'height': '100%', 'width': '75%',
'padding-left': '20%'},
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-biplot',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-biplot',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([
html.Label([
"Graph Update to show either loadings (Loading Plot) or "
"scores and loadings (Biplot):",
dcc.RadioItems(
id='customvar-graph-update',
options=[{'label': 'Biplot',
'value': 'Biplot'},
{'label': 'Loadings',
'value': 'Loadings'}],
value='Biplot')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix. PCA is an unsupervised machine learning technique - it only "
"looks at the input features and does not take "
"into account the output or the target"
" (response) variable.")],
style={'padding-left': '1%'}),
html.Div([
html.P("For variables you have dropped..."),
html.Label([
"Would you like to introduce a first target variable"
" into your data visualisation?"
" (Graph type must be Biplot): "
"",
dcc.RadioItems(
id='radio-target-item',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select first target variable for color scale of scores: ",
dcc.Dropdown(
id='color-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Would you like to introduce a second target variable"
" into your data visualisation??"
" (Graph type must be Biplot):",
dcc.RadioItems(
id='radio-target-item-second',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select second target variable for size scale of scores:",
dcc.Dropdown(
id='size-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([html.Label(["Size range:"
, html.Div(
id='size-second-target-container')])
], style={'display': 'inline-block',
'float': 'right',
'padding-right': '5%'}
),
html.Div([
html.Br(),
html.P(
"A loading plot shows how "
"strongly each characteristic (variable)"
" influences a principal component. The angles between the vectors"
" tell us how characteristics correlate with one another: "),
html.P("1) When two vectors are close, forming a small angle, the two "
"variables they represent are positively correlated. "),
html.P(
"2) If they meet each other at 90°, they are not likely to be correlated. "),
html.P(
"3) When they diverge and form a large angle (close to 180°), they are negative correlated."),
html.P(
"The Score Plot involves the projection of the data onto the PCs in two dimensions."
"The plot contains the original data but in the rotated (PC) coordinate system"),
html.P(
"A biplot merges a score plot and loading plot together.")
], style={'padding-left': '1%'}
),
]),
dcc.Tab(label='Cos2', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='cos2-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-cos2',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'padding-left': '1%',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-cos2',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The squared cosine shows the importance of a "
"component for a given observation i.e. "
"measures "
" how much a variable is represented in a "
"component")
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Contribution', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='contrib-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-contrib',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
], style={'padding-left': '1%'})
], style={'display': 'inline-block',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-contrib',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The contribution plot contains the "
"contributions (in percentage) of the "
"variables to the principal components")
], style={'padding-left': '1%'}),
])
])
]),
dcc.Tab(label='Data tables', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([
html.Label(
["Note: Input in 'Plots' tab will provide output of data tables and the"
" downloadable PCA data"])
], style={'font-weight': 'bold', 'padding-left': '1%'}),
html.Div([html.A(
'Download PCA Data (scores for each principal component)',
id='download-link',
href="",
target="_blank"
)], style={'padding-left': '1%'}),
html.Div([html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(id="eigenA-outlier",
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])], style={'padding-left': '1%',
'display': 'inline-block', 'width': '49%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-data-table',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Div([
html.Label(["Correlation between Features"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-correlation',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-correlation-container'),
]),
html.Div([html.A(
'Download Feature Correlation data',
id='download-link-correlation',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Eigen Analysis of the correlation matrix"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-eigenA',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-eigenA-container'),
]),
html.Div([html.A(
'Download Eigen Analysis data',
id='download-link-eigenA',
href="",
download='Eigen_Analysis_data.csv',
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Loadings (Feature and PC correlation) from PCA"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-loadings',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-loadings-container'),
]),
html.Div([html.A(
'Download Loadings data',
id='download-link-loadings',
download='Loadings_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Cos2 from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-cos2',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-cos2-container'),
]),
html.Div([html.A(
'Download Cos2 data',
id='download-link-cos2',
download='Cos2_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Contributions from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-contrib',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-contrib-container'),
]),
html.Div([html.A(
'Download Contributions data',
id='download-link-contrib',
download='Contributions_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
])])
])
], style={'font-family': 'Raleway'})])
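# A minimal sketch of the R²-based multicollinearity filter described in the 'Feature correlation'
# tab text above; the ~0.7 cut-off is the rule of thumb quoted there, and this helper is an
# illustrative assumption rather than part of the app's callbacks:
def highly_correlated_columns(frame, threshold=0.7):
    r_squared = frame.corr() ** 2  # coefficient of determination between feature pairs
    upper = r_squared.where(np.triu(np.ones(r_squared.shape), k=1).astype(bool))
    return [col for col in upper.columns if (upper[col] > threshold).any()]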
# READ FILE
def parse_contents(contents, filename):
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
try:
if 'csv' in filename:
# Assume that the user uploaded a CSV file
df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
            df = df.fillna(0)
elif 'xls' in filename:
# Assume that the user uploaded an excel file
df = pd.read_excel(io.BytesIO(decoded))
            df = df.fillna(0)
        elif 'txt' in filename or 'tsv' in filename:
            df = pd.read_csv(io.StringIO(decoded.decode('utf-8')), delimiter=r'\s+')
            df = df.fillna(0)
except Exception as e:
print(e)
return html.Div([
'There was an error processing this file.'
])
return df
@app.callback(Output('csv-data', 'data'),
[Input('data-table-upload', 'contents')],
[State('data-table-upload', 'filename')])
def parse_uploaded_file(contents, filename):
if not filename:
return dash.no_update
df = parse_contents(contents, filename)
    df = df.fillna(0)
return df.to_json(date_format='iso', orient='split')
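# dcc.Store only holds JSON-serialisable data, hence the to_json / read_json round trip used by the
# callbacks; orient='split' preserves the column order and the index. A tiny illustration (toy frame):
_round_trip_demo = pd.read_json(
    pd.DataFrame({'a': [1, 2]}).to_json(date_format='iso', orient='split'), orient='split')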
@app.callback(Output('PC-Var-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_graph_stat(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
data = Var_dff
elif outlier == 'Yes' and matrix_type == 'Correlation':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
data = Var_dff_outlier
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
data = Var_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
data = Var_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Cumulative Proportion of Explained Variance'],
mode='lines', line=dict(color='Red')))
return {'data': traces,
'layout': go.Layout(title='<b>Cumulative Scree Plot Proportion of Explained Variance</b>',
titlefont=dict(family='Helvetica', size=16),
xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True
}, yaxis={'title': 'Cumulative Explained Variance',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True,
'range': [0, 100]},
hovermode='closest', font=dict(family="Helvetica"), template="simple_white")
}
@app.callback(
Output('var-output-container-filter', 'children'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_output(outlier, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int)
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_covar)
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
# combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
@app.callback(Output('PC-Eigen-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')]
)
def update_graph_stat(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == "Correlation":
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
eigenvalues = pca.explained_variance_
Eigen_df = pd.DataFrame(data=eigenvalues, columns=['Eigenvalues'])
Eigen_dff = pd.concat([PC_df, Eigen_df], axis=1)
data = Eigen_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier = pca_outlier.explained_variance_
Eigen_df_outlier = pd.DataFrame(data=eigenvalues_outlier, columns=['Eigenvalues'])
Eigen_dff_outlier = pd.concat([PC_df_outlier, Eigen_df_outlier], axis=1)
data = Eigen_dff_outlier
elif outlier == 'No' and matrix_type == "Covariance":
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
eigenvalues_covar = pca_covar.explained_variance_
Eigen_df_covar = pd.DataFrame(data=eigenvalues_covar, columns=['Eigenvalues'])
Eigen_dff_covar = pd.concat([PC_df_covar, Eigen_df_covar], axis=1)
data = Eigen_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
# combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier_covar = pca_outlier_covar.explained_variance_
Eigen_df_outlier_covar = pd.DataFrame(data=eigenvalues_outlier_covar, columns=['Eigenvalues'])
Eigen_dff_outlier_covar = pd.concat([PC_df_outlier_covar, Eigen_df_outlier_covar], axis=1)
data = Eigen_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Eigenvalues'], mode='lines'))
return {'data': traces,
'layout': go.Layout(title='<b>Scree Plot Eigenvalues</b>', xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True},
titlefont=dict(family='Helvetica', size=16),
yaxis={'title': 'Eigenvalues', 'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True}, hovermode='closest',
font=dict(family="Helvetica"), template="simple_white", )
}
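# Rounding helpers used to display the colour-bar range of the loading heatmap.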
def round_up(n, decimals=0):
multiplier = 10 ** decimals
return math.ceil(n * multiplier) / multiplier
def round_down(n, decimals=0):
multiplier = 10 ** decimals
return math.floor(n * multiplier) / multiplier
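# Callback: heatmap of loadings (eigenvectors scaled by the square root of the eigenvalues) of
# each feature on each principal component; also returns the loading range shown in the UI.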
@app.callback([Output('PC-feature-heatmap', 'figure'),
Output('color-range-container', 'children')],
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input("matrix-type-heatmap", "value"),
Input('csv-data', 'data')]
)
def update_graph_stat(outlier, colorscale, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
# INCLUDING OUTLIERS
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
# explained variance of the principal components
# print(pca.explained_variance_ratio_)
# Explained variance tells us how much information (variance) can be attributed to each of the principal components
# loading of each feature on the principal components
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
# OUTLIERS REMOVED
z_scores_hm = scipy.stats.zscore(dff)
abs_z_scores_hm = np.abs(z_scores_hm)
filtered_entries_hm = (abs_z_scores_hm < 3).all(axis=1)
outlier_dff_hm = dff[filtered_entries_hm]
features1_outlier_hm = outlier_dff_hm.columns
features_outlier2 = list(features1_outlier_hm)
outlier_names1_hm = df[filtered_entries_hm]
outlier_names_hm = outlier_names1_hm.iloc[:, 0]
x_outlier_hm = outlier_dff_hm.loc[:, features_outlier2].values
# Separating out the target (if any)
# Standardizing the features
x_outlier_hm = StandardScaler().fit_transform(x_outlier_hm)
pca_outlier_hm = PCA(n_components=len(features_outlier2))
principalComponents_outlier_hm = pca_outlier_hm.fit_transform(x_outlier_hm)
principalDf_outlier_hm = pd.DataFrame(data=principalComponents_outlier_hm
, columns=['PC' + str(i + 1) for i in range(len(features_outlier2))])
# combining principal components and target
finalDf_outlier_hm = pd.concat([outlier_names_hm, principalDf_outlier_hm], axis=1)
dfff_outlier_hm = finalDf_outlier_hm
# calculating loading
loading_outlier_hm = pca_outlier_hm.components_.T * np.sqrt(pca_outlier_hm.explained_variance_)
loading_df_outlier_hm = pd.DataFrame(data=loading_outlier_hm[0:, 0:], index=features_outlier2,
columns=['PC' + str(i + 1) for i in range(loading_outlier_hm.shape[1])])
loading_dff_outlier_hm = loading_df_outlier_hm.T
# COVAR MATRIX
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
loading_dff_covar = loading_df_covar.T
# COVAR MATRIX OUTLIERS REMOVED
if outlier == 'No' and matrix_type == "Correlation":
data = loading_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_dff_outlier_hm
elif outlier == 'No' and matrix_type == "Covariance":
data = loading_dff_covar
elif outlier == "Yes" and matrix_type == "Covariance":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
# combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
loading_dff_outlier_covar = loading_df_outlier_covar.T
data = loading_dff_outlier_covar
size_range = [round_up(data.values.min(), 2), round_down(data.values.max(), 2)]
traces.append(go.Heatmap(
z=data, x=list(data.columns), y=list(data.index),
colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
# each cell shows the loading (correlation) of a feature on a principal component
colorbar={"title": "Loading",
# 'tickvals': [round_up(data.values.min(), 2),
# round_up((data.values.min() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down((data.values.max() + data.values.min())/2,2),
# round_down((data.values.max() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down(data.values.max(),2), ]
}
))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>PC and Feature Correlation Analysis</b>'),
xaxis=dict(title_text='Features', title_standoff=50),
titlefont=dict(family='Helvetica', size=16),
hovermode='closest', margin={'b': 110, 't': 50, 'l': 75},
font=dict(family="Helvetica", size=11),
annotations=[
dict(x=-0.16, y=0.5, showarrow=False, text="Principal Components",
xref='paper', yref='paper', textangle=-90,
font=dict(size=12))]
),
}, '{}'.format(size_range)
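# Callback: feature-feature heatmap of R² (squared Pearson correlation), with or without
# z-score-based outlier removal.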
@app.callback(Output('feature-heatmap', 'figure'),
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input('csv-data', 'data')])
def update_graph_stat(outlier, colorscale, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
if outlier == 'No':
features1 = dff.columns
features = list(features1)
# correlation coefficient and coefficient of determination
correlation_dff = dff.corr(method='pearson')
r2_dff = correlation_dff * correlation_dff
data = r2_dff
feat = features
elif outlier == 'Yes':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
# correlation coefficient and coefficient of determination
correlation_dff_outlier = outlier_dff.corr(method='pearson')
r2_dff_outlier = correlation_dff_outlier * correlation_dff_outlier
data = r2_dff_outlier
feat = features_outlier
traces.append(go.Heatmap(
z=data, x=feat, y=feat, colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
# each cell shows the squared Pearson correlation (R²) between two features
colorbar={"title": "R²", 'tickvals': [0, 0.2, 0.4, 0.6, 0.8, 1]}))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>Feature Correlation Analysis</b>', y=0.97, x=0.6),
xaxis={},
titlefont=dict(family='Helvetica', size=16),
yaxis={},
hovermode='closest', margin={'b': 110, 't': 50, 'l': 180, 'r': 50},
font=dict(family="Helvetica", size=11)),
}
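# Callback: populate the feature dropdown when 'Custom' variable selection is chosen.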
@app.callback(Output('feature-input', 'options'),
[Input('all-custom-choice', 'value'),
Input('csv-data', 'data')])
def activate_input(all_custom, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
options = []
elif all_custom == 'Custom':
options = [{'label': i, 'value': i} for i in dff.columns]
return options
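# Callback: populate the colour-scale dropdown of the biplot with the user-selected target variables.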
@app.callback(Output('color-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')], )
def populate_color_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
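# Callback: populate the marker-size dropdown of the biplot with the user-selected target variables.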
@app.callback(Output('size-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item-second', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')])
def populate_size_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
# Callback: 2-D biplot / loadings plot of PC1 vs. PC2. PCA is computed for the correlation and
# covariance matrices, with and without z-score outlier removal, and the requested variant is drawn.
@app.callback(Output('biplot', 'figure'),
[
Input('outlier-value-biplot', 'value'),
Input('feature-input', 'value'),
Input('customvar-graph-update', 'value'),
Input('color-scale-scores', 'value'),
Input('radio-target-item', 'value'),
Input('size-scale-scores', 'value'),
Input('radio-target-item-second', 'value'),
Input('all-custom-choice', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_graph_custom(outlier, input, graph_update, color, target, size, target2, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
features1 = dff.columns
features = list(features1)
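# 'All' runs PCA on every numeric column; 'Custom' drops the user-selected target variables
# from the PCA input and keeps them for colouring/sizing the score markers.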
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
# x_scale = rescale(x_scale, new_min=0, new_max=1)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
dfff_scale = finalDf_scale.fillna(0)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_dff = pd.concat([zero_scale_df, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# x_outlier_scale = MinMaxScaler().fit_transform(x_outlier_scale)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_outlier_scale = rescale(x_outlier_scale, new_min=0, new_max=1)
# PCA on standardized data (equivalent to using the correlation matrix)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
dfff_outlier_scale = finalDf_outlier_scale.fillna(0)
# explained variance ratio of each principal component
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_df, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
dfff_scale_covar = finalDf_scale_covar.fillna(0)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_df_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, line_group_scale_df_covar], axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
# COVARIANCE MATRIX OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
dfff_outlier_scale_covar = finalDf_outlier_scale_covar.fillna(0)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_df_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_dff_covar = pd.concat([zero_outlier_scale_df_covar, line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_outlier_scale_covar
trace2_all = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, showscale=False, size=12,
line=dict(width=0.5, color='DarkSlateGrey'),
),
)
####################################################################################################
# INCLUDE THIS
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_covar
variance = Var_outlier_scale_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], line=dict(color="#4f4f4f"),
name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text',
textposition='bottom right', textfont=dict(size=12)
)
lists[counter] = trace1_all
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2_all)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == 'Custom':
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# x_scale_input = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input = rescale(x_scale_input, new_min=0, new_max=1)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_df, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_dff = pd.concat([zero_scale_input_df, line_group_scale_input_df], axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# x_scale_input_outlier = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input_outlier)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input_outlier = rescale(x_scale_input_outlier, new_min=0, new_max=1)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat([loading_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_df_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, line_group_scale_input_df_covar], axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
# COVARIANCE MATRIX OUTLIERS
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat([loading_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff_covar = pd.concat([zero_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale_input
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_scale_input_outlier
variance = Var_scale_input_outlier
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_input_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_scale_input_outlier_covar
variance = Var_scale_input_outlier_covar
trace2 = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
marker_color=dat[color] if target == 'Yes' else None,
marker_size=dat[size] if target2 == 'Yes' else 12,
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, colorscale='Plasma',
sizeref=max(dat[size]) / (15 ** 2) if target2 == 'Yes' else None,
sizemode='area',
showscale=True if target == 'Yes' else False,
line=dict(width=0.5, color='DarkSlateGrey'),
colorbar=dict(title=dict(text=color if target == 'Yes' else None,
font=dict(family='Helvetica'),
side='right'), ypad=0),
),
)
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_scale_input_outlier_line_graph
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_scale_input_outlier_line_graph_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'],
line=dict(color="#666666" if target == 'Yes' else '#4f4f4f'), name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
)
lists[counter] = trace1
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
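# Callback: display the min-max range of the variable mapped to marker size in the biplot.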
@app.callback(
Output('size-second-target-container', 'children'),
[Input('size-scale-scores', 'value'),
Input('outlier-value-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_output(size, outlier, data):
if not data:
return dash.no_update
if size is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
z_scores_dff_size = scipy.stats.zscore(dff)
abs_z_scores_dff_size = np.abs(z_scores_dff_size)
filtered_entries_dff_size = (abs_z_scores_dff_size < 3).all(axis=1)
dff_target_outlier_size = dff[filtered_entries_dff_size]
if outlier == 'Yes':
size_range = [round(dff_target_outlier_size[size].min(), 2), round(dff_target_outlier_size[size].max(), 2)]
elif outlier == 'No':
size_range = [round(dff[size].min(), 2), round(dff[size].max(), 2)]
return '{}'.format(size_range)
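# Callback: loadings plot coloured by cos2 (squared loading on PC1 and PC2), showing how well
# each feature is represented by the first two principal components.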
@app.callback(Output('cos2-plot', 'figure'),
[
Input('outlier-value-cos2', 'value'),
Input('feature-input', 'value'),
Input('all-custom-choice', 'value'),
Input("matrix-type-cos2", "value"),
Input('csv-data', 'data')
])
def update_cos2_plot(outlier, input, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
features1 = dff.columns
features = list(features1)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df['cos2'] = (loading_scale_df["PC1"] ** 2) + (loading_scale_df["PC2"] ** 2)
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_df_color = pd.DataFrame(data=loading_scale_df.iloc[:, 2], columns=['cos2'])
zero_scale_dff = pd.concat([zero_scale_df, zero_scale_df_color, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# x_outlier_scale = MinMaxScaler().fit_transform(x_outlier_scale)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
#
# x_outlier_scale = rescale(x_outlier_scale, new_min=0, new_max=1)
# PCA on standardized data (equivalent to using the correlation matrix)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df["cos2"] = (loading_outlier_scale_df["PC1"] ** 2) + (
loading_outlier_scale_df["PC2"] ** 2)
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_df, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_df_color = pd.DataFrame(data=loading_outlier_scale_df.iloc[:, 2], columns=['cos2'])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, zero_outlier_scale_df_color, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
loading_scale_line_graph_sort = loading_scale_line_graph.sort_values(by='cos2')
loading_outlier_scale_line_graph_sort = loading_outlier_scale_line_graph.sort_values(by='cos2')
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df_covar['cos2'] = (loading_scale_df_covar["PC1"] ** 2) + (loading_scale_df_covar["PC2"] ** 2)
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_df_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_df_color_covar = pd.DataFrame(data=loading_scale_df_covar.iloc[:, 2], columns=['cos2'])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, zero_scale_df_color_covar, line_group_scale_df_covar],
axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
loading_scale_line_graph_sort_covar = loading_scale_line_graph_covar.sort_values(by='cos2')
# COVARIANCE MATRIX WITH OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
# combining principal components and target
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df_covar["cos2"] = (loading_outlier_scale_df_covar["PC1"] ** 2) + (
loading_outlier_scale_df_covar["PC2"] ** 2)
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_df_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_df_color_covar = pd.DataFrame(data=loading_outlier_scale_df_covar.iloc[:, 2],
columns=['cos2'])
zero_outlier_scale_dff_covar = pd.concat([zero_outlier_scale_df_covar, zero_outlier_scale_df_color_covar,
line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
loading_outlier_scale_line_graph_sort_covar = loading_outlier_scale_line_graph_covar.sort_values(by='cos2')
# select the dataset and explained-variance vector according to the outlier and matrix-type options
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph_sort
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph_sort
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_sort_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_sort_covar
variance = Var_outlier_scale_covar
N = len(data['cos2'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
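# one shade of blue per distinct cos2 value, from light (low cos2) to dark (high cos2); lines are
# coloured in ascending cos2 order because the data were sorted by cos2 above.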
counter = 0
counter_color = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], mode='lines+text',
name=i, line=dict(color=colorscale[counter_color]),
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
textposition='bottom right', textfont=dict(size=12)
)
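# trace2_all is an invisible marker trace (opacity=0) whose only purpose is to display the cos2 colour bar.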
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers',
hoverinfo='skip',
marker=dict(showscale=True, opacity=0,
color=[data["cos2"].min(), data["cos2"].max()],
colorscale=colorscale,
colorbar=dict(title=dict(text="Cos2",
side='right'), ypad=0)
), )
lists[counter] = trace1_all
counter = counter + 1
counter_color = counter_color + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)), mirror=True,
ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)), mirror=True,
ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == "Custom":
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
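# outlier filtering: keep only rows in which every numeric feature has |z-score| < 3
# (roughly the central 99.7% of a normal distribution).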
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
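# StandardScaler gives every feature zero mean and unit variance, so this PCA is effectively
# performed on the correlation matrix.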
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# # x_scale_input = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
#
# x_scale_input = rescale(x_scale_input, new_min=0, new_max=1)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df["cos2"] = (loading_scale_input_df["PC1"] ** 2) + (loading_scale_input_df["PC2"] ** 2)
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_df, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_df_color = pd.DataFrame(data=loading_scale_input_df.iloc[:, 2], columns=['cos2'])
zero_scale_input_dff = pd.concat([zero_scale_input_df, zero_scale_input_df_color, line_group_scale_input_df],
axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# # x_scale_input_outlier = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input_outlier)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input_outlier = rescale(x_scale_input_outlier, new_min=0, new_max=1)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df["cos2"] = (loading_scale_input_outlier_df["PC1"] ** 2) + \
(loading_scale_input_outlier_df["PC2"] ** 2)
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat([loading_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color = pd.DataFrame(data=loading_scale_input_outlier_df.iloc[:, 2],
columns=['cos2'])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, zero_scale_input_outlier_df_color,
line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
loading_scale_input_line_graph_sort = loading_scale_input_line_graph.sort_values(by='cos2')
loading_scale_input_outlier_line_graph_sort = loading_scale_input_outlier_line_graph.sort_values(by='cos2')
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df_covar["cos2"] = (loading_scale_input_df_covar["PC1"] ** 2) + (
loading_scale_input_df_covar["PC2"] ** 2)
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_df_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_df_color_covar = pd.DataFrame(data=loading_scale_input_df_covar.iloc[:, 2], columns=['cos2'])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, zero_scale_input_df_color_covar,
line_group_scale_input_df_covar],
axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
loading_scale_input_line_graph_sort_covar = loading_scale_input_line_graph_covar.sort_values(by='cos2')
# COVARIANCE MATRIX WITH OUTLIERS
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df_covar["cos2"] = (loading_scale_input_outlier_df_covar["PC1"] ** 2) + \
(loading_scale_input_outlier_df_covar["PC2"] ** 2)
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat([loading_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color_covar = pd.DataFrame(data=loading_scale_input_outlier_df_covar.iloc[:, 2],
columns=['cos2'])
zero_scale_input_outlier_dff_covar = pd.concat([zero_scale_input_outlier_df_covar,
zero_scale_input_outlier_df_color_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
loading_scale_input_outlier_line_graph_sort_covar = loading_scale_input_outlier_line_graph_covar.sort_values(
by='cos2')
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph_sort
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
variance = Var_scale_input_outlier
data = loading_scale_input_outlier_line_graph_sort
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_sort_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
variance = Var_scale_input_outlier_covar
data = loading_scale_input_outlier_line_graph_sort_covar
N = len(data['cos2'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter_color = 0
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'], name=i, line=dict(color=colorscale[counter_color]),
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers', hoverinfo='skip',
marker=dict(showscale=True, color=[data["cos2"].min(), data["cos2"].max()],
colorscale=colorscale, opacity=0,
colorbar=dict(title=dict(text="Cos2",
side='right'), ypad=0)
), )
lists[counter] = trace1
counter_color = counter_color + 1
counter = counter + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)),
mirror=True, ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)),
mirror=True, ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
@app.callback(Output('contrib-plot', 'figure'),
[
Input('outlier-value-contrib', 'value'),
Input('feature-input', 'value'),
Input('all-custom-choice', 'value'),
Input("matrix-type-contrib", "value"),
Input('csv-data', 'data')
])
def update_contrib_plot(outlier, input, all_custom, matrix_type, data):
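# Contribution plot: same layout as the cos2 plot, but variables are coloured by their percentage
# contribution to PC1 and PC2.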
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
features1 = dff.columns
features = list(features1)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df["PC1_cos2"] = loading_scale_df["PC1"] ** 2
loading_scale_df["PC2_cos2"] = loading_scale_df["PC2"] ** 2
loading_scale_df["PC1_contrib"] = \
(loading_scale_df["PC1_cos2"] * 100) / (loading_scale_df["PC1_cos2"].sum(axis=0))
loading_scale_df["PC2_contrib"] = \
(loading_scale_df["PC2_cos2"] * 100) / (loading_scale_df["PC2_cos2"].sum(axis=0))
loading_scale_df["contrib"] = loading_scale_df["PC1_contrib"] + loading_scale_df["PC2_contrib"]
# keep only PC1, PC2 and the total contribution (used for the colour scale) in a separate dataframe
loading_scale_dataf = pd.concat([loading_scale_df.iloc[:, 0:2], loading_scale_df.iloc[:, 6]], axis=1)
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_dataf, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_df_color = pd.DataFrame(data=loading_scale_dataf.iloc[:, 2], columns=['contrib'])
zero_scale_dff = pd.concat([zero_scale_df, zero_scale_df_color, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df["PC1_cos2"] = loading_outlier_scale_df["PC1"] ** 2
loading_outlier_scale_df["PC2_cos2"] = loading_outlier_scale_df["PC2"] ** 2
loading_outlier_scale_df["PC1_contrib"] = \
(loading_outlier_scale_df["PC1_cos2"] * 100) / (loading_outlier_scale_df["PC1_cos2"].sum(axis=0))
loading_outlier_scale_df["PC2_contrib"] = \
(loading_outlier_scale_df["PC2_cos2"] * 100) / (loading_outlier_scale_df["PC2_cos2"].sum(axis=0))
loading_outlier_scale_df["contrib"] = loading_outlier_scale_df["PC1_contrib"] + loading_outlier_scale_df[
"PC2_contrib"]
# keep only PC1, PC2 and the total contribution (used for the colour scale) in a separate dataframe
loading_outlier_scale_dataf = pd.concat(
[loading_outlier_scale_df.iloc[:, 0:2], loading_outlier_scale_df.iloc[:, 6]], axis=1)
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_dataf, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_df_color = pd.DataFrame(data=loading_outlier_scale_dataf.iloc[:, 2], columns=['contrib'])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, zero_outlier_scale_df_color, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
loading_scale_line_graph_sort = loading_scale_line_graph.sort_values(by='contrib')
loading_outlier_scale_line_graph_sort = loading_outlier_scale_line_graph.sort_values(by='contrib')
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df_covar["PC1_cos2"] = loading_scale_df_covar["PC1"] ** 2
loading_scale_df_covar["PC2_cos2"] = loading_scale_df_covar["PC2"] ** 2
loading_scale_df_covar["PC1_contrib"] = \
(loading_scale_df_covar["PC1_cos2"] * 100) / (loading_scale_df_covar["PC1_cos2"].sum(axis=0))
loading_scale_df_covar["PC2_contrib"] = \
(loading_scale_df_covar["PC2_cos2"] * 100) / (loading_scale_df_covar["PC2_cos2"].sum(axis=0))
loading_scale_df_covar["contrib"] = loading_scale_df_covar["PC1_contrib"] + loading_scale_df_covar[
"PC2_contrib"]
loading_scale_dataf_covar = pd.concat([loading_scale_df_covar.iloc[:, 0:2], loading_scale_df_covar.iloc[:, 6]],
axis=1)
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_dataf_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_df_color_covar = pd.DataFrame(data=loading_scale_dataf_covar.iloc[:, 2], columns=['contrib'])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, zero_scale_df_color_covar, line_group_scale_df_covar],
axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
loading_scale_line_graph_sort_covar = loading_scale_line_graph_covar.sort_values(by='contrib')
# COVARIANCE MATRIX OUTLIERS REMOVED
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df_covar["PC1_cos2"] = loading_outlier_scale_df_covar["PC1"] ** 2
loading_outlier_scale_df_covar["PC2_cos2"] = loading_outlier_scale_df_covar["PC2"] ** 2
loading_outlier_scale_df_covar["PC1_contrib"] = \
(loading_outlier_scale_df_covar["PC1_cos2"] * 100) / (
loading_outlier_scale_df_covar["PC1_cos2"].sum(axis=0))
loading_outlier_scale_df_covar["PC2_contrib"] = \
(loading_outlier_scale_df_covar["PC2_cos2"] * 100) / (
loading_outlier_scale_df_covar["PC2_cos2"].sum(axis=0))
loading_outlier_scale_df_covar["contrib"] = loading_outlier_scale_df_covar["PC1_contrib"] + \
loading_outlier_scale_df_covar[
"PC2_contrib"]
# keep only PC1, PC2 and the total contribution (used for the colour scale) in a separate dataframe
loading_outlier_scale_dataf_covar = pd.concat(
[loading_outlier_scale_df_covar.iloc[:, 0:2], loading_outlier_scale_df_covar.iloc[:, 6]], axis=1)
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_dataf_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_df_color_covar = pd.DataFrame(data=loading_outlier_scale_dataf_covar.iloc[:, 2],
columns=['contrib'])
zero_outlier_scale_dff_covar = pd.concat(
[zero_outlier_scale_df_covar, zero_outlier_scale_df_color_covar, line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
loading_outlier_scale_line_graph_sort_covar = loading_outlier_scale_line_graph_covar.sort_values(by='contrib')
# select the dataset and explained-variance vector according to the outlier and matrix-type options
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph_sort
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph_sort
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_sort_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_sort_covar
variance = Var_outlier_scale_covar
N = len(data['contrib'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter = 0
counter_color = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], mode='lines+text',
name=i, line=dict(color=colorscale[counter_color]),
textposition='bottom right', textfont=dict(size=12),
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers', hoverinfo='skip',
marker=dict(showscale=True, opacity=0,
color=[data["contrib"].min(), data["contrib"].max()],
colorscale=colorscale,
colorbar=dict(title=dict(text="Contribution",
side='right'), ypad=0),
), )
lists[counter] = trace1_all
counter = counter + 1
counter_color = counter_color + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)),
mirror=True, ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)),
mirror=True, ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == "Custom":
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df["PC1_cos2"] = loading_scale_input_df["PC1"] ** 2
loading_scale_input_df["PC2_cos2"] = loading_scale_input_df["PC2"] ** 2
loading_scale_input_df["PC1_contrib"] = \
(loading_scale_input_df["PC1_cos2"] * 100) / (loading_scale_input_df["PC1_cos2"].sum(axis=0))
loading_scale_input_df["PC2_contrib"] = \
(loading_scale_input_df["PC2_cos2"] * 100) / (loading_scale_input_df["PC2_cos2"].sum(axis=0))
loading_scale_input_df["contrib"] = loading_scale_input_df["PC1_contrib"] + loading_scale_input_df[
"PC2_contrib"]
loading_scale_input_dataf = pd.concat(
[loading_scale_input_df.iloc[:, 0:2], loading_scale_input_df.iloc[:, 6]], axis=1)
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_dataf, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_df_color = pd.DataFrame(data=loading_scale_input_dataf.iloc[:, 2], columns=['contrib'])
zero_scale_input_dff = pd.concat([zero_scale_input_df, zero_scale_input_df_color, line_group_scale_input_df],
axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df["PC1_cos2"] = loading_scale_input_outlier_df["PC1"] ** 2
loading_scale_input_outlier_df["PC2_cos2"] = loading_scale_input_outlier_df["PC2"] ** 2
loading_scale_input_outlier_df["PC1_contrib"] = \
(loading_scale_input_outlier_df["PC1_cos2"] * 100) / (
loading_scale_input_outlier_df["PC1_cos2"].sum(axis=0))
loading_scale_input_outlier_df["PC2_contrib"] = \
(loading_scale_input_outlier_df["PC2_cos2"] * 100) / (
loading_scale_input_outlier_df["PC2_cos2"].sum(axis=0))
loading_scale_input_outlier_df["contrib"] = loading_scale_input_outlier_df["PC1_contrib"] + \
loading_scale_input_outlier_df[
"PC2_contrib"]
loading_scale_input_outlier_dataf = pd.concat(
[loading_scale_input_outlier_df.iloc[:, 0:2], loading_scale_input_outlier_df.iloc[:, 6]], axis=1)
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat(
[loading_scale_input_outlier_dataf, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color = pd.DataFrame(data=loading_scale_input_outlier_dataf.iloc[:, 2],
columns=['contrib'])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, zero_scale_input_outlier_df_color,
line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
loading_scale_input_line_graph_sort = loading_scale_input_line_graph.sort_values(by='contrib')
loading_scale_input_outlier_line_graph_sort = loading_scale_input_outlier_line_graph.sort_values(by='contrib')
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df_covar["PC1_cos2"] = loading_scale_input_df_covar["PC1"] ** 2
loading_scale_input_df_covar["PC2_cos2"] = loading_scale_input_df_covar["PC2"] ** 2
loading_scale_input_df_covar["PC1_contrib"] = \
(loading_scale_input_df_covar["PC1_cos2"] * 100) / (loading_scale_input_df_covar["PC1_cos2"].sum(axis=0))
loading_scale_input_df_covar["PC2_contrib"] = \
(loading_scale_input_df_covar["PC2_cos2"] * 100) / (loading_scale_input_df_covar["PC2_cos2"].sum(axis=0))
loading_scale_input_df_covar["contrib"] = loading_scale_input_df_covar["PC1_contrib"] + \
loading_scale_input_df_covar[
"PC2_contrib"]
loading_scale_input_dataf_covar = pd.concat(
[loading_scale_input_df_covar.iloc[:, 0:2], loading_scale_input_df_covar.iloc[:, 6]], axis=1)
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_dataf_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_df_color_covar = pd.DataFrame(data=loading_scale_input_dataf_covar.iloc[:, 2],
columns=['contrib'])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, zero_scale_input_df_color_covar,
line_group_scale_input_df_covar],
axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
loading_scale_input_line_graph_sort_covar = loading_scale_input_line_graph_covar.sort_values(by='contrib')
# COVARIANCE MATRIX WITH OUTLIERS
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df_covar["PC1_cos2"] = loading_scale_input_outlier_df_covar["PC1"] ** 2
loading_scale_input_outlier_df_covar["PC2_cos2"] = loading_scale_input_outlier_df_covar["PC2"] ** 2
loading_scale_input_outlier_df_covar["PC1_contrib"] = \
(loading_scale_input_outlier_df_covar["PC1_cos2"] * 100) / (
loading_scale_input_outlier_df_covar["PC1_cos2"].sum(axis=0))
loading_scale_input_outlier_df_covar["PC2_contrib"] = \
(loading_scale_input_outlier_df_covar["PC2_cos2"] * 100) / (
loading_scale_input_outlier_df_covar["PC2_cos2"].sum(axis=0))
loading_scale_input_outlier_df_covar["contrib"] = loading_scale_input_outlier_df_covar["PC1_contrib"] + \
loading_scale_input_outlier_df_covar[
"PC2_contrib"]
loading_scale_input_outlier_dataf_covar = pd.concat(
[loading_scale_input_outlier_df_covar.iloc[:, 0:2], loading_scale_input_outlier_df_covar.iloc[:, 6]],
axis=1)
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat(
[loading_scale_input_outlier_dataf_covar, line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color_covar = pd.DataFrame(data=loading_scale_input_outlier_dataf_covar.iloc[:, 2],
columns=['contrib'])
zero_scale_input_outlier_dff_covar = pd.concat(
[zero_scale_input_outlier_df_covar, zero_scale_input_outlier_df_color_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
loading_scale_input_outlier_line_graph_sort_covar = loading_scale_input_outlier_line_graph_covar.sort_values(
by='contrib')
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph_sort
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
variance = Var_scale_input_outlier
data = loading_scale_input_outlier_line_graph_sort
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_sort_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_scale_input_outlier_line_graph_sort_covar
variance = Var_scale_input_outlier_covar
N = len(data['contrib'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter_color = 0
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'], name=i, line=dict(color=colorscale[counter_color]),
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers', hoverinfo='skip',
marker=dict(showscale=True, color=[data["contrib"].min(), data["contrib"].max()],
colorscale=colorscale, opacity=0,
colorbar=dict(title=dict(text="Contribution",
side='right'), ypad=0)
))
lists[counter] = trace1
counter_color = counter_color + 1
counter = counter + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)),
mirror=True, ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)),
mirror=True, ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
@app.callback(Output('download-link', 'download'),
[Input('all-custom-choice', 'value'),
Input('eigenA-outlier', 'value'),
Input("matrix-type-data-table", 'value')])
def update_data_table_filename(all_custom, outlier, matrix_type):
if all_custom == 'All' and outlier == 'Yes' and matrix_type == "Correlation":
download = 'all_variables_correlation_matrix_outliers_removed_data.csv'
elif all_custom == 'All' and outlier == 'Yes' and matrix_type == "Covariance":
download = 'all_variables_covariance_matrix_outliers_removed_data.csv'
elif all_custom == 'All' and outlier == 'No' and matrix_type == "Correlation":
download = 'all_variables_correlation_matrix_data.csv'
elif all_custom == 'All' and outlier == 'No' and matrix_type == "Covariance":
download = 'all_variables_covariance_matrix_data.csv'
elif all_custom == 'Custom' and outlier == 'Yes' and matrix_type == "Correlation":
download = 'custom_variables_correlation_matrix_outliers_removed_data.csv'
elif all_custom == 'Custom' and outlier == 'Yes' and matrix_type == "Covariance":
download = 'custom_variables_covariance_matrix_outliers_removed_data.csv'
elif all_custom == 'Custom' and outlier == 'No' and matrix_type == "Correlation":
download = 'custom_variables_correlation_matrix_data.csv'
elif all_custom == 'Custom' and outlier == 'No' and matrix_type == "Covariance":
download = 'custom_variables_covariance_matrix_data.csv'
return download
@app.callback(Output('download-link', 'href'),
[Input('all-custom-choice', 'value'),
Input('feature-input', 'value'),
Input('eigenA-outlier', 'value'),
Input("matrix-type-data-table", 'value'),
Input('csv-data', 'data')])
def update_link(all_custom, input, outlier, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
features1 = dff.columns
features = list(features1)
if all_custom == 'All':
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
# x_scale = rescale(x_scale, new_min=0, new_max=1)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
dfff_scale = finalDf_scale.fillna(0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
dfff_outlier_scale = finalDf_outlier_scale.fillna(0)
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
dfff_scale_covar = finalDf_scale_covar.fillna(0)
# COVARIANCE MATRIX REMOVING OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
dfff_outlier_scale_covar = finalDf_outlier_scale_covar.fillna(0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_outlier_scale_covar
elif all_custom == 'Custom':
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
# COVARIANCE MATRIX OUTLIERS REMOVED
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_scale_input_outlier
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_scale_input_outlier_covar
csv_string = dat.to_csv(index=False, encoding='utf-8')
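# build a data: URI for the download link; the %EF%BB%BF prefix is a UTF-8 byte-order mark so that
# spreadsheet software detects the encoding correctly.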
csv_string = "data:text/csv;charset=utf-8,%EF%BB%BF" + urllib.parse.quote(csv_string)
return csv_string
@app.callback(Output('download-link-correlation', 'download'),
[Input('eigenA-outlier', 'value'),
])
def update_correlation_filename(outlier):
if outlier == 'Yes':
download = 'feature_correlation_removed_outliers_data.csv'
elif outlier == 'No':
download = 'feature_correlation_data.csv'
return download
@app.callback([Output('data-table-correlation', 'data'),
Output('data-table-correlation', 'columns'),
Output('download-link-correlation', 'href')],
[Input("eigenA-outlier", 'value'),
Input('csv-data', 'data')], )
def update_correlation_table(outlier, data):
if not data:
return dash.no_update, dash.no_update, dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No':
features1 = dff.columns
features = list(features1)
# correlation coefficient and coefficient of determination
correlation_dff = dff.corr(method='pearson', )
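# element-wise square of the Pearson correlation matrix, i.e. the coefficient of determination R^2
# for every pair of features.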
r2_dff_table = correlation_dff * correlation_dff
r2_dff_table.insert(0, 'Features', features)
data_frame = r2_dff_table
elif outlier == 'Yes':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# correlation coefficient and coefficient of determination
correlation_dff_outlier = outlier_dff.corr(method='pearson', )
r2_dff_outlier_table = correlation_dff_outlier * correlation_dff_outlier
r2_dff_outlier_table.insert(0, 'Features', features_outlier)
data_frame = r2_dff_outlier_table
data = data_frame.to_dict('records')
columns = [{"name": i, "id": i, "deletable": True, "selectable": True, 'type': 'numeric',
'format': Format(precision=3, scheme=Scheme.fixed)} for i in data_frame.columns]
csv_string = data_frame.to_csv(index=False, encoding='utf-8')
csv_string = "data:text/csv;charset=utf-8,%EF%BB%BF" + urllib.parse.quote(csv_string)
return data, columns, csv_string
@app.callback(Output('download-link-eigenA', 'download'),
[Input("matrix-type-data-table", 'value'),
Input('eigenA-outlier', 'value')])
def update_eigen_filename(matrix_type, outlier):
if outlier == 'Yes' and matrix_type == "Correlation":
download = 'Eigen_Analysis_correlation_matrix_removed_outliers_data.csv'
elif outlier == 'Yes' and matrix_type == "Covariance":
download = 'Eigen_Analysis_covariance_matrix_removed_outliers_data.csv'
elif outlier == 'No' and matrix_type == "Correlation":
download = 'Eigen_Analysis_correlation_matrix_data.csv'
elif outlier == "No" and matrix_type == "Covariance":
download = 'Eigen_Analysis_covariance_matrix_data.csv'
return download
@app.callback([Output('data-table-eigenA', 'data'),
Output('data-table-eigenA', 'columns'),
Output('download-link-eigenA', 'href')],
[Input('all-custom-choice', 'value'),
Input("eigenA-outlier", 'value'),
Input('feature-input', 'value'),
Input("matrix-type-data-table", 'value'),
Input('csv-data', 'data')], )
def update_eigen_table(all_custom, outlier, input, matrix_type, data):
if not data:
return dash.no_update, dash.no_update, dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
if outlier == 'No' and matrix_type == "Correlation":
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))],
columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
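# np.interp finds (by linear interpolation) how many principal components are needed to reach 70%
# cumulative explained variance; math.ceil rounds up to a whole component.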
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
eigenvalues = pca.explained_variance_
Eigen_df = pd.DataFrame(data=eigenvalues, columns=['Eigenvalues'])
Eigen_dff = pd.concat([PC_df, Eigen_df], axis=1)
Var_dfff = pd.concat([(Var_cumsum * 100)], axis=1)
Eigen_Analysis = pd.concat([PC_df.T, Eigen_df.T, Var_df.T, Var_dfff.T], axis=0)
Eigen_Analysis = Eigen_Analysis.rename(columns=Eigen_Analysis.iloc[0])
Eigen_Analysis = Eigen_Analysis.drop(Eigen_Analysis.index[0])
Eigen_Analysis.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# correlation coefficient and coefficient of determination
correlation_dff_outlier = outlier_dff.corr(method='pearson', )
r2_dff_outlier = correlation_dff_outlier * correlation_dff_outlier
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier = pca_outlier.explained_variance_
Eigen_df_outlier = pd.DataFrame(data=eigenvalues_outlier, columns=['Eigenvalues'])
Eigen_dff_outlier = pd.concat([PC_df_outlier, Eigen_df_outlier], axis=1)
Var_dfff_outlier = pd.concat([Var_cumsum_outlier * 100], axis=1)
Eigen_Analysis_Outlier = pd.concat(
[PC_df_outlier.T, Eigen_df_outlier.T, Var_df_outlier.T, Var_dfff_outlier.T],
axis=0)
Eigen_Analysis_Outlier = Eigen_Analysis_Outlier.rename(columns=Eigen_Analysis_Outlier.iloc[0])
Eigen_Analysis_Outlier = Eigen_Analysis_Outlier.drop(Eigen_Analysis_Outlier.index[0])
Eigen_Analysis_Outlier.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_Outlier
elif outlier == "No" and matrix_type == "Covariance":
features1 = dff.columns
features = list(features1)
x_covar = dff.loc[:, features].values
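            # covariance-matrix PCA: the features are deliberately not standardized in this branch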
pca_covar = PCA(n_components=len(features))
principalComponents_covar = pca_covar.fit_transform(x_covar)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
            # combining principal components and target
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
dfff_covar = finalDf_covar
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
loading_dff_covar = loading_df_covar.T
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
eigenvalues_covar = pca_covar.explained_variance_
Eigen_df_covar = pd.DataFrame(data=eigenvalues_covar, columns=['Eigenvalues'])
Eigen_dff_covar = pd.concat([PC_df_covar, Eigen_df_covar], axis=1)
Var_dfff_covar = pd.concat([(Var_cumsum_covar * 100)], axis=1)
Eigen_Analysis_covar = pd.concat([PC_df_covar.T, Eigen_df_covar.T, Var_df_covar.T, Var_dfff_covar.T],
axis=0)
Eigen_Analysis_covar = Eigen_Analysis_covar.rename(columns=Eigen_Analysis_covar.iloc[0])
Eigen_Analysis_covar = Eigen_Analysis_covar.drop(Eigen_Analysis_covar.index[0])
Eigen_Analysis_covar.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_covar
elif outlier == "Yes" and matrix_type == "Covariance":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier_covar = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier_covar = outlier_dff.loc[:, ].values
pca_outlier_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier_covar)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
,
columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
            # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
dfff_outlier_covar = finalDf_outlier_covar
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
loading_dff_outlier_covar = loading_df_outlier_covar.T
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier_covar = np.interp(70,
Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier_covar = math.ceil(PC_interp_outlier_covar)
eigenvalues_outlier_covar = pca_outlier_covar.explained_variance_
Eigen_df_outlier_covar = pd.DataFrame(data=eigenvalues_outlier_covar, columns=['Eigenvalues'])
Eigen_dff_outlier_covar = pd.concat([PC_df_outlier_covar, Eigen_df_outlier_covar], axis=1)
Var_dfff_outlier_covar = pd.concat([Var_cumsum_outlier_covar * 100], axis=1)
Eigen_Analysis_Outlier_covar = pd.concat(
[PC_df_outlier_covar.T, Eigen_df_outlier_covar.T, Var_df_outlier_covar.T, Var_dfff_outlier_covar.T],
axis=0)
Eigen_Analysis_Outlier_covar = Eigen_Analysis_Outlier_covar.rename(
columns=Eigen_Analysis_Outlier_covar.iloc[0])
Eigen_Analysis_Outlier_covar = Eigen_Analysis_Outlier_covar.drop(Eigen_Analysis_Outlier_covar.index[0])
Eigen_Analysis_Outlier_covar.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_Outlier_covar
elif all_custom == "Custom":
if outlier == 'No' and matrix_type == "Correlation":
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# INPUT DATA WITH OUTLIERS
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
eigenvalues_scale_input = pca_scale_input.explained_variance_
            Eigen_df_scale_input = pd.DataFrame(data=eigenvalues_scale_input, columns=["Eigenvalues"])
PC_df_scale_input = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_input))],
columns=['Principal Component'])
Var_df_scale_input = pd.DataFrame(data=Var_scale_input,
columns=['Cumulative Proportion of Explained Ratio'])
Var_cumsum_scale_input = Var_df_scale_input.cumsum()
Var_dfff_scale_input = pd.concat([Var_cumsum_scale_input * 100], axis=1)
Eigen_Analysis_scale_input = pd.concat([PC_df_scale_input.T, Eigen_df_scale_input.T,
Var_df_scale_input.T, Var_dfff_scale_input.T], axis=0)
Eigen_Analysis_scale_input = Eigen_Analysis_scale_input.rename(columns=Eigen_Analysis_scale_input.iloc[0])
Eigen_Analysis_scale_input = Eigen_Analysis_scale_input.drop(Eigen_Analysis_scale_input.index[0])
Eigen_Analysis_scale_input.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_scale_input
elif outlier == "Yes" and matrix_type == "Correlation":
dff_input = dff.drop(columns=dff[input])
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
dff_target = dff[input]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# INPUT DATA WITH REMOVING OUTLIERS
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
eigenvalues_scale_input_outlier = pca_scale_input_outlier.explained_variance_
            Eigen_df_scale_input_outlier = pd.DataFrame(data=eigenvalues_scale_input_outlier, columns=["Eigenvalues"])
PC_df_scale_input_outlier = pd.DataFrame(
data=['PC' + str(i + 1) for i in range(len(features_input_outlier))],
columns=['Principal Component'])
Var_df_scale_input_outlier = pd.DataFrame(data=Var_scale_input_outlier,
columns=['Cumulative Proportion of Explained '
'Ratio'])
Var_cumsum_scale_input_outlier = Var_df_scale_input_outlier.cumsum()
Var_dfff_scale_input_outlier = pd.concat([Var_cumsum_scale_input_outlier * 100], axis=1)
Eigen_Analysis_scale_input_outlier = pd.concat([PC_df_scale_input_outlier.T, Eigen_df_scale_input_outlier.T,
Var_df_scale_input_outlier.T,
Var_dfff_scale_input_outlier.T], axis=0)
Eigen_Analysis_scale_input_outlier = Eigen_Analysis_scale_input_outlier.rename(
columns=Eigen_Analysis_scale_input_outlier.iloc[0])
Eigen_Analysis_scale_input_outlier = Eigen_Analysis_scale_input_outlier.drop(
Eigen_Analysis_scale_input_outlier.index[0])
Eigen_Analysis_scale_input_outlier.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_scale_input_outlier
elif outlier == "No" and matrix_type == "Covariance":
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
x_scale_input_covar = dff_input.loc[:, features_input].values
# INPUT DATA WITH OUTLIERS
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target],
axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
eigenvalues_scale_input_covar = pca_scale_input_covar.explained_variance_
            Eigen_df_scale_input_covar = pd.DataFrame(data=eigenvalues_scale_input_covar, columns=["Eigenvalues"])
PC_df_scale_input_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_input))],
columns=['Principal Component'])
Var_df_scale_input_covar = pd.DataFrame(data=Var_scale_input_covar,
columns=['Cumulative Proportion of Explained Ratio'])
Var_cumsum_scale_input_covar = Var_df_scale_input_covar.cumsum()
Var_dfff_scale_input_covar = pd.concat([Var_cumsum_scale_input_covar * 100], axis=1)
Eigen_Analysis_scale_input_covar = pd.concat([PC_df_scale_input_covar.T, Eigen_df_scale_input_covar.T,
Var_df_scale_input_covar.T, Var_dfff_scale_input_covar.T],
axis=0)
Eigen_Analysis_scale_input_covar = Eigen_Analysis_scale_input_covar.rename(
columns=Eigen_Analysis_scale_input_covar.iloc[0])
Eigen_Analysis_scale_input_covar = Eigen_Analysis_scale_input_covar.drop(
Eigen_Analysis_scale_input_covar.index[0])
Eigen_Analysis_scale_input_covar.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dff_input = dff.drop(columns=dff[input])
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
dff_target = dff[input]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
# INPUT DATA WITH REMOVING OUTLIERS
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
eigenvalues_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_
Eigen_df_scale_input_outlier_covar = pd.DataFrame(data=eigenvalues_scale_input_outlier_covar,
columns=["Eigenvaues"])
PC_df_scale_input_outlier_covar = pd.DataFrame(
data=['PC' + str(i + 1) for i in range(len(features_input_outlier))],
columns=['Principal Component'])
Var_df_scale_input_outlier_covar = pd.DataFrame(data=Var_scale_input_outlier_covar,
columns=['Cumulative Proportion of Explained '
'Ratio'])
Var_cumsum_scale_input_outlier_covar = Var_df_scale_input_outlier_covar.cumsum()
Var_dfff_scale_input_outlier_covar = pd.concat([Var_cumsum_scale_input_outlier_covar * 100], axis=1)
Eigen_Analysis_scale_input_outlier_covar = pd.concat(
[PC_df_scale_input_outlier_covar.T, Eigen_df_scale_input_outlier_covar.T,
Var_df_scale_input_outlier_covar.T,
Var_dfff_scale_input_outlier_covar.T], axis=0)
Eigen_Analysis_scale_input_outlier_covar = Eigen_Analysis_scale_input_outlier_covar.rename(
columns=Eigen_Analysis_scale_input_outlier_covar.iloc[0])
Eigen_Analysis_scale_input_outlier_covar = Eigen_Analysis_scale_input_outlier_covar.drop(
Eigen_Analysis_scale_input_outlier_covar.index[0])
Eigen_Analysis_scale_input_outlier_covar.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_scale_input_outlier_covar
data = data_frame_EigenA.to_dict('records')
columns = [{"name": i, "id": i, "deletable": True, "selectable": True, 'type': 'numeric',
'format': Format(precision=3, scheme=Scheme.fixed)} for i in data_frame_EigenA.columns]
csv_string = data_frame_EigenA.to_csv(index=False, encoding='utf-8')
csv_string = "data:text/csv;charset=utf-8,%EF%BB%BF" + urllib.parse.quote(csv_string)
return data, columns, csv_string
@app.callback(Output('download-link-loadings', 'download'),
[Input('eigenA-outlier', 'value'),
Input("matrix-type-data-table", 'value')])
def update_filename(outlier, matrix_type):
if outlier == 'Yes' and matrix_type == "Correlation":
download = 'Loadings_correlation_matrix_removed_outliers_data.csv'
elif outlier == 'Yes' and matrix_type == "Covariance":
download = 'Loadings_covariance_matrix_removed_outliers_data.csv'
elif outlier == 'No' and matrix_type == "Correlation":
download = 'Loadings_correlation_matrix_data.csv'
elif outlier == 'No' and matrix_type == "Covariance":
download = 'Loadings_covariance_matrix_data.csv'
return download
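# The callback below builds the loadings table. Loadings are computed as the PCA eigenvectors
# (pca.components_.T) scaled by the square root of the explained variance; for the standardized
# (correlation-matrix) case these values can be read as correlations between the original
# variables and the principal components.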
@app.callback([Output('data-table-loadings', 'data'),
Output('data-table-loadings', 'columns'),
Output('download-link-loadings', 'href')],
[Input('all-custom-choice', 'value'),
Input("eigenA-outlier", 'value'),
Input('feature-input', 'value'),
Input("matrix-type-data-table", 'value'),
Input('csv-data', 'data')], )
def update_output(all_custom, outlier, input, matrix_type, data):
if not data:
        return dash.no_update, dash.no_update, dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
if outlier == 'No' and matrix_type == "Correlation":
features1 = dff.columns
features = list(features1)
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
            # combining principal components and target
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale,
columns=["PC" + str(i + 1) for i in range(len(features))])
line_group_scale_df = pd.DataFrame(data=features, columns=['Features'])
loading_scale_dataf = pd.concat([line_group_scale_df, loading_scale_df], axis=1)
data_frame = loading_scale_dataf
elif outlier == 'Yes' and matrix_type == "Correlation":
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# uses covariance matrix
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
,
columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
            # combining principal components and target
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale,
columns=["PC" + str(i + 1) for i in range(len(features_outlier))])
line_group_outlier_scale_df = pd.DataFrame(data=features_outlier, columns=['Features'])
loading_outlier_scale_dataf = pd.concat([line_group_outlier_scale_df, loading_outlier_scale_df], axis=1)
data_frame = loading_outlier_scale_dataf
elif outlier == "No" and matrix_type == "Covariance":
features1 = dff.columns
features = list(features1)
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar,
columns=["PC" + str(i + 1) for i in range(len(features))])
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['Features'])
loading_scale_dataf_covar = pd.concat([line_group_scale_df_covar, loading_scale_df_covar], axis=1)
data_frame = loading_scale_dataf_covar
elif outlier == "Yes" and matrix_type == "Covariance":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
# uses covariance matrix
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar,
columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
            # combining principal components and target
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar,
columns=["PC" + str(i + 1) for i in
range(len(features_outlier))])
line_group_outlier_scale_df_covar = pd.DataFrame(data=features_outlier, columns=['Features'])
loading_outlier_scale_dataf_covar = pd.concat(
[line_group_outlier_scale_df_covar, loading_outlier_scale_df_covar], axis=1)
data_frame = loading_outlier_scale_dataf_covar
if all_custom == 'Custom':
if outlier == 'No' and matrix_type == "Correlation":
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input,
columns=["PC" + str(i + 1) for i in range(len(features_input))])
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['Features'])
loading_scale_input_dataf = pd.concat([line_group_scale_input_df, loading_scale_input_df], axis=1)
data_frame = loading_scale_input_dataf
elif outlier == 'Yes' and matrix_type == "Correlation":
dff_input = dff.drop(columns=dff[input])
dff_target = dff[input]
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier,
columns=["PC" + str(i + 1)
for i in range(len(features_input_outlier))])
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['Features'])
loading_scale_input_outlier_dataf = pd.concat([line_group_scale_input_outlier_df,
loading_scale_input_outlier_df], axis=1)
data_frame = loading_scale_input_outlier_dataf
elif outlier == "No" and matrix_type == "Covariance":
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# INPUT DATA WITH OUTLIERS
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target],
axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar,
columns=["PC" + str(i + 1) for i in range(len(features_input))])
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['Features'])
loading_scale_input_dataf_covar = pd.concat([line_group_scale_input_df_covar, loading_scale_input_df_covar],
axis=1)
data_frame = loading_scale_input_dataf_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dff_input = dff.drop(columns=dff[input])
dff_target = dff[input]
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar,
columns=["PC" + str(i + 1)
for i in range(len(features_input_outlier))])
            line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['Features'])
            loading_scale_input_outlier_dataf_covar = pd.concat([line_group_scale_input_outlier_df_covar,
                                                                 loading_scale_input_outlier_df_covar], axis=1)
            data_frame = loading_scale_input_outlier_dataf_covar
    data = data_frame.to_dict('records')
    columns = [{"name": i, "id": i, "deletable": True, "selectable": True, 'type': 'numeric',
                'format': Format(precision=3, scheme=Scheme.fixed)} for i in data_frame.columns]
    csv_string = data_frame.to_csv(index=False, encoding='utf-8')
    csv_string = "data:text/csv;charset=utf-8,%EF%BB%BF" + urllib.parse.quote(csv_string)
    return data, columns, csv_string
from datetime import datetime
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import accuracy_score, f1_score, make_scorer, r2_score, mean_squared_error
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
def impute_mode(df, variable):
'''
Usage: replace NaN with the mode in specific column
Input arguments:
df -- a dataframe object
variable -- a column where you want to apply imputation
Return: None
'''
# find most frequent category
most_frequent_category = df.groupby([variable])[variable].count().sort_values(ascending=False).index[0]
# replace NA
df[variable].fillna(most_frequent_category, inplace=True)
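# Example usage (illustrative sketch; 'room_type' is a hypothetical column name):
#   df = pd.DataFrame({'room_type': ['Entire home', np.nan, 'Private room', 'Entire home']})
#   impute_mode(df, 'room_type')   # the NaN in 'room_type' becomes 'Entire home'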
def day_diff(df):
'''
Usage: calculate the day difference using columns "host_since" and "last_scraped"
Input arguments:
df -- a dataframe object
Return: None
'''
if ('last_scraped' in df.columns) & ('host_since' in df.columns):
        df['host_days'] = (pd.to_datetime(df['last_scraped']) - pd.to_datetime(df['host_since'])).dt.days  # store the difference as whole days
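# Example usage (illustrative sketch with made-up dates; assumes 'host_days' holds whole days):
#   df = pd.DataFrame({'host_since': ['2015-01-01'], 'last_scraped': ['2020-01-01']})
#   day_diff(df)   # adds df['host_days'] == 1826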
import sys, os, argparse, pickle, json, hashlib, copy
import pandas as pd
import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler, MinMaxScaler
project_root = os.getcwd()
sys.path.append(project_root)
import demand.models.utils_general as ug
from demand.utils.cache import cached_with_io
def parse_args(args):
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='This function returns organized raw data by ISO country code before and after normalization. '
'Users are given the option to specify whether consumption data is organized by '
                    'country-wide values, or by per capita consumption values. Users are also given the '
'ability to specify which features are required without missing values, and which '
'features are allowed to be given with missing values.')
# static
# download_data.py
parser.add_argument('-cp', '--cntry_path',
default=os.path.join(project_root, 'data',
'raw', 'country-and-continent-codes-list-csv_csv',
'country-and-continent-codes-list-csv_csv.csv'),
help='path to lookup tables for countries.')
# 1960-2019
# download_data.py
# https://data.worldbank.org/indicator/SP.POP.TOTL
parser.add_argument('-pp', '--pop_path',
default=os.path.join(project_root, 'data', 'raw',
'wb_pop',
'API_SP.POP.TOTL_DS2_en_excel_v2_10058049_clean.csv'),
help='path to raw population data.')
# 1960-2018
# download_data.py
parser.add_argument('-gp', '--gdp_path',
default=os.path.join(project_root, 'data', 'raw', 'GDPpc-wb-1960-2018',
'GDPpc-wb-1960-2018.csv'),
help='path to raw gdp data.')
# 1970-2018
# download_data.py
parser.add_argument('-gps', '--gdp_path_somalia',
default=os.path.join(project_root, 'data', 'raw',
'UNdata_Export_20201006_214431107_SomaliaGDPpc',
'UNdata_Export_20201006_214431107_SomaliaGDPpc.csv'),
help='path to raw gdp data for somalia.')
# 1961-2016
# download_data.py
parser.add_argument('-tp', '--temp_path',
default=os.path.join(project_root, 'data', 'raw',
'temperature_1960-2016',
'temperature_1960-2016.xlsx'),
help='path to raw temperature data.')
# 1961-2016
# download_data.py
parser.add_argument('-rp', '--rain_path',
default=os.path.join(project_root, 'data', 'raw',
'rainfall_1960-2016',
'rainfall_1960-2016.xlsx'),
help='path to raw rainfall data.')
# 1971-2018
# download_data.py
parser.add_argument('-isp', '--iea_wes_path',
default=os.path.join(project_root, 'data', 'raw',
'iea_wes_2020-68578195-en',
'WBES-2020-1-EN-20210318T100006.csv'),
help='path to raw iea world energy statistics data.')
# 1971-2018
# download_data.py
parser.add_argument('-ibp', '--iea_web_path',
default=os.path.join(project_root, 'data', 'raw',
'iea_web_2020-cde01922-en',
'WBAL-2020-1-EN-20210301T100458.csv'),
help='path to raw iea world energy balances data.')
# 2010-2019 - not currently using this
parser.add_argument('-cip', '--cdd_iea_path',
default=os.path.join(project_root, 'data', 'raw',
'CDD_18_IEA_20210406',
'Weatherforenergytracker-highlights-CDD18-daily-annual_w_iso.xlsx'),
help='CDD 18degC data from the IEA.')
# 2010-2019 - not currently using this
parser.add_argument('-hip', '--hdd_iea_path',
default=os.path.join(project_root, 'data', 'raw',
'HDD_18_IEA_20210406',
'Weatherforenergytracker-highlights-HDD18-daily-annual_w_iso.xlsx'),
help='HDD 18degC data from the IEA.')
# 195X-2013
parser.add_argument('-chp', '--cdd_hdd_atalla_path',
default=os.path.join(project_root, 'data', 'raw',
'CDD_HDD_18_Atalla_20210406',
'1-s2.0-S0360544217318388-mmc1_w_iso.xlsx'),
help='CDD and HDD 18degC data from the Atalla et al.')
# battle_deaths_path
# 1989-2019
parser.add_argument('-bdp', '--battle_deaths_path',
default=os.path.join(project_root, 'data', 'raw',
'API_VC.BTL.DETH_DS2_en_csv_v2_2167203',
'API_VC.BTL.DETH_DS2_en_csv_v2_2167203.csv'),
help='wb battle deaths data path.')
parser.add_argument('-ft', '--feat_target', default='tot_elec_cons',
                        help='The target feature. '
                             'Should be mutually exclusive with feats_complete_req, feats_nans_allowed. '
                             'Available options are listed in the arg --all_feats'
)
parser.add_argument('-fcr', '--feats_complete_req', default='["pop", "gdp"]',
                        help='Features that are required, and timeseries samples will only be included for complete '
                             'data. Should be mutually exclusive with feat_target and feats_nans_allowed. '
                             'Available options are listed in the arg --all_feats'
)
parser.add_argument('-fna', '--feats_nans_allowed',
default='["elec_prod", "coal_prod", "coal_netexp", "ng_prod", "ng_netexp", "oil_prod", "oil_netexp", "renew_prod", "battle_deaths", "cdd", "hdd"]',
                        help='Features that are required, and timeseries samples will allow nan values in '
                             'data. Should be mutually exclusive with feats_complete_req. '
                             'Available options are listed in the arg --all_feats')
parser.add_argument('-af', '--all_feats',
default='["pop", "gdp", "temp", "rain", "res_elec_cons", "ci_elec_cons", "tot_elec_cons", "elec_prod", "elec_netexp", "coal_prod", "coal_netexp", "ng_prod", "ng_netexp", "oil_prod", "oil_netexp", "renew_prod", "renew_netexp", "battle_deaths", "cdd", "hdd"]',
help='All features to add')
parser.add_argument('-pcf', '--per_capita_flag', default=True,
                        help='A flag to determine whether consumption is considered as absolute values, or on a per capita basis.')
parser.add_argument('-fnt', '--fold_num_test', type=int, default=2,
help='the fold number for k-fold cross-validation')
parser.add_argument('-fnv', '--fold_num_val', type=int, default=0,
help='the fold number for k-fold cross-validation')
parser.add_argument('-nf', '--num_folds', type=int, default=26,
help='the number of folds for k-fold cross-validation')
parser.add_argument('-ypr', '--years_pre', type=int, default=15,
help='years for training')
parser.add_argument('-ypo', '--years_post', type=int, default=15,
help='years for forecasting')
parser.add_argument('-st', '--scale_type',
default='minmax',
                        help='Type of scaling to apply. Options include: "minmax" and "zscore".')
parser.add_argument('-nzan',
'--nans_to_zeros_after_norm',
type=ug.str2bool,
nargs='?',
const=True,
default=True,
help='A flag to convert nans to zeros. Currently, this needs to be "True".')
parser.add_argument('-rfg1',
'--remove_feat_groups_of_1',
type=ug.str2bool,
nargs='?',
const=True,
default=True,
help='A flag whether to include data augmentation by removing feat groups of 1')
parser.add_argument('-rfg2',
'--remove_feat_groups_of_2',
type=ug.str2bool,
nargs='?',
const=True,
default=True,
help='A flag whether to include data augmentation by removing feat groups of 2')
parser.add_argument('-rfg3',
'--remove_feat_groups_of_3',
type=ug.str2bool,
nargs='?',
const=True,
default=False,
help='A flag whether to include data augmentation by removing feat groups of 3')
parser.add_argument('-rfg4',
'--remove_feat_groups_of_4',
type=ug.str2bool,
nargs='?',
const=True,
default=False,
help='A flag whether to include data augmentation by removing feat groups of 4')
parser.add_argument('-rfg5',
'--remove_feat_groups_of_5',
type=ug.str2bool,
nargs='?',
const=True,
default=False,
help='A flag whether to include data augmentation by removing feat groups of 5')
parser.add_argument('-rtg1',
'--remove_timestep_groups_of_1',
type=ug.str2bool,
nargs='?',
const=True,
default=True,
help='A flag whether to include data augmentation by removing timestep groups of 1')
parser.add_argument('-rtg2',
'--remove_timestep_groups_of_2',
type=ug.str2bool,
nargs='?',
const=True,
default=True,
help='A flag whether to include data augmentation by removing timestep groups of 2')
parser.add_argument('-rtg3',
'--remove_timestep_groups_of_3',
type=ug.str2bool,
nargs='?',
const=True,
default=False,
help='A flag whether to include data augmentation by removing timestep groups of 3')
parser.add_argument('-rtg4',
'--remove_timestep_groups_of_4',
type=ug.str2bool,
nargs='?',
const=True,
default=False,
help='A flag whether to include data augmentation by removing timestep groups of 4')
parser.add_argument('-rtg5',
'--remove_timestep_groups_of_5',
type=ug.str2bool,
nargs='?',
const=True,
default=False,
help='A flag whether to include data augmentation by removing timestep groups of 5')
return parser.parse_args(args)
def lookup_country_name(code):
args = parse_args([])
filtered_df = pd.read_csv(args.cntry_path)
filtered_df['Country_Name_short'] = np.array([c.split(',')[0].split('(')[0] for c in filtered_df['Country_Name']])
filtered_df.loc[
filtered_df['Country_Name'] == 'Congo, Republic of the', 'Country_Name_short'] = 'Congo, Republic of the'
filtered_df.loc[
filtered_df[
'Country_Name'] == 'Congo, Democratic Republic of the', 'Country_Name_short'] = 'Congo, Democratic Republic of the'
filtered_df = filtered_df[filtered_df['Three_Letter_Country_Code'] == code]
country_name = filtered_df['Country_Name_short'].values[0]
continent_name = filtered_df['Continent_Name'].values[0]
return country_name, continent_name
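# Example (illustrative; the exact strings depend on the country/continent lookup CSV):
#   lookup_country_name('KEN') is expected to return something like ('Kenya', 'Africa')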
def lookup_country_name_from_array(countries_acronyms):
country_names = []
continent_names = []
for acr in countries_acronyms:
country_name, continent_name = lookup_country_name(acr)
country_names.append(country_name)
continent_names.append(continent_name)
return np.array(country_names), np.array(continent_names)
def replace_feat_name_with_formal_name(feat_names):
conversion_dict = {
'cntry': 'Country',
'year_ind': 'Year Ind.',
'pc_elec_cons': 'Elec. Cons. p.c.',
'tot_elec_cons': 'Elec. Cons.',
'year': 'Year',
'pop': 'Pop.',
'gdp': 'GDP p.c.',
'temp': 'Avg. Temp.',
'rain': 'Avg. Rain.',
'elec_prod': 'Elec. Prod.',
'coal_prod': 'Coal Prod.',
'coal_netexp': 'Coal Net Exp.',
'ng_prod': 'Nat. Gas Prod.',
'ng_netexp': 'Nat. Gas Net Exp.',
'oil_prod': 'Oil Prod.',
'oil_netexp': 'Oil Net Exp.',
'renew_prod': 'Renew. Prod.',
'battle_deaths': 'Bat. Deaths',
'cdd': 'Cool Deg. Days',
'hdd': 'Heat Deg. Days',
'mean_grad_tot_elec_cons': 'Elec. Cons.',
'mean_grad_pc_elec_cons': 'Elec. Cons. p.c.',
'mean_grad_year': 'Year',
'mean_grad_pop': 'Pop.',
'mean_grad_gdp': 'GDP p.c.',
'mean_grad_temp': 'Avg. \n Temp.',
'mean_grad_rain': 'Avg. \n Rain.',
'std_grad_tot_elec_cons': 'Elec. Cons.',
'std_grad_pc_elec_cons': 'Elec. Cons. p.c.',
'std_grad_year': 'Year',
'std_grad_pop': 'Pop.',
'std_grad_gdp': 'GDP p.c.',
'std_grad_temp': 'Avg. \n Temp.',
'std_grad_rain': 'Avg. \n Rain.'
}
output_list = []
for feat_name in feat_names:
output_list.append(conversion_dict[feat_name])
return output_list
@cached_with_io
def get_elec_from_wes(wes_input_path):
# load iea wes data
iea_wes_df = pd.read_csv(wes_input_path, encoding='ISO-8859-1')
def extract_wes_prod_and_netexp(iea_wes_fuel_df, fuel_name='fuel_name'):
iea_wes_fuel_df = iea_wes_fuel_df.drop(['Country', 'PRODUCT', 'Product', 'FLOW', 'TIME', 'Flag Codes', 'Flags'],
axis=1)
iea_wes_fuel_df = iea_wes_fuel_df.dropna(subset=['Value'])
iea_wes_fuel_df = iea_wes_fuel_df.rename(
columns={'"COUNTRY"': "country_code", 'Flow': 'flow', 'Time': 'year',
'Value': 'value'})
# filter residential consumption
res_wes_cons_df = iea_wes_fuel_df[iea_wes_fuel_df['flow'] == 'Residential']
res_wes_cons_df = res_wes_cons_df[res_wes_cons_df['value'] != 0]
res_wes_cons_df = res_wes_cons_df.drop(['flow'], axis=1)
res_wes_cons_df = res_wes_cons_df.rename(columns={'value': f"res_{fuel_name}_cons"})
# filter final consumption
tot_wes_cons_df = iea_wes_fuel_df.loc[iea_wes_fuel_df['flow'] == 'Final consumption']
tot_wes_cons_df = tot_wes_cons_df[tot_wes_cons_df['value'] != 0]
tot_wes_cons_df = tot_wes_cons_df.drop(['flow'], axis=1)
tot_wes_cons_df = tot_wes_cons_df.rename(columns={'value': f"tot_{fuel_name}_cons"})
# # filter c&i consumption
ci_wes_cons_df = pd.merge(res_wes_cons_df, tot_wes_cons_df, on=["country_code", "year"])
ci_wes_cons_df[f'ci_{fuel_name}_cons'] = ci_wes_cons_df[f'tot_{fuel_name}_cons'] - ci_wes_cons_df[
f'res_{fuel_name}_cons']
ci_wes_cons_df = ci_wes_cons_df.drop([f'tot_{fuel_name}_cons', f'res_{fuel_name}_cons'], axis=1)
# filter production
wes_prod_df = iea_wes_fuel_df[iea_wes_fuel_df['flow'] == 'Production']
wes_prod_df = wes_prod_df[wes_prod_df['value'] != 0]
wes_prod_df = wes_prod_df.drop(['flow'], axis=1)
wes_prod_df = wes_prod_df.rename(columns={'value': f"{fuel_name}_prod"})
# filter imports
wes_imp_df = iea_wes_fuel_df[iea_wes_fuel_df['flow'] == 'Imports']
wes_imp_df = wes_imp_df[wes_imp_df['value'] != 0]
wes_imp_df = wes_imp_df.drop(['flow'], axis=1)
wes_imp_df = wes_imp_df.rename(columns={'value': f"{fuel_name}_imp"})
# filter exports
wes_exp_df = iea_wes_fuel_df[iea_wes_fuel_df['flow'] == 'Exports']
wes_exp_df = wes_exp_df[wes_exp_df['value'] != 0]
wes_exp_df = wes_exp_df.drop(['flow'], axis=1)
wes_exp_df = wes_exp_df.rename(columns={'value': f"{fuel_name}_exp"})
# calc net exports
wes_netexp_df = pd.merge(wes_imp_df, wes_exp_df, on=["country_code", "year"])
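        # net exports are computed as -(exports) - imports; this only equals exports minus imports
        # if the source data reports export flows as negative values (which appears to be the
        # convention in the IEA extract used here)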
wes_netexp_df[f'{fuel_name}_netexp'] = - wes_netexp_df[f'{fuel_name}_exp'] - wes_netexp_df[f'{fuel_name}_imp']
wes_netexp_df = wes_netexp_df.drop([f'{fuel_name}_exp', f'{fuel_name}_imp'], axis=1)
return res_wes_cons_df, ci_wes_cons_df, tot_wes_cons_df, wes_prod_df, wes_netexp_df
iea_elec_df = iea_wes_df[iea_wes_df['Product'] == 'Electricity (GWh)']
res_elec_cons_df, ci_elec_cons_df, tot_elec_cons_df, elec_prod_df, elec_netexp_df = extract_wes_prod_and_netexp(
iea_elec_df, fuel_name='elec')
iea_oil_df = iea_wes_df[iea_wes_df['Product'] == 'Crude oil (kt)']
_, _, _, oil_prod_df, oil_netexp_df = extract_wes_prod_and_netexp(
iea_oil_df, fuel_name='oil')
return res_elec_cons_df, ci_elec_cons_df, tot_elec_cons_df, elec_prod_df, elec_netexp_df, oil_prod_df, oil_netexp_df
@cached_with_io
def get_fuel_production_and_netexp_from_web(web_input_path):
# get all production and netexports data values for coal, ng, oil, and renewables
def extract_web_prod_and_netexp(iea_web_fuel_df, fuel_name='fuel_name'):
# takes in world energy balances (web) dataframe for a specific fuel, and outputs properly formatted
# dataframes for production and net exports
# get fuel prod
iea_web_fuel_prod_df = iea_web_fuel_df[iea_web_fuel_df['FLOW'] == 'INDPROD']
iea_web_fuel_prod_df = iea_web_fuel_prod_df.drop(
["Country", '"UNIT"', 'Unit', 'PRODUCT', 'Product', 'FLOW', 'Flow', 'TIME', 'Flag Codes', 'Flags'],
axis=1)
iea_web_fuel_prod_df = iea_web_fuel_prod_df.dropna(subset=['Value'])
iea_web_fuel_prod_df = iea_web_fuel_prod_df.rename(
columns={'COUNTRY': "country_code", 'Time': 'year',
'Value': f'{fuel_name}_prod'})
# get fuel imp
iea_web_fuel_imp_df = iea_web_fuel_df[iea_web_fuel_df['FLOW'] == 'IMPORTS']
iea_web_fuel_imp_df = iea_web_fuel_imp_df.drop(
["Country", '"UNIT"', 'Unit', 'PRODUCT', 'Product', 'FLOW', 'Flow', 'TIME', 'Flag Codes', 'Flags'],
axis=1)
iea_web_fuel_imp_df = iea_web_fuel_imp_df.dropna(subset=['Value'])
iea_web_fuel_imp_df = iea_web_fuel_imp_df.rename(
columns={'COUNTRY': "country_code", 'Time': 'year',
'Value': f'{fuel_name}_imp'})
# get fuel exp
iea_web_fuel_exp_df = iea_web_fuel_df[iea_web_fuel_df['FLOW'] == 'EXPORTS']
iea_web_fuel_exp_df = iea_web_fuel_exp_df.drop(
["Country", '"UNIT"', 'Unit', 'PRODUCT', 'Product', 'FLOW', 'Flow', 'TIME', 'Flag Codes', 'Flags'],
axis=1)
iea_web_fuel_exp_df = iea_web_fuel_exp_df.dropna(subset=['Value'])
iea_web_fuel_exp_df = iea_web_fuel_exp_df.rename(
columns={'COUNTRY': "country_code", 'Time': 'year',
'Value': f'{fuel_name}_exp'})
# get fuel netexp
iea_web_fuel_netexp_df = pd.merge(iea_web_fuel_imp_df, iea_web_fuel_exp_df,
on=["country_code", "year"])
iea_web_fuel_netexp_df[f'{fuel_name}_netexp'] = - iea_web_fuel_netexp_df[f'{fuel_name}_exp'] - \
iea_web_fuel_netexp_df[
f'{fuel_name}_imp']
iea_web_fuel_netexp_df = iea_web_fuel_netexp_df.drop([f'{fuel_name}_exp', f'{fuel_name}_imp'], axis=1)
return iea_web_fuel_prod_df, iea_web_fuel_netexp_df
# load all web data
iea_web_df = pd.read_csv(web_input_path, encoding='ISO-8859-1')
iea_web_df = iea_web_df[iea_web_df['Unit'] == 'TJ']
# filter for coal data
iea_web_coal_df = iea_web_df[iea_web_df['PRODUCT'] == 'COAL']
coal_prod_df, coal_netexp_df = extract_web_prod_and_netexp(iea_web_coal_df, fuel_name='coal')
# filter for ng data
iea_web_ng_df = iea_web_df[iea_web_df['PRODUCT'] == 'NATGAS']
ng_prod_df, ng_netexp_df = extract_web_prod_and_netexp(iea_web_ng_df, fuel_name='ng')
# # filter for oil data
# iea_web_oil_df = iea_web_df[iea_web_df['PRODUCT'] == 'TOTPRODS']
# oil_prod_df, oil_netexp_df = extract_web_prod_and_netexp(iea_web_oil_df, fuel_name='oil')
# filter for renewables data
iea_web_renew_df = iea_web_df[iea_web_df['PRODUCT'] == 'MRENEW']
renew_prod_df, renew_netexp_df = extract_web_prod_and_netexp(iea_web_renew_df, fuel_name='renew')
return coal_prod_df, coal_netexp_df, ng_prod_df, ng_netexp_df, renew_prod_df, renew_netexp_df
@cached_with_io
def get_pop(input_path):
pop_df = pd.read_csv(input_path, encoding='ISO-8859-1')
pop_df = pop_df.melt(id_vars=["Country Code", "Country Name"], var_name="year", value_name="pop")
pop_df = pop_df.rename(columns={"Country Code": "country_code"})
pop_df = pop_df.drop(["Country Name"], axis=1)
pop_df.year = pop_df.year.astype('int64')
pop_df = pop_df.dropna()
pop_df = pop_df[pop_df['pop'] > 0.0]
return pop_df
@cached_with_io
def get_gdp(input_path, input_path_somalia):
# Indicator Name: GDP per capita (constant 2010 US$)
gdp_df = pd.read_csv(input_path, encoding='ISO-8859-1', skiprows=4)
gdp_som_df = pd.read_csv(input_path_somalia, encoding='ISO-8859-1', skiprows=0)
gdp_df = gdp_df.drop(['Indicator Code', 'Indicator Name', 'Country Name'], axis=1)
gdp_df = gdp_df.melt(id_vars=['Country Code'], var_name="year", value_name="gdp")
gdp_df = gdp_df.rename(columns={"Country Code": "country_code"})
gdp_df.year = gdp_df.year.astype('int64')
gdp_df = gdp_df.dropna()
som_years = gdp_som_df.shape[0]
gdp_som_df['country_code'] = np.repeat('SOM', som_years)
gdp_som_df['year'] = gdp_som_df['Year'].values
gdp_som_df['gdp'] = gdp_som_df['Value'].values
gdp_som_df = gdp_som_df.drop(['Country or Area', 'Year', 'Item', 'Value'], axis=1)
    gdp_df = pd.concat([gdp_df, gdp_som_df])
gdp_df = gdp_df[gdp_df['gdp'] > 0.0]
return gdp_df
@cached_with_io
def get_temp(input_path):
temp_df = pd.read_excel(input_path)
temp_df['Year'] = temp_df['Year'].astype(int)
temp_df['Country'] = temp_df['Country'].str.strip()
temp_df['ISO3'] = temp_df['ISO3'].str.strip()
temp_df = temp_df.rename(
columns={"Country": "country", "ISO3": "country_code", "Year": "year", "Temperature": "temp"})
# cleaning problems in the data
temp_df.loc[
temp_df['country'] == 'Tanzania', 'country_code'] = 'TZA'
temp_df = temp_df.drop(['country'], axis=1)
return temp_df
@cached_with_io
def get_rain(input_path):
rain_df = pd.read_excel(input_path)
rain_df['Year'] = rain_df['Year'].astype(float)
rain_df['Country'] = rain_df['Country'].str.strip()
rain_df['ISO3'] = rain_df['ISO3'].str.strip()
rain_df = rain_df.rename(
columns={"Country": "country", "ISO3": "country_code", "Year": "year", "Rainfall (mm)": "rain"})
# cleaning problems in the data
rain_df.loc[
rain_df['country'] == 'Tanzania', 'country_code'] = 'TZA'
rain_df = rain_df.drop(['country'], axis=1)
return rain_df
@cached_with_io
def get_battle_deaths(battle_deaths_path):
battle_deaths_df = pd.read_csv(battle_deaths_path, encoding='ISO-8859-1', skiprows=4)
battle_deaths_df = battle_deaths_df.melt(
id_vars=["Country Name", "Country Code", "Indicator Name", "Indicator Code"], var_name="year",
value_name="battle_deaths")
battle_deaths_df = battle_deaths_df.dropna(subset=['battle_deaths'])
battle_deaths_df = battle_deaths_df.drop(['Country Name', "Indicator Name", "Indicator Code"], axis=1)
battle_deaths_df = battle_deaths_df.rename(columns={'Country Code': "country_code"})
battle_deaths_df.year = battle_deaths_df.year.astype('int64')
return battle_deaths_df
@cached_with_io
def get_hdd_cdd(cdd_iea_path, hdd_iea_path, cdd_hdd_atalla_path, plot_atalla_vs_iea=False):
# melt cdd_atalla into right format
cdd_atalla_df = pd.read_excel(cdd_hdd_atalla_path,
sheet_name='t2m.cdd.18C_daily_freq_iso')
cdd_atalla_df = cdd_atalla_df.melt(id_vars=["Country", "ISO"], var_name="year", value_name="cdd_atalla")
cdd_atalla_df = cdd_atalla_df.drop(['Country'], axis=1)
cdd_atalla_df = cdd_atalla_df.rename(columns={"ISO": "country_code"})
cdd_atalla_df.year = cdd_atalla_df.year.astype('int64')
# melt hdd_atalla into right format
hdd_atalla_df = pd.read_excel(cdd_hdd_atalla_path,
sheet_name='T2m.hdd.18C_daily_freq_iso')
hdd_atalla_df = hdd_atalla_df.melt(id_vars=["Country", "ISO"], var_name="year", value_name="hdd_atalla")
hdd_atalla_df = hdd_atalla_df.drop(['Country'], axis=1)
hdd_atalla_df = hdd_atalla_df.rename(columns={"ISO": "country_code"})
hdd_atalla_df.year = hdd_atalla_df.year.astype('int64')
if plot_atalla_vs_iea:
# below is code to plot the atalla et al cdd and hdd data sets with the iea data set. The preliminary
# conclusion about these two datasets is that they are incompatible. Even though they claim to be producing
# the same thing, their methodologies are different and they give incongruent data for overlapping years
# melt cdd_iea into right format
cdd_iea_df = pd.read_excel(cdd_iea_path)
cdd_iea_df = cdd_iea_df.melt(id_vars=["Country", "ISO Code"], var_name="year", value_name="cdd_iea")
cdd_iea_df = cdd_iea_df.drop(['Country'], axis=1)
cdd_iea_df = cdd_iea_df.rename(columns={"ISO Code": "country_code"})
# melt hdd_iea into right format
hdd_iea_df = pd.read_excel(hdd_iea_path)
hdd_iea_df = hdd_iea_df.melt(id_vars=["Country", "ISO Code"], var_name="year", value_name="hdd_iea")
hdd_iea_df = hdd_iea_df.drop(['Country'], axis=1)
hdd_iea_df = hdd_iea_df.rename(columns={"ISO Code": "country_code"})
# get intersection of country codes for cdd entries
cdd_countries = cdd_atalla_df['country_code'].unique().tolist()
cdd_countries.extend(cdd_iea_df['country_code'].unique().tolist())
cdd_countries = np.unique(cdd_countries)
cdd_countries_intersect = np.intersect1d(cdd_atalla_df['country_code'].unique(),
cdd_iea_df['country_code'].unique())
# get intersection of country codes for cdd entries
hdd_countries = hdd_atalla_df['country_code'].unique().tolist()
hdd_countries.extend(hdd_iea_df['country_code'].unique().tolist())
hdd_countries = np.unique(hdd_countries)
hdd_countries_intersect = np.intersect1d(hdd_atalla_df['country_code'].unique(),
hdd_iea_df['country_code'].unique())
# make cdd out path
cdd_out_path = os.path.join(project_root, 'out', 'cdd')
os.makedirs(cdd_out_path, exist_ok=True)
# make cdd out path
hdd_out_path = os.path.join(project_root, 'out', 'hdd')
os.makedirs(hdd_out_path, exist_ok=True)
# print pngs of plots by country
for cntry in cdd_countries_intersect:
x1 = cdd_atalla_df[cdd_atalla_df['country_code'] == cntry]['year'].values
y1 = cdd_atalla_df[cdd_atalla_df['country_code'] == cntry]['cdd_atalla'].values
x2 = cdd_iea_df[cdd_iea_df['country_code'] == cntry]['year'].values
y2 = cdd_iea_df[cdd_iea_df['country_code'] == cntry]['cdd_iea'].values
plt.figure()
plt.plot(x1, y1, label='Atalla et al.')
plt.plot(x2, y2, label='IEA')
plt.title(f'CDD data for {lookup_country_name(cntry)[0]}')
out_path = os.path.join(cdd_out_path, f'{cntry}.png')
plt.savefig(out_path)
plt.close()
# print pngs of plots by country
for cntry in hdd_countries_intersect:
x1 = hdd_atalla_df[hdd_atalla_df['country_code'] == cntry]['year'].values
y1 = hdd_atalla_df[hdd_atalla_df['country_code'] == cntry]['hdd_atalla'].values
x2 = hdd_iea_df[hdd_iea_df['country_code'] == cntry]['year'].values
y2 = hdd_iea_df[hdd_iea_df['country_code'] == cntry]['hdd_iea'].values
plt.figure()
plt.plot(x1, y1, label='Atalla et al.')
plt.plot(x2, y2, label='IEA')
plt.title(f'hdd data for {lookup_country_name(cntry)[0]}')
out_path = os.path.join(hdd_out_path, f'{cntry}.png')
plt.savefig(out_path)
plt.close()
cdd_df = cdd_atalla_df
hdd_df = hdd_atalla_df
cdd_df = cdd_df.rename(columns={"cdd_atalla": "cdd"})
hdd_df = hdd_df.rename(columns={"hdd_atalla": "hdd"})
return cdd_df, hdd_df
def compile_data(args, all_feats=None, feat_target=None, feats_complete_req=None,
feats_nans_allowed=None):
# Compile data from disparate sources, format them all the same way, and
# combine them into a single df, df_all. Return a full dictionary of
# this data as output.
# Take advantage of caching given parameter settings for faster subsequent runs.
    # Note that any changes to the data sources require the caches to be cleared.
# convert args to hash
args = parse_args(args)
args = args.__dict__
# override with params
    if all_feats is not None:
        args['all_feats'] = all_feats
    if feat_target is not None:
        args['feat_target'] = feat_target
    if feats_complete_req is not None:
        args['feats_complete_req'] = feats_complete_req
    if feats_nans_allowed is not None:
        args['feats_nans_allowed'] = feats_nans_allowed
args_string = json.dumps(args)
args_hash = hashlib.sha256(args_string.encode('utf-8')).hexdigest()
args_hash_pickle_path = Path(os.path.join(project_root, 'data', 'processed', args_hash + '.p'))
# save to file, if not already a file
if not args_hash_pickle_path.is_file():
# load pop gdp temp rain data
pop_df = get_pop(args['pop_path'])
gdp_df = get_gdp(args['gdp_path'], args['gdp_path_somalia'])
temp_df = get_temp(args['temp_path'])
rain_df = get_rain(args['rain_path'])
# load elec an oil data
res_elec_cons_df, ci_elec_cons_df, tot_elec_cons_df, elec_prod_df, elec_netexp_df, oil_prod_df, oil_netexp_df = get_elec_from_wes(
args['iea_wes_path'])
# load coal and ng data
coal_prod_df, coal_netexp_df, \
ng_prod_df, ng_netexp_df, \
renew_prod_df, renew_netexp_df = \
get_fuel_production_and_netexp_from_web(args['iea_web_path'])
# load cdd, hdd, and battle deaths data
cdd_df, hdd_df = get_hdd_cdd(args['cdd_iea_path'], args['hdd_iea_path'], args['cdd_hdd_atalla_path'])
battle_deaths_df = get_battle_deaths(args['battle_deaths_path'])
# get rid of country labels for all dfs
scope = locals()
# load dfs to merge together
dfs_all_feats = [feat + '_df' for feat in json.loads(args['all_feats'])]
df_feat_target = args['feat_target'] + '_df'
dfs_to_load_complete = [feat + '_df' for feat in json.loads(args['feats_complete_req'])]
dfs_to_load_nans = [feat + '_df' for feat in json.loads(args['feats_nans_allowed'])]
# turn into dict of dataframes
df_dict = {df_name: eval(df_name, scope) for df_name in dfs_all_feats}
# merge of dataframes, requiring complete data
df_combination_uid = 't_' + df_feat_target + '_c_'
for i, df_name in enumerate(dfs_to_load_complete):
df_combination_uid = df_combination_uid + df_name.replace('_df', '')[0] + df_name.replace('_df', '')[-1]
if i == 0:
df_all = eval(df_name, scope)
else:
if df_name == 'year_df':
continue
df_all = pd.merge(df_all, eval(df_name, scope), on=["country_code", "year"])
# merge of dataframes, allowing nan values
df_combination_uid = df_combination_uid + '_n_'
for i, df_name in enumerate(dfs_to_load_nans):
df_all = pd.merge(df_all, eval(df_name, scope), how='left', on=["country_code", "year"])
df_combination_uid = df_combination_uid + df_name.replace('_df', '')[0] + df_name.replace('_df', '')[-1]
# add in target, but do not require the column to have complete data
df_all = pd.merge(df_all, eval(df_feat_target, scope), how='left', on=["country_code", "year"])
# reorder dataframe columns
cols = df_all.columns.tolist()
cols = cols[:2] + cols[-1:] + cols[2:-1]
df_all = df_all[cols]
df_dict[df_combination_uid] = df_all
df_dict['df_all'] = df_all
# saving dict of dfs
pickle.dump((df_dict, df_combination_uid), open(args_hash_pickle_path, "wb"), protocol=4)
else:
df_dict, df_combination_uid = pickle.load(open(args_hash_pickle_path, "rb"))
return df_dict, df_combination_uid
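# Example usage (a minimal, hypothetical sketch -- the argument handling depends on parse_args):
# df_dict, uid = compile_data(sys.argv[1:])   # first run: builds all tables and pickles them to data/processed/<sha256-of-args>.p
# df_dict, uid = compile_data(sys.argv[1:])   # identical second run: loads the cached pickle instead of rebuilding
# df_all = df_dict['df_all']                  # merged frame keyed by country_code and year, with the target as the third column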
def get_target_feat_name(per_capita_flag=True):
if per_capita_flag:
target_feat_name = 'pc_elec_cons'
else:
target_feat_name = 'tot_elec_cons'
return target_feat_name
def split_dataset_cons(dataset_norm, target_feat_name=None, fold_num_test=0, fold_num_val=1, num_folds=26, years_pre=15,
years_post=15):
# split a multivariate dataset into train/test sets.
# The logic here is to loop through every country in the given normalized dataset.
# For a given country, we calculate the number of iterations for which data is available
# given end- and start-years. We then query for these years.
# We keep track of fold_num_test and num_folds. Only African countries are added to leave-one-out
# cross validation folds.
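# Illustrative example (assumed numbers): with years_pre=15 and years_post=15, a country observed
# from 1971 to 2014 yields 2014 - 1971 - 15 - 15 + 2 = 15 sliding windows, e.g. x = 1971-1985 with
# y = 1986-2000, then x = 1972-1986 with y = 1987-2001, and so on until y ends in 2014.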
countries = np.unique(dataset_norm['country_code'].values)
######################################
# populate train, val, and test
######################################
train_x = []
train_y = []
val_x = []
val_y = []
test_x = []
test_y = []
# loop through each unique country
# the logic is to track non-african countries separately from african countries.
# only african countries are ever added to the test splits. Non-african countries are
# always in "train"
afr_i = 0
for c, country in enumerate(countries):
try:
country_name, continent_name = lookup_country_name(country)
except:
print(f'cannot find country: {country}')
continue
# get just the country's data
data_temp_df = dataset_norm[dataset_norm['country_code'] == country]
start_year = np.min(data_temp_df['year_orig'].values)
end_year = np.max(data_temp_df['year_orig'].values)
# calculate the number of iterations to do for a given country.
# We assume that all years within the start and end years have
# entries present. Empirically, looking at the data (for population and gdp)
# this is true
num_iters_train_val_test = end_year - start_year - years_pre - years_post + 2
# if you don't have enough training years for this country, skip it
if num_iters_train_val_test < 1:
continue
# increment africa counter flag
increment_africa_counter = False
# if you do have enough training years, start to define training entries and test entry(ies)
for i in range(num_iters_train_val_test):
# define start and end indices
x_start = start_year + i
x_end = start_year + years_pre + i
y_end = start_year + years_pre + years_post + i
x_temp_df_sample = data_temp_df[(data_temp_df['year_orig'] >= x_start)
& (data_temp_df['year_orig'] < x_end)]
y_temp_df_sample = data_temp_df[(data_temp_df['year_orig'] >= x_end)
& (data_temp_df['year_orig'] < y_end)]
# now we need to determine whether the whole y series has consumption data (our target feature)
# If not, we skip it. If so, then we can use it for training/validation/test.
if (y_temp_df_sample[target_feat_name] < np.finfo(float).eps).any():
continue
if (continent_name == 'Africa'):
increment_africa_counter = True
# append into arrays if possible!
if (continent_name == 'Africa') and (afr_i % num_folds == fold_num_test):
test_x.append(x_temp_df_sample)
test_y.append(y_temp_df_sample)
print(f'added {country_name} to test')
if (continent_name == 'Africa') and (afr_i % num_folds == fold_num_val):
val_x.append(x_temp_df_sample)
val_y.append(y_temp_df_sample)
print(f'added {country_name} to val')
if not ((continent_name == 'Africa') and (afr_i % num_folds == fold_num_val)) and \
not ((continent_name == 'Africa') and (afr_i % num_folds == fold_num_test)):
train_x.append(x_temp_df_sample)
train_y.append(y_temp_df_sample)
if increment_africa_counter:
afr_i = afr_i + 1
######################################
# populate run_forward
# this tries to make forecasts using the latest available data for a given country
######################################
future_x = []
all_cons = []
# loop through each unique country
for c, country in enumerate(countries):
try:
lookup_country_name(country)
except:
print(f'cannot find country: {country}')
continue
# get just the country's data
data_temp_df = dataset_norm[dataset_norm['country_code'] == country]
end_year = np.max(data_temp_df['year_orig'].values)
start_year = end_year - years_pre
x_temp_df_sample = data_temp_df[
(data_temp_df['year_orig'] > start_year) & (data_temp_df['year_orig'] <= end_year)]
future_x.append(x_temp_df_sample)
all_cons.append(data_temp_df)
######################################
# populate run_hist
# this looks for historical data for all countries
######################################
# calculate start and end years for hist preds
# kenya_last_year = np.max(dataset_norm[dataset_norm['country_code'] == 'KEN'].index.get_level_values(0).values)
# end_year = kenya_last_year - years_post + 1
# start_year = end_year - years_pre
countries = np.unique(dataset_norm['country_code'].values)
hist_x = []
# loop through each unique country
for c, country in enumerate(countries):
try:
lookup_country_name(country)
except:
print(f'cannot find country: {country}')
continue
# get just the country's data
data_temp_df = dataset_norm[dataset_norm['country_code'] == country]
start_year = np.min(data_temp_df['year_orig'].values)
end_year = np.max(data_temp_df['year_orig'].values)
# calculate the number of iterations to do for a given country.
# We assume that all years within the start and end years have
# entries present. Empirically, looking at the data (for population and gdp)
# this is true
num_iters_hist = end_year - start_year - years_pre - years_post + 2
# if you don't have enough training years for this country, skip it
if num_iters_hist < 1:
continue
# if you do have enough training years, start to define training entries and test entry(ies)
for i in range(num_iters_hist):
# define start and end indices
x_start = start_year + i
x_end = start_year + years_pre + i
x_temp_df_sample = data_temp_df[(data_temp_df['year_orig'] >= x_start)
& (data_temp_df['year_orig'] < x_end)]
hist_x.append(x_temp_df_sample)
# process the dfs to make lists of values and years, which is required for
# keras input data formats for LSTM models
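# A minimal sketch of the expected shapes (assuming F feature columns remain after dropping
# country_code and year_orig): x samples stack to (n_samples, years_pre, F) and y samples to
# (n_samples, years_post) after the single-target reshape below -- the (samples, timesteps,
# features) layout that Keras LSTM layers expect.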
def unravel_df_list(df_lists, x_or_y='x', years_pre=15, years_post=15):
list_to_collapse = []
list_to_collapse_ts = []
list_to_collapse_cntry = []
for df_list in df_lists:
if (x_or_y == 'x') and (df_list['year_orig'].values.size != years_pre):
print(f"skipping forecasts for {np.unique(df_list['country_code'].values)}")
continue
if (x_or_y == 'y') and (df_list['year_orig'].values.size != years_post):
print(f"skipping forecasts for {np.unique(df_list['country_code'].values)}")
continue
list_to_collapse_ts.append(df_list['year_orig'].values)
list_to_collapse_cntry.append(df_list['country_code'].values)
list_to_collapse.append(df_list.drop(columns=['country_code', 'year_orig']).values)
try:
collapse_ts = np.array(list_to_collapse_ts, dtype=int)
collapse_cntry = np.array(list_to_collapse_cntry)
collapse_ts = collapse_ts.reshape(collapse_ts.shape[0], collapse_ts.shape[1], 1)
collapse_cntry = collapse_cntry.reshape(collapse_cntry.shape[0], collapse_cntry.shape[1], 1)
collapse_stacked = np.array(list_to_collapse)
except:
print('unravel_df_list: failed to stack samples into uniform arrays (inconsistent window lengths?)')
# reshape 3d tensor if y, since we are doing a single output type
if x_or_y == 'y':
collapse_stacked = collapse_stacked[:, :, 0].reshape(collapse_stacked.shape[0], collapse_stacked.shape[1])
return collapse_stacked, collapse_ts, collapse_cntry
# reshape to numpy arrays
train_x, train_x_ts, train_x_cntry = \
unravel_df_list(train_x,
x_or_y='x',
years_pre=years_pre,
years_post=years_post)
train_y, train_y_ts, train_y_cntry = \
unravel_df_list(train_y,
x_or_y='y',
years_pre=years_pre,
years_post=years_post)
val_x, val_x_ts, val_x_cntry = \
unravel_df_list(val_x,
x_or_y='x',
years_pre=years_pre,
years_post=years_post)
val_y, val_y_ts, val_y_cntry = \
unravel_df_list(val_y,
x_or_y='y',
years_pre=years_pre,
years_post=years_post)
test_x, test_x_ts, test_x_cntry = \
unravel_df_list(test_x,
x_or_y='x',
years_pre=years_pre,
years_post=years_post)
test_y, test_y_ts, test_y_cntry = \
unravel_df_list(test_y,
x_or_y='y',
years_pre=years_pre,
years_post=years_post)
future_x, future_x_ts, future_x_cntry = \
unravel_df_list(future_x,
x_or_y='x',
years_pre=years_pre,
years_post=years_post)
hist_x, hist_x_ts, hist_x_cntry = \
unravel_df_list(hist_x,
x_or_y='x',
years_pre=years_pre,
years_post=years_post)
all_cons = | pd.concat(all_cons) | pandas.concat |
"""Tests for arithmetic.py"""
import pytest
import pandas as pd
from pandas.testing import assert_frame_equal
from timeflux.core.io import Port
from timeflux_example.nodes.arithmetic import Add, MatrixAdd
def test_add():
node = Add(1)
node.i = Port()
node.i.data = pd.DataFrame([[1, 1], [1, 1]])
node.update()
expected = pd.DataFrame([[2, 2], [2, 2]])
assert_frame_equal(node.o.data, expected)
def test_matrix():
node = MatrixAdd()
node.i_m1 = Port()
node.i_m2 = Port()
node.i_m1.data = pd.DataFrame([[1, 1], [1, 1]])
node.i_m2.data = pd.DataFrame([[2, 2], [2, 2]])
node.update()
expected = | pd.DataFrame([[3, 3], [3, 3]]) | pandas.DataFrame |
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
# Series Problem
s1 = pd.Series(-3, index=range(2, 11, 2))
s2 = pd.Series({'Bill':31, 'Sarah':28, 'Jane':34, 'Joe':26})
# Random Walk Problem
# five random walks of length 100 plotted together
N = 100
for i in range(5):
s1 = np.zeros(N)
s1[1:] = np.random.binomial(1, .5, size=N-1)*2-1
s1 = pd.Series(s1)
s1 = s1.cumsum()
s1.plot()
plt.show()
# biased random walks
N = 100 #length of random walk
s1 = np.zeros(N)
s1[1:] = np.random.binomial(1, .51, size=(N-1,))*2-1 #coin flips
s1 = pd.Series(s1)
s1 = s1.cumsum() #random walk
plt.subplot(311)
s1.plot()
N = 10000 #length of random walk
s1 = np.zeros(N)
s1[1:] = np.random.binomial(1, .51, size=(N-1,))*2-1 #coin flips
s1 = pd.Series(s1)
s1 = s1.cumsum() #random walk
plt.subplot(312)
s1.plot()
N = 100000 #length of random walk
s1 = np.zeros(N)
s1[1:] = np.random.binomial(1, .51, size=(N-1,))*2-1 #coin flips
s1 = pd.Series(s1)
s1 = s1.cumsum() #random walk
plt.subplot(313)
s1.plot()
plt.show()
# SQL SELECT problem
studentInfo[(studentInfo['Age']>19)&(studentInfo['Sex']=='M')][['ID', 'Name']]
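# (roughly equivalent SQL, for reference: SELECT ID, Name FROM studentInfo WHERE Age > 19 AND Sex = 'M')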
# SQL JOIN problem
pd.merge(studentInfo[studentInfo['Sex']=='M'], otherInfo, on='ID')[['ID', 'Age', 'GPA']]
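# (roughly equivalent SQL, for reference, assuming Age lives in studentInfo and GPA in otherInfo:
# SELECT s.ID, s.Age, o.GPA FROM studentInfo s JOIN otherInfo o ON s.ID = o.ID WHERE s.Sex = 'M')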
# final Crime Data problem
# load in the data
crimeDF = | pd.read_csv("crime_data.txt", header=1, skiprows=0, index_col=0) | pandas.read_csv |
import torch
import numpy as np
import matplotlib.pyplot as plt
from yasa import get_bool_vector, spindles_detect
from EEG.templates import get_templates
import pandas as pd
import pickle as pkl
import glob
def plot_cam(saved_model_name, signal_name, plot_inds, test_loader, model, cam_target, label='normal',
use_grad_cam=False, use_relu=True, grad_weight=True, ds=False):
label_lookup = ['Wake', 'N1', 'N2', 'N3', 'REM']
if ds:
fs = 80 # [Hz]
signal_len = 2400
conv0_len = 2700 # signal_len / 2
else:
fs = 125 # [Hz]
signal_len = 15000
conv0_len = 1875 # signal_len/(2**3)
time_series, true_label, name = test_loader.dataset[0]
time_series_tensor = torch.reshape(torch.tensor(time_series), (1, 2, signal_len)).to("cpu")
raise NotImplementedError # this is not updated..
feature_tensor = torch.zeros(1) # todo: fix for real features
logits, cam, _ = model(time_series_tensor, feature_tensor)
if use_grad_cam:
logits[:, cam_target].backward()
gradients = model.get_activations_gradient()
pooled_gradients = torch.mean(gradients, dim=[0, 2])
activations = model.get_activations().detach()
if grad_weight: # weight by gradients
for i in range(activations.shape[1]): # change to torch.mm later
activations[:, i, :] *= pooled_gradients[i]
grad_cam = torch.mean(activations, dim=1).squeeze()
if use_relu:
grad_cam = np.maximum(grad_cam, 0)
grad_cam /= torch.max(grad_cam)
else:
grad_cam = (grad_cam - torch.min(grad_cam)) / (torch.max(grad_cam) - torch.min(grad_cam))
cam = grad_cam.numpy()
else:
cam = np.squeeze(cam.detach().numpy())[cam_target, :]
# Clip signal to baseline
# cam = np.maximum(cam, Counter(cam).most_common(1)[0][0])
# cam = (cam - min(cam)) / (max(cam) - min(cam))
logits = np.squeeze(logits.detach().numpy())
# The following is from Seb's plot_class_activation_map and plot_class_activation_map_template
cam_time = np.arange(cam.shape[0]) / (cam.shape[0] / 120)
cam_time_intrp = np.arange(time_series.shape[1]) * 1 / fs
cam_intrp = np.interp(cam_time_intrp, cam_time, cam)
# relevant slice
before = 60
after = 30
# time_series = time_series[:, int(before*fs): int(-after*fs)]
# cam_intrp = cam_intrp[int(before*fs):int(-after*fs)]
time_series_filt = time_series
time_series_filt_ts = np.arange(time_series_filt.shape[1]) * 1 / fs
cam_filt = cam_intrp
prob = np.exp(logits) / sum(np.exp(logits))
sp1 = spindles_detect(time_series_filt[0, :], fs)
if sp1 is not None:
bool_spindles1 = get_bool_vector(time_series_filt[0, :], fs, sp1)
spindles_highlight1 = time_series_filt[0, :] * bool_spindles1
spindles_highlight1[spindles_highlight1 == 0] = np.nan
spindles_highlight1 = spindles_highlight1[:-1]
sp2 = spindles_detect(time_series_filt[1, :], fs)
if sp2 is not None:
bool_spindles2 = get_bool_vector(time_series_filt[1, :], fs, sp2)
spindles_highlight2 = time_series_filt[1, :] * bool_spindles2
spindles_highlight2[spindles_highlight2 == 0] = np.nan
spindles_highlight2 = spindles_highlight2[:-1]
# plt.figure(figsize=(14, 4))
# plt.plot(times, data, 'k')
# plt.plot(times, spindles_highlight, 'indianred')
# Setup figure
fig = plt.figure(figsize=(15, 10))
fig.subplots_adjust(wspace=0, hspace=0)
ax1 = plt.subplot2grid((3, 5), (0, 0), colspan=5)
ax2 = plt.subplot2grid((3, 5), (1, 0), colspan=5)
ax3 = plt.subplot2grid((3, 5), (2, 0), colspan=5)
class_target_dict = {0: 'Wake', 1: 'N1', 2: 'N2', 3: 'N3', 4: 'REM'}
title = f'{signal_name}'
ax1.set_title(title + '\n'
f'Truth: {label}' + '\n' +
f'Predicted Label: {label_lookup[np.squeeze(np.argmax(logits))]} ' +
str(np.round(prob[np.squeeze(np.argmax(logits))], 2)),
fontsize=20, y=1.03
)
idx1 = plot_inds[0]
idx2 = plot_inds[1] if plot_inds[1] < time_series_filt_ts.shape[0] else time_series_filt_ts.shape[0]-1
# Plot image
ax1.plot(time_series_filt_ts[idx1:idx2], time_series_filt[0, idx1:idx2], '-k', lw=1.5)
if sp1 is not None:
ax1.plot(time_series_filt_ts[idx1:idx2], spindles_highlight1, 'indianred')
ax1.set_ylabel('Normalized Amplitude', fontsize=22)
ax1.set_xlim([time_series_filt_ts[idx1], time_series_filt_ts[idx2].max()])
ax1.tick_params(labelbottom='off')
ax1.yaxis.set_tick_params(labelsize=16)
ax2.plot(time_series_filt_ts[idx1:idx2], time_series_filt[1, idx1:idx2], '-k', lw=1.5)
if sp2 is not None:
ax2.plot(time_series_filt_ts[idx1:idx2], spindles_highlight2, 'indianred')
ax2.set_ylabel('Normalized Amplitude', fontsize=22)
ax2.set_xlim([time_series_filt_ts[idx1], time_series_filt_ts[idx2].max()])
ax2.tick_params(labelbottom='off')
ax2.yaxis.set_tick_params(labelsize=16)
# Plot CAM
ax3.plot(time_series_filt_ts[idx1:idx2], cam_filt[idx1:idx2], '-k', lw=1.5)
ax3.set_xlabel('Time, seconds', fontsize=22)
ax3.set_ylabel('Class Activation Map', fontsize=22)
ax3.set_xlim([time_series_filt_ts[idx1], time_series_filt_ts[idx2].max()])
# ax2.set_ylim([cam_filt.min()-0.05, cam_filt.max()+0.05])
ax3.xaxis.set_tick_params(labelsize=16)
ax3.yaxis.set_tick_params(labelsize=16)
plt.show()
def extract_results(file_name):
try:
with open(f'saved_models/{file_name}/{file_name}_test_perf.pkl', 'rb') as f:
classi_perf = pkl.load(f)
print("Classification Performance:")
print(f'Accuracy: {classi_perf["accuracy"]:.2f}, Kappa: {classi_perf["kappa_score"]:.2f}, '
f'f1m: {classi_perf["f1m"]:.2f}, f1M: {classi_perf["f1M"]:.2f}')
print("")
except FileNotFoundError:
print("Warning: no test_perf.pkl found in folder")
try:
with open(f'saved_models/{file_name}/{file_name}_test_r2.pkl', 'rb') as f:
r2 = pkl.load(f)
print("Independence results")
print(f"R2: {r2:.3f}")
print("")
except FileNotFoundError:
print("Warning: no test_r2.pkl found in folder")
try:
with open(f'saved_models/{file_name}/{file_name}_rep2label_perf.pkl', 'rb') as f:
rep2label_perf = pkl.load(f)
s_rep = | pd.DataFrame(rep2label_perf) | pandas.DataFrame |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from astropy.time import Time
import fastavro
def open_avro(fname):
with open(fname,'rb') as f:
freader = fastavro.reader(f)
schema = freader.writer_schema
for packet in freader:
return packet
def make_dataframe(packet):
dfc = | pd.DataFrame(packet['candidate'], index=[0]) | pandas.DataFrame |
import os
import statistics
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
FILE_DIRS = []
FILE_LISTS = []
PLOT_NAMES = []
TITLES = []
MEAN_TRAIN_STEPS = []
STD_TRAIN_STEPS = []
"""
0.pt -> rewards train: real
2.pt -> rewards train: synth., HPs: varied
1.pt -> rewards train: synth., HPs: fixed
NEW: 3.pt -> rewards from MBRL baseline
"""
FILE_DIRS.append('/home/ferreira/Projects/learning_environments/experiments/transfer_experiments/cartpole/ddqn_vary_trained_on')
TITLES.append("DDQN on DDQN-trained SEs")
FILE_LISTS.append(['0.pt', '2.pt', '1.pt'])
FILE_DIRS.append('/home/ferreira/Projects/learning_environments/experiments/transfer_experiments/cartpole/ddqn_to_duelingddqn_vary')
TITLES.append("Transfer: Dueling DDQN on DDQN-trained SEs")
FILE_LISTS.append(['0.pt', '2.pt', '1.pt'])
FILE_DIRS.append('/home/ferreira/Projects/learning_environments/experiments/transfer_experiments/cartpole' \
'/ddqn_to_td3_discrete_vary_td3HPs_variation_experiments/learned_temp_init_1_tanh_hard_True_lr_5e-4')
TITLES.append("Transfer: TD3 on DDQN-trained SEs")
FILE_LISTS.append(['0.pt', '2.pt', '1.pt'])
ddqn_mean_train_steps = [16887.6925, 6818.57, 6379.5075]
ddqn_std_train_steps = [24925.0562208, 2339.505055, 3162.9542706]
MEAN_TRAIN_STEPS.append(ddqn_mean_train_steps)
STD_TRAIN_STEPS.append(ddqn_std_train_steps)
dueling_ddqn_mean_train_steps = [12745.27, 6781.045, 6502.5125]
dueling_ddqn_std_train_steps = [14972.211664, 2198.149523570906, 3209.8083018]
MEAN_TRAIN_STEPS.append(dueling_ddqn_mean_train_steps)
STD_TRAIN_STEPS.append(dueling_ddqn_std_train_steps)
# ddqn_to_td3_discrete_vary_layer_norm_2_learned_temp
td3_mean_train_steps = [17874.925, 5832.0975, 5371.035]
td3_std_train_steps = [17834.68171216899, 1576.944465729136, 2414.505099140401]
############ PARAMETERS ############
show_5_best_jointly_with_other = True
show_zoom = False
if show_5_best_jointly_with_other:
# mode 2, 5 best models (see syn_env_evaluate_cartpole_vary_hp_2_TD3_discrete.py for which one), 4k evals (80(agents_num)*5(models)*10(
# evals per model))
td3_mean_train_steps[1] = 6287.5
td3_std_train_steps[1] = 1970.6455160682756
TITLES[2] = "Transfer: TD3 on DDQN-trained SEs"
FILE_LISTS[2] = ['0.pt', '2_5_best_filtered_models.pt', '1.pt']
if show_zoom:
plot_name = 'CP_vary_hp_merged_plots_best_5_dtd3_only_kde_zoom.pdf'
else:
plot_name = 'CP_vary_hp_merged_plots_best_5_dtd3_only_kde.pdf'
key = "2_5_best_filtered_models" # don't comment this line
MEAN_TRAIN_STEPS.append(td3_mean_train_steps)
STD_TRAIN_STEPS.append(td3_std_train_steps)
if __name__ == "__main__":
nrows = 1
gridspec_kw = {}
figsize = (15, 3)
fig, axes = plt.subplots(figsize=figsize, ncols=3, nrows=nrows, sharex="row", sharey="row", gridspec_kw=gridspec_kw)
for i, data in enumerate(zip(FILE_DIRS, FILE_LISTS, TITLES, MEAN_TRAIN_STEPS, STD_TRAIN_STEPS)):
FILE_DIR, FILE_LIST, title, mean_train_steps, std_train_steps = data
data_list = []
mean_list = []
episode_num_needed_means = []
episode_num_needed_stds = []
reward_list_single_2 = []
for j, file in enumerate(FILE_LIST):
file_path = os.path.join(FILE_DIR, file)
save_dict = torch.load(file_path)
reward_list = save_dict['reward_list']
if key in file:
# keys were named incorrectly at the beginning of the experiment:
# train_steps were named num_episodes
# new models are now correctly using keys --> mapping needed
mean_episode_num = np.mean(save_dict["episode_length_needed"])
std_episode_num = np.std(save_dict["episode_length_needed"])
if show_5_best_jointly_with_other:
# show result from 2.pt jointly with 2_5_best_filtered_models.pt
save_dict = torch.load(os.path.join(FILE_DIR, "2.pt"))
reward_list_2 = save_dict['reward_list']
for r_list in reward_list_2:
reward_list_single_2 += r_list
data_list.append(reward_list_single_2)
else:
mean_episode_num = np.mean(save_dict["train_steps_needed"])
std_episode_num = np.std(save_dict["train_steps_needed"])
reward_list_single = []
for r_list in reward_list:
reward_list_single += r_list
data_list.append(reward_list_single)
if i == 0 and j == 0:
mean_list.append('{:.2f}'.format((statistics.mean(reward_list_single))))
elif key in file and show_5_best_jointly_with_other:
mean_list.append('{:.2f} (all: {:.2f})'.format(statistics.mean(reward_list_single), statistics.mean(reward_list_single_2)))
else:
mean_list.append('{:.2f}'.format((statistics.mean(reward_list_single))))
episode_num_needed_means.append(mean_episode_num)
episode_num_needed_stds.append(std_episode_num)
if len(data_list) == 4:
data_dict = {
'train: real': data_list[0],
'train: synth., HPs: varied': data_list[1],
'train: synth., HPs: fixed': data_list[3],
'train: synth., HPs: varied (5 best)': data_list[2],
}
else:
data_dict = {
'train: real': data_list[0],
'train: synth., HPs: varied': data_list[1],
'train: synth., HPs: fixed': data_list[2],
}
df = | pd.DataFrame(data=data_dict) | pandas.DataFrame |
"""
2018 <NAME>
9.tcga-classify/classify-with-raw-expression.py
Predict if specific genes are mutated across TCGA tumors based on raw RNAseq gene
expression features. Also make predictions on cancer types using raw gene expression.
Usage:
python classify-with-raw-expression.py
Output:
Gene specific DataFrames storing ROC, precision-recall, and classifier coefficients
for raw gene expression models trained in their ability to predict mutations. The genes
used are the top 50 most mutated genes in TCGA PanCanAtlas. A gene was considered
mutated if a non-silent mutation was observed by the MC3 mutation calling effort. An
additional metrics file that stores all gene AUROC and AUPR is also saved. We also save
predictions and results for cancer-types.
"""
import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from scripts.tcga_util import (
get_threshold_metrics,
summarize_results,
extract_coefficients,
align_matrices,
process_y_matrix,
train_model,
process_y_matrix_cancertype,
check_status,
)
np.random.seed(123)
# Load constants
filter_prop = 0.05
filter_count = 15
folds = 5
num_features = 8000
max_iter = 100
seed = "123"
algorithm = "raw"
alphas = [0.1, 0.13, 0.15, 0.2, 0.25, 0.3]
l1_ratios = [0.15, 0.16, 0.2, 0.25, 0.3, 0.4]
# Load genes
file = os.path.join("data", "top50_mutated_genes.tsv")
genes_df = | pd.read_table(file) | pandas.read_table |
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays.sparse import SparseArray
class TestSparseArrayConcat:
@pytest.mark.parametrize("kind", ["integer", "block"])
def test_basic(self, kind):
a = SparseArray([1, 0, 0, 2], kind=kind)
b = SparseArray([1, 0, 2, 2], kind=kind)
result = | SparseArray._concat_same_type([a, b]) | pandas.core.arrays.sparse.SparseArray._concat_same_type |
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
to_datetime,
)
import pandas._testing as tm
import pandas.tseries.offsets as offsets
class TestRollingTS:
# rolling time-series friendly
# xref GH13327
def setup_method(self, method):
self.regular = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
).set_index("A")
self.ragged = DataFrame({"B": range(5)})
self.ragged.index = [
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
def test_doc_string(self):
df = DataFrame(
{"B": [0, 1, 2, np.nan, 4]},
index=[
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
],
)
df
df.rolling("2s").sum()
def test_invalid_window_non_int(self):
# not a valid freq
msg = "passed window foobar is not compatible with a datetimelike index"
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window="foobar")
# not a datetimelike index
msg = "window must be an integer"
with pytest.raises(ValueError, match=msg):
self.regular.reset_index().rolling(window="foobar")
@pytest.mark.parametrize("freq", ["2MS", offsets.MonthBegin(2)])
def test_invalid_window_nonfixed(self, freq):
# non-fixed freqs
msg = "\\<2 \\* MonthBegins\\> is a non-fixed frequency"
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window=freq)
@pytest.mark.parametrize("freq", ["1D", offsets.Day(2), "2ms"])
def test_valid_window(self, freq):
self.regular.rolling(window=freq)
@pytest.mark.parametrize("minp", [1.0, "foo", np.array([1, 2, 3])])
def test_invalid_minp(self, minp):
# non-integer min_periods
msg = (
r"local variable 'minp' referenced before assignment|"
"min_periods must be an integer"
)
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window="1D", min_periods=minp)
def test_invalid_center_datetimelike(self):
# center is not implemented
msg = "center is not implemented for datetimelike and offset based windows"
with pytest.raises(NotImplementedError, match=msg):
self.regular.rolling(window="1D", center=True)
def test_on(self):
df = self.regular
# not a valid column
msg = (
r"invalid on specified as foobar, must be a column "
"\\(of DataFrame\\), an Index or None"
)
with pytest.raises(ValueError, match=msg):
df.rolling(window="2s", on="foobar")
# column is valid
df = df.copy()
df["C"] = date_range("20130101", periods=len(df))
df.rolling(window="2d", on="C").sum()
# invalid columns
msg = "window must be an integer"
with pytest.raises(ValueError, match=msg):
df.rolling(window="2d", on="B")
# ok even though on non-selected
df.rolling(window="2d", on="C").B.sum()
def test_monotonic_on(self):
# on/index must be monotonic
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
)
assert df.A.is_monotonic
df.rolling("2s", on="A").sum()
df = df.set_index("A")
assert df.index.is_monotonic
df.rolling("2s").sum()
def test_non_monotonic_on(self):
# GH 19248
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
)
df = df.set_index("A")
non_monotonic_index = df.index.to_list()
non_monotonic_index[0] = non_monotonic_index[3]
df.index = non_monotonic_index
assert not df.index.is_monotonic
msg = "index must be monotonic"
with pytest.raises(ValueError, match=msg):
df.rolling("2s").sum()
df = df.reset_index()
msg = (
r"invalid on specified as A, must be a column "
"\\(of DataFrame\\), an Index or None"
)
with pytest.raises(ValueError, match=msg):
df.rolling("2s", on="A").sum()
def test_frame_on(self):
df = DataFrame(
{"B": range(5), "C": date_range("20130101 09:00:00", periods=5, freq="3s")}
)
df["A"] = [
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
# we are simulating the datetimelike index using 'on'
expected = df.set_index("A").rolling("2s").B.sum().reset_index(drop=True)
result = df.rolling("2s", on="A").B.sum()
tm.assert_series_equal(result, expected)
# test as a frame
# we should be ignoring the 'on' as an aggregation column
# note that the expected is setting, computing, and resetting
# so the columns need to be switched compared
# to the actual result where they are ordered as in the
# original
expected = (
df.set_index("A").rolling("2s")[["B"]].sum().reset_index()[["B", "A"]]
)
result = df.rolling("2s", on="A")[["B"]].sum()
tm.assert_frame_equal(result, expected)
def test_frame_on2(self):
# using multiple aggregation columns
df = DataFrame(
{
"A": [0, 1, 2, 3, 4],
"B": [0, 1, 2, np.nan, 4],
"C": Index(
[
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
),
},
columns=["A", "C", "B"],
)
expected1 = DataFrame(
{"A": [0.0, 1, 3, 3, 7], "B": [0, 1, 3, np.nan, 4], "C": df["C"]},
columns=["A", "C", "B"],
)
result = df.rolling("2s", on="C").sum()
expected = expected1
tm.assert_frame_equal(result, expected)
expected = Series([0, 1, 3, np.nan, 4], name="B")
result = df.rolling("2s", on="C").B.sum()
tm.assert_series_equal(result, expected)
expected = expected1[["A", "B", "C"]]
result = df.rolling("2s", on="C")[["A", "B", "C"]].sum()
tm.assert_frame_equal(result, expected)
def test_basic_regular(self):
df = self.regular.copy()
df.index = date_range("20130101", periods=5, freq="D")
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window="1D").sum()
tm.assert_frame_equal(result, expected)
df.index = date_range("20130101", periods=5, freq="2D")
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window="2D", min_periods=1).sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window="2D", min_periods=1).sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(window=1).sum()
result = df.rolling(window="2D").sum()
tm.assert_frame_equal(result, expected)
def test_min_periods(self):
# compare for min_periods
df = self.regular
# these slightly different
expected = df.rolling(2, min_periods=1).sum()
result = df.rolling("2s").sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(2, min_periods=1).sum()
result = df.rolling("2s", min_periods=1).sum()
tm.assert_frame_equal(result, expected)
def test_closed(self):
# xref GH13965
df = DataFrame(
{"A": [1] * 5},
index=[
Timestamp("20130101 09:00:01"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:04"),
Timestamp("20130101 09:00:06"),
],
)
# closed must be 'right', 'left', 'both', 'neither'
msg = "closed must be 'right', 'left', 'both' or 'neither'"
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window="2s", closed="blabla")
expected = df.copy()
expected["A"] = [1.0, 2, 2, 2, 1]
result = df.rolling("2s", closed="right").sum()
tm.assert_frame_equal(result, expected)
# default should be 'right'
result = df.rolling("2s").sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [1.0, 2, 3, 3, 2]
result = df.rolling("2s", closed="both").sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [np.nan, 1.0, 2, 2, 1]
result = df.rolling("2s", closed="left").sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [np.nan, 1.0, 1, 1, np.nan]
result = df.rolling("2s", closed="neither").sum()
tm.assert_frame_equal(result, expected)
def test_ragged_sum(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 3, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=2).sum()
expected = df.copy()
expected["B"] = [np.nan, np.nan, 3, np.nan, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="3s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 5, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="3s").sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 5, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="4s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 6, 9]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="4s", min_periods=3).sum()
expected = df.copy()
expected["B"] = [np.nan, np.nan, 3, 6, 9]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 6, 10]
tm.assert_frame_equal(result, expected)
def test_ragged_mean(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).mean()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).mean()
expected = df.copy()
expected["B"] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_median(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).median()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).median()
expected = df.copy()
expected["B"] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_quantile(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).quantile(0.5)
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).quantile(0.5)
expected = df.copy()
expected["B"] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_std(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).std(ddof=0)
expected = df.copy()
expected["B"] = [0.0] * 5
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
import settings # Import related setting constants from settings.py
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import pandas as pd
import plotly.graph_objs as go
import settings
import itertools
import math
import base64
from flask import Flask
import os
from sqlalchemy import create_engine
import datetime
from googletrans import Translator
import re
import nltk
nltk.download('punkt')
nltk.download('stopwords')
from nltk.probability import FreqDist
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from textblob import TextBlob
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
today = datetime.datetime.now().strftime("%B %d, %Y")
translator = Translator()
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
#app.title = 'Real-Time Twitter Monitor'
app.meta_tags=[
{
'name': 'DS4A',
'content': 'TWITTER'
},
{
'http-equiv': 'X-UA-Compatible',
'content': 'IE=edge'
},
{
'name': 'viewport',
'content': 'width=device-width, initial-scale=1.0'
}
]
server = app.server
app.layout = html.Div(children=[
#html.H2('Real-time Twitter Sentiment Analysis for Topic Tracking', style={'textAlign': 'center'}),
html.P('(Last updated: {})'.format(today), style={'textAlign': 'right', 'fontSize':15}),
html.Div(id='live-update-graph'),
html.Div(id='live-update-graph-bottom'),
# ABOUT ROW
html.Div(
className='row',
children=[
html.Div(
className='three columns',
children=[
html.P(
'Data extracted from:'
),
html.A(
'Twitter API',
href='https://developer.twitter.com'
)
]
),
html.Div(
className='three columns',
children=[
html.P(
'Code avaliable at:'
),
html.A(
'GitHub',
href='https://github.com/Chulong-Li/Real-time-Sentiment-Tracking-on-Twitter-for-Brand-Improvement-and-Trend-Recognition'
)
]
),
html.Div(
className='three columns',
children=[
html.P(
'Made with:'
),
html.A(
'Dash / Plot.ly',
href='https://plot.ly/dash/'
)
]
),
html.Div(
className='three columns',
children=[
html.P(
'Author:'
),
html.A(
'<NAME>',
href='https://www.linkedin.com/in/chulong-li/'
)
]
)
], style={'marginLeft': 70, 'fontSize': 8}
),
dcc.Interval(
id='interval-component-slow',
interval=1*10000, # in milliseconds
n_intervals=0
)
], style={'padding': '20px'})
# Multiple components can update every time the interval gets fired.
@app.callback(Output('live-update-graph', 'children'),
[Input('interval-component-slow', 'n_intervals')])
def update_graph_live(n):
# Loading data from RDS PostgreSQL
engine = create_engine('postgresql://postgres@psql-ds4a-prod:<EMAIL>:5432/ds4a_mta_twitterdb')
query = "SELECT id_str, text, created_at, polarity, user_location, user_followers_count FROM {}".format(settings.TABLE_NAME)
df = pd.read_sql(query,engine)
# Shift timestamps from UTC to local time (UTC-3)
df['created_at'] = pd.to_datetime(df['created_at']).apply(lambda x: x - datetime.timedelta(hours=3))
# Clean and transform data to enable time series
result = df.groupby([pd.Grouper(key='created_at', freq='10s'), 'polarity']).count().unstack(fill_value=0).stack().reset_index()
result = result.rename(columns={"id_str": "Num of '{}' mentions".format(settings.TRACK_WORDS[0]), "created_at":"Time"})
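# (illustrative) 'result' now holds one row per 10-second bin and polarity value (-1/0/1); the
# unstack(fill_value=0).stack() round-trip keeps bins with zero tweets of a given polarity at 0
# instead of dropping them, so the three sentiment time series below stay aligned.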
time_series = result["Time"][result['polarity']==0].reset_index(drop=True)
min10 = datetime.datetime.now() - datetime.timedelta(hours=3, minutes=10)
min20 = datetime.datetime.now() - datetime.timedelta(hours=3, minutes=20)
neu_num = result[result['Time']>min10]["Num of '{}' mentions".format(settings.TRACK_WORDS[0])][result['polarity']==0].sum()
neg_num = result[result['Time']>min10]["Num of '{}' mentions".format(settings.TRACK_WORDS[0])][result['polarity']==-1].sum()
pos_num = result[result['Time']>min10]["Num of '{}' mentions".format(settings.TRACK_WORDS[0])][result['polarity']==1].sum()
# Loading back-up summary data
# This table must be create before
query = "SELECT daily_user_num, daily_tweets_num, impressions FROM backup;"
back_up = | pd.read_sql(query, engine) | pandas.read_sql |
import pickle
import pandas as pd
import time as time
def merge_with_metatable(from_sp, to_sp, df_spectra, save=False):
"""
merge_with_metatable()
Parameters
----------
from_sp : string
The number from which to merge spectra with meta-data. String, because it
must match the filename in folder data/sdss/spectra/
to_sp : string
The number which specifies the upper limit to merge spectra with meta-data.
String, because it must match the filename in folder data/sdss/spectra/
df_spectra : pd.DataFrame
The DataFrame that comes from downloading the raw spectral data. None by
default, in which case it's loaded from disk.
save : boolean
When True, save the resulting merged table into a pickle
When False, don't save the resulting merged table
Returns
-------
df_merged : pandas.DataFrame
A merged table that contains spectral data all meta information from
data/sdss/meta_table.pkl:
columns: 'flux_list',
'wavelength',
'objid',
'bestObjID',
'fluxObjID',
'targetObjID',
'plate',
'class',
'subClass',
'zErr',
'petroMag_u',
'petroMag_g',
'petroMag_r',
'petroMag_i',
'petroMag_z',
'petroMagErr_u',
'petroMagErr_g',
'petroMagErr_r',
'petroMagErr_i',
'petroMagErr_z',
'dec',
'z',
'ra'
"""
df_meta_data = pd.read_pickle('data/quasar_meta_table.pkl')
df_meta_data["objid"] = df_meta_data['bestObjID'].astype(int)
df_spectra['objid'] = df_spectra['objid'].astype(int)
print(f'df_spectra before removing duplicates = {df_spectra.shape[0]}')
df_spectra = df_spectra.drop_duplicates(subset=['objid'])
df_meta_data = df_meta_data.drop_duplicates(subset=['objid', 'z'])
print(f'df_spectra after removing duplicates = {df_spectra.shape[0]}')
df_meta_data = df_meta_data.drop(columns={"specObjID"})
df_merged = | pd.merge(df_spectra, df_meta_data, on=['objid']) | pandas.merge |
from sklearn import tree
from sklearn.metrics import accuracy_score
import pandas as pd
import os
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
import itertools
import math
from TMDataset import TMDataset
import const
import util
class TMDetection:
dataset = TMDataset()
classes = []
classes2string = {}
classes2number = {}
def __init__(self):
if not const.HAVE_DT:
self.dataset.create_balanced_dataset(const.SINTETIC_LEARNING)
classes_dataset = self.dataset.get_dataset['target'].values
print(classes_dataset)
for i, c in enumerate(sorted(set(classes_dataset))):
self.classes2string[i] = c
self.classes2number[c] = i
self.classes.append(c)
def __get_sets_for_classification(self, df_train, df_test, features):
train, test = util.fill_nan_with_mean_training(df_train, df_test)
train_features = train[features].values
train_classes = [self.classes2number[c] for c in train['target'].values]
test_features = test[features].values
test_classes = [self.classes2number[c] for c in test['target'].values]
return train_features, train_classes, test_features, test_classes
# decision tree algorithm training on training al train set and test on all test set
def decision_tree(self, sensors_set):
features = list(self.dataset.get_sensors_set_features(sensors_set))
print("DECISION TREE.....")
print("CLASSIFICATION BASED ON THESE SENSORS: ", self.dataset.get_remained_sensors(sensors_set))
print("NUMBER OF FEATURES: ", len(features))
train_features, train_classes, test_features, test_classes = self.__get_sets_for_classification(
self.dataset.get_train, self.dataset.get_test, features)
classifier_decision_tree = tree.DecisionTreeClassifier()
classifier_decision_tree.fit(train_features, train_classes)
test_prediction = classifier_decision_tree.predict(test_features)
acc = accuracy_score(test_classes, test_prediction)
df_feature = pd.DataFrame(
{'accuracy': acc, 'features': features, 'importance': classifier_decision_tree.feature_importances_})
df_feature = df_feature.sort_values(by='importance', ascending=False)
print("ACCURACY : " + str(acc))
print("END TREE")
if not os.path.exists(const.DIR_RESULTS):
os.makedirs(const.DIR_RESULTS)
df_feature.to_csv(const.DIR_RESULTS + "/" + str(sensors_set) + const.FILE_DECISION_TREE_RESULTS, index=False)
# random forest algorithm training on training al train set and test on all test set
def random_forest(self, sensors_set):
features = list(self.dataset.get_sensors_set_features(sensors_set))
print("RANDOM FOREST.....")
print("CLASSIFICATION BASED ON THESE SENSORS: ", self.dataset.get_remained_sensors(sensors_set))
print("NUMBER OF FEATURES: ", len(features))
train_features, train_classes, test_features, test_classes = self.__get_sets_for_classification(
self.dataset.get_train, self.dataset.get_test, features)
classifier_forest = RandomForestClassifier(n_estimators=const.PAR_RF_ESTIMATOR)
classifier_forest.fit(train_features, train_classes)
test_prediction = classifier_forest.predict(test_features)
acc = accuracy_score(test_classes, test_prediction)
df_feature = pd.DataFrame(
{'accuracy': acc, 'featureName': features, 'importance': classifier_forest.feature_importances_})
df_feature = df_feature.sort_values(by='importance', ascending=False)
print("ACCURACY : " + str(acc))
print("END RANDOM FOREST")
if not os.path.exists(const.DIR_RESULTS):
os.makedirs(const.DIR_RESULTS)
df_feature.to_csv(const.DIR_RESULTS + "/" + str(sensors_set) + const.FILE_RANDOM_FOREST_RESULTS, index=False)
# neural network algorithm training on training al train set and test on all test set
def neural_network(self, sensors_set):
features = list(self.dataset.get_sensors_set_features(sensors_set))
print("NEURAL NETWORK.....")
print("CLASSIFICATION BASED ON THESE SENSORS: ", self.dataset.get_remained_sensors(sensors_set))
print("NUMBER OF FEATURES: ", len(features))
train_features, train_classes, test_features, test_classes = self.__get_sets_for_classification(
self.dataset.get_train, self.dataset.get_test, features)
train_features_scaled, test_features_scaled = util.scale_features(train_features, test_features)
classifier_nn = MLPClassifier(hidden_layer_sizes=(const.PAR_NN_NEURONS[sensors_set],),
alpha=const.PAR_NN_ALPHA[sensors_set], max_iter=const.PAR_NN_MAX_ITER,
tol=const.PAR_NN_TOL)
classifier_nn.fit(train_features_scaled, train_classes)
test_prediction = classifier_nn.predict(test_features_scaled)
acc = accuracy_score(test_classes, test_prediction)
print("ACCURACY : " + str(acc))
print("END NEURAL NETWORK")
if not os.path.exists(const.DIR_RESULTS):
os.makedirs(const.DIR_RESULTS)
file_content = "acc\n" + str(acc)
with open(const.DIR_RESULTS + "/" + str(sensors_set) + const.FILE_NEURAL_NETWORK_RESULTS, 'w') as f:
f.write(file_content)
# support vector machine algorithm training on training al train set and test on all test set
def support_vector_machine(self, sensors_set):
features = list(self.dataset.get_sensors_set_features(sensors_set))
print("SUPPORT VECTOR MACHINE.....")
print("CLASSIFICATION BASED ON THESE SENSORS: ", self.dataset.get_remained_sensors(sensors_set))
print("NUMBER OF FEATURES: ", len(features))
train_features, train_classes, test_features, test_classes = self.__get_sets_for_classification(
self.dataset.get_train, self.dataset.get_test, features)
train_features_scaled, test_features_scaled = util.scale_features(train_features, test_features)
classifier_svm = SVC(C=const.PAR_SVM_C[sensors_set], gamma=const.PAR_SVM_GAMMA[sensors_set], verbose=False)
classifier_svm.fit(train_features_scaled, train_classes)
test_prediction = classifier_svm.predict(test_features_scaled)
acc = accuracy_score(test_classes, test_prediction)
print("ACCURACY : " + str(acc))
print("END SUPPORT VECTOR MACHINE.....")
if not os.path.exists(const.DIR_RESULTS):
os.makedirs(const.DIR_RESULTS)
file_content = "acc\n" + str(acc)
with open(const.DIR_RESULTS + "/" + str(sensors_set) + const.FILE_SUPPORT_VECTOR_MACHINE_RESULTS, 'w') as f:
f.write(file_content)
# use different algorithms changing target classes, try all combination of two target classes
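# (illustrative sketch; the actual class names come from the dataset's 'target' column) with
# classes {A, B, C} this evaluates the binary problems A-vs-B, A-vs-C and B-vs-C, each with all
# four classifiers, and appends one accuracy row per (combination, algorithm) pair.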
def classes_combination(self, sensors_set):
features = list(self.dataset.get_sensors_set_features(sensors_set))
class_combination = list(itertools.combinations(self.classes, 2))
train = self.dataset.get_train.copy()
test = self.dataset.get_test.copy()
if not os.path.exists(const.DIR_RESULTS):
os.makedirs(const.DIR_RESULTS)
with open(const.DIR_RESULTS + "/" + str(sensors_set) + const.FILE_TWO_CLASSES_COMBINATION, 'w') as f:
f.write("combination, algorithm, accuracy")
for combination in class_combination:
cc_train = train.loc[(train['target'] == combination[0]) | (train['target'] == combination[1])]
cc_test = test.loc[(test['target'] == combination[0]) | (test['target'] == combination[1])]
train_features, train_classes, test_features, test_classes = self.__get_sets_for_classification(
cc_train, cc_test, features)
# build all classifiers
classifier_tree = tree.DecisionTreeClassifier()
classifier_forest = RandomForestClassifier(n_estimators=const.PAR_RF_ESTIMATOR)
classifier_nn = MLPClassifier(hidden_layer_sizes=(const.PAR_NN_NEURONS[sensors_set],),
alpha=const.PAR_NN_ALPHA[sensors_set], max_iter=const.PAR_NN_MAX_ITER,
tol=const.PAR_NN_TOL)
classifier_svm = SVC(C=const.PAR_SVM_C[sensors_set], gamma=const.PAR_SVM_GAMMA[sensors_set],
verbose=False)
# train all classifiers
classifier_tree.fit(train_features, train_classes)
classifier_forest.fit(train_features, train_classes)
classifier_nn.fit(train_features, train_classes)
classifier_svm.fit(train_features, train_classes)
# use classifier on test set
test_prediction_tree = classifier_tree.predict(test_features)
test_prediction_forest = classifier_forest.predict(test_features)
test_prediction_nn = classifier_nn.predict(test_features)
test_prediction_svm = classifier_svm.predict(test_features)
# evaluate classifier
acc_tree = accuracy_score(test_classes, test_prediction_tree)
acc_forest = accuracy_score(test_classes, test_prediction_forest)
acc_nn = accuracy_score(test_classes, test_prediction_nn)
acc_svm = accuracy_score(test_classes, test_prediction_svm)
# print result
print(str(combination))
print("DECISION TREE : ", str(acc_tree))
f.write(str(combination) + ", DT ," + str(acc_tree) + "\n")
print("RANDOM FOREST : ", str(acc_forest))
f.write(str(combination) + ", RF ," + str(acc_forest) + "\n")
print("NEURAL NETWORK : ", str(acc_nn))
f.write(str(combination) + ", NN ," + str(acc_nn) + "\n")
print("SUPPORT VECTOR MACHINE : ", str(acc_svm))
f.write(str(combination) + ", SVM ," + str(acc_svm) + "\n")
# use different algorithms leaving one subject out from training and testing only on this subject -
# considering all classes in dataset and only user classes
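# (illustrative sketch of the protocol) for each user u: train on every other user's windows and
# test on u's windows; 'acc all classes' uses the full training set, while 'acc user classes'
# retrains on only the transport modes u actually recorded, so the two columns coincide whenever
# u covers every class.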
def leave_one_subject_out(self, sensors_set):
features = list(self.dataset.get_sensors_set_features(sensors_set))
train = self.dataset.get_train.copy()
test = self.dataset.get_test.copy()
if not os.path.exists(const.DIR_RESULTS):
os.makedirs(const.DIR_RESULTS)
with open(const.DIR_RESULTS + "/" + str(sensors_set) + const.FILE_LEAVE_ONE_SUBJECT_OUT, 'w') as f:
f.write(
"user, classes, complete training examples, user class training examples, test examples, algorithm, acc all classes, acc user classes\n")
for u in self.dataset.get_users:
user_train = train.loc[(train['user'] != u)]
user_test = test.loc[(test['user'] == u)]
train_features, train_classes, test_features, test_classes = self.__get_sets_for_classification(
user_train, user_test, features)
# build all classifiers
classifier_tree = tree.DecisionTreeClassifier()
classifier_forest = RandomForestClassifier(n_estimators=const.PAR_RF_ESTIMATOR)
classifier_nn = MLPClassifier(hidden_layer_sizes=(const.PAR_NN_NEURONS[sensors_set],),
alpha=const.PAR_NN_ALPHA[sensors_set], max_iter=const.PAR_NN_MAX_ITER,
tol=const.PAR_NN_TOL)
classifier_svm = SVC(C=const.PAR_SVM_C[sensors_set], gamma=const.PAR_SVM_GAMMA[sensors_set],
verbose=False)
# train all classifiers
classifier_tree.fit(train_features, train_classes)
classifier_forest.fit(train_features, train_classes)
classifier_nn.fit(train_features, train_classes)
classifier_svm.fit(train_features, train_classes)
# use classifier on test set
test_prediction_tree = classifier_tree.predict(test_features)
test_prediction_forest = classifier_forest.predict(test_features)
test_prediction_nn = classifier_nn.predict(test_features)
test_prediction_svm = classifier_svm.predict(test_features)
# evaluate classifier
acc_tree = accuracy_score(test_classes, test_prediction_tree)
acc_forest = accuracy_score(test_classes, test_prediction_forest)
acc_nn = accuracy_score(test_classes, test_prediction_nn)
acc_svm = accuracy_score(test_classes, test_prediction_svm)
user_classes = []
acc_class_tree = acc_tree
acc_class_forest = acc_forest
acc_class_nn = acc_nn
acc_class_svm = acc_svm
user_class_train = user_train
for i, c in enumerate(sorted(set(user_test['target'].values))):
user_classes.append(c)
# if the user has not collected all classes we need to calculate a different acc with training
# composed only of the user's classes
if len(user_classes) != len(self.dataset.get_tm):
if len(user_classes) != 1:
user_class_train = user_train.loc[user_train['target'].isin(user_classes)]
train_class_features, train_class_classes, test_features, test_classes = self.__get_sets_for_classification(
user_class_train,
user_test,
features)
# train all classifiers
classifier_tree.fit(train_class_features, train_class_classes)
classifier_forest.fit(train_class_features, train_class_classes)
classifier_nn.fit(train_class_features, train_class_classes)
classifier_svm.fit(train_class_features, train_class_classes)
# use classifier on test set
test_prediction_tree = classifier_tree.predict(test_features)
test_prediction_forest = classifier_forest.predict(test_features)
test_prediction_nn = classifier_nn.predict(test_features)
test_prediction_svm = classifier_svm.predict(test_features)
# evaluate classifier
acc_class_tree = accuracy_score(test_classes, test_prediction_tree)
acc_class_forest = accuracy_score(test_classes, test_prediction_forest)
acc_class_nn = accuracy_score(test_classes, test_prediction_nn)
acc_class_svm = accuracy_score(test_classes, test_prediction_svm)
else:
acc_class_tree = 1
acc_class_forest = 1
acc_class_nn = 1
acc_class_svm = 1
pre = str(u) + "," + str(' '.join(user_classes)) + "," + str(user_train.shape[0]) + "," + str(
user_class_train.shape[0]) + "," + str(user_test.shape[0])
f.write(pre + ", DT ," + str(acc_tree) + "," + str(acc_class_tree) + "\n")
f.write(pre + ", RF ," + str(acc_forest) + "," + str(acc_class_forest) + "\n")
f.write(pre + ", NN ," + str(acc_nn) + "," + str(acc_class_nn) + "\n")
f.write(pre + ", SVM ," + str(acc_svm) + "," + str(acc_class_svm) + "\n")
# use feature relative to one sensor to build model and evaluate
def single_sensor_accuracy(self):
sensor = []
accuracy = []
std = []
for s in self.dataset.get_sensors:
if s != "activityrecognition":
print(s)
features = self.dataset.get_sensor_features(s)
train = self.dataset.get_train.copy()
test = self.dataset.get_test.copy()
train_features, train_classes, test_features, test_classes = self.__get_sets_for_classification(train,
test,
features)
singleAcc = []
for i in range(const.REPEAT):
# build classifier
classifier_forest = RandomForestClassifier(n_estimators=const.PAR_RF_ESTIMATOR)
classifier_forest.fit(train_features, train_classes)
test_prediction_forest = classifier_forest.predict(test_features)
acc_forest = accuracy_score(test_classes, test_prediction_forest)
singleAcc.append(acc_forest)
accM = util.average(singleAcc)
variance = list(map(lambda x: (x - accM) ** 2, singleAcc))
standard_deviation = math.sqrt(util.average(variance))
print(s, accM, standard_deviation)
accuracy.append(accM)
std.append(standard_deviation)
sensor.append(s)
df_single_sensor_acc = | pd.DataFrame({'sensor': sensor, 'accuracy': accuracy, 'dev_standard': std}) | pandas.DataFrame |
# import Ipynb_importer
import pandas as pd
from .public_fun import *
# global variables
class glv:
def _init():
global _global_dict
_global_dict = {}
def set_value(key,value):
_global_dict[key] = value
def get_value(key,defValue=None):
try:
return _global_dict[key]
except KeyError:
return defValue
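# Example usage of the glv helper (a minimal sketch):
# glv._init()                        # initialise the shared dict once, before any set_value/get_value
# glv.set_value('data_f', raw_hex)   # stash the remaining hex payload for the next parsing stage
# payload = glv.get_value('data_f')  # read it back later; returns the default (None) when missing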
## fun_01to06
class fun_01to06(object):
def __init__(self, data):
self.cf = [2, 1, 1, 17, 1, 2]
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"起始符",
"命令标识",
"应答标志",
"唯一识别码",
"数据单元加密方式",
"数据单元长度"
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
"起始符":hex2str(self.oj["起始符"]),
"命令标识":dict_list_replace('02', self.oj['命令标识']),
"应答标志":dict_list_replace('03', self.oj['应答标志']),
"唯一识别码":hex2str(self.oj["唯一识别码"]),
"数据单元加密方式":dict_list_replace('05', self.oj['数据单元加密方式']),
"数据单元长度":hex2dec(self.oj["数据单元长度"]),
}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
self.mo = self.oj["命令标识"]
glv.set_value('data_f', self.next)
glv.set_value('data_mo', self.mo)
glv.set_value('data_01to07', self.o)
print('fun_01to06 done!')
## fun_07
class fun_07:
def __init__(self, data):
self.mo = glv.get_value("data_mo")
if self.mo == '01':
self.o = fun_07_01(glv.get_value('data_f'))
elif self.mo == '02' or self.mo == '03':
self.o = fun_07_02(glv.get_value('data_f'))
elif self.mo == '04':
self.o = fun_07_04(glv.get_value('data_f'))
elif self.mo == '05':
self.o = fun_07_05(glv.get_value('data_f'))
elif self.mo == '06':
self.o = fun_07_06(glv.get_value('data_f'))
else :
print('command identifier (命令标识):', self.mo, 'is invalid')
self.c = fun_07_cursor(glv.get_value('data_f'))
self.oj = dict(self.o.oj, **self.c.oj)
self.oj2 = {'数据单元':self.oj}
self.ol = pd.merge(self.o.ol, self.c.ol, left_index=True, right_index=True)
self.pj = dict(self.o.pj, **self.c.pj)
self.pj2 = {'数据单元':self.pj}
self.pl = pd.merge(self.o.pl, self.c.pl, left_index=True, right_index=True)
print('fun_07 done!')
## fun_07_01
class fun_07_01(object):
def __init__(self, data):
self.cf = [6, 2, 20, 1, 1]
self.cf_a = hexlist2(self.cf)
self.n = hex2dec(data[self.cf_a[3]:self.cf_a[4]])
self.m = hex2dec(data[self.cf_a[4]:self.cf_a[5]])
self.cf.append(self.n*self.m)
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"数据采集时间",
"登入流水号",
"ICCID",
"可充电储能子系统数",
"可充电储能系统编码长度",
"可充电储能系统编码",
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
self.oj2 = {'车辆登入': self.oj}
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
"数据采集时间":get_datetime(self.oj['数据采集时间']),
"登入流水号":hex2dec(self.oj['登入流水号']),
"ICCID":hex2str(self.oj['ICCID']),
"可充电储能子系统数":hex2dec(self.oj['可充电储能子系统数']),
"可充电储能系统编码长度":hex2dec(self.oj['可充电储能系统编码长度']),
"可充电储能系统编码":fun_07_01.fun_07_01_06(self.oj['可充电储能系统编码'], self.oj['可充电储能子系统数'], self.oj['可充电储能系统编码长度']),
}
self.pj2 = {'车辆登入': self.pj}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
glv.set_value('data_f', self.next)
glv.set_value('data_07_01', self.o)
print('fun_07_01 done!')
def fun_07_01_06(data, n, m):
if m=='00':
return "NA"
else :
n = hex2dec(n)
m = hex2dec(m) * 2
output = []
for i in range(n):
output_unit = hex2str(data[i * m: i* m +m])
output.append(output_unit)
return output
## fun_07_04
class fun_07_04(object):
def __init__(self, data):
self.cf = [6, 2]
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"登出时间",
"登出流水号",
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
"登出时间":get_datetime(self.oj['登出时间']),
"登出流水号":hex2dec(self.oj['登出流水号']),
}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
glv.set_value('data_f', self.next)
glv.set_value('data_07_04', self.o)
print('fun_07_04 done!')
## fun_07_05
class fun_07_05(object):
def __init__(self, data):
self.cf = [6, 2, 12, 20, 1]
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"平台登入时间",
"登入流水号",
"平台用户名",
"平台密码",
"加密规则",
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
"平台登入时间":get_datetime(self.oj['平台登入时间']),
"登入流水号":hex2dec(self.oj['登入流水号']),
"平台用户名":hex2str(self.oj['平台用户名']),
"平台密码":hex2str(self.oj['平台密码']),
"加密规则":dict_list_replace('07_05_05',self.oj['加密规则']),
}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
glv.set_value('data_f', self.next)
glv.set_value('data_07_05', self.o)
print('fun_07_05 done!')
## fun_07_06
class fun_07_06(object):
def __init__(self, data):
self.cf = [6, 2]
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"登出时间",
"登出流水号",
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
print(self.oj)
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
"登出时间":get_datetime(self.oj['登出时间']),
"登出流水号":hex2dec(self.oj['登出流水号']),
}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
glv.set_value('data_f', self.next)
glv.set_value('data_07_06', self.o)
print('fun_07_06 done!')
## fun_07_02
class fun_07_02:
def __init__(self, data):
self.o = data
self.oj = {'数据采集时间': self.o[:12]}
self.ol = pd.DataFrame({'01':['01']})
self.pj = {'数据采集时间': get_datetime(self.oj['数据采集时间'])}
self.pl = pd.DataFrame({'01':['01']})
glv.set_value('data_f', data[12:])
glv.set_value('m_07_02', data[12:14])
self.mo_list = glv.get_value('model')
self.do_list = []
while(glv.get_value('m_07_02') in self.mo_list):
            # record the data unit that has just been handled
            self.do_list.append(glv.get_value('m_07_02'))
            # remove it from the pending list so the loop ends once every unit is consumed
            self.mo_list.remove(glv.get_value('m_07_02'))
if glv.get_value('m_07_02') == '01':
self.f_01 = fun_07_02_01(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '02':
self.f_02 = fun_07_02_02(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '03':
self.f_03 = fun_07_02_03(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '04':
self.f_04 = fun_07_02_04(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '05':
self.f_05 = fun_07_02_05(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '06':
self.f_06 = fun_07_02_06(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '07':
self.f_07 = fun_07_02_07(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '08':
self.f_08 = fun_07_02_08(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '09':
self.f_09 = fun_07_02_09(glv.get_value('data_f'))
else:
print("fun_07_02 done")
print(glv.get_value('data_f'))
print(glv.get_value('m_07_02'))
self.do_list.sort()
for i in self.do_list:
if i == '01':
self.oj = dict(self.oj,**self.f_01.oj2)
self.ol = pd.merge(self.ol, self.f_01.ol, left_index=True, right_index=True)
self.pj = dict(self.pj,**self.f_01.pj2)
self.pl = pd.merge(self.pl, self.f_01.pl, left_index=True, right_index=True)
elif i == '02':
self.oj = dict(self.oj,**self.f_02.oj2)
self.ol = pd.merge(self.ol, self.f_02.ol, left_index=True, right_index=True)
self.pj = dict(self.pj,**self.f_02.pj2)
self.pl = pd.merge(self.pl, self.f_02.pl, left_index=True, right_index=True)
elif i == '03':
self.oj = dict(self.oj,**self.f_03.oj2)
self.ol = pd.merge(self.ol, self.f_03.ol, left_index=True, right_index=True)
self.pj = dict(self.pj,**self.f_03.pj2)
self.pl = pd.merge(self.pl, self.f_03.pl, left_index=True, right_index=True)
elif i == '04':
self.oj = dict(self.oj,**self.f_04.oj2)
self.ol = pd.merge(self.ol, self.f_04.ol, left_index=True, right_index=True)
self.pj = dict(self.pj,**self.f_04.pj2)
self.pl = pd.merge(self.pl, self.f_04.pl, left_index=True, right_index=True)
elif i == '05':
self.oj = dict(self.oj,**self.f_05.oj2)
self.ol = pd.merge(self.ol, self.f_05.ol, left_index=True, right_index=True)
self.pj = dict(self.pj,**self.f_05.pj2)
self.pl = pd.merge(self.pl, self.f_05.pl, left_index=True, right_index=True)
elif i == '06':
self.oj = dict(self.oj,**self.f_06.oj2)
self.ol = pd.merge(self.ol, self.f_06.ol, left_index=True, right_index=True)
self.pj = dict(self.pj,**self.f_06.pj2)
self.pl = pd.merge(self.pl, self.f_06.pl, left_index=True, right_index=True)
elif i == '07':
self.oj = dict(self.oj,**self.f_07.oj2)
self.ol = pd.merge(self.ol, self.f_07.ol, left_index=True, right_index=True)
self.pj = dict(self.pj,**self.f_07.pj2)
self.pl = pd.merge(self.pl, self.f_07.pl, left_index=True, right_index=True)
elif i == '08':
self.oj = dict(self.oj,**self.f_08.oj2)
self.ol = pd.merge(self.ol, self.f_08.ol, left_index=True, right_index=True)
self.pj = dict(self.pj,**self.f_08.pj2)
self.pl = pd.merge(self.pl, self.f_08.pl, left_index=True, right_index=True)
elif i == '09':
self.oj = dict(self.oj,**self.f_09.oj2)
self.ol = pd.merge(self.ol, self.f_09.ol, left_index=True, right_index=True)
self.pj = dict(self.pj,**self.f_09.pj2)
self.pl = pd.merge(self.pl, self.f_09.pl, left_index=True, right_index=True)
self.oj2 = {'信息上报': self.oj}
self.pj2 = {'信息上报': self.pj}
print('fun_07_02 done!')
## fun_07_02_01
class fun_07_02_01(object):
def __init__(self, data):
self.cf = [1, 1, 1, 2, 4, 2, 2, 1, 1, 1, 2, 1, 1]
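        # byte widths of the 13 fields named in list_o below (e.g. 车速 2 bytes, 累计里程 4 bytes)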
self.cf_a = hexlist2(self.cf)
data = data[2:]
self.o = data[0:self.cf_a[-1]]
self.list_o = [
'车辆状态',
'充电状态',
'运行模式',
'车速',
'累计里程',
'总电压',
'总电流',
'SOC',
'DC-DC状态',
'挡位',
'绝缘电阻',
'加速踏板行程值',
'制动踏板状态',
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
self.oj2 = {'整车数据': self.oj}
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
'车辆状态' : dict_list_replace("07_02_01_01", self.oj['车辆状态']),
'充电状态' : dict_list_replace("07_02_01_02", self.oj['充电状态']),
'运行模式' : dict_list_replace("07_02_01_03", self.oj['运行模式']),
'车速' : hex2dec(self.oj['车速'], k=0.1),
'累计里程' : hex2dec(self.oj['累计里程'], k=0.1),
'总电压' : hex2dec(self.oj['总电压'], k=0.1),
'总电流' : hex2dec(self.oj['总电流'], n=-1000, k=0.1),
'SOC' : hex2dec(self.oj['SOC']),
'DC-DC状态' : dict_list_replace("07_02_01_06", self.oj['DC-DC状态']),
'挡位' : fun_07_02_01.fun_10(self.oj['挡位']),
'绝缘电阻' : hex2dec(self.oj['绝缘电阻']),
'加速踏板行程值' : fun_07_02_01.fun_12(self.oj['加速踏板行程值']),
'制动踏板状态' : fun_07_02_01.fun_13(self.oj['制动踏板状态']),
}
self.pj2 = {'整车数据': self.pj}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
glv.set_value('data_f', self.next)
glv.set_value('m_07_02', self.nextMark)
glv.set_value('data_07_02_01', self.o)
print('fun_07_02_01 done!')
    # 02_01_10 gear position (挡位)
def fun_10(data):
n = '{:08b}'.format(int(data, 16))
dangwei = n[-4:]
zhidongli = n[-5]
qudongli = n[-6]
        # gear
if dangwei == '0000':
dangwei_s = '空挡'
elif dangwei == '1101':
dangwei_s = '倒挡'
elif dangwei == '1110':
dangwei_s = '自动D挡'
elif dangwei == '1111':
dangwei_s = '停车P挡'
else :
dangwei_s = (str(int(dangwei, 2)) + "档" )
        # braking force
if zhidongli == "1":
zhidongli_s = "有制动力"
else :
zhidongli_s = "无制动力"
        # driving force
if qudongli == "1":
qudongli_s = "有驱动力"
else :
qudongli_s = "无驱动力"
output = [n, dangwei_s, zhidongli_s, qudongli_s]
return output
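    # e.g. fun_10('2E') -> ['00101110', '自动D挡', '无制动力', '有驱动力']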
    # 02_01_12 accelerator pedal position (加速踏板行程值)
def fun_12(data):
data = data.upper()
if data == 'FE':
return "异常"
elif data == "FF":
return "无效"
else :
return hex2dec(data)
    # 02_01_13 brake pedal state (制动踏板状态)
def fun_13(data):
data = data.upper()
if data == 'FE':
return "异常"
elif data == "FF":
return "无效"
elif data == "65":
return "制动有效"
else :
return hex2dec(data)
## fun_07_02_02
class fun_07_02_02(object):
def __init__(self, data):
data = data[2:]
self.dj_n_o = data[0:2]
        self.dj_n_j = hex2dec(self.dj_n_o)  # number of drive motors
self.cf_u = [1, 1, 1, 2, 2, 1, 2, 2]
self.cf = [1] + self.cf_u * self.dj_n_j
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"驱动电机个数",
"驱动电机序号",
"驱动电机状态",
"驱动电机控制器温度",
"驱动电机转速",
"驱动电机转矩",
"驱动电机温度",
"电机控制器输入电压",
"电机控制器直流母线电流",
]
self.oj = fun_07_02_02.fun_oj(self)
self.oj2 = {'驱动电机数据':self.oj}
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
"驱动电机个数":self.dj_n_j,
"驱动电机序号":[hex2dec(i) for i in self.oj['驱动电机序号']],
"驱动电机状态":[dict_list_replace('07_02_02_02', i) for i in self.oj['驱动电机状态']],
"驱动电机控制器温度":[hex2dec(i, n=-40) for i in self.oj['驱动电机控制器温度']],
"驱动电机转速":[hex2dec(i, n=-20000) for i in self.oj['驱动电机转速']],
"驱动电机转矩":[hex2dec(i, k=0.1, n=-2000) for i in self.oj['驱动电机转矩']],
"驱动电机温度":[hex2dec(i, n=-40) for i in self.oj['驱动电机温度']],
"电机控制器输入电压":[hex2dec(i, k=0.1) for i in self.oj['电机控制器输入电压']],
"电机控制器直流母线电流":[hex2dec(i, k=0.1, n=-1000) for i in self.oj['电机控制器直流母线电流']],
}
self.pj2 = {'驱动电机数据':self.pj}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
glv.set_value('data_f', self.next)
glv.set_value('m_07_02', self.nextMark)
glv.set_value('data_07_02_02', self.o)
print('fun_07_02_02 done!')
# oj
def fun_oj(self):
data = self.o[2:]
cf_a = hexlist2(self.cf[1:])
dict_oj = {
"驱动电机个数":self.dj_n_o,
}
list_o = [
"驱动电机序号",
"驱动电机状态",
"驱动电机控制器温度",
"驱动电机转速",
"驱动电机转矩",
"驱动电机温度",
"电机控制器输入电压",
"电机控制器直流母线电流",
]
dict_oj_u = {
"驱动电机序号":[],
"驱动电机状态":[],
"驱动电机控制器温度":[],
"驱动电机转速":[],
"驱动电机转矩":[],
"驱动电机温度":[],
"电机控制器输入电压":[],
"电机控制器直流母线电流":[],
}
for i in range(self.dj_n_j):
for j in range(len(dict_oj_u)):
data_unit = data[cf_a[i * len(dict_oj_u) + j]:cf_a[i * len(dict_oj_u) + j +1]]
dict_oj_u[list_o[j]].append(data_unit)
dict_all = dict(dict_oj, **dict_oj_u)
return dict_all
## fun_07_02_03
class fun_07_02_03(object):
def __init__(self, data):
self.cf = [2, 2, 2, 2, None, 2, 1, 2, 1, 2, 1, 1]
data = data[2:]
self.dc_no = data[12:16]
self.dc_np = hex2dec(self.dc_no)
self.cf[4] = self.dc_np
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"燃料电池电压",
"燃料电池电流",
"燃料消耗率",
"燃料电池温度探针总数",
"探针温度值",
"氢系统中最高温度",
"氢系统中最高温度探针代号",
"氢气最高浓度",
"氢气最高浓度传感器代号",
"氢气最高压力",
"氢气最高压力传感器代号",
"高压DC/DC状态",
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
self.oj2 = {'燃料电池数据':self.oj}
self.ol = pd.DataFrame.from_dict(self.oj,orient='index').T
self.pj = {
"燃料电池电压":hex2dec(self.oj['燃料电池电压'], k=0.1),
"燃料电池电流":hex2dec(self.oj['燃料电池电流'], k=0.1),
"燃料消耗率":hex2dec(self.oj['燃料消耗率'], k=0.01),
"燃料电池温度探针总数":hex2dec(self.oj['燃料电池温度探针总数']),
"探针温度值":[hex2dec(i, n=-40, k=0.1) for i in self.oj['燃料电池温度探针总数']],
"氢系统中最高温度":hex2dec(self.oj['氢系统中最高温度'], n=-40, k=0.1),
"氢系统中最高温度探针代号":hex2dec(self.oj['氢系统中最高温度探针代号']),
"氢气最高浓度":hex2dec(self.oj['氢气最高浓度']),
"氢气最高浓度传感器代号":hex2dec(self.oj['氢气最高浓度传感器代号']),
"氢气最高压力":hex2dec(self.oj['氢气最高压力'], k=0.1),
"氢气最高压力传感器代号":hex2dec(self.oj['氢气最高压力传感器代号']),
"高压DC/DC状态":dict_list_replace('07_02_03_12', self.oj['高压DC/DC状态']),
}
self.pj2 = {'燃料电池数据':self.pj}
self.pl = pd.DataFrame.from_dict(self.pj,orient='index').T
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
glv.set_value('data_f', self.next)
glv.set_value('m_07_02', self.nextMark)
glv.set_value('data_07_02_03', self.o)
print('fun_07_02_03 done!')
## fun_07_02_04
class fun_07_02_04(object):
def __init__(self, data):
cf = [1, 2, 2]
cf_a = hexlist2(cf)
data = data[2:]
self.o = data[0:cf_a[-1]]
list_o = [
"发动机状态",
"曲轴转速",
"燃料消耗率",
]
self.oj = list2dict(self.o, list_o, cf_a)
self.oj2 = {'发动机数据':self.oj}
self.ol = pd.DataFrame.from_dict(self.oj,orient='index').T
self.pj = {
"发动机状态":dict_list_replace("07_02_01_01", self.oj['发动机状态']),
"曲轴转速":fun_07_02_04.fun_2(self.oj['曲轴转速']),
"燃料消耗率":fun_07_02_04.fun_3(self.oj['燃料消耗率']),
}
self.pj2 = {'发动机数据':self.pj}
self.pl = pd.DataFrame.from_dict(self.pj,orient='index').T
self.next = data[cf_a[-1]:]
self.nextMark = data[cf_a[-1]:cf_a[-1]+2]
glv.set_value('data_f', self.next)
glv.set_value('m_07_02', self.nextMark)
glv.set_value('data_07_02_04', self.o)
print('fun_07_02_04 done!')
    # 02_04_02 crankshaft speed (曲轴转速)
def fun_2(data):
data = data.upper()
if data == 'FFFE':
return "异常"
elif data == "FFFF":
return "无效"
else :
return hex2dec(data)
    # 02_04_03 fuel consumption rate (燃料消耗率)
def fun_3(data):
data = data.upper()
if data == 'FFFE':
return "异常"
elif data == "FFFF":
return "无效"
else :
return hex2dec(data, k=0.01)
## fun_07_02_05
class fun_07_02_05(object):
def __init__(self, data):
self.cf = [1, 4, 4]
self.cf_a = hexlist2(self.cf)
data = data[2:]
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"定位状态",
"经度",
"纬度",
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
self.oj2 = {'车辆位置数据':self.oj}
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
'定位状态' : fun_07_02_05.fun_01(self.oj['定位状态']),
'经度' : hex2dec(self.oj['经度'], k=0.000001),
'纬度' : hex2dec(self.oj['纬度'], k=0.000001),
}
self.pj2 = {'车辆位置数据':self.pj}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
glv.set_value('data_f', self.next)
glv.set_value('m_07_02', self.nextMark)
glv.set_value('data_07_02_05', self.o)
print('fun_07_02_05 done!')
def fun_01(data):
n = '{:08b}'.format(int(data, 16))
state = n[-1]
longitude = n[-2]
latitude = n[-3]
if state == '0':
state_s = "定位有效"
else :
state_s = "定位无效"
if longitude == '0':
longitude_s = "北纬"
else :
longitude_s = "南纬"
if latitude == '0':
latitude_s = "东经"
else :
latitude_s = "西经"
output = [n, state_s, longitude_s, latitude_s]
return output
## fun_07_02_06
class fun_07_02_06(object):
def __init__(self, data):
self.cf = [1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1]
self.cf_a = hexlist2(self.cf)
data = data[2:]
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"最高电压电池子系统号",
"最高电压电池单体代号",
"电池单体电压最高值",
"最低电压电池子系统号",
"最低电压电池单体代号",
"电池单体电压最低值",
"最高温度子系统号",
"最高温度探针序号",
"最高温度值",
"最低温度子系统号",
"最低温度探针序号",
"最低温度值",
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
self.oj2 = {'极值数据':self.oj}
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
'最高电压电池子系统号' : hex2dec(self.oj['最高电压电池子系统号'], e=True),
'最高电压电池单体代号' : hex2dec(self.oj['最高电压电池单体代号'], e=True),
'电池单体电压最高值' : hex2dec(self.oj['电池单体电压最高值'], k=0.001, e=True),
'最低电压电池子系统号' : hex2dec(self.oj['最低电压电池子系统号'], e=True),
'最低电压电池单体代号' : hex2dec(self.oj['最低电压电池单体代号'], e=True),
'电池单体电压最低值' : hex2dec(self.oj['电池单体电压最低值'], k=0.001, e=True),
'最高温度子系统号' : hex2dec(self.oj['最高温度子系统号'], e=True),
'最高温度探针序号' : hex2dec(self.oj['最高温度探针序号'], e=True),
'最高温度值' : hex2dec(self.oj['最高温度值'], n=-40, e=True),
'最低温度子系统号' : hex2dec(self.oj['最低温度子系统号'], e=True),
'最低温度探针序号' : hex2dec(self.oj['最低温度探针序号'], e=True),
'最低温度值' : hex2dec(self.oj['最低温度值'], n=-40, e=True),
}
self.pj2 = {'极值数据':self.pj}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
glv.set_value('data_f', self.next)
glv.set_value('m_07_02', self.nextMark)
glv.set_value('data_07_02_06', self.o)
print('fun_07_02_06 done!')
## fun_07_02_07
class fun_07_02_07(object):
def __init__(self, data):
self.cf = [1, 4, 1, 1, 1, 1]
self.cf_a = hexlist2(self.cf)
data = data[2:]
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"最高报警等级",
"通用报警标志",
"可充电储能装置故障总数N1",
"驱动电机故障总数N2",
"发动机故障总数N3",
"其他故障总数N4",
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
        self.oj2 = {'报警数据':self.oj}
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
'最高报警等级' : hex2dec(self.oj['最高报警等级'], e=True),
'通用报警标志' : fun_07_02_07.fun_02(self.oj['通用报警标志']),
'可充电储能装置故障总数N1' : hex2dec(self.oj['可充电储能装置故障总数N1'], e=True),
'驱动电机故障总数N2' : hex2dec(self.oj['驱动电机故障总数N2'], e=True),
'发动机故障总数N3' : hex2dec(self.oj['发动机故障总数N3'], e=True),
'其他故障总数N4' : hex2dec(self.oj['其他故障总数N4'], e=True),
}
        self.pj2 = {'报警数据':self.pj}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
glv.set_value('data_f', self.next)
glv.set_value('m_07_02', self.nextMark)
glv.set_value('data_07_02_07', self.o)
print('fun_07_02_07 done!')
def fun_02(data):
n = '{:032b}'.format(int(data, 16))
baojing_list = [
"温度差异报警",
"电池高温报警",
"车载储能装置类型过压报警",
"车载储能装置类型欠压报警",
"SOC低报警",
"单体电池过压报警",
"单体电池欠压报警",
"SOC过高报警",
"SOC跳变报警",
"可充电储能系统不匹配报警",
"电池单体一致性差报警",
"绝缘报警",
"DC-DC温度报警",
"制动系统报警",
"DC-DC状态报警",
"驱动电机控制器温度报警",
"高压互锁状态报警",
"驱动电机温度报警",
"车载储能装置类型过充",
]
baojing = [n]
        for i in range(0, 19):
            # bit 0 is the last character of the binary string, hence -(i + 1)
            if n[-(i + 1)] == "1":
                baojing.append(baojing_list[i])
return baojing
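    # e.g. fun_02('00000001') -> [32-bit binary string, '温度差异报警'] (bit 0 maps to the first entry of baojing_list)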
## fun_07_02_08
class fun_07_02_08(object):
def __init__(self, data):
data = data[2:]
self.o = data
self.dj_n_o = data[0:2]
        self.dj_n_j = hex2dec(self.dj_n_o)  # number of rechargeable energy storage subsystems
self.cf_u = [1, 1]
self.cf = fun_07_02_08.fun_cf(self)
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"可充电储能子系统个数",
"可充电储能子系统号",
"可充电储能装置电压",
"可充电储能装置电流",
"单体电池总数",
"本帧起始电池序号",
"本帧单体电池总数",
"单体电池电压",
]
self.oj = fun_07_02_08.fun_oj(self)
self.oj2 = {'可充电储能装置电压数据':self.oj}
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
"可充电储能子系统个数":self.dj_n_j,
"可充电储能子系统号":[hex2dec(i) for i in self.oj['可充电储能子系统号']],
"可充电储能装置电压":[hex2dec(i, k=0.1) for i in self.oj['可充电储能装置电压']],
"可充电储能装置电流":[hex2dec(i, k=0.1, n=-1000) for i in self.oj['可充电储能装置电流']],
"单体电池总数":[hex2dec(i) for i in self.oj['单体电池总数']],
"本帧起始电池序号":[hex2dec(i) for i in self.oj['本帧起始电池序号']],
"本帧单体电池总数":[hex2dec(i) for i in self.oj['本帧单体电池总数']],
"单体电池电压":[hex2list(i, num=2, kk=0.01) for i in self.oj['单体电池电压']],
}
self.pj2 = {'可充电储能装置电压数据':self.pj}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
glv.set_value('data_f', self.next)
glv.set_value('m_07_02', self.nextMark)
glv.set_value('data_07_02_07', self.o)
print('fun_07_02_08 done!')
def fun_cf(self):
cf_u=[1,2,2,2,2,1,None]
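        # per-subsystem byte widths: 子系统号 1, 电压 2, 电流 2, 单体电池总数 2, 本帧起始电池序号 2, 本帧单体电池总数 1, 单体电池电压 2*N (N read per frame below)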
self.c = []
data = self.o
        for i in range(self.dj_n_j):
            cf_u[6] = hex2dec(data[20:22]) * 2
            self.c = self.c + cf_u
            # advance past this subsystem only (10 fixed bytes + cell-voltage bytes),
            # so the relative offset data[20:22] stays valid for the next subsystem
            data = data[2 * (10 + cf_u[6]):]
self.c = [1] + self.c
return self.c
def fun_oj(self):
data = self.o[2:]
cf_a = hexlist2(self.cf[1:])
dict_oj = {
"可充电储能子系统个数":self.dj_n_o,
}
list_o = [
"可充电储能子系统号",
"可充电储能装置电压",
"可充电储能装置电流",
"单体电池总数",
"本帧起始电池序号",
"本帧单体电池总数",
"单体电池电压",
]
dict_oj_u = {
"可充电储能子系统号":[],
"可充电储能装置电压":[],
"可充电储能装置电流":[],
"单体电池总数":[],
"本帧起始电池序号":[],
"本帧单体电池总数":[],
"单体电池电压":[],
}
for i in range(self.dj_n_j):
for j in range(len(dict_oj_u)):
data_unit = data[cf_a[i * len(dict_oj_u) + j]:cf_a[i * len(dict_oj_u) + j +1]]
dict_oj_u[list_o[j]].append(data_unit)
dict_all = dict(dict_oj, **dict_oj_u)
return dict_all
## fun_07_02_09
class fun_07_02_09(object):
def __init__(self, data):
data = data[2:]
self.o = data
self.dj_n_o = data[0:2]
        self.dj_n_j = hex2dec(self.dj_n_o)  # number of rechargeable energy storage subsystems
self.cf_u = [1, 1]
self.cf = fun_07_02_09.fun_cf(self)
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"可充电储能子系统个数",
"可充电储能子系统号",
"可充电储能温度探针个数",
"可充电储能子系统各温度探针检测到的温度值",
]
self.oj = fun_07_02_09.fun_oj(self)
self.oj2 = {'可充电储能装置温度数据':self.oj}
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
"可充电储能子系统个数":self.dj_n_j,
"可充电储能子系统号":[hex2dec(i) for i in self.oj['可充电储能子系统号']],
"可充电储能温度探针个数":[hex2dec(i) for i in self.oj['可充电储能温度探针个数']],
"可充电储能子系统各温度探针检测到的温度值":[hex2list(i, num=1, kn=-40) for i in self.oj['可充电储能子系统各温度探针检测到的温度值']],
}
self.pj2 = {'可充电储能装置温度数据':self.pj}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
glv.set_value('data_f', self.next)
glv.set_value('m_07_02', self.nextMark)
glv.set_value('data_07_02_07', self.o)
print('fun_07_02_09 done!')
def fun_cf(self):
cf_u=[1,2, None]
self.c = []
data = self.o
        for i in range(self.dj_n_j):
            n_str = data[4:8]
            n = hex2dec(n_str)
            cf_u[2] = n
            self.c = self.c + cf_u
            # advance past this subsystem only (3 fixed bytes + n probe-temperature bytes),
            # so the relative offset data[4:8] stays valid for the next subsystem
            data = data[2 * (3 + n):]
self.c = [1] + self.c
return self.c
def fun_oj(self):
data = self.o[2:]
cf_a = hexlist2(self.cf[1:])
dict_oj = {
"可充电储能子系统个数":self.dj_n_o,
}
list_o = [
"可充电储能子系统号",
"可充电储能温度探针个数",
"可充电储能子系统各温度探针检测到的温度值",
]
dict_oj_u = {
"可充电储能子系统号":[],
"可充电储能温度探针个数":[],
"可充电储能子系统各温度探针检测到的温度值":[],
}
for i in range(self.dj_n_j):
for j in range(len(dict_oj_u)):
data_unit = data[cf_a[i * len(dict_oj_u) + j]:cf_a[i * len(dict_oj_u) + j +1]]
dict_oj_u[list_o[j]].append(data_unit)
dict_all = dict(dict_oj, **dict_oj_u)
return dict_all
## fun_07_cursor
class fun_07_cursor:
def __init__(self, data):
if len(data) > 2:
self.o = data[:-2]
elif len(data) < 2:
print("错误")
self.o = None
else :
self.o = None
self.oj = self.pj = {'自定义': self.o}
self.ol = self.pl = pd.DataFrame({'自定义': [self.o]})
        print('fun_07_cursor done!')
## fun_08
class fun_08:
def __init__(self, data):
self.o = glv.get_value('bcc')
self.oj = self.pj = {'校验码':self.o}
self.ol = self.pl = pd.DataFrame({'校验码':[self.o]})
print('fun_08 done!')
## main frame parser (entry point)
class gb32960:
glv._init()
def __init__(self, data, model=['01','02','03','04','05','06','07','08','09']):
glv.set_value('model', model)
data = data.upper()
bcc = data[-2:]
glv.set_value('bcc', bcc)
data = data[:-2]
glv.set_value('data_f', data)
self.o = data
self.f_01 = fun_01to06(glv.get_value('data_f'))
self.f_07 = fun_07(glv.get_value('data_f'))
        self.f_08 = fun_08(glv.get_value('data_f'))
self.oj = dict(self.f_01.oj, **self.f_07.oj)
self.oj = dict(self.oj, **self.f_08.oj)
self.ol = pd.merge(self.f_01.ol, self.f_07.ol, left_index=True, right_index=True)
self.ol = pd.merge(self.ol, self.f_08.ol, left_index=True, right_index=True)
self.pj = dict(self.f_01.pj, **self.f_07.pj)
self.pj = dict(self.pj, **self.f_08.pj)
self.pl = pd.merge(self.f_01.pl, self.f_07.pl, left_index=True, right_index=True)
        self.pl = pd.merge(self.pl, self.f_08.pl, left_index=True, right_index=True)
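# Minimal usage sketch (assumptions: `raw_frame` is a complete GB32960 hex string, case-insensitive,
# ending with its one-byte BCC checksum; the frame below is a hypothetical placeholder, not a real sample):
# raw_frame = '2323...'
# msg = gb32960(raw_frame)
# msg.pj       # nested dict of human-readable fields (报文头 + 数据单元 + 校验码)
# msg.pl       # the same fields flattened into a single-row DataFrame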
import pandas as pd
import matplotlib.pyplot as plt
# import seaborn as sns
# sns.set(rc={'figure.figsize':(11, 4)})
dataDirectory = '../data/'
graphsDirectory = 'graphs/'
def visDay(dfs,sensors,day):
plt.clf()
fig, axs = plt.subplots(len(dfs),sharex=True,sharey=True,gridspec_kw={'hspace': 0.5},figsize=(20, 10))
fig.suptitle('Measurements for day {0}'.format(day))
for i in range(len(dfs)):
axs[i].plot(dfs[i]['measurement'],marker='.', alpha=0.5, linestyle='None')
axs[i].set_title('Sensor {0}'.format(sensors[i]))
axs[i].set_ylabel('Temperature in °C')
plt.ylim([15,30])
plt.savefig(graphsDirectory+"day_{0}_sensors_{1}.pdf".format(day,str(sensors).replace(' ','')))
def visSingleSensor(df,day,sensor):
# print("sensor {0} on day {1}".format(sensor,day))
print(day,'&',sensor,'&',len(df),'&',df['measurement'].max(),"&", df['measurement'].min(),"&",df['measurement'].mean(),'&',df['measurement'][0],'&',df['measurement'][-1])
plt.clf()
plt.figure(figsize=(10, 5))
plt.plot(df['measurement'],marker='.', alpha=0.5, linestyle='None')
plt.title('Temperature for sensor {0} on day {1}'.format(sensor,day))
plt.ylabel('Temperature in °C')
# plt.show()
plt.savefig(graphsDirectory+"day_{0}_sensor_{1}.pdf".format(day,sensor))
def createGraphsDayOne():
firstDate = '2017-02-28'
for sens in [sensors1,sensors24]:
sensorDfs = []
for i in sens:
df = pd.read_csv(dataDirectory + firstDate + '_sensor_{0}.csv'.format(i), dtype={"measurement": float, "voltage": float})
df['time'] = pd.to_datetime(df['time'])
df.set_index('time',inplace=True)
df.index = df.index.time
visSingleSensor(df,1,i)
sensorDfs.append(df)
visDay(sensorDfs,sens,1)
def anomaliesDayOne():
firstDate = '2017-02-28'
for i in [1,24]:
df = pd.read_csv(dataDirectory + firstDate + '_sensor_{0}.csv'.format(i),
dtype={"measurement": float, "voltage": float})
df['time'] = pd.to_datetime(df['time'])
df.set_index('time', inplace=True)
# df.index = df.index.time
        groups = df.groupby(pd.Grouper(freq='60s'))
import pandas as pd
import re
filename="/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/input_data/expt_summary_data/viral_seq/LASV_all_metadata_Raphaelle_2019-07-23.xlsx"
lsv_file = "/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/input_data/patient_rosters/acuteLassa_metadata_v2_2019-06-12_PRIVATE.csv"
df = pd.read_excel(filename, sheet_name=1)
df.head()
# Pulling out the IDs from the sequencing string.
# Assuming segment_S / segment_L are identical aside from the accession number.
def getSeqID(row):
if(row.segment_S == row.segment_S):
return(row.segment_S)
return(row.segment_L)
df['seq_id'] = df.apply(getSeqID, axis = 1)
df.head()
df['acc_num'], df['id'], df['species'], df['country2'], df['date'] = df.seq_id.str.split("|").str
# Find the GIDs
def getGID(id):
if(id == id):
gid = re.search("G(\d\d\d\d)", id)
if(gid):
return(f"G-{gid[1]}")
gid5 = re.search("G(\d\d\d\d)\d", id)
if(gid5):
return(f"G-{gid5[1]}")
gid3 = re.search("G(\d\d\d)", id)
if(gid3):
return(f"G-0{gid3[1]}")
gid4_ = re.search("G-(\d\d\d\d)", id)
if(gid4_):
return(f"G-{gid4_[1]}")
gid3_ = re.search("G-(\d\d\d)", id)
if(gid3_):
return(f"G-0{gid3_[1]}")
df['gID'] = df.id.apply(getGID)
kgh = df[df.gID == df.gID]
kgh.shape
kgh.columns
# Admin1 == district (actually admin2)
kgh.admin1.value_counts(dropna=False)
# Mostly villages?
kgh.location.value_counts(dropna=False)
# [Read in the geographic info] ----------------------------------------------------------------------------------------------------
# Geo Data hasn't been cleaned up yet, so importing the geo data separately.
lsv_geo = pd.read_csv(lsv_file)
lsv_geo['gID'] = lsv_geo.gid.apply(getGID)
# very mild clean-- Ken is clearly Kenema
def cleanDistrict(district):
if(district == district):
if(district.title() == "Ken"):
return("Kenema")
return(district)
lsv_geo['district'] = lsv_geo.District.apply(cleanDistrict)
lsv_geo.head()
lsv_geo = lsv_geo[['gID', 'district', "Chiefdom", "Village"]]
merged = pd.merge(kgh, lsv_geo, how="left", left_on="gID", right_on="gID", indicator=True)
import os
import sys
import json
import copy
import numpy as np
import pandas as pd
import random
import tensorflow as tf
# import PIL
seed_value = 123
os.environ['PYTHONHASHSEED']=str(seed_value)
random.seed(seed_value)
np.random.seed(seed_value)
tf.set_random_seed(seed_value)
from keras.utils import to_categorical
import keras.backend as k
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth=True
k.set_session(tf.Session(config=config))
sys.path.append('/'.join(os.getcwd().split('/')))
from ornstein_auto_encoder import logging_daily
from ornstein_auto_encoder import configuration
from ornstein_auto_encoder import readers
from ornstein_auto_encoder import samplers
from ornstein_auto_encoder import build_network
from ornstein_auto_encoder.utils import argv_parse
if '1.15' in tf.__version__:
from ornstein_auto_encoder.fid_v1_15 import get_fid as _get_fid
else:
from ornstein_auto_encoder.fid import get_fid as _get_fid
from ornstein_auto_encoder.inception_score import get_inception_score as _get_inception_score
#####################################################################################################
def get_fid(images1, images2):
imgs1 = np.clip(255*((images1).transpose([0,3,1,2]) * 0.5 + 0.5),0,255) #.astype(np.uint8)
imgs2 = np.clip(255*((images2).transpose([0,3,1,2]) * 0.5 + 0.5),0,255) #.astype(np.uint8)
return _get_fid(imgs1, imgs2)
def get_is(images, size=100):
imgs = np.clip(255*(images.transpose([0,3,1,2]) * 0.5 + 0.5),0,255) #.astype(np.uint8)
return _get_inception_score(imgs, splits=1)[0]
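# Both helpers above assume decoder outputs in [-1, 1] (e.g. tanh range) and rescale them to [0, 255]
# before computing FID / Inception scores.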
if __name__=='__main__':
argdict = argv_parse(sys.argv)
logger = logging_daily.logging_daily(argdict['log_info'][0])
logger.reset_logging()
log = logger.get_logging()
log.setLevel(logging_daily.logging.INFO)
log.info('-----------------------------------------------------------------------------------')
log.info('Evaluate the performance measures for VGGFace2')
log.info('-----------------------------------------------------------------------------------')
model_path = argdict['model_path'][0].strip()
try:
model_aka = argdict['model_aka'][0].strip()
except:
model_aka = model_path.split('/')[-1]
feature_b = True
path_info_config = argdict['path_info'][0]
network_info_config = argdict['network_info'][0]
##############################################################################################
# Set hyper-parameter for testing
config_data = configuration.Configurator(path_info_config, log, verbose=False)
config_data.set_config_map(config_data.get_section_map())
config_network = configuration.Configurator(network_info_config, log, verbose=False)
config_network.set_config_map(config_network.get_section_map())
path_info = config_data.get_config_map()
network_info = config_network.get_config_map()
path_info['model_info']['model_dir'] = model_path
if network_info['model_info']['network_class'] == 'ProductSpaceOAEFixedBHSIC_GAN':
        network_info['model_info']['network_class'] = 'ProductSpaceOAEHSIC_GAN'
if float(network_info['model_info']['e_weight']) == 0.: network_info['model_info']['e_weight'] = '1.'
if network_info['training_info']['warm_start'] == 'True':
network_info['training_info']['warm_start'] = 'False'
network_info['training_info']['warm_start_model'] = ''
if network_info['model_info']['augment'] == 'True':
network_info['model_info']['augment'] = 'False'
##############################################################################################
# Reader
reader_class = getattr(readers, network_info['model_info']['reader_class'].strip())
reader = reader_class(log, path_info, network_info, mode='train', verbose=True)
def get_numerics(model_path, model_aka,
path_info_config = "configurations/vggface2/psoae_path_info.cfg",
network_info_config = "configurations/vggface2/psoae_network_total_info.cfg",
unknown=False, feature_b=False):
# Set hyper-parameter for testing
config_data = configuration.Configurator(path_info_config, log, verbose=False)
config_data.set_config_map(config_data.get_section_map())
config_network = configuration.Configurator(network_info_config, log, verbose=False)
config_network.set_config_map(config_network.get_section_map())
path_info = config_data.get_config_map()
network_info = config_network.get_config_map()
path_info['model_info']['model_dir'] = model_path
if network_info['model_info']['network_class'] == 'ProductSpaceOAEFixedBHSIC_GAN':
            network_info['model_info']['network_class'] = 'ProductSpaceOAEHSIC_GAN'
if float(network_info['model_info']['e_weight']) == 0.: network_info['model_info']['e_weight'] = '1.'
if network_info['training_info']['warm_start'] == 'True':
network_info['training_info']['warm_start'] = 'False'
network_info['training_info']['warm_start_model'] = ''
if network_info['model_info']['augment'] == 'True':
network_info['model_info']['augment'] = 'False'
log.info('-----------------------------------------------------------------')
unknown = unknown
log.info('%s: unknown=%s' % (model_aka, unknown))
log.info('-----------------------------------------------------------------')
config_data = configuration.Configurator(argdict['path_info'][0], log, verbose=False)
config_data.set_config_map(config_data.get_section_map())
config_network = configuration.Configurator(argdict['network_info'][0], log, verbose=False)
config_network.set_config_map(config_network.get_section_map())
path_info = config_data.get_config_map()
network_info = config_network.get_config_map()
# Set hyper-parameter for testing
path_info['model_info']['model_dir'] = model_path
if network_info['model_info']['network_class'] == 'ProductSpaceOAEFixedBHSIC_GAN':
        network_info['model_info']['network_class'] = 'ProductSpaceOAEHSIC_GAN'
if float(network_info['model_info']['e_weight']) == 0.: network_info['model_info']['e_weight'] = '1.'
if network_info['training_info']['warm_start'] == 'True':
network_info['training_info']['warm_start'] = 'False'
network_info['training_info']['warm_start_model'] = ''
if network_info['model_info']['augment'] == 'True':
network_info['model_info']['augment'] = 'False'
### Bulid network ####################################################################################
log.info('-----------------------------------------------------------------')
network_class = getattr(build_network, ''.join(network_info['model_info']['network_class'].strip().split('FixedB')))
network = network_class(log, path_info, network_info, n_label=reader.get_n_label())
network.build_model('./%s/%s' % (model_path, path_info['model_info']['model_architecture']), verbose=0)
network.load(model_path)
log.info('-----------------------------------------------------------------')
# Training
test_tot_idxs_path = os.path.join(model_path, path_info['model_info']['test_tot_idxs'])
test_idx = np.load(test_tot_idxs_path)
if unknown:
# Real Test data sampler (not-trained subject)
new_network_info = copy.deepcopy(network_info)
new_path_info = copy.deepcopy(path_info)
new_reader = reader_class(log, new_path_info, new_network_info, mode='test', verbose=False)
real_test_idx = np.arange(new_reader.get_label().shape[0])
test_idx = real_test_idx
log.info('Construct test data sampler')
validation_sampler_class = getattr(samplers, network_info['validation_info']['sampler_class'].strip())
if unknown:
test_sampler = validation_sampler_class(log, test_idx, new_reader, network_info['validation_info'], verbose=False)
else:
test_sampler = validation_sampler_class(log, test_idx, reader, network_info['validation_info'], verbose=False)
tot_sharpness_original = []
tot_is_original = []
# tot_reconstruction = []
tot_gen_fid = []
tot_gen_is = []
tot_sharpness_gen = []
tot_one_shot_gen_fid = []
tot_one_shot_gen_is = []
tot_one_shot_sharpness_gen = []
for nrepeat in range(10):
log.info('-%d------------------------------------------------' % nrepeat)
nunit = 30
nobservations = 300
picked_y_class = np.random.choice(test_sampler.y_class, nunit, replace=False)
test_idxs = []
picked_one_shot_idxs = []
for yc in picked_y_class:
try: chosen_observations = np.random.choice(test_sampler.train_idx[test_sampler.y_index.get_loc(yc)], nobservations)
except: chosen_observations = np.random.choice(test_sampler.train_idx[test_sampler.y_index.get_loc(yc)], nobservations, replace=True)
test_idxs.append(chosen_observations)
picked_one_shot_idxs.append(np.random.choice(np.arange(nobservations), 1)[0])
test_idxs = np.array(test_idxs).flatten()
picked_one_shot_idxs = np.array(picked_one_shot_idxs)
x, y = test_sampler.reader.get_batch(test_idxs)
y_table = pd.Series(y)
y_index = pd.Index(y)
y_class = y_table.unique()
            y_table = pd.Series(y)
import requests
from typing import List
import re
# from nciRetriever.updateFC import updateFC
# from nciRetriever.csvToArcgisPro import csvToArcgisPro
# from nciRetriever.geocode import geocodeSites
# from nciRetriever.createRelationships import createRelationships
# from nciRetriever.zipGdb import zipGdb
# from nciRetriever.updateItem import update
# from nciRetriever.removeTables import removeTables
from datetime import date
import pandas as pd
import logging
from urllib.parse import urljoin
import json
import time
import sys
import os
from pprint import pprint
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
today = date.today()
# nciThesaurus = pd.read_csv('thesaurus.csv')
# uniqueMainDiseasesDf = pd.read_csv('nciUniqueMainDiseasesReference.csv')
# uniqueSubTypeDiseasesDf = pd.read_csv('nciUniqueSubTypeDiseasesReference.csv')
# uniqueDiseasesWithoutSynonymsDf = pd.read_csv('nciUniqueDiseasesWithoutSynonymsReference.csv')
def createTrialDict(trial: dict) -> dict:
trialDict = {'nciId': trial['nci_id'],
'protocolId': trial['protocol_id'],
'nctId': trial['nct_id'],
'detailDesc': trial['detail_description'],
'officialTitle': trial['official_title'],
'briefTitle': trial['brief_title'],
'briefDesc': trial['brief_summary'],
'phase': trial['phase'],
'leadOrg': trial['lead_org'],
'amendmentDate': trial['amendment_date'],
'primaryPurpose': trial['primary_purpose'],
'currentTrialStatus': trial['current_trial_status'],
'startDate': trial['start_date']}
if 'completion_date' in trial.keys():
trialDict.update({'completionDate': trial['completion_date']})
if 'active_sites_count' in trial.keys():
trialDict.update({'activeSitesCount': trial['active_sites_count']})
if 'max_age_in_years' in trial['eligibility']['structured'].keys():
trialDict.update({'maxAgeInYears': int(trial['eligibility']['structured']['max_age_in_years'])})
if 'min_age_in_years' in trial['eligibility']['structured'].keys():
trialDict.update({'minAgeInYears': int(trial['eligibility']['structured']['min_age_in_years']) if trial['eligibility']['structured']['min_age_in_years'] is not None else None})
if 'gender' in trial['eligibility']['structured'].keys():
trialDict.update({'gender': trial['eligibility']['structured']['gender']})
if 'accepts_healthy_volunteers' in trial['eligibility']['structured'].keys():
trialDict.update({'acceptsHealthyVolunteers': trial['eligibility']['structured']['accepts_healthy_volunteers']})
if 'study_source' in trial.keys():
trialDict.update({'studySource': trial['study_source']})
if 'study_protocol_type' in trial.keys():
trialDict.update({'studyProtocolType': trial['study_protocol_type']})
if 'record_verification_date' in trial.keys():
trialDict.update({'recordVerificationDate': trial['record_verification_date']})
return trialDict
def createSiteDict(trial:dict, site:dict) -> dict:
siteDict = {'nciId': trial['nci_id'],
'orgStateOrProvince': site['org_state_or_province'],
'contactName': site['contact_name'],
'contactPhone': site['contact_phone'],
'recruitmentStatusDate': site['recruitment_status_date'],
'orgAddressLine1': site['org_address_line_1'],
'orgAddressLine2': site['org_address_line_2'],
'orgVa': site['org_va'],
'orgTty': site['org_tty'],
'orgFamily': site['org_family'],
'orgPostalCode': site['org_postal_code'],
'contactEmail': site['contact_email'],
'recruitmentStatus': site['recruitment_status'],
'orgCity': site['org_city'],
'orgEmail': site['org_email'],
'orgCountry': site['org_country'],
'orgFax': site['org_fax'],
'orgPhone': site['org_phone'],
'orgName': site['org_name']
}
# if 'org_coordinates' in site.keys():
# siteDict['lat'] = site['org_coordinates']['lat']
# siteDict['long'] = site['org_coordinates']['lon']
return siteDict
def createBiomarkersDicts(trial:dict, marker:dict) -> List[dict]:
parsedBiomarkers = []
for name in [*marker['synonyms'], marker['name']]:
biomarkerDict = {
'nciId': trial['nci_id'],
'nciThesaurusConceptId': marker['nci_thesaurus_concept_id'],
'name': name,
'assayPurpose': marker['assay_purpose']
}
if 'eligibility_criterion' in marker.keys():
biomarkerDict.update({'eligibilityCriterion': marker['eligibility_criterion']})
if 'inclusion_indicator' in marker.keys():
biomarkerDict.update({'inclusionIndicator': marker['inclusion_indicator']})
parsedBiomarkers.append(biomarkerDict)
return parsedBiomarkers
def createMainBiomarkersDict(trial:dict, marker:dict) -> dict:
parsedBiomarker = {
'nciId': trial['nci_id'],
'nciThesaurusConceptId': marker['nci_thesaurus_concept_id'],
'name': marker['name'],
'assayPurpose': marker['assay_purpose'],
}
if 'eligibility_criterion' in marker.keys():
parsedBiomarker.update({'eligibilityCriterion': marker['eligibility_criterion']})
if 'inclusion_indicator' in marker.keys():
parsedBiomarker.update({'inclusionIndicator': marker['inclusion_indicator']})
return parsedBiomarker
def createDiseasesDicts(trial:dict, disease:dict) -> List[dict]:
parsedDiseases = []
try:
names = [disease['name']]
if 'synonyms' in disease.keys():
names.extend(disease['synonyms'])
except KeyError:
logger.error(f'Invalid key for diseases. Possible keys: {disease.keys()}')
return parsedDiseases
for name in names:
diseaseDict = {
'inclusionIndicator': disease['inclusion_indicator'],
'isLeadDisease': disease['is_lead_disease'],
'name': name,
'nciThesaurusConceptId': disease['nci_thesaurus_concept_id'],
'nciId': trial['nci_id']
}
parsedDiseases.append(diseaseDict)
return parsedDiseases
def createMainToSubTypeRelDicts(trial:dict, disease:dict) -> List[dict]:
if 'subtype' not in disease['type']:
return []
relDicts = []
for parent in disease['parents']:
relDicts.append({
'maintype': parent,
'subtype': disease['nci_thesaurus_concept_id']
})
return relDicts
def createDiseasesWithoutSynonymsDict(trial:dict, disease:dict) -> dict:
# diseaseDict = {
# 'nciId': trial['nci_id'],
# 'inclusionIndicator': disease['inclusion_indicator'],
# 'isLeadDisease': disease['is_lead_disease'],
# 'nciThesaurusConceptId': disease['nci_thesaurus_concept_id']
# }
# correctDisease = uniqueDiseasesWithoutSynonymsDf.loc[uniqueDiseasesWithoutSynonymsDf['nciThesaurusConceptId'] == disease['nci_thesaurus_concept_id']]
# if correctDisease.empty:
# logger.error('Disease not found in full reference. Aborting insertion...')
# return {}
# # logger.debug(correctDisease['name'].values[0])
# # time.sleep(2)
# diseaseDict.update({
# 'name': correctDisease['name'].values[0]
# })
# return diseaseDict
try:
return {
'nciId': trial['nci_id'],
'name': disease['name'],
'isLeadDisease': disease['is_lead_disease'],
'nciThesaurusConceptId': disease['nci_thesaurus_concept_id'],
'inclusionIndicator': disease['inclusion_indicator']
}
except KeyError:
logger.error('Invalid key for main diseases. Not adding to list...')
return {}
def createMainDiseasesDict(trial:dict, disease:dict) -> dict:
# diseaseDict = {
# 'nciId': trial['nci_id'],
# 'inclusionIndicator': disease['inclusion_indicator'],
# 'isLeadDisease': disease['is_lead_disease'],
# 'nciThesaurusConceptId': disease['nci_thesaurus_concept_id']
# }
# correctDisease = uniqueMainDiseasesDf.loc[uniqueMainDiseasesDf['nciThesaurusConceptId'] == disease['nci_thesaurus_concept_id']]
# if correctDisease.empty:
# return {}
# diseaseDict.update({
# 'name': correctDisease['name'].values[0]
# })
# return diseaseDict
# if 'type' not in disease.keys():
# return {}
if 'maintype' not in disease['type']:
return {}
try:
return {
'nciId': trial['nci_id'],
'name': disease['name'],
'isLeadDisease': disease['is_lead_disease'],
'nciThesaurusConceptId': disease['nci_thesaurus_concept_id'],
'inclusionIndicator': disease['inclusion_indicator']
}
except KeyError:
logger.error('Invalid key for main diseases. Not adding to list...')
return {}
def createSubTypeDiseasesDict(trial:dict, disease:dict) -> dict:
# diseaseDict = {
# 'nciId': trial['nci_id'],
# 'inclusionIndicator': disease['inclusion_indicator'],
# 'isLeadDisease': disease['is_lead_disease'],
# 'nciThesaurusConceptId': disease['nci_thesaurus_concept_id']
# }
# correctDisease = uniqueSubTypeDiseasesDf.loc[uniqueSubTypeDiseasesDf['nciThesaurusConceptId'] == disease['nci_thesaurus_concept_id']]
# if correctDisease.empty:
# return {}
# diseaseDict.update({
# 'name': correctDisease['name'].values[0]
# })
# return diseaseDict
# if 'type' not in disease.keys():
# return {}
if 'subtype' not in disease['type']:
return {}
try:
return {
'nciId': trial['nci_id'],
'name': disease['name'],
'isLeadDisease': disease['is_lead_disease'],
'nciThesaurusConceptId': disease['nci_thesaurus_concept_id'],
'inclusionIndicator': disease['inclusion_indicator']
}
except KeyError:
logger.error('Invalid key for subtype diseases. Not adding to list...')
return {}
def createArmsDict(trial:dict, arm:dict) -> dict:
parsedArm = re.sub(r'\(.+\)', '', arm['name'])
parsedArm = re.sub(r'\s+', '_', parsedArm.strip())
return {
'nciId': trial['nci_id'],
'name': arm['name'],
'nciIdWithName': f'{trial["nci_id"]}_{parsedArm}',
'description': arm['description'],
'type': arm['type']
}
def createInterventionsDicts(trial:dict, arm:dict) -> List[dict]:
parsedInterventions = []
parsedArm = re.sub(r'\(.+\)', '', arm['name'])
parsedArm = re.sub(r'\s+', '_', parsedArm.strip())
for intervention in arm['interventions']:
names = intervention['synonyms']
if 'name' in intervention.keys():
names.append(intervention['name'])
elif 'intervention_name' in intervention.keys():
names.append(intervention['intervention_name'])
for name in names:
try:
interventionDict = {
'nciId': trial['nci_id'],
'arm': arm['name'],
'nciIdWithArm': f'{trial["nci_id"]}_{parsedArm}',
'type': intervention['intervention_type'],
'inclusionIndicator': intervention['inclusion_indicator'],
'name': name,
'category': intervention['category'],
'nciThesaurusConceptId': intervention['intervention_code'],
'description': intervention['intervention_description']
}
except KeyError:
try:
interventionDict = {
'nciId': trial['nci_id'],
'arm': arm['name'],
'nciIdWithArm': f'{trial["nci_id"]}_{parsedArm}',
'type': intervention['type'],
'inclusionIndicator': intervention['inclusion_indicator'],
'name': name,
'category': intervention['category'],
'nciThesaurusConceptId': intervention['nci_thesaurus_concept_id'],
'description': intervention['description']
}
except KeyError as e:
logger.exception(e)
logger.error(f'Invalid intervention keys. Possible keys are: {intervention.keys()}')
continue
parsedInterventions.append(interventionDict)
return parsedInterventions
def createMainInterventionDicts(trial:dict, arm:dict) -> List[dict]:
parsedArm = re.sub(r'\(.+\)', '', arm['name'])
parsedArm = re.sub(r'\s+', '_', parsedArm.strip())
parsedMainInterventions = []
for intervention in arm['interventions']:
try:
mainInterventionDict = {
'nciId': trial['nci_id'],
'arm': arm['name'],
'nciIdWithArm': f'{trial["nci_id"]}_{parsedArm}',
'type': intervention['intervention_type'],
'inclusionIndicator': intervention['inclusion_indicator'],
'name': intervention['intervention_name'],
'category': intervention['category'],
'nciThesaurusConceptId': intervention['intervention_code'],
'description': intervention['intervention_description']
}
except KeyError:
try:
mainInterventionDict = {
'nciId': trial['nci_id'],
'arm': arm['name'],
'nciIdWithArm': f'{trial["nci_id"]}_{parsedArm}',
'type': intervention['type'],
'inclusionIndicator': intervention['inclusion_indicator'],
'name': intervention['name'],
'category': intervention['category'],
'nciThesaurusConceptId': intervention['nci_thesaurus_concept_id'],
'description': intervention['description']
}
except KeyError:
logger.error(f'Unexpected intervention keys: {intervention.keys()}. Not inserting...')
continue
parsedMainInterventions.append(mainInterventionDict)
return parsedMainInterventions
def deDuplicateTable(csvName:str, deduplicationList:List[str]):
df = pd.read_csv(csvName)
df.drop_duplicates(subset=deduplicationList, inplace=True)
df.to_csv(csvName, index=False)
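# Hypothetical usage: deDuplicateTable(f'nciSites{today}.csv', ['nciId', 'orgName'])
# would keep one row per trial/site pair.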
def correctMainToSubTypeTable(today):
mainDf = pd.read_csv(f'nciUniqueMainDiseases{today}.csv')
subTypeDf = pd.read_csv(f'nciUniqueSubTypeDiseases{today}.csv')
relDf = pd.read_csv(f'MainToSubTypeRelTable{today}.csv')
for idx, row in relDf.iterrows():
parentId = row['maintype']
if parentId in mainDf['nciThesaurusConceptId'].values:
continue
elif parentId in subTypeDf['nciThesaurusConceptId'].values:
while True:
possibleMainTypesDf = relDf[relDf['subtype'] == parentId]
if possibleMainTypesDf.empty:
logger.error(f'Parent {parentId} not found in main diseases or subtype diseases')
parentId = ''
break
#setting the parentId value with the parent of the subtype found
for value in possibleMainTypesDf['maintype'].values:
if parentId == value:
continue
parentId = value
break
else:
logger.error(f'Parent {parentId} not found in main diseases or subtype diseases')
parentId = ''
break
# parentId = possibleMainTypesDf['maintype'].values[0]
if parentId in mainDf['nciThesaurusConceptId'].values:
break
if parentId == '':
continue
            relDf.loc[idx, 'maintype'] = parentId
else:
pass
relDf.to_csv(f'MainToSubTypeRelTable{today}.csv', index=False)
# logger.error(f'maintype id {parentId} is not found in main diseases or subtype diseases')
def createUniqueSitesCsv(today):
logger.debug('Reading sites...')
sitesDf = pd.read_csv(f'nciSites{today}.csv')
logger.debug('Dropping duplicates and trial-depedent information...')
sitesDf.drop_duplicates(subset='orgName', inplace=True)
sitesDf.drop(['recruitmentStatusDate', 'recruitmentStatus', 'nciId'], axis=1, inplace=True)
logger.debug('Saving unique sites table...')
sitesDf.to_csv(f'nciUniqueSites{today}.csv', index=False)
def createUniqueDiseasesWithoutSynonymsCsv(today):
logger.debug('Reading diseases without synonyms...')
diseasesWithoutSynonymsDf = pd.read_csv(f'nciDiseasesWithoutSynonyms{today}.csv')
logger.debug('Dropping duplicates and trial-dependent information...')
diseasesWithoutSynonymsDf.drop_duplicates(subset='nciThesaurusConceptId', inplace=True)
diseasesWithoutSynonymsDf.drop(['isLeadDisease', 'inclusionIndicator', 'nciId'], axis=1, inplace=True)
    diseasesWithoutSynonymsDf.dropna(inplace=True)
logger.debug('Saving unique diseases table...')
diseasesWithoutSynonymsDf.to_csv(f'nciUniqueDiseasesWithoutSynonyms{today}.csv', index=False)
def createUniqueMainDiseasesCsv(today):
logger.debug('Reading main diseases...')
mainDiseasesDf = pd.read_csv(f'nciMainDiseases{today}.csv')
logger.debug('Dropping duplicates and trial-dependent information...')
mainDiseasesDf.drop_duplicates(subset='nciThesaurusConceptId', inplace=True)
mainDiseasesDf.drop(['isLeadDisease', 'inclusionIndicator', 'nciId'], axis=1, inplace=True)
    mainDiseasesDf.dropna(inplace=True)
logger.debug('Saving unique diseases table...')
mainDiseasesDf.to_csv(f'nciUniqueMainDiseases{today}.csv', index=False)
def createUniqueSubTypeDiseasesCsv(today):
logger.debug('Reading main diseases...')
subTypeDiseasesDf = pd.read_csv(f'nciSubTypeDiseases{today}.csv')
logger.debug('Dropping duplicates and trial-dependent information...')
subTypeDiseasesDf.drop_duplicates(subset='nciThesaurusConceptId', inplace=True)
subTypeDiseasesDf.drop(['isLeadDisease', 'inclusionIndicator', 'nciId'], axis=1, inplace=True)
    subTypeDiseasesDf.dropna(inplace=True)
logger.debug('Saving unique diseases table...')
subTypeDiseasesDf.to_csv(f'nciUniqueSubTypeDiseases{today}.csv', index=False)
def createUniqueBiomarkersCsv(today):
logger.debug('Reading main biomarkers...')
mainBiomarkersDf = pd.read_csv(f'nciMainBiomarkers{today}.csv')
logger.debug('Dropping duplicates and trial-dependent information...')
mainBiomarkersDf.drop_duplicates(subset='nciThesaurusConceptId', inplace=True)
mainBiomarkersDf.drop(['eligibilityCriterion', 'inclusionIndicator', 'assayPurpose', 'nciId'], axis=1, inplace=True)
    mainBiomarkersDf.dropna(inplace=True)
logger.debug('Saving unique biomarkers table...')
mainBiomarkersDf.to_csv(f'nciUniqueMainBiomarkers{today}.csv', index=False)
def createUniqueInterventionsCsv(today):
logger.debug('Reading main interventions...')
mainInterventionsDf = pd.read_csv(f'nciMainInterventions{today}.csv')
logger.debug('Dropping duplicates and trial-dependent information...')
mainInterventionsDf.drop_duplicates(subset='nciThesaurusConceptId', inplace=True)
mainInterventionsDf.drop(['nciId', 'inclusionIndicator', 'arm', 'nciIdWithArm'], axis=1, inplace=True)
    mainInterventionsDf.dropna(inplace=True)
logger.debug('Saving unique interventions table...')
mainInterventionsDf.to_csv(f'nciUniqueMainInterventions{today}.csv', index=False)
def retrieveToCsv():
baseUrl = r'https://clinicaltrialsapi.cancer.gov/api/v2/'
with open('./nciRetriever/secrets/key.txt', 'r') as f:
apiKey = f.read()
headers = {
'X-API-KEY': apiKey,
'Content-Type': 'application/json'
}
trialEndpoint = urljoin(baseUrl, 'trials')
logger.debug(trialEndpoint)
#sending initial request to get the total number of trials
trialsResponse = requests.get(trialEndpoint, headers=headers, params={'trial_status': 'OPEN'})
trialsResponse.raise_for_status()
trialJson = trialsResponse.json()
totalNumTrials = trialJson['total']
logger.debug(f'Total number of trials: {totalNumTrials}')
start = time.perf_counter()
createdTrialCsv = False
createdSiteCsv = False
createdEligibilityCsv = False
createdBiomarkerCsv = False
createdMainBiomarkerCsv = False
createdDiseaseCsv = False
createdMainToSubTypeRelTableCsv = False
createdDiseaseWithoutSynonymsCsv = False
createdMainDiseaseCsv = False
createdSubTypeDiseaseCsv = False
createdArmsCsv = False
createdInterventionCsv = False
createdMainInterventionCsv = False
for trialNumFrom in range(0, totalNumTrials, 50):
sectionStart = time.perf_counter()
#creating the dataframes again after every 50 trials to avoid using too much memory
trialsDf = pd.DataFrame(columns=['protocolId',
'nciId',
'nctId',
'detailDesc',
'officialTitle',
'briefTitle',
'briefDesc',
'phase',
'leadOrg',
'amendmentDate',
'primaryPurpose',
'activeSitesCount',
'currentTrialStatus',
'startDate',
'completionDate',
'maxAgeInYears',
'minAgeInYears',
'gender',
'acceptsHealthyVolunteers',
'studySource',
'studyProtocolType',
'recordVerificationDate'])
sitesDf = pd.DataFrame(columns=['nciId',
'orgStateOrProvince',
'contactName',
'contactPhone',
'recruitmentStatusDate',
'orgAddressLine1',
'orgAddressLine2',
'orgVa',
'orgTty',
'orgFamily',
'orgPostalCode',
'contactEmail',
'recruitmentStatus',
'orgCity',
'orgEmail',
'orgCounty',
'orgFax',
'orgPhone',
'orgName'])
eligibilityDf = pd.DataFrame(columns=['nciId',
'inclusionIndicator',
'description'])
biomarkersDf = pd.DataFrame(columns=[
'nciId',
'eligibilityCriterion',
'inclusionIndicator',
'nciThesaurusConceptId',
'name',
'assayPurpose'
])
mainBiomarkersDf = pd.DataFrame(columns=[
'nciId',
'eligibilityCriterion',
'inclusionIndicator',
'nciThesaurusConceptId',
'name',
'assayPurpose'
])
diseasesDf = pd.DataFrame(columns=[
'nciId',
'inclusionIndicator',
'isLeadDisease',
'nciThesaurusConceptId',
'name'
])
mainToSubTypeRelsDf = pd.DataFrame(columns=[
'maintype',
'subtype'
])
mainDiseasesDf = pd.DataFrame(columns=[
'nciId',
'inclusionIndicator',
'isLeadDisease',
'nciThesaurusConceptId',
'name'
])
diseasesWithoutSynonymsDf = pd.DataFrame(columns=[
'nciId',
'inclusionIndicator',
'isLeadDisease',
'nciThesaurusConceptId',
'name'
])
subTypeDiseasesDf = pd.DataFrame(columns=[
'nciId',
'inclusionIndicator',
'isLeadDisease',
'nciThesaurusConceptId',
'name'
])
armsDf = pd.DataFrame(columns=[
'nciId',
'name',
'nciIdWithName',
'description',
'type'
])
interventionsDf = pd.DataFrame(columns=[
'nciId',
'arm',
'nciIdWithArm',
'type',
'inclusionIndicator',
'name',
'category',
'nciThesaurusConceptId',
'description'
])
mainInterventionsDf = pd.DataFrame(columns=[
'nciId',
'arm',
'nciIdWithArm',
'type',
'inclusionIndicator',
'name',
'category',
'nciThesaurusConceptId',
'description'
])
payload = {
'size': 50,
'trial_status': 'OPEN',
'from': trialNumFrom
}
response = requests.get(trialEndpoint, headers=headers, params=payload)
response.raise_for_status()
sectionJson = response.json()
trials = []
for trial in sectionJson['data']:
trials.append(createTrialDict(trial))
if trial['eligibility']['unstructured'] is not None:
#parsing the unstructured eligibility information from the trial
eligibilityInfo = []
for condition in trial['eligibility']['unstructured']:
eligibilityInfo.append({
'nciId': trial['nci_id'],
'inclusionIndicator': condition['inclusion_indicator'],
'description': condition['description']
})
conditionDf = pd.DataFrame.from_records(eligibilityInfo)
eligibilityDf = pd.concat([eligibilityDf, conditionDf], verify_integrity=True, ignore_index=True)
if trial['sites'] is not None:
#parsing the sites associated with the trial
sites = []
for site in trial['sites']:
sites.append(createSiteDict(trial, site))
siteDf = pd.DataFrame.from_records(sites)
sitesDf = pd.concat([sitesDf, siteDf], ignore_index=True, verify_integrity=True)
if trial['biomarkers'] is not None:
#parsing the biomarkers associated with the trial
biomarkers = []
mainBiomarkers = []
for biomarker in trial['biomarkers']:
# biomarkers.extend(createBiomarkersDicts(trial, biomarker))
mainBiomarkersDict = createMainBiomarkersDict(trial, biomarker)
if mainBiomarkersDict != {}:
mainBiomarkers.append(mainBiomarkersDict)
# biomarkerDf = pd.DataFrame.from_records(biomarkers)
# biomarkersDf = pd.concat([biomarkersDf, biomarkerDf], ignore_index=True, verify_integrity=True)
mainBiomarkerDf = pd.DataFrame.from_records(mainBiomarkers)
mainBiomarkersDf = pd.concat([mainBiomarkersDf, mainBiomarkerDf], ignore_index=True, verify_integrity=True)
if trial['diseases'] is not None:
# diseases = []
mainToSubTypeRel = []
mainDiseases = []
subTypeDiseases = []
diseasesWithoutSynonyms = []
for disease in trial['diseases']:
# diseasesDicts = createDiseasesDicts(trial, disease)
# diseases.extend(diseasesDicts)
mainDiseasesDict = createMainDiseasesDict(trial, disease)
if mainDiseasesDict != {}:
mainDiseases.append(mainDiseasesDict)
subTypeDiseasesDict = createSubTypeDiseasesDict(trial, disease)
if subTypeDiseasesDict != {}:
subTypeDiseases.append(subTypeDiseasesDict)
diseasesWithoutSynonymsDict = createDiseasesWithoutSynonymsDict(trial, disease)
if diseasesWithoutSynonymsDict != {}:
diseasesWithoutSynonyms.append(diseasesWithoutSynonymsDict)
mainToSubTypeRel.extend(createMainToSubTypeRelDicts(trial, disease))
# diseaseDf = pd.DataFrame.from_records(diseases)
# diseasesDf = pd.concat([diseasesDf, diseaseDf], ignore_index=True, verify_integrity=True)
mainToSubTypeRelDf = pd.DataFrame.from_records(mainToSubTypeRel)
mainToSubTypeRelsDf = pd.concat([mainToSubTypeRelsDf, mainToSubTypeRelDf], ignore_index=True, verify_integrity=True)
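# Illustrative sketch: the block above pulls a single page of 50 open trials
# starting at `trialNumFrom`. A paginated pull over the same endpoint could
# look like the function below; `trialEndpoint`, `headers`, `createTrialDict`
# and the requests/pandas imports are assumed to be defined earlier in this
# script, and `total` is a hypothetical upper bound on trials to fetch.
def fetch_open_trials_sketch(total, page_size=50):
    records = []
    for offset in range(0, total, page_size):
        page = requests.get(trialEndpoint,
                            headers=headers,
                            params={'size': page_size,
                                    'trial_status': 'OPEN',
                                    'from': offset})
        page.raise_for_status()
        records.extend(createTrialDict(t) for t in page.json()['data'])
    return pd.DataFrame.from_records(records)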
import os
import pandas as pd
import cv2
import scipy.stats as stat
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt
from .matplotlibstyle import *
import datetime
class Datahandler():
'Matches EL image paths to IV data based on input columns'
def __init__(self,workdir,ELfolderpath=None,IVfile=None):
'Initialize paths and create working directories'
self.dataset_id = None
# Create directory for computation on this dataset
self.pathDic = {
'workdir': workdir,
'ELfolderpath': ELfolderpath,
'IVfile': IVfile,
'figures': workdir+"figures\\",
'models': workdir+"models\\",
'traces': workdir+"traces\\",
'outputs': workdir+"outputs\\",
'Matchfile': workdir+"match.csv",
}
for key, value in self.pathDic.items():
if key in ['ELfolderpath','IVfile','Matchfile']: continue
if not os.path.exists(value): os.mkdir(value)
if os.path.exists(self.pathDic['Matchfile']):
self.loadMatchData()
def readEL(self):
'Read images from ELfolderpath and store in dataframe'
if not self.pathDic['ELfolderpath']: raise ValueError('ELfolderpath not defined')
images = []
for subdir,dirs,files in os.walk(self.pathDic['ELfolderpath']):
for file in files:
ext = os.path.splitext(file)[1]
if ext == ".db": continue
name = os.path.splitext(file)[0]
size = os.path.getsize(subdir+"\\"+file)
location = subdir+"\\"+file
line = size,ext,name,location
images.append(line)
self.ELdf = pd.DataFrame(images)
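# Illustrative usage sketch with placeholder Windows paths; readEL() fills
# self.ELdf with one (size, ext, name, location) row per image file found
# under ELfolderpath.
if __name__ == '__main__':
    dh = Datahandler(workdir="C:\\EL_analysis\\",
                     ELfolderpath="C:\\EL_analysis\\images",
                     IVfile="C:\\EL_analysis\\IV.csv")
    dh.readEL()
    print(dh.ELdf.head())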
import time
import gc
import cv2
import numpy as np
import pandas as pd
import tensorflow as tf
from traffic_analysis.d00_utils.bbox_helpers import (bboxcv2_to_bboxcvlib,
bboxcvlib_to_bboxcv2,
display_bboxes_on_frame,
color_bboxes,
bbox_intersection_over_union)
from traffic_analysis.d00_utils.video_helpers import write_mp4
from traffic_analysis.d04_modelling.traffic_analyser_interface import TrafficAnalyserInterface
from traffic_analysis.d04_modelling.tracking.vehicle_fleet import VehicleFleet
from traffic_analysis.d04_modelling.perform_detection_opencv import detect_objects_cv
from traffic_analysis.d04_modelling.perform_detection_tensorflow import detect_objects_tf
from traffic_analysis.d04_modelling.perform_detection_tensorflow import initialize_tensorflow_model
class TrackingAnalyser(TrafficAnalyserInterface):
def __init__(self, params: dict, paths: dict, blob_credentials: dict):
"""
(General parameters):
selected_labels -- labels which we wish to detect
Model-specific parameters initialized below:
(Object detection arguments:)
detection_model -- name of the object-detection model to use
detection_implementation -- implementation/backend used to run that model
detection_frequency -- re-run object detection every detection_frequency frames to pick up newly entered objects
detection_confidence_threshold -- confidence above which a detection is kept
detection_nms_threshold -- non-maximum-suppression threshold (YOLO parameter)
(Object tracking parameters)
tracking_model -- specify name of model you want to use for tracking (currently only supports OpenCV trackers)
iou_threshold -- specify threshold to use to decide whether two detected objs should be considered the same
(Stop start arguments:)
iou_convolution_window -- frame window size to perform iou computation on (to get an IOU time
series for each vehicle)
smoothing_method -- method to smooth the IOU time series for each vehicle
stop_start_iou_threshold -- threshold to binarize the IOU time series into 0 or 1, denoting "moving" or "stopped"
"""
super().__init__(params, paths)
# general settings
self.params = params
self.paths = paths
self.blob_credentials = blob_credentials
self.selected_labels = params['selected_labels']
# object detection settings
self.detection_model = params['detection_model']
# TODO: to be replaced in transfer learning PR
self.detection_implementation = params['detection_implementation']
self.detection_frequency = params['detection_frequency']
self.detection_confidence_threshold = params['detection_confidence_threshold']
self.detection_nms_threshold = params['detection_nms_threshold']
if self.detection_model == 'yolov3_tf':
self.sess = tf.Session()
self.model_initializer, self.init_data, self.detection_model = initialize_tensorflow_model(
params=self.params,
paths=self.paths,
s3_credentials=self.blob_credentials,
sess=self.sess)
# tracking settings
self.tracker_type = params['opencv_tracker_type']
self.trackers = []
self.iou_threshold = params['iou_threshold']
# post-processing for stop-starts settings
self.iou_convolution_window = params['iou_convolution_window']
self.smoothing_method = params['smoothing_method']
self.stop_start_iou_threshold = params['stop_start_iou_threshold']
# speedup settings
self.skip_no_of_frames = params['skip_no_of_frames']
def add_tracker(self):
tracker = self.create_tracker_by_name(
tracker_type=self.tracker_type)
if tracker:
self.trackers.append(tracker)
return tracker
def create_tracker_by_name(self, tracker_type: str):
"""Create tracker based on tracker name"""
tracker_types = {'boosting': cv2.TrackerBoosting_create(),
'mil': cv2.TrackerMIL_create(),
'kcf': cv2.TrackerKCF_create(),
'tld': cv2.TrackerTLD_create(),
'medianflow': cv2.TrackerMedianFlow_create(),
'goturn': cv2.TrackerGOTURN_create(),
'mosse': cv2.TrackerMOSSE_create(),
'csrt': cv2.TrackerCSRT_create()}
try:
return tracker_types[tracker_type]
except Exception as e:
print('Incorrect tracker name')
print('Available trackers are:')
print("\n".join(tracker_types.keys()))
return None
def determine_new_bboxes(self, bboxes_tracked: list, bboxes_detected: list) -> list:
"""Return the indices of "new" bboxes in bboxes_detected so that a new tracker can be added for that
Args:
bboxes_tracked: bboxes which are currently tracked. bboxes should be passed in in format (xmin,ymin,w,h)
bboxes_detected: bboxes which are newly detected. bboxes should be passed in in format (xmin,ymin,w,h)
iou_threshold: a detected bbox with iou below the iou_threshold (as compared to all existing, tracked bboxes)
will be considered new.
Returns:
new_bbox_inds: indices of newly detected bounding boxes
"""
new_bboxes_inds = set(range(len(bboxes_detected)))  # init with all inds
old_bboxes_inds = set()
for i, box_a in enumerate(bboxes_detected):
# if find a box which has high IOU with an already-tracked box, consider it an old box
for box_b in bboxes_tracked:
# format conversion needed
iou = bbox_intersection_over_union(
bboxcv2_to_bboxcvlib(box_a), bboxcv2_to_bboxcvlib(box_b))
if iou > self.iou_threshold: # assume bbox has already been tracked and is not new
old_bboxes_inds.add(i) # add to set
new_bboxes_inds = list(new_bboxes_inds.difference(old_bboxes_inds))
return new_bboxes_inds
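# Worked example of the rule above (illustrative): with iou_threshold = 0.5,
# a detection (10, 10, 50, 50) against a tracked box (12, 12, 50, 50) has
# IOU ~0.85 and is treated as already tracked, whereas a detection whose best
# IOU against every tracked box is 0.2 is reported as new.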
def add_to_multi_tracker(self,
multi_tracker: cv2.MultiTracker,
frame: np.ndarray,
frame_height: int,
frame_width: int,
bbox):
"""Add bbox to the multitracker as a new tracker
"""
try:
multi_tracker.add(newTracker=self.add_tracker(),
image=frame,
boundingBox=tuple(bbox))
except Exception as e:
# convert bbox
if getattr(self, 'verbose', False):
print(e)
print(f"bbox is {bbox}")
print("Retrying with bbox formatting corrections...")
if (bbox[0] <= bbox[2]) and (bbox[1] <= bbox[3]):
# format: (xmin, ymin, width, height)
bbox = bboxcvlib_to_bboxcv2(bbox)
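# (illustrative note) cvlib-style boxes are (xmin, ymin, xmax, ymax) while the
# OpenCV trackers expect (xmin, ymin, width, height); the branch above
# heuristically detects the former and converts it before clamping
# coordinates to the frame.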
for i in range(4):
# correct neg coords
if bbox[i] < 0:
bbox[i] = 0
# check ind coords don't go outside frame
if i % 2 == 0: # xmin and width
if bbox[i] > frame_width:
bbox[i] = frame_width
else: # ymin and height
if bbox[i] > frame_height:
bbox[i] = frame_height
# check sum of coords don't go outside frame
if bbox[0] + bbox[2] > frame_width:
bbox[2] = frame_width - bbox[0]
if bbox[1] + bbox[3] > frame_height:
bbox[3] = frame_height - bbox[1]
try:
multi_tracker.add(newTracker=self.add_tracker(),
image=frame,
boundingBox=tuple(bbox))
except:
raise
def detect_and_track_objects(self,
video: np.ndarray,
video_name: str,
video_time_length=10,
make_video=False,
local_mp4_dir: str = None) -> VehicleFleet:
"""Code to track
This function will initialize a specified tracking algorithm
(currently only supports OpenCV's built in multitracker methods) with the specified object
detection algorithm. Each detection_frequency frames, the object detection will be run again
to detect any new objects which have entered the frame. A VehicleFleet object is used to track the
initial detection confidence and label for each vehicle as it is detected, and the updated locations
of the bounding boxes for each vehicle each frame. The VehicleFleet object also performs IOU computations
on the stored bounding box information to get counts and stop starts.
Args:
video -- np array in format (frame_count,frame_height,frame_width,3)
video_name -- name of video to run on (include .mp4 extension)
video_time_length -- specify length of video
make_video -- if true, will write video to local_mp4_dir with name local_mp4_name_tracked.mp4
local_mp4_dir -- path to directory to store video in
Returns:
fleet -- VehicleFleet object containing bbox history for all vehicles tracked
"""
start_time = time.time()
# Create a video capture object to read videos
n_frames, frame_height, frame_width = video.shape[:3]
# assumes vid_length in seconds
video_frames_per_sec = int(n_frames / video_time_length)
# TODO: Disentangle the two parameters: length of tracking without re-detection vs. number of skipped frames
frame_interval = self.skip_no_of_frames + 1
frame_detection_inds = np.arange(0, n_frames,
max(1, self.skip_no_of_frames * frame_interval))
frames = video[frame_detection_inds, :, :, :]
all_bboxes, all_labels, all_confs = self.detect_objects_in_frames(frames)
bboxes = all_bboxes[0]
labels = all_labels[0]
confs = all_confs[0]
# store info returned above in vehicleFleet object
fleet = VehicleFleet(bboxes=np.array(bboxes),
labels=np.array(labels),
confs=np.array(confs),
video_name=video_name.replace(".mp4", ""))
# Create MultiTracker object using bboxes, initialize multitracker
multi_tracker = cv2.MultiTracker_create()
for bbox in bboxes:
self.add_to_multi_tracker(multi_tracker=multi_tracker,
frame=video[0, :, :, :],
frame_height=frame_height,
frame_width=frame_width,
bbox=bbox)
if make_video:
processed_video = []
print(f"The number of frames is {n_frames}")
previous_frame_index = 0
# Process video and track objects
for frame_ind in range(1, n_frames):
if (frame_ind % frame_interval) and (frame_ind + frame_interval) <= n_frames:
continue
frame = video[frame_ind, :, :, :]
# get updated location of objects in subsequent frames, update fleet obj
success, bboxes_tracked = multi_tracker.update(
image=frame)
for _ in range(frame_ind - previous_frame_index):
fleet.update_vehicles(np.array(bboxes_tracked))
previous_frame_index = frame_ind
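# (illustrative note) update_vehicles is called once per skipped frame so the
# stored bbox history stays aligned with real frame indices even though the
# tracker only ran on every frame_interval-th frame.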
if make_video:
# draw tracked objects
display_bboxes_on_frame(frame, bboxes_tracked,
color_bboxes(fleet.labels),
fleet.compute_label_confs())
# every x frames, re-detect boxes
if frame_ind in frame_detection_inds.tolist():
ind = int(np.squeeze(
np.where(frame_detection_inds == frame_ind)))
if ind >= len(all_bboxes):
ind = -1
bboxes_detected = all_bboxes[ind]
labels_detected = all_labels[ind]
confs_detected = all_confs[ind]
# re-initialize MultiTracker
new_bbox_inds = self.determine_new_bboxes(bboxes_tracked,
bboxes_detected)
# update fleet object
if len(new_bbox_inds) > 0:
new_bboxes = [bboxes_detected[i] for i in new_bbox_inds]
new_labels = [labels_detected[i] for i in new_bbox_inds]
new_confs = [confs_detected[i] for i in new_bbox_inds]
fleet.add_vehicles(np.array(new_bboxes),
np.array(new_labels),
np.array(new_confs))
# iterate through new bboxes
for new_bbox in new_bboxes:
self.add_to_multi_tracker(multi_tracker=multi_tracker,
frame=frame,
frame_height=frame_height,
frame_width=frame_width,
bbox=new_bbox)
if make_video:
processed_video.append(frame)
# code to display video frame by frame while it is being processed
# cv2.imshow('MultiTracker', frame)
# # quit on ESC button
# if cv2.waitKey(1) & 0xFF == 27: # Esc pressed
# break
if make_video:
write_mp4(local_mp4_dir=local_mp4_dir,
mp4_name=video_name + "_tracked.mp4",
video=np.array(processed_video),
fps=video_frames_per_sec)
print(
f'Run time of tracking analyser for one video is {time.time() - start_time} seconds. \nSkipped {frame_interval-1} frames.\nNumber of frames is {fleet.bboxes.shape[2]}.')
print('Run time of tracking analyser for one video is %s seconds' %
(time.time() - start_time))
return fleet
def detect_objects_in_frames(self, frames):
all_bboxes = []
all_labels = []
all_confs = []
if self.detection_model == 'yolov3' or self.detection_model == 'yolov3-tiny':
for frame in frames:
bboxes, labels, confs = detect_objects_cv(image_capture=frame,
params=self.params,
paths=self.paths,
blob_credentials=self.blob_credentials,
selected_labels=self.selected_labels)
all_bboxes.append(bboxes)
all_labels.append(labels)
all_confs.append(confs)
elif self.detection_model == 'yolov3_tf':
all_bboxes, all_labels, all_confs = detect_objects_tf(images=frames,
paths=self.paths,
detection_model=self.detection_model,
model_initializer=self.model_initializer,
init_data=self.init_data,
sess=self.sess,
selected_labels=self.selected_labels)
return all_bboxes, all_labels, all_confs
def cleanup_on_finish(self):
if self.detection_model == 'yolov3_tf' or self.detection_model == 'traffic_tf':
self.sess.close()
tf.reset_default_graph()
gc.collect()
def construct_frame_level_df(self, video_dict) -> pd.DataFrame:
"""Construct frame level df for multiple videos
Args:
video_dict: key is video filename, key is np array of videos
Returns:
pd Dataframe of all frame level info
"""
# Drop videos that are too short to be valid (e.g. clips from an in-use camera)
for video_name in list(video_dict.keys()):
n_frames = video_dict[video_name].shape[0]
if n_frames < 75:
del video_dict[video_name]
print("Video ", video_name,
" has been removed from processing because it may be invalid")
frame_info_list = []
if not len(video_dict):
return None
for video_name, video in video_dict.items():
fleet = self.detect_and_track_objects(video, video_name)
single_frame_level_df = fleet.report_frame_level_info()
frame_info_list.append(single_frame_level_df)
return pd.concat(frame_info_list)
def construct_video_level_df(self, frame_level_df) -> pd.DataFrame:
"""Construct video-level stats table using tracking techniques
Args:
frame_level_df -- df returned by above function
Returns:
pd DataFrame of all video level info
"""
if frame_level_df.empty:
return frame_level_df
video_info_list = []
for _, single_frame_level_df in frame_level_df.groupby(['camera_id', 'video_upload_datetime']):
fleet = VehicleFleet(
frame_level_df=single_frame_level_df, load_from_pd=True)
# compute the convolved IOU time series for each vehicle and smooth
fleet.compute_iou_time_series(interval=self.iou_convolution_window)
fleet.smooth_iou_time_series(
smoothing_method=self.smoothing_method)
# sample plotting options
# fleet.plot_iou_time_series(fig_dir="data", fig_name="param_tuning", smoothed=True)
video_level_df = fleet.report_video_level_stats(fleet.compute_counts(),
*fleet.compute_stop_starts(self.stop_start_iou_threshold))
video_info_list.append(video_level_df)
return pd.concat(video_info_list)
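# Illustrative usage sketch: the keys below mirror the parameters read in
# __init__, but the values are placeholders and paths/blob_credentials are
# assumed to come from the project's configuration loading, so the calls are
# left commented out.
if __name__ == '__main__':
    example_params = {
        'selected_labels': ['car', 'bus', 'truck'],
        'detection_model': 'yolov3',
        'detection_implementation': 'yolov3',
        'detection_frequency': 25,
        'detection_confidence_threshold': 0.5,
        'detection_nms_threshold': 0.4,
        'opencv_tracker_type': 'csrt',
        'iou_threshold': 0.5,
        'iou_convolution_window': 15,
        'smoothing_method': 'moving_average',
        'stop_start_iou_threshold': 0.85,
        'skip_no_of_frames': 2,
    }
    # analyser = TrackingAnalyser(params=example_params, paths=paths, blob_credentials=creds)
    # frame_df = analyser.construct_frame_level_df(video_dict)  # video_dict: name -> (n_frames, h, w, 3) array
    # video_df = analyser.construct_video_level_df(frame_df)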
"""
Module for implementation of BHPS education data to daedalus frame.
"""
import pandas as pd
from vivarium.framework.utilities import rate_to_probability
from pathlib import Path
import random
import os
import subprocess # For running R scripts in shell.
class Employment:
""" Main class for application of employment data to BHPS."""
@property
def name(self):
return "employment"
def __repr__(self):
return "Employment()"
@staticmethod
def write_config(config):
""" Update config file with what this module needs to run.
Parameters
----------
config : vivarium.config_tree.ConfigTree
Config yaml tree for AngryMob.
Returns
-------
config : vivarium.config_tree.ConfigTree
Config yaml tree for AngryMob with added items needed for this module to run.
"""
# load in all possible job sectors to config
job_sectors = pd.read_csv("metadata/job_sector_levels.csv")
job_roles = pd.read_csv("metadata/job_role_levels.csv")
config.update({
"job_sectors": job_sectors,
"job_roles": job_roles
},
source=str(Path(__file__).resolve()))
return config
def pre_setup(self, config, simulation):
""" Load in anything required for the module to run.
Parameters
----------
config : vivarium.config_tree.ConfigTree
Config yaml tree for vivarium with the items needed for this module to run
simulation : vivarium.interface.interactive.InteractiveContext
The initiated vivarium simulation object before simulation.setup() is run.
Returns
-------
simulation : vivarium.interface.interactive.InteractiveContext
The initiated vivarium simulation object with anything needed to run the module.
E.g. rate tables.
"""
return simulation
def setup(self, builder):
""" Method for initialising the employment module.
Parameters
----------
builder : vivarium.builder
Vivarium's control object. Stores all simulation metadata and allows modules to use it.
"""
# load in any data
self.builder = builder
self.config = builder.configuration
self.job_sectors = self.config.job_sectors
self.job_roles = self.config.job_roles
# create new columns to be added by this module
columns_created = ["labour_duration", "job_duration", "role_duration"]
view_columns = columns_created +['pidp',
'age',
'sex',
'education_state',
'ethnicity',
'alive',
'time',
'depression_state',
'labour_state',
'job_industry',
'job_occupation',
'job_sec',
'job_duration_m',
'job_duration_y']
self.population_view = builder.population.get_view(view_columns)
builder.population.initializes_simulants(self.on_initialize_simulants,
creates_columns=columns_created)
# register value rate producers.
# one for redeployment and role change.
#self.labour_change_rate_producer = builder.value.register_rate_producer('labour_change_rate',
# source=self.calculate_labour_change_rate)
self.job_change_rate_producer = builder.value.register_rate_producer('job_change_rate',
source=self.calculate_job_change_rate)
self.role_change_rate_producer = builder.value.register_rate_producer('role_change_rate',
source=self.calculate_role_change_rate)
# priority 1 below death. not much point changing jobs if you're dead.
# CRN stream for the module. may be worth disabling later for "true" random employment.
self.random = builder.randomness.get_stream('employment_handler')
# registering any modifiers.
# depression modifier changes rate according to current employment
# the unemployed/ high level jobs are more likely to induce depression etc.
# adjusting so higher depression under certain employment circumstances.
self.employment_modifier1 = builder.value.register_value_modifier("depression_tier1_rate",
self.employment_depression_modifier)
self.employment_modifier0 = builder.value.register_value_modifier("depression_tier0_rate",
self.employment_depression_modifier)
# register event listeners.
# priority 1 below death.
employment_time_step = builder.event.register_listener("time_step", self.on_time_step, priority=1)
# load in any other required components
def on_initialize_simulants(self, pop_data):
""" Module for when the vivarium builder.randomness.register_simulants() is run.
Parameters
----------
pop_data: vivarium.framework.population.SimulantData
`pop_data` is a custom vivarium class for interacting with the population data frame.
It is essentially a pandas DataFrame with a few extra attributes such as the creation_time,
creation_window, and current simulation state (setup/running/etc.).
Returns
-------
None.
"""
job_sectors = self.job_sectors["job_sectors"]
job_roles = self.job_roles["job_roles"]
n = len(pop_data.index)
pop_update = pd.DataFrame({"labour_duration": 0.,
"role_duration": 0.},
index=pop_data.index)
if pop_data.user_data["sim_state"] == "setup":
#TODO initate labour state duration properly.
pass
elif pop_data.user_data["cohort_type"] == "replenishment":
pass
elif pop_data.user_data["cohort_type"] == "births":
# Default newborns to students with no labour state history.
# These are filler for US variables.
#
pop_update["labour_state"] = "Student"
pop_update["job_sec"] = 0
pop_update["job_industry"] = 0
pop_update["job_occupation"] = 0
pop_update["job_duration_m"] = 0
pop_update["job_duration_y"] = 0
self.population_view.update(pop_update)
def on_time_step(self, event):
""" What happens in the employment module on every simulation timestep.
Parameters
----------
event : vivarium.builder.event
The time_step `event` is used here. I.E whenever the simulation is stepped forwards.
Returns
-------
"""
# Grab anyone alive of working age.
pop = self.population_view.get(event.index, query="alive =='alive' and age >= 16")
self.current_year = event.time.year
# Changing labour state.
labour_change_df = self.labour_state_probabilities(pop.index)
# calculate probabilities of changing role/job sector.
job_change_df = pd.DataFrame(index=pop.index)
job_change_df["change"] = rate_to_probability(pd.DataFrame(self.job_change_rate_producer(pop.index)))
job_change_df["no_change"] = 1 - job_change_df["change"]
role_change_df = pd.DataFrame(index=pop.index)
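# Illustrative sketch of the conversion used above: vivarium's
# rate_to_probability turns a per-time-step rate r into a probability of
# roughly 1 - exp(-r), so each row's change/no_change pair sums to 1.
import numpy as np

def _rate_to_probability_sketch(rate):
    """Standalone approximation of vivarium.framework.utilities.rate_to_probability."""
    return 1.0 - np.exp(-np.asarray(rate, dtype=float))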
import sys
import json
import csv
import pandas as pd
from datetime import datetime
def find_news(lang):
response = []
x = []
values = []
df = pd.DataFrame()
# Copyright (c) 2020 Huawei Technologies Co., Ltd.
# <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from src.compress import compress
import pandas as pd
if __name__ == '__main__':
all_file = compress.get_all_csv_file(compress.cpe_unzip_path)
new_file = list(filter(lambda x: 'export_monitordata' in x, all_file))
print(new_file)
filter_df = pd.read_csv(new_file[0], error_bad_lines=False, index_col=False)
filter_df = filter_df[(filter_df['ESN'] == 'YRE7S17B25000055') | (filter_df['ESN'] == 'YRE7S18209004018') | (
filter_df['ESN'] == 'YRE7S17B25003854')]
for f in new_file[1:]:
print(f)
df = pd.read_csv(f, error_bad_lines=False, index_col=False)
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: pd.Timestamp("2013-04-12 00:00:00"),
346: pd.Timestamp("2013-04-13 00:00:00"),
347: pd.Timestamp("2013-04-14 00:00:00"),
348: pd.Timestamp("2013-04-15 00:00:00"),
349: pd.Timestamp("2013-04-16 00:00:00"),
350: pd.Timestamp("2013-04-17 00:00:00"),
351: pd.Timestamp("2013-04-18 00:00:00"),
352: pd.Timestamp("2013-04-19 00:00:00"),
353: pd.Timestamp("2013-04-20 00:00:00"),
354: pd.Timestamp("2013-04-21 00:00:00"),
355: pd.Timestamp("2013-04-22 00:00:00"),
356: pd.Timestamp("2013-04-23 00:00:00"),
357: pd.Timestamp("2013-04-24 00:00:00"),
358: pd.Timestamp("2013-04-25 00:00:00"),
359: pd.Timestamp("2013-04-26 00:00:00"),
360: pd.Timestamp("2013-04-27 00:00:00"),
361: pd.Timestamp("2013-04-28 00:00:00"),
362: pd.Timestamp("2013-04-29 00:00:00"),
363: pd.Timestamp("2013-04-30 00:00:00"),
364: pd.Timestamp("2013-05-01 00:00:00"),
365: pd.Timestamp("2013-05-02 00:00:00"),
366: pd.Timestamp("2013-05-03 00:00:00"),
367: pd.Timestamp("2013-05-04 00:00:00"),
368: pd.Timestamp("2013-05-05 00:00:00"),
369: pd.Timestamp("2013-05-06 00:00:00"),
370: pd.Timestamp("2013-05-07 00:00:00"),
371: pd.Timestamp("2013-05-08 00:00:00"),
372: pd.Timestamp("2013-05-09 00:00:00"),
373: pd.Timestamp("2013-05-10 00:00:00"),
374: pd.Timestamp("2013-05-11 00:00:00"),
375: pd.Timestamp("2013-05-12 00:00:00"),
376: pd.Timestamp("2013-05-13 00:00:00"),
377: pd.Timestamp("2013-05-14 00:00:00"),
378: pd.Timestamp("2013-05-15 00:00:00"),
379: pd.Timestamp("2013-05-16 00:00:00"),
380: pd.Timestamp("2013-05-17 00:00:00"),
381: pd.Timestamp("2013-05-18 00:00:00"),
382: pd.Timestamp("2013-05-19 00:00:00"),
383: pd.Timestamp("2013-05-20 00:00:00"),
384: pd.Timestamp("2013-05-21 00:00:00"),
385: pd.Timestamp("2013-05-22 00:00:00"),
import numpy as np
import pandas as pd
import matplotlib.image as mpimg
from typing import Tuple
from .config import _dir
_data_dir = '%s/input' % _dir
cell_types = ['HEPG2', 'HUVEC', 'RPE', 'U2OS']
positive_control = 1108
negative_control = 1138
nsirna = 1108 # excluding 30 positive_control + 1 negative_control
plate_shape = (14, 22)
def set_data_dir(d):
global _data_dir
_data_dir = d
def load_header(set_type):
"""
Args:
set_type (str): train or test
id_code experiment plate well sirna well_type cell_type
HEPG2-01_1_B03 HEPG2-01 1 B03 513 positive_control HEPG2
"""
df = pd.read_csv('%s/%s.csv' % (_data_dir, set_type))
df_controls = pd.read_csv('%s/%s_controls.csv' % (_data_dir, set_type))
if set_type == 'train':
df.insert(5, 'well_type', 'train')
else:
df.insert(4, 'sirna', -1)
df.insert(5, 'well_type', 'test')
df_all = pd.concat([df, df_controls], sort=False)
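# Illustrative note: the concat above stacks the experiment wells
# (well_type 'train' or 'test') with the plate controls into one header
# frame; downstream code could split them again with something like
#
#   controls = df_all[df_all['well_type'].str.contains('control')]
#   wells = df_all[df_all['well_type'].isin(['train', 'test'])]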
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
class CoercionBase(object):
klasses = ['index', 'series']
dtypes = ['object', 'int64', 'float64', 'complex128', 'bool',
'datetime64', 'datetime64tz', 'timedelta64', 'period']
@property
def method(self):
raise NotImplementedError(self)
def _assert(self, left, right, dtype):
# explicitly check dtype to avoid any unexpected result
if isinstance(left, pd.Series):
tm.assert_series_equal(left, right)
elif isinstance(left, pd.Index):
tm.assert_index_equal(left, right)
else:
raise NotImplementedError
self.assertEqual(left.dtype, dtype)
self.assertEqual(right.dtype, dtype)
def test_has_comprehensive_tests(self):
for klass in self.klasses:
for dtype in self.dtypes:
method_name = 'test_{0}_{1}_{2}'.format(self.method,
klass, dtype)
if not hasattr(self, method_name):
msg = 'test method is not defined: {0}, {1}'
raise AssertionError(msg.format(type(self), method_name))
class TestSetitemCoercion(CoercionBase, tm.TestCase):
method = 'setitem'
def _assert_setitem_series_conversion(self, original_series, loc_value,
expected_series, expected_dtype):
""" test series value's coercion triggered by assignment """
temp = original_series.copy()
temp[1] = loc_value
tm.assert_series_equal(temp, expected_series)
# check dtype explicitly for sure
self.assertEqual(temp.dtype, expected_dtype)
# .loc works different rule, temporary disable
# temp = original_series.copy()
# temp.loc[1] = loc_value
# tm.assert_series_equal(temp, expected_series)
def test_setitem_series_object(self):
obj = pd.Series(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Series(['a', 1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Series(['a', 1.1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = pd.Series(['a', 1 + 1j, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = pd.Series(['a', True, 'c', 'd'])
self._assert_setitem_series_conversion(obj, True, exp, np.object)
def test_setitem_series_int64(self):
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1, exp, np.int64)
# int + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64)
# int + complex -> complex
exp = pd.Series([1, 1 + 1j, 3, 4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# int + bool -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, True, exp, np.int64)
def test_setitem_series_float64(self):
obj = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Series([1.1, 1.1, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.float64)
# float + complex -> complex
exp = pd.Series([1.1, 1 + 1j, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp,
np.complex128)
# float + bool -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, True, exp, np.float64)
def test_setitem_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
def test_setitem_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
# bool + int -> int
# TODO_GH12747 The result must be int
# tm.assert_series_equal(temp, pd.Series([1, 1, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1, exp, np.bool)
# TODO_GH12747 The result must be int
# assigning int greater than bool
# tm.assert_series_equal(temp, pd.Series([1, 3, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 3, exp, np.bool)
# bool + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1., 1.1, 1., 0.]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.bool)
# bool + complex -> complex (buggy, results in bool)
# TODO_GH12747 The result must be complex
# tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 1, 0]))
# self.assertEqual(temp.dtype, np.complex128)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.bool)
# bool + bool -> bool
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, True, exp, np.bool)
def test_setitem_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, 1, exp, 'datetime64[ns]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2011-01-02', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz -> datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_setitem_series_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp(1, tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_setitem_series_conversion(obj, 1, exp,
'datetime64[ns, US/Eastern]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_timedelta64(self):
pass
def test_setitem_series_period(self):
pass
def _assert_setitem_index_conversion(self, original_series, loc_key,
expected_index, expected_dtype):
""" test index's coercion triggered by assign key """
temp = original_series.copy()
temp[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
temp = original_series.copy()
temp.loc[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
def test_setitem_index_object(self):
obj = pd.Series([1, 2, 3, 4], index=list('abcd'))
self.assertEqual(obj.index.dtype, np.object)
# object + object -> object
exp_index = pd.Index(list('abcdx'))
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
# object + int -> IndexError, regarded as location
temp = obj.copy()
with tm.assertRaises(IndexError):
temp[5] = 5
# object + float -> object
exp_index = pd.Index(['a', 'b', 'c', 'd', 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.object)
def test_setitem_index_int64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.index.dtype, np.int64)
# int + int -> int
exp_index = pd.Index([0, 1, 2, 3, 5])
self._assert_setitem_index_conversion(obj, 5, exp_index, np.int64)
# int + float -> float
exp_index = pd.Index([0, 1, 2, 3, 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.float64)
# int + object -> object
exp_index = pd.Index([0, 1, 2, 3, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_float64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
self.assertEqual(obj.index.dtype, np.float64)
# float + int -> int
temp = obj.copy()
# TODO_GH12747 The result must be float
with tm.assertRaises(IndexError):
temp[5] = 5
# float + float -> float
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 5.1])
self._assert_setitem_index_conversion(obj, 5.1, exp_index, np.float64)
# float + object -> object
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_complex128(self):
pass
def test_setitem_index_bool(self):
pass
def test_setitem_index_datetime64(self):
pass
def test_setitem_index_datetime64tz(self):
pass
def test_setitem_index_timedelta64(self):
pass
def test_setitem_index_period(self):
pass
class TestInsertIndexCoercion(CoercionBase, tm.TestCase):
klasses = ['index']
method = 'insert'
def _assert_insert_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by insert """
target = original.copy()
res = target.insert(1, value)
tm.assert_index_equal(res, expected)
self.assertEqual(res.dtype, expected_dtype)
def test_insert_index_object(self):
obj = pd.Index(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Index(['a', 1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Index(['a', 1.1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1.1, exp, np.object)
# object + bool -> object
res = obj.insert(1, False)
tm.assert_index_equal(res, pd.Index(['a', False, 'b', 'c', 'd']))
self.assertEqual(res.dtype, np.object)
# object + object -> object
exp = pd.Index(['a', 'x', 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_int64(self):
obj = pd.Int64Index([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Index([1, 1, 2, 3, 4])
self._assert_insert_conversion(obj, 1, exp, np.int64)
# int + float -> float
exp = pd.Index([1, 1.1, 2, 3, 4])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# int + bool -> int
exp = pd.Index([1, 0, 2, 3, 4])
self._assert_insert_conversion(obj, False, exp, np.int64)
# int + object -> object
exp = pd.Index([1, 'x', 2, 3, 4])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_float64(self):
obj = pd.Float64Index([1., 2., 3., 4.])
self.assertEqual(obj.dtype, np.float64)
# float + int -> int
exp = pd.Index([1., 1., 2., 3., 4.])
self._assert_insert_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Index([1., 1.1, 2., 3., 4.])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# float + bool -> float
exp = pd.Index([1., 0., 2., 3., 4.])
self._assert_insert_conversion(obj, False, exp, np.float64)
# float + object -> object
exp = pd.Index([1., 'x', 2., 3., 4.])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_complex128(self):
pass
def test_insert_index_bool(self):
pass
def test_insert_index_datetime64(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'])
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_datetime64tz(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'], tz='US/Eastern')
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'], tz='US/Eastern')
val = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_insert_conversion(obj, val, exp,
'datetime64[ns, US/Eastern]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='Asia/Tokyo'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_timedelta64(self):
obj = pd.TimedeltaIndex(['1 day', '2 day', '3 day', '4 day'])
self.assertEqual(obj.dtype, 'timedelta64[ns]')
# timedelta64 + timedelta64 => timedelta64
exp = pd.TimedeltaIndex(['1 day', '10 day', '2 day', '3 day', '4 day'])
self._assert_insert_conversion(obj, pd.Timedelta('10 day'),
exp, 'timedelta64[ns]')
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_period(self):
obj = pd.PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'],
freq='M')
self.assertEqual(obj.dtype, 'period[M]')
# period + period => period
exp = pd.PeriodIndex(['2011-01', '2012-01', '2011-02',
'2011-03', '2011-04'], freq='M')
self._assert_insert_conversion(obj, pd.Period('2012-01', freq='M'),
exp, 'period[M]')
# period + datetime64 => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
pd.Timestamp('2012-01-01'),
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, np.object)
# period + int => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
1,
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 1, exp, np.object)
# period + object => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
'x',
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 'x', exp, np.object)
class TestWhereCoercion(CoercionBase, tm.TestCase):
method = 'where'
def _assert_where_conversion(self, original, cond, values,
expected, expected_dtype):
""" test coercion triggered by where """
target = original.copy()
res = target.where(cond, values)
self._assert(res, expected, expected_dtype)
def _where_object_common(self, klass):
obj = klass(list('abcd'))
self.assertEqual(obj.dtype, np.object)
cond = klass([True, False, True, False])
# object + int -> object
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, 1, exp, np.object)
values = klass([5, 6, 7, 8])
exp = klass(['a', 6, 'c', 8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + float -> object
exp = klass(['a', 1.1, 'c', 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.object)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass(['a', 6.6, 'c', 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + complex -> object
exp = klass(['a', 1 + 1j, 'c', 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.object)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass(['a', 6 + 6j, 'c', 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.object)
if klass is pd.Series:
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', 0, 'c', 1])
self._assert_where_conversion(obj, cond, values, exp, np.object)
elif klass is pd.Index:
# object + bool -> object
exp = klass(['a', True, 'c', True])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', False, 'c', True])
self._assert_where_conversion(obj, cond, values, exp, np.object)
else:
            raise NotImplementedError
def test_where_series_object(self):
self._where_object_common(pd.Series)
def test_where_index_object(self):
self._where_object_common(pd.Index)
def _where_int64_common(self, klass):
obj = klass([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
cond = klass([True, False, True, False])
# int + int -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = klass([5, 6, 7, 8])
exp = klass([1, 6, 3, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# int + float -> float
exp = klass([1, 1.1, 3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1, 6.6, 3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# int + complex -> complex
if klass is pd.Series:
exp = klass([1, 1 + 1j, 3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1, 6 + 6j, 3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# int + bool -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, True, exp, np.int64)
values = klass([True, False, True, True])
exp = klass([1, 0, 3, 1])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
def test_where_series_int64(self):
self._where_int64_common(pd.Series)
def test_where_index_int64(self):
self._where_int64_common(pd.Index)
def _where_float64_common(self, klass):
obj = klass([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
cond = klass([True, False, True, False])
# float + int -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, 1, exp, np.float64)
values = klass([5, 6, 7, 8])
exp = klass([1.1, 6.0, 3.3, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + float -> float
exp = klass([1.1, 1.1, 3.3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1.1, 6.6, 3.3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + complex -> complex
if klass is pd.Series:
exp = klass([1.1, 1 + 1j, 3.3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1.1, 6 + 6j, 3.3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# float + bool -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, True, exp, np.float64)
values = klass([True, False, True, True])
exp = klass([1.1, 0.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
def test_where_series_float64(self):
self._where_float64_common(pd.Series)
def test_where_index_float64(self):
self._where_float64_common(pd.Index)
def test_where_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
cond = pd.Series([True, False, True, False])
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.complex128)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1 + 1j, 6.0, 3 + 3j, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.complex128)
values = pd.Series([5.5, 6.6, 7.7, 8.8])
exp = pd.Series([1 + 1j, 6.6, 3 + 3j, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = pd.Series([1 + 1j, 6 + 6j, 3 + 3j, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, True, exp, np.complex128)
values = pd.Series([True, False, True, True])
exp = pd.Series([1 + 1j, 0, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
def test_where_index_complex128(self):
pass
def test_where_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
cond = pd.Series([True, False, True, False])
# bool + int -> int
exp = pd.Series([1, 1, 1, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1, 6, 1, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# bool + float -> float
exp = pd.Series([1.0, 1.1, 1.0, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = pd.Series([5.5, 6.6, 7.7, 8.8])
exp = pd.Series([1.0, 6.6, 1.0, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# bool + complex -> complex
exp = pd.Series([1, 1 + 1j, 1, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = pd.Series([1, 6 + 6j, 1, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# bool + bool -> bool
exp = pd.Series([True, True, True, True])
self._assert_where_conversion(obj, cond, True, exp, np.bool)
values = pd.Series([True, False, True, True])
exp = pd.Series([True, False, True, True])
self._assert_where_conversion(obj, cond, values, exp, np.bool)
def test_where_index_bool(self):
pass
def test_where_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
cond = pd.Series([True, False, True, False])
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-01')])
self._assert_where_conversion(obj, cond, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
values = pd.Series([pd.Timestamp('2012-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2012-01-03'),
pd.Timestamp('2012-01-04')])
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
# ToDo: coerce to object
msg = "cannot coerce a Timestamp with a tz on a naive Block"
with tm.assertRaisesRegexp(TypeError, msg):
obj.where(cond, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: do not coerce to UTC, must be object
values = pd.Series([pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2012-01-02', tz='US/Eastern'),
pd.Timestamp('2012-01-03', tz='US/Eastern'),
pd.Timestamp('2012-01-04', tz='US/Eastern')])
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02 05:00'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04 05:00')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
def test_where_index_datetime64(self):
obj = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
cond = pd.Index([True, False, True, False])
# datetime64 + datetime64 -> datetime64
# must support scalar
msg = "cannot coerce a Timestamp with a tz on a naive Block"
with tm.assertRaises(TypeError):
obj.where(cond, pd.Timestamp('2012-01-01'))
values = pd.Index([pd.Timestamp('2012-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2012-01-03'),
pd.Timestamp('2012-01-04')])
exp = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
# ToDo: coerce to object
msg = ("Index\\(\\.\\.\\.\\) must be called with a collection "
"of some kind")
with tm.assertRaisesRegexp(TypeError, msg):
obj.where(cond, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: do not ignore timezone, must be object
values = pd.Index([pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2012-01-02', tz='US/Eastern'),
pd.Timestamp('2012-01-03', tz='US/Eastern'),
pd.Timestamp('2012-01-04', tz='US/Eastern')])
exp = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
def test_where_series_datetime64tz(self):
pass
def test_where_series_timedelta64(self):
pass
def test_where_series_period(self):
pass
def test_where_index_datetime64tz(self):
pass
def test_where_index_timedelta64(self):
pass
def test_where_index_period(self):
pass
class TestFillnaSeriesCoercion(CoercionBase, tm.TestCase):
    # not indexing, but placed here for consistency
method = 'fillna'
def _assert_fillna_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by fillna """
target = original.copy()
res = target.fillna(value)
self._assert(res, expected, expected_dtype)
def _fillna_object_common(self, klass):
obj = klass(['a', np.nan, 'c', 'd'])
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = klass(['a', 1, 'c', 'd'])
self._assert_fillna_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = klass(['a', 1.1, 'c', 'd'])
self._assert_fillna_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = klass(['a', 1 + 1j, 'c', 'd'])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = klass(['a', True, 'c', 'd'])
self._assert_fillna_conversion(obj, True, exp, np.object)
def test_fillna_series_object(self):
self._fillna_object_common(pd.Series)
def test_fillna_index_object(self):
self._fillna_object_common(pd.Index)
def test_fillna_series_int64(self):
# int can't hold NaN
pass
def test_fillna_index_int64(self):
pass
def _fillna_float64_common(self, klass):
obj = klass([1.1, np.nan, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = klass([1.1, 1.0, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = klass([1.1, 1.1, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1.1, exp, np.float64)
if klass is pd.Series:
# float + complex -> complex
exp = klass([1.1, 1 + 1j, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.complex128)
elif klass is pd.Index:
# float + complex -> object
exp = klass([1.1, 1 + 1j, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.object)
else:
            raise NotImplementedError
# float + bool -> float
exp = klass([1.1, 1.0, 3.3, 4.4])
self._assert_fillna_conversion(obj, True, exp, np.float64)
def test_fillna_series_float64(self):
self._fillna_float64_common(pd.Series)
def test_fillna_index_float64(self):
self._fillna_float64_common(pd.Index)
def test_fillna_series_complex128(self):
obj = pd.Series([1 + 1j, np.nan, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, 1, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, True, exp, np.complex128)
def test_fillna_index_complex128(self):
self._fillna_float64_common(pd.Index)
def test_fillna_series_bool(self):
# bool can't hold NaN
pass
def test_fillna_index_bool(self):
pass
def test_fillna_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.NaT,
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + datetime64tz => object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
value = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_fillna_conversion(obj, value, exp, np.object)
# datetime64 + int => object
# ToDo: must be coerced to object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, 1, exp, 'datetime64[ns]')
# datetime64 + object => object
exp = pd.Series([pd.Timestamp('2011-01-01'),
'x',
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, 'x', exp, np.object)
def test_fillna_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.NaT,
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_fillna_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64 => object
        exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz), pd.Timestamp('2012-01-01'),
                         pd.Timestamp('2011-01-03', tz=tz), pd.Timestamp('2011-01-04', tz=tz)])
"""
Normals Interface Class
Meteorological data provided by Meteostat (https://dev.meteostat.net)
under the terms of the Creative Commons Attribution-NonCommercial
4.0 International Public License.
The code is licensed under the MIT license.
"""
from copy import copy
from typing import Union
from datetime import datetime
import numpy as np
import pandas as pd
from meteostat.core.cache import get_file_path, file_in_cache
from meteostat.core.loader import processing_handler, load_handler
from meteostat.core.warn import warn
from meteostat.utilities.aggregations import weighted_average
from meteostat.interface.base import Base
from meteostat.interface.point import Point
class Normals(Base):
"""
Retrieve climate normals for one or multiple weather stations or
a single geographical point
"""
# The cache subdirectory
cache_subdir: str = 'normals'
# The list of weather Stations
_stations: pd.Index = None
# The first year of the period
_start: int = None
# The last year of the period
_end: int = None
# The data frame
_data: pd.DataFrame = pd.DataFrame()
# Columns
_columns: list = [
'start',
'end',
'month',
'tmin',
'tmax',
'prcp',
'wspd',
'pres',
'tsun'
]
# Index of first meteorological column
_first_met_col = 3
# Data types
_types: dict = {
'tmin': 'float64',
'tmax': 'float64',
'prcp': 'float64',
'wspd': 'float64',
'pres': 'float64',
'tsun': 'float64'
}
def _load(
self,
station: str
) -> None:
"""
Load file from Meteostat
"""
# File name
file = f'normals/{station}.csv.gz'
# Get local file path
path = get_file_path(self.cache_dir, self.cache_subdir, file)
# Check if file in cache
if self.max_age > 0 and file_in_cache(path, self.max_age):
# Read cached data
df = pd.read_pickle(path)
else:
# Get data from Meteostat
df = load_handler(
self.endpoint,
file,
self._columns,
self._types,
None)
if df.index.size > 0:
# Add weather station ID
df['station'] = station
# Set index
df = df.set_index(['station', 'start', 'end', 'month'])
# Save as Pickle
if self.max_age > 0:
df.to_pickle(path)
# Filter time period and append to DataFrame
if df.index.size > 0 and self._end:
# Get time index
end = df.index.get_level_values('end')
# Filter & return
return df.loc[end == self._end]
return df
def _get_data(self) -> None:
"""
Get all required data
"""
if len(self._stations) > 0:
# List of datasets
datasets = []
for station in self._stations:
datasets.append((
str(station),
))
# Data Processing
return processing_handler(
datasets, self._load, self.processes, self.threads)
# Empty DataFrame
return pd.DataFrame(columns=[*self._types])
def _resolve_point(
self,
method: str,
stations: pd.DataFrame,
alt: int,
adapt_temp: bool
) -> None:
"""
Project weather station data onto a single point
"""
if self._stations.size == 0 or self._data.size == 0:
return None
def adjust_temp(data: pd.DataFrame):
"""
Adjust temperature-like data based on altitude
"""
data.loc[data['tmin'] != np.NaN, 'tmin'] = data['tmin'] + \
((2 / 3) * ((data['elevation'] - alt) / 100))
data.loc[data['tmax'] != np.NaN, 'tmax'] = data['tmax'] + \
((2 / 3) * ((data['elevation'] - alt) / 100))
return data
if method == 'nearest':
if adapt_temp:
# Join elevation of involved weather stations
data = self._data.join(
stations['elevation'], on='station')
# Adapt temperature-like data based on altitude
data = adjust_temp(data)
# Drop elevation & round
data = data.drop('elevation', axis=1).round(1)
else:
data = self._data
self._data = data.groupby(level=[
'start',
'end',
'month'
]).agg('first')
else:
data = self._data.join(
stations[['score', 'elevation']], on='station')
# Adapt temperature-like data based on altitude
if adapt_temp:
data = adjust_temp(data)
# Aggregate mean data
data = data.groupby(level=[
'start',
'end',
'month'
]).apply(weighted_average)
# Remove obsolete index column
try:
data = data.reset_index(level=3, drop=True)
except IndexError:
pass
# Drop score and elevation
self._data = data.drop(['score', 'elevation'], axis=1).round(1)
# Set placeholder station ID
self._data['station'] = 'XXXXX'
self._data = self._data.set_index('station', append=True)
self._data = self._data.reorder_levels(
['station', 'start', 'end', 'month'])
self._stations = pd.Index(['XXXXX'])
def __init__(
self,
loc: Union[pd.DataFrame, Point, list, str],
start: int = None,
end: int = None
) -> None:
# Set list of weather stations
if isinstance(loc, pd.DataFrame):
self._stations = loc.index
elif isinstance(loc, Point):
if start and end:
stations = loc.get_stations(
'monthly', datetime(
start, 1, 1), datetime(
end, 12, 31))
else:
stations = loc.get_stations()
self._stations = stations.index
else:
if not isinstance(loc, list):
loc = [loc]
self._stations = pd.Index(loc)
# Check period
if (start and end) and (end - start != 29 or end %
10 != 0 or end >= datetime.now().year):
raise ValueError('Invalid reference period')
# Set period
self._start = start
self._end = end
# Get data for all weather stations
self._data = self._get_data()
# Interpolate data
if isinstance(loc, Point):
self._resolve_point(loc.method, stations, loc.alt, loc.adapt_temp)
# Clear cache
if self.max_age > 0 and self.autoclean:
self.clear_cache()
def normalize(self):
"""
Normalize the DataFrame
"""
        # Create temporary instance
temp = copy(self)
if self.count() == 0:
warn('Pointless normalization of empty DataFrame')
# Go through list of weather stations
for station in temp._stations:
# The list of periods
periods: pd.Index = pd.Index([])
# Get periods
if self.count() > 0:
periods = temp._data[temp._data.index.get_level_values(
'station') == station].index.unique('end')
elif periods.size == 0 and self._end:
                periods = pd.Index([self._end])
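# A hedged usage sketch for the Normals interface above. The station ID is only an
# illustration, and the chaining of normalize() plus the fetch() call are assumed from
# the wider Meteostat interface rather than shown in this excerpt; the (start, end) pair
# must span a 30-year reference period ending on a year divisible by 10, per __init__:
#
#   normals = Normals('10637', start=1961, end=1990)
#   df = normals.normalize().fetch()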
import streamlit as st
import requests
import numpy as np
import pandas as pd
import os
import json
import re
from datetime import datetime
class UserData:
def getUserInfo():
st.title('Instagram Dashboard')
with st.form(key='my_form'):
#gets a text input
username = st.text_input(label='Enter User Name')
#creates a submit button
submit_button = st.form_submit_button(label='Submit')
if submit_button:
if not os.path.exists(f'{username}'):
os.system(f'instagram-scraper "{username}" --profile-metadata --media-metadata --media-types none')
js = json.load(open(f'{username}/{username}.json', encoding='utf-8'))
df = pd.DataFrame(js['GraphImages'])
#get the profile_pic_url from json
prof_pic = js['GraphProfileInfo']['info']['profile_pic_url']
                #download the image into the pre-created 'static' folder
response = requests.get(prof_pic)
with open("static/image.jpg", "wb") as f:
f.write(response.content)
df['likes']=df['edge_media_preview_like'].apply(lambda x: x['count'])
df['comments']=df['edge_media_to_comment'].apply(lambda x: x['count'])
engagement_rate=(((df['likes'].sum()+df['comments'].sum())/len(df))/js['GraphProfileInfo']['info']['followers_count'])*100
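                # Engagement rate = ((total likes + total comments) / number of posts) / followers * 100.
                # e.g. 10 posts with 50 likes and 5 comments each, and 1,000 followers: (550/10)/1000*100 = 5.5%.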
df['date']=df['taken_at_timestamp'].apply(datetime.fromtimestamp)
df['dayofweek']=df['date'].dt.dayofweek
df['month']=df['date'].dt.month
df['week']=df['date'].dt.week
df['year']=df['date'].dt.year
df['ym']=df['year'].astype(str)+df['month'].astype(str)
df['dayofweek']=df['dayofweek'].replace([0,1,2,3,4,5,6],['Mon.', 'Tue.', 'Wed.','Thu.','Fri.','Sat.','Sun.'])
col1, col2 = st.beta_columns(2)
col1.image("static/image.jpg")
col2.write(f"Full Name: {js['GraphProfileInfo']['info']['full_name']}")
col2.write(f"Biography: {js['GraphProfileInfo']['info']['biography']}")
col2.write(f"Is Business Account: {js['GraphProfileInfo']['info']['is_business_account']}")
col2.write(f"Number Of Posts: {js['GraphProfileInfo']['info']['posts_count']}")
col2.write(f"Average Posts Per Month: {round(df.groupby('ym').size().mean(),2)}")
col2.write(f"Engagement Rate: {round(engagement_rate,2)}%")
x=df.groupby('dayofweek').size()
st.subheader('Number Of Posts Per Week-Day')
                st.bar_chart(pd.DataFrame(x))
import matplotlib.pyplot as plt
import numpy as np
import pandas
import math
import sys
import numbers
import argparse
from sklearn.cluster import KMeans
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import LabelEncoder
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn import tree
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
import time
from Dictionaries import countryCodes
from Dictionaries import currencyCodes
from Dictionaries import kickstarterCategories
from sklearn.preprocessing import StandardScaler
################################prepare data for use in scikit-learn###############################################
kData = pandas.read_csv("Kickstarter_Altered_Tester.csv", dtype = {"goal": float, "state": int, "disable_communication": int, "country": object, "currency": object, "staff_pick": int, "category": object,})
kDataInput = pandas.read_csv("Kickstarter_Altered_Tester_Blank.csv", dtype = {"goal": float, "disable_communication": int, "country": object, "currency": object, "staff_pick": int, "category": object,})
#collapse all textual columns to numerical
goalList = kData['goal'].tolist()
stateList = kData['state'].tolist()
disComList = kData['disable_communication'].tolist()
countryList = kData['country'].tolist()
for index, val in enumerate(countryList):
countryList[index] = countryCodes[val]
currencyList = kData['currency'].tolist()
for index, val in enumerate(currencyList):
currencyList[index] = currencyCodes[val]
staffPickList = kData['staff_pick'].tolist()
categoryList = kData['category'].tolist()
for index, val in enumerate(categoryList):
categoryList[index] = kickstarterCategories[val]
kData['goalN'] = kData['goal'];
kData['stateN'] = kData['state'];
kData['disable_communicationN'] = kData['disable_communication'];
kData['countryN'] = pandas.Series(countryList)
import os
import pickle
import serpent
from threading import Lock
from tempfile import NamedTemporaryFile
from contextlib import suppress
import numpy as np
import pandas as pd
from astroquery.utils.tap.core import TapPlus
from astropy.coordinates import SkyCoord
import Pyro5.server
from Pyro5.api import Proxy, register_class_to_dict, register_dict_to_class
from huntsman.drp.base import HuntsmanBase
from huntsman.drp.utils.pyro import NameServer, PyroService, astropy_to_dict, dict_to_astropy
PYRO_NAME = "refcat"
# Register pyro serialisers for astropy SkyCoord objects
register_class_to_dict(SkyCoord, astropy_to_dict)
register_dict_to_class("astropy_yaml", dict_to_astropy)
class TapReferenceCatalogue(HuntsmanBase):
""" Class to download reference catalogues using Table Access Protocol (TAP). """
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._initialise()
def _initialise(self):
# Extract attribute values from config
self._cone_search_radius = self.config["refcat"]["cone_search_radius"]
self._ra_key = self.config["refcat"]["ra_key"]
self._dec_key = self.config["refcat"]["dec_key"]
self._unique_key = self.config["refcat"]["unique_source_key"]
self._tap_url = self.config["refcat"]["tap_url"]
self._tap_table = self.config["refcat"]["tap_table"]
self._tap_limit = self.config["refcat"].get("tap_limit", None)
self._parameter_ranges = self.config["refcat"]["parameter_ranges"]
# Create the tap object
self._tap = TapPlus(url=self._tap_url)
def cone_search(self, coord, filename, radius_degrees=None):
""" Query the reference catalogue, saving output to a .csv file.
Args:
coord (astropy.coordinates.SkyCoord): The central coordinate.
filename (str): Filename of the returned .csv file.
radius_degrees (float, optional): Override search radius from config.
Returns:
pd.DataFrame: The source catalogue.
"""
ra = coord.ra.to_value("deg")
dec = coord.dec.to_value("deg")
if radius_degrees is None:
radius_degrees = self._cone_search_radius
query = f"SELECT * FROM {self._tap_table}"
# Apply cone search
query += (f" WHERE 1=CONTAINS(POINT('ICRS', {self._ra_key}, {self._dec_key}),"
f" CIRCLE('ICRS', {ra}, {dec}, {radius_degrees}))")
# Apply parameter ranges
for param, prange in self._parameter_ranges.items():
with suppress(KeyError):
query += f" AND {param} >= {prange['lower']}"
with suppress(KeyError):
query += f" AND {param} < {prange['upper']}"
with suppress(KeyError):
query += f" AND {param} = {prange['equal']}"
# Apply limit on number of returned rows
if self._tap_limit is not None:
query += f" LIMIT {int(self._tap_limit)}"
# Start the query
self.logger.debug(f"Cone search command: {query}.")
self._tap.launch_job_async(query, dump_to_file=True, output_format="csv",
output_file=filename)
return pd.read_csv(filename)
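    # For illustration, a call with ra=150.0, dec=-30.0 and the default radius assembles an
    # ADQL string roughly of the form below (table name, column keys, parameter ranges and
    # limit all come from the refcat config, so the exact text is configuration-dependent):
    #
    #   SELECT * FROM <tap_table>
    #   WHERE 1=CONTAINS(POINT('ICRS', <ra_key>, <dec_key>),
    #                    CIRCLE('ICRS', 150.0, -30.0, <radius>))
    #   AND <param> >= <lower> ... [LIMIT <n>]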
def make_reference_catalogue(self, coords, filename=None, **kwargs):
""" Create the master reference catalogue with no source duplications.
Args:
coords (list of astropy.coordinates.SkyCoord): The central coordinates of each
exposure.
filename (string, optional): Filename to save output catalogue.
Returns:
pandas.DataFrame: The reference catalogue.
"""
result = None
with NamedTemporaryFile(delete=True) as tempfile:
for coord in coords:
# Do the cone search and get result
df = self.cone_search(coord, filename=tempfile.name, **kwargs)
# First iteration
if result is None:
result = df
continue
# Remove existing sources & concat
is_new = np.isin(df[self._unique_key].values, result[self._unique_key].values,
invert=True)
                result = pd.concat([result, df[is_new]], ignore_index=False)
#!/usr/bin/env python
# -*-coding:utf-8 -*-
'''
@File : Stress_detection_script.py
@Time : 2022/03/17 09:45:59
@Author : <NAME>
@Contact : <EMAIL>
'''
import os
import logging
import plotly.express as px
import numpy as np
import pandas as pd
import zipfile
import fnmatch
import flirt.reader.empatica
import matplotlib.pyplot as plt
from tqdm import tqdm
from datetime import datetime, timedelta
import cvxopt as cv
from neurokit2 import eda_phasic
from matplotlib.font_manager import FontProperties
import matplotlib.dates as mdates
# rootPath = r"./"
# pattern = '*.zip'
rootPath = input("Enter Folder Path : ")
pattern = input("Enter File Name : ")
for root, dirs, files in os.walk(rootPath):
for filename in fnmatch.filter(files, pattern):
print(os.path.join(root, filename))
zipfile.ZipFile(os.path.join(root, filename)).extractall(
os.path.join(root, os.path.splitext(filename)[0]))
dir = os.path.splitext(pattern)[0]
# os.listdir(dir)
class process:
def moving_avarage_smoothing(X, k, description_str):
S = np.zeros(X.shape[0])
for t in tqdm(range(X.shape[0]), desc=description_str):
if t < k:
S[t] = np.mean(X[:t+1])
else:
S[t] = np.sum(X[t-k:t])/k
return S
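    # Hand-worked sketch of the smoothing above: with k=3 and X = [1, 2, 3, 4, 5], the
    # first k samples use the running mean of everything seen so far ([1.0, 1.5, 2.0]) and
    # later samples use the mean of the previous k values (excluding the current one),
    # giving S = [1.0, 1.5, 2.0, 2.0, 3.0].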
def deviation_above_mean(unit, mean_unit, std_unit):
'''
Function takes 3 arguments
unit : number of Standard deviations above the mean
mean_unit : mean value of each signal
std_unit : standard deviation of each signal
'''
if unit == 0:
return (mean_unit)
else:
return (mean_unit + (unit*std_unit))
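    # e.g. deviation_above_mean(1, mean_unit=50.0, std_unit=4.0) -> 54.0, while unit=0
    # simply returns the mean; this value is the threshold fed to the crossing detectors below.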
def Starting_timeStamp(column, time_frames, deviation_metric):
'''
        Function takes the signal, its timestamps and a threshold, and returns the starting times at which the signal crosses above the threshold value
'''
starting_time_index = []
for i in range(len(column)-1): #iterating till the end of the array
if column[i] < deviation_metric and column[i+1] > deviation_metric: # checking if the n+1 element is greater than nth element to conclude if the signal is increasing
starting_time_index.append(time_frames[i]) #appending the timestamp's index to the declared empty array
return starting_time_index
def Ending_timeStamp(column, time_frames, deviation_metric):
'''
        Function takes the signal, its timestamps and a threshold, and returns the ending times at which the signal drops back below the threshold value
'''
time_index = []
for i in range(len(column)-1):
if column[i] > deviation_metric and column[i+1] < deviation_metric: # checking if the n+1 element is lesser than nth element to conclude if the signal is decreasing
time_index.append(time_frames[i])
if column[len(column) - 1] > deviation_metric: # checking for hanging ends, where the signal stops abruptly
time_index.insert(
len(time_index), time_frames[len(time_frames) - 1]) # inserting the timestamp's index to the last index of the array
else:
pass
return time_index
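    # Hand-worked illustration of the two crossing detectors above: with
    # column = [0, 2, 2, 0, 2, 2] at times t0..t5 and deviation_metric = 1,
    # Starting_timeStamp returns [t0, t3] (upward crossings) and Ending_timeStamp
    # returns [t2, t5] -- t5 is appended because the signal is still above the
    # threshold when the recording ends.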
def Extract_HRV_Information():
        global hrv_features # declared global so the combined plot function can access it
        global hrv_events_df # declared global so the combined plot function can access it
ibi = pd.read_csv(rootPath+'/'+dir+'\IBI.csv')
mean_ibi = ibi[' IBI'].mean()
average_heart_rate = 60/mean_ibi
print('mean ibi is :', mean_ibi)
print('mean heart rate :', average_heart_rate.round())
ibis = flirt.reader.empatica.read_ibi_file_into_df(
rootPath+'/'+dir + '\IBI.csv')
hrv_features = flirt.get_hrv_features(
ibis['ibi'], 128, 1, ["td", "fd"], 0.2)
hrv_features = hrv_features.dropna(how='any', axis=0)
hrv_features.reset_index(inplace=True)
hrv_features['datetime'] = hrv_features['datetime'].dt.tz_convert('US/Eastern')
hrv_features['datetime'] = pd.to_datetime(hrv_features['datetime'])
hrv_features['datetime'] = hrv_features['datetime'].apply(lambda x: datetime.replace(x, tzinfo=None))
# smoothing the curve
print('\n', '******************** Smoothing The Curve ********************', '\n')
MAG_K500 = process.moving_avarage_smoothing(
hrv_features['hrv_rmssd'], 500, "Processing HRV Data")
hrv_features['MAG_K500'] = MAG_K500
# hrv_features.to_csv("./Metadata/"+ dir+"_HRV.csv")
# hrv_features.to_csv(os.path.join('./Metadata'+dir+'_HRV.csv'))
mean_rmssd = hrv_features['hrv_rmssd'].mean()
std_rmssd = hrv_features['hrv_rmssd'].std()
        # getting the starting and ending times of the signal
starting_timestamp = process.Starting_timeStamp(hrv_features['MAG_K500'], hrv_features['datetime'],
process.deviation_above_mean(1, mean_rmssd, std_rmssd))
ending_timestamp = process.Ending_timeStamp(hrv_features['MAG_K500'], hrv_features['datetime'],
process.deviation_above_mean(1, mean_rmssd, std_rmssd))
        # in the if case below I am assuming that no events crossed the threshold
if len(starting_timestamp) < 1:
fig, ax1 = plt.subplots(figsize=(30, 10))
ax1.plot(hrv_features['datetime'],
hrv_features['MAG_K500'], color='red')
# fig.savefig('./Plots/HRV_figure.png')
else:
            #check if the starting and ending timestamp lists are aligned; if not, drop the first ending timestamp
if starting_timestamp > ending_timestamp:
ending_timestamp.pop(0)
else:
pass
difference = [] # empty array to see how long the event lasts in seconds
time_delta_minutes = []
desired_time_index = []
zip_object = zip(ending_timestamp, starting_timestamp)
for list1_i, list2_i in zip_object:
# append each difference to list
difference.append(list1_i-list2_i) #subtracting ending timestamp - starting timestamp to get difference in seconds
for i in difference:
                time_delta_minutes.append(i.total_seconds()/60) # converting the difference in seconds to minutes
time_delta_minutes
for i in range(len(time_delta_minutes)):
                if time_delta_minutes[i] > 5.00: #checking if each episode lasts more than 5 minutes
desired_time_index.append(i)
starting_timestamp_df = pd.DataFrame(starting_timestamp)
ending_timestamp_df = pd.DataFrame(ending_timestamp)
frames = (starting_timestamp_df, ending_timestamp_df)
hrv_events_df = pd.concat(frames, axis=1)
hrv_events_df.columns = ['Starting Timestamp', 'Ending Timestamp']
hrv_events_df['Starting Timestamp'] = hrv_events_df['Starting Timestamp'].dt.strftime("%Y-%m-%d %H:%M:%S") #converting it to Y:M:D H:M:S to ignore nanoseconds in timestamp dataframe
hrv_events_df['Ending Timestamp'] = hrv_events_df['Ending Timestamp'].dt.strftime("%Y-%m-%d %H:%M:%S")
            hrv_events_df = hrv_events_df.loc[desired_time_index, :] # selecting only the episodes that exceed the duration threshold
fig, ax = plt.subplots(figsize=(20, 6))
ax.plot(hrv_features['datetime'],
hrv_features['MAG_K500'], color='red')
for d in hrv_events_df.index:
ax.axvspan(hrv_events_df['Starting Timestamp'][d], hrv_events_df['Ending Timestamp']
[d], facecolor="g", edgecolor="none", alpha=0.5)
ax.relim()
ax.autoscale_view()
# fig.savefig('./Plots/HRV_figure.png')
return hrv_features, hrv_events_df
def Extract_ACC_Infromation():
global acc_df
global acc_events_df
acc_df = pd.read_csv(rootPath+'/'+dir + '/ACC.csv')
acc_df = flirt.reader.empatica.read_acc_file_into_df(
rootPath+'/'+dir + '/ACC.csv')
acc_df['Magnitude'] = np.sqrt(
acc_df['acc_x']**2 + acc_df['acc_y']**2 + acc_df['acc_z']**2)
print("Magnitude Mean : ", acc_df['Magnitude'].mean())
acc_df.reset_index(inplace=True)
acc_df['datetime'] = acc_df['datetime'].dt.tz_convert('US/Eastern')
acc_df['datetime'] = pd.to_datetime(acc_df['datetime'])
acc_df['datetime'] = acc_df['datetime'].apply(lambda x: datetime.replace(x, tzinfo=None))
print('\n', '******************** Smoothing The ACC Curve ********************', '\n')
MAG_K500 = process.moving_avarage_smoothing(
acc_df['Magnitude'], 15000, "Processing ACC Data")
acc_df['MAG_K500'] = MAG_K500
# acc_df.to_csv("./Metadata/"+ dir+"_ACC.csv")
mean_acc_magnitude = acc_df['Magnitude'].mean()
std_acc_magnitude = acc_df['Magnitude'].std()
print("Average Magnitude of the Acc Data : ", mean_acc_magnitude)
starting_timestamp = process.Starting_timeStamp(acc_df['MAG_K500'], acc_df['datetime'],
process.deviation_above_mean(0.20, mean_acc_magnitude, std_acc_magnitude))
ending_timestamp = process.Ending_timeStamp(acc_df['MAG_K500'], acc_df['datetime'],
process.deviation_above_mean(0.20, mean_acc_magnitude, std_acc_magnitude))
if len(starting_timestamp) < 1:
fig, ax2 = plt.subplots(figsize=(30, 10))
ax2.plot(acc_df['datetime'], acc_df['MAG_K500'], color='red')
fig.savefig('./Plots/ACC_figure.png')
else:
if starting_timestamp > ending_timestamp:
ending_timestamp.pop(0)
difference = [] # initialization of result list
time_delta_minutes = []
desired_time_index = []
zip_object = zip(ending_timestamp, starting_timestamp)
for list1_i, list2_i in zip_object:
# append each difference to list
difference.append(list1_i-list2_i)
for i in difference:
time_delta_minutes.append(i.total_seconds()/60)
for i in range(len(time_delta_minutes)):
if time_delta_minutes[i] > 2.00:
desired_time_index.append(i)
            starting_timestamp_df = pd.DataFrame(starting_timestamp)
from __future__ import print_function
import caffe
import sys
import os
import random
import numpy as np
import pandas as pd
import cv2
import pickle
# If you get "No module named _caffe", either you have not built pycaffe or you have the wrong path.
model_root = "/datasets_1/sagarj/BellLabs/caffe_models/places/"
imagenet_mean = model_root + 'places205CNN_mean.binaryproto'
logfile = "../Data/PlacesFeatExtractStreetview.txt"
#Size of images
IMAGE_WIDTH = 227
IMAGE_HEIGHT = 227
def save_obj(obj, name ):
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name ):
with open(name , 'rb') as f:
return pickle.load(f)
def transform_img(img, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT):
#Histogram Equalization
img[:, :, 0] = cv2.equalizeHist(img[:, :, 0])
img[:, :, 1] = cv2.equalizeHist(img[:, :, 1])
img[:, :, 2] = cv2.equalizeHist(img[:, :, 2])
#Image Resizing
img = cv2.resize(img, (img_width, img_height), interpolation = cv2.INTER_CUBIC)
return img
def predictImages(imgDict , classprobs, model_def , model_weights):
net = caffe.Net(model_def, # defines the structure of the model
model_weights, # contains the trained weights
caffe.TEST) # use test mode (e.g., don't perform dropout)
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2,0,1))
transformer.set_channel_swap('data', (2,1,0))
transformer.set_raw_scale('data', 255.0)
net.blobs['data'].reshape(1,3,IMAGE_WIDTH,IMAGE_WIDTH)
result = pd.DataFrame({'key':0 , 'prime':0 , 'feats':[0]})
for k in imgDict:
augImages = imgDict[k]
primeKey = augImages[0].split('/')[-1].split('.')[0].strip()
for path in augImages:
key = path.split('/')[-1].split('.')[0].strip()
#true_label = line.split(',')[1]
#path = line.strip()
im = caffe.io.load_image(path)
net.blobs['data'].data[...] = transformer.preprocess('data', im)
net.forward()
#out1 = net.blobs['prob'].data
out2 = net.blobs['fc7'].data
out = out2
print(out.shape)
d = {'key':key , 'prime':primeKey , 'feats':out.tolist()}
            df = pd.DataFrame(data=d)
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 8 15:49:09 2019
@author: d
"""
print("Running 'wrangle.py'...")
import numpy as np
import pandas as pd
np.random.seed(0)
print('Beginning wrangling of training and test set')
# Loading data
df_train = pd.read_csv('../../data/raw/train_users_2.csv')
df_test = pd.read_csv('../../data/raw/test_users.csv')
target = df_train['country_destination'].values
df_train = df_train.drop(['country_destination'], axis=1)
id_test = df_test['id']
trainsize = df_train.shape[0]
# Creating a DataFrame with train+test data
df_all = pd.concat((df_train, df_test), axis=0, ignore_index=True)
# Removing date first booking because it does not appear in the test data
df_all = df_all.drop(['date_first_booking'], axis=1)
# Filling in NaN
df_all = df_all.fillna(-1)
### Feature Engineering ###
# date_account_created: three features, one day, one month, one year
dac = pd.to_datetime(df_all['date_account_created'])
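# A hedged sketch of the three-feature expansion described above (the new column names
# are illustrative, not taken from the original script):
#
#   df_all['dac_year'] = dac.dt.year
#   df_all['dac_month'] = dac.dt.month
#   df_all['dac_day'] = dac.dt.day
#   df_all = df_all.drop(['date_account_created'], axis=1)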
#!/usr/bin/env python
"""
@author: cdeline
bifacial_radiance.py - module to develop radiance bifacial scenes, including gendaylit and gencumulativesky
7/5/2016 - test script based on G173_journal_height
5/1/2017 - standalone module
Pre-requisites:
This software is written for Python >3.6 leveraging many Anaconda tools (e.g. pandas, numpy, etc)
*RADIANCE software should be installed from https://github.com/NREL/Radiance/releases
*If you want to use gencumulativesky, move 'gencumulativesky.exe' from
'bifacial_radiance\data' into your RADIANCE source directory.
*If using a Windows machine you should download the Jaloxa executables at
http://www.jaloxa.eu/resources/radiance/radwinexe.shtml#Download
* Installation of bifacial_radiance from the repo:
1. Clone the repo
2. Navigate to the directory using the command prompt
3. run `pip install -e . `
Overview:
Bifacial_radiance includes several helper functions to make it easier to evaluate
different PV system orientations for rear bifacial irradiance.
Note that this is simply an optical model - identifying available rear irradiance under different conditions.
    For a detailed demonstration example, look at the .ipynb notebook in \docs\
There are two solar resource modes in bifacial_radiance: `gendaylit` uses hour-by-hour solar
resource descriptions using the Perez diffuse tilted plane model.
`gencumulativesky` is an annual average solar resource that combines hourly
Perez skies into one single solar source, and computes an annual average.
bifacial_radiance includes five object-oriented classes:
RadianceObj: top level class to work on radiance objects, keep track of filenames,
sky values, PV module type etc.
GroundObj: details for the ground surface and reflectance
SceneObj: scene information including array configuration (row spacing, clearance or hub height)
MetObj: meteorological data from EPW (energyplus) file.
Future work: include other file support including TMY files
AnalysisObj: Analysis class for plotting and reporting
"""
import logging
logging.basicConfig()
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
import os, datetime
from subprocess import Popen, PIPE # replacement for os.system()
import pandas as pd
import numpy as np
import warnings
#from input import *
# Mutual parameters across all processes
#daydate=sys.argv[1]
global DATA_PATH # path to data files including module.json. Global context
#DATA_PATH = os.path.abspath(pkg_resources.resource_filename('bifacial_radiance', 'data/') )
DATA_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))
def _findme(lst, a): #find string match in a list. script from stackexchange
return [i for i, x in enumerate(lst) if x == a]
def _missingKeyWarning(dictype, missingkey, newvalue): # prints warnings
if type(newvalue) is bool:
valueunit = ''
else:
valueunit = 'm'
print("Warning: {} Dictionary Parameters passed, but {} is missing. ".format(dictype, missingkey))
print("Setting it to default value of {} {} to continue\n".format(newvalue, valueunit))
def _normRGB(r, g, b): #normalize by each color for human vision sensitivity
return r*0.216+g*0.7152+b*0.0722
def _popen(cmd, data_in, data_out=PIPE):
"""
Helper function subprocess.popen replaces os.system
- gives better input/output process control
usage: pass <data_in> to process <cmd> and return results
based on rgbeimage.py (<NAME> 2010)
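Example (illustrative sketch, not from the original docstring; the file name is
a placeholder):
data, err = _popen(['oconv', 'materials/ground.rad'], None)
# data holds the decoded stdout of the command, err any error message (or None)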
"""
if type(cmd) == str:
cmd = str(cmd) # gets rid of unicode oddities
shell=True
else:
shell=False
p = Popen(cmd, bufsize=-1, stdin=PIPE, stdout=data_out, stderr=PIPE, shell=shell) #shell=True required for Linux? quick fix, but may be security concern
data, err = p.communicate(data_in)
#if err:
# return 'message: '+err.strip()
#if data:
# return data. in Python3 this is returned as `bytes` and needs to be decoded
if err:
if data:
returntuple = (data.decode('latin1'), 'message: '+err.decode('latin1').strip())
else:
returntuple = (None, 'message: '+err.decode('latin1').strip())
else:
if data:
returntuple = (data.decode('latin1'), None) #Py3 requires decoding
else:
returntuple = (None, None)
return returntuple
def _interactive_load(title=None):
# Tkinter file picker
import tkinter
from tkinter import filedialog
root = tkinter.Tk()
root.withdraw() #Start interactive file input
root.attributes("-topmost", True) #Bring window into foreground
return filedialog.askopenfilename(parent=root, title=title) #initialdir = data_dir
def _interactive_directory(title=None):
# Tkinter directory picker. Now Py3.6 compliant!
import tkinter
from tkinter import filedialog
root = tkinter.Tk()
root.withdraw() #Start interactive file input
root.attributes("-topmost", True) #Bring to front
return filedialog.askdirectory(parent=root, title=title)
def _modDict(originaldict, moddict, relative=False):
'''
Compares keys in originaldict with moddict and updates values of
originaldict to moddict if existing.
Parameters
----------
originaldict : dictionary
Original dictionary calculated, for example frontscan or backscan dictionaries.
moddict : dictionary
Modified dictionary, for example modscan['xstart'] = 0 to change position of x.
relative : Bool
If passing modscanfront and modscanback to modify dictionaries of positions,
this sets whether the values passed are relative or absolute.
Default is absolute value (relative=False)
Returns
-------
originaldict : dictionary
Updated original dictionary with values from moddict.
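Examples
--------
Illustrative sketch (key names are hypothetical):
>>> _modDict({'xstart': 0.0, 'ystart': 2.0}, {'xstart': 0.5}, relative=True)
{'xstart': 0.5, 'ystart': 2.0}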
'''
newdict = originaldict.copy()
for key in moddict:
try:
if relative:
newdict[key] = moddict[key] + newdict[key]
else:
newdict[key] = moddict[key]
except:
print("Wrong key in modified dictionary")
return newdict
def _heightCasesSwitcher(sceneDict, preferred='hub_height', nonpreferred='clearance_height'):
"""
Parameters
----------
sceneDict : dictionary
Dictionary that might contain more than one way of defining height for
the array: `clearance_height`, `hub_height`, or `height` (deprecated).
This function helps choose which definition to use.
preferred : str, optional
When sceneDict has hub_height and clearance_height, or it only has height,
it will keep only the preferred option. The default is 'hub_height'.
nonpreferred : str, optional
When sceneDict has hub_height and clearance_height,
it will delete this nonpreferred option. The default is 'clearance_height'.
Returns
-------
sceneDict : dictionary
Dictionary now containing the appropriate definition for system height.
use_clearanceheight : Bool
Helper variable to specify if the dictionary has only clearance_height, for
use inside `makeScene1axis`. Will get deprecated once that internal
function is streamlined.
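Examples
--------
Illustrative sketch (values are placeholders; a warning about the duplicate
keys is printed):
>>> _heightCasesSwitcher({'hub_height': 1.5, 'clearance_height': 0.8})  # doctest: +SKIP
({'hub_height': 1.5}, False)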
"""
# TODO: When we update to python 3.9.0, this could be a Switch Cases (Structural Pattern Matching):
heightCases = '_'
if 'height' in sceneDict:
heightCases = heightCases+'height__'
if 'clearance_height' in sceneDict:
heightCases = heightCases+'clearance_height__'
if 'hub_height' in sceneDict:
heightCases = heightCases+'hub_height__'
use_clearanceheight = False
# CASES:
if heightCases == '_height__':
print("sceneDict Warning: 'height' is being deprecated. "+
"Renaming as "+preferred)
sceneDict[preferred]=sceneDict['height']
del sceneDict['height']
elif heightCases == '_clearance_height__':
#print("Using clearance_height.")
use_clearanceheight = True
elif heightCases == '_hub_height__':
#print("Using hub_height.'")
pass
elif heightCases == '_height__clearance_height__':
print("sceneDict Warning: 'clearance_height and 'height' "+
"(deprecated) are being passed. removing 'height' "+
"from sceneDict for this tracking routine")
del sceneDict['height']
use_clearanceheight = True
elif heightCases == '_height__hub_height__':
print("sceneDict Warning: 'height' is being deprecated. Using 'hub_height'")
del sceneDict['height']
elif heightCases == '_height__clearance_height__hub_height__':
print("sceneDict Warning: 'hub_height', 'clearance_height'"+
", and 'height' are being passed. Removing 'height'"+
" (deprecated) and "+ nonpreferred+ ", using "+preferred)
del sceneDict[nonpreferred]
elif heightCases == '_clearance_height__hub_height__':
print("sceneDict Warning: 'hub_height' and 'clearance_height'"+
" are being passed. Using "+preferred+
" and removing "+ nonpreferred)
del sceneDict[nonpreferred]
else:
print ("sceneDict Error! no argument in sceneDict found "+
"for 'hub_height', 'height' nor 'clearance_height'. "+
"Exiting routine.")
return sceneDict, use_clearanceheight
def _is_leap_and_29Feb(s): # Returns a mask selecting Feb. 29 of leap years (used to remove it).
return (s.index.year % 4 == 0) & \
((s.index.year % 100 != 0) | (s.index.year % 400 == 0)) & \
(s.index.month == 2) & (s.index.day == 29)
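# Illustrative sketch (added for documentation only, not called by the module):
# for an hourly Series spanning the leap year 2020,
#   s = pd.Series(1.0, index=pd.date_range('2020-01-01', periods=8784, freq='H'))
#   _is_leap_and_29Feb(s).sum()   # -> 24, i.e. the 24 timestamps on 2020-02-29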
def _subhourlydatatoGencumskyformat(gencumskydata, label='right'):
# Subroutine to resample, pad, remove leap year and get data in the
# 8760 hourly format
# for saving the temporary files for gencumsky in _saveTempTMY and
# _makeTrackerCSV
#Resample to hourly. Gencumsky wants right-labeled data.
gencumskydata = gencumskydata.resample('60T', closed='right', label='right').mean()
if label == 'left': #switch from left to right labeled by adding an hour
gencumskydata.index = gencumskydata.index + pd.to_timedelta('1H')
# Padding
tzinfo = gencumskydata.index.tzinfo
padstart = pd.to_datetime('%s-%s-%s %s:%s' % (gencumskydata.index.year[0],1,1,1,0 ) ).tz_localize(tzinfo)
padend = pd.to_datetime('%s-%s-%s %s:%s' % (gencumskydata.index.year[0]+1,1,1,0,0) ).tz_localize(tzinfo)
gencumskydata.iloc[0] = 0 # set first datapt to zero to forward fill w zeros
gencumskydata.iloc[-1] = 0 # set last datapt to zero to forward fill w zeros
# check if index exists. I'm sure there is a way to do this backwards.
if any(gencumskydata.index.isin([padstart])):
print("Data starts on Jan. 01")
else:
#gencumskydata=gencumskydata.append(pd.DataFrame(index=[padstart]))
gencumskydata=pd.concat([gencumskydata,pd.DataFrame(index=[padstart])])
if any(gencumskydata.index.isin([padend])):
print("Data ends on Dec. 31st")
else:
#gencumskydata=gencumskydata.append(pd.DataFrame(index=[padend]))
gencumskydata=pd.concat([gencumskydata, pd.DataFrame(index=[padend])])
gencumskydata.loc[padstart]=0
gencumskydata.loc[padend]=0
gencumskydata=gencumskydata.sort_index()
# Fill empty timestamps with zeros
gencumskydata = gencumskydata.resample('60T').asfreq().fillna(0)
# Mask leap year
leapmask = ~(_is_leap_and_29Feb(gencumskydata))
gencumskydata = gencumskydata[leapmask]
if (gencumskydata.index.year[-1] == gencumskydata.index.year[-2]+1) and len(gencumskydata)>8760:
gencumskydata = gencumskydata[:-1]
return gencumskydata
# end _subhourlydatatoGencumskyformat
class RadianceObj:
"""
The RadianceObj top level class is used to work on radiance objects,
keep track of filenames, sky values, PV module configuration, etc.
Parameters
----------
name : text to append to output files
filelist : list of Radiance files to create oconv
nowstr : current date/time string
path : working directory with Radiance materials and objects
Methods
-------
__init__ : initialize the object
_setPath : change the working directory
"""
def __repr__(self):
return str(self.__dict__)
def __init__(self, name=None, path=None, hpc=False):
'''
initialize RadianceObj with path of Radiance materials and objects,
as well as a basename to append to
Parameters
----------
name: string, append temporary and output files with this value
path: location of Radiance materials and objects
hpc: Keeps track if User is running simulation on HPC so some file
reading routines try reading a bit longer and some writing
routines (makeModule) that overwrite themselves are inactivated.
Returns
-------
none
'''
self.metdata = {} # data from epw met file
self.data = {} # data stored at each timestep
self.path = "" # path of working directory
self.name = "" # basename to append
#self.filelist = [] # list of files to include in the oconv
self.materialfiles = [] # material files for oconv
self.skyfiles = [] # skyfiles for oconv
self.radfiles = [] # scene rad files for oconv
self.octfile = [] #octfile name for analysis
self.Wm2Front = 0 # cumulative tabulation of front W/m2
self.Wm2Back = 0 # cumulative tabulation of rear W/m2
self.backRatio = 0 # ratio of rear / front Wm2
self.nMods = None # number of modules per row
self.nRows = None # number of rows per scene
self.hpc = hpc # HPC simulation is being run. Some read/write functions are modified
now = datetime.datetime.now()
self.nowstr = str(now.date())+'_'+str(now.hour)+str(now.minute)+str(now.second)
# DEFAULTS
if name is None:
self.name = self.nowstr # set default filename for output files
else:
self.name = name
self.basename = name # add backwards compatibility for prior versions
#self.__name__ = self.name #optional info
#self.__str__ = self.__name__ #optional info
if path is None:
self._setPath(os.getcwd())
else:
self._setPath(path)
# load files in the /materials/ directory
self.materialfiles = self.returnMaterialFiles('materials')
def _setPath(self, path):
"""
setPath - move path and working directory
"""
self.path = os.path.abspath(path)
print('path = '+ path)
try:
os.chdir(self.path)
except OSError as exc:
LOGGER.error("Path doesn't exist: %s" % (path))
LOGGER.exception(exc)
raise(exc)
# check for path in the new Radiance directory:
def _checkPath(path): # create the file structure if it doesn't exist
if not os.path.exists(path):
os.makedirs(path)
print('Making path: '+path)
_checkPath('images'); _checkPath('objects')
_checkPath('results'); _checkPath('skies'); _checkPath('EPWs')
# if materials directory doesn't exist, populate it with ground.rad
# figure out where pip installed support files.
from shutil import copy2
if not os.path.exists('materials'): #copy ground.rad to /materials
os.makedirs('materials')
print('Making path: materials')
copy2(os.path.join(DATA_PATH, 'ground.rad'), 'materials')
# if views directory doesn't exist, create it with two default views - side.vp and front.vp
if not os.path.exists('views'):
os.makedirs('views')
with open(os.path.join('views', 'side.vp'), 'w') as f:
f.write('rvu -vtv -vp -10 1.5 3 -vd 1.581 0 -0.519234 '+
'-vu 0 0 1 -vh 45 -vv 45 -vo 0 -va 0 -vs 0 -vl 0')
with open(os.path.join('views', 'front.vp'), 'w') as f:
f.write('rvu -vtv -vp 0 -3 5 -vd 0 0.894427 -0.894427 '+
'-vu 0 0 1 -vh 45 -vv 45 -vo 0 -va 0 -vs 0 -vl 0')
def getfilelist(self):
"""
Return concat of matfiles, radfiles and skyfiles
"""
return self.materialfiles + self.skyfiles + self.radfiles
def save(self, savefile=None):
"""
Pickle the radiance object for further use.
Very basic operation - not much use right now.
Parameters
----------
savefile : str
Optional savefile name, with .pickle extension.
Otherwise default to save.pickle
"""
import pickle
if savefile is None:
savefile = 'save.pickle'
with open(savefile, 'wb') as f:
pickle.dump(self, f)
print('Saved to file {}'.format(savefile))
#def setHPC(self, hpc=True):
# self.hpc = hpc
def addMaterial(self, material, Rrefl, Grefl, Brefl, materialtype='plastic',
specularity=0, roughness=0, material_file=None, comment=None, rewrite=True):
"""
Function to add a material in Radiance format.
Parameters
----------
material : str
Name of the material to add.
Rrefl : str
Reflectivity for first wavelength, or 'R' bin.
Grefl : str
Reflectivity for second wavelength, or 'G' bin.
Brefl : str
Reflectivity for third wavelength, or 'B' bin.
materialtype : str, optional
Type of material. The default is 'plastic'. Others can be mirror,
trans, etc. See RADIANCE documentation.
specularity : str, optional
Ratio of reflection that is specular and not diffuse. The default is 0.
roughness : str, optional
This is the microscopic surface roughness: the more jagged the
facets are, the rougher it is and more blurry reflections will appear.
material_file : str, optional
Name of the material file to modify. The default is None, which uses 'ground.rad'.
comment : str, optional
Comment line written above the material definition. The default is None.
rewrite : bool, optional
Whether to overwrite an existing entry for this material. The default is True.
Returns
-------
None. Just adds the material to the material_file specified or the
default in ``materials\ground.rad``.
References:
See examples of documentation for more materialtype details.
http://www.jaloxa.eu/resources/radiance/documentation/docs/radiance_tutorial.pdf page 10
Also, you can use https://www.jaloxa.eu/resources/radiance/colour_picker.shtml
to have a sense of how the material would look with the RGB values as
well as specularity and roughness.
To understand more on reflectivity, specularity and roughness values
https://thinkmoult.com/radiance-specularity-and-roughness-value-examples.html
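Examples
--------
Illustrative sketch (not in the original docstring); ``demo`` is assumed to be
a RadianceObj instance and the material values are placeholders:
>>> demo.addMaterial('lightgrey', Rrefl=0.6, Grefl=0.6, Brefl=0.6,
...                  specularity=0.05, roughness=0.02)  # doctest: +SKIP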
"""
if material_file is None:
material_file = 'ground.rad'
matfile = os.path.join('materials', material_file)
with open(matfile, 'r') as fp:
buffer = fp.readlines()
# search buffer for material matching requested addition
found = False
for i in buffer:
if materialtype and material in i:
loc = buffer.index(i)
found = True
break
if found:
if rewrite:
print('Material exists, overwriting...\n')
if comment is None:
pre = loc - 1
else:
pre = loc - 2
# commit buffer without material match
with open(matfile, 'w') as fp:
for i in buffer[0:pre]:
fp.write(i)
for i in buffer[loc+4:]:
fp.write(i)
if (found and rewrite) or (not found):
# append -- This will create the file if it doesn't exist
file_object = open(matfile, 'a')
file_object.write("\n\n")
if comment is not None:
file_object.write("#{}".format(comment))
file_object.write("\nvoid {} {}".format(materialtype, material))
if materialtype == 'glass':
file_object.write("\n0\n0\n3 {} {} {}".format(Rrefl, Grefl, Brefl))
else:
file_object.write("\n0\n0\n5 {} {} {} {} {}".format(Rrefl, Grefl, Brefl, specularity, roughness))
file_object.close()
print('Added material {} to file {}'.format(material, material_file))
if (found and not rewrite):
print('Material already exists\n')
def exportTrackerDict(self, trackerdict=None,
savefile=None, reindex=None):
"""
Use :py:func:`~bifacial_radiance.load._exportTrackerDict` to save a
TrackerDict output as a csv file.
Parameters
----------
trackerdict
The tracker dictionary to save
savefile : str
path to .csv save file location
reindex : bool
True saves the trackerdict in TMY format, including rows for hours
where there is no sun/irradiance results (empty)
"""
import bifacial_radiance.load
if trackerdict is None:
trackerdict = self.trackerdict
if savefile is None:
savefile = _interactive_load(title='Select a .csv file to save to')
if reindex is None:
if self.cumulativesky is True:
# don't re-index for cumulativesky,
# which has angles for index
reindex = False
else:
reindex = True
if self.cumulativesky is True and reindex is True:
# don't re-index for cumulativesky,
# which has angles for index
print ("\n Warning: For cumulativesky simulations, exporting the "
"TrackerDict requires reindex = False. Setting reindex = "
"False and proceeding")
reindex = False
bifacial_radiance.load._exportTrackerDict(trackerdict,
savefile,
reindex)
def loadtrackerdict(self, trackerdict=None, fileprefix=None):
"""
Use :py:class:`bifacial_radiance.load._loadtrackerdict`
to browse the results directory and load back any results saved in there.
Parameters
----------
trackerdict
fileprefix : str
"""
from bifacial_radiance.load import loadTrackerDict
if trackerdict is None:
trackerdict = self.trackerdict
(trackerdict, totaldict) = loadTrackerDict(trackerdict, fileprefix)
self.Wm2Front = totaldict['Wm2Front']
self.Wm2Back = totaldict['Wm2Back']
def returnOctFiles(self):
"""
Return files in the root directory with `.oct` extension
Returns
-------
oct_files : list
List of .oct files
"""
oct_files = [f for f in os.listdir(self.path) if f.endswith('.oct')]
#self.oct_files = oct_files
return oct_files
def returnMaterialFiles(self, material_path=None):
"""
Return files in the Materials directory with .rad extension
appends materials files to the oconv file list
Parameters
----------
material_path : str
Optional parameter to point to a specific materials directory.
otherwise /materials/ is default
Returns
-------
material_files : list
List of .rad files
"""
if material_path is None:
material_path = 'materials'
material_files = [f for f in os.listdir(os.path.join(self.path,
material_path)) if f.endswith('.rad')]
materialfilelist = [os.path.join(material_path, f) for f in material_files]
self.materialfiles = materialfilelist
return materialfilelist
def setGround(self, material=None, material_file=None):
"""
Use GroundObj constructor class and return a ground object
Parameters
------------
material : numeric or str
If number between 0 and 1 is passed, albedo input is assumed and assigned.
If string is passed with the name of the material desired. e.g. 'litesoil',
properties are searched in `material_file`.
Default Material names to choose from: litesoil, concrete, white_EPDM,
beigeroof, beigeroof_lite, beigeroof_heavy, black, asphalt
material_file : str
Filename of the material information. Default `ground.rad`
Returns
-------
self.ground : tuple
self.ground.normval : numeric
Normalized color value
self.ground.ReflAvg : numeric
Average reflectance
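Examples
--------
Illustrative sketch (assumes ``demo`` is a RadianceObj instance):
>>> demo.setGround(0.25)        # numeric albedo  # doctest: +SKIP
>>> demo.setGround('litesoil')  # named material from ground.rad  # doctest: +SKIP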
"""
if material is None:
try:
if self.metdata.albedo is not None:
material = self.metdata.albedo
print(" Assigned Albedo from metdata.albedo")
except:
pass
self.ground = GroundObj(material, material_file)
def getEPW(self, lat=None, lon=None, GetAll=False):
"""
Subroutine to download nearest epw files to latitude and longitude provided,
into the directory \EPWs\
based on github/aahoo.
.. warning::
verify=false is required to operate within NREL's network.
to avoid annoying warnings, insecurerequestwarning is disabled
currently this function is not working within NREL's network. annoying!
Parameters
----------
lat : decimal
Latitude value used to find closest EPW file.
lon : decimal
Longitude value to find closest EPW file.
GetAll : boolean
Download all available files. Note that no epw file will be loaded into memory
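Examples
--------
Illustrative sketch (coordinates are placeholders; assumes ``demo`` is a
RadianceObj instance):
>>> epwfile = demo.getEPW(lat=37.5, lon=-77.6)  # doctest: +SKIP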
"""
import requests, re
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
hdr = {'User-Agent' : "Magic Browser",
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
}
path_to_save = 'EPWs' # create a directory and write the name of directory here
if not os.path.exists(path_to_save):
os.makedirs(path_to_save)
def _returnEPWnames():
''' return a dataframe with the name, lat, lon, url of available files'''
r = requests.get('https://github.com/NREL/EnergyPlus/raw/develop/weather/master.geojson', verify=False)
data = r.json() #metadata for available files
#download lat/lon and url details for each .epw file into a dataframe
df = pd.DataFrame({'url':[], 'lat':[], 'lon':[], 'name':[]})
for location in data['features']:
match = re.search(r'href=[\'"]?([^\'" >]+)', location['properties']['epw'])
if match:
url = match.group(1)
name = url[url.rfind('/') + 1:]
lontemp = location['geometry']['coordinates'][0]
lattemp = location['geometry']['coordinates'][1]
dftemp = pd.DataFrame({'url':[url], 'lat':[lattemp], 'lon':[lontemp], 'name':[name]})
#df = df.append(dftemp, ignore_index=True)
df = pd.concat([df, dftemp], ignore_index=True)
return df
def _findClosestEPW(lat, lon, df):
#locate the record with the nearest lat/lon
errorvec = np.sqrt(np.square(df.lat - lat) + np.square(df.lon - lon))
index = errorvec.idxmin()
url = df['url'][index]
name = df['name'][index]
return url, name
def _downloadEPWfile(url, path_to_save, name):
r = requests.get(url, verify=False, headers=hdr)
if r.ok:
filename = os.path.join(path_to_save, name)
# py2 and 3 compatible: binary write, encode text first
with open(filename, 'wb') as f:
f.write(r.text.encode('ascii', 'ignore'))
print(' ... OK!')
else:
print(' connection error status code: %s' %(r.status_code))
r.raise_for_status()
# Get the list of EPW filenames and lat/lon
df = _returnEPWnames()
# find the closest EPW file to the given lat/lon
if (lat is not None) & (lon is not None) & (GetAll is False):
url, name = _findClosestEPW(lat, lon, df)
# download the EPW file to the local drive.
print('Getting weather file: ' + name)
_downloadEPWfile(url, path_to_save, name)
self.epwfile = os.path.join('EPWs', name)
elif GetAll is True:
if input('Downloading ALL EPW files available. OK? [y/n]') == 'y':
# get all of the EPW files
for index, row in df.iterrows():
print('Getting weather file: ' + row['name'])
_downloadEPWfile(row['url'], path_to_save, row['name'])
self.epwfile = None
else:
print('Nothing returned. Proper usage: epwfile = getEPW(lat,lon)')
self.epwfile = None
return self.epwfile
def readWeatherFile(self, weatherFile=None, starttime=None,
endtime=None, label=None, source=None,
coerce_year=None, tz_convert_val=None):
"""
Read either a EPW or a TMY file, calls the functions
:py:class:`~bifacial_radiance.readTMY` or
:py:class:`~bifacial_radiance.readEPW`
according to the weatherfile extention.
Parameters
----------
weatherFile : str
File containing the weather information. EPW, TMY or solargis accepted.
starttime : str
Limited start time option in 'YYYY-mm-dd_HHMM' or 'mm_dd_HH' format
endtime : str
Limited end time option in 'YYYY-mm-dd_HHMM' or 'mm_dd_HH' format
daydate : str DEPRECATED
For single day in 'MM/DD' or MM_DD format. Now use starttime and
endtime set to the same date.
label : str
'left', 'right', or 'center'. For data that is averaged, defines if
the timestamp refers to the left edge, the right edge, or the
center of the averaging interval, for purposes of calculating
sunposition. For example, TMY3 data is right-labeled, so 11 AM data
represents data from 10 to 11, and sun position is calculated
at 10:30 AM. Currently SAM and PVSyst use left-labeled interval
data and NSRDB uses centered.
source : str
To help identify different types of .csv files. If None, it assumes
it is TMY3-style formatted data. Current options: 'TMY3',
'solargis', 'EPW'
coerce_year : int
Year to coerce weather data to in YYYY format, ie 2021.
If more than one year of data in the weather file, year is NOT coerced.
tz_convert_val : int
Convert timezone to this fixed value, following ISO standard
(negative values indicating West of UTC.)
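Examples
--------
Illustrative sketch (the weather file and time window are placeholders):
>>> metdata = demo.readWeatherFile(epwfile,
...     starttime='2021-06-01_0100', endtime='2021-06-30_2300',
...     coerce_year=2021)  # doctest: +SKIP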
"""
#from datetime import datetime
import warnings
if weatherFile is None:
if hasattr(self,'epwfile'):
weatherFile = self.epwfile
else:
try:
weatherFile = _interactive_load('Select EPW or TMY3 climate file')
except:
raise Exception('Interactive load failed. Tkinter not supported'+
'on this system. Try installing X-Quartz and reloading')
if coerce_year is not None:
coerce_year = int(coerce_year)
if str(coerce_year).__len__() != 4:
warnings.warn('Incorrect coerce_year. Setting to None')
coerce_year = None
def _parseTimes(t, hour, coerce_year):
'''
parse time input t which could be string mm_dd_HH or YYYY-mm-dd_HHMM
or datetime.datetime object. Return pd.datetime object. Define
hour as hour input if not passed directly.
'''
import re
if type(t) == str:
try:
tsplit = re.split('-|_| ', t)
#mm_dd format
if tsplit.__len__() == 2 and t.__len__() == 5:
if coerce_year is None:
coerce_year = 2021 #default year.
tsplit.insert(0,str(coerce_year))
tsplit.append(str(hour).rjust(2,'0')+'00')
#mm_dd_hh or YYYY_mm_dd format
elif tsplit.__len__() == 3 :
if tsplit[0].__len__() == 2:
if coerce_year is None:
coerce_year = 2021 #default year.
tsplit.insert(0,str(coerce_year))
elif tsplit[0].__len__() == 4:
tsplit.append(str(hour).rjust(2,'0')+'00')
#YYYY-mm-dd_HHMM format
if tsplit.__len__() == 4 and tsplit[0].__len__() == 4:
t_out = pd.to_datetime(''.join(tsplit).ljust(12,'0') )
else:
raise Exception(f'incorrect time string passed {t}.'
'Valid options: mm_dd, mm_dd_HH, '
'mm_dd_HHMM, YYYY-mm-dd_HHMM')
except Exception as e:
# Error for incorrect string passed:
raise(e)
else: #datetime or timestamp
try:
t_out = pd.to_datetime(t)
except pd.errors.ParserError:
print('incorrect time object passed. Valid options: '
'string or datetime.datetime or pd.timeIndex. You '
f'passed {type(t)}.')
return t_out, coerce_year
# end _parseTimes
def _tz_convert(metdata, metadata, tz_convert_val):
"""
Convert metdata to a different local timezone. Particularly for
SolarGIS weather files which are returned in UTC by default.
----------
tz_convert_val : int
Convert timezone to this fixed value, following ISO standard
(negative values indicating West of UTC.)
Returns: metdata, metadata
"""
import pytz
if (type(tz_convert_val) == int) | (type(tz_convert_val) == float):
metadata['TZ'] = tz_convert_val
metdata = metdata.tz_convert(pytz.FixedOffset(tz_convert_val*60))
return metdata, metadata
# end _tz_convert
if source is None:
if weatherFile[-3:].lower() == 'epw':
source = 'EPW'
else:
print('Warning: CSV file passed for input. Assuming it is TMY3 '+
'style format')
source = 'TMY3'
if label is None:
label = 'right' # EPW and TMY are by default right-labeled.
if source.lower() == 'solargis':
if label is None:
label = 'center'
metdata, metadata = self._readSOLARGIS(weatherFile, label=label)
if source.lower() =='epw':
metdata, metadata = self._readEPW(weatherFile, label=label)
if source.lower() =='tmy3':
metdata, metadata = self._readTMY(weatherFile, label=label)
metdata, metadata = _tz_convert(metdata, metadata, tz_convert_val)
tzinfo = metdata.index.tzinfo
tempMetDatatitle = 'metdata_temp.csv'
# Parse the start and endtime strings.
if starttime is not None:
starttime, coerce_year = _parseTimes(starttime, 1, coerce_year)
starttime = starttime.tz_localize(tzinfo)
if endtime is not None:
endtime, coerce_year = _parseTimes(endtime, 23, coerce_year)
endtime = endtime.tz_localize(tzinfo)
'''
#TODO: do we really need this check?
if coerce_year is not None and starttime is not None:
if coerce_year != starttime.year or coerce_year != endtime.year:
print("Warning: Coerce year does not match requested sampled "+
"date(s)'s years. Setting Coerce year to None.")
coerce_year = None
'''
tmydata_trunc = self._saveTempTMY(metdata, filename=tempMetDatatitle,
starttime=starttime, endtime=endtime,
coerce_year=coerce_year,
label=label)
if tmydata_trunc.__len__() > 0:
self.metdata = MetObj(tmydata_trunc, metadata, label = label)
else:
self.metdata = None
raise Exception('Weather file returned zero points for the '
'starttime / endtime provided')
return self.metdata
def _saveTempTMY(self, tmydata, filename=None, starttime=None, endtime=None,
coerce_year=None, label=None):
'''
private function to save part or all of tmydata into /EPWs/ for use
in gencumsky -G mode and return truncated tmydata. Gencumsky 8760
starts with Jan 1, 1AM and ends Dec 31, 2400
starttime: tz-localized pd.TimeIndex
endtime: tz-localized pd.TimeIndex
returns: tmydata_truncated : subset of tmydata based on start & end
'''
if filename is None:
filename = 'temp.csv'
gencumskydata = None
gencumdict = None
if len(tmydata) == 8760:
print("8760 line in WeatherFile. Assuming this is a standard hourly"+
" WeatherFile for the year for purposes of saving Gencumulativesky"+
" temporary weather files in EPW folder.")
if coerce_year is None and starttime is not None:
coerce_year = starttime.year
# SILVANA: If user doesn't pass starttime, and doesn't select
# coerce_year, then do we really need to coerce it?
elif coerce_year is None:
coerce_year = 2021
print(f"Coercing year to {coerce_year}")
with warnings.catch_warnings():
warnings.simplefilter("ignore")
tmydata.index.values[:] = tmydata.index[:] + pd.DateOffset(year=(coerce_year))
# Correcting last index to next year.
tmydata.index.values[-1] = tmydata.index[-1] + pd.DateOffset(year=(coerce_year+1))
# FilterDates
filterdates = None
if starttime is not None and endtime is not None:
filterdates = (tmydata.index >= starttime) & (tmydata.index <= endtime)
else:
if starttime is not None:
filterdates = (tmydata.index >= starttime)
if endtime is not None:
filterdates = (tmydata.index <= endtime)
if filterdates is not None:
print("Filtering dates")
tmydata[~filterdates] = 0
gencumskydata = tmydata.copy()
else:
if len(tmydata.index.year.unique()) == 1:
if coerce_year:
# TODO: check why subhourly data still has 0 entries on the next day on _readTMY3
# in the meantime, let's make Silvana's life easy by just deleting 0 entries
tmydata = tmydata[~(tmydata.index.hour == 0)]
print(f"Coercing year to {coerce_year}")
# TODO: this coercing shows a python warning. Turn it off or find another method? bleh.
tmydata.index.values[:] = tmydata.index[:] + pd.DateOffset(year=(coerce_year))
# FilterDates
filterdates = None
if starttime is not None and endtime is not None:
filterdates = (tmydata.index >= starttime) & (tmydata.index <= endtime)
else:
if starttime is not None:
filterdates = (tmydata.index >= starttime)
if endtime is not None:
filterdates = (tmydata.index <= endtime)
if filterdates is not None:
print("Filtering dates")
tmydata[~filterdates] = 0
gencumskydata = tmydata.copy()
gencumskydata = _subhourlydatatoGencumskyformat(gencumskydata,
label=label)
else:
if coerce_year:
print("More than 1 year of data identified. Can't do coercing")
# Check if years are consecutive
l = list(tmydata.index.year.unique())
if l != list(range(min(l), max(l)+1)):
print("Years are not consecutive. Won't be able to use Gencumsky"+
" because who knows what's going on with this data.")
else:
print("Years are consecutive. For Gencumsky, make sure to select"+
" which yearly temporary weather file you want to use"+
" else they will all get accumulated to same hour/day")
# FilterDates
filterdates = None
if starttime is not None and endtime is not None:
filterdates = (tmydata.index >= starttime) & (tmydata.index <= endtime)
else:
if starttime is not None:
filterdates = (tmydata.index >= starttime)
if endtime is not None:
filterdates = (tmydata.index <= endtime)
if filterdates is not None:
print("Filtering dates")
tmydata = tmydata[filterdates] # Reducing years potentially
# Checking if filtering reduced to just 1 year to do the usual saving.
if len(tmydata.index.year.unique()) == 1:
gencumskydata = tmydata.copy()
gencumskydata = _subhourlydatatoGencumskyformat(gencumskydata,
label=label)
else:
gencumdict = [g for n, g in tmydata.groupby(pd.Grouper(freq='Y'))]
for ii in range(0, len(gencumdict)):
gencumskydata = gencumdict[ii]
gencumskydata = _subhourlydatatoGencumskyformat(gencumskydata,
label=label)
gencumdict[ii] = gencumskydata
gencumskydata = None # clearing so that the dictionary style can be activated.
# Let's save files in EPWs folder for Gencumsky
if gencumskydata is not None:
csvfile = os.path.join('EPWs', filename)
print('Saving file {}, # points: {}'.format(csvfile, gencumskydata.__len__()))
gencumskydata.to_csv(csvfile, index=False, header=False, sep=' ', columns=['GHI','DHI'])
self.gencumsky_metfile = csvfile
if gencumdict is not None:
self.gencumsky_metfile = []
for ii in range (0, len(gencumdict)):
gencumskydata = gencumdict[ii]
newfilename = filename.split('.')[0]+'_year_'+str(ii)+'.csv'
csvfile = os.path.join('EPWs', newfilename)
print('Saving file {}, # points: {}'.format(csvfile, gencumskydata.__len__()))
gencumskydata.to_csv(csvfile, index=False, header=False, sep=' ', columns=['GHI','DHI'])
self.gencumsky_metfile.append(csvfile)
return tmydata
def _readTMY(self, tmyfile=None, label = 'right', coerce_year=None):
'''
use pvlib to read in a tmy3 file.
Note: pvlib 0.7 does not currently support sub-hourly files. Until
then, use _readTMYdate() to create the index
Parameters
------------
tmyfile : str
Filename of tmy3 to be read with pvlib.tmy.readtmy3
label : str
'left', 'right', or 'center'. For data that is averaged, defines if
the timestamp refers to the left edge, the right edge, or the
center of the averaging interval, for purposes of calculating
sunposition. For example, TMY3 data is right-labeled, so 11 AM data
represents data from 10 to 11, and sun position is calculated
at 10:30 AM. Currently SAM and PVSyst use left-labeled interval
data and NSRDB uses centered.
coerce_year : int
Year to coerce to. Default is 2021.
Returns
-------
metdata - MetObj collected from TMY3 file
'''
def _convertTMYdate(data, meta):
''' requires pvlib 0.8, updated to handle subhourly timestamps '''
# get the date column as a pd.Series of numpy datetime64
data_ymd = pd.to_datetime(data['Date (MM/DD/YYYY)'])
# shift the time column so that midnite is 00:00 instead of 24:00
shifted_hour = data['Time (HH:MM)'].str[:2].astype(int) % 24
minute = data['Time (HH:MM)'].str[3:].astype(int)
# shift the dates at midnite so they correspond to the next day
data_ymd[shifted_hour == 0] += datetime.timedelta(days=1)
# NOTE: as of pandas>=0.24 the pd.Series.array has a month attribute, but
# in pandas-0.18.1, only DatetimeIndex has month, but indices are immutable
# so we need to continue to work with the panda series of dates `data_ymd`
data_index = pd.DatetimeIndex(data_ymd)
# use indices to check for a leap day and advance it to March 1st
leapday = (data_index.month == 2) & (data_index.day == 29)
data_ymd[leapday] += datetime.timedelta(days=1)
# shifted_hour is a pd.Series, so use pd.to_timedelta to get a pd.Series of
# timedeltas
# NOTE: as of pvlib-0.6.3, min req is pandas-0.18.1, so pd.to_timedelta
# unit must be in (D,h,m,s,ms,us,ns), but pandas>=0.24 allows unit='hour'
data.index = (data_ymd + pd.to_timedelta(shifted_hour, unit='h') +
pd.to_timedelta(minute, unit='min') )
data = data.tz_localize(int(meta['TZ'] * 3600))
return data
import pvlib
#(tmydata, metadata) = pvlib.tmy.readtmy3(filename=tmyfile) #pvlib<=0.6
(tmydata, metadata) = pvlib.iotools.tmy.read_tmy3(filename=tmyfile,
coerce_year=coerce_year)
try:
tmydata = _convertTMYdate(tmydata, metadata)
except KeyError:
print('PVLib >= 0.8.0 is required for sub-hourly data input')
return tmydata, metadata
def _readEPW(self, epwfile=None, label = 'right', coerce_year=None):
"""
Uses readepw from pvlib>0.6.1 but un-do -1hr offset and
rename columns to match TMY3: DNI, DHI, GHI, DryBulb, Wspd
Parameters
------------
epwfile : str
Direction and filename of the epwfile. If None, opens an interactive
loading window.
label : str
'left', 'right', or 'center'. For data that is averaged, defines if
the timestamp refers to the left edge, the right edge, or the
center of the averaging interval, for purposes of calculating
sunposition. For example, TMY3 data is right-labeled, so 11 AM data
represents data from 10 to 11, and sun position is calculated
at 10:30 AM. Currently SAM and PVSyst use left-labeled interval
data and NSRDB uses centered.
coerce_year : int
Year to coerce data to.
"""
import pvlib
#import re
'''
NOTE: In PVLib > 0.6.1 the new epw.read_epw() function reads in time
with a default -1 hour offset. This is reflected in our existing
workflow.
'''
#(tmydata, metadata) = readepw(epwfile) #
(tmydata, metadata) = pvlib.iotools.epw.read_epw(epwfile,
coerce_year=coerce_year) #pvlib>0.6.1
#pvlib uses -1hr offset that needs to be un-done. Why did they do this?
tmydata.index = tmydata.index+pd.Timedelta(hours=1)
# rename different field parameters to match output from
# pvlib.tmy.readtmy: DNI, DHI, DryBulb, Wspd
tmydata.rename(columns={'dni':'DNI',
'dhi':'DHI',
'temp_air':'DryBulb',
'wind_speed':'Wspd',
'ghi':'GHI',
'albedo':'Alb'
}, inplace=True)
return tmydata, metadata
def _readSOLARGIS(self, filename=None, label='center'):
"""
Read solarGIS data file which is timestamped in UTC.
rename columns to match TMY3: DNI, DHI, GHI, DryBulb, Wspd
Timezone is always returned as UTC. Use tz_convert in readWeatherFile
to manually convert to local time
Parameters
------------
filename : str
filename of the solarGIS file.
label : str
'left', 'right', or 'center'. For data that is averaged, defines if
the timestamp refers to the left edge, the right edge, or the
center of the averaging interval. SolarGis default style is center,
unless user requests a right label.
"""
# file format: anything with # preceding is in the header
header = []; lat = None; lon = None; elev = None; name = None
with open(filename, 'r') as result:
for line in result:
if line.startswith('#'):
header.append(line)
if line.startswith('#Latitude:'):
lat = line[11:]
if line.startswith('#Longitude:'):
lon = line[12:]
if line.startswith('#Elevation:'):
elev = line[12:17]
if line.startswith('#Site name:'):
name = line[12:-1]
else:
break
metadata = {'latitude':float(lat),
'longitude':float(lon),
'altitude':float(elev),
'Name':name,
'TZ':0.0}
# read in remainder of data
data = pd.read_csv(filename,skiprows=header.__len__(), delimiter=';')
# rename different field parameters to match output from
# pvlib.tmy.readtmy: DNI, DHI, DryBulb, Wspd
data.rename(columns={'DIF':'DHI',
'TEMP':'DryBulb',
'WS':'Wspd',
}, inplace=True)
# Generate index from Date (DD.HH.YYYY) and Time
data.index = pd.to_datetime(data.Date + ' ' + data.Time,
dayfirst=True, utc=True,
infer_datetime_format = True)
return data, metadata
def getSingleTimestampTrackerAngle(self, metdata, timeindex, gcr=None,
azimuth=180, axis_tilt=0,
limit_angle=45, backtrack=True):
"""
Helper function to calculate a tracker's angle for use with the
fixed tilt routines of bifacial_radiance. It calculates tracker angle for
sun position at the timeindex passed (no left or right time offset,
label = 'center')
Parameters
----------
metdata : :py:class:`~bifacial_radiance.MetObj`
Meterological object to set up geometry. Usually set automatically by
`bifacial_radiance` after running :py:class:`bifacial_radiance.readepw`.
Default = self.metdata
timeindex : int
Index between 0 to 8760 indicating hour to simulate.
gcr : float
Ground coverage ratio for calculating backtracking. Default [1.0/3.0]
azimuth : float or int
Orientation axis of tracker torque tube. Default North-South (180 deg)
axis_tilt : float or int
Default 0. Axis tilt -- not implemented in sensor locations, so changing it
has no effect in this release.
limit_angle : float or int
Limit angle (+/-) of the 1-axis tracker in degrees. Default 45
backtrack : boolean
Whether backtracking is enabled (default = True)
"""
'''
elev = metdata.elevation
lat = metdata.latitude
lon = metdata.longitude
timestamp = metdata.datetime[timeindex]
'''
import pvlib
solpos = metdata.solpos.iloc[timeindex]
sunzen = float(solpos.apparent_zenith)
sunaz = float(solpos.azimuth) # not subtracting the 180
trackingdata = pvlib.tracking.singleaxis(sunzen, sunaz,
axis_tilt, azimuth,
limit_angle, backtrack, gcr)
tracker_theta = float(np.round(trackingdata['tracker_theta'],2))
tracker_theta = tracker_theta*-1 # bifacial_radiance uses East (morning) theta as positive
return tracker_theta
def gendaylit(self, timeindex, metdata=None, debug=False):
"""
Sets and returns sky information using gendaylit.
Uses PVLIB for calculating the sun position angles instead of
using Radiance internal sun position calculation (for that use gendaylit function)
Parameters
----------
timeindex : int
Index from 0 to ~4000 of the MetObj (daylight hours only)
metdata : ``MetObj``
MetObj object with list of dni, dhi, ghi and location
debug : bool
Flag to print output of sky DHI and DNI
Returns
-------
skyname : str
Sets as a self.skyname and returns filename of sky in /skies/ directory.
If errors exist, such as DNI = 0 or sun below horizon, this skyname is None
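Examples
--------
Illustrative sketch (the index is a placeholder; assumes readWeatherFile() and
setGround() were already run on ``demo``):
>>> skyname = demo.gendaylit(timeindex=4020)  # doctest: +SKIP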
"""
import warnings
if metdata is None:
try:
metdata = self.metdata
except:
print('usage: pass metdata, or run after running ' +
'readWeatherfile() ')
return
ground = self.ground
locName = metdata.city
dni = metdata.dni[timeindex]
dhi = metdata.dhi[timeindex]
ghi = metdata.ghi[timeindex]
elev = metdata.elevation
lat = metdata.latitude
lon = metdata.longitude
# Assign Albedos
try:
if ground.ReflAvg.shape == metdata.dni.shape:
groundindex = timeindex
elif self.ground.ReflAvg.shape[0] == 1: # just 1 entry
groundindex = 0
else:
warnings.warn("Shape of ground Albedos and TMY data do not match.")
return
except:
print('usage: make sure to run setGround() before gendaylit()')
return
if debug is True:
print('Sky generated with Gendaylit, with DNI: %0.1f, DHI: %0.1f' % (dni, dhi))
print("Datetime TimeIndex", metdata.datetime[timeindex])
#Time conversion to correct format and offset.
#datetime = metdata.sunrisesetdata['corrected_timestamp'][timeindex]
#Don't need any of this any more. Already sunrise/sunset corrected and offset by appropriate interval
# get solar position zenith and azimuth based on site metadata
#solpos = pvlib.irradiance.solarposition.get_solarposition(datetimetz,lat,lon,elev)
solpos = metdata.solpos.iloc[timeindex]
sunalt = float(solpos.elevation)
# Radiance expects azimuth South = 0, PVlib gives South = 180. Must subtract 180 to match.
sunaz = float(solpos.azimuth)-180.0
sky_path = 'skies'
if dhi <= 0:
self.skyfiles = [None]
return None
# We should already be filtering for elevation >0. But just in case...
if sunalt <= 0:
sunalt = np.arcsin((ghi-dhi)/(dni+.001))*180/np.pi # reverse engineer elevation from ghi, dhi, dni
print('Warning: negative sun elevation at '+
'{}. '.format(metdata.datetime[timeindex])+
'Re-calculated elevation: {:0.2}'.format(sunalt))
# Note - -W and -O1 option is used to create full spectrum analysis in units of Wm-2
#" -L %s %s -g %s \n" %(dni/.0079, dhi/.0079, self.ground.ReflAvg) + \
skyStr = ("# start of sky definition for daylighting studies\n" + \
"# location name: " + str(locName) + " LAT: " + str(lat)
+" LON: " + str(lon) + " Elev: " + str(elev) + "\n"
"# Sun position calculated w. PVLib\n" + \
"!gendaylit -ang %s %s" %(sunalt, sunaz)) + \
" -W %s %s -g %s -O 1 \n" %(dni, dhi, ground.ReflAvg[groundindex]) + \
"skyfunc glow sky_mat\n0\n0\n4 1 1 1 0\n" + \
"\nsky_mat source sky\n0\n0\n4 0 0 1 180\n" + \
ground._makeGroundString(index=groundindex, cumulativesky=False)
time = metdata.datetime[timeindex]
#filename = str(time)[2:-9].replace('-','_').replace(' ','_').replace(':','_')
filename = time.strftime('%Y-%m-%d_%H%M')
skyname = os.path.join(sky_path,"sky2_%s_%s_%s.rad" %(lat, lon, filename))
skyFile = open(skyname, 'w')
skyFile.write(skyStr)
skyFile.close()
self.skyfiles = [skyname]
return skyname
def gendaylit2manual(self, dni, dhi, sunalt, sunaz):
"""
Sets and returns sky information using gendaylit.
Uses user-provided data for sun position and irradiance.
.. warning::
This generates the sky at the sun altitude&azimuth provided, make
sure it is the right position relative to how the weather data got
created and read (i.e. label right, left or center).
Parameters
------------
dni: int or float
Direct Normal Irradiance (DNI) value, in W/m^2
dhi : int or float
Diffuse Horizontal Irradiance (DHI) value, in W/m^2
sunalt : int or float
Sun altitude (degrees)
sunaz : int or float
Sun azimuth (degrees)
Returns
-------
skyname : string
Filename of sky in /skies/ directory
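Examples
--------
Illustrative sketch (irradiance and sun-position values are placeholders;
sunaz=0 is South in the Radiance convention used here):
>>> skyname = demo.gendaylit2manual(dni=700, dhi=100,
...                                 sunalt=45, sunaz=0)  # doctest: +SKIP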
"""
print('Sky generated with Gendaylit 2 MANUAL, with DNI: %0.1f, DHI: %0.1f' % (dni, dhi))
sky_path = 'skies'
if sunalt <= 0 or dhi <= 0:
self.skyfiles = [None]
return None
# Assign Albedos
try:
if self.ground.ReflAvg.shape[0] == 1: # just 1 entry
groundindex = 0
else:
print("Ambiguous albedo entry, Set albedo to single value "
"in setGround()")
return
except:
print('usage: make sure to run setGround() before gendaylit()')
return
# Note: -W and -O1 are used to create full spectrum analysis in units of Wm-2
#" -L %s %s -g %s \n" %(dni/.0079, dhi/.0079, self.ground.ReflAvg) + \
skyStr = ("# start of sky definition for daylighting studies\n" + \
"# Manual inputs of DNI, DHI, SunAlt and SunAZ into Gendaylit used \n" + \
"!gendaylit -ang %s %s" %(sunalt, sunaz)) + \
" -W %s %s -g %s -O 1 \n" %(dni, dhi, self.ground.ReflAvg[groundindex]) + \
"skyfunc glow sky_mat\n0\n0\n4 1 1 1 0\n" + \
"\nsky_mat source sky\n0\n0\n4 0 0 1 180\n" + \
self.ground._makeGroundString(index=groundindex, cumulativesky=False)
skyname = os.path.join(sky_path, "sky2_%s.rad" %(self.name))
skyFile = open(skyname, 'w')
skyFile.write(skyStr)
skyFile.close()
self.skyfiles = [skyname]
return skyname
def genCumSky(self, gencumsky_metfile=None, savefile=None):
"""
Generate Skydome using gencumsky.
.. warning::
gencumulativesky.exe is required to be installed,
which is not a standard radiance distribution.
You can find the program in the bifacial_radiance distribution directory
in \Lib\site-packages\bifacial_radiance\data
Use :func:`readWeatherFile(filename, starttime='YYYY-mm-dd_HHMM', endtime='YYYY-mm-dd_HHMM')`
to limit gencumsky simulations instead.
Parameters
------------
gencumsky_metfile : str
Filename with path to temporary created meteorological file usually created
in EPWs folder. This csv file has no headers, no index, and two
space separated columns with values for GHI and DNI for each hour
in the year, and MUST be 8760 entries long, otherwise gencumulativesky.exe cries.
savefile : string
If savefile is None, defaults to "cumulative"
Returns
--------
skyname : str
Filename of the .rad file containing cumulativesky info
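Examples
--------
Illustrative sketch (assumes readWeatherFile() already wrote the temporary met
file and setGround() was run):
>>> skyname = demo.genCumSky()  # doctest: +SKIP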
"""
# TODO: error checking and auto-install of gencumulativesky.exe
# TODO: add check if readWeatherfile has not be done
# TODO: check if it fails if gcc module has been loaded? (common hpc issue)
#import datetime
if gencumsky_metfile is None:
gencumsky_metfile = self.gencumsky_metfile
if isinstance(gencumsky_metfile, str):
print("Loaded ", gencumsky_metfile)
if isinstance(gencumsky_metfile, list):
print("There are more than 1 year of gencumsky temporal weather file saved."+
"You can pass which file you want with gencumsky_metfile input. Since "+
"No year was selected, defaulting to using the first year of the list")
gencumsky_metfile = gencumsky_metfile[0]
print("Loaded ", gencumsky_metfile)
if savefile is None:
savefile = "cumulative"
sky_path = 'skies'
lat = self.metdata.latitude
lon = self.metdata.longitude
timeZone = self.metdata.timezone
'''
cmd = "gencumulativesky +s1 -h 0 -a %s -o %s -m %s %s " %(lat, lon, float(timeZone)*15, filetype) +\
"-time %s %s -date %s %s %s %s %s" % (startdt.hour, enddt.hour+1,
startdt.month, startdt.day,
enddt.month, enddt.day,
gencumsky_metfile)
'''
cmd = (f"gencumulativesky +s1 -h 0 -a {lat} -o {lon} -m "
f"{float(timeZone)*15} -G {gencumsky_metfile}" )
with open(savefile+".cal","w") as f:
_,err = _popen(cmd, None, f)
if err is not None:
print(err)
# Assign Albedos
try:
groundstring = self.ground._makeGroundString(cumulativesky=True)
except:
raise Exception('Error: ground reflection not defined. '
'Run RadianceObj.setGround() first')
return
skyStr = "#Cumulative Sky Definition\n" +\
"void brightfunc skyfunc\n" + \
"2 skybright " + "%s.cal\n" % (savefile) + \
"0\n" + \
"0\n" + \
"\nskyfunc glow sky_glow\n" + \
"0\n" + \
"0\n" + \
"4 1 1 1 0\n" + \
"\nsky_glow source sky\n" + \
"0\n" + \
"0\n" + \
"4 0 0 1 180\n" + \
groundstring
skyname = os.path.join(sky_path, savefile+".rad")
skyFile = open(skyname, 'w')
skyFile.write(skyStr)
skyFile.close()
self.skyfiles = [skyname]#, 'SunFile.rad' ]
return skyname
def set1axis(self, metdata=None, azimuth=180, limit_angle=45,
angledelta=5, backtrack=True, gcr=1.0 / 3, cumulativesky=True,
fixed_tilt_angle=None, useMeasuredTrackerAngle=False,
axis_azimuth=None):
"""
Set up geometry for 1-axis tracking. Pull in tracking angle details from
pvlib, create multiple 8760 metdata sub-files where datetime of met data
matches the tracking angle. Returns 'trackerdict' which has keys equal to
either the tracker angles (gencumsky workflow) or timestamps (gendaylit hourly
workflow)
Parameters
------------
metdata : :py:class:`~bifacial_radiance.MetObj`
Meterological object to set up geometry. Usually set automatically by
`bifacial_radiance` after running :py:class:`bifacial_radiance.readepw`.
Default = self.metdata
azimuth : numeric
Orientation axis of tracker torque tube. Default North-South (180 deg).
For fixed-tilt configuration, input is fixed azimuth (180 is south)
limit_angle : numeric
Limit angle (+/-) of the 1-axis tracker in degrees. Default 45
angledelta : numeric
Degree of rotation increment to parse irradiance bins. Default 5 degrees.
(0.4 % error for DNI). Other options: 4 (.25%), 2.5 (0.1%).
Note: the smaller the angledelta, the more simulations must be run.
backtrack : bool
Whether backtracking is enabled (default = True)
gcr : float
Ground coverage ratio for calculating backtracking. Default [1.0/3.0]
cumulativesky : bool
[True] Whether individual csv files are
created with constant tilt angle for the cumulativesky approach.
If False, the gendaylit tracking approach must be used.
fixed_tilt_angle : numeric
If passed, this changes to a fixed tilt simulation where each hour
uses fixed_tilt_angle and axis_azimuth as the tilt and azimuth
useMeasuredTrackerAngle: Bool
If True, and data for tracker angles has been passed by being included
in the WeatherFile object (column name 'Tracker Angle (degrees)'),
then tracker angles will be set to these values instead of being calculated.
NOTE that the value for azimuth passed to set1axis must be surface
azimuth in the morning and not the axis_azimuth
(i.e. for a N-S HSAT, azimuth = 90).
axis_azimuth : numeric
DEPRECATED. Returns a deprecation warning. Pass the tracker
axis_azimuth through to azimuth input instead.
Returns
-------
trackerdict : dictionary
Keys represent tracker tilt angles (gencumsky) or timestamps (gendaylit)
and list of csv metfile, and datetimes at that angle
trackerdict[angle]['csvfile';'surf_azm';'surf_tilt';'UTCtime']
- or -
trackerdict[time]['tracker_theta';'surf_azm';'surf_tilt']
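Examples
--------
Illustrative sketch (parameter values are placeholders):
>>> trackerdict = demo.set1axis(limit_angle=45, backtrack=True,
...                             gcr=0.35, cumulativesky=True)  # doctest: +SKIP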
"""
# Documentation check:
# Removed Internal variables
# -------
# metdata.solpos dataframe with solar position data
# metdata.surface_azimuth list of tracker azimuth data
# metdata.surface_tilt list of tracker surface tilt data
# metdata.tracker_theta list of tracker tilt angle
import warnings
if metdata == None:
metdata = self.metdata
if metdata == {}:
raise Exception("metdata doesnt exist yet. "+
"Run RadianceObj.readWeatherFile() ")
if axis_azimuth:
azimuth = axis_azimuth
warnings.warn("axis_azimuth is deprecated in set1axis; use azimuth "
"input instead.", DeprecationWarning)
#backtrack = True # include backtracking support in later version
#gcr = 1.0/3.0 # default value - not used if backtrack = False.
# get 1-axis tracker angles for this location, rounded to nearest 'angledelta'
trackerdict = metdata._set1axis(cumulativesky=cumulativesky,
azimuth=azimuth,
limit_angle=limit_angle,
angledelta=angledelta,
backtrack=backtrack,
gcr=gcr,
fixed_tilt_angle=fixed_tilt_angle,
useMeasuredTrackerAngle=useMeasuredTrackerAngle
)
self.trackerdict = trackerdict
self.cumulativesky = cumulativesky
return trackerdict
def gendaylit1axis(self, metdata=None, trackerdict=None, startdate=None,
enddate=None, debug=False):
"""
1-axis tracking implementation of gendaylit.
Creates multiple sky files, one for each time of day.
Parameters
------------
metdata
MetObj output from readWeatherFile. Needs to have
RadianceObj.set1axis() run on it first.
startdate : str
DEPRECATED, does not do anything now.
Recommended to downselect metdata when reading Weather File.
enddate : str
DEPRECATED, does not do anything now.
Recommended to downselect metdata when reading Weather File.
trackerdict : dictionary
Dictionary with keys for tracker tilt angles (gencumsky) or timestamps (gendaylit)
Returns
-------
Updated trackerdict dictionary
Dictionary with keys for tracker tilt angles (gencumsky) or timestamps (gendaylit)
with the additional dictionary value ['skyfile'] added
"""
if metdata is None:
metdata = self.metdata
if trackerdict is None:
try:
trackerdict = self.trackerdict
except AttributeError:
print('No trackerdict value passed or available in self')
if startdate is not None or enddate is not None:
print("Deprecation Warning: gendyalit1axis no longer downselects"+
" entries by stardate and enddate. Downselect your data"+
" when loading with readWeatherFile")
return
try:
metdata.tracker_theta # this may not exist
except AttributeError:
print("metdata.tracker_theta doesn't exist. Run RadianceObj.set1axis() first")
if debug is False:
print('Creating ~%d skyfiles. '%(len(trackerdict.keys())))
count = 0 # counter to get number of skyfiles created, just for giggles
trackerdict2={}
for i in range(0, len(trackerdict.keys())):
try:
time = metdata.datetime[i]
except IndexError: #out of range error
break #
#filename = str(time)[5:-12].replace('-','_').replace(' ','_')
filename = time.strftime('%Y-%m-%d_%H%M')
self.name = filename
#check for GHI > 0
#if metdata.ghi[i] > 0:
if (metdata.ghi[i] > 0) & (~np.isnan(metdata.tracker_theta[i])):
skyfile = self.gendaylit(metdata=metdata,timeindex=i, debug=debug)
# trackerdict2 reduces the dict to only the range specified.
trackerdict2[filename] = trackerdict[filename]
trackerdict2[filename]['skyfile'] = skyfile
count +=1
print('Created {} skyfiles in /skies/'.format(count))
self.trackerdict = trackerdict2
return trackerdict2
def genCumSky1axis(self, trackerdict=None):
"""
1-axis tracking implementation of gencumulativesky.
Creates multiple .cal files and .rad files, one for each tracker angle.
Use :func:`readWeatherFile` to limit gencumsky simulations
Parameters
------------
trackerdict : dictionary
Trackerdict generated as output by RadianceObj.set1axis()
Returns
-------
trackerdict : dictionary
Trackerdict dictionary with new entry trackerdict.skyfile
Appends 'skyfile' to the 1-axis dict with the location of the sky .radfile
"""
if trackerdict == None:
try:
trackerdict = self.trackerdict
except AttributeError:
print('No trackerdict value passed or available in self')
for theta in sorted(trackerdict):
# call gencumulativesky with a new .cal and .rad name
csvfile = trackerdict[theta]['csvfile']
savefile = '1axis_%s'%(theta) #prefix for .cal file and skies\*.rad file
skyfile = self.genCumSky(gencumsky_metfile=csvfile, savefile=savefile)
trackerdict[theta]['skyfile'] = skyfile
print('Created skyfile %s'%(skyfile))
# delete default skyfile (not strictly necessary)
self.skyfiles = None
self.trackerdict = trackerdict
return trackerdict
def makeOct(self, filelist=None, octname=None):
"""
Combine everything together into a .oct file
Parameters
----------
filelist : list
Files to include. otherwise takes self.filelist
octname : str
filename (without .oct extension)
Returns
-------
octname : str
filename of .oct file in root directory including extension
err : str
Error message returned from oconv (if any)
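Examples
--------
Illustrative sketch (the oct name is a placeholder):
>>> octfile = demo.makeOct(demo.getfilelist(), octname='demo_scene')  # doctest: +SKIP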
"""
if filelist is None:
filelist = self.getfilelist()
if octname is None:
octname = self.name
debug = False
#JSS. With the way that the break is handled now, this will wait the 10 seconds
# for every hour whose sky file was not generated.
if self.hpc :
import time
time_to_wait = 10
time_counter = 0
for file in filelist:
if debug:
print("HPC Checking for file %s" % (file))
if None in filelist: # are we missing any files? abort!
print('Missing files, skipping...')
self.octfile = None
return None
# The sky file is being saved as 'none', so it crashes!
while not os.path.exists(file):
time.sleep(1)
time_counter += 1
if time_counter > time_to_wait:
print ("filenotfound")
break
#os.system('oconv '+ ' '.join(filelist) + ' > %s.oct' % (octname))
if None in filelist: # are we missing any files? abort!
print('Missing files, skipping...')
self.octfile = None
return None
#cmd = 'oconv ' + ' '.join(filelist)
filelist.insert(0,'oconv')
with open('%s.oct' % (octname), "w") as f:
_,err = _popen(filelist, None, f)
#TODO: exception handling for no sun up
if err is not None:
if err[0:5] == 'error':
raise Exception(err[7:])
if err[0:7] == 'message':
warnings.warn(err[9:], Warning)
#use rvu to see if everything looks good.
# use cmd for this since it locks out the terminal.
#'rvu -vf views\side.vp -e .01 monopanel_test.oct'
print("Created %s.oct" % (octname))
self.octfile = '%s.oct' % (octname)
return '%s.oct' % (octname)
def makeOct1axis(self, trackerdict=None, singleindex=None, customname=None):
"""
Combine files listed in trackerdict into multiple .oct files
Parameters
------------
trackerdict
Output from :py:class:`~bifacial_radiance.RadianceObj.makeScene1axis`
singleindex : str
Single index for trackerdict to run makeOct1axis in single-value mode,
format 'YYYY-MM-DD_HHMM'.
customname : str
Custom text string added to the end of the OCT file name.
Returns
-------
trackerdict
Append 'octfile' to the 1-axis dict with the location of the scene .octfile
"""
if customname is None:
customname = ''
if trackerdict is None:
try:
trackerdict = self.trackerdict
except AttributeError:
print('No trackerdict value passed or available in self')
if singleindex is None: # loop through all values in the tracker dictionary
indexlist = trackerdict.keys()
else: # just loop through one single index in tracker dictionary
indexlist = [singleindex]
print('\nMaking {} octfiles in root directory.'.format(indexlist.__len__()))
for index in sorted(indexlist): # run through either entire key list of trackerdict, or just a single value
try:
filelist = self.materialfiles + [trackerdict[index]['skyfile'], trackerdict[index]['radfile']]
octname = '1axis_%s%s'%(index, customname)
trackerdict[index]['octfile'] = self.makeOct(filelist, octname)
except KeyError as e:
print('Trackerdict key error: {}'.format(e))
return trackerdict
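# Illustrative 1-axis sketch (assumed usage): once every trackerdict entry has
# 'skyfile' and 'radfile', build one octree per key, or a single timestamp:
#   trackerdict = demo.makeOct1axis(trackerdict)
#   trackerdict = demo.makeOct1axis(trackerdict, singleindex='2021-06-17_1300')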
def makeModule(self, name=None, x=None, y=None, z=None, modulefile=None,
text=None, customtext='', xgap=0.01, ygap=0.0,
zgap=0.1, numpanels=1, rewriteModulefile=True,
glass=False, modulematerial=None, bifi=1, **kwargs):
"""
pass module generation details into ModuleObj(). See ModuleObj()
docstring for more details
"""
from bifacial_radiance import ModuleObj
if name is None:
print("usage: makeModule(name,x,y,z, modulefile = '\objects\*.rad', "+
" zgap = 0.1 (module offset)"+
"numpanels = 1 (# of panels in portrait), ygap = 0.05 "+
"(slope distance between panels when arrayed), "+
"rewriteModulefile = True (or False), bifi = 1")
print("You can also override module_type info by passing 'text'"+
"variable, or add on at the end for racking details with "+
"'customtext'. See function definition for more details")
print("Optional: tubeParams={} (torque tube details including "
"diameter (torque tube dia. in meters), tubetype='Round' "
"(or 'square', 'hex'), material='Metal_Grey' (or 'black')"
", axisofrotation=True (does scene rotate around tube)")
print("Optional: cellModule={} (create cell-level module by "+
" passing in dictionary with keys 'numcellsx'6 (#cells in "+
"X-dir.), 'numcellsy', 'xcell' (cell size in X-dir. in meters),"+
"'ycell', 'xcellgap' (spacing between cells in X-dir.), 'ycellgap'")
print("Optional: omegaParams={} (create the support structure omega by "+
"passing in dictionary with keys 'omega_material' (the material of "+
"omega), 'mod_overlap'(the length of the module adjacent piece of"+
" omega that overlaps with the module),'x_omega1', 'y_omega' (ideally same"+
" for all the parts of omega),'z_omega1', 'x_omega2' (X-dir length of the"+
" vertical piece), 'x_omega3', z_omega3")
return
"""
# TODO: check for deprecated torquetube and axisofrotationTorqueTube in
kwargs.
"""
if 'tubeParams' in kwargs:
tubeParams = kwargs.pop('tubeParams')
else:
tubeParams = None
if 'torquetube' in kwargs:
torquetube = kwargs.pop('torquetube')
print("\nWarning: boolean input `torquetube` passed into makeModule"
". Starting in v0.4.0 this boolean parameter is deprecated."
" Use module.addTorquetube() with `visible` parameter instead.")
if tubeParams:
tubeParams['visible'] = torquetube
elif (tubeParams is None) & (torquetube is True):
tubeParams = {'visible':True} # create default TT
if 'axisofrotationTorqueTube' in kwargs:
axisofrotation = kwargs.pop('axisofrotationTorqueTube')
print("\nWarning: input boolean `axisofrotationTorqueTube` passed "
"into makeModule. Starting in v0.4.0 this boolean parameter is"
" deprecated. Use module.addTorquetube() with `axisofrotation`"
"parameter instead.")
if tubeParams: #this kwarg only does something if there's a torque tube.
tubeParams['axisofrotation'] = axisofrotation
if self.hpc: # trigger HPC simulation in ModuleObj
kwargs['hpc']=True
self.module = ModuleObj(name=name, x=x, y=y, z=z, bifi=bifi, modulefile=modulefile,
text=text, customtext=customtext, xgap=xgap, ygap=ygap,
zgap=zgap, numpanels=numpanels,
rewriteModulefile=rewriteModulefile, glass=glass,
modulematerial=modulematerial, tubeParams=tubeParams,
**kwargs)
return self.module
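# Minimal usage sketch (assumed; dimensions are placeholders, and the
# addTorquetube parameter names are inferred from the deprecation notes above):
#   mymodule = demo.makeModule(name='test-module', x=1, y=2, xgap=0.02,
#                              numpanels=2, ygap=0.05)
#   mymodule.addTorquetube(visible=True, axisofrotation=True)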
def makeCustomObject(self, name=None, text=None):
"""
Function for development and experimenting with extraneous objects in the scene.
This function creates a `name.rad` textfile in the objects folder
with whatever text that is passed to it.
It is up to the user to pass the correct radiance format.
For example, to create a box at coordinates 0,0 (with its bottom surface
on the plane z=0):
.. code-block:
name = 'box'
text='! genbox black PVmodule 0.5 0.5 0.5 | xform -t -0.25 -0.25 0'
Parameters
----------
name : str
String input to name the module type
text : str
Text used in the radfile to generate the module
"""
customradfile = os.path.join('objects', '%s.rad'%(name)) # update in 0.2.3 to shorten radnames
# py2 and 3 compatible: binary write, encode text first
with open(customradfile, 'wb') as f:
f.write(text.encode('ascii'))
print("\nCustom Object Name", customradfile)
self.customradfile = customradfile
return customradfile
def printModules(self):
# print available module types from ModuleObj
from bifacial_radiance import ModuleObj
modulenames = ModuleObj().readModule()
print('Available module names: {}'.format([str(x) for x in modulenames]))
return modulenames
def makeScene(self, module=None, sceneDict=None, radname=None,
moduletype=None):
"""
Create a SceneObj which contains details of the PV system configuration including
tilt, row pitch, height, nMods per row, nRows in the system...
Parameters
----------
module : str or ModuleObj
String name of module created with makeModule()
sceneDict : dictionary
Dictionary with keys: `tilt`, `clearance_height`*, `pitch`,
`azimuth`, `nMods`, `nRows`, `hub_height`*, `height`*
* `height` is deprecated from sceneDict. For makeScene (fixed systems),
if passed it is assumed to refer to clearance_height.
`clearance_height` is recommended for fixed (non-tracking) systems.
`hub_height` can also be passed as a possibility.
radname : str
Gives a custom name to the scene file. Useful when parallelizing.
moduletype: DEPRECATED. use the `module` kwarg instead.
Returns
-------
SceneObj
'scene' with configuration details
"""
if moduletype is not None:
module = moduletype
print("Warning: input `moduletype` is deprecated. Use kwarg "
"`module` instead")
if module is None:
try:
module = self.module
print(f'Using last saved module, name: {module.name}')
except AttributeError:
print('makeScene(module, sceneDict, nMods, nRows). '+\
'Available moduletypes: ' )
self.printModules() #print available module types
return
self.scene = SceneObj(module)
self.scene.hpc = self.hpc #pass HPC mode from parent
if sceneDict is None:
print('makeScene(moduletype, sceneDict, nMods, nRows). '+\
'sceneDict inputs: .tilt .clearance_height .pitch .azimuth')
return self.scene
if 'azimuth' not in sceneDict:
sceneDict['azimuth'] = 180
if 'nRows' not in sceneDict:
sceneDict['nRows'] = 7
if 'nMods' not in sceneDict:
sceneDict['nMods'] = 20
# Fixed tilt routine
# Preferred: clearance_height,
# If only height is passed, it is assumed to be clearance_height.
sceneDict, use_clearanceheight = _heightCasesSwitcher(sceneDict,
preferred='clearance_height',
nonpreferred='hub_height')
self.nMods = sceneDict['nMods']
self.nRows = sceneDict['nRows']
self.sceneRAD = self.scene._makeSceneNxR(sceneDict=sceneDict,
radname=radname)
if 'appendRadfile' not in sceneDict:
appendRadfile = False
else:
appendRadfile = sceneDict['appendRadfile']
if appendRadfile:
debug = False
try:
self.radfiles.append(self.sceneRAD)
if debug:
print( "Radfile APPENDED!")
except:
#TODO: Manage situation where radfile was created with
#appendRadfile to False first..
self.radfiles=[]
self.radfiles.append(self.sceneRAD)
if debug:
print( "Radfile APPENDAGE created!")
else:
self.radfiles = [self.sceneRAD]
return self.scene
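# Minimal usage sketch (assumed values, not from the original file):
#   sceneDict = {'tilt': 10, 'pitch': 3, 'clearance_height': 0.2,
#                'azimuth': 180, 'nMods': 20, 'nRows': 7}
#   scene = demo.makeScene(module=mymodule, sceneDict=sceneDict)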
def appendtoScene(self, radfile=None, customObject=None, text=''):
"""
Appends the user-provided Radiance text command to the scene .rad file in
the `objects` folder.
Useful when adding a custom object (from makeCustomObject) to the scene.
Parameters
----------
radfile: str
Directory and name of where .rad scene file is stored
customObject : str
Directory and name of custom object .rad file is stored
text : str
Command to be appended to the radfile. Do not leave empty spaces
at the end.
Returns
-------
Nothing, the radfile must already be created and assigned when running this.
"""
#TODO: Add a custom name and replace radfile name
# py2 and 3 compatible: binary write, encode text first
text2 = '\n' + text + ' ' + customObject
debug = False
if debug:
print (text2)
with open(radfile, 'a+') as f:
f.write(text2)
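# Illustrative pairing of makeCustomObject and appendtoScene (assumed usage;
# the genbox geometry is a placeholder):
#   marker = demo.makeCustomObject('post',
#       '! genbox black post 0.1 0.1 2 | xform -t -0.05 -0.05 0')
#   demo.appendtoScene(radfile=scene.radfiles, customObject=marker,
#                      text='!xform -rz 0')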
def makeScene1axis(self, trackerdict=None, module=None, sceneDict=None,
cumulativesky=None, moduletype=None):
"""
Creates a SceneObj for each tracking angle which contains details of the PV
system configuration including row pitch, hub_height, nMods per row, nRows in the system...
Parameters
------------
trackerdict
Output from GenCumSky1axis
module : str or ModuleObj
Name or ModuleObj created with makeModule()
sceneDict :
Dictionary with keys:`tilt`, `hub_height`, `pitch`, `azimuth`
cumulativesky : bool
Defines if sky will be generated with cumulativesky or gendaylit.
moduletype: DEPRECATED. use the `module` kwarg instead.
Returns
--------
trackerdict
Append the following keys
'radfile'
directory where .rad scene file is stored
'scene'
SceneObj for each tracker theta
'clearance_height'
Calculated ground clearance based on
`hub height`, `tilt` angle and overall collector width `sceney`
"""
import math
if sceneDict is None:
print('usage: makeScene1axis(module, sceneDict, nMods, nRows).'+
'sceneDict inputs: .hub_height .azimuth .nMods .nRows'+
'and .pitch or .gcr')
return
# If no nRows or nMods assigned on deprecated variable or dictionary,
# assign default.
if 'nRows' not in sceneDict:
sceneDict['nRows'] = 7
if 'nMods' not in sceneDict:
sceneDict['nMods'] = 20
if trackerdict is None:
try:
trackerdict = self.trackerdict
except AttributeError:
print('No trackerdict value passed or available in self')
if cumulativesky is None:
try:
# see if cumulativesky = False was set earlier,
# e.g. in RadianceObj.set1axis
cumulativesky = self.cumulativesky
except AttributeError:
# default cumulativesky = true to maintain backward compatibility.
cumulativesky = True
if moduletype is not None:
module = moduletype
print("Warning: input `moduletype` is deprecated. Use kwarg "
"`module` instead")
if module is None:
try:
module = self.module
print(f'Using last saved module, name: {module.name}')
except AttributeError:
print('usage: makeScene1axis(trackerdict, module, '+
'sceneDict, nMods, nRows). ')
self.printModules() #print available module types
return
if 'orientation' in sceneDict:
raise Exception('\n\n ERROR: Orientation format has been '
'deprecated since version 0.2.4. If you want to flip your '
'modules, on makeModule switch the x and y values.\n\n')
# 1axis routine
# Preferred hub_height
sceneDict, use_clearanceheight = _heightCasesSwitcher(sceneDict,
preferred='hub_height',
nonpreferred='clearance_height')
if use_clearanceheight:
simplefix = 0
hubheight = sceneDict['clearance_height'] # Not really the hub height, but this is the fastest
# way to make it work with the simplefix: below, the actual clearance height
# gets calculated and simplefix=0 disables the cosine correction.
# TODO: CLEAN THIS UP.
else:
#the hub height is the tracker height at center of rotation.
hubheight = sceneDict['hub_height']
simplefix = 1
if cumulativesky is True: # cumulativesky workflow
print('\nMaking .rad files for cumulativesky 1-axis workflow')
for theta in trackerdict:
scene = SceneObj(module)
if trackerdict[theta]['surf_azm'] >= 180:
trackerdict[theta]['surf_azm'] = trackerdict[theta]['surf_azm']-180
trackerdict[theta]['surf_tilt'] = trackerdict[theta]['surf_tilt']*-1
radname = '1axis%s_'%(theta,)
# Calculating clearance height for this theta.
height = hubheight - simplefix*0.5* math.sin(abs(theta) * math.pi / 180) \
* scene.module.sceney + scene.module.offsetfromaxis \
* math.sin(abs(theta)*math.pi/180)
# Calculate the ground clearance height based on the hub height. Use abs(theta) to avoid negative tilt angle errors
trackerdict[theta]['clearance_height'] = height
try:
sceneDict2 = {'tilt':trackerdict[theta]['surf_tilt'],
'pitch':sceneDict['pitch'],
'clearance_height':trackerdict[theta]['clearance_height'],
'azimuth':trackerdict[theta]['surf_azm'],
'nMods': sceneDict['nMods'],
'nRows': sceneDict['nRows'],
'modulez': scene.module.z}
except KeyError:
#maybe gcr is passed, not pitch
sceneDict2 = {'tilt':trackerdict[theta]['surf_tilt'],
'gcr':sceneDict['gcr'],
'clearance_height':trackerdict[theta]['clearance_height'],
'azimuth':trackerdict[theta]['surf_azm'],
'nMods': sceneDict['nMods'],
'nRows': sceneDict['nRows'],
'modulez': scene.module.z}
radfile = scene._makeSceneNxR(sceneDict=sceneDict2,
radname=radname)
trackerdict[theta]['radfile'] = radfile
trackerdict[theta]['scene'] = scene
print('{} Radfiles created in /objects/'.format(trackerdict.__len__()))
else: #gendaylit workflow
print('\nMaking ~%s .rad files for gendaylit 1-axis workflow (this takes a minute..)' % (len(trackerdict)))
count = 0
for time in trackerdict:
scene = SceneObj(module)
if trackerdict[time]['surf_azm'] >= 180:
trackerdict[time]['surf_azm'] = trackerdict[time]['surf_azm']-180
trackerdict[time]['surf_tilt'] = trackerdict[time]['surf_tilt']*-1
theta = trackerdict[time]['theta']
radname = '1axis%s_'%(time,)
# Calculating clearance height for this time.
height = hubheight - simplefix*0.5* math.sin(abs(theta) * math.pi / 180) \
* scene.module.sceney + scene.module.offsetfromaxis \
* math.sin(abs(theta)*math.pi/180)
if trackerdict[time]['ghi'] > 0:
trackerdict[time]['clearance_height'] = height
try:
sceneDict2 = {'tilt':trackerdict[time]['surf_tilt'],
'pitch':sceneDict['pitch'],
'clearance_height': trackerdict[time]['clearance_height'],
'azimuth':trackerdict[time]['surf_azm'],
'nMods': sceneDict['nMods'],
'nRows': sceneDict['nRows'],
'modulez': scene.module.z}
except KeyError:
#maybe gcr is passed instead of pitch
sceneDict2 = {'tilt':trackerdict[time]['surf_tilt'],
'gcr':sceneDict['gcr'],
'clearance_height': trackerdict[time]['clearance_height'],
'azimuth':trackerdict[time]['surf_azm'],
'nMods': sceneDict['nMods'],
'nRows': sceneDict['nRows'],
'modulez': scene.module.z}
radfile = scene._makeSceneNxR(sceneDict=sceneDict2,
radname=radname)
trackerdict[time]['radfile'] = radfile
trackerdict[time]['scene'] = scene
count+=1
print('{} Radfiles created in /objects/'.format(count))
self.trackerdict = trackerdict
self.nMods = sceneDict['nMods'] #assign nMods and nRows to RadianceObj
self.nRows = sceneDict['nRows']
self.hub_height = hubheight
return trackerdict
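# Illustrative sketch of the clearance-height math above (assumed numbers): for
# hub_height = 1.5 m, collector width sceney = 2 m and theta = 30 deg, ignoring
# offsetfromaxis, clearance_height = 1.5 - 0.5*sin(30 deg)*2 = 1.0 m.
#   sceneDict = {'hub_height': 1.5, 'pitch': 5, 'nMods': 20, 'nRows': 7}
#   trackerdict = demo.makeScene1axis(trackerdict, mymodule, sceneDict)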
def analysis1axis(self, trackerdict=None, singleindex=None, accuracy='low',
customname=None, modWanted=None, rowWanted=None,
sensorsy=9, sensorsx=1,
modscanfront = None, modscanback = None, relative=False,
debug=False ):
"""
Loop through trackerdict and runs linescans for each scene and scan in there.
Parameters
----------------
trackerdict
singleindex : str
For single-index mode, just the one index we want to run (new in 0.2.3).
Example format '21_06_14_12_30' for 2021 June 14th 12:30 pm
accuracy : str
'low' or 'high', resolution option used during _irrPlot and rtrace
customname : str
Custom text string to be added to the file name for the results .CSV files
modWanted : int
Module to be sampled. Index starts at 1.
rowWanted : int
Row to be sampled. Index starts at 1.
sensorsy : int or list
Number of 'sensors' or scanning points along the collector width
(CW) of the module(s). If multiple values are passed, first value
represents number of front sensors, second value is number of back sensors
sensorsx : int or list
Number of 'sensors' or scanning points along the length, the side perpendicular
to the collector width (CW) of the module(s) for the back side of the module.
If multiple values are passed, first value represents number of
front sensors, second value is number of back sensors.
modscanfront : dict
dictionary with one or more of the following keys: xstart, ystart, zstart,
xinc, yinc, zinc, Nx, Ny, Nz, orient. All of these keys are ints or
floats except for 'orient' which takes x y z values as string 'x y z'
for example '0 0 -1'. These values will overwrite the internally
calculated frontscan dictionary for the module & row selected. If modifying
Nx, Ny or Nz, make sure to modify on modscanback to avoid issues on
results writing stage.
modscanback : dict
dictionary with one or more of the following keys: xstart, ystart, zstart,
xinc, yinc, zinc, Nx, Ny, Nz, orient. All of these keys are ints or
floats except for 'orient' which takes x y z values as string 'x y z'
for example '0 0 -1'. These values will overwrite the internally
calculated backscan dictionary for the module & row selected. If modifying
Nx, Ny or Nz, make sure to modify on modscanback to avoid issues on
results writing stage.
relative : Bool
if passing modscanfront and modscanback to modify the dictionaries of positions,
this sets whether the values passed are relative or absolute.
Default is absolute (relative=False).
debug : Bool
Activates internal printing of the function to help debugging.
Returns
-------
trackerdict with new keys:
'AnalysisObj' : analysis object for this tracker theta
'Wm2Front' : list of front Wm-2 irradiances, len=sensorsy_back
'Wm2Back' : list of rear Wm-2 irradiances, len=sensorsy_back
'backRatio' : list of rear irradiance ratios, len=sensorsy_back
RadianceObj with new appended values:
'Wm2Front' : np Array with front irradiance cumulative
'Wm2Back' : np Array with rear irradiance cumulative
'backRatio' : np Array with rear irradiance ratios
"""
import warnings
if customname is None:
customname = ''
if trackerdict == None:
try:
trackerdict = self.trackerdict
except AttributeError:
print('No trackerdict value passed or available in self')
if singleindex is None: # run over all values in trackerdict
trackerkeys = sorted(trackerdict.keys())
else: # run in single index mode.
trackerkeys = [singleindex]
if modWanted == None:
modWanted = round(self.nMods / 1.99)
if rowWanted == None:
rowWanted = round(self.nRows / 1.99)
frontWm2 = 0 # container for tracking front irradiance across module chord. Dynamically sized based on first analysis run
backWm2 = 0 # container for tracking rear irradiance across module chord.
for index in trackerkeys: # either full list of trackerdict keys, or single index
name = '1axis_%s%s'%(index,customname)
octfile = trackerdict[index]['octfile']
scene = trackerdict[index]['scene']
if octfile is None:
continue # don't run analysis if the octfile is none
try: # look for missing data
analysis = AnalysisObj(octfile,name)
name = '1axis_%s%s'%(index,customname,)
frontscanind, backscanind = analysis.moduleAnalysis(scene=scene, modWanted=modWanted,
rowWanted=rowWanted,
sensorsy=sensorsy,
sensorsx=sensorsx,
modscanfront=modscanfront, modscanback=modscanback,
relative=relative, debug=debug)
analysis.analysis(octfile=octfile,name=name,frontscan=frontscanind,backscan=backscanind,accuracy=accuracy)
trackerdict[index]['AnalysisObj'] = analysis
except Exception as e: # problem with file. TODO: only catch specific error types here.
warnings.warn('Index: {}. Problem with file. Error: {}. Skipping'.format(index,e), Warning)
return
#combine cumulative front and back irradiance for each tracker angle
try: #on error, trackerdict[index] is returned empty
trackerdict[index]['Wm2Front'] = analysis.Wm2Front
trackerdict[index]['Wm2Back'] = analysis.Wm2Back
trackerdict[index]['backRatio'] = analysis.backRatio
except AttributeError as e: # no key Wm2Front.
warnings.warn('Index: {}. Trackerdict key not found: {}. Skipping'.format(index,e), Warning)
return
if np.sum(frontWm2) == 0: # define frontWm2 the first time through
frontWm2 = np.array(analysis.Wm2Front)
backWm2 = np.array(analysis.Wm2Back)
else:
frontWm2 += np.array(analysis.Wm2Front)
backWm2 += np.array(analysis.Wm2Back)
print('Index: {}. Wm2Front: {}. Wm2Back: {}'.format(index,
np.mean(analysis.Wm2Front), np.mean(analysis.Wm2Back)))
if np.sum(self.Wm2Front) == 0:
self.Wm2Front = frontWm2 # these are accumulated over all indices passed in.
self.Wm2Back = backWm2
else:
self.Wm2Front += frontWm2 # these are accumulated over all indices passed in.
self.Wm2Back += backWm2
self.backRatio = np.mean(backWm2)/np.mean(frontWm2+.001)
# Save compiled results using _saveresults
if singleindex is None:
print ("Saving a cumulative-results file in the main simulation folder." +
"This adds up by sensor location the irradiance over all hours " +
"or configurations considered." +
"\nWarning: This file saving routine does not clean results, so "+
"if your setup has ygaps, or 2+modules or torque tubes, doing "+
"a deeper cleaning and working with the individual results "+
"files in the results folder is highly suggested.")
cumfilename = 'cumulative_results_%s.csv'%(customname)
if self.cumulativesky is True:
frontcum = pd.DataFrame()
rearcum = pd.DataFrame()
temptrackerdict = trackerdict[list(trackerdict)[0]]['AnalysisObj']
#temptrackerdict = trackerdict[0.0]['AnalysisObj']
frontcum ['x'] = temptrackerdict.x
frontcum ['y'] = temptrackerdict.y
frontcum ['z'] = temptrackerdict.z
frontcum ['mattype'] = temptrackerdict.mattype
frontcum ['Wm2'] = self.Wm2Front
rearcum ['x'] = temptrackerdict.x
rearcum ['y'] = temptrackerdict.y
rearcum ['z'] = temptrackerdict.rearZ
rearcum ['mattype'] = temptrackerdict.rearMat
rearcum ['Wm2'] = self.Wm2Back
cumanalysisobj = AnalysisObj()
print ("\nSaving Cumulative results" )
cumanalysisobj._saveResultsCumulative(frontcum, rearcum, savefile=cumfilename)
else: # trackerkeys are day/hour/min, and there's no easy way to find a
# tilt of 0, so making a fake linepoint object for tilt 0
# and then saving.
try:
cumscene = trackerdict[trackerkeys[0]]['scene']
cumscene.sceneDict['tilt']=0
cumscene.sceneDict['clearance_height'] = self.hub_height
cumanalysisobj = AnalysisObj()
frontscancum, backscancum = cumanalysisobj.moduleAnalysis(scene=cumscene, modWanted=modWanted,
rowWanted=rowWanted,
sensorsy=sensorsy,
sensorsx=sensorsx,
modscanfront=modscanfront, modscanback=modscanback,
relative=relative, debug=debug)
x,y,z = cumanalysisobj._linePtsArray(frontscancum)
x,y,rearz = cumanalysisobj._linePtsArray(backscancum)
frontcum = pd.DataFrame()
rearcum = pd.DataFrame()
frontcum ['x'] = x
frontcum ['y'] = y
frontcum ['z'] = z
frontcum ['mattype'] = trackerdict[trackerkeys[0]]['AnalysisObj'].mattype
frontcum ['Wm2'] = self.Wm2Front
rearcum ['x'] = x
rearcum ['y'] = y
rearcum ['z'] = rearz
rearcum ['mattype'] = trackerdict[trackerkeys[0]]['AnalysisObj'].rearMat
rearcum ['Wm2'] = self.Wm2Back
print ("\nSaving Cumulative results" )
cumanalysisobj._saveResultsCumulative(frontcum, rearcum, savefile=cumfilename)
except:
print("Not able to save a cumulative result for this simulation.")
return trackerdict
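# Minimal usage sketch (assumed): scan one module/row for every tracker key;
# the cumulative Wm2Front/Wm2Back arrays accumulate on the RadianceObj itself:
#   trackerdict = demo.analysis1axis(trackerdict, modWanted=10, rowWanted=4,
#                                    sensorsy=9)
#   rear_to_front = sum(demo.Wm2Back) / sum(demo.Wm2Front)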
# End RadianceObj definition
class GroundObj:
"""
Class to set and return details for the ground surface materials and reflectance.
If 1 albedo value is passed, it is used as default.
If 3 albedo values are passed, they are assigned to the three wavelength placeholders (RGB).
If the material type is known, it is used to get reflectance info.
If the material type isn't known, material_info.list is returned.
Parameters
------------
materialOrAlbedo : numeric or str
If number between 0 and 1 is passed, albedo input is assumed and assigned.
If a string is passed with the name of the material desired, e.g. 'litesoil',
properties are searched in `material_file`.
Default Material names to choose from: litesoil, concrete, white_EPDM,
beigeroof, beigeroof_lite, beigeroof_heavy, black, asphalt
material_file : str
Filename of the material information. Default `ground.rad`
Returns
-------
"""
def __init__(self, materialOrAlbedo=None, material_file=None):
import warnings
from numbers import Number
self.normval = None
self.ReflAvg = None
self.Rrefl = None
self.Grefl = None
self.Brefl = None
self.ground_type = 'custom'
if material_file is None:
material_file = 'ground.rad'
self.material_file = material_file
if materialOrAlbedo is None: # Case where it's none.
print('\nInput albedo 0-1, or string from ground.printGroundMaterials().'
'\nAlternatively, run setGround after readWeatherData() '
'and setGround will read metdata.albedo if available')
return
if isinstance(materialOrAlbedo, str) :
self.ground_type = materialOrAlbedo
# Return the RGB albedo for material ground_type
materialOrAlbedo = self.printGroundMaterials(self.ground_type)
# Check for double and int.
if isinstance(materialOrAlbedo, Number):
materialOrAlbedo = np.array([[materialOrAlbedo,
materialOrAlbedo, materialOrAlbedo]])
if isinstance(materialOrAlbedo, list):
materialOrAlbedo = np.asarray(materialOrAlbedo)
# By this point, materialOrAlbedo should be a np.ndarray:
if isinstance(materialOrAlbedo, np.ndarray):
if materialOrAlbedo.ndim == 0:
# numpy array of one single value, i.e. np.array(0.62)
# after this if, np.array([0.62])
materialOrAlbedo = materialOrAlbedo.reshape([1])
if materialOrAlbedo.ndim == 1:
# If np.array is ([0.62]), this repeats it so at the end it's
# np.array ([0.62, 0.62, 0.62])
materialOrAlbedo = np.repeat(np.array([materialOrAlbedo]),
3, axis=1).reshape(
len(materialOrAlbedo),3)
if (materialOrAlbedo.ndim == 2) & (materialOrAlbedo.shape[1] > 3):
warnings.warn("Radiance only raytraces 3 wavelengths at "
"a time. Trimming albedo np.array input to "
"3 wavelengths.")
materialOrAlbedo = materialOrAlbedo[:,0:3]
# By this point we should have np.array of dim=2 and shape[1] = 3.
# Check for invalid values
if (materialOrAlbedo > 1).any() or (materialOrAlbedo < 0).any():
print('Warning: albedo values greater than 1 or less than 0. '
'Constraining to [0..1]')
materialOrAlbedo = materialOrAlbedo.clip(min=0, max=1)
try:
self.Rrefl = materialOrAlbedo[:,0]
self.Grefl = materialOrAlbedo[:,1]
self.Brefl = materialOrAlbedo[:,2]
self.normval = _normRGB(materialOrAlbedo[:,0],materialOrAlbedo[:,1],
materialOrAlbedo[:,2])
self.ReflAvg = np.round(np.mean(materialOrAlbedo, axis=1),4)
print(f'Loading albedo, {self.ReflAvg.__len__()} value(s), '
f'{self._nonzeromean(self.ReflAvg):0.3f} avg\n'
f'{self.ReflAvg[self.ReflAvg != 0].__len__()} nonzero albedo values.')
except IndexError as e:
print('albedo.shape should be 3 columns (N x 3)')
raise e
def printGroundMaterials(self, materialString=None):
"""
printGroundMaterials(materialString=None)
input: None or materialString. If None, return list of acceptable
material types from ground.rad. If valid string, return RGB albedo
of the material type selected.
"""
import warnings
material_path = 'materials'
f = open(os.path.join(material_path, self.material_file))
keys = [] #list of material key names
Rreflall = []; Greflall=[]; Breflall=[] #RGB material reflectance
temp = f.read().split()
f.close()
#return indices for 'plastic' definition
index = _findme(temp,'plastic')
for i in index:
keys.append(temp[i+1])# after plastic comes the material name
Rreflall.append(float(temp[i+5]))#RGB reflectance comes a few more down the list
Greflall.append(float(temp[i+6]))
Breflall.append(float(temp[i+7]))
if materialString is not None:
try:
index = _findme(keys,materialString)[0]
except IndexError:
warnings.warn('Error - materialString not in '
f'{self.material_file}: {materialString}')
return(np.array([[Rreflall[index], Greflall[index], Breflall[index]]]))
else:
return(keys)
def _nonzeromean(self, val):
''' array mean excluding zero. return zero if everything's zero'''
tempmean = np.nanmean(val)
if tempmean > 0:
tempmean = np.nanmean(val[val !=0])
return tempmean
def _makeGroundString(self, index=0, cumulativesky=False):
'''
create string with ground reflectance parameters for use in
gendaylit and gencumsky.
Parameters
-----------
index : integer
Index of time for time-series albedo. Default 0
cumulativesky: Boolean
If true, set albedo to average of time series values.
Returns
-------
groundstring: text with albedo details to append to sky.rad in
gendaylit
'''
try:
if cumulativesky is True:
Rrefl = self._nonzeromean(self.Rrefl)
Grefl = self._nonzeromean(self.Grefl)
Brefl = self._nonzeromean(self.Brefl)
normval = _normRGB(Rrefl, Grefl, Brefl)
else:
Rrefl = self.Rrefl[index]
Grefl = self.Grefl[index]
Brefl = self.Brefl[index]
normval = _normRGB(Rrefl, Grefl, Brefl)
# Check for all zero albedo case
if normval == 0:
normval = 1
groundstring = ( f'\nskyfunc glow ground_glow\n0\n0\n4 '
f'{Rrefl/normval} {Grefl/normval} {Brefl/normval} 0\n'
'\nground_glow source ground\n0\n0\n4 0 0 -1 180\n'
f'\nvoid plastic {self.ground_type}\n0\n0\n5 '
f'{Rrefl:0.3f} {Grefl:0.3f} {Brefl:0.3f} 0 0\n'
f"\n{self.ground_type} ring groundplane\n"
'0\n0\n8\n0 0 -.01\n0 0 1\n0 100' )
except IndexError as err:
print(f'Index {index} passed to albedo with only '
f'{self.Rrefl.__len__()} values.' )
raise err
return groundstring
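# Illustrative constructor sketches (assumed usage, not from the original file):
#   ground = GroundObj(0.62)              # scalar albedo, broadcast to R, G, B
#   ground = GroundObj('litesoil')        # named material looked up in ground.rad
#   ground = GroundObj([[0.2, 0.3, 0.4]]) # explicit per-wavelength albedo row
# In each case the input ends up as an (N x 3) array feeding Rrefl/Grefl/Brefl.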
class SceneObj:
'''
scene information including PV module type, bifaciality, array info
pv module orientation defaults: Azimuth = 180 (south)
pv module origin: z = 0 bottom of frame. y = 0 lower edge of frame.
x = 0 vertical centerline of module
scene includes module details (x,y,bifi, sceney (collector_width), scenex)
'''
def __repr__(self):
return str(self.__dict__)
def __init__(self, module=None):
''' initialize SceneObj
'''
from bifacial_radiance import ModuleObj
# should sceneDict be initialized here? This is set in _makeSceneNxR
if module is None:
return
elif type(module) == str:
self.module = ModuleObj(name=module)
elif type(module) == ModuleObj: # try moduleObj
self.module = module
#self.moduleDict = self.module.getDataDict()
#self.scenex = self.module.scenex
#self.sceney = self.module.sceney
#self.offsetfromaxis = self.moduleDict['offsetfromaxis']
#TODO: get rid of these 4 values
self.modulefile = self.module.modulefile
self.hpc = False #default False. Set True by makeScene after sceneobj created.
def _makeSceneNxR(self, modulename=None, sceneDict=None, radname=None):
"""
Arrange module defined in :py:class:`bifacial_radiance.SceneObj` into a N x R array.
Returns a :py:class:`bifacial_radiance.SceneObj` which contains details
of the PV system configuration including `tilt`, `row pitch`, `hub_height`
or `clearance_height`, `nMods` per row, `nRows` in the system.
The returned scene has (0,0) coordinates centered at the module at the
center of the array. For 5 rows, that is row 3, for 4 rows, that is
row 2 also (rounds down). For 5 modules in the row, that is module 3,
for 4 modules in the row, that is module 2 also (rounds down)
Parameters
------------
modulename: str
Name of module created with :py:class:`~bifacial_radiance.RadianceObj.makeModule`.
sceneDict : dictionary
Dictionary of scene parameters.
clearance_height : numeric
(meters).
pitch : numeric
Separation between rows
tilt : numeric
Valid input ranges -90 to 90 degrees
azimuth : numeric
A value denoting the compass direction along which the
axis of rotation lies. Measured in decimal degrees East
of North. [0 to 180) possible.
nMods : int
Number of modules per row (default = 20)
nRows : int
Number of rows in system (default = 7)
radname : str
String for name for radfile.
Returns
-------
radfile : str
Filename of .RAD scene in /objects/
scene : :py:class:`~bifacial_radiance.SceneObj`
Returns a `SceneObject` 'scene' with configuration details
"""
if modulename is None:
modulename = self.module.name
if sceneDict is None:
print('makeScene(modulename, sceneDict, nMods, nRows). sceneDict'
' inputs: .tilt .azimuth .nMods .nRows'
' AND .pitch or .gcr ; AND .hub_height or .clearance_height')
if 'orientation' in sceneDict:
raise Exception('\n\n ERROR: Orientation format has been '
'deprecated since version 0.2.4. If you want to flip your '
'modules, on makeModule switch the x and y values.\n\n')
if 'azimuth' not in sceneDict:
sceneDict['azimuth'] = 180
if 'axis_tilt' not in sceneDict:
sceneDict['axis_tilt'] = 0
if 'originx' not in sceneDict:
sceneDict['originx'] = 0
if 'originy' not in sceneDict:
sceneDict['originy'] = 0
if radname is None:
radname = str(self.module.name).strip().replace(' ', '_')
# loading variables
tilt = sceneDict['tilt']
azimuth = sceneDict['azimuth']
nMods = sceneDict['nMods']
nRows = sceneDict['nRows']
axis_tilt = sceneDict['axis_tilt']
originx = sceneDict ['originx']
originy = sceneDict['originy']
# hub_height, clearance_height and height logic.
# this routine uses hub_height to move the panels up so it's important
# to have a value for that, either obtaining it from clearance_height
# (if coming from makeScene) or from hub_height itself.
# it is assumed that if no clearance_height or hub_height is passed,
# hub_height = height.
sceneDict, use_clearanceheight = _heightCasesSwitcher(sceneDict, preferred='hub_height',
nonpreferred='clearance_height')
if use_clearanceheight :
hubheight = sceneDict['clearance_height'] + 0.5* np.sin(abs(tilt) * np.pi / 180) \
* self.module.sceney - self.module.offsetfromaxis*np.sin(abs(tilt)*np.pi/180)
title_clearance_height = sceneDict['clearance_height']
else:
hubheight = sceneDict['hub_height']
# this calculates clearance_height, used for the title
title_clearance_height = sceneDict['hub_height'] - 0.5* np.sin(abs(tilt) * np.pi / 180) \
* self.module.sceney + self.module.offsetfromaxis*np.sin(abs(tilt)*np.pi/180)
try:
if sceneDict['pitch'] >0:
pitch = sceneDict['pitch']
else:
raise Exception('default to gcr')
except:
if 'gcr' in sceneDict:
pitch = np.round(self.module.sceney/sceneDict['gcr'],3)
else:
raise Exception('No valid `pitch` or `gcr` in sceneDict')
''' INITIALIZE VARIABLES '''
text = '!xform '
text += '-rx %s -t %s %s %s ' %(tilt, 0, 0, hubheight)
# create nMods-element array along x, nRows along y. 1cm module gap.
text += '-a %s -t %s 0 0 -a %s -t 0 %s 0 ' %(nMods, self.module.scenex, nRows, pitch)
# azimuth rotation of the entire shebang. Select the row to scan here based on y-translation.
# Modifying so center row is centered in the array. (i.e. 3 rows, row 2. 4 rows, row 2 too)
# Since the array is already centered on row 1, module 1, we need to increment by Nrows/2-1 and Nmods/2-1
text += (f'-i 1 -t {-self.module.scenex*(round(nMods/1.999)*1.0-1)} '
f'{-pitch*(round(nRows / 1.999)*1.0-1)} 0 -rz {180-azimuth} '
f'-t {originx} {originy} 0 ' )
#axis tilt only working for N-S trackers
if axis_tilt != 0 and azimuth == 90:
print("Axis_Tilt is still under development. The scene will be "
"created with the proper axis tilt, and the tracking angle"
"will consider the axis_tilt, but the sensors for the "
"analysis might not fall in the correct surfaces unless you"
" manually position them for this version. Sorry! :D ")
text += (f'-rx {axis_tilt} -t 0 0 %s ' %(
self.module.scenex*(round(nMods/1.99)*1.0-1)*np.sin(
axis_tilt * np.pi/180) ) )
filename = (f'{radname}_C_{title_clearance_height:0.5f}_rtr_{pitch:0.5f}_tilt_{tilt:0.5f}_'
f'{nMods}modsx{nRows}rows_origin{originx},{originy}.rad' )
if self.hpc:
text += f'"{os.path.join(os.getcwd(), self.modulefile)}"'
radfile = os.path.join(os.getcwd(), 'objects', filename)
else:
text += os.path.join(self.modulefile)
radfile = os.path.join('objects',filename )
# py2 and 3 compatible: binary write, encode text first
with open(radfile, 'wb') as f:
f.write(text.encode('ascii'))
self.gcr = self.module.sceney / pitch
self.text = text
self.radfiles = radfile
self.sceneDict = sceneDict
# self.hub_height = hubheight
return radfile
def showScene(self):
"""
Method to call objview on the scene included in self
"""
cmd = 'objview %s %s' % (os.path.join('materials', 'ground.rad'),
self.radfiles)
print('Rendering scene. This may take a moment...')
_,err = _popen(cmd,None)
if err is not None:
print('Error: {}'.format(err))
print('possible solution: install radwinexe binary package from '
'http://www.jaloxa.eu/resources/radiance/radwinexe.shtml'
' into your RADIANCE binaries path')
return
# end of SceneObj
class MetObj:
"""
Meteorological data from EPW file.
Initialize the MetObj from tmy data already read in.
Parameters
-----------
tmydata : DataFrame
TMY3 output from :py:class:`~bifacial_radiance.RadianceObj.readTMY` or
from :py:class:`~bifacial_radiance.RadianceObj.readEPW`.
metadata : Dictionary
Metadata output from :py:class:`~bifacial_radiance.RadianceObj.readTMY`
or from :py:class:`~bifacial_radiance.RadianceObj.readEPW`.
label : str
'left', 'right', or 'center'. For data that is averaged, defines if the
timestamp refers to the left edge, the right edge, or the center of the
averaging interval, for purposes of calculating sunposition. For
example, TMY3 data is right-labeled, so 11 AM data represents data from
10 to 11, and sun position should be calculated at 10:30 AM. Currently
SAM and PVSyst use left-labeled interval data and NSRDB uses centered.
"""
def __init__(self, tmydata, metadata, label = 'right'):
import pytz
import pvlib
#import numpy as np
#First prune all GHI = 0 timepoints. New as of 0.4.0
# TODO: is this a good idea? This changes default behavior...
tmydata = tmydata[tmydata.GHI > 0]
# location data. so far needed:
# latitude, longitude, elevation, timezone, city
self.latitude = metadata['latitude']; lat=self.latitude
self.longitude = metadata['longitude']; lon=self.longitude
self.elevation = metadata['altitude']; elev=self.elevation
self.timezone = metadata['TZ']
try:
self.city = metadata['Name'] # readepw version
except KeyError:
self.city = metadata['city'] # pvlib version
#self.location.state_province_region = metadata['State'] # unnecessary
self.datetime = tmydata.index.tolist() # this is tz-aware.
self.ghi = np.array(tmydata.GHI)
self.dhi = np.array(tmydata.DHI)
self.dni = np.array(tmydata.DNI)
try:
self.albedo = np.array(tmydata.Alb)
except AttributeError: # no TMY albedo data
self.albedo = None
# Try and retrieve dewpoint and pressure
try:
self.dewpoint = np.array(tmydata['temp_dew'])
except KeyError:
self.dewpoint = None
try:
self.pressure = np.array(tmydata['atmospheric_pressure'])
except KeyError:
self.pressure = None
try:
self.temp_air = np.array(tmydata['temp_air'])
except KeyError:
self.temp_air = None
try:
self.wind_speed = np.array(tmydata['wind_speed'])
except KeyError:
self.wind_speed = None
# Try and retrieve TrackerAngle
try:
self.meastracker_angle = np.array(tmydata['Tracker Angle (degrees)'])
except KeyError:
self.meastracker_angle= None
#v0.2.5: initialize MetObj with solpos, sunrise/set and corrected time
datetimetz = pd.DatetimeIndex(self.datetime)
try: # make sure the data is tz-localized.
datetimetz = datetimetz.tz_localize(pytz.FixedOffset(self.timezone*60))# use pytz.FixedOffset (in minutes)
except TypeError: # data is tz-localized already. Just put it in local time.
datetimetz = datetimetz.tz_convert(pytz.FixedOffset(self.timezone*60))
#check for data interval. default 1h.
try:
interval = datetimetz[1]-datetimetz[0]
except IndexError:
interval = pd.Timedelta('1h') # ISSUE: if 1 datapoint is passed, are we sure it's hourly data?
print ("WARNING: TMY interval was unable to be defined, so setting it to 1h.")
# TODO: Refactor this into a subfunction. first calculate minutedelta
# based on label and interval (-30, 0, +30, +7.5 etc) then correct all.
if label.lower() == 'center':
print("Calculating Sun position for center labeled data, at exact timestamp in input Weather File")
sunup= pvlib.irradiance.solarposition.sun_rise_set_transit_spa(datetimetz, lat, lon) #new for pvlib >= 0.6.1
sunup['corrected_timestamp'] = datetimetz
else:
if interval== | pd.Timedelta('1h') | pandas.Timedelta |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#pct change of returns
def returns(dataset):
returns = dataset.pct_change()
returns = returns*100
returns = returns.dropna()
print(returns)
#compounded percentage product of returns
def product(dataset):
returns = dataset.pct_change()
x = (((returns+1).prod()-1)*100).round(2)
print('compounded percentage product of returns is :')
print(x)
def month_annualize(month_risk):
x = (1+month_risk)**12 - 1
print('The annualized return given the monthly returns is:')
print(x)
def annual_volatility(dataset):
returns = dataset.pct_change()
returns = returns.dropna()
deviations = returns - returns.mean()
squared_deviations = deviations**2
number_of_obs = returns.shape[0]
mean_squared_deviations = squared_deviations.sum()/(number_of_obs-1)
volatility = mean_squared_deviations**0.5
annualized_vol = volatility*(12**0.5)
print('volatility is : ' , volatility)
print('Annual Volatility is : ', annualized_vol)
def returns_month(dataset):
returns = dataset/100
n_months = returns.shape[0]
return_per_month = (returns+1).prod()**(1/n_months) - 1
print('The Return Per Month is : ' , return_per_month)
def annualized_ret(dataset):
returns = dataset/100
n_months = returns.shape[0]
#return_per_month = (returns+1).prod()**(1/n_months) - 1
annualized_return = (returns+1).prod()**(12/n_months) - 1
print('Ann Ret is : ' , annualized_return)
#def sharpe(dataset = 'dataset' , riskfree_rate = 'riskfree_rate'):
def sharpe(dataset,riskfree_rate):
returns = dataset/100
n_months = returns.shape[0]
return_per_month = (returns+1).prod()**(1/n_months) - 1
annualized_return = (returns+1).prod()**(12/n_months) - 1
annualized_vol = returns.std()*(12**0.5)
excess_return = annualized_return - riskfree_rate
sharpe_ratio = excess_return/annualized_vol
print('The Monthly Returns is : ', return_per_month)
print('The Annual Returns is : ', annualized_return)
print('The Sharpe Ratio is : ', sharpe_ratio)
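# Illustrative usage sketch (not from the original file): `monthly` is assumed
# to be a pandas Series of monthly returns quoted in percent (1.5 means 1.5%),
# with a 3% annual risk-free rate:
#   monthly = pd.Series([1.5, -0.8, 2.1, 0.4])
#   sharpe(monthly, riskfree_rate=0.03)
# The function compounds the returns to an annualized figure, annualizes the
# monthly volatility with sqrt(12), and prints excess return / volatility.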
'''
Drawdown:
Takes a time series of asset returns
Computes and returns a DataFrame that contains:
Wealth index
Previous Peaks
'''
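# A minimal sketch of the drawdown computation described above (assumed
# implementation, not part of the original file; the 1000 starting wealth and
# the column names are illustrative choices):
def drawdown(return_series):
    wealth_index = 1000*(1 + return_series).cumprod()   # compound a nominal 1000
    previous_peaks = wealth_index.cummax()              # running maximum of wealth
    drawdowns = (wealth_index - previous_peaks)/previous_peaks
    return pd.DataFrame({"Wealth": wealth_index,
                         "Peaks": previous_peaks,
                         "Drawdown": drawdowns})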
def date(dataset):
dataset.index = | pd.to_datetime(dataset.index, format="%Y%m") | pandas.to_datetime |
import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
from dateutil import relativedelta
import dateparser
import datetime
import warnings
warnings.filterwarnings('ignore')
URL = "http://www.tns-sofres.com/cotes-de-popularites"
resultats = requests.get(URL)
page = BeautifulSoup(resultats.text, 'html.parser')
presidents = page.find_all("tr")
#print(presidents[3].findAll("td"))
code_prez = []
for i in range(1,12,2):
code_prez.append(presidents[i].findAll("td")[1].find_all("a", href=True)[0]["href"].split("code_nom=")[1])
print(code_prez)
url_code = "http://www.tns-sofres.com/dataviz?type=1&code_nom="
dfF = | pd.DataFrame(columns=["President", "Confiance", "Pas Confiance"]) | pandas.DataFrame |
import json
import pandas as pd
import csv
NUM_PLAYER_LIST = ["6p", "9p"]
def transformHand(handsStr):
handList = handsStr.split(",")
hand1 = handList[0].strip()
hand2 = handList[1].strip()
hand1Num = hand1[0]
hand1Suit = hand1[1]
hand2Num = hand2[0]
hand2Suit = hand2[1]
# Data is designed to have hand1Num > hand2Num
# Thus, no need to check and reverse the hands
# When hand is pair
if(hand1Num == hand2Num):
return ("Pair " + hand1Num.upper())
# When hand is suited
if(hand1Suit == hand2Suit):
return ("Suited " + (hand1Num + hand2Num).upper())
return ("Offsuit " + (hand1Num + hand2Num).upper())
def createTransformedHandOddsDict(handList, oddsList):
transformedHandOddsDict = {}
for i in range(len(handList)):
hand = handList[i]
odds = oddsList[i]
transformedHand = transformHand(hand)
if(transformedHand not in transformedHandOddsDict):
transformedHandOddsDict[transformedHand] = []
transformedHandOddsDict[transformedHand].append(odds)
return transformedHandOddsDict
def createHandAvgOddsTupleList(transformedHandOddsDict):
tHandAvgOddsTupleList = []
for tHand, oddsList in transformedHandOddsDict.items():
avgOdds = round(sum(oddsList)/float(len(oddsList)), 5)
tHandAvgOddsTupleList.append((tHand, avgOdds))
tHandAvgOddsTupleList = sorted(tHandAvgOddsTupleList, key=lambda x:x[1], reverse=True)
return tHandAvgOddsTupleList
def createHandAvgOddsTupleDict(transformedHandOddsDict):
tHandAvgOddsTupleDict = {}
for tHand, oddsList in transformedHandOddsDict.items():
avgOdds = round(sum(oddsList)/float(len(oddsList)), 5)
tHandAvgOddsTupleDict[tHand] = avgOdds
return tHandAvgOddsTupleDict
def mergeResultIntoDf(tHandAvgOdds6pTupleList, transformedHandAvgOdds9pDict):
mergedResultDict = {
'rank': [],
'hand': [],
'odds_6p': [],
'odds_9p': []
}
for i in range(len(tHandAvgOdds6pTupleList)):
tHand6p = tHandAvgOdds6pTupleList[i][0]
odds6p = tHandAvgOdds6pTupleList[i][1]
odds9p = transformedHandAvgOdds9pDict[tHand6p]
mergedResultDict['rank'].append(i+1)
mergedResultDict['hand'].append(tHand6p)
mergedResultDict['odds_6p'].append(odds6p)
mergedResultDict['odds_9p'].append(odds9p)
mergedResultDf = | pd.DataFrame(data=mergedResultDict) | pandas.DataFrame |
import sys
import re
from pathlib import Path
import logging
from typing import Optional, Union
import pandas as pd
logger = logging.getLogger(__name__)
class GradsCtl(object):
def __init__(self):
self.dset = None # data file path
self.dset_template = False
self.title = ''
self.options = list()
self.data_endian = 'little'
self.local_endian = sys.byteorder
self.yrev = 0
self.undef = None
self.start_time = None
self.forecast_time = None
# dimension
self.xdef = None
self.ydef = None
self.zdef = None
self.tdef = None
self.vars = []
self.record = []
def get_data_file_path(self, record):
return GradsCtlParser.get_data_file_path(self, record)
class GradsCtlParser(object):
def __init__(self, grads_ctl: Optional[GradsCtl] = None):
self.ctl_file_path = None
if grads_ctl is None:
grads_ctl = GradsCtl()
self.grads_ctl = grads_ctl
self.ctl_file_lines = list()
self.cur_no = -1
self.parser_mapper = {
'ctl_file_name': self._parse_ctl_file_name,
'dset': self._parse_dset,
'options': self._parse_options,
'title': self._parse_title,
'undef': self._parse_undef,
'xdef': self._parse_dimension,
'ydef': self._parse_dimension,
'zdef': self._parse_dimension,
'tdef': self._parse_tdef,
'vars': self._parse_vars,
}
def set_ctl_file_path(self, ctl_file_path: Union[Path, str]):
self.ctl_file_path = Path(ctl_file_path)
with open(ctl_file_path) as f:
lines = f.readlines()
self.ctl_file_lines = [l.strip() for l in lines]
self.cur_no = 0
def parse(self, ctl_file_path: Union[Path, str]):
self.set_ctl_file_path(ctl_file_path)
total_lines = len(self.ctl_file_lines)
while self.cur_no < total_lines:
cur_line = self.ctl_file_lines[self.cur_no]
first_word = cur_line[0:cur_line.find(' ')]
if first_word.lower() in self.parser_mapper:
self.parser_mapper[first_word.lower()]()
self.cur_no += 1
self._parse_ctl_file_name()
return self.grads_ctl
def _parse_ctl_file_name(self):
ctl_file_name = Path(self.ctl_file_path).name
if (
self.grads_ctl.start_time is None
and self.grads_ctl.forecast_time is None
):
logger.debug("guess start time and forecast time")
if ctl_file_name.startswith("post.ctl_") or ctl_file_name.startswith("model.ctl_"):
file_name = ctl_file_name[ctl_file_name.index("_")+1:]
# check for GRAPES MESO:
# post.ctl_201408111202900
if re.match(r"[0-9]{15}", file_name):
self.grads_ctl.start_time = pd.to_datetime(file_name[:10], format="%Y%m%d%H")
self.grads_ctl.forecast_time = pd.Timedelta(hours=int(file_name[11:14]))
# check for GRAPES GFS:
# post.ctl_2014081112_001
elif re.match(r"[0-9]{10}_[0-9]{3}", file_name):
self.grads_ctl.start_time = pd.to_datetime(file_name[:10], format="%Y%m%d%H")
self.grads_ctl.forecast_time = pd.Timedelta(hours=int(file_name[11:]))
else:
logger.warning("We can't recognize ctl file name. ")
def _parse_dset(self):
"""
parse data file path:
dset ^postvar2021080200_024
dset ^postvar2021092700%f3%n2
"""
cur_line = self.ctl_file_lines[self.cur_no]
file_path = cur_line[4:].strip()
if "%" in file_path:
self.grads_ctl.dset_template = True
if file_path[0] == '^':
file_dir = Path(self.ctl_file_path).parent
file_path = Path(file_dir, file_path[1:])
self.grads_ctl.dset = file_path
def _parse_options(self):
cur_line = self.ctl_file_lines[self.cur_no]
options = cur_line[7:].strip().split(' ')
self.grads_ctl.options.extend(options)
for an_option in options:
if an_option == 'big_endian':
self.grads_ctl.data_endian = 'big'
elif an_option == 'little_endian':
self.grads_ctl.data_endian = 'little'
elif an_option == 'yrev':
self.grads_ctl.yrev = True
def _parse_title(self):
cur_line = self.ctl_file_lines[self.cur_no]
title = cur_line[5:].strip()
self.grads_ctl.title = title
def _parse_undef(self):
cur_line = self.ctl_file_lines[self.cur_no]
undef = cur_line[5:].strip()
self.grads_ctl.undef = float(undef)
def _parse_dimension(self):
"""
parser for keywords xdef, ydef and zdef
"""
cur_line = self.ctl_file_lines[self.cur_no].lower()
tokens = cur_line.split()
dim_name = tokens[0] # xdef, ydef, zdef
dimension_type = tokens[2]
dimension_parser_map = {
'linear': self._parse_linear_dimension,
'levels': self._parse_levels_dimension
}
if dimension_type in dimension_parser_map:
dimension_parser_map[dimension_type](dim_name, tokens)
else:
raise NotImplementedError(f'dimension_type is not supported: {dimension_type}')
def _parse_linear_dimension(self, dim_name, tokens):
"""
Parse linear dimension
xdef 1440 linear 0.0000 0.2500
"""
if len(tokens) < 4:
raise Exception("%s parser error" % dim_name)
count = int(tokens[1])
start = float(tokens[3])
step = float(tokens[4])
levels = [start + step * n for n in range(count)]
setattr(self.grads_ctl, dim_name, {
'type': 'linear',
'count': count,
'start': start,
'step': step,
'values': levels
})
def _parse_levels_dimension(self, dim_name, tokens):
"""
Parse levels dimension
zdef 27 levels
1000.000
925.0000
850.0000
700.0000
...
"""
levels = list()
count = int(tokens[1])
if len(tokens) > 2:
levels = [float(l) for l in tokens[3:]]
i = len(levels)
while i < count:
self.cur_no += 1
cur_line = self.ctl_file_lines[self.cur_no]
levels.append(float(cur_line))
i += 1
setattr(self.grads_ctl, dim_name, {
'type': 'levels',
'count': count,
'values': levels
})
def _parse_tdef(self):
"""
Parse time dimension
tdef 1 linear 00z03AUG2021 360mn
"""
cur_line = self.ctl_file_lines[self.cur_no]
parts = cur_line.strip().split()
assert parts[2] == "linear"
assert len(parts) == 5
count = int(parts[1])
start_string = parts[3]
increment_string = parts[4]
start_date = GradsCtlParser._parse_start_time(start_string)
time_step = GradsCtlParser._parse_increment_time(increment_string)
values = [start_date + time_step * i for i in range(count)]
self.grads_ctl.tdef = {
'type': 'linear',
'count': count,
'start': start_date,
'step': time_step,
'values': values
}
@classmethod
def _parse_start_time(cls, start_string) -> pd.Timestamp:
"""
parse start time
format:
hh:mmZddmmmyyyy
where:
hh = hour (two digit integer)
mm = minute (two digit integer)
dd = day (one or two digit integer)
mmm = 3-character month
yyyy = year (may be a two or four digit integer;
2 digits implies a year between 1950 and 2049)
"""
start_date = pd.Timestamp.now()
if start_string[3] == ':':
raise NotImplementedError('Not supported time with hh')
elif len(start_string) == 12:
start_date = pd.to_datetime(start_string.lower(), format='%Hz%d%b%Y')
else:
raise NotImplementedError(f'start time not supported: {start_string}')
return start_date
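# Illustrative example (assumed, not from the original file): a tdef start
# string without the optional hh:mm part, e.g. "00z03AUG2021", is lowercased
# and parsed with format '%Hz%d%b%Y', giving Timestamp('2021-08-03 00:00:00').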
@classmethod
def _parse_increment_time(cls, increment_string) -> pd.Timedelta:
"""
parse increment time
format:
vvkk
where:
vv = an integer number, 1 or 2 digits
kk = mn (minute)
hr (hour)
dy (day)
mo (month)
yr (year)
"""
vv = int(increment_string[:-2])
kk = increment_string[-2:]
kk_map = {
'mn': (lambda v: pd.Timedelta(minutes=v)),
'hr': (lambda v: pd.Timedelta(hours=v)),
'dy': (lambda v: | pd.Timedelta(days=v) | pandas.Timedelta |
import asyncio
import queue
import uuid
from datetime import datetime
import pandas as pd
from storey import build_flow, Source, Map, Filter, FlatMap, Reduce, FlowError, MapWithState, ReadCSV, Complete, AsyncSource, Choice, \
Event, Batch, Table, NoopDriver, WriteToCSV, DataframeSource, MapClass, JoinWithTable, ReduceToDataFrame, ToDataFrame, WriteToParquet, \
WriteToTSDB, Extend
class ATestException(Exception):
pass
class RaiseEx:
_counter = 0
def __init__(self, raise_after):
self._raise_after = raise_after
def raise_ex(self, element):
if self._counter == self._raise_after:
raise ATestException("test")
self._counter += 1
return element
def test_functional_flow():
controller = build_flow([
Source(),
Map(lambda x: x + 1),
Filter(lambda x: x < 3),
FlatMap(lambda x: [x, x * 10]),
Reduce(0, lambda acc, x: acc + x),
]).run()
for _ in range(100):
for i in range(10):
controller.emit(i)
controller.terminate()
termination_result = controller.await_termination()
assert termination_result == 3300
def test_csv_reader():
controller = build_flow([
ReadCSV('tests/test.csv', header=True),
FlatMap(lambda x: x),
Map(lambda x: int(x)),
Reduce(0, lambda acc, x: acc + x),
]).run()
termination_result = controller.await_termination()
assert termination_result == 21
def test_csv_reader_error_on_file_not_found():
controller = build_flow([
ReadCSV('tests/idontexist.csv', header=True),
]).run()
try:
controller.await_termination()
assert False
except FlowError as ex:
assert isinstance(ex.__cause__, FileNotFoundError)
def test_csv_reader_as_dict():
controller = build_flow([
ReadCSV('tests/test.csv', header=True, build_dict=True),
FlatMap(lambda x: [x['n1'], x['n2'], x['n3']]),
Map(lambda x: int(x)),
Reduce(0, lambda acc, x: acc + x),
]).run()
termination_result = controller.await_termination()
assert termination_result == 21
def append_and_return(lst, x):
lst.append(x)
return lst
def test_csv_reader_as_dict_with_key_and_timestamp():
controller = build_flow([
ReadCSV('tests/test-with-timestamp.csv', header=True, build_dict=True, key_field='k',
timestamp_field='t', timestamp_format='%d/%m/%Y %H:%M:%S'),
Reduce([], append_and_return, full_event=True),
]).run()
termination_result = controller.await_termination()
assert len(termination_result) == 2
assert termination_result[0].key == 'm1'
assert termination_result[0].time == datetime(2020, 2, 15, 2, 0)
assert termination_result[0].body == {'k': 'm1', 't': datetime(2020, 2, 15, 2, 0), 'v': 8, 'b': True}
assert termination_result[1].key == 'm2'
assert termination_result[1].time == datetime(2020, 2, 16, 2, 0)
assert termination_result[1].body == {'k': 'm2', 't': datetime(2020, 2, 16, 2, 0), 'v': 14, 'b': False}
def test_csv_reader_with_key_and_timestamp():
controller = build_flow([
ReadCSV('tests/test-with-timestamp.csv', header=True, key_field='k',
timestamp_field='t', timestamp_format='%d/%m/%Y %H:%M:%S'),
Reduce([], append_and_return, full_event=True),
]).run()
termination_result = controller.await_termination()
assert len(termination_result) == 2
assert termination_result[0].key == 'm1'
assert termination_result[0].time == datetime(2020, 2, 15, 2, 0)
assert termination_result[0].body == ['m1', datetime(2020, 2, 15, 2, 0), 8, True]
assert termination_result[1].key == 'm2'
assert termination_result[1].time == datetime(2020, 2, 16, 2, 0)
assert termination_result[1].body == ['m2', datetime(2020, 2, 16, 2, 0), 14, False]
def test_csv_reader_as_dict_no_header():
controller = build_flow([
ReadCSV('tests/test-no-header.csv', header=False, build_dict=True),
FlatMap(lambda x: [x[0], x[1], x[2]]),
Map(lambda x: int(x)),
Reduce(0, lambda acc, x: acc + x),
]).run()
termination_result = controller.await_termination()
assert termination_result == 21
def test_dataframe_source():
df = pd.DataFrame([['hello', 1, 1.5], ['world', 2, 2.5]], columns=['string', 'int', 'float'])
controller = build_flow([
DataframeSource(df),
Reduce([], append_and_return),
]).run()
termination_result = controller.await_termination()
expected = [{'string': 'hello', 'int': 1, 'float': 1.5}, {'string': 'world', 'int': 2, 'float': 2.5}]
assert termination_result == expected
def test_indexed_dataframe_source():
df = pd.DataFrame([['hello', 1, 1.5], ['world', 2, 2.5]], columns=['string', 'int', 'float'])
df.set_index(['string', 'int'], inplace=True)
controller = build_flow([
DataframeSource(df),
Reduce([], append_and_return),
]).run()
termination_result = controller.await_termination()
expected = [{'string': 'hello', 'int': 1, 'float': 1.5}, {'string': 'world', 'int': 2, 'float': 2.5}]
assert termination_result == expected
def test_dataframe_source_with_metadata():
t1 = datetime(2020, 2, 15)
t2 = datetime(2020, 2, 16)
df = pd.DataFrame([['key1', t1, 'id1', 1.1], ['key2', t2, 'id2', 2.2]],
columns=['my_key', 'my_time', 'my_id', 'my_value'])
controller = build_flow([
DataframeSource(df, key_column='my_key', time_column='my_time', id_column='my_id'),
Reduce([], append_and_return, full_event=True),
]).run()
termination_result = controller.await_termination()
expected = [Event({'my_key': 'key1', 'my_time': t1, 'my_id': 'id1', 'my_value': 1.1}, key='key1', time=t1, id='id1'),
Event({'my_key': 'key2', 'my_time': t2, 'my_id': 'id2', 'my_value': 2.2}, key='key2', time=t2, id='id2')]
assert termination_result == expected
async def async_dataframe_source():
df = pd.DataFrame([['hello', 1, 1.5], ['world', 2, 2.5]], columns=['string', 'int', 'float'])
controller = await build_flow([
DataframeSource(df),
Reduce([], append_and_return),
]).run_async()
termination_result = await controller.await_termination()
expected = [{'string': 'hello', 'int': 1, 'float': 1.5}, {'string': 'world', 'int': 2, 'float': 2.5}]
assert termination_result == expected
def test_async_dataframe_source():
asyncio.run(async_dataframe_source())
def test_error_flow():
controller = build_flow([
Source(),
Map(lambda x: x + 1),
Map(RaiseEx(500).raise_ex),
Reduce(0, lambda acc, x: acc + x),
]).run()
try:
for i in range(1000):
controller.emit(i)
except FlowError as flow_ex:
assert isinstance(flow_ex.__cause__, ATestException)
def test_broadcast():
controller = build_flow([
Source(),
Map(lambda x: x + 1),
Filter(lambda x: x < 3, termination_result_fn=lambda x, y: x + y),
[
Reduce(0, lambda acc, x: acc + x)
],
[
Reduce(0, lambda acc, x: acc + x)
]
]).run()
for i in range(10):
controller.emit(i)
controller.terminate()
termination_result = controller.await_termination()
assert termination_result == 6
def test_broadcast_complex():
controller = build_flow([
Source(),
Map(lambda x: x + 1),
Filter(lambda x: x < 3, termination_result_fn=lambda x, y: x + y),
[
Reduce(0, lambda acc, x: acc + x),
],
[
Map(lambda x: x * 100),
Reduce(0, lambda acc, x: acc + x)
],
[
Map(lambda x: x * 1000),
Reduce(0, lambda acc, x: acc + x)
]
]).run()
for i in range(10):
controller.emit(i)
controller.terminate()
termination_result = controller.await_termination()
assert termination_result == 3303
# Same as test_broadcast_complex but without using build_flow
def test_broadcast_complex_no_sugar():
source = Source()
filter = Filter(lambda x: x < 3, termination_result_fn=lambda x, y: x + y)
source.to(Map(lambda x: x + 1)).to(filter)
    filter.to(Reduce(0, lambda acc, x: acc + x))
filter.to(Map(lambda x: x * 100)).to(Reduce(0, lambda acc, x: acc + x))
filter.to(Map(lambda x: x * 1000)).to(Reduce(0, lambda acc, x: acc + x))
controller = source.run()
for i in range(10):
controller.emit(i)
controller.terminate()
termination_result = controller.await_termination()
assert termination_result == 3303
def test_map_with_state_flow():
controller = build_flow([
Source(),
MapWithState(1000, lambda x, state: (state, x)),
Reduce(0, lambda acc, x: acc + x),
]).run()
for i in range(10):
controller.emit(i)
controller.terminate()
termination_result = controller.await_termination()
assert termination_result == 1036
def test_map_with_cache_state_flow():
table_object = Table("table", NoopDriver())
table_object._cache['tal'] = {'color': 'blue'}
table_object._cache['dina'] = {'color': 'red'}
def enrich(event, state):
event['color'] = state['color']
state['counter'] = state.get('counter', 0) + 1
return event, state
controller = build_flow([
Source(),
MapWithState(table_object, lambda x, state: enrich(x, state), group_by_key=True),
Reduce([], append_and_return),
]).run()
for i in range(10):
key = 'tal'
if i % 3 == 0:
key = 'dina'
controller.emit(Event(body={'col1': i}, key=key))
controller.terminate()
termination_result = controller.await_termination()
expected = [{'col1': 0, 'color': 'red'},
{'col1': 1, 'color': 'blue'},
{'col1': 2, 'color': 'blue'},
{'col1': 3, 'color': 'red'},
{'col1': 4, 'color': 'blue'},
{'col1': 5, 'color': 'blue'},
{'col1': 6, 'color': 'red'},
{'col1': 7, 'color': 'blue'},
{'col1': 8, 'color': 'blue'},
{'col1': 9, 'color': 'red'}]
expected_cache = {'tal': {'color': 'blue', 'counter': 6}, 'dina': {'color': 'red', 'counter': 4}}
assert termination_result == expected
assert table_object._cache == expected_cache
def test_map_with_empty_cache_state_flow():
table_object = Table("table", NoopDriver())
def enrich(event, state):
if 'first_value' not in state:
state['first_value'] = event['col1']
event['diff_from_first'] = event['col1'] - state['first_value']
state['counter'] = state.get('counter', 0) + 1
return event, state
controller = build_flow([
Source(),
MapWithState(table_object, lambda x, state: enrich(x, state), group_by_key=True),
Reduce([], append_and_return),
]).run()
for i in range(10):
key = 'tal'
if i % 3 == 0:
key = 'dina'
controller.emit(Event(body={'col1': i}, key=key))
controller.terminate()
termination_result = controller.await_termination()
expected = [{'col1': 0, 'diff_from_first': 0},
{'col1': 1, 'diff_from_first': 0},
{'col1': 2, 'diff_from_first': 1},
{'col1': 3, 'diff_from_first': 3},
{'col1': 4, 'diff_from_first': 3},
{'col1': 5, 'diff_from_first': 4},
{'col1': 6, 'diff_from_first': 6},
{'col1': 7, 'diff_from_first': 6},
{'col1': 8, 'diff_from_first': 7},
{'col1': 9, 'diff_from_first': 9}]
expected_cache = {'dina': {'first_value': 0, 'counter': 4}, 'tal': {'first_value': 1, 'counter': 6}}
assert termination_result == expected
assert table_object._cache == expected_cache
def test_awaitable_result():
controller = build_flow([
Source(),
Map(lambda x: x + 1, termination_result_fn=lambda _, x: x),
[
Complete()
],
[
Reduce(0, lambda acc, x: acc + x)
]
]).run()
for i in range(10):
awaitable_result = controller.emit(i, return_awaitable_result=True)
assert awaitable_result.await_result() == i + 1
controller.terminate()
termination_result = controller.await_termination()
assert termination_result == 55
async def async_test_async_source():
controller = await build_flow([
AsyncSource(),
Map(lambda x: x + 1, termination_result_fn=lambda _, x: x),
[
Complete()
],
[
Reduce(0, lambda acc, x: acc + x)
]
]).run()
for i in range(10):
result = await controller.emit(i, await_result=True)
assert result == i + 1
await controller.terminate()
termination_result = await controller.await_termination()
assert termination_result == 55
def test_async_source():
loop = asyncio.new_event_loop()
loop.run_until_complete(async_test_async_source())
async def async_test_error_async_flow():
controller = await build_flow([
AsyncSource(),
Map(lambda x: x + 1),
Map(RaiseEx(500).raise_ex),
Reduce(0, lambda acc, x: acc + x),
]).run()
try:
for i in range(1000):
await controller.emit(i)
except FlowError as flow_ex:
assert isinstance(flow_ex.__cause__, ATestException)
def test_error_async_flow():
loop = asyncio.new_event_loop()
loop.run_until_complete(async_test_error_async_flow())
def test_choice():
small_reduce = Reduce(0, lambda acc, x: acc + x)
big_reduce = build_flow([
Map(lambda x: x * 100),
Reduce(0, lambda acc, x: acc + x)
])
controller = build_flow([
Source(),
Choice([(big_reduce, lambda x: x % 2 == 0)],
default=small_reduce,
termination_result_fn=lambda x, y: x + y)
]).run()
for i in range(10):
controller.emit(i)
controller.terminate()
termination_result = controller.await_termination()
assert termination_result == 2025
def test_metadata():
def mapf(x):
x.key = x.key + 1
return x
def redf(acc, x):
if x.key not in acc:
acc[x.key] = []
acc[x.key].append(x.body)
return acc
controller = build_flow([
Source(),
Map(mapf, full_event=True),
Reduce({}, redf, full_event=True)
]).run()
for i in range(10):
controller.emit(Event(i, key=i % 3))
controller.terminate()
termination_result = controller.await_termination()
assert termination_result == {1: [0, 3, 6, 9], 2: [1, 4, 7], 3: [2, 5, 8]}
def test_metadata_immutability():
def mapf(x):
x.key = 'new key'
return x
controller = build_flow([
Source(),
Map(lambda x: 'new body'),
Map(mapf, full_event=True),
Complete(full_event=True)
]).run()
event = Event('original body', key='original key')
result = controller.emit(event, return_awaitable_result=True).await_result()
controller.terminate()
controller.await_termination()
assert event.key == 'original key'
assert event.body == 'original body'
assert result.key == 'new key'
assert result.body == 'new body'
def test_batch():
controller = build_flow([
Source(),
Batch(4, 100),
Reduce([], lambda acc, x: append_and_return(acc, x)),
]).run()
for i in range(10):
controller.emit(i)
controller.terminate()
termination_result = controller.await_termination()
assert termination_result == [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
def test_batch_full_event():
def append_body_and_return(lst, x):
ll = []
for item in x:
ll.append(item.body)
lst.append(ll)
return lst
controller = build_flow([
Source(),
Batch(4, 100, full_event=True),
Reduce([], lambda acc, x: append_body_and_return(acc, x)),
]).run()
for i in range(10):
controller.emit(i)
controller.terminate()
termination_result = controller.await_termination()
assert termination_result == [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
def test_batch_with_timeout():
q = queue.Queue(1)
def reduce_fn(acc, x):
if x[0] == 0:
q.put(None)
acc.append(x)
return acc
controller = build_flow([
Source(),
Batch(4, 1),
Reduce([], reduce_fn),
]).run()
for i in range(10):
if i == 3:
q.get()
controller.emit(i)
controller.terminate()
termination_result = controller.await_termination()
assert termination_result == [[0, 1, 2], [3, 4, 5, 6], [7, 8, 9]]
async def async_test_write_csv(tmpdir):
file_path = f'{tmpdir}/test_write_csv/out.csv'
controller = await build_flow([
AsyncSource(),
WriteToCSV(file_path, columns=['n', 'n*10'], header=True)
]).run()
for i in range(10):
await controller.emit([i, 10 * i])
await controller.terminate()
await controller.await_termination()
with open(file_path) as file:
result = file.read()
expected = "n,n*10\n0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
assert result == expected
def test_write_csv(tmpdir):
asyncio.run(async_test_write_csv(tmpdir))
async def async_test_write_csv_error(tmpdir):
file_path = f'{tmpdir}/test_write_csv_error.csv'
write_csv = WriteToCSV(file_path)
controller = await build_flow([
AsyncSource(),
write_csv
]).run()
try:
for i in range(10):
await controller.emit(i)
await controller.terminate()
await controller.await_termination()
assert False
except FlowError as ex:
assert isinstance(ex.__cause__, TypeError)
def test_write_csv_error(tmpdir):
asyncio.run(async_test_write_csv_error(tmpdir))
def test_write_csv_with_dict(tmpdir):
file_path = f'{tmpdir}/test_write_csv_with_dict.csv'
controller = build_flow([
Source(),
WriteToCSV(file_path, columns=['n', 'n*10'], header=True)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i})
controller.terminate()
controller.await_termination()
with open(file_path) as file:
result = file.read()
expected = "n,n*10\n0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
assert result == expected
def test_write_csv_infer_columns(tmpdir):
file_path = f'{tmpdir}/test_write_csv_infer_columns.csv'
controller = build_flow([
Source(),
WriteToCSV(file_path, header=True)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i})
controller.terminate()
controller.await_termination()
with open(file_path) as file:
result = file.read()
expected = "n,n*10\n0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
assert result == expected
def test_write_csv_infer_columns_without_header(tmpdir):
file_path = f'{tmpdir}/test_write_csv_infer_columns_without_header.csv'
controller = build_flow([
Source(),
WriteToCSV(file_path)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i})
controller.terminate()
controller.await_termination()
with open(file_path) as file:
result = file.read()
expected = "0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
assert result == expected
def test_write_csv_with_metadata(tmpdir):
file_path = f'{tmpdir}/test_write_csv_with_metadata.csv'
controller = build_flow([
Source(),
WriteToCSV(file_path, columns=['event_key=$key', 'n', 'n*10'], header=True)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i}, key=f'key{i}')
controller.terminate()
controller.await_termination()
with open(file_path) as file:
result = file.read()
expected = \
"event_key,n,n*10\nkey0,0,0\nkey1,1,10\nkey2,2,20\nkey3,3,30\nkey4,4,40\nkey5,5,50\nkey6,6,60\nkey7,7,70\nkey8,8,80\nkey9,9,90\n"
assert result == expected
def test_write_csv_with_metadata_no_rename(tmpdir):
file_path = f'{tmpdir}/test_write_csv_with_metadata_no_rename.csv'
controller = build_flow([
Source(),
WriteToCSV(file_path, columns=['$key', 'n', 'n*10'], header=True)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i}, key=f'key{i}')
controller.terminate()
controller.await_termination()
with open(file_path) as file:
result = file.read()
expected = \
"key,n,n*10\nkey0,0,0\nkey1,1,10\nkey2,2,20\nkey3,3,30\nkey4,4,40\nkey5,5,50\nkey6,6,60\nkey7,7,70\nkey8,8,80\nkey9,9,90\n"
assert result == expected
def test_write_csv_with_rename(tmpdir):
file_path = f'{tmpdir}/test_write_csv_with_rename.csv'
controller = build_flow([
Source(),
WriteToCSV(file_path, columns=['n', 'n x 10=n*10'], header=True)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i})
controller.terminate()
controller.await_termination()
with open(file_path) as file:
result = file.read()
expected = "n,n x 10\n0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
assert result == expected
def test_write_csv_from_lists_with_metadata(tmpdir):
file_path = f'{tmpdir}/test_write_csv_with_metadata.csv'
controller = build_flow([
Source(),
WriteToCSV(file_path, columns=['event_key=$key', 'n', 'n*10'], header=True)
]).run()
for i in range(10):
controller.emit([i, 10 * i], key=f'key{i}')
controller.terminate()
controller.await_termination()
with open(file_path) as file:
result = file.read()
expected = \
"event_key,n,n*10\nkey0,0,0\nkey1,1,10\nkey2,2,20\nkey3,3,30\nkey4,4,40\nkey5,5,50\nkey6,6,60\nkey7,7,70\nkey8,8,80\nkey9,9,90\n"
assert result == expected
def test_write_csv_from_lists_with_metadata_and_column_pruning(tmpdir):
file_path = f'{tmpdir}/test_write_csv_from_lists_with_metadata_and_column_pruning.csv'
controller = build_flow([
Source(),
WriteToCSV(file_path, columns=['event_key=$key', 'n*10'], header=True)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i}, key=f'key{i}')
controller.terminate()
controller.await_termination()
with open(file_path) as file:
result = file.read()
expected = "event_key,n*10\nkey0,0\nkey1,10\nkey2,20\nkey3,30\nkey4,40\nkey5,50\nkey6,60\nkey7,70\nkey8,80\nkey9,90\n"
assert result == expected
def test_write_csv_infer_with_metadata_columns(tmpdir):
file_path = f'{tmpdir}/test_write_csv_infer_with_metadata_columns.csv'
controller = build_flow([
Source(),
WriteToCSV(file_path, columns=['event_key=$key'], header=True, infer_columns_from_data=True)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i}, key=f'key{i}')
controller.terminate()
controller.await_termination()
with open(file_path) as file:
result = file.read()
expected = \
"event_key,n,n*10\nkey0,0,0\nkey1,1,10\nkey2,2,20\nkey3,3,30\nkey4,4,40\nkey5,5,50\nkey6,6,60\nkey7,7,70\nkey8,8,80\nkey9,9,90\n"
assert result == expected
def test_write_csv_fail_to_infer_columns(tmpdir):
file_path = f'{tmpdir}/test_write_csv_fail_to_infer_columns.csv'
controller = build_flow([
Source(),
WriteToCSV(file_path, header=True)
]).run()
try:
controller.emit([0])
controller.terminate()
controller.await_termination()
assert False
except FlowError as flow_ex:
assert isinstance(flow_ex.__cause__, TypeError)
def test_reduce_to_dataframe():
controller = build_flow([
Source(),
ReduceToDataFrame()
]).run()
expected = []
for i in range(10):
controller.emit({'my_int': i, 'my_string': f'this is {i}'})
expected.append({'my_int': i, 'my_string': f'this is {i}'})
expected = pd.DataFrame(expected)
controller.terminate()
termination_result = controller.await_termination()
assert termination_result.equals(expected), f"{termination_result}\n!=\n{expected}"
def test_reduce_to_dataframe_with_index():
index = 'my_int'
controller = build_flow([
Source(),
ReduceToDataFrame(index=index)
]).run()
expected = []
for i in range(10):
controller.emit({'my_int': i, 'my_string': f'this is {i}'})
expected.append({'my_int': i, 'my_string': f'this is {i}'})
expected = pd.DataFrame(expected)
expected.set_index(index, inplace=True)
controller.terminate()
termination_result = controller.await_termination()
assert termination_result.equals(expected), f"{termination_result}\n!=\n{expected}"
def test_reduce_to_dataframe_with_index_from_lists():
index = 'my_int'
controller = build_flow([
Source(),
ReduceToDataFrame(index=index, columns=['my_int', 'my_string'])
]).run()
expected = []
for i in range(10):
controller.emit([i, f'this is {i}'])
expected.append({'my_int': i, 'my_string': f'this is {i}'})
expected = pd.DataFrame(expected)
expected.set_index(index, inplace=True)
controller.terminate()
termination_result = controller.await_termination()
assert termination_result.equals(expected), f"{termination_result}\n!=\n{expected}"
def test_reduce_to_dataframe_indexed_by_key():
index = 'my_key'
controller = build_flow([
Source(),
ReduceToDataFrame(index=index, insert_key_column_as=index)
]).run()
expected = []
for i in range(10):
controller.emit({'my_int': i, 'my_string': f'this is {i}'}, key=f'key{i}')
expected.append({'my_int': i, 'my_string': f'this is {i}', 'my_key': f'key{i}'})
expected = pd.DataFrame(expected)
expected.set_index(index, inplace=True)
controller.terminate()
termination_result = controller.await_termination()
assert termination_result.equals(expected), f"{termination_result}\n!=\n{expected}"
def test_to_dataframe_with_index():
index = 'my_int'
controller = build_flow([
Source(),
Batch(5),
ToDataFrame(index=index),
Reduce([], append_and_return, full_event=True)
]).run()
expected1 = []
for i in range(5):
data = {'my_int': i, 'my_string': f'this is {i}'}
controller.emit(data)
expected1.append(data)
expected2 = []
for i in range(5, 10):
data = {'my_int': i, 'my_string': f'this is {i}'}
controller.emit(data)
expected2.append(data)
expected1 = pd.DataFrame(expected1)
    expected2 = pd.DataFrame(expected2)
# -*- coding: utf-8 -*-
# @Time : 2021/11/13 10:31
# @Author : <NAME>
# @FileName: plugins.py
# @Usage:
# @Note:
# @E-mail: <EMAIL>
import os
import numpy as np
import pandas as pd
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Blast.Applications import NcbimakeblastdbCommandline
from Bio.Restriction import Restriction as Rst
from Bio.Restriction.Restriction_Dictionary import rest_dict, typedict
from functools import reduce
from collections import defaultdict
from Bio.SeqUtils import GC
def get_seq(_seq, _start, _end, _flanking_length):
if _start <= _flanking_length:
return _seq.seq[len(_seq.seq)-_flanking_length-1+_start:] + _seq.seq[0: _end+_flanking_length]
elif _end >= len(_seq.seq)-_flanking_length:
return _seq.seq[_start-_flanking_length-1:] + _seq.seq[:_flanking_length-(len(_seq.seq)-_end)]
else:
return _seq.seq[_start-_flanking_length-1: _end+_flanking_length]
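# Illustrative sketch (not part of the original module): how get_seq() is typically called on a
# SeqRecord to pull a target window plus flanking bases. The record and coordinates below are
# made-up assumptions for demonstration only.
def _demo_get_seq():
    from Bio.Seq import Seq
    from Bio.SeqRecord import SeqRecord
    record = SeqRecord(Seq("ATGCATGCATGCATGCATGC"), id="demo")
    # 1-based start/end of the target region, with 3 bp of flanking sequence on each side
    return get_seq(record, 8, 12, 3)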
class Database:
def __init__(self, fa_path, db_path, _exec):
"""
Since chloroplast was a circular, we need cut the base at the beginning which equal to the length of the DSS
to the end
:param fa_path: the input fasta path
:param db_path: the output prepared database path
:param exec: the BLAST executor path. If it in the PATH, this could be empty
"""
self.in_path = fa_path
self.out_path = db_path
self.exec = _exec
def database_generate(self, _length=20):
fasta_in = SeqIO.parse(self.in_path, 'fasta')
fasta_out = []
for assembly in fasta_in:
assembly.seq = assembly.seq + assembly.seq[0:_length-1]
fasta_out.append(assembly)
SeqIO.write(fasta_out, self.out_path, 'fasta')
def copy_file(self):
open(self.out_path, 'wb').write(open(self.in_path, 'rb').read())
def database_blast(self):
database_cmd = NcbimakeblastdbCommandline(
cmd=os.path.join(self.exec, 'makeblastdb'),
dbtype='nucl',
input_file=self.out_path)
database_cmd()
        print('IdenDSS database created successfully!')
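# Illustrative sketch (file names are placeholder assumptions): building the padded FASTA and the
# BLAST database with the Database helper above. Requires the NCBI BLAST+ binaries to be installed
# (or reachable via the third constructor argument).
def _demo_database():
    db = Database('genomes.fasta', 'genomes_db.fasta', '')
    db.database_generate(_length=20)  # pad each circular sequence with its first length-1 bases
    db.database_blast()               # run makeblastdb on the padded FASTA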
class RFLP:
def __init__(self, _seqs, _start, _end, _enzymes):
"""
:param _seqs: a list of Bio.SeqRecord.SeqRecord
:param _start:
:param _end:
:param _enzymes: a list of character contained Restriction Enzymes' names
"""
self.enzymes = _enzymes
self.seqs = _seqs
self.start = _start
self.end = _end
@staticmethod
def create_enzyme(name):
e_types = [x for t, (x, y) in typedict.items() if name in y][0]
enzyme_types = tuple(getattr(Rst, x) for x in e_types)
return Rst.RestrictionType(name, enzyme_types, rest_dict[name])
def get_site_interval(self, name):
enzyme = self.create_enzyme(name)
rco_pos, *_, rco_site = enzyme.characteristic()
return {name: np.array([rco_pos, len(rco_site) - rco_pos])}
def analysis_pos(self, _seq, _rb, _rbi):
"""
:param _seq: Bio.Seq.Seq
:param _rb: Bio.Restriction.RestrictionBatch
:param _rbi: Dict used to correct the whole recognized site (Bio.Restriction just return the cut position)
:return: list: enzymes' names
"""
rb_res = _rb.search(_seq)
res = {}
res_enzymes = []
for _enzyme, _pos_list in rb_res.items():
_enzyme_list = []
if len(_pos_list) == 1:
_enzyme_list.append(_pos_list[0] + _rbi[str(_enzyme)])
res[_enzyme] = _enzyme_list
for _enzyme, _pos in res.items():
if len(_pos) > 0:
if _pos[0][1] > self.start and self.end > _pos[0][0]:
res_enzymes.append(str(_enzyme))
return res_enzymes
def rflp(self):
enzyme_list = self.enzymes
rb = reduce(lambda x, y: x + y, map(self.create_enzyme, enzyme_list))
rbi = defaultdict()
for _ in map(self.get_site_interval, enzyme_list):
rbi.update(_)
res_list = []
for _seq in self.seqs:
enzymes = self.analysis_pos(_seq.seq, rb, rbi)
if enzymes:
_tmp_dict = dict(zip(('assembly', 'start', 'end'), _seq.id.split('-')))
_tmp_dict['enzyme'] = ','.join(enzymes)
res_list.append(_tmp_dict)
else:
continue
return pd.DataFrame(res_list)
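# Illustrative sketch (hypothetical input): screening a candidate window for restriction sites with
# the RFLP class above. Sequence ids follow the "<assembly>-<start>-<end>" convention that
# RFLP.rflp() expects; the single EcoRI site is placed inside the probed 251-290 window.
def _demo_rflp():
    from Bio.Seq import Seq
    from Bio.SeqRecord import SeqRecord
    seq = Seq("A" * 255 + "GAATTC" + "A" * 339)
    seqs = [SeqRecord(seq, id="asm1-100-139")]
    finder = RFLP(seqs, _start=251, _end=290, _enzymes=["EcoRI", "HindIII"])
    return finder.rflp()  # DataFrame listing assembly, start, end and the matching enzymes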
def search_rflp(args):
# Restriction Enzymes
with open(os.path.abspath(os.path.join(__file__, '../../template/RestrictionEnzymes'))) as _f_in:
enzymes_list = _f_in.read().strip().split('\n')
# import DSS result
with open(args.input, 'r') as f_in:
file_list = f_in.read().strip().split('\n')
# import database
_meta_fasta = SeqIO.to_dict(SeqIO.parse(args.database, 'fasta'))
# do RFLP search
for _file in file_list:
_dss_tb = pd.read_table(_file)
if sum(_dss_tb['seq'].isna()) == 1:
continue
_dss_tb[['start', 'end']] = _dss_tb.apply((lambda x: x['position'].split('-')), axis=1, result_type="expand")
in_list = []
_write_list = []
for _idx, _item in _dss_tb.iterrows():
if not args.circular:
if int(_item['start']) < 250 or int(_item['end']) > len(_meta_fasta[_item['assembly']].seq) - 250:
continue
in_list.append(
SeqRecord(seq=get_seq(_meta_fasta[_item['assembly']], int(_item['start']), int(_item['end']), 250),
id=_item['assembly']+'-'+_item['start']+'-'+_item['end'])
)
rflp_ins = RFLP(in_list, 251, 290, enzymes_list)
_tmp_df = rflp_ins.rflp()
if _tmp_df.empty:
continue
else:
_tmp_df = _tmp_df.merge(_dss_tb, how='left')
_tmp_df[['group', 'assembly', 'seq', 'position', 'GC', 'enzyme']].to_csv(
os.path.join(args.output, os.path.splitext(os.path.split(_file)[1])[0] + '_rflp.txt'),
sep='\t',
index=False
)
def combine(args):
with open(args.input, 'r') as f_in:
file_list = f_in.read().strip().split('\n')
for _file in file_list:
_dss_tb = pd.read_table(_file)
if sum(_dss_tb['seq'].isna()) == 1:
continue
_prefix = '.'.join(os.path.split(_file)[1].split('.')[:-1])
_dss_tb[['start', 'end']] = _dss_tb.position.str.split('-', expand=True)
_dss_tb[['start', 'end']] = _dss_tb[['start', 'end']].astype(int)
_dss_tb.sort_values(by="start", inplace=True)
_combined_list = []
_seq = ''
_start_pos = None
_end_pos = None
pointer = -1
for _idx, _row in _dss_tb.iterrows():
if not _row['start'] == pointer + 1:
_combined_list.append({'seq': _seq, 'start': _start_pos, 'end': _end_pos, 'GC': GC(_seq)})
_seq = _row['seq']
_start_pos = _row['start']
pointer = _row['start']
else:
_end_pos = _row['end']
_seq += _row['seq'][-1]
pointer += 1
_combined_list = _combined_list[1:]
combined_res = | pd.DataFrame(_combined_list) | pandas.DataFrame |
import logging
import os
import re
import warnings
from multiprocessing import Pool
from contextlib import ExitStack
import numpy as np
import pandas as pd
import tables
from tables import open_file
from tqdm import tqdm
import astropy.units as u
from astropy.table import Table, vstack, QTable
from ctapipe.containers import SimulationConfigContainer
from ctapipe.instrument import SubarrayDescription
from ctapipe.io import HDF5TableReader, HDF5TableWriter
from eventio import Histograms, EventIOFile
from eventio.search_utils import yield_toplevel_of_type, yield_all_subobjects
from eventio.simtel.objects import History, HistoryConfig
from pyirf.simulations import SimulatedEventsInfo
from .lstcontainers import (
ExtraMCInfo,
MetaData,
ThrownEventsHistogram,
)
log = logging.getLogger(__name__)
__all__ = [
'add_column_table',
'add_config_metadata',
'add_global_metadata',
'add_source_filenames',
'auto_merge_h5files',
'check_mcheader',
'check_metadata',
'check_thrown_events_histogram',
'copy_h5_nodes',
'extract_simulation_nsb',
'extract_observation_time',
'get_dataset_keys',
'get_srcdep_assumed_positions',
'get_srcdep_params',
'get_stacked_table',
'global_metadata',
'merge_dl2_runs',
'merging_check',
'parse_cfg_bytestring',
'read_data_dl2_to_QTable',
'read_dl2_params',
'read_mc_dl2_to_QTable',
'read_metadata',
'read_simtel_energy_histogram',
'read_simu_info_hdf5',
'read_simu_info_merged_hdf5',
'recursive_copy_node',
'stack_tables_h5files',
'write_calibration_data',
'write_dataframe',
'write_dl2_dataframe',
'write_mcheader',
'write_metadata',
'write_simtel_energy_histogram',
'write_subarray_tables',
]
dl1_params_tel_mon_ped_key = "/dl1/event/telescope/monitoring/pedestal"
dl1_params_tel_mon_cal_key = "/dl1/event/telescope/monitoring/calibration"
dl1_params_tel_mon_flat_key = "/dl1/event/telescope/monitoring/flatfield"
dl1_params_lstcam_key = "/dl1/event/telescope/parameters/LST_LSTCam"
dl1_images_lstcam_key = "/dl1/event/telescope/image/LST_LSTCam"
dl2_params_lstcam_key = "/dl2/event/telescope/parameters/LST_LSTCam"
dl1_params_src_dep_lstcam_key = "/dl1/event/telescope/parameters_src_dependent/LST_LSTCam"
dl2_params_src_dep_lstcam_key = "/dl2/event/telescope/parameters_src_dependent/LST_LSTCam"
HDF5_ZSTD_FILTERS = tables.Filters(
complevel=5, # enable compression, 5 is a good tradeoff between compression and speed
complib='blosc:zstd', # compression using blosc/zstd
fletcher32=True, # attach a checksum to each chunk for error correction
bitshuffle=False, # for BLOSC, shuffle bits for better compression
)
def read_simu_info_hdf5(filename):
"""
Read simu info from an hdf5 file
Parameters
----------
filename: str
path to the HDF5 file
Returns
-------
`ctapipe.containers.SimulationConfigContainer`
"""
with HDF5TableReader(filename) as reader:
mc_reader = reader.read("/simulation/run_config", SimulationConfigContainer())
mc = next(mc_reader)
return mc
def read_simu_info_merged_hdf5(filename):
"""
Read simu info from a merged hdf5 file.
Check that simu info are the same for all runs from merged file
Combine relevant simu info such as num_showers (sum)
Note: works for a single run file as well
Parameters
----------
filename: path to an hdf5 merged file
Returns
-------
`ctapipe.containers.SimulationConfigContainer`
"""
with open_file(filename) as file:
simu_info = file.root["simulation/run_config"]
colnames = simu_info.colnames
skip = {"num_showers", "shower_prog_start", "detector_prog_start", "obs_id"}
for k in filter(lambda k: k not in skip, colnames):
assert np.all(simu_info[:][k] == simu_info[0][k])
num_showers = simu_info[:]["num_showers"].sum()
combined_mcheader = read_simu_info_hdf5(filename)
combined_mcheader["num_showers"] = num_showers
for k in combined_mcheader.keys():
if (
combined_mcheader[k] is not None
and combined_mcheader.fields[k].unit is not None
):
combined_mcheader[k] = u.Quantity(
combined_mcheader[k], combined_mcheader.fields[k].unit
)
return combined_mcheader
def get_dataset_keys(h5file):
"""
Return a list of all dataset keys in a HDF5 file
Parameters
----------
filename: str - path to the HDF5 file
Returns
-------
list of keys
"""
# we use exit_stack to make sure we close the h5file again if it
# was not an already open tables.File
exit_stack = ExitStack()
with exit_stack:
if not isinstance(h5file, tables.File):
h5file = exit_stack.enter_context(tables.open_file(h5file, 'r'))
dataset_keys = [
node._v_pathname
for node in h5file.root._f_walknodes()
if not isinstance(node, tables.Group)
]
return dataset_keys
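# Minimal usage sketch (hypothetical file name): listing every dataset in a DL1/DL2 file,
# e.g. to decide which nodes to merge or copy.
def _demo_get_dataset_keys():
    keys = get_dataset_keys("dl1_LST-1.Run01234.h5")
    return [k for k in keys if k.startswith("/dl1")]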
def get_stacked_table(filenames_list, node):
"""
Stack tables at node from each file in files
Parameters
----------
filenames_list: list of paths
node: str
Returns
-------
`astropy.table.Table`
"""
    try:
        files = [open_file(filename) for filename in filenames_list]
    except OSError:
        print("Can't open files")
        raise
table_list = [Table(file.root[node][:]) for file in files]
[file.close() for file in files]
return vstack(table_list)
def stack_tables_h5files(filenames_list, output_filename="merged.h5", keys=None):
"""
In theory similar to auto_merge_h5files but slower. Keeping it for reference.
Merge h5 files produced by lstchain using astropy.
A list of keys (corresponding to file nodes) that need to be included in the merge can be given.
If None, all tables in the file will be merged.
Parameters
----------
filenames_list: list of str
output_filename: str
keys: None or list of str
"""
keys = get_dataset_keys(filenames_list[0]) if keys is None else keys
for k in keys:
merged_table = get_stacked_table(filenames_list, k)
merged_table.write(output_filename, path=k, append=True)
def copy_h5_nodes(from_file, to_file, nodes=None):
'''
Copy dataset (Table and Array) nodes from ``from_file`` to ``to_file``.
Parameters
----------
from_file: tables.File
input h5 file opened with tables
to_file: tables.File
output h5 file opened with tables, must be writable
    nodes: Iterable[str]
        Keys to copy; if None, all Table and Array nodes in ``from_file``
        are copied.
'''
if nodes is None:
keys = set(get_dataset_keys(from_file))
else:
keys = set(nodes)
groups = set()
with warnings.catch_warnings():
# when copying nodes, we have no control over names
# so it does not make sense to warn about them
warnings.simplefilter('ignore', tables.NaturalNameWarning)
for k in keys:
in_node = from_file.root[k]
parent_path = in_node._v_parent._v_pathname
name = in_node._v_name
groups.add(parent_path)
if isinstance(in_node, tables.Table):
t = to_file.create_table(
parent_path,
name,
createparents=True,
obj=from_file.root[k].read()
)
for att in from_file.root[k].attrs._f_list():
t.attrs[att] = from_file.root[k].attrs[att]
elif isinstance(in_node, tables.Array):
a = to_file.create_array(
parent_path,
name,
createparents=True,
obj=from_file.root[k].read()
)
for att in from_file.root[k].attrs._f_list():
a.attrs[att] = in_node.attrs[att]
# after copying the datasets, also make sure we copy group metadata
# of all copied groups
for k in groups:
from_node = from_file.root[k]
to_node = to_file.root[k]
for attr in from_node._v_attrs._f_list():
to_node._v_attrs[attr] = from_node._v_attrs[attr]
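# Minimal usage sketch (hypothetical paths): copying selected nodes from one HDF5 file into a
# fresh one, e.g. to extract only the DL1 parameters table.
def _demo_copy_h5_nodes():
    with tables.open_file("input.h5", "r") as f_in, \
            tables.open_file("subset.h5", "w", filters=HDF5_ZSTD_FILTERS) as f_out:
        copy_h5_nodes(f_in, f_out, nodes=[dl1_params_lstcam_key])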
def auto_merge_h5files(
file_list,
output_filename="merged.h5",
nodes_keys=None,
merge_arrays=False,
filters=HDF5_ZSTD_FILTERS,
progress_bar=True
):
"""
Automatic merge of HDF5 files.
    A list of node keys can be provided to merge only those nodes. If None, all nodes are merged.
    It can also be used to create a new file ``output_filename`` from the content stored in another file.
Parameters
----------
file_list: list of path
output_filename: path
nodes_keys: list of path
merge_arrays: bool
filters
progress_bar : bool
Enabling the display of the progress bar during event processing.
"""
file_list = list(file_list)
if len(file_list) > 1:
file_list = merging_check(file_list)
if nodes_keys is None:
keys = set(get_dataset_keys(file_list[0]))
else:
keys = set(nodes_keys)
bar = tqdm(total=len(file_list), disable=not progress_bar)
with open_file(output_filename, 'w', filters=filters) as merge_file:
with open_file(file_list[0]) as f1:
copy_h5_nodes(f1, merge_file, nodes=keys)
bar.update(1)
for filename in file_list[1:]:
common_keys = keys.intersection(get_dataset_keys(filename))
with open_file(filename) as file:
for k in common_keys:
in_node = file.root[k]
out_node = merge_file.root[k]
try:
if isinstance(in_node, tables.table.Table) or merge_arrays:
# doing `.astype(out_node.dtype)` fixes an issue
# when dtypes do not exactly match but are convertible
# https://github.com/cta-observatory/cta-lstchain/issues/671
out_node.append(in_node.read().astype(out_node.dtype))
except:
log.error("Can't append node {} from file {}".format(k, filename))
raise
bar.update(1)
add_source_filenames(merge_file, file_list)
# merge global metadata and store source file names
metadata0 = read_metadata(file_list[0])
write_metadata(metadata0, output_filename)
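# Minimal usage sketch (hypothetical run files): merging several DL1 files into one, keeping only
# the parameters table; merging_check() validates that the inputs are consistent.
def _demo_auto_merge():
    runs = ["dl1_run1.h5", "dl1_run2.h5", "dl1_run3.h5"]
    auto_merge_h5files(runs, output_filename="dl1_merged.h5", nodes_keys=[dl1_params_lstcam_key])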
def add_source_filenames(h5file, file_list):
exit_stack = ExitStack()
with exit_stack:
if not isinstance(h5file, tables.File):
h5file = exit_stack.enter_context(tables.open_file(h5file, 'a'))
# we replace any existing node
if "/source_filenames" in h5file.root:
h5file.remove_node("/", "source_filenames", recursive=True)
file_list = [str(p) for p in file_list]
sources_group = h5file.create_group("/", "source_filenames", "List of input files")
h5file.create_array(sources_group, "filenames", file_list, "List of files merged")
def merging_check(file_list):
"""
Check that a list of hdf5 files are compatible for merging regarding:
- array info
- metadata
- MC simu info (only for simulations)
- MC histograms (only for simulations)
Parameters
----------
file_list: list of paths to hdf5 files
Returns
-------
list: list of paths of files that can be merged
"""
if len(file_list) < 2:
raise ValueError("Need at least two files for merging")
mergeable_list = file_list.copy()
first_file = mergeable_list[0]
subarray_info0 = SubarrayDescription.from_hdf(first_file)
metadata0 = read_metadata(first_file)
if subarray_info0.name == "MonteCarloArray":
mcheader0 = read_simu_info_hdf5(first_file)
thrown_events_hist0 = read_simtel_energy_histogram(first_file)
for filename in mergeable_list[1:]:
try:
metadata = read_metadata(filename)
check_metadata(metadata0, metadata)
subarray_info = SubarrayDescription.from_hdf(filename)
if subarray_info0.name == "MonteCarloArray":
mcheader = read_simu_info_hdf5(filename)
thrown_events_hist = read_simtel_energy_histogram(filename)
check_mcheader(mcheader0, mcheader)
check_thrown_events_histogram(thrown_events_hist0, thrown_events_hist)
if subarray_info != subarray_info0:
raise ValueError('Subarrays do not match')
except ValueError as e:
log.error(rf"{filename} cannot be merged '¯\_(ツ)_/¯: {e}'")
mergeable_list.remove(filename)
return mergeable_list
def write_simtel_energy_histogram(source, output_filename, obs_id=None, filters=HDF5_ZSTD_FILTERS, metadata={}):
"""
Write the energy histogram from a simtel source to a HDF5 file
Parameters
----------
source: `ctapipe.io.EventSource`
output_filename: str
obs_id: float, int, str or None
"""
# Writing histograms
with HDF5TableWriter(
filename=output_filename, group_name="simulation", mode="a", filters=filters
) as writer:
writer.meta = metadata
for hist in yield_toplevel_of_type(source.file_, Histograms):
pass
# find histogram id 6 (thrown energy)
thrown = None
for hist in source.file_.histograms:
if hist["id"] == 6:
thrown = hist
thrown_hist = ThrownEventsHistogram()
thrown_hist.fill_from_simtel(thrown)
thrown_hist.obs_id = obs_id
if metadata is not None:
add_global_metadata(thrown_hist, metadata)
writer.write("thrown_event_distribution", [thrown_hist])
def read_simtel_energy_histogram(filename):
"""
Read the simtel energy histogram from a HDF5 file.
Parameters
----------
filename: path
Returns
-------
`lstchain.io.lstcontainers.ThrownEventsHistogram`
"""
with HDF5TableReader(filename=filename) as reader:
histtab = reader.read(
"/simulation/thrown_event_distribution", ThrownEventsHistogram()
)
hist = next(histtab)
return hist
def write_mcheader(mcheader, output_filename, obs_id=None, filters=HDF5_ZSTD_FILTERS, metadata=None):
"""
Write the mcheader from an event container to a HDF5 file
Parameters
----------
output_filename: str
"""
extramc = ExtraMCInfo()
extramc.prefix = "" # get rid of the prefix
if metadata is not None:
add_global_metadata(mcheader, metadata)
add_global_metadata(extramc, metadata)
with HDF5TableWriter(
filename=output_filename, group_name="simulation", mode="a", filters=filters
) as writer:
extramc.obs_id = obs_id
writer.write("run_config", [extramc, mcheader])
def check_mcheader(mcheader1, mcheader2):
"""
Check that the information in two mcheaders are physically consistent.
Parameters
----------
mcheader1: `ctapipe.containers.SimulationConfigContainer`
mcheader2: `ctapipe.containers.SimulationConfigContainer`
Returns
-------
"""
if mcheader1.keys() != mcheader2.keys():
different = set(mcheader1.keys()).symmetric_difference(mcheader2.keys())
raise ValueError(f'MC header keys do not match, differing keys: {different}')
    # The number of simulated showers does not need to match
    keys = list(mcheader1.keys())
    # keys that don't need to be checked:
for k in [
"num_showers",
"shower_reuse",
"detector_prog_start",
"detector_prog_id",
"shower_prog_id",
"shower_prog_start",
]:
if k in keys:
keys.remove(k)
for k in keys:
v1 = mcheader1[k]
v2 = mcheader2[k]
if v1 != v2:
raise ValueError(f'MC headers do not match for key {k}: {v1!r} / {v2!r}')
def check_thrown_events_histogram(thrown_events_hist1, thrown_events_hist2):
"""
Check that two ThrownEventsHistogram class are compatible with each other
Parameters
----------
thrown_events_hist1: `lstchain.io.lstcontainers.ThrownEventsHistogram`
thrown_events_hist2: `lstchain.io.lstcontainers.ThrownEventsHistogram`
"""
keys1 = set(thrown_events_hist1.keys())
keys2 = set(thrown_events_hist2.keys())
if keys1 != keys2:
different = keys1.symmetric_difference(keys2)
raise ValueError(f'Histogram keys do not match, differing keys: {different}')
    # Only the binning needs to match; the event counts themselves may differ
keys = ["bins_energy", "bins_core_dist"]
for k in keys:
if (thrown_events_hist1[k] != thrown_events_hist2[k]).all():
raise ValueError(f'Key {k} does not match for histograms')
def write_metadata(metadata, output_filename):
"""
Write metadata to a HDF5 file
Parameters
----------
metadata: `lstchain.io.MetaData()`
output_filename: path
"""
    # ctapipe's HDF5Writer cannot write strings, and pytables can only write fixed-length strings
# So this metadata is written in the file attributes
with open_file(output_filename, mode="a") as file:
for k, item in metadata.as_dict().items():
            if k in file.root._v_attrs and type(file.root._v_attrs[k]) is list:
attribute = file.root._v_attrs[k].extend(metadata[k])
file.root._v_attrs[k] = attribute
else:
file.root._v_attrs[k] = metadata[k]
def read_metadata(filename):
"""
Read metadata from a HDF5 file
Parameters
----------
filename: path
"""
metadata = MetaData()
with open_file(filename) as file:
for k in metadata.keys():
try:
metadata[k] = file.root._v_attrs[k]
except:
                # this ensures backward and forward reading compatibility
print("Metadata {} does not exist in file {}".format(k, filename))
return metadata
def check_metadata(metadata1, metadata2):
"""
Check that to MetaData class are compatible with each other
Parameters
----------
metadata1: `lstchain.io.MetaData`
metadata2: `lstchain.io.MetaData`
"""
keys1 = set(metadata1.keys())
keys2 = set(metadata2.keys())
if keys1 != keys2:
different = keys1.symmetric_difference(keys2)
raise ValueError(f'Metadata keys do not match, differing keys: {different}')
keys = ["LSTCHAIN_VERSION"]
for k in keys:
v1 = metadata1[k]
v2 = metadata2[k]
if v1 != v2:
raise ValueError(f'Metadata does not match for key {k}: {v1!r} / {v2!r}')
def global_metadata():
"""
Get global metadata container
Returns
-------
`lstchain.io.lstcontainers.MetaData`
"""
from ctapipe import __version__ as ctapipe_version
from ctapipe_io_lst import __version__ as ctapipe_io_lst_version
from .. import __version__ as lstchain_version
metadata = MetaData()
metadata.LSTCHAIN_VERSION = lstchain_version
metadata.CTAPIPE_VERSION = ctapipe_version
metadata.CTAPIPE_IO_LST_VERSION = ctapipe_io_lst_version
metadata.CONTACT = "LST Consortium"
return metadata
def add_global_metadata(container, metadata):
"""
Add global metadata to a container in container.meta
Parameters
----------
container: `ctapipe.containers.Container`
metadata: `lstchain.io.lstchainers.MetaData`
"""
meta_dict = metadata.as_dict()
for k, item in meta_dict.items():
container.meta[k] = item
def add_config_metadata(container, configuration):
"""
Add configuration parameters to a container in container.meta.config
Parameters
----------
container: `ctapipe.containers.Container`
configuration: config dict
"""
linted_config = str(configuration)
linted_config = linted_config.replace("<LazyConfigValue {}>", "None")
linted_config = re.sub(r"<LazyConfigValue\svalue=(.*?)>", "\\1", linted_config)
linted_config = re.sub(r"DeferredConfigString\((.*?)\)", "\\1", linted_config)
linted_config = re.sub(r"PosixPath\((.*?)\)", "\\1", linted_config)
linted_config = linted_config.replace("\'", "\"")
linted_config = linted_config.replace("None", "\"None\"")
linted_config = linted_config.replace("inf", "\"inf\"")
linted_config = linted_config.replace("True", "true")
linted_config = linted_config.replace("False", "false")
container.meta["config"] = linted_config
def write_subarray_tables(writer, event, metadata=None):
"""
Write subarray tables info to a HDF5 file
Parameters
----------
writer: `ctapipe.io.HDF5Writer`
event: `ctapipe.containers.ArrayEventContainer`
metadata: `lstchain.io.lstcontainers.MetaData`
"""
if metadata is not None:
add_global_metadata(event.index, metadata)
add_global_metadata(event.simulation, metadata)
add_global_metadata(event.trigger, metadata)
writer.write(table_name="subarray/trigger", containers=[event.index, event.trigger])
def write_dataframe(dataframe, outfile, table_path, mode="a", index=False, config=None, meta=None):
"""
Write a pandas dataframe to a HDF5 file using pytables formatting.
Parameters
----------
dataframe: `pandas.DataFrame`
outfile: path
table_path: str
path to the table to write in the HDF5 file
config: config metadata
meta: global metadata
"""
if not table_path.startswith("/"):
table_path = "/" + table_path
with tables.open_file(outfile, mode=mode) as f:
path, table_name = table_path.rsplit("/", maxsplit=1)
t = f.create_table(
path,
table_name,
dataframe.to_records(index=index),
createparents=True,
)
if config:
t.attrs["config"] = config
if meta:
for k, item in meta.as_dict().items():
t.attrs[k] = item
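# Minimal usage sketch (hypothetical content): persisting a small DataFrame under a custom node
# path with the pytables-based writer above.
def _demo_write_dataframe():
    df = pd.DataFrame({"event_id": [1, 2, 3], "intensity": [120.0, 95.5, 310.2]})
    write_dataframe(df, outfile="demo.h5", table_path="/dl1/demo_table")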
def write_dl2_dataframe(dataframe, outfile, config=None, meta=None):
"""
Write DL2 dataframe to a HDF5 file
Parameters
----------
dataframe: `pandas.DataFrame`
outfile: path
config: config metadata
meta: global metadata
"""
write_dataframe(dataframe, outfile=outfile, table_path=dl2_params_lstcam_key, config=config, meta=meta)
def add_column_table(table, ColClass, col_label, values):
"""
    Add a column to a pytables Table
Parameters
----------
table: `tables.table.Table`
ColClass: `tables.atom.MetaAtom`
col_label: str
values: list or `numpy.ndarray`
Returns
-------
`tables.table.Table`
"""
# Step 1: Adjust table description
d = table.description._v_colobjects.copy() # original description
d[col_label] = ColClass() # add column
# Step 2: Create new temporary table:
newtable = tables.Table(
table._v_file.root, "_temp_table", d, filters=table.filters
) # new table
table.attrs._f_copy(newtable) # copy attributes
# Copy table rows, also add new column values:
for row, value in zip(table, values):
newtable.append([tuple(list(row[:]) + [value])])
newtable.flush()
# Step 3: Move temporary table to original location:
parent = table._v_parent # original table location
name = table._v_name # original table name
table.remove() # remove original table
newtable.move(parent, name) # move temporary table to original location
return newtable
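# Minimal usage sketch (assumes the hypothetical demo.h5 written above): appending a float column
# to an existing pytables Table; tables.Float64Col is the column class, "weight" the new column name.
def _demo_add_column_table():
    with tables.open_file("demo.h5", "a") as f:
        table = f.get_node("/dl1/demo_table")
        add_column_table(table, tables.Float64Col, "weight", [1.0] * table.nrows)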
def recursive_copy_node(src_file, dir_file, path):
"""
    Copy a node recursively from a source file to a destination file without copying the tables/arrays in the node
Parameters
----------
src_file: opened `tables.file.File`
dir_file: `tables.file.File` opened in writing mode
path: path to the node in `src_file`
"""
path_split = path.split('/')
while '' in path_split:
path_split.remove('')
assert len(path_split) > 0
src_file.copy_node('/',
name=path_split[0],
newparent=dir_file.root,
newname=path_split[0],
recursive=False)
if len(path_split) > 1:
recursive_path = os.path.join("/", path_split[0])
for p in path_split[1:]:
src_file.copy_node(
recursive_path,
name=p,
newparent=dir_file.root[recursive_path],
newname=p,
recursive=False,
)
recursive_path = os.path.join(recursive_path, p)
def write_calibration_data(writer, mon_index, mon_event, new_ped=False, new_ff=False):
mon_event.pedestal.prefix = ''
mon_event.flatfield.prefix = ''
mon_event.calibration.prefix = ''
mon_index.prefix = ''
# update index
if new_ped:
mon_index.pedestal_id += 1
if new_ff:
mon_index.flatfield_id += 1
mon_index.calibration_id += 1
if new_ped:
# write ped container
writer.write(
table_name="telescope/monitoring/pedestal",
containers=[mon_index, mon_event.pedestal],
)
if new_ff:
# write calibration container
writer.write(
table_name="telescope/monitoring/flatfield",
containers=[mon_index, mon_event.flatfield],
)
# write ff container
writer.write(
table_name="telescope/monitoring/calibration",
containers=[mon_index, mon_event.calibration],
)
def read_mc_dl2_to_QTable(filename):
"""
Read MC DL2 files from lstchain and convert into pyirf internal format
- astropy.table.QTable
Parameters
----------
filename: path
Returns
-------
`astropy.table.QTable`, `pyirf.simulations.SimulatedEventsInfo`
"""
# mapping
name_mapping = {
"mc_energy": "true_energy",
"mc_alt": "true_alt",
"mc_az": "true_az",
"mc_alt_tel": "pointing_alt",
"mc_az_tel": "pointing_az",
"gammaness": "gh_score",
}
unit_mapping = {
"true_energy": u.TeV,
"reco_energy": u.TeV,
"pointing_alt": u.rad,
"pointing_az": u.rad,
"true_alt": u.rad,
"true_az": u.rad,
"reco_alt": u.rad,
"reco_az": u.rad,
}
# add alpha for source-dependent analysis
srcdep_flag = dl2_params_src_dep_lstcam_key in get_dataset_keys(filename)
if srcdep_flag:
unit_mapping['alpha'] = u.deg
simu_info = read_simu_info_merged_hdf5(filename)
pyirf_simu_info = SimulatedEventsInfo(
n_showers=simu_info.num_showers * simu_info.shower_reuse,
energy_min=simu_info.energy_range_min,
energy_max=simu_info.energy_range_max,
max_impact=simu_info.max_scatter_range,
spectral_index=simu_info.spectral_index,
viewcone=simu_info.max_viewcone_radius,
)
events = pd.read_hdf(filename, key=dl2_params_lstcam_key)
if srcdep_flag:
events_srcdep = get_srcdep_params(filename, 'on')
events = pd.concat([events, events_srcdep], axis=1)
events = events.rename(columns=name_mapping)
events = QTable.from_pandas(events)
for k, v in unit_mapping.items():
events[k] *= v
return events, pyirf_simu_info
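# Minimal usage sketch (hypothetical DL2 file): loading MC DL2 events in the pyirf-friendly format
# returned above.
def _demo_read_mc_dl2():
    events, simu_info = read_mc_dl2_to_QTable("dl2_gamma_test.h5")
    return events["true_energy"], simu_info.n_showers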
def read_data_dl2_to_QTable(filename, srcdep_pos=None):
"""
Read data DL2 files from lstchain and return QTable format
Parameters
----------
filename: path to the lstchain DL2 file
srcdep_pos: assumed source position for source-dependent analysis
Returns
-------
`astropy.table.QTable`
"""
# Mapping
name_mapping = {
"gammaness": "gh_score",
"alt_tel": "pointing_alt",
"az_tel": "pointing_az",
}
unit_mapping = {
"reco_energy": u.TeV,
"pointing_alt": u.rad,
"pointing_az": u.rad,
"reco_alt": u.rad,
"reco_az": u.rad,
"dragon_time": u.s,
}
# add alpha for source-dependent analysis
srcdep_flag = dl2_params_src_dep_lstcam_key in get_dataset_keys(filename)
if srcdep_flag:
unit_mapping['alpha'] = u.deg
data = pd.read_hdf(filename, key=dl2_params_lstcam_key)
if srcdep_flag:
data_srcdep = get_srcdep_params(filename, srcdep_pos)
        data = pd.concat([data, data_srcdep], axis=1)
#I. cleangot(): clean dfgot from wikiling.de
#1. insert links()
#2. every lemma() to own row
#3. occurences() to own col
#4. certainty() to own col
#5. reconstructedness() to own col
#6.a clean col lemma
#6.b clean col lemma
#6. translations()
#7.a activate got-ipa transcription file
#8 clean English translations
#9.a activate dictionary for translating pos-tag-names from wikiling to nltk
#9.b translate pos-tags from wikiling to nltk notation
#11. write empty file to fill in substitutions
#12. write clean dfgot.csv
#II. cleanuralonet(): clean uralonet_raw.csv from uralonet.nytud.hu
#1. turn sound to C for consonant or V for vowel
#2. get phonotactic profile of word
#3. activate transcription files with copy2epitran if not already activated while cleaning dfgot
#4. clean uralonet_raw.csv
#III. mine and clean zaicz.csv
#1. mine pdf
#2. create dictionary from webscraped txts of English-Hungarian dictionary (web-address: )
#3. create dictionary of word-origin pairs from zaicz.pdf
#4. read annex of zaicz pdf and transform to csv (main input file)
#5. add missing translations with google translate
#6. add pos-tags with spacy
#!works on python 3.5. and spacy 2.0.12 (Hungarian pos_tagger)
#!create virtual environment via anaconda navigator for this function
#7. converts spacy's pos-tags to nltk's. https://spacy.io/api/annotation
#8.a translate origin-tags from Hungarian to English with google translate
#8.b insert translated origin-tags to df
#9.a convert origin to tags "U, FU, Ug"
#9.b insert new origin-tags "U, FU, Ug"
#10. remove brackets
#11. translate info-col to English
#11.b insert translated info-col to df
#imports for cleangot() and cleanuralonet()
import pandas as pd
import re #reconstr(), clemma(), ctransl()
import epitran #transcribe gothic to ipa
import os #copy2epitran()
import shutil #copy2epitran()
import itertools #for deletion()
from lingpy import ipa2tokens
from loanpy import word2struc
#imports for zaicz.csv
from bs4 import BeautifulSoup
import pdfminer
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.layout import LAParams
from pdfminer.converter import TextConverter
from io import StringIO
from pdfminer.pdfpage import PDFPage
from googletrans import Translator
translator = Translator()
hu2en_origdict={}
origtagdict={}
posdict={}
cns="jwʘǀǃǂǁk͡pɡ͡bcɡkqɖɟɠɢʄʈʛbb͡ddd̪pp͡ttt̪ɓɗb͡βk͡xp͡ɸq͡χɡ͡ɣɢ͡ʁc͡çd͡ʒt͡ʃɖ͡ʐɟ͡ʝʈ͡ʂb͡vd̪͡z̪d̪͡ðd̪͡ɮ̪d͡zd͡ɮd͡ʑp͡ft̪͡s̪t̪͡ɬ̪t̪͡θt͡st͡ɕt͡ɬxçħɣʁʂʃʐʒʕʝχfss̪vzz̪ðɸβθɧɕɬɬ̪ɮʑɱŋɳɴmnn̪ɲʀʙʟɭɽʎrr̪ɫɺɾhll̪ɦðʲt͡ʃʲnʲʃʲC"
vow="ɑɘɞɤɵʉaeiouyæøœɒɔəɘɵɞɜɛɨɪɯɶʊɐʌʏʔɥɰʋʍɹɻɜ¨ȣ∅"
os.chdir(os.path.dirname(os.path.abspath(__file__))+r"\data\pre") #change to folder "data"
#I. Clean dfgot
#1. insert col links
def links():
linkliste=[]
for i in range(1,281): #number of pages
linkliste.append(20*["https://koeblergerhard.de/wikiling/?query=&f=got&mod=0&page="+str(i)]) #20 entries per page
linkliste=[item for sublist in linkliste for item in sublist][:5582] #flatten list
return linkliste
#2. explode lemmas
def explemma(graw):
graw.at[1373,"got_lemma"]=str(graw.iloc[1373,1]).replace("lat.got., ","") #clean line 1373
graw["got_lemma"]=graw["got_lemma"].str.split(", ") #bei Lemma stehen mehrere Grundformen durch ', ' separiert
graw=graw.explode("got_lemma").reset_index(drop=True) #diese Alternativformen in neue Reihen einfügen
return graw
#3. occurences to own col
def occurences(entry):
return re.findall("[0-9]+",entry)
#4. level of certainty to own col
def certainty(entry):
if "?" in entry:
return "uncertain"
else:
return "certain"
#5. reconstructedness to own col
def reconstr(entry): #https://www.koeblergerhard.de/got/3A/got_vorwort.html (ctrl+f: "Stern")
if re.search(r"\*\?? ?[0-9]* ?$",entry) is not None:
return "form" #other forms documented, basic form reconstructed
elif re.search(r"^\*",entry) is not None:
return "word" #other forms not documented, word itself reconstructed
else:
return ""
#6.a clean col "got_lemma"
def helpclean(filename,column):
chars=[]
df=pd.read_csv(filename,encoding="utf-8")
df=df.fillna("")
for i in df[column].tolist():
chars+=i
return set(chars)
#6.b clean col "got_lemma"
def clemma(entry):
entry.replace("Pl.","")
entry.lower()
return re.sub(r"[^a-zA-ZÀ-ÿþāēīōūƕ]+", "", entry) #use helpclean() to find out what to keep
#7. copy files to epitran\data\map and epitran\data\post to piggyback epitran
def copy2epitran():
epipath=epitran.__file__[:-(len("\epitran.py"))]+r"data"
dstmap = epipath+r"\map"
dstpost = epipath+r"\post"
srcgotmap = os.getcwd()+r"\got-translit.csv"
srcgotpost = os.getcwd()+r"\got-translit.txt"
srcuew = os.getcwd()+r"\uew-scrape.csv"
shutil.copy(srcgotmap,dstmap)
shutil.copy(srcgotpost,dstpost) #special rules go to folder "post"
shutil.copy(srcuew,dstmap)
#8 clean English translations
def ctransl(entry):
entry=re.sub(r" ?\([^)]+\)", "", entry) #remove parentheses and their content
entry=entry.replace(', ',',').replace(' (','(').replace(' ','_')
entry=re.sub(r"[^0-9A-Za-z,_äéþōƕ]+", "", entry) #use helpclean() to find out what to keep
entry=entry.replace(",", ", ")
return entry
#9.a activate dictionary of wikiling-pos-tags to nltk-pos-tags
def getposdict():
poskeys="Abkürzung,Adj.,Adv.,Art.,Buchstabe,F.,Interj.,Konj.,LN.,M.,N.,Num.,ON.,Partikel,PN.,Präp.,"\
"Pron.,Sb.,V.,Wort," #last comma important
posvalues=["r","a","r","r","r","n","r","r","n","n","n","r","n","r","n","r","r","n","v","n","nvar"]
global posdict
posdict = dict(zip(poskeys.split(','), posvalues))
#9.b translate wikiling pos-tags to nltk-pos tags
def nltktags(entry):
if posdict=={}:
getposdict()
nltktags=""
for i in entry.split(", "):
try:
nltktags+=posdict[i]
except KeyError:
return "nvar"
return nltktags
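#illustrative sketch: how the Wikiling tags of one entry collapse into nltk-style letters via
#posdict ("Adj." -> "a", "F." -> "n"); unknown tags fall back to "nvar"
def _demo_nltktags():
    return nltktags("Adj., F."), nltktags("Unknown.")  # -> ("an", "nvar")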
#11. write fillitout.csv
def fillitout(column): #automate this function later
fillout = pd.DataFrame({"to_substitute" : sorted(list(set([i for s in column.apply(ipa2tokens, merge_vowels=False, merge_geminates=False).tolist() for i in s])))})
fillout["substitution"]=""
fillout.to_csv("fillitout.csv",encoding="utf-8",index=False)
def cleangot(filename): #e.g. g_raw.csv
graw=pd.read_csv(filename, encoding="utf-8")
graw=graw.rename({"Lemma":"got_lemma"}, axis=1) #rename column
graw=graw.drop(['#', 'Sprachen'], axis=1)
graw=graw.fillna("") #else problems with nans
graw["links"] = links()
graw=explemma(graw)
graw["occurences"]=graw["got_lemma"].apply(occurences)
graw["got_certainty"]=graw["got_lemma"].apply(certainty)
graw["got_reconstructedness"]=graw["got_lemma"].apply(reconstr)
graw["got_lemma"]=graw["got_lemma"].apply(clemma)
graw=graw[graw["got_lemma"].astype(bool)] #remove rows where lemma turned empty after cleaning
copy2epitran() #copy files to epitran-folder
graw["got_ipa"]=graw["got_lemma"].apply(epitran.Epitran("got-translit").transliterate)
graw["got_en"]=graw["Englische Bedeutung"].apply(ctransl)
graw["got_pos"]=graw["Wortart"].apply(nltktags)
gotclean=graw
gotclean.to_csv("dfgot.csv",encoding="utf-8",index=False)
return gotclean
#################################################################################################################
#II. Clean uralonet_raw.csv
#1. turn sound to C for consonant or V for vowel
#2. get phonotactic profile of word
#3. activate transcription files with copy2epitran if not already activated while cleaning dfgot
#4. clean uralonet_raw.csv
def cleanuralonet(filename): #in: uralonet_raw.csv
df=pd.read_csv(filename,encoding="utf-8")
copy2epitran()
df["New"]=df.New_orth.apply(epitran.Epitran('hun-Latn').transliterate)
df["Old"]=df.Old_orth.apply(epitran.Epitran('uew-scrape').transliterate)
df["old_struc"]=df.Old.apply(word2struc)
df.to_csv("uralonet.csv",encoding="utf-8",index=False)
return df
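#minimal usage sketch (hypothetical path): uralonet_raw.csv must contain the columns New_orth and
#Old_orth; the call below adds IPA transcriptions plus a CV profile (via word2struc) and writes
#uralonet.csv into the working directory
def _demo_cleanuralonet():
    return cleanuralonet("uralonet_raw.csv")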
###################################################################################################################
#III mine and clean zaicz.csv
#1. mine pdf
#2. create dictionary from webscraped txts of English-Hungarian dictionary (web-address: )
#3. create dictionary of word-origin pairs from zaicz.pdf
#4. read annex of zaicz pdf and transform to csv (main input file)
#5. add missing translations with google translate
#6. add pos-tags with spacy
#!works on python 3.5. and spacy 2.0.12 (Hungarian pos_tagger)
#!create virtual environment via anaconda navigator for this function
#7. converts spacy's pos-tags to nltk's. https://spacy.io/api/annotation
#8.a translate origin-tags from Hungarian to English with google translate
#8.b insert translated origin-tags to df
#9.a convert origin to tags "U, FU, Ug"
#9.b insert new origin-tags "U, FU, Ug"
#10. remove brackets
#11. translate info-col to English
#11.b insert translated info-col to df
#1. mine pdf
def get_pdf_file_content(path_to_pdf):
resource_manager = PDFResourceManager(caching=True)
out_text = StringIO()
laParams = LAParams()
text_converter = TextConverter(resource_manager, out_text,laparams=laParams)
fp = open(path_to_pdf, 'rb')
interpreter = PDFPageInterpreter(resource_manager,text_converter)
for page in PDFPage.get_pages(fp,pagenos=set(),maxpages=0,password="",caching=True,check_extractable=True):
interpreter.process_page(page)
text= out_text.getvalue()
fp.close()
text_converter.close()
out_text.close()
return text
#2. create dictionary from webscraped txts of English-Hungarian dictionary (web-address: )
def getdict_huen():
hunendict={}
for i in range(ord('a'), ord('z')+1): #dictionary entries from a-z
print(chr(i))
if i != ord("q"): #only letter missing from dictionary's website is "q"
hul=[]
enl=[]
subdict={}
soup1=BeautifulSoup(open("szotar"+chr(i)+".txt").read())
            soup1=soup1.body #cut out trash from beginning and end
for s in soup1.select('script'): #cut off anything between tag "script"(bottom)
s.extract()
for s in soup1.select('center'): #cut off anything between tag "center" (top)
s.extract()
zl= re.sub(r'\<.*?\>', '', str(soup1)) #remove tags <body> and <html> from top and bottom
if i == ord("z"): #z has some extra strings in the end that cause errors
zl=zl[1:-9] #cut off the troublesome strings
            zlsplit=zl.split("\n\n ")[1:-1] #cut off the first and last elements, they cause errors
for j in zlsplit:
wordpair=j.split(" -» ") #split into hu and en word
hul.append(wordpair[0].replace("õ","ő").replace("û","ű"))#correct wrong encoding
enl.append(wordpair[1])
for index, j in enumerate(hul):
if j in hunendict:
hunendict[j].append(enl[index]) #add meaning if already in dict
else:
hunendict[j]=[enl[index]] #else create new entry
    with open('hunendict.py','w',encoding="utf-8") as data:
        data.write("hunendict="+str(hunendict)) #persist the dict as an importable python module
    return hunendict
#3. create dictionary of word-origin pairs from zaicz.pdf
def getdictorig():
zaiczcsv=pd.DataFrame(columns=['word','year','info','disambiguated','suffix',"en"]) #dffinal
#zaicz1: year, zaicz2: origin
zaicz2=zaicz.split(' \n \n\n \n\n\x0cA SZAVAK EREDET SZERINTI CSOPORTOSÍTÁSA* \n\n \n \n \n \n \n \n',1)[1]
dictorig={}
zlist=zaicz2.split("\n \n")
for index,i in enumerate(zlist):
if index<101:
para=i.split("\n",1)
paratag=para[0]
if len(para)>1:
paratxt=para[1]
for i in paratxt.split(", "):
if i[-1]=="?":
dictorig[i.replace("x0c","").replace("\n","").replace("?","").replace(" ","")]=paratag+"?"
else:
dictorig[i.replace("x0c","").replace("\n","").replace(" ","")]=paratag
if index>=101 and (index % 2) ==0:
for j in i.split(", "):
if i[-1]=="?":
dictorig[j.replace("x0c","").replace("\n","").replace(" ","")]=zlist[index-1]+"?"
else:
dictorig[j.replace("x0c","").replace("\n","").replace(" ","")]=zlist[index-1]
    with open('dictorig.py','w',encoding="utf-8") as data:
        data.write("dictorig="+str(dictorig)) #persist the dict as an importable python module
    return dictorig
#4. read annex of zaicz pdf and transform to csv (main input file)
def zaicz2csv():
try:
from hunendict import hunendict
    except ImportError:
print("create hunendict with getdict_huen()")
try:
from dictorig import dictorig
    except ImportError:
        print("create dictorig with getdictorig()")
zaiczcsv=pd.DataFrame(columns=['word','year','info','disambiguated','suffix',"en","orig","pos_hun", "wordipa"]) #dffinal
#zaicz1: year, zaicz2: origin
path_to_pdf = r"C:\Users\Viktor\OneDrive\PhD cloud\Vorgehensweisen\loanpy6\szotar\TAMOP_annex.pdf"
zaicz=get_pdf_file_content(path_to_pdf)
zaicz=zaicz.replace("Valószínűleg ősi szavak","Valószínűleg ősi szavak\n") #correct typo in dictionary
zaicz1=zaicz.split(' \n \n\n \n\n\x0cA SZAVAK EREDET SZERINTI CSOPORTOSÍTÁSA* \n\n \n \n \n \n \n \n',1)[0]
zaicz2=zaicz.split(' \n \n\n \n\n\x0cA SZAVAK EREDET SZERINTI CSOPORTOSÍTÁSA* \n\n \n \n \n \n \n \n',1)[1]
#zaicz1 (year):
for index,i in enumerate(zaicz1.split('[')): #list of year-word pairs
if ':' in i: #otherwise error
zaiczcsv.at[index,'word']=i.split(':')[1].replace(" ","").replace("\n","") .replace("1951-től","").replace("1000-ig","")
zaiczcsv.at[index,'year']=re.sub("[^0-9]", "", i.split(':')[0].split(',')[-1]) #the sure year
zaiczcsv.at[index,'info']=i.split(':')[0][:-1].replace("\n","") #all other info
for index,row in zaiczcsv.iterrows():
        zaiczcsv.at[index,'word']=row['word'].split(',') #explode only works with lists, hence split()
#explode, reset index,drop old index,remove rows with empty cells in column "word"
zcsv=zaiczcsv.explode('word')
zcsv=zcsv.reset_index(drop=True)
zcsv= zcsv[zcsv.word != '']
for index,row in zcsv.iterrows():
        if len(row['year'])==2: #e.g. 20 is left from "20.sz" (20th century) so we'll append "00"
zcsv.at[index,'year']=row['year']+'00'
if row['word'][-2:].isnumeric(): #remove headers (like "1001-1100")
zcsv.at[index,'word']=row['word'][:-9] #4+4+1 (year1+hyphen+year2)
zcsv.at[index,'disambiguated']=row['word']
if row['word'][-1].isnumeric(): #disambiguation to other column
zcsv.at[index,'word']=row['word'][:-1]
zcsv.at[index,'word']=row['word'].replace("~","/").split("/") #explode needs list, so split()
zcsv=zcsv.explode('word')
zcsv=zcsv.reset_index(drop=True) #reset index to 1,2,3,4,... again
for index,row in zcsv.iterrows():
if row['word'][0]=="-": #remove hyphens
zcsv.at[index,"word"]=row["word"][1:]
zcsv.at[index,"suffix"]="+" #mark that they're a suffix in extra column
#insert translations
try:
zcsv.at[index,"en"]=str(hunendict[row["word"]]).replace("[","").replace("]","").replace("'","").replace('"','').replace(" ","_").\
replace(",_",", ").replace("to_","")
#b/c semsim requires a string not a list
#b/c you can not store lists in a csv, only strings
except KeyError:
pass
try:
zcsv.at[index,"orig"]=dictorig[row["disambiguated"]]
except KeyError:
pass
zcsv.at[index,"wordipa"]=epi.transliterate(row["word"])
zcsv.to_csv("zcsv.csv", encoding="utf-8", index=False)
#5. add missing translations with google translate
def addgoogletrans():
zcsv=pd.read_csv("zcsv.csv",encoding="utf-8")
zcsv["en"]=zcsv["en"].fillna('0')
for index,row in zcsv.iterrows():
print(index)
if row["en"] =='0':
try:
zcsv.at[index,"en"]=translator.translate(row["word"], src='hu', dest='en').text
except:
zcsv.to_csv("zcsv.csv", encoding="utf-8", index=False)
sys.exit("googletrans fail")
zcsv.to_csv("zcsv.csv", encoding="utf-8", index=False)
#6. add pos-tags with spacy
#works on python 3.5. and spacy 2.0.12 (Hungarian pos_tagger)
#create virtual environment via anaconda navigator for this function
def getpos_hu(path):
zcsv=pd.read_csv(path, encoding='utf-8')
import hu_core_ud_lg #https://github.com/oroszgy/spacy-hungarian-models
nlp = hu_core_ud_lg.load()
for index,row in zcsv.iterrows():
doc=nlp(row["word"])
for i in doc:
zcsv.loc[[index],["pos_hun"]]=i.pos_ #df.at does not work (maybe bc older python version?)
print(index)
zcsv.to_csv("zaicz_in.csv", encoding="utf-8", index=False)
return zcsv
#7. converts spacy's pos-tags to nltk's. https://spacy.io/api/annotation
def spacy2nltk_postags(path):
zcsv=pd.read_csv(path,encoding="utf-8")
    hunposdict={"ADJ":"a","ADP":"r","ADV":"r","AUX":"v","CONJ":"r","CCONJ":"r","DET":"r",
                "INTJ":"r","NOUN":"n","NUM":"r","PART":"r","PRON":"r","PROPN":"r","PUNCT":"r",
                "SCONJ":"r","SYM":"r","VERB":"v","X":"r","SPACE":"r"}
for index,row in zcsv.iterrows():
zcsv.at[index,"pos_hun"]=hunposdict[row["pos_hun"]]
zcsv.to_csv("zin3.csv", encoding="utf-8", index=False)
return zcsv
#8.a translate origin-tags from Hungarian to English with google translate
def orig_hu2en(path):
zin= | pd.read_csv(path,encoding="utf-8") | pandas.read_csv |
import pandas as pd
import numpy as np
import datetime
import pytrends
import os
from pytrends.request_1 import TrendReq
pytrend = TrendReq()
country = pd.read_csv(r"C:\Users\Dell\Desktop\livinglabcountries.csv")
country_list = list(country['living lab countries'])
city = | pd.DataFrame() | pandas.DataFrame |
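# Hedged sketch of how the country list above could be fed to pytrends; the
# keyword and the use of ISO geo codes with CITY resolution are illustrative
# assumptions, not taken from the original script (pytrend, country_list and pd
# are defined above):
def fetch_city_trends_sketch(keyword="python"):
    frames = []
    for geo in country_list:
        pytrend.build_payload(kw_list=[keyword], geo=geo) #geo must be an ISO country code
        frames.append(pytrend.interest_by_region(resolution="CITY"))
    return pd.concat(frames)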
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if its not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if its not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
            # this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
                # this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
                # dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
            # see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
        not is_platform_little_endian(), reason="platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
            # nan some entire rows (strings are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
# test a different ordering but with more fields (like invalid
        # combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
            # store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
assert getattr(
store.get_storer(key).table.description, name
).itemsize, size
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# fully names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them, though)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently unsupported dtypes
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block......
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<100000")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<pd.Timedelta('-3D')")
tm.assert_frame_equal(result, df.iloc[3:])
result = store.select("df", "C<'-3D'")
tm.assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select("df", "C<'-500000s'")
result = result.dropna(subset=["C"])
tm.assert_frame_equal(result, df.iloc[6:])
result = store.select("df", "C<'-3.5D'")
result = result.iloc[1:]
tm.assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, "df2")
store.put("df2", df)
result = store.select("df2")
tm.assert_frame_equal(result, df)
def test_remove(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_invalid_terms(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[0:4, "string"] = "bar"
store.put("df", df, format="table")
# some invalid terms
with pytest.raises(TypeError):
Term()
# more invalid
with pytest.raises(ValueError):
store.select("df", "df.index[3]")
with pytest.raises(SyntaxError):
store.select("df", "index>")
# from the docs
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table", data_columns=True)
# check ok
read_hdf(
path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']"
)
read_hdf(path, "dfq", where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table")
with pytest.raises(ValueError):
read_hdf(path, "dfq", where="A>0 or C>0")
def test_same_name_scoping(self, setup_path):
with ensure_clean_store(setup_path) as store:
import pandas as pd
df = DataFrame(
np.random.randn(20, 2), index=pd.date_range("20130101", periods=20)
)
store.put("df", df, format="table")
expected = df[df.index > pd.Timestamp("20130105")]
import datetime # noqa
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
def test_series(self, setup_path):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
self._check_roundtrip(
ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
)
def test_float_index(self, setup_path):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
@td.xfail_non_writeable
def test_tuple_index(self, setup_path):
# GH #492
col = np.arange(10)
idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self, setup_path):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(
l, r, check_dtype=True, check_index_type=True, check_series_type=True
)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1.23, "b"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(
values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
)
self._check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(self, setup_path):
dr = bdate_range("1/1/1940", "1/1/1960")
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
except OverflowError:
pytest.skip("known failer on some windows platforms")
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_frame(self, compression, setup_path):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
self._check_roundtrip(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(
tdf, tm.assert_frame_equal, path=setup_path, compression=compression
)
with ensure_clean_store(setup_path) as store:
# not consolidated
df["foo"] = np.random.randn(len(df))
store["df"] = df
recons = store["df"]
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
def test_empty_series_frame(self, setup_path):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
df0 = DataFrame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
self._check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"dtype", [np.int64, np.float64, np.object, "m8[ns]", "M8[ns]"]
)
def test_empty_series(self, dtype, setup_path):
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_can_serialize_dates(self, setup_path):
rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_store_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
frame = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
# check that the names are stored
with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self, setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self, setup_path):
# GH 13884
df = pd.DataFrame({"A": [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize("UTC")
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(self, table_format, setup_path):
# GH #13492
idx = pd.Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = pd.Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(self, setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_store_mixed(self, compression, setup_path):
def _make_one():
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["int1"] = 1
df["int2"] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
with ensure_clean_store(setup_path) as store:
store["obj"] = df1
tm.assert_frame_equal(store["obj"], df1)
store["obj"] = df2
tm.assert_frame_equal(store["obj"], df2)
# check that can store Series of all of these types
self._check_roundtrip(
df1["obj1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["bool1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["int1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
def test_select_with_dups(self, setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_overwrite_node(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
def test_select(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all as data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
dict(ts=bdate_range("2012-01-01", periods=300), A=np.random.randn(300))
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
dict(
A=np.random.rand(20),
B=np.random.rand(20),
index=np.arange(20, dtype="f8"),
)
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# NaN not in the first position; float with NaN is ok too
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
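# (noqa note: np_zero looks unused to linters, but it is resolved by name from the
# enclosing scope when the where expression "A>np_zero" below is evaluated.)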
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
dict(
ts=bdate_range("2012-01-01", periods=300),
A=np.random.randn(300),
B=range(300),
users=["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ ["a{i:03d}".format(i=i) for i in range(100)],
)
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
"df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']"
)
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + ["a{i:03d}".format(i=i) for i in range(60)]
result = store.select(
"df", "ts>=Timestamp('2012-02-01') and users=selector"
)
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(self, setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = "index <= '{end_dt}'".format(end_dt=end_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = "index > '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
def test_select_iterator_many_empty_frames(self, setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = int(1e4)
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be exactly 1 chunk
assert len(results) == 1
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom I suggest this should
# return [] e.g. `for e in []: print True` never prints
# True.
where = "index <= '{beg_dt}' & index >= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be []
assert len(results) == 0
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes(self, setup_path):
# GH 3499, losing frequency info on index recreation
df = DataFrame(
dict(A=Series(range(3), index=date_range("2000-1-1", periods=3, freq="H")))
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "data")
store.put("data", df, format="table")
result = store.get("data")
tm.assert_frame_equal(df, result)
for attr in ["freq", "tz", "name"]:
for idx in ["index", "columns"]:
assert getattr(getattr(df, idx), attr, None) == getattr(
getattr(result, idx), attr, None
)
# try to append a table with a different frequency
with catch_warnings(record=True):
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("data", df2)
assert store.get_storer("data").info["index"]["freq"] is None
# this is ok
_maybe_remove(store, "df2")
df2 = DataFrame(
dict(
A=Series(
range(3),
index=[
Timestamp("20010101"),
Timestamp("20010102"),
Timestamp("20020101"),
],
)
)
)
store.append("df2", df2)
df3 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("df2", df3)
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes2(self, setup_path):
with ensure_clean_path(setup_path) as path:
with catch_warnings(record=True):
df = DataFrame(
dict(
A=Series(
range(3), index=date_range("2000-1-1", periods=3, freq="H")
)
)
)
df.to_hdf(path, "data", mode="w", append=True)
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
df2.to_hdf(path, "data", append=True)
idx = date_range("2000-1-1", periods=3, freq="H")
idx.name = "foo"
df = DataFrame(dict(A=Series(range(3), index=idx)))
df.to_hdf(path, "data", mode="w", append=True)
assert read_hdf(path, "data").index.name == "foo"
with catch_warnings(record=True):
idx2 = date_range("2001-1-1", periods=3, freq="H")
idx2.name = "bar"
df2 = DataFrame(dict(A=Series(range(3), index=idx2)))
df2.to_hdf(path, "data", append=True)
assert read_hdf(path, "data").index.name is None
def test_frame_select(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
date = df.index[len(df) // 2]
crit1 = Term("index>=date")
assert crit1.env.scope["date"] == date
crit2 = "columns=['A', 'D']"
crit3 = "columns=A"
result = store.select("frame", [crit1, crit2])
expected = df.loc[date:, ["A", "D"]]
tm.assert_frame_equal(result, expected)
result = store.select("frame", [crit3])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append("df_time", df)
with pytest.raises(ValueError):
store.select("df_time", "index>0")
# can't select if not written as table
# store['frame'] = df
# with pytest.raises(ValueError):
# store.select('frame', [crit1, crit2])
def test_frame_select_complex(self, setup_path):
# select via complex criteria
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[df.index[0:4], "string"] = "bar"
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", data_columns=["string"])
# empty
result = store.select("df", 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select("df", 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "foo")]
tm.assert_frame_equal(result, expected)
# or
result = store.select("df", 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index > df.index[3]) | (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select(
"df", "(index>df.index[3] & " 'index<=df.index[6]) | string="bar"'
)
expected = df.loc[
((df.index > df.index[3]) & (df.index <= df.index[6]))
| (df.string == "bar")
]
tm.assert_frame_equal(result, expected)
# invert
result = store.select("df", 'string!="bar"')
expected = df.loc[df.string != "bar"]
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
with pytest.raises(NotImplementedError):
store.select("df", '~(string="bar")')
# invert ok for filters
result = store.select("df", "~(columns=['A','B'])")
expected = df.loc[:, df.columns.difference(["A", "B"])]
tm.assert_frame_equal(result, expected)
# in
result = store.select("df", "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index > df.index[3]].reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(self, setup_path):
with ensure_clean_path(["parms.hdf", "hist.hdf"]) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({"A": [1, 1, 2, 2, 3]})
parms.to_hdf(pp, "df", mode="w", format="table", data_columns=["A"])
selection = read_hdf(pp, "df", where="A=[2,3]")
hist = DataFrame(
np.random.randn(25, 1),
columns=["data"],
index=MultiIndex.from_tuples(
[(i, j) for i in range(5) for j in range(5)], names=["l1", "l2"]
),
)
hist.to_hdf(hh, "df", mode="w", format="table")
expected = read_hdf(hh, "df", where="l1=[2, 3, 4]")
# scope with list like
l = selection.index.tolist() # noqa
store = HDFStore(hh)
result = store.select("df", where="l1=l")
tm.assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh, "df", where="l1=l")
tm.assert_frame_equal(result, expected)
# index
index = selection.index # noqa
result = read_hdf(hh, "df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
# scope with index
store = HDFStore(hh)
result = store.select("df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
store.close()
def test_invalid_filtering(self, setup_path):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
# not implemented
with pytest.raises(NotImplementedError):
store.select("df", "columns=['A'] | columns=['B']")
# in theory we could deal with this
with pytest.raises(NotImplementedError):
store.select("df", "columns=['A','B'] & columns=['C']")
def test_string_select(self, setup_path):
# GH 2973
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
# test string ==/!=
df["x"] = "none"
df.loc[2:7, "x"] = ""
store.append("df", df, data_columns=["x"])
result = store.select("df", "x=none")
expected = df[df.x == "none"]
tm.assert_frame_equal(result, expected)
result = store.select("df", "x!=none")
expected = df[df.x != "none"]
tm.assert_frame_equal(result, expected)
df2 = df.copy()
df2.loc[df2.x == "", "x"] = np.nan
store.append("df2", df2, data_columns=["x"])
result = store.select("df2", "x!=none")
expected = df2[isna(df2.x)]
tm.assert_frame_equal(result, expected)
# int ==/!=
df["int"] = 1
df.loc[2:7, "int"] = 2
store.append("df3", df, data_columns=["int"])
result = store.select("df3", "int=2")
expected = df[df.int == 2]
tm.assert_frame_equal(result, expected)
result = store.select("df3", "int!=2")
expected = df[df.int != 2]
tm.assert_frame_equal(result, expected)
def test_read_column(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# GH 17912
# HDFStore.select_column should raise a KeyError
# exception if the key is not a valid store
with pytest.raises(KeyError, match="No object named df in the file"):
store.select_column("df", "index")
store.append("df", df)
# error
with pytest.raises(
KeyError, match=re.escape("'column [foo] not found in the table'")
):
store.select_column("df", "foo")
with pytest.raises(Exception):
store.select_column("df", "index", where=["index>5"])
# valid
result = store.select_column("df", "index")
tm.assert_almost_equal(result.values, Series(df.index).values)
assert isinstance(result, Series)
# not a data indexable column
with pytest.raises(ValueError):
store.select_column("df", "values_block_0")
# a data column
df2 = df.copy()
df2["string"] = "foo"
store.append("df2", df2, data_columns=["string"])
result = store.select_column("df2", "string")
tm.assert_almost_equal(result.values, df2["string"].values)
# a data column with NaNs, result excludes the NaNs
df3 = df.copy()
df3["string"] = "foo"
df3.loc[4:6, "string"] = np.nan
store.append("df3", df3, data_columns=["string"])
result = store.select_column("df3", "string")
tm.assert_almost_equal(result.values, df3["string"].values)
# start/stop
result = store.select_column("df3", "string", start=2)
tm.assert_almost_equal(result.values, df3["string"].values[2:])
result = store.select_column("df3", "string", start=-2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:])
result = store.select_column("df3", "string", stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[:2])
result = store.select_column("df3", "string", stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[:-2])
result = store.select_column("df3", "string", start=2, stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[2:-2])
result = store.select_column("df3", "string", start=-2, stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:2])
# GH 10392 - make sure column name is preserved
df4 = DataFrame({"A": np.random.randn(10), "B": "foo"})
store.append("df4", df4, data_columns=True)
expected = df4["B"]
result = store.select_column("df4", "B")
tm.assert_series_equal(result, expected)
def test_coordinates(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
| _maybe_remove(store, "df") | pandas.tests.io.pytables.common._maybe_remove |
import base64
from pathlib import Path
import pandas as pd
import streamlit
import os
import numpy as np
import pydeck as pdk
import random
from send_email import send_message
import streamlit as st
from PIL import Image
file_dir = Path(os.path.dirname(os.path.abspath(__file__)))
DATE_TIME = "date/time"
DATA_URL = file_dir / "data_long.csv"
@st.cache(persist=True)
def load_data(DATA_URL, nrows=None):
data = | pd.read_csv(DATA_URL, nrows=nrows) | pandas.read_csv |
"""
Get Massachusetts Data | Cannlytics
Authors: <NAME> <<EMAIL>>
Created: 9/20/2021
Updated: 9/30/2021
License: MIT License <https://opensource.org/licenses/MIT>
Data Sources:
MA Cannabis Control Commission
- Retail Sales by Date and Product Type: https://dev.socrata.com/foundry/opendata.mass-cannabis-control.com/xwf2-j7g9
- Approved Massachusetts Licensees: https://dev.socrata.com/foundry/opendata.mass-cannabis-control.com/hmwt-yiqy
- Average Monthly Price per Ounce for Adult-Use Cannabis: https://dev.socrata.com/foundry/opendata.mass-cannabis-control.com/rqtv-uenj
- Plant Activity and Volume: https://dev.socrata.com/foundry/opendata.mass-cannabis-control.com/j3q7-3usu
- Weekly sales by product type: https://dev.socrata.com/foundry/opendata.mass-cannabis-control.com/87rp-xn9v
Fed Fred
- MA Gross Domestic Product: https://fred.stlouisfed.org/series/MANQGSP
- MA Civilian Labor Force: https://fred.stlouisfed.org/series/MALF
- MA All Employees: https://fred.stlouisfed.org/series/MANA
- MA Avg. Weekly Wage: https://fred.stlouisfed.org/series/LES1252881600Q
- MA Minimum Wage: https://fred.stlouisfed.org/series/STTMINWGMA
- MA Population: https://fred.stlouisfed.org/series/MAPOP
"""
from dotenv import dotenv_values
from fredapi import Fred
import pandas as pd
import requests
from scipy.stats import pearsonr
def end_of_period_timeseries(df, period='M'):
"""Convert a DataFrame from beginning-of-the-period to
end-of-the-period timeseries.
Args:
df (DataFrame): The DataFrame to adjust timestamps.
period (str): The period of the time series, monthly "M" by default.
Returns:
(DataFrame): The adjusted DataFrame, with end-of-the-month timestamps.
"""
df.index = df.index.to_period(period).to_timestamp(period)
return df
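# Illustrative usage (placeholder frames, not part of the original script): the same
# idiom is applied inline to `labor_force` below and, with period 'Y', to `population`.
#   monthly = end_of_period_timeseries(some_monthly_df)        # default period='M'
#   yearly = end_of_period_timeseries(some_yearly_df, 'Y')     # annual series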
def reverse_dataframe(df):
"""Reverse the ordering of a DataFrame.
Args:
df (DataFrame): A DataFrame to re-order.
Returns:
(DataFrame): The re-ordered DataFrame.
"""
return df[::-1].reset_index(drop=True)
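# Note: the Socrata request below orders rows newest-first ('$order': '... DESC'),
# so reverse_dataframe is used to restore chronological order before the
# differencing and resampling steps.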
#--------------------------------------------------------------------------
# Get the data.
#--------------------------------------------------------------------------
# Setup Socrata API, get the App Token, and define the headers.
config = dotenv_values('../.env')
app_token = config.get('APP_TOKEN', None)
headers = {'X-App-Token': app_token}
base = 'https://opendata.mass-cannabis-control.com/resource'
# Get production stats (total employees, total plants, etc.) j3q7-3usu
url = f'{base}/j3q7-3usu.json'
params = {'$limit': 2000, '$order': 'activitysummarydate DESC'}
response = requests.get(url, headers=headers, params=params)
production = pd.DataFrame(response.json(), dtype=float)
production = reverse_dataframe(production)
variables = [
'activitysummarydate',
'total_plantimmaturecount',
'total_planttrackedcount',
'total_plantfloweringcount',
'total_plantvegetativecount',
'total_plantdestroyedcount',
'total_plantharvestedcount',
'total_plantcount',
'salestotal',
'total_active_harvestcount',
'total_active_packagecount',
'total_plantbatchcount',
'total_activeproducts',
'total_activestrains',
'total_employees'
]
#--------------------------------------------------------------------------
# Clean the data, standardize variables, and get supplemental data.
#--------------------------------------------------------------------------
# Initialize Fed Fred.
config = dotenv_values('../.env')
fred = Fred(api_key=config.get('FRED_API_KEY'))
# Find the observation time start.
start = production.activitysummarydate.min()
observation_start = start.split('T')[0]
# Calculate percent of the civilian labor force in Massachusetts.
labor_force = fred.get_series('MALF', observation_start=observation_start)
labor_force.index = labor_force.index.to_period('M').to_timestamp('M')
# Calculate sales difference.
production['sales'] = production['salestotal'].diff()
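# Assumption: 'salestotal' is a cumulative running total, so its first difference
# gives per-period sales; the first row of 'sales' will be NaN.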
# Aggregate daily production data into totals.
production['date'] = pd.to_datetime(production['activitysummarydate'])
production.set_index('date', inplace=True)
monthly_avg_production = production.resample('M').mean()
quarterly_avg_production = production.resample('Q').mean()
# Calculate total employees as a percent of all employees in MA.
total_ma_employees = fred.get_series('MANA', observation_start=observation_start)
total_ma_employees = end_of_period_timeseries(total_ma_employees)
total_ma_employees = total_ma_employees.multiply(1000) # Thousands of people
# Get MA population (conjecturing that population remains constant in 2021).
population = fred.get_series('MAPOP', observation_start=observation_start)
population = end_of_period_timeseries(population, 'Y')
population = population.multiply(1000) # Thousands of people
new_row = pd.DataFrame([population[-1]], index=[pd.to_datetime('2021-12-31')])
population = pd.concat([population, | pd.DataFrame(new_row) | pandas.DataFrame |
from collections import defaultdict
import arrow
import numpy as np
import pandas as pd
train = pd.read_csv("../../../data/train.csv")
train["src"] = "train"
train["is_test"] = 0
test = | pd.read_csv("../../../data/test.csv") | pandas.read_csv |
# IMPORTATION STANDARD
import os
# IMPORTATION THIRDPARTY
import pandas as pd
import pytest
# IMPORTATION INTERNAL
from openbb_terminal.stocks.backtesting import bt_controller
# pylint: disable=E1101
# pylint: disable=W0603
# pylint: disable=E1111
EMPTY_DF = pd.DataFrame()
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"queue, expected",
[
(["ema", "help"], []),
(["quit", "help"], ["help"]),
],
)
def test_menu_with_queue(expected, mocker, queue):
mocker.patch(
target=(
"openbb_terminal.stocks.backtesting.bt_controller."
"BacktestingController.switch"
),
return_value=["quit"],
)
result_menu = bt_controller.BacktestingController(
ticker="TSLA",
stock=pd.DataFrame(),
queue=queue,
).menu()
assert result_menu == expected
@pytest.mark.vcr(record_mode="none")
def test_menu_without_queue_completion(mocker):
# ENABLE AUTO-COMPLETION : HELPER_FUNCS.MENU
mocker.patch(
target="openbb_terminal.feature_flags.USE_PROMPT_TOOLKIT",
new=True,
)
mocker.patch(
target="openbb_terminal.parent_classes.session",
)
mocker.patch(
target="openbb_terminal.parent_classes.session.prompt",
return_value="quit",
)
# DISABLE AUTO-COMPLETION : CONTROLLER.COMPLETER
mocker.patch.object(
target=bt_controller.obbff,
attribute="USE_PROMPT_TOOLKIT",
new=True,
)
mocker.patch(
target="openbb_terminal.stocks.backtesting.bt_controller.session",
)
mocker.patch(
target="openbb_terminal.stocks.backtesting.bt_controller.session.prompt",
return_value="quit",
)
result_menu = bt_controller.BacktestingController(
ticker="TSLA",
stock=pd.DataFrame(),
queue=None,
).menu()
assert result_menu == []
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"mock_input",
["help", "homee help", "home help", "mock"],
)
def test_menu_without_queue_sys_exit(mock_input, mocker):
# DISABLE AUTO-COMPLETION
mocker.patch.object(
target=bt_controller.obbff,
attribute="USE_PROMPT_TOOLKIT",
new=False,
)
mocker.patch(
target="openbb_terminal.stocks.backtesting.bt_controller.session",
return_value=None,
)
# MOCK USER INPUT
mocker.patch("builtins.input", return_value=mock_input)
# MOCK SWITCH
class SystemExitSideEffect:
def __init__(self):
self.first_call = True
def __call__(self, *args, **kwargs):
if self.first_call:
self.first_call = False
raise SystemExit()
return ["quit"]
mock_switch = mocker.Mock(side_effect=SystemExitSideEffect())
mocker.patch(
target=(
"openbb_terminal.stocks.backtesting.bt_controller."
"BacktestingController.switch"
),
new=mock_switch,
)
result_menu = bt_controller.BacktestingController(
ticker="TSLA",
stock=pd.DataFrame(),
queue=None,
).menu()
assert result_menu == []
@pytest.mark.vcr(record_mode="none")
@pytest.mark.record_stdout
def test_print_help():
controller = bt_controller.BacktestingController(
ticker="TSLA",
stock=pd.DataFrame(),
)
controller.print_help()
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"an_input, expected_queue",
[
("", []),
("/help", ["home", "help"]),
("help/help", ["help", "help"]),
("q", ["quit"]),
("h", []),
("r", ["quit", "quit", "reset", "stocks", "load TSLA", "bt"]),
],
)
def test_switch(an_input, expected_queue):
controller = bt_controller.BacktestingController(
ticker="TSLA",
stock=pd.DataFrame(),
queue=None,
)
queue = controller.switch(an_input=an_input)
assert queue == expected_queue
@pytest.mark.vcr(record_mode="none")
def test_call_cls(mocker):
mocker.patch("os.system")
controller = bt_controller.BacktestingController(
ticker="TSLA",
stock=pd.DataFrame(),
)
controller.call_cls([])
assert not controller.queue
os.system.assert_called_once_with("cls||clear")
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"func, queue, expected_queue",
[
(
"call_exit",
[],
[
"quit",
"quit",
"quit",
],
),
("call_exit", ["help"], ["quit", "quit", "quit", "help"]),
("call_home", [], ["quit", "quit"]),
("call_help", [], []),
("call_quit", [], ["quit"]),
("call_quit", ["help"], ["quit", "help"]),
(
"call_reset",
[],
["quit", "quit", "reset", "stocks", "load TSLA", "bt"],
),
(
"call_reset",
["help"],
["quit", "quit", "reset", "stocks", "load TSLA", "bt", "help"],
),
],
)
def test_call_func_expect_queue(expected_queue, queue, func):
controller = bt_controller.BacktestingController(
ticker="TSLA",
stock=pd.DataFrame(),
queue=queue,
)
result = getattr(controller, func)([])
assert result is None
assert controller.queue == expected_queue
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"tested_func, mocked_func, other_args, called_with",
[
(
"call_ema",
"bt_view.display_simple_ema",
["-l=2", "--spy", "--no_bench", "--export=csv"],
dict(
ticker="MOCK_TICKER",
df_stock=EMPTY_DF,
ema_length=2,
spy_bt=True,
no_bench=True,
export="csv",
),
),
(
"call_ema_cross",
"bt_view.display_ema_cross",
[
"-l=2",
"--long=10",
"--short=20",
"--spy",
"--no_bench",
"--no_short",
"--export=csv",
],
dict(
ticker="MOCK_TICKER",
df_stock=EMPTY_DF,
short_ema=20,
long_ema=10,
spy_bt=True,
no_bench=True,
shortable=False,
export="csv",
),
),
(
"call_rsi",
"bt_view.display_rsi_strategy",
[
"--periods=2",
"--high=10",
"--low=20",
"--spy",
"--no_bench",
"--no_short",
"--export=csv",
],
dict(
ticker="MOCK_TICKER",
df_stock=EMPTY_DF,
periods=2,
low_rsi=20,
high_rsi=10,
spy_bt=True,
no_bench=True,
shortable=False,
export="csv",
),
),
],
)
def test_call_func(tested_func, mocked_func, other_args, called_with, mocker):
mock = mocker.Mock()
mocker.patch(
"openbb_terminal.stocks.backtesting.bt_controller." + mocked_func,
new=mock,
)
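# Note (assumed intent): EMPTY_DF is the shared module-level frame referenced by the
# parametrized `called_with` dicts above, so it is cleared in place here rather than
# rebound to a fresh DataFrame.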
EMPTY_DF.drop(EMPTY_DF.index, inplace=True)
controller = bt_controller.BacktestingController(
ticker="MOCK_TICKER",
stock=EMPTY_DF,
)
getattr(controller, tested_func)(other_args=other_args)
if isinstance(called_with, dict):
mock.assert_called_once_with(**called_with)
elif isinstance(called_with, list):
mock.assert_called_once_with(*called_with)
else:
mock.assert_called_once()
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"func",
[
"call_ema",
"call_ema_cross",
"call_rsi",
],
)
def test_call_func_no_parser(func, mocker):
mocker.patch(
"openbb_terminal.stocks.backtesting.bt_controller.BacktestingController.parse_known_args_and_warn",
return_value=None,
)
controller = bt_controller.BacktestingController(
ticker="MOCK_TICKER",
stock=pd.DataFrame(),
)
func_result = getattr(controller, func)(other_args=list())
assert func_result is None
assert not controller.queue
controller.parse_known_args_and_warn.assert_called_once()
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"ticker, expected",
[
(None, []),
("MOCK_TICKER", ["stocks", "load MOCK_TICKER", "bt"]),
],
)
def test_custom_reset(expected, ticker):
controller = bt_controller.BacktestingController(
ticker=None,
stock= | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
import json
import csv
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm_notebook as tqdm
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.tokenize import sent_tokenize
from nltk.stem import WordNetLemmatizer
import nltk
nltk.download('averaged_perceptron_tagger')
import spacy
import math
import string
import sys
import random
from collections import Counter
from itertools import chain
stop_words = set(stopwords.words('english'))
lemmatizer = WordNetLemmatizer()
ge=pd.read_csv('./testgoodwordse.csv')
gn=pd.read_csv('./testgoodwordsn.csv')
gc=pd.read_csv('./testgoodwordsc.csv')
be=pd.read_csv('./testbadwordse.csv')
bn=pd.read_csv('./testbadwordsn.csv')
bc= | pd.read_csv('./testbadwordsc.csv') | pandas.read_csv |
import concurrent.futures
import csv
import itertools
import os
import time
from datetime import datetime, timezone
from pprint import pprint
import pandas as pd
import praw
import yaml
import utils
from args import args
# https://www.reddit.com/r/redditdev/comments/7muatr/praw_rate_limit_headers/drww09u
# No rate limits for reading the data
saved_details = {}
t1 = time.perf_counter()
# Day,Month,Year,Hour,Minute,Second
pattern = f"%d-%m-%Y-%H-%M-%S"
fetch_start = datetime.now(timezone.utc)
fetch_start_utc = float(int(fetch_start.timestamp()))
fetch_start = fetch_start.strftime(pattern)
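# e.g. fetch_start -> "30-09-2021-14-05-33" (illustrative value); used below to build
# unique output CSV file names for this crawl.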
headers = "RedditApp"
reddit = praw.Reddit("bot1", user_agent=headers)
print("*" * 80)
print(f"Reddit Read only mode: {reddit.read_only}")
print("*" * 80)
DSN = os.environ.get("ASYNCPG_DSN", "Not Set")
input_path = args.input_path
with open(input_path + "arguments.yml", "r") as stream:
config = yaml.safe_load(stream)
submission_columns = config["submission_columns"]
cleanse_submission_columns = config["cleanse_submission_columns"]
comment_columns = config["comment_columns"]
link_comments_columns = config["link_comments_columns"]
cleanse_comments_columns = config["cleanse_comments_columns"]
output_file_names = config["output_file_names"]
db_tables = config["db_tables"]
output_path = args.output_path
save_type = args.save_type
clean_text = True
if save_type == "csv":
clean_text = True
elif save_type == "db":
clean_text = False
elif save_type == "dbwi":
clean_text = False
print("Attempting to create tables with the following statements")
print("*" * 80)
with open(input_path + db_tables["init"]) as f:
statements = f.read()
print(statements)
print("*" * 80)
utils.init_db(DSN, statements)
save_type = "db"
input_file_name = input_path + args.input_file_name
fetch_type = args.submissions_type + "_"
submissions_file_name = (
output_path + output_file_names[0] + fetch_type + fetch_start + ".csv"
)
comments_file_name = (
output_path + output_file_names[1] + fetch_type + fetch_start + ".csv"
)
with open(input_file_name, newline="") as f:
reader = csv.reader(f)
data = list(reader)
subreddits_to_crawl = list(itertools.chain.from_iterable(data))
print(f"We will crawl the following subreddits:")
pprint(sorted(subreddits_to_crawl), compact=True)
print("*" * 80)
save_as = {
"type": save_type,
"file_name": comments_file_name,
"index": False,
"index_label": "permalink",
"table": db_tables["comments"],
"DSN": DSN,
}
if not os.path.isdir(output_path):
os.mkdir(output_path)
print(f"The data we'll pull from submissions is:")
pprint(sorted(submission_columns), compact=True)
print("*" * 80)
submissions_count = args.submissions_count
submissions_type = args.submissions_type
time_filter = args.time_filter
comments = args.comments
with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:
readObjs = executor.map(
utils.fetch_submissions,
subreddits_to_crawl,
[reddit] * len(subreddits_to_crawl),
[submissions_count] * len(subreddits_to_crawl),
[submissions_type] * len(subreddits_to_crawl),
[time_filter] * len(subreddits_to_crawl),
)
t2 = time.perf_counter()
print(f"Fetching submissions Finished in {t2-t1} seconds")
submissions_df_dict, submissions_to_crawl = utils.cleanse_submissions(
cleanse_submission_columns, comments, readObjs, submission_columns, clean_text
)
t3 = time.perf_counter()
print(f"Cleansing submissions Finished in {t3-t2} seconds")
total_submissions = len(submissions_df_dict["permalink"])
if comments:
comments_count = args.comments_count
print(f"The data we'll pull from comments is:")
pprint(sorted(comment_columns), compact=True)
print("*" * 80)
with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:
submission_comments = executor.map(
utils.fetch_comments,
submissions_to_crawl,
[reddit] * len(submissions_to_crawl),
[comments_count] * len(submissions_to_crawl),
)
t4 = time.perf_counter()
print(f"Fetching comments Finished in {t4-t3} seconds")
comments_df_dict = utils.cleanse_comments(
cleanse_comments_columns,
submission_comments,
comment_columns,
link_comments_columns,
clean_text,
)
t5 = time.perf_counter()
print(f"Cleansing comments Finished in {t5-t4} seconds")
total_comments = len(comments_df_dict["permalink"])
res = | pd.DataFrame.from_dict(comments_df_dict) | pandas.DataFrame.from_dict |
from scipy.stats import norm
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
import pandas as pd
import numpy as np
import glob
import functools
import os
output_folder = 'Experiment_X-description/python_results'
plot_folder = f'{output_folder}/dwell_analysis_figs'
if not os.path.exists(plot_folder):
os.makedirs(plot_folder)
filename = f'{output_folder}/TDP_cleaned.csv'
order = ['Native', 'Spontaneous', 'KJ', 'low_GrpE', 'high_GrpE']
palette = 'mako'
FRET_thresh = 0.5 #### FRET value at which to filter data above or below.
fps = 5 ### frames per second
thresh = 2 ### should be 10x exposure if using NL515 smoothing on MASH FRET
headers = [f"< {FRET_thresh} to < {FRET_thresh}", f"< {FRET_thresh} to > {FRET_thresh}", f"> {FRET_thresh} to > {FRET_thresh}", f"> {FRET_thresh} to < {FRET_thresh}"]
TDP_data = pd.read_csv(filename, header = "infer")
from Utilities.Data_analysis import cleanup_dwell, filter_dwell, transition_frequency, calculate_mean, fret_state_trans
for treatment_name, df in TDP_data.groupby("treatment_name"):
initial_data = df[df["treatment_name"] == treatment_name]
cleaned_data = cleanup_dwell(initial_data, fps, thresh, 'keep') ##### to keep the first dwell state, simply change code to "cleanup_dwell(initial_data, "keep")
filtered_data = filter_dwell(cleaned_data, FRET_thresh, headers)
filtered_data.to_csv(f"{output_folder}/Dwell_times/Filtered_dwelltime_{treatment_name}.csv", index = False)
mean_dwell = calculate_mean(filtered_data, treatment_name)
mean_dwell.to_csv(f"{output_folder}/Mean_dwell/Filtered_meandwell_{treatment_name}.csv", index = False)
dwell_frequency = transition_frequency(filtered_data)
dwell_frequency["sample"] = treatment_name
dwell_frequency.to_csv(f"{output_folder}/Dwell_frequency/Filtered_dwellfrequency_{treatment_name}.csv", index = False, header = None)
###############
############### Plot FRET states before or after a transition to a defined FRET state
###############
Transition_threshold = 0.5
def plot_fret_trans(df, FRET_state = 'after', to_drop = 'none', threshold = Transition_threshold, palette = 'mako'):
"""Function to plot the FRET state before or after a transition above or below a defined FRET state
Args:
df (dataframe): dataframe that contains the concatenated dataset of all treatments, should be TDP_data
FRET_state (str, optional): Will determine whether or not you are looking at the FRET state 'before' or 'after' the transition. Defaults to 'after'.
to_drop (str, optional): Can input a list with the datasets that you want to drop from the plot. Will need to use those categories within the 'treatment_name' column within df. Defaults to 'none'.
threshold (_type_, optional): The FRET state that determines the kind of transitions you are looking at. If set to 0.3, and FRET_state is = 'before', this will plot the FRET state before transition to below 0.3 FRET. Defaults to Transition_threshold.
palette (str, optional): Choose colour scheme to plot. Defaults to 'mako'.
"""
if to_drop == 'none':
if FRET_state == 'after':
plot1 = plt.figure(figsize = (12, 6))
sns.set(style = "darkgrid", font_scale = 1.5)
sns.violinplot(data = df, x = 'treatment_name', y = 'FRET_after', palette = palette, order = order)
sns.stripplot(data = df, x = 'treatment_name', y = 'FRET_after', color='black', alpha = 0.25, order = order)
plt.ylabel(f'FRET state after transition from < {threshold}')
elif FRET_state == 'before':
plot1 = plt.figure(figsize = (12, 6))
sns.set(style = "darkgrid", font_scale = 1.5)
sns.violinplot(data = df, x = 'treatment_name', y = 'FRET_before', palette = palette, order = order)
sns.stripplot(data = df, x = 'treatment_name', y = 'FRET_before', color='black', alpha = 0.25, order = order)
plt.ylabel(f'FRET state before transition to < {threshold}')
else:
dropped = df[~df['treatment_name'].isin(to_drop)].dropna()
plot1 = plt.figure(figsize = (12, 6))
sns.set(style = "darkgrid", font_scale = 1.5)
sns.violinplot(data = dropped, x = 'treatment_name', y = 'FRET_before')
sns.stripplot(data = dropped, x = 'treatment_name', y = 'FRET_before', color='black', alpha = 0.25)
plt.rcParams['svg.fonttype'] = 'none'
plt.xlabel('Treatment')
plt.ylim(-0.1, 1.2)
plt.xticks(rotation=45)
plot1.savefig(f'{plot_folder}/FRET_{FRET_state}_trans_{Transition_threshold}.svg', dpi = 600)
plt.show()
# FRET_value_after_transition = fret_state_trans(TDP_data, Transition_threshold, fps, FRET_thresh, 'after')
# plot_fret_trans(FRET_value_after_transition, 'after')
FRET_value_before_transition = fret_state_trans(TDP_data, Transition_threshold, fps, FRET_thresh, 'before')
plot_fret_trans(FRET_value_before_transition, 'before')
###############
############### Calculate the number of binding or release events (defined when FRET crosses a FRET threshold) for each molecule then normalise to the lifetime of that molecule to get the rate
############### Will also plot the data
thresh = 0.2
def count_chaperone_events(dfs, thresh, fps_clean, thresh_clean):
"""Function to count the number of times that each molecule will go below a defined threshold from above the set threshold 'i.e. chaperone on' and vice versa 'i.e. chaperone off'
Args:
dfs (dataframe): dataframe containing raw TDP data, will be TDP_data
thresh (variable): defines the minimum duration of a FRET state that can be included for analysis. Any dwell time that is shorter than this variable (in seconds) is deleted and not used for subsequent analysis.
fps_clean (variable): previously defined threshold outlining the exposure rate. Is used to convert the dataset dwell times from frames to units of time.
thresh_clean (variable): variable that has been defined previously that dictates the threshold with which the FRET must cross to be counted
Returns:
        dataframe: dataframe containing all molecules that meet the criteria. Columns are 'Molecule' (the molecule number), 'FRET_after' (the number of events from
        above threshold to below threshold), 'FRET_before' (the number of events from below threshold to above threshold) and 'Total Molecule Lifetime (min)' (how long the
        molecule was imaged before photobleaching occurs).
"""
cleaned_df = []
for treatment_name, df in dfs.groupby("treatment_name"):
initial_data = df[df["treatment_name"] == treatment_name]
cleaned = cleanup_dwell(initial_data, fps_clean, thresh_clean)
cleaned_df.append(cleaned)
cleaned_concat = pd.concat(cleaned_df)
cleaned_concat['Total Molecule Lifetime (min)'] = (cleaned_concat['number_of_frames']/5)/60
filt = []
for treatment_name, df in cleaned_concat.groupby("treatment_name"):
treatment = treatment_name
chaperone_on = df[(df['FRET_after'] <= thresh) & (df['FRET_before'] >= thresh)].groupby('Molecule').count()['FRET_after'].reset_index()
chaperone_off = df[(df['FRET_after'] >= thresh) & (df['FRET_before'] <= thresh)].groupby('Molecule').count()['FRET_before'].reset_index()
time = df.groupby('Molecule').mean()['Total Molecule Lifetime (min)'].reset_index()
# merged_test = chaperone_on.merge(chaperone_off, how = 'outer').fillna(0)
merged_test = functools.reduce(lambda left, right: pd.merge(left, right, on='Molecule', how='outer'), [chaperone_on, chaperone_off, time]) ### Really usefull code for merging multiple dfs
merged_test['treatment'] = treatment
filt.append(merged_test)
count_data = | pd.concat(filt) | pandas.concat |
"""
Detection Recipe - 192.168.3.11
References:
(1) 'Asteroseismic detection predictions: TESS' by Chaplin (2015)
(2) 'On the use of empirical bolometric corrections for stars' by Torres (2010)
(3) 'The amplitude of solar oscillations using stellar techniques' by Kjeldson (2008)
(4) 'An absolutely calibrated Teff scale from the infrared flux method'
by Casagrande (2010) table 4
(5) 'Characterization of the power excess of solar-like oscillations in red giants with Kepler'
by Mosser (2011)
(6) 'Predicting the detectability of oscillations in solar-type stars observed by Kepler'
by Chaplin (2011)
(7) 'The connection between stellar granulation and oscillation as seen by the Kepler mission'
by Kallinger et al (2014)
(8) 'The Transiting Exoplanet Survey Satellite: Simulations of Planet Detections and
Astrophysical False Positives' by Sullivan et al. (2015)
(9) Astropysics module at https://pythonhosted.org/Astropysics/coremods/coords.html
(10) <NAME>'s calc_noise IDL procedure for TESS.
(11) <NAME>lin's soldet6 IDL procedure to calculate the probability of detecting
oscillations with Kepler.
(12) Coordinate conversion at https://ned.ipac.caltech.edu/forms/calculator.html
(13) Bedding 1996
(14) 'The Asteroseismic potential of TESS' by Campante et al. 2016
"""
import numpy as np
from itertools import groupby
from operator import itemgetter
import sys
import pandas as pd
from scipy import stats
import warnings
warnings.simplefilter("ignore")
def bv2teff(b_v):
# from Torres 2010 table 2. Applies to MS, SGB and giant stars
# B-V limits from Flower 1996 fig 5
a = 3.979145106714099
b = -0.654992268598245
c = 1.740690042385095
d = -4.608815154057166
e = 6.792599779944473
f = -5.396909891322525
g = 2.192970376522490
h = -0.359495739295671
lteff = a + b*b_v + c*(b_v**2) + d*(b_v**3) + e*(b_v**4) + f*(b_v**5) + g*(b_v**6) + h*(b_v**7)
teff = 10.0**lteff
return teff
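# Hedged example added for illustration (not part of the original recipe): for an
# assumed solar-like colour of B-V ~ 0.65 the polynomial above should give an
# effective temperature of roughly 5700-5800 K, close to the solar value.
def _example_bv2teff_sunlike():
    return bv2teff(0.65)  # ~5.7e3 K for the assumed solar B-V colour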
# from <NAME> 2003. BCv values from Flower 1996 polynomials presented in Torres 2010
# Av is a keword argument. If reddening values not available, ignore it's effect
def Teff2bc2lum(teff, parallax, parallax_err, vmag, Av=0):
lteff = np.log10(teff)
BCv = np.full(len(lteff), -100.5)
BCv[lteff<3.70] = (-0.190537291496456*10.0**5) + \
(0.155144866764412*10.0**5*lteff[lteff<3.70]) + \
(-0.421278819301717*10.0**4.0*lteff[lteff<3.70]**2.0) + \
(0.381476328422343*10.0**3*lteff[lteff<3.70]**3.0)
BCv[(3.70<lteff) & (lteff<3.90)] = (-0.370510203809015*10.0**5) + \
(0.385672629965804*10.0**5*lteff[(3.70<lteff) & (lteff<3.90)]) + \
(-0.150651486316025*10.0**5*lteff[(3.70<lteff) & (lteff<3.90)]**2.0) + \
(0.261724637119416*10.0**4*lteff[(3.70<lteff) & (lteff<3.90)]**3.0) + \
(-0.170623810323864*10.0**3*lteff[(3.70<lteff) & (lteff<3.90)]**4.0)
BCv[lteff>3.90] = (-0.118115450538963*10.0**6) + \
(0.137145973583929*10.0**6*lteff[lteff > 3.90]) + \
(-0.636233812100225*10.0**5*lteff[lteff > 3.90]**2.0) + \
(0.147412923562646*10.0**5*lteff[lteff > 3.90]**3.0) + \
(-0.170587278406872*10.0**4*lteff[lteff > 3.90]**4.0) + \
(0.788731721804990*10.0**2*lteff[lteff > 3.90]**5.0)
u = 4.0 + 0.4 * 4.73 - 2.0 * np.log10(parallax) - 0.4 * (vmag - Av + BCv)
lum = 10**u # in solar units
e_lum = (2.0 / parallax * 10**u)**2 * parallax_err**2
e_lum = np.sqrt(e_lum)
return lum, e_lum
# calculate seismic parameters
def seismicParameters(teff, lum):
# solar parameters
teff_solar = 5777.0 # Kelvin
teffred_solar = 8907.0 #in Kelvin
numax_solar = 3090.0 # in micro Hz
dnu_solar = 135.1 # in micro Hz
cadence = 120 # in s
vnyq = (1.0 / (2.0*cadence)) * 10**6 # in micro Hz
teffred = teffred_solar*(lum**-0.093) # from (6) eqn 8. red-edge temp
    rad = lum**0.5 * ((teff/teff_solar)**-2) # Stefan-Boltzmann law
numax = numax_solar*(rad**-1.85)*((teff/teff_solar)**0.92) # from (14)
return cadence, vnyq, rad, numax, teffred, teff_solar, teffred_solar, numax_solar, dnu_solar
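# Hedged example added for illustration (inputs are assumptions, not from the source):
# for a roughly Sun-like star the scaling relations above should return a radius close
# to 1 Rsun and numax close to the solar reference value of ~3090 microHz.
def _example_seismic_parameters_sunlike():
    teff_ex = np.array([5777.0])  # K, assumed solar effective temperature
    lum_ex = np.array([1.0])      # Lsun, assumed solar luminosity
    cad, vnyq_ex, rad_ex, numax_ex, teffred_ex, *_ = seismicParameters(teff_ex, lum_ex)
    return rad_ex, numax_ex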
# no coordinate conversion before calculating tess field observing time. Only
# works with ecliptic coordinates
def tess_field_only(e_lng, e_lat):
# create a list to append all of the total observing times 'T' in the TESS field to
T = [] # units of sectors (0-13)
# create a list to append all of the maximum contiguous observations to
max_T = [] # units of sectors (0-13)
for star in range(len(e_lng)):
# 'n' defines the distance between each equidistant viewing sector in the TESS field.
n = 360.0/13
# Define a variable to count the total number of sectors a star is observed in.
counter = 0
# Define a variable to count all of the observations for each star.
# Put each observation sector into sca separately in order to find the largest number
# of contiguous observations for each star.
sca = []
# 'ranges' stores all of the contiguous observations for each star.
ranges = []
# Defines the longitude range of the observing sectors at the inputted stellar latitude
lngrange = 24.0/abs(np.cos(np.radians(e_lat[star])))
if lngrange>=360.0:
lngrange=360.0
# if the star is in the northern hemisphere:
if e_lat[star] >= 0.0:
# For each viewing sector.
for i in range(1,14):
# Define an ra position for the centre of each sector in increasing longitude.
# if a hemisphere has an overshoot, replace 0.0 with the value.
a = 0.0+(n*(i-1))
# calculate the distances both ways around the
# circle between the star and the centre of the sector.
# The smallest distance is the one that should be used
# to see if the star lies in the observing sector.
d1 = abs(e_lng[star]-a)
d2 = (360.0 - abs(e_lng[star]-a))
if d1>d2:
d1 = d2
# if the star is in the 'overshoot' region for some sectors, calculate d3 and d4;
                # the distances both ways around the circle between the star and the centre of the
# 'overshooting past the pole' region of the sector.
# The smallest distance is the one that should be used
# to see if the star lies in the observing sector.
# the shortest distances between the centre of the sector and star, and the sector's
# overshoot and the star should add to 180.0 apart (i.e d1+d3=180.0)
d3 = abs(e_lng[star] - (a+180.0)%360.0)
d4 = 360.0 - abs(e_lng[star] - (a+180.0)%360.0)
if d3>d4:
d3 = d4
# check if a star lies in the field of that sector.
if (d1<=lngrange/2.0 and 6.0<=e_lat[star]) or (d3<=lngrange/2.0 and 78.0<=e_lat[star]):
counter += 1
sca = np.append(sca, i)
else:
pass
# if the star is in the southern hemisphere:
if e_lat[star] < 0.0:
# For each viewing sector.
for i in range(1,14):
# Define an ra position for the centre of each sector in increasing longitude.
# if a hemisphere has an overshoot, replace 0.0 with the value.
a = 0.0+(n*(i-1))
# calculate the distances both ways around the
# circle between the star and the centre of the sector.
# The smallest distance is the one that should be used
# to see if the star lies in the observing sector.
d1 = abs(e_lng[star]-a)
d2 = (360 - abs(e_lng[star]-a))
if d1>d2:
d1 = d2
# if the star is in the 'overshoot' region for some sectors, calculate d3 and d4;
# the distances both ways around the circle between the star and the centre of the
# 'overshooting past the pole' region of the sector.
# The smallest distance of the 2 is the one that should be used
# to see if the star lies in the observing sector.
d3 = abs(e_lng[star] - (a+180.0)%360.0)
d4 = (360 - abs(e_lng[star] - (a+180.0)%360.0))
if d3>d4:
d3 = d4
# check if a star lies in the field of that sector.
if (d1<=lngrange/2.0 and -6.0>=e_lat[star]) or (d3<=lngrange/2.0 and -78.0>=e_lat[star]):
counter += 1
sca = np.append(sca, i)
else:
pass
if len(sca) == 0:
ranges = [0]
else:
for k,g in groupby(enumerate(sca), lambda i_x:i_x[0]-i_x[1]):
                group = list(map(itemgetter(1), g))  # materialise the iterator so it can be summed and measured
                if np.array(group).sum() != 0:
                    ranges.append([len(group)])
T=np.append(T, counter)
max_T = np.append(max_T, np.max(np.array(ranges)))
return T, max_T
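# Hedged example added for illustration (coordinates are assumptions, not from the source):
# a star near an ecliptic pole should fall in many overlapping sectors, while a star near
# the ecliptic plane is observed in at most a couple of sectors (possibly none).
def _example_tess_field_only():
    e_lng_ex = np.array([0.0, 40.0])   # ecliptic longitude in degrees (assumed)
    e_lat_ex = np.array([85.0, 10.0])  # ecliptic latitude in degrees (assumed)
    T_ex, max_T_ex = tess_field_only(e_lng_ex, e_lat_ex)
    return T_ex, max_T_ex  # the first star should show the larger sector counts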
def calc_noise(imag, exptime, teff, e_lng = 0, e_lat = 30, g_lng = 96, g_lat = -30, subexptime = 2.0, npix_aper = 10, \
frac_aper = 0.76, e_pix_ro = 10, geom_area = 60.0, pix_scale = 21.1, sys_limit = 0):
omega_pix = pix_scale**2.0
n_exposures = exptime/subexptime
# electrons from the star
megaph_s_cm2_0mag = 1.6301336 + 0.14733937*(teff-5000.0)/5000.0
e_star = 10.0**(-0.4*imag) * 10.0**6 * megaph_s_cm2_0mag * geom_area * exptime * frac_aper
e_star_sub = e_star*subexptime/exptime
# e/pix from zodi
dlat = (abs(e_lat)-90.0)/90.0
vmag_zodi = 23.345 - (1.148*dlat**2.0)
e_pix_zodi = 10.0**(-0.4*(vmag_zodi-22.8)) * (2.39*10.0**-3) * geom_area * omega_pix * exptime
# e/pix from background stars
dlat = abs(g_lat)/40.0*10.0**0
dlon = g_lng
q = np.where(dlon>180.0)
if len(q[0])>0:
dlon[q] = 360.0-dlon[q]
dlon = abs(dlon)/180.0*10.0**0
p = [18.97338*10.0**0, 8.833*10.0**0, 4.007*10.0**0, 0.805*10.0**0]
imag_bgstars = p[0] + p[1]*dlat + p[2]*dlon**(p[3])
e_pix_bgstars = 10.0**(-0.4*imag_bgstars) * 1.7*10.0**6 * geom_area * omega_pix * exptime
# compute noise sources
noise_star = np.sqrt(e_star) / e_star
noise_sky = np.sqrt(npix_aper*(e_pix_zodi + e_pix_bgstars)) / e_star
noise_ro = np.sqrt(npix_aper*n_exposures)*e_pix_ro / e_star
noise_sys = 0.0*noise_star + sys_limit/(1*10.0**6)/np.sqrt(exptime/3600.0)
noise1 = np.sqrt(noise_star**2.0 + noise_sky**2.0 + noise_ro**2.0)
noise2 = np.sqrt(noise_star**2.0 + noise_sky**2.0 + noise_ro**2.0 + noise_sys**2.0)
return noise2
# calculate the granulation at a set of frequencies from (7) eqn 2 model F
def granulation(nu0, dilution, a_nomass, b1, b2, vnyq):
# Divide by dilution squared as it affects stars in the time series.
# The units of dilution change from ppm to ppm^2 microHz^-1 when going from the
# time series to frequency. p6: c=4 and zeta = 2*sqrt(2)/pi
Pgran = (((2*np.sqrt(2))/np.pi) * (a_nomass**2/b1) / (1 + ((nu0/b1)**4)) \
+ ((2*np.sqrt(2))/np.pi) * (a_nomass**2/b2) / (1 + ((nu0/b2)**4))) / (dilution**2)
# From (9). the amplitude suppression factor. Normalised sinc with pi (area=1)
eta = np.sinc((nu0/(2*vnyq)))
# the granulation after attenuation
Pgran = Pgran * eta**2
return Pgran, eta
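# Hedged example added for illustration (all numbers are assumptions for a solar-like star):
# evaluate the model F granulation background at nu = numax, using the same a_nomass/b1/b2
# scalings that globalDetections() applies further down.
def _example_granulation_at_numax():
    numax_ex = 3090.0                        # microHz, assumed solar numax
    vnyq_ex = (1.0 / (2.0 * 120.0)) * 10**6  # microHz, Nyquist frequency for 2-min cadence
    a_nomass_ex = 0.85 * 3382 * numax_ex**-0.609
    b1_ex = 0.317 * numax_ex**0.970
    b2_ex = 0.948 * numax_ex**0.992
    Pgran_ex, eta_ex = granulation(numax_ex, 1.0, a_nomass_ex, b1_ex, b2_ex, vnyq_ex)
    return Pgran_ex, eta_ex  # power density in ppm^2/microHz and the sinc attenuation factor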
# the total number of pixels used by the highest ranked x number of targets in the tCTL
def pixel_cost(x):
N = np.ceil(10.0**-5.0 * 10.0**(0.4*(20.0-x)))
N_tot = 10*(N+10)
total = np.cumsum(N_tot)
# want to find: the number of ranked tCTL stars (from highest to lowest rank) that correspond to a pixel cost of 1.4Mpix at a given time
per_cam = 26*4 # to get from the total pixel cost to the cost per camera at a given time, divide by this
pix_limit = 1.4e6 # the pixel limit per camera at a given time
return total[-1], per_cam, pix_limit, N_tot
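# Hedged example added for illustration (magnitudes are assumptions): pixel_cost() returns
# the cumulative pixel total, the per-camera normalisation and the 1.4 Mpix limit, so a
# mock magnitude-sorted target list can be checked against the per-camera budget.
def _example_pixel_cost_budget():
    imag_ex = np.linspace(5.0, 12.0, 1000)  # assumed I magnitudes, brightest first
    total_ex, per_cam_ex, pix_limit_ex, _npix = pixel_cost(imag_ex)
    return (total_ex / per_cam_ex) < pix_limit_ex  # True while the budget holds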
# detection recipe to find whether a star has an observed solar-like Gaussian mode power excess
def globalDetections(g_lng, g_lat, e_lng, e_lat, imag, \
lum, rad, teff, numax, max_T, teffred, teff_solar, \
teffred_solar, numax_solar, dnu_solar, sys_limit, dilution, vnyq, cadence, vary_beta=False):
dnu = dnu_solar*(rad**-1.42)*((teff/teff_solar)**0.71) # from (14) eqn 21
beta = 1.0-np.exp(-(teffred-teff)/1550.0) # beta correction for hot solar-like stars from (6) eqn 9.
if isinstance(teff, float): # for only 1 star
if (teff>=teffred):
beta = 0.0
else:
beta[teff>=teffred] = 0.0
# to remove the beta correction, set Beta=1
if vary_beta == False:
beta = 1.0
# modified from (6) eqn 11. Now consistent with dnu proportional to numax^0.77 in (14)
amp = 0.85*2.5*beta*(rad**1.85)*((teff/teff_solar)**0.57)
# From (5) table 2 values for delta nu_{env}. env_width is defined as +/- some value.
env_width = 0.66 * numax**0.88
env_width[numax>100.] = numax[numax>100.]/2. # from (6) p12
total, per_cam, pix_limit, npix_aper = pixel_cost(imag)
noise = calc_noise(imag=imag, teff=teff, exptime=cadence, e_lng=e_lng, e_lat=e_lat, \
g_lng=g_lng, g_lat=g_lat, sys_limit=sys_limit, npix_aper=npix_aper)
noise = noise*10.0**6 # total noise in units of ppm
a_nomass = 0.85 * 3382*numax**-0.609 # multiply by 0.85 to convert to redder TESS bandpass.
b1 = 0.317 * numax**0.970
b2 = 0.948 * numax**0.992
# call the function for the real and aliased components (above and below vnyq) of the granulation
    # the order of the stars is different for the aliases so run the function in a loop
Pgran, eta = granulation(numax, dilution, a_nomass, b1, b2, vnyq)
Pgranalias = np.zeros(len(Pgran))
etaalias = np.zeros(len(eta))
# if vnyq is 1 fixed value
if isinstance(vnyq, float):
for i in range(len(numax)):
if numax[i] > vnyq:
Pgranalias[i], etaalias[i] = granulation((vnyq - (numax[i] - vnyq)), \
dilution, a_nomass[i], b1[i], b2[i], vnyq)
elif numax[i] < vnyq:
Pgranalias[i], etaalias[i] = granulation((vnyq + (vnyq - numax[i])), \
dilution, a_nomass[i], b1[i], b2[i], vnyq)
# if vnyq varies for each star
else:
for i in range(len(numax)):
if numax[i] > vnyq[i]:
Pgranalias[i], etaalias[i] = granulation((vnyq[i] - (numax[i] - vnyq[i])), \
dilution, a_nomass[i], b1[i], b2[i], vnyq[i])
elif numax[i] < vnyq[i]:
Pgranalias[i], etaalias[i] = granulation((vnyq[i] + (vnyq[i] - numax[i])), \
dilution, a_nomass[i], b1[i], b2[i], vnyq[i])
Pgrantotal = Pgran + Pgranalias
ptot = (0.5*2.94*amp**2.*((2.*env_width)/dnu)*eta**2.) / (dilution**2.)
Binstr = 2.0 * (noise)**2. * cadence*10**-6.0 # from (6) eqn 18
bgtot = ((Binstr + Pgrantotal) * 2.*env_width) # units are ppm**2
snr = ptot/bgtot # global signal to noise ratio from (11)
fap = 0.05 # false alarm probability
pdet = 1.0 - fap
    pfinal = np.full(rad.shape[0], -99, dtype=float)  # float dtype so the assigned detection probabilities are not truncated to integers
idx = np.where(max_T != 0) # calculate the indexes where T is not 0
tlen=max_T[idx]*27.4*86400.0 # the length of the TESS observations in seconds
bw=1.0 * (10.0**6.0)/tlen
nbins=(2.*env_width[idx]/bw).astype(int) # from (11)
snrthresh = stats.chi2.ppf(pdet, 2.0*nbins) / (2.0*nbins) - 1.0
pfinal[idx] = stats.chi2.sf((snrthresh+1.0) / (snr[idx]+1.0)*2.0*nbins, 2.*nbins)
return pfinal, snr, dnu # snr is needed in TESS_telecon2.py
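# Hedged example added for illustration (a single made-up star, not from the source):
# a minimal end-to-end call of the recipe above, chaining seismicParameters(),
# tess_field_only() and globalDetections() with assumed coordinates and magnitude.
def _example_global_detections_single_star():
    teff_ex, lum_ex, imag_ex = np.array([5777.0]), np.array([1.0]), np.array([8.0])
    cad, vnyq_ex, rad_ex, numax_ex, teffred_ex, t_sun, tred_sun, numax_sun, dnu_sun = \
        seismicParameters(teff_ex, lum_ex)
    T_ex, max_T_ex = tess_field_only(np.array([0.0]), np.array([85.0]))
    return globalDetections(np.array([96.0]), np.array([-30.0]), np.array([0.0]), np.array([85.0]),
                            imag_ex, lum_ex, rad_ex, teff_ex, numax_ex, max_T_ex, teffred_ex,
                            t_sun, tred_sun, numax_sun, dnu_sun, sys_limit=0.0, dilution=1.0,
                            vnyq=vnyq_ex, cadence=cad, vary_beta=True)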
def BV2VI(bv, vmag, g_mag_abs):
whole = pd.DataFrame(data={'B-V': bv, 'Vmag': vmag, 'g_mag_abs': g_mag_abs, 'Ai': 0})
# Mg: empirical relation from Tiago to separate dwarfs from giants
# note: this relation is observational; it was made with REDDENED B-V and g_mag values
whole['Mg'] = 6.5*whole['B-V'] - 1.8
# B-V-to-teff limits from (6) fig 5
whole = whole[(whole['B-V'] > -0.4) & (whole['B-V'] < 1.7)]
print(whole.shape, 'after B-V cuts')
# B-V limits for dwarfs and giants, B-V conditions from (1)
# if a star can't be classified as dwarf or giant, remove it
condG = (whole['B-V'] > -0.25) & (whole['B-V'] < 1.75) & (whole['Mg'] > whole['g_mag_abs'])
condD1 = (whole['B-V'] > -0.23) & (whole['B-V'] < 1.4) & (whole['Mg'] < whole['g_mag_abs'])
condD2 = (whole['B-V'] > 1.4) & (whole['B-V'] < 1.9) & (whole['Mg'] < whole['g_mag_abs'])
whole = pd.concat([whole[condG], whole[condD1], whole[condD2]], axis=0)
print(whole.shape, 'after giant/dwarf cuts')
whole['V-I'] = 100. # write over these values for dwarfs and giants separately
# coefficients for giants and dwarfs
cg = [-0.8879586e-2, 0.7390707, 0.3271480, 0.1140169e1, -0.1908637, -0.7898824,
0.5190744, 0.5358868]
cd1 = [0.8906590e-1, 0.1319675e1, 0.4461807, -0.1188127e1, 0.2465572, 0.8478627e1,
0.1046599e2, 0.3641226e1]
cd2 = [-0.5421588e2, 0.8011383e3, -0.4895392e4, 0.1628078e5, -0.3229692e5,
0.3939183e5, -0.2901167e5, 0.1185134e5, -0.2063725e4]
# calculate (V-I) for giants
x = whole['B-V'][condG] - 1
y = (cg[0] + cg[1]*x + cg[2]*(x**2) + cg[3]*(x**3) + cg[4]*(x**4) +\
cg[5]*(x**5) + cg[6]*(x**6) + cg[7]*(x**7))
whole['V-I'][condG] = y + 1
x, y = [[] for i in range(2)]
# calculate (V-I) for dwarfs (1st B-V range)
x = whole['B-V'][condD1] - 1
y = (cd1[0] + cd1[1]*x + cd1[2]*(x**2) + cd1[3]*(x**3) + cd1[4]*(x**4) +\
cd1[5]*(x**5) + cd1[6]*(x**6) + cd1[7]*(x**7))
whole['V-I'][condD1] = y + 1
x, y = [[] for i in range(2)]
# calculate (V-I) for dwarfs (2nd B-V range)
x = whole['B-V'][condD2] - 1
y = (cd2[0] + cd2[1]*x + cd2[2]*(x**2) + cd2[3]*(x**3) + cd2[4]*(x**4) +\
cd2[5]*(x**5) + cd2[6]*(x**6) + cd2[7]*(x**7) + cd2[8]*(x**8))
whole['V-I'][condD2] = y + 1
x, y = [[] for i in range(2)]
# calculate Imag from V-I and reredden it
whole['Imag'] = whole['Vmag']-whole['V-I']
whole['Imag_reddened'] = whole['Imag'] + whole['Ai']
"""
# make Teff, luminosity, Plx and ELat cuts to the data
whole = whole[(whole['teff'] < 7700) & (whole['teff'] > 4300) & \
(whole['Lum'] > 0.3) & (whole['lum_D'] < 50) & ((whole['e_Plx']/whole['Plx']) < 0.5) \
& (whole['Plx'] > 0.) & ((whole['ELat']<=-6.) | (whole['ELat']>=6.))]
print(whole.shape, 'after Teff/L/Plx/ELat cuts')
"""
whole.drop(['Ai', 'Imag_reddened', 'Mg'], axis=1, inplace=True)
    return whole.values.T  # .as_matrix() was removed in newer pandas; .values is the equivalent here
# make cuts to the data
def cuts(teff, e_teff, metal, e_metal, g_lng, g_lat, e_lng, e_lat, Tmag, e_Tmag, Vmag, e_Vmag, plx, e_plx, lum, star_name):
d = {'teff':teff, 'e_teff':e_teff, 'metal':metal, 'e_metal':e_metal, 'g_lng':g_lng, 'g_lat':g_lat, 'e_lng':e_lng, 'e_lat':e_lat,
'Tmag':Tmag, 'e_Tmag':e_Tmag, 'Vmag':Vmag, 'e_Vmag':e_Vmag, 'plx':plx, 'e_plx':e_plx, 'lum':lum, 'star_name':star_name}
whole = pd.DataFrame(d, columns = ['teff', 'e_teff', 'metal', 'e_metal', 'g_lng', 'g_lat', 'e_lng', 'e_lat',
'Tmag', 'e_Tmag', 'Vmag', 'e_Vmag', 'plx', 'e_plx', 'lum', 'star_name'])
whole = whole[(whole['teff'] < 7700.) & (whole['teff'] > 4300.) & (whole['e_teff'] > 0.) & \
(whole['lum'] > 0.3) & (whole['lum'] < 50.) & ((whole['e_plx']/whole['plx']) < 0.5) & \
(whole['plx'] > 0.) & ((whole['e_lat']<=-6.) | (whole['e_lat']>=6.)) & \
(whole['Tmag'] > 3.5) & (whole['e_metal'] > 0.)]
print(whole.shape, 'after cuts to the data')
    return whole.values.T  # .as_matrix() was removed in newer pandas; .values is the equivalent here
if __name__ == '__main__':
df = pd.read_csv('files/MAST_Crossmatch_TIC4.csv', header=0,
index_col=False)
data = df.values
# star_name = data[:, 1]
teff = pd.to_numeric(data[:, 88])
# e_teff = pd.to_numeric(data[:, 89])
# metal = pd.to_numeric(data[:, 92])
# e_metal = pd.to_numeric(data[:, 93])
# g_lng = pd.to_numeric(data[:, 48])
# g_lat = pd.to_numeric(data[:, 49])
# e_lng = pd.to_numeric(data[:, 50])
# e_lat = pd.to_numeric(data[:, 51])
# Tmag = pd.to_numeric(data[:, 84])
# e_Tmag = pd.to_numeric(data[:, 85])
Vmag = pd.to_numeric(data[:, 54])
# e_Vmag = pd.to_numeric(data[:, 55])
plx = pd.to_numeric(data[:, 45])
e_plx = pd.to_numeric(data[:, 46])
lum, e_lum = Teff2bc2lum(teff, plx, e_plx, Vmag)
df[' Luminosity'] = pd.Series(lum)
df[' Luminosity Err.'] = pd.Series(e_lum)
# teff, e_teff, metal, e_metal, g_lng, g_lat, e_lng, e_lat, Tmag, e_Tmag, \
# Vmag, e_Vmag, plx, e_plx, lum, star_name = cuts(teff, e_teff, metal, e_metal,
# g_lng, g_lat, e_lng, e_lat, Tmag, e_Tmag, Vmag, e_Vmag,
# plx, e_plx, lum, star_name)
# make cuts to the data
df = df[(df[' T_eff'] < 7700.) & (df[' T_eff'] > 4300.) & (df[' T_eff Err.'] > 0.) & \
(df[' Luminosity'] > 0.3) & (df[' Luminosity'] < 50.) & ((df[' Parallax Err.']/df[' Parallax']) < 0.5) & \
(df[' Parallax'] > 0.) & ((df[' Ecl. Lat.']<=-6.) | (df[' Ecl. Lat.']>=6.)) & \
(df[' TESS Mag.'] > 3.5) & (df[' Metallicity Err.'] > 0.)]
df = df.reset_index(drop=True)
print(df.shape, 'after cuts to the data')
data = df.values
teff = pd.to_numeric(data[:, 88])
lum = | pd.to_numeric(data[:, 113]) | pandas.to_numeric |
#!/usr/bin/env python
# coding: utf-8
# In[66]:
import requests
import json
import pandas as pd
# In[67]:
client_id = '07bd2676-f950-48c0-8b12-ebd5e8b1491d'
client_secret = '<KEY>'
owner = "gitfeedV3"
thing = "github"
nodes = ['pulls', 'issues', 'commits']
# nodes = ['pulls', 'issues']
start_date = '2020-09-01T00:00:00-03:00'
end_date = '2020-09-15T23:59:59-03:00'
# In[68]:
def auth():
_auth_url = 'http://agrows-keycloak.labbs.com.br/auth/realms/agroWS/protocol/openid-connect/token'
_auth_data = {
'client_id': client_id,
'client_secret': client_secret,
'grant_type': 'client_credentials'
}
_auth_headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
_auth_request = requests.post(_auth_url, data=_auth_data, headers=_auth_headers)
return json.loads(_auth_request.text)['access_token']
# In[69]:
def load_data(_owner, _thing, _node, _token, _start_date, _end_date):
_data_api_url = f'https://agrows-data-api.labbs.com.br/v1/owner/{_owner}/thing/{_thing}/node/{_node}'
_data_api_params = {
'startDateTime': _start_date,
'endDateTime': _end_date
}
_data_api_headers = {
'content-type': 'application/json',
'Authorization' : f'Bearer {_token}'
}
_data_api_request = requests.get(_data_api_url, params=_data_api_params, headers=_data_api_headers)
_data = json.loads(_data_api_request.text)['data']
# data_source = json.dumps(data, indent=4, sort_keys=True)
return _data
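# Hedged usage sketch (editor addition): how auth() and load_data() are meant to be combined
# for a single node. The endpoint, credentials and date range come from the module-level
# settings above and are assumed to be valid; nothing runs unless this is called explicitly.
def _example_fetch_pulls():
    _token = auth()
    return load_data(owner, thing, "pulls", _token, start_date, end_date)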
# In[70]:
def load_mongo_data(_repo_list):
_data_api_url = f'http://localhost:3002/mongo/data'
_data_api_params = {
'repoList': _repo_list
}
_data_api_headers = {
'content-type': 'application/json'
}
_data_api_request = requests.get(_data_api_url, json=_data_api_params, headers=_data_api_headers)
_data = json.loads(_data_api_request.text)
# _data_source = json.dumps(_data, indent=4, sort_keys=True)
if 'data' in _data:
return _data['data']
return _data
# In[71]:
def load_raw_data(url):
_data_api_headers = {
'content-type': 'application/json'
}
_data_api_request = requests.get(url, headers=_data_api_headers)
_data = json.loads(_data_api_request.text)
return _data
# In[72]:
def calc_popularity(_qty):
if _qty < 20:
return 10
    elif _qty < 50:
        return 30
    elif _qty < 100:
        return 60
    else:  # _qty >= 100; closes the gaps at 49 and 99 left by the original range() checks
        return 90
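# Hedged example (editor addition, illustrative only): spot-check the popularity buckets.
def _example_calc_popularity():
    return [calc_popularity(q) for q in (5, 30, 75, 150)]  # expected -> [10, 30, 60, 90]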
# In[73]:
access_token = auth()
data = {}
for node in nodes:
raw_data = load_data(owner, thing, node, access_token, start_date, end_date)
raw_data = [{'dateTime': x['dateTime'], **x.get('attributes')} for x in raw_data]
for r in raw_data:
        if 'dono' not in r:
            # no owner field: skip this record ('del r' would only unbind the loop variable, not drop it from the list)
            continue
r['owner'] = r.pop('dono')
for k in ['owner', 'name', 'participants', 'comments', 'author', 'labels', 'message']:
if k in r and ':' in r[k]:
r[k] = r[k].split(":")[1]
if k in r and r[k] == 'no-string':
r[k] = ''
    # Keep only the last version of each item
    if node == 'commits':
data[node] = pd.DataFrame.from_dict(raw_data)
else:
data[node] = pd.DataFrame.from_dict(raw_data).sort_values('dateTime').groupby('number').tail(1)
# In[74]:
# Getting owner and name from all nodes to request data from Mongo
frames = []
for k in data:
frames.append(data[k][['owner', 'name']])
result = pd.concat(frames).drop_duplicates().to_json(orient="table", index=None)
repoList = json.loads(result)['data']
code_data = load_mongo_data(repoList)
data['code'] = | pd.DataFrame.from_dict(code_data) | pandas.DataFrame.from_dict |
from __future__ import print_function, absolute_import, unicode_literals, division
import csv
import random
from collections import OrderedDict
import pandas as pd
import nltk
import numpy as np
from keras_preprocessing.sequence import pad_sequences
from nltk import word_tokenize
import json
from sklearn import preprocessing
from tabulate import tabulate
from keras.preprocessing.text import Tokenizer
from amt.settings import PATH_visible_not_visible_actions_csv
from classify.elmo_embeddings import load_elmo_embedding
from classify.utils import reshape_3d_to_2d
from classify.visualization import print_action_balancing_stats, get_list_actions_for_label, get_nb_visible_not_visible, \
print_nb_actions_miniclips_train_test_eval, measure_nb_unique_actions
import os
import glob
from shutil import copytree
import string
from tqdm import tqdm
from nltk.tag import StanfordPOSTagger
from nltk import PorterStemmer
stemmer = PorterStemmer()
os.environ["CLASSPATH"] = "stanford-postagger-full-2018-10-16/"
os.environ["STANFORD_MODELS"] = "stanford-postagger-full-2018-10-16/models/"
st = StanfordPOSTagger('english-bidirectional-distsim.tagger')
path_visible_not_visible_actions_csv = 'data/AMT/Output/All/new_clean_visible_not_visible_actions_video_after_spam.csv'
glove = pd.read_table("data/glove.6B.50d.txt", sep=" ", index_col=0, header=None, quoting=csv.QUOTE_NONE)
table = str.maketrans({key: None for key in string.punctuation})
glove_pos = | pd.read_table("data/glove_vectors.txt", sep=" ", index_col=0, header=None, quoting=csv.QUOTE_NONE) | pandas.read_table |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pickle
import shutil
import sys
import tempfile
import numpy as np
from numpy import arange, nan
import pandas.testing as pdt
from pandas import DataFrame, MultiIndex, Series, to_datetime
# dependencies testing specific
import pytest
import recordlinkage
from recordlinkage.base import BaseCompareFeature
STRING_SIM_ALGORITHMS = [
'jaro', 'q_gram', 'cosine', 'jaro_winkler', 'dameraulevenshtein',
'levenshtein', 'lcs', 'smith_waterman'
]
NUMERIC_SIM_ALGORITHMS = ['step', 'linear', 'squared', 'exp', 'gauss']
FIRST_NAMES = [
u'Ronald', u'Amy', u'Andrew', u'William', u'Frank', u'Jessica', u'Kevin',
u'Tyler', u'Yvonne', nan
]
LAST_NAMES = [
u'Graham', u'Smith', u'Holt', u'Pope', u'Hernandez', u'Gutierrez',
u'Rivera', nan, u'Crane', u'Padilla'
]
STREET = [
u'<NAME>', nan, u'<NAME>', u'<NAME>', u'<NAME>',
u'<NAME>', u'Williams Trail', u'Durham Mountains', u'Anna Circle',
u'<NAME>'
]
JOB = [
u'Designer, multimedia', u'Designer, blown glass/stained glass',
u'Chiropractor', u'Engineer, mining', u'Quantity surveyor',
u'Phytotherapist', u'Teacher, English as a foreign language',
u'Electrical engineer', u'Research officer, government', u'Economist'
]
AGES = [23, 40, 70, 45, 23, 57, 38, nan, 45, 46]
# Run all tests in this file with:
# nosetests tests/test_compare.py
class TestData(object):
@classmethod
def setup_class(cls):
N_A = 100
N_B = 100
cls.A = DataFrame({
'age': np.random.choice(AGES, N_A),
'given_name': np.random.choice(FIRST_NAMES, N_A),
'lastname': np.random.choice(LAST_NAMES, N_A),
'street': np.random.choice(STREET, N_A)
})
cls.B = DataFrame({
'age': np.random.choice(AGES, N_B),
'given_name': np.random.choice(FIRST_NAMES, N_B),
'lastname': np.random.choice(LAST_NAMES, N_B),
'street': np.random.choice(STREET, N_B)
})
cls.A.index.name = 'index_df1'
cls.B.index.name = 'index_df2'
cls.index_AB = MultiIndex.from_arrays(
[arange(len(cls.A)), arange(len(cls.B))],
names=[cls.A.index.name, cls.B.index.name])
# Create a temporary directory
cls.test_dir = tempfile.mkdtemp()
@classmethod
def teardown_class(cls):
# Remove the test directory
shutil.rmtree(cls.test_dir)
class TestCompareApi(TestData):
"""General unittest for the compare API."""
def test_repr(self):
comp = recordlinkage.Compare()
comp.exact('given_name', 'given_name')
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
c_str = str(comp)
c_repr = repr(comp)
assert c_str == c_repr
start_str = '<{}'.format(comp.__class__.__name__)
assert c_str.startswith(start_str)
def test_instance_linking(self):
comp = recordlinkage.Compare()
comp.exact('given_name', 'given_name')
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
result = comp.compute(self.index_AB, self.A, self.B)
# returns a Series
assert isinstance(result, DataFrame)
# resulting series has a MultiIndex
assert isinstance(result.index, MultiIndex)
# indexnames are oke
assert result.index.names == [self.A.index.name, self.B.index.name]
assert len(result) == len(self.index_AB)
def test_instance_dedup(self):
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
result = comp.compute(self.index_AB, self.A)
# returns a Series
assert isinstance(result, DataFrame)
# resulting series has a MultiIndex
assert isinstance(result.index, MultiIndex)
# indexnames are oke
assert result.index.names == [self.A.index.name, self.B.index.name]
assert len(result) == len(self.index_AB)
def test_label_linking(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'given_name',
'given_name',
label='my_feature_label')
result = comp.compute(self.index_AB, self.A, self.B)
assert "my_feature_label" in result.columns.tolist()
def test_label_dedup(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'given_name',
'given_name',
label='my_feature_label')
result = comp.compute(self.index_AB, self.A)
assert "my_feature_label" in result.columns.tolist()
def test_multilabel_none_linking(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name')
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name')
result = comp.compute(self.index_AB, self.A, self.B)
assert [0, 1, 2, 3, 4, 5, 6, 7, 8] == \
result.columns.tolist()
def test_multilabel_linking(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name',
label=['a', ['b', 'c', 'd']])
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name',
label=['e', ['f', 'g', 'h']])
result = comp.compute(self.index_AB, self.A, self.B)
assert [0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] == \
result.columns.tolist()
def test_multilabel_dedup(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name',
label=['a', ['b', 'c', 'd']])
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name',
label=['e', ['f', 'g', 'h']])
result = comp.compute(self.index_AB, self.A)
assert [0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] == \
result.columns.tolist()
def test_multilabel_none_dedup(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name')
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name')
result = comp.compute(self.index_AB, self.A)
assert [0, 1, 2, 3, 4, 5, 6, 7, 8] == \
result.columns.tolist()
def test_multilabel_error_dedup(self):
def ones(s1, s2):
return np.ones((len(s1), 2))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones, 'given_name', 'given_name', label=['a', 'b', 'c'])
with pytest.raises(ValueError):
comp.compute(self.index_AB, self.A)
def test_incorrect_collabels_linking(self):
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
"given_name", "not_existing_label")
with pytest.raises(KeyError):
comp.compute(self.index_AB, self.A, self.B)
def test_incorrect_collabels_dedup(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
"given_name", "not_existing_label")
with pytest.raises(KeyError):
comp.compute(self.index_AB, self.A)
def test_compare_custom_vectorized_linking(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col', 'col')
result = comp.compute(ix, A, B)
expected = DataFrame([1, 1, 1, 1, 1], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col',
'col',
label='my_feature_label')
result = comp.compute(ix, A, B)
expected = DataFrame(
[1, 1, 1, 1, 1], index=ix, columns=['my_feature_label'])
pdt.assert_frame_equal(result, expected)
# def test_compare_custom_nonvectorized_linking(self):
# A = DataFrame({'col': [1, 2, 3, 4, 5]})
# B = DataFrame({'col': [1, 2, 3, 4, 5]})
# ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# def custom_func(a, b):
# return np.int64(1)
# # test without label
# comp = recordlinkage.Compare()
# comp.compare_single(
# custom_func,
# 'col',
# 'col'
# )
# result = comp.compute(ix, A, B)
# expected = DataFrame([1, 1, 1, 1, 1], index=ix)
# pdt.assert_frame_equal(result, expected)
# # test with label
# comp = recordlinkage.Compare()
# comp.compare_single(
# custom_func,
# 'col',
# 'col',
# label='test'
# )
# result = comp.compute(ix, A, B)
# expected = DataFrame([1, 1, 1, 1, 1], index=ix, columns=['test'])
# pdt.assert_frame_equal(result, expected)
def test_compare_custom_instance_type(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
def call(s1, s2):
# this should raise on incorrect types
assert isinstance(s1, np.ndarray)
assert isinstance(s2, np.ndarray)
return np.ones(len(s1), dtype=np.int)
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col', 'col')
result = comp.compute(ix, A, B)
expected = DataFrame([1, 1, 1, 1, 1], index=ix)
pdt.assert_frame_equal(result, expected)
def test_compare_custom_vectorized_arguments_linking(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x, 'col', 'col',
5)
result = comp.compute(ix, A, B)
expected = DataFrame([5, 5, 5, 5, 5], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x,
'col',
'col',
5,
label='test')
result = comp.compute(ix, A, B)
expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
# test with kwarg
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x,
'col',
'col',
x=5,
label='test')
result = comp.compute(ix, A, B)
expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
def test_compare_custom_vectorized_dedup(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
ix = MultiIndex.from_arrays([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col', 'col')
result = comp.compute(ix, A)
expected = DataFrame([1, 1, 1, 1, 1], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col',
'col',
label='test')
result = comp.compute(ix, A)
expected = DataFrame([1, 1, 1, 1, 1], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
def test_compare_custom_vectorized_arguments_dedup(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
ix = MultiIndex.from_arrays([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x, 'col', 'col',
5)
result = comp.compute(ix, A)
expected = DataFrame([5, 5, 5, 5, 5], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x,
'col',
'col',
5,
label='test')
result = comp.compute(ix, A)
expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
def test_parallel_comparing_api(self):
# use single job
comp = recordlinkage.Compare(n_jobs=1)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_single = comp.compute(self.index_AB, self.A, self.B)
result_single.sort_index(inplace=True)
# use two jobs
comp = recordlinkage.Compare(n_jobs=2)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_2processes = comp.compute(self.index_AB, self.A, self.B)
result_2processes.sort_index(inplace=True)
# compare results
pdt.assert_frame_equal(result_single, result_2processes)
def test_parallel_comparing(self):
# use single job
comp = recordlinkage.Compare(n_jobs=1)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_single = comp.compute(self.index_AB, self.A, self.B)
result_single.sort_index(inplace=True)
# use two jobs
comp = recordlinkage.Compare(n_jobs=2)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_2processes = comp.compute(self.index_AB, self.A, self.B)
result_2processes.sort_index(inplace=True)
# use two jobs
comp = recordlinkage.Compare(n_jobs=4)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_4processes = comp.compute(self.index_AB, self.A, self.B)
result_4processes.sort_index(inplace=True)
# compare results
pdt.assert_frame_equal(result_single, result_2processes)
pdt.assert_frame_equal(result_single, result_4processes)
def test_pickle(self):
# test if it is possible to pickle the Compare class
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name')
comp.numeric('number', 'number')
comp.geo('lat', 'lng', 'lat', 'lng')
comp.date('before', 'after')
# do the test
pickle_path = os.path.join(self.test_dir, 'pickle_compare_obj.pickle')
pickle.dump(comp, open(pickle_path, 'wb'))
def test_manual_parallel_joblib(self):
# test if it is possible to pickle the Compare class
# This is only available for python 3. For python 2, it is not
# possible to pickle instancemethods. A workaround can be found at
# https://stackoverflow.com/a/29873604/8727928
if sys.version.startswith("3"):
# import joblib dependencies
from joblib import Parallel, delayed
# split the data into smaller parts
len_index = int(len(self.index_AB) / 2)
df_chunks = [self.index_AB[0:len_index], self.index_AB[len_index:]]
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name')
comp.string('lastname', 'lastname')
comp.exact('street', 'street')
# do in parallel
Parallel(n_jobs=2)(
delayed(comp.compute)(df_chunks[i], self.A, self.B)
for i in [0, 1])
def test_indexing_types(self):
# test the two types of indexing
# this test needs improvement
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B_reversed = B[::-1].copy()
ix = MultiIndex.from_arrays([np.arange(5), np.arange(5)])
# test with label indexing type
comp_label = recordlinkage.Compare(indexing_type='label')
comp_label.exact('col', 'col')
result_label = comp_label.compute(ix, A, B_reversed)
# test with position indexing type
comp_position = recordlinkage.Compare(indexing_type='position')
comp_position.exact('col', 'col')
result_position = comp_position.compute(ix, A, B_reversed)
assert (result_position.values == 1).all(axis=0)
pdt.assert_frame_equal(result_label, result_position)
def test_pass_list_of_features(self):
from recordlinkage.compare import FrequencyA, VariableA, VariableB
# setup datasets and record pairs
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
ix = MultiIndex.from_arrays([np.arange(5), np.arange(5)])
# test with label indexing type
features = [
VariableA('col', label='y1'),
VariableB('col', label='y2'),
FrequencyA('col', label='y3')
]
comp_label = recordlinkage.Compare(features=features)
result_label = comp_label.compute(ix, A, B)
assert list(result_label) == ["y1", "y2", "y3"]
class TestCompareFeatures(TestData):
def test_feature(self):
# test using classes and the base class
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
feature = BaseCompareFeature('col', 'col')
feature._f_compare_vectorized = lambda s1, s2: np.ones(len(s1))
feature.compute(ix, A, B)
def test_feature_multicolumn_return(self):
# test using classes and the base class
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
def ones(s1, s2):
return DataFrame(np.ones((len(s1), 3)))
feature = BaseCompareFeature('col', 'col')
feature._f_compare_vectorized = ones
result = feature.compute(ix, A, B)
assert result.shape == (5, 3)
def test_feature_multicolumn_input(self):
# test using classes and the base class
A = DataFrame({
'col1': ['abc', 'abc', 'abc', 'abc', 'abc'],
'col2': ['abc', 'abc', 'abc', 'abc', 'abc']
})
B = DataFrame({
'col1': ['abc', 'abd', 'abc', 'abc', '123'],
'col2': ['abc', 'abd', 'abc', 'abc', '123']
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
feature = BaseCompareFeature(['col1', 'col2'], ['col1', 'col2'])
feature._f_compare_vectorized = \
lambda s1_1, s1_2, s2_1, s2_2: np.ones(len(s1_1))
feature.compute(ix, A, B)
class TestCompareExact(TestData):
"""Test the exact comparison method."""
def test_exact_str_type(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
expected = DataFrame([1, 0, 1, 1, 0], index=ix)
comp = recordlinkage.Compare()
comp.exact('col', 'col')
result = comp.compute(ix, A, B)
pdt.assert_frame_equal(result, expected)
def test_exact_num_type(self):
A = DataFrame({'col': [42, 42, 41, 43, nan]})
B = DataFrame({'col': [42, 42, 42, 42, 42]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
expected = DataFrame([1, 1, 0, 0, 0], index=ix)
comp = recordlinkage.Compare()
comp.exact('col', 'col')
result = comp.compute(ix, A, B)
pdt.assert_frame_equal(result, expected)
def test_link_exact_missing(self):
A = DataFrame({'col': [u'a', u'b', u'c', u'd', nan]})
B = DataFrame({'col': [u'a', u'b', u'd', nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.exact('col', 'col', label='na_')
comp.exact('col', 'col', missing_value=0, label='na_0')
comp.exact('col', 'col', missing_value=9, label='na_9')
comp.exact('col', 'col', missing_value=nan, label='na_na')
comp.exact('col', 'col', missing_value='str', label='na_str')
result = comp.compute(ix, A, B)
# Missing values as default
expected = Series([1, 1, 0, 0, 0], index=ix, name='na_')
pdt.assert_series_equal(result['na_'], expected)
# Missing values as 0
expected = Series([1, 1, 0, 0, 0], index=ix, name='na_0')
pdt.assert_series_equal(result['na_0'], expected)
# Missing values as 9
expected = Series([1, 1, 0, 9, 9], index=ix, name='na_9')
pdt.assert_series_equal(result['na_9'], expected)
# Missing values as nan
expected = Series([1, 1, 0, nan, nan], index=ix, name='na_na')
pdt.assert_series_equal(result['na_na'], expected)
# Missing values as string
expected = Series([1, 1, 0, 'str', 'str'], index=ix, name='na_str')
pdt.assert_series_equal(result['na_str'], expected)
def test_link_exact_disagree(self):
A = DataFrame({'col': [u'a', u'b', u'c', u'd', nan]})
B = DataFrame({'col': [u'a', u'b', u'd', nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.exact('col', 'col', label='d_')
comp.exact('col', 'col', disagree_value=0, label='d_0')
comp.exact('col', 'col', disagree_value=9, label='d_9')
comp.exact('col', 'col', disagree_value=nan, label='d_na')
comp.exact('col', 'col', disagree_value='str', label='d_str')
result = comp.compute(ix, A, B)
# disagree values as default
expected = Series([1, 1, 0, 0, 0], index=ix, name='d_')
pdt.assert_series_equal(result['d_'], expected)
# disagree values as 0
expected = Series([1, 1, 0, 0, 0], index=ix, name='d_0')
pdt.assert_series_equal(result['d_0'], expected)
# disagree values as 9
expected = Series([1, 1, 9, 0, 0], index=ix, name='d_9')
pdt.assert_series_equal(result['d_9'], expected)
# disagree values as nan
expected = Series([1, 1, nan, 0, 0], index=ix, name='d_na')
pdt.assert_series_equal(result['d_na'], expected)
# disagree values as string
expected = Series([1, 1, 'str', 0, 0], index=ix, name='d_str')
pdt.assert_series_equal(result['d_str'], expected)
# tests/test_compare.py:TestCompareNumeric
class TestCompareNumeric(TestData):
"""Test the numeric comparison methods."""
def test_numeric(self):
A = DataFrame({'col': [1, 1, 1, nan, 0]})
B = DataFrame({'col': [1, 2, 3, nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.numeric('col', 'col', 'step', offset=2)
comp.numeric('col', 'col', method='step', offset=2)
comp.numeric('col', 'col', 'step', 2)
result = comp.compute(ix, A, B)
# Basics
expected = Series([1.0, 1.0, 1.0, 0.0, 0.0], index=ix, name=0)
pdt.assert_series_equal(result[0], expected)
# Basics
expected = Series([1.0, 1.0, 1.0, 0.0, 0.0], index=ix, name=1)
pdt.assert_series_equal(result[1], expected)
# Basics
expected = Series([1.0, 1.0, 1.0, 0.0, 0.0], index=ix, name=2)
pdt.assert_series_equal(result[2], expected)
def test_numeric_with_missings(self):
A = DataFrame({'col': [1, 1, 1, nan, 0]})
B = DataFrame({'col': [1, 1, 1, nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.numeric('col', 'col', scale=2)
comp.numeric('col', 'col', scale=2, missing_value=0)
comp.numeric('col', 'col', scale=2, missing_value=123.45)
comp.numeric('col', 'col', scale=2, missing_value=nan)
comp.numeric('col', 'col', scale=2, missing_value='str')
result = comp.compute(ix, A, B)
# Missing values as default
expected = Series([1.0, 1.0, 1.0, 0.0, 0.0], index=ix, name=0)
pdt.assert_series_equal(result[0], expected)
# Missing values as 0
expected = Series(
[1.0, 1.0, 1.0, 0.0, 0.0], index=ix, dtype=np.float64, name=1)
pdt.assert_series_equal(result[1], expected)
# Missing values as 123.45
expected = Series([1.0, 1.0, 1.0, 123.45, 123.45], index=ix, name=2)
pdt.assert_series_equal(result[2], expected)
# Missing values as nan
expected = Series([1.0, 1.0, 1.0, nan, nan], index=ix, name=3)
pdt.assert_series_equal(result[3], expected)
# Missing values as string
expected = Series(
[1, 1, 1, 'str', 'str'], index=ix, dtype=object, name=4)
pdt.assert_series_equal(result[4], expected)
@pytest.mark.parametrize("alg", NUMERIC_SIM_ALGORITHMS)
def test_numeric_algorithms(self, alg):
A = DataFrame({'col': [1, 1, 1, 1, 1]})
B = DataFrame({'col': [1, 2, 3, 4, 5]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.numeric('col', 'col', method='step', offset=1, label='step')
comp.numeric(
'col', 'col', method='linear', offset=1, scale=2, label='linear')
comp.numeric(
'col', 'col', method='squared', offset=1, scale=2, label='squared')
comp.numeric(
'col', 'col', method='exp', offset=1, scale=2, label='exp')
comp.numeric(
'col', 'col', method='gauss', offset=1, scale=2, label='gauss')
result_df = comp.compute(ix, A, B)
result = result_df[alg]
# All values between 0 and 1.
assert (result >= 0.0).all()
assert (result <= 1.0).all()
if alg != 'step':
print(alg)
print(result)
# sim(scale) = 0.5
expected_bool = Series(
[False, False, False, True, False], index=ix, name=alg)
pdt.assert_series_equal(result == 0.5, expected_bool)
# sim(offset) = 1
expected_bool = Series(
[True, True, False, False, False], index=ix, name=alg)
pdt.assert_series_equal(result == 1.0, expected_bool)
# sim(scale) larger than 0.5
expected_bool = Series(
[False, False, True, False, False], index=ix, name=alg)
pdt.assert_series_equal((result > 0.5) & (result < 1.0),
expected_bool)
# sim(scale) smaller than 0.5
expected_bool = Series(
[False, False, False, False, True], index=ix, name=alg)
pdt.assert_series_equal((result < 0.5) & (result >= 0.0),
expected_bool)
@pytest.mark.parametrize("alg", NUMERIC_SIM_ALGORITHMS)
def test_numeric_algorithms_errors(self, alg):
# scale negative
if alg != "step":
with pytest.raises(ValueError):
comp = recordlinkage.Compare()
comp.numeric('age', 'age', method=alg, offset=2, scale=-2)
comp.compute(self.index_AB, self.A, self.B)
# offset negative
with pytest.raises(ValueError):
comp = recordlinkage.Compare()
comp.numeric('age', 'age', method=alg, offset=-2, scale=-2)
comp.compute(self.index_AB, self.A, self.B)
def test_numeric_does_not_exist(self):
# raise when algorithm doesn't exist
A = DataFrame({'col': [1, 1, 1, nan, 0]})
B = DataFrame({'col': [1, 1, 1, nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.numeric('col', 'col', method='unknown_algorithm')
pytest.raises(ValueError, comp.compute, ix, A, B)
# tests/test_compare.py:TestCompareDates
class TestCompareDates(TestData):
"""Test the exact comparison method."""
def test_dates(self):
A = DataFrame({
'col':
to_datetime(
['2005/11/23', nan, '2004/11/23', '2010/01/10', '2010/10/30'])
})
B = DataFrame({
'col':
to_datetime([
'2005/11/23', '2010/12/31', '2005/11/23', '2010/10/01',
'2010/9/30'
])
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.date('col', 'col')
result = comp.compute(ix, A, B)[0]
expected = Series([1, 0, 0, 0.5, 0.5], index=ix, name=0)
pdt.assert_series_equal(result, expected)
def test_date_incorrect_dtype(self):
A = DataFrame({
'col':
['2005/11/23', nan, '2004/11/23', '2010/01/10', '2010/10/30']
})
B = DataFrame({
'col': [
'2005/11/23', '2010/12/31', '2005/11/23', '2010/10/01',
'2010/9/30'
]
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
A['col1'] = to_datetime(A['col'])
B['col1'] = to_datetime(B['col'])
comp = recordlinkage.Compare()
comp.date('col', 'col1')
pytest.raises(ValueError, comp.compute, ix, A, B)
comp = recordlinkage.Compare()
comp.date('col1', 'col')
pytest.raises(ValueError, comp.compute, ix, A, B)
def test_dates_with_missings(self):
A = DataFrame({
'col':
to_datetime(
['2005/11/23', nan, '2004/11/23', '2010/01/10', '2010/10/30'])
})
B = DataFrame({
'col':
to_datetime([
'2005/11/23', '2010/12/31', '2005/11/23', '2010/10/01',
'2010/9/30'
])
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.date('col', 'col', label='m_')
comp.date('col', 'col', missing_value=0, label='m_0')
comp.date('col', 'col', missing_value=123.45, label='m_float')
comp.date('col', 'col', missing_value=nan, label='m_na')
comp.date('col', 'col', missing_value='str', label='m_str')
result = comp.compute(ix, A, B)
# Missing values as default
expected = Series([1, 0, 0, 0.5, 0.5], index=ix, name='m_')
pdt.assert_series_equal(result['m_'], expected)
# Missing values as 0
expected = Series([1, 0, 0, 0.5, 0.5], index=ix, name='m_0')
pdt.assert_series_equal(result['m_0'], expected)
# Missing values as 123.45
expected = Series([1, 123.45, 0, 0.5, 0.5], index=ix, name='m_float')
pdt.assert_series_equal(result['m_float'], expected)
# Missing values as nan
expected = Series([1, nan, 0, 0.5, 0.5], index=ix, name='m_na')
pdt.assert_series_equal(result['m_na'], expected)
# Missing values as string
expected = Series(
[1, 'str', 0, 0.5, 0.5], index=ix, dtype=object, name='m_str')
pdt.assert_series_equal(result['m_str'], expected)
def test_dates_with_swap(self):
months_to_swap = [(9, 10, 123.45), (10, 9, 123.45), (1, 2, 123.45),
(2, 1, 123.45)]
A = DataFrame({
'col':
to_datetime(
['2005/11/23', nan, '2004/11/23', '2010/01/10', '2010/10/30'])
})
B = DataFrame({
'col':
to_datetime([
'2005/11/23', '2010/12/31', '2005/11/23', '2010/10/01',
'2010/9/30'
])
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.date('col', 'col', label='s_')
comp.date(
'col', 'col', swap_month_day=0, swap_months='default', label='s_1')
comp.date(
'col',
'col',
swap_month_day=123.45,
swap_months='default',
label='s_2')
comp.date(
'col',
'col',
swap_month_day=123.45,
swap_months=months_to_swap,
label='s_3')
comp.date(
'col',
'col',
swap_month_day=nan,
swap_months='default',
missing_value=nan,
label='s_4')
comp.date('col', 'col', swap_month_day='str', label='s_5')
result = comp.compute(ix, A, B)
# swap_month_day as default
expected = Series([1, 0, 0, 0.5, 0.5], index=ix, name='s_')
pdt.assert_series_equal(result['s_'], expected)
# swap_month_day and swap_months as 0
expected = Series([1, 0, 0, 0, 0.5], index=ix, name='s_1')
pdt.assert_series_equal(result['s_1'], expected)
# swap_month_day 123.45 (float)
expected = Series([1, 0, 0, 123.45, 0.5], index=ix, name='s_2')
pdt.assert_series_equal(result['s_2'], expected)
# swap_month_day and swap_months 123.45 (float)
expected = Series([1, 0, 0, 123.45, 123.45], index=ix, name='s_3')
pdt.assert_series_equal(result['s_3'], expected)
# swap_month_day and swap_months as nan
expected = Series([1, nan, 0, nan, 0.5], index=ix, name='s_4')
pdt.assert_series_equal(result['s_4'], expected)
# swap_month_day as string
expected = Series(
[1, 0, 0, 'str', 0.5], index=ix, dtype=object, name='s_5')
pdt.assert_series_equal(result['s_5'], expected)
# tests/test_compare.py:TestCompareGeo
class TestCompareGeo(TestData):
"""Test the geo comparison method."""
def test_geo(self):
# Utrecht, Amsterdam, Rotterdam (Cities in The Netherlands)
A = DataFrame({
'lat': [52.0842455, 52.3747388, 51.9280573],
'lng': [5.0124516, 4.7585305, 4.4203581]
})
B = DataFrame({
'lat': [52.3747388, 51.9280573, 52.0842455],
'lng': [4.7585305, 4.4203581, 5.0124516]
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.geo(
'lat', 'lng', 'lat', 'lng', method='step',
offset=50) # 50 km range
result = comp.compute(ix, A, B)
# Missing values as default [36.639460, 54.765854, 44.092472]
expected = Series([1.0, 0.0, 1.0], index=ix, name=0)
pdt.assert_series_equal(result[0], expected)
def test_geo_batch(self):
# Utrecht, Amsterdam, Rotterdam (Cities in The Netherlands)
A = DataFrame({
'lat': [52.0842455, 52.3747388, 51.9280573],
'lng': [5.0124516, 4.7585305, 4.4203581]
})
B = DataFrame({
'lat': [52.3747388, 51.9280573, 52.0842455],
'lng': [4.7585305, 4.4203581, 5.0124516]
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.geo(
'lat', 'lng', 'lat', 'lng', method='step', offset=1, label='step')
comp.geo(
'lat',
'lng',
'lat',
'lng',
method='linear',
offset=1,
scale=2,
label='linear')
comp.geo(
'lat',
'lng',
'lat',
'lng',
method='squared',
offset=1,
scale=2,
label='squared')
comp.geo(
'lat',
'lng',
'lat',
'lng',
method='exp',
offset=1,
scale=2,
label='exp')
comp.geo(
'lat',
'lng',
'lat',
'lng',
method='gauss',
offset=1,
scale=2,
label='gauss')
result_df = comp.compute(ix, A, B)
print(result_df)
for alg in ['step', 'linear', 'squared', 'exp', 'gauss']:
result = result_df[alg]
# All values between 0 and 1.
assert (result >= 0.0).all()
assert (result <= 1.0).all()
def test_geo_does_not_exist(self):
# Utrecht, Amsterdam, Rotterdam (Cities in The Netherlands)
A = DataFrame({
'lat': [52.0842455, 52.3747388, 51.9280573],
'lng': [5.0124516, 4.7585305, 4.4203581]
})
B = DataFrame({
'lat': [52.3747388, 51.9280573, 52.0842455],
'lng': [4.7585305, 4.4203581, 5.0124516]
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.geo('lat', 'lng', 'lat', 'lng', method='unknown')
pytest.raises(ValueError, comp.compute, ix, A, B)
class TestCompareStrings(TestData):
"""Test the exact comparison method."""
def test_defaults(self):
# the default algorithm is the levenshtein algorithm
# test that the default values are identical to levenshtein
A = DataFrame({
'col': [u'str_abc', u'str_abc', u'str_abc', nan, u'hsdkf']
})
B = DataFrame({'col': [u'str_abc', u'str_abd', u'jaskdfsd', nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.string('col', 'col', label='default')
comp.string('col', 'col', method='levenshtein', label='with_args')
result = comp.compute(ix, A, B)
pdt.assert_series_equal(
result['default'].rename(None),
result['with_args'].rename(None)
)
def test_fuzzy(self):
A = DataFrame({
'col': [u'str_abc', u'str_abc', u'str_abc', nan, u'hsdkf']
})
B = DataFrame({'col': [u'str_abc', u'str_abd', u'jaskdfsd', nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.string('col', 'col', method='jaro', missing_value=0)
comp.string('col', 'col', method='q_gram', missing_value=0)
comp.string('col', 'col', method='cosine', missing_value=0)
comp.string('col', 'col', method='jaro_winkler', missing_value=0)
comp.string('col', 'col', method='dameraulevenshtein', missing_value=0)
comp.string('col', 'col', method='levenshtein', missing_value=0)
result = comp.compute(ix, A, B)
print(result)
assert result.notnull().all(1).all(0)
assert (result[result.notnull()] >= 0).all(1).all(0)
assert (result[result.notnull()] <= 1).all(1).all(0)
def test_threshold(self):
A = DataFrame({'col': [u"gretzky", u"gretzky99", u"gretzky", u"gretzky"]})
B = DataFrame({'col': [u"gretzky", u"gretzky", nan, u"wayne"]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.string(
'col',
'col',
method="levenshtein",
threshold=0.5,
missing_value=2.0,
label="x_col1"
)
comp.string(
'col',
'col',
method="levenshtein",
threshold=1.0,
missing_value=0.5,
label="x_col2"
)
comp.string(
'col',
'col',
method="levenshtein",
threshold=0.0,
missing_value=nan,
label="x_col3"
)
result = comp.compute(ix, A, B)
expected = Series([1.0, 1.0, 2.0, 0.0], index=ix, name="x_col1")
pdt.assert_series_equal(result["x_col1"], expected)
expected = Series([1.0, 0.0, 0.5, 0.0], index=ix, name="x_col2")
pdt.assert_series_equal(result["x_col2"], expected)
expected = Series([1.0, 1.0, nan, 1.0], index=ix, name="x_col3")
pdt.assert_series_equal(result["x_col3"], expected)
@pytest.mark.parametrize("alg", STRING_SIM_ALGORITHMS)
def test_incorrect_input(self, alg):
A = DataFrame({'col': [1, 1, 1, nan, 0]})
B = DataFrame({'col': [1, 1, 1, nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
with pytest.raises(Exception):
comp = recordlinkage.Compare()
comp.string('col', 'col', method=alg)
comp.compute(ix, A, B)
@pytest.mark.parametrize("alg", STRING_SIM_ALGORITHMS)
def test_string_algorithms_nan(self, alg):
A = DataFrame({'col': [u"nan", nan, nan, nan, nan]})
B = DataFrame({'col': [u"nan", nan, nan, nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.string('col', 'col', method=alg)
result = comp.compute(ix, A, B)[0]
expected = Series([1.0, 0.0, 0.0, 0.0, 0.0], index=ix, name=0)
pdt.assert_series_equal(result, expected)
comp = recordlinkage.Compare()
comp.string('col', 'col', method=alg, missing_value=nan)
result = comp.compute(ix, A, B)[0]
expected = Series([1.0, nan, nan, nan, nan], index=ix, name=0)
pdt.assert_series_equal(result, expected)
comp = recordlinkage.Compare()
comp.string('col', 'col', method=alg, missing_value=9.0)
result = comp.compute(ix, A, B)[0]
expected = Series([1.0, 9.0, 9.0, 9.0, 9.0], index=ix, name=0)
pdt.assert_series_equal(result, expected)
@pytest.mark.parametrize("alg", STRING_SIM_ALGORITHMS)
def test_string_algorithms(self, alg):
A = DataFrame({
'col': [u'str_abc', u'str_abc', u'str_abc', nan, u'hsdkf']
})
B = DataFrame({'col': [u'str_abc', u'str_abd', u'jaskdfsd', nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.string('col', 'col', method=alg, missing_value=0)
result = comp.compute(ix, A, B)[0]
assert result.notnull().all()
assert (result >= 0).all()
assert (result <= 1).all()
assert (result > 0).any()
assert (result < 1).any()
def test_fuzzy_does_not_exist(self):
A = DataFrame({
'col': [u'str_abc', u'str_abc', u'str_abc', nan, u'hsdkf']
})
B = DataFrame({'col': [u'str_abc', u'str_abd', u'jaskdfsd', nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.string('col', 'col', method='unknown_algorithm')
pytest.raises(ValueError, comp.compute, ix, A, B)
class TestCompareFreq(object):
def test_freq(self):
# data
array_repeated = np.repeat(np.arange(10), 10)
array_tiled = np.tile(np.arange(20), 5)
# convert to pandas data
A = DataFrame({'col': array_repeated})
B = DataFrame({'col': array_tiled})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# the part to test
from recordlinkage.compare import Frequency, FrequencyA, FrequencyB
comp = recordlinkage.Compare()
comp.add(Frequency(left_on='col'))
comp.add(FrequencyA('col'))
result = comp.compute(ix, A, B)
expected = Series(np.ones((100, )) / 10, index=ix)
pdt.assert_series_equal(result[0], expected.rename(0))
pdt.assert_series_equal(result[1], expected.rename(1))
comp = recordlinkage.Compare()
comp.add(Frequency(right_on='col'))
comp.add(FrequencyB('col'))
result = comp.compute(ix, A, B)
expected = Series(np.ones((100, )) / 20, index=ix)
pdt.assert_series_equal(result[0], expected.rename(0))
pdt.assert_series_equal(result[1], expected.rename(1))
def test_freq_normalise(self):
# data
array_repeated = np.repeat(np.arange(10), 10)
array_tiled = np.tile(np.arange(20), 5)
# convert to pandas data
A = DataFrame({'col': array_repeated})
B = DataFrame({'col': array_tiled})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# the part to test
from recordlinkage.compare import Frequency
comp = recordlinkage.Compare()
comp.add(Frequency(left_on='col', normalise=False))
result = comp.compute(ix, A, B)
expected = DataFrame(np.ones((100, )) * 10, index=ix)
pdt.assert_frame_equal(result, expected)
comp = recordlinkage.Compare()
comp.add(Frequency(right_on='col', normalise=False))
result = comp.compute(ix, A, B)
expected = DataFrame(np.ones((100, )) * 5, index=ix)
pdt.assert_frame_equal(result, expected)
@pytest.mark.parametrize('missing_value', [0.0, np.nan, 10.0])
def test_freq_nan(self, missing_value):
# data
array_repeated = np.repeat(np.arange(10, dtype=np.float64), 10)
array_repeated[90:] = np.nan
array_tiled = np.tile(np.arange(20, dtype=np.float64), 5)
# convert to pandas data
A = DataFrame({'col': array_repeated})
B = DataFrame({'col': array_tiled})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# the part to test
from recordlinkage.compare import Frequency
comp = recordlinkage.Compare()
comp.add(Frequency(left_on='col', missing_value=missing_value))
result = comp.compute(ix, A, B)
expected_np = np.ones((100, )) / 10
expected_np[90:] = missing_value
expected = DataFrame(expected_np, index=ix)
pdt.assert_frame_equal(result, expected)
class TestCompareVariable(object):
def test_variable(self):
# data
arrayA = np.random.random((100,))
arrayB = np.random.random((100,))
# convert to pandas data
A = DataFrame({'col': arrayA})
B = DataFrame({'col': arrayB})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# the part to test
from recordlinkage.compare import Variable, VariableA, VariableB
comp = recordlinkage.Compare()
comp.add(Variable(left_on='col'))
comp.add(VariableA('col'))
result = comp.compute(ix, A, B)
expected = Series(arrayA, index=ix)
pdt.assert_series_equal(result[0], expected.rename(0))
pdt.assert_series_equal(result[1], expected.rename(1))
comp = recordlinkage.Compare()
comp.add(Variable(right_on='col'))
comp.add(VariableB('col'))
result = comp.compute(ix, A, B)
expected = Series(arrayB, index=ix)
pdt.assert_series_equal(result[0], expected.rename(0))
pdt.assert_series_equal(result[1], expected.rename(1))
@pytest.mark.parametrize('missing_value', [0.0, np.nan, 10.0])
def test_variable_nan(self, missing_value):
# data
arrayA = np.random.random((100,))
arrayA[90:] = np.nan
arrayB = np.random.random((100,))
# convert to pandas data
A = DataFrame({'col': arrayA})
B = DataFrame({'col': arrayB})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
from snapedautility.detect_outliers import detect_outliers
import pandas as pd
import pytest
@pytest.fixture
def simple_series():
return pd.Series([1, 2, 1, 2, 1, 1000])
import pandas as pd
def main(type):
df = pd.read_csv('./data/servant_data_' + type + '.csv')
import math
import operator
import pandas as pd
from scipy.stats import pearsonr,spearmanr,kendalltau,rankdata
import itertools
import numpy as np
import numexpr as ne
### Basic correlation measures ###
def corr_pearson(top_list_prev, top_list, k=None):
"""Compute Pearson correlation (based on Scipy)
NOTE: Lists are DataFrame columns AND they must be sorted according to their value!!!"""
if k != None:
top_list_prev = get_top_k(top_list_prev, k)
top_list = get_top_k(top_list, k)
list_a, list_b = proc_corr(top_list_prev, top_list)
return [pearsonr(list_a, list_b)[0]]
def corr_spearman(top_list_prev, top_list, k=None):
"""Compute Spearman's Rho correlation (based on Scipy)
NOTE: Lists are DataFrame columns AND they must be sorted according to their value!!!"""
if k != None:
top_list_prev = get_top_k(top_list_prev, k)
top_list = get_top_k(top_list, k)
list_a, list_b = proc_corr(top_list_prev, top_list)
return [spearmanr(list_a, list_b)[0]]
def corr_kendalltau(top_list_prev, top_list, k=None):
"""Compute Kendall's Tau correlation (based on Scipy).
NOTE: Lists are DataFrame columns AND they must be sorted according to their value!!!"""
# it is irrelevant whether we compute kendall for ranks or scores.
if k != None:
top_list_prev = get_top_k(top_list_prev, k)
top_list = get_top_k(top_list, k)
list_a, list_b = proc_corr(top_list_prev, top_list)
return [kendalltau(list_a, list_b)[0]]
def corr_weighted_kendalltau(top_list_prev, top_list, use_fast=True):
"""Compute weighted Kendall's Tau correlation (based on custom implementation!).
NOTE: Lists are DataFrame columns AND they must be sorted according to their value!!!"""
# it is irrelevant whether we compute kendall for ranks or scores.
list_a, list_b = proc_corr(top_list_prev, top_list)
if len(list_a) != len(list_b):
raise RuntimeError("The length of 'list_a' and 'list_b' must be the same!")
if use_fast:
return [fast_weighted_kendall(list_a, list_b)[1]]
else:
rank_list_a = tiedrank(list_a)
rank_list_b = tiedrank(list_b)
return [computeWKendall(rank_list_a,rank_list_b,ranked_input=True)[1]]
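# --- Added illustration (not part of the original module) ---
# A minimal, hedged sketch of how the basic correlation measures above might be
# called. The toy score frames are made up; the helpers expect single-column
# DataFrames named "score" that are already sorted by value, per their docstrings.
def _example_basic_correlations():
    scores_prev = pd.DataFrame({'score': [0.9, 0.7, 0.3, 0.1]}, index=['a', 'b', 'c', 'd'])
    scores_curr = pd.DataFrame({'score': [0.8, 0.6, 0.4, 0.2]}, index=['a', 'c', 'b', 'e'])
    # each helper returns a one-element list holding the correlation value
    return (corr_pearson(scores_prev, scores_curr, k=3),
            corr_spearman(scores_prev, scores_curr),
            corr_kendalltau(scores_prev, scores_curr))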
### Score list preprocessor functions ###
def get_top_k(l,k):
"""Get k biggest score from a list"""
if k==None:
return l
else:
return l.sort_values("score",ascending=False).head(k)
def proc_corr(l_1, l_2):
"""Fill lists with scores ordered by the ranks in the second list.
NOTE: Lists are DataFrame columns AND they must be sorted according to their value!!!"""
l1=l_1.copy()
l2=l_2.copy()
l1.columns=['l1_col']
l2.columns=['l2_col']
df=pd.concat([l2, l1], axis=1).fillna(0.0)
index_diff=list(set(list(l1.index))-set(list(l2.index)))
index_diff.sort()
sorted_id=list(l2.index)+index_diff # NOTE: input lists must be sorted! For custom weighted correlations?
df=df.reindex(sorted_id)
return np.array(df['l1_col']), np.array(df['l2_col'])
def tiedrank(vector):
"""Return rank with average tie resolution. Rank is based on decreasing score order"""
return (len(vector) + 1) * np.ones(len(vector)) - rankdata(vector, method='average')
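# --- Added illustration (not part of the original module) ---
# Hedged example: tiedrank assigns rank 1 to the highest score and averages the
# ranks of ties. The input vector below is made up.
def _example_tiedrank():
    scores = np.array([10.0, 20.0, 20.0, 5.0])
    # expected result: [3.0, 1.5, 1.5, 4.0] -- the tied top scores share rank 1.5
    return tiedrank(scores)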
def get_union_of_active_nodes(day_1, day_2):
"""Find common subvectors of non-zero elements. (we only consider positive scores to be active nodes)"""
ind_one=np.nonzero(day_1)[0];
ind_two=np.nonzero(day_2)[0];
ind=np.union1d(ind_one,ind_two)
ranks_day_one=tiedrank(day_1[ind])
ranks_day_two=tiedrank(day_2[ind])
return ranks_day_one, ranks_day_two
def computeWKendall(day_1,day_2,ranked_input=False):
"""Compute Kendall and WKendall only for active (nonzero) positions."""
if ranked_input:
rankX, rankY = day_1, day_2
else:
rankX, rankY = get_union_of_active_nodes(day_1, day_2)
n = len(rankX)
denomX, denomY = 0, 0
denomXW, denomYW = 0, 0
num, numW = 0, 0
for i in range(n):
for j in range(i+1,n):
#weightXY= 1.0/rankY[i]+1.0/rankY[j]
#weightX=1.0/rankX[i]+1.0/rankX[j];
weightY=1.0/rankY[i]+1.0/rankY[j];
termX=np.sign(rankX[i]-rankX[j]);
termY=np.sign(rankY[i]-rankY[j]);
denomX=denomX+(termX)**2;
denomY=denomY+(termY)**2;
denomXW=denomXW+(termX)**2*weightY;
denomYW=denomYW+(termY)**2*weightY;
num=num+termX*termY;
numW=numW+termX*termY*weightY;
Kendall=num/math.sqrt(denomX*denomY);
WKendall=numW/math.sqrt(denomXW*denomYW);
return [Kendall, WKendall]
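# --- Added illustration (not part of the original module) ---
# Hedged sketch: computeWKendall on two small made-up score vectors. Only positions
# that are active (non-zero) in at least one vector enter the comparison.
def _example_compute_wkendall():
    day_1 = np.array([0.0, 3.0, 2.0, 1.0])
    day_2 = np.array([0.0, 1.0, 2.0, 3.0])
    kendall, weighted_kendall = computeWKendall(day_1, day_2)
    return kendall, weighted_kendall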
### FastWKEndall ###
def count_ties(list_with_ties):
same_as_next = [list_with_ties[i]==list_with_ties[i+1] for i in range(len(list_with_ties)-1)]+[False]
count = 1
tie_counts = []
for i in range(len(list_with_ties)):
if same_as_next[i] == True:
count+=1
else:
tie_counts.extend([count for i in range(count)])
count =1
return tie_counts
def compute_avg_ranks(tie_counts):
ranks=[]
i=0
while len(ranks)<len(tie_counts):
rank = [(2*i+tie_counts[i]+1)/2 for j in range(tie_counts[i])]
i+=tie_counts[i]
ranks.extend(rank)
return ranks
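# --- Added illustration (not part of the original module) ---
# Hedged example: count_ties and compute_avg_ranks together reproduce the
# average-rank convention on a small made-up list that is already sorted.
def _example_tie_ranks():
    values_desc = [5, 3, 3, 1]
    tie_counts = count_ties(values_desc)     # -> [1, 2, 2, 1]
    return compute_avg_ranks(tie_counts)     # -> [1.0, 2.5, 2.5, 4.0]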
def get_tie_list(index_list, value_list):
count_eq=1
value=value_list[0]
tie_indices={}
for i in range(1,len(value_list)):
if value_list[i]==value:
count_eq+=1
else:
for j in range(count_eq):
tie_indices[index_list[i-j-1]]=set([index_list[k] for k in range(i-count_eq,i)])
tie_indices[index_list[i-j-1]].remove(index_list[i-j-1])
value=value_list[i]
count_eq=1
i+=1
for j in range(count_eq):
tie_indices[index_list[i-j-1]]=set([index_list[k] for k in range(i-count_eq,i)])
tie_indices[index_list[i-j-1]].remove(index_list[i-j-1])
return tie_indices
def count_con_dis_diff(list_to_sort,tie_indices):
node_data = {'con':np.zeros(len(list_to_sort)), 'dis':np.zeros(len(list_to_sort))}
lists_to_merge = [[value] for value in list_to_sort]
index_lists = [[i] for i in range(len(list_to_sort))]
while len(lists_to_merge)>1:
merged_lists = []
merged_indicies = []
for i in range(int(len(lists_to_merge)/2)):
merged, indices = merge_list(lists_to_merge[2*i],lists_to_merge[2*i+1],
index_lists[2*i],index_lists[2*i+1], node_data, tie_indices)
merged_lists.append(merged)
merged_indicies.append(indices)
if len(lists_to_merge) % 2 != 0:
merged_lists.append(lists_to_merge[-1])
merged_indicies.append(index_lists[-1])
lists_to_merge = merged_lists
index_lists = merged_indicies
tie_counts = count_ties(lists_to_merge[0])
rank_B = compute_avg_ranks(tie_counts)
return_data = pd.DataFrame({'index':index_lists[0], 'rank_B':rank_B})
return_data.sort_values('index', inplace=True)
return_data.set_index('index', inplace=True)
return_data['concordant']=node_data["con"]
return_data['discordant']=node_data["dis"]
return return_data
def merge_list(left,right, index_left, index_right, node_data,tie_indices):
merged_list = []
merged_index = []
while ((len(left)>0) & (len(right)>0)):
if left[0]>=right[0]:
merged_list.append(left[0])
merged_index.append(index_left[0])
#####
non_ties=np.array(list(set(index_right)-tie_indices[index_left[0]])).astype('int')
node_data['con'][non_ties]+=1
node_data['con'][index_left[0]]+=len(non_ties)
#####
del left[0], index_left[0]
else:
merged_list.append(right[0])
merged_index.append(index_right[0])
####
non_ties=np.array(list(set(index_left)-tie_indices[index_right[0]])).astype('int')
node_data['dis'][non_ties]+=1
node_data['dis'][index_right[0]]+=len(non_ties)
####
del right[0], index_right[0]
if len(left)!=0:
merged_list.extend(left)
merged_index.extend(index_left)
elif len(right)!=0:
merged_list.extend(right)
merged_index.extend(index_right)
return merged_list, merged_index
def fast_weighted_kendall(x, y):
"""Weighted Kendall's Tau O(n*logn) implementation. The input lists should contain all nodes."""
# Anna switched list_a and list_b in her implementation
list_a, list_b = y, x
data_table = pd.DataFrame({'A':list_a, 'B':list_b})
# leftover debug dump to a hardcoded user path -- disabled to keep the function portable
# data_table.to_csv("/home/fberes/wkendall_test.csv", index=False)
data_table['rank_A'] = tiedrank(list_a)
data_table = data_table.sort_values(['A', 'B'], ascending=False)
data_table.reset_index(inplace=True,drop=True)
data_table['index']=data_table.index
possible_pairs=len(data_table)-1
tie_list_A =get_tie_list(data_table.index,data_table['A'])
data_table['no_tie_A']=data_table['index'].apply(lambda x: possible_pairs-len(tie_list_A[x]))
sorted_B_index = np.array(data_table['B']).argsort()
sorted_B = np.array(data_table['B'])[sorted_B_index]
tie_list_B = get_tie_list(sorted_B_index, sorted_B)
data_table['no_tie_B']=data_table['index'].apply(lambda x: possible_pairs-len(tie_list_B[x]))
data_table.drop('index', inplace=True, axis=1)
tie_indices = {key:tie_list_A[key]|tie_list_B[key] for key in tie_list_A}
list_to_sort=list(data_table['B'])
con_dis_data = count_con_dis_diff(list_to_sort,tie_indices)
data_table = pd.concat([data_table, con_dis_data], axis=1)
# -*- coding: utf-8 -*-
"""
.. moduleauthor:: <NAME> (<EMAIL>, <EMAIL>)
"""
import fnmatch
import os
import random
import shutil
import time
from collections import OrderedDict
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import stats
from scipy.stats import spearmanr, pearsonr
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from ..shared import apply_cyclic_transform, pickle_file
class ModelGeneratorBase(object):
def __init__(self, analysis_id, random_seed=None, **kwargs):
"""
Base class for generating ROMs
:param analysis_id: string, identifier of the model to build
:param random_seed: int, random seed to use
:param kwargs:
See below
:Keyword Arguments:
* *downsample* (``double``) -- Fraction to downsample the dataframe. If this exists
then the data will be downsampled, and the results will be stored in a directory with
this value appended.
"""
self.analysis_id = analysis_id
# np.random.seed requires an integer seed; time.time() returns a float, so
# fall back to the current time cast to int and seed explicitly
self.random_seed = random_seed if random_seed is not None else int(time.time())
np.random.seed(self.random_seed)
self.model_results = []
self.model_type = self.__class__.__name__
self.dataset = None
self.downsample = kwargs.get('downsample', None)
print("Initializing %s" % self.model_type)
# Initialize the directories where results are to be stored.
if self.downsample:
self.base_dir = 'output/%s_%s/%s' % (self.analysis_id, self.downsample, self.model_type)
else:
self.base_dir = 'output/%s/%s' % (self.analysis_id, self.model_type)
self.images_dir = '%s/images' % self.base_dir
self.models_dir = '%s/models' % self.base_dir
if self.downsample:
self.validation_dir = 'output/%s_%s/ValidationData' % (
self.analysis_id, self.downsample)
else:
self.validation_dir = 'output/%s/ValidationData' % self.analysis_id
self.data_dir = '%s/data' % self.base_dir
# Remove some directories if they exist
for dir_n in ['images_dir', 'models_dir']:
if os.path.exists(getattr(self, dir_n)):
# print("removing the directory %s" % dir)
shutil.rmtree(getattr(self, dir_n))
# create directory if not exist for each of the above
for dir_n in ['base_dir', 'images_dir', 'models_dir', 'data_dir', 'validation_dir']:
if not os.path.exists(getattr(self, dir_n)):
os.makedirs(getattr(self, dir_n))
for root, dirnames, filenames in os.walk(self.base_dir):
for filename in fnmatch.filter(filenames, 'cv_results_*.csv'):
os.remove('%s/%s' % (self.base_dir, filename))
for filename in fnmatch.filter(filenames, 'model_results.csv'):
os.remove('%s/%s' % (self.base_dir, filename))
def save_dataframe(self, dataframe, path):
pickle_file(dataframe, path)
def inspect(self):
"""
Inspect the dataframe and write its summary statistics and column names to the data directory.
:return: None
"""
# look at the entire dataset and save its summary statistics to the data_dir
out_df = self.dataset.describe()
out_df.to_csv(f'{self.data_dir}/statistics.csv')
# list out all the columns
out_df = self.dataset.columns
with open(f'{self.data_dir}/column_names.csv', 'w') as f:
for column in self.dataset.columns:
f.write(column + '\n')
def load_data(self, datafile):
"""
Load the data into a dataframe. The data needs to be a CSV file at the moment.
:param datafile: str, path to the CSV file to load
:return: None
"""
if os.path.exists(datafile):
self.dataset = pd.read_csv(datafile)
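# --- Added illustration (not part of the original class) ---
# Hedged usage sketch: a concrete ROM generator would subclass ModelGeneratorBase;
# the analysis id and CSV path below are made-up placeholders, and instantiation
# creates the output/... directory tree as a side effect.
def _example_usage():
    gen = ModelGeneratorBase('example_analysis', random_seed=42, downsample=0.15)
    gen.load_data('data/simulation_results.csv')   # hypothetical path
    gen.inspect()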
"""This is a finantial library useful to convert Candle Data from
financial time series datasets (Open,Close, High, Low, Volume).
It is built on Pandas and Numpy.
.. moduleauthor:: <NAME>
"""
import pandas as pd
from datetime import datetime
def convertcandle(
time: pd.Series,
open: pd.Series,
high: pd.Series,
low: pd.Series,
close: pd.Series,
timeframe: str,
fromtime: str,
totime: str,
dtformat: str) -> pd.DataFrame:
"""OHLC Candle Converter
Args:
time(pandas.Series): dataset 'Time' column.
open(pandas.Series): dataset 'Open' column.
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
timeframe(str): output candle time period (see the accepted values below).
fromtime(str): begin of the conversion window.
totime(str): end of the conversion window.
dtformat(str): datetime format string shared by time, fromtime and totime.
timeframe accepted output:
['1m', '5m', '15m', '30m', '1h', '2h', '3h', '4h', '6h', '12h', '1d', '1w', '1M', '1y']
"""
def get_input_tf(time, fmt):
first = datetime.strptime(time[0], fmt)
second = datetime.strptime(time[1], fmt)
tf = (second - first)
return tf
def check_tfs(dictframes, timeframe, time, dtformat):
"""this module verify that input is smaller than output."""
if dictframes[timeframe][1] == 'M':
dictframes[timeframe][0] = 32
dictframes[timeframe][1] = 'd'
if dictframes[timeframe][1] == 'y':
dictframes[timeframe][0] = 370
dictframes[timeframe][1] = 'd'
input_tf = get_input_tf(time, dtformat)
output_tf = pd.to_timedelta(dictframes[timeframe][0], unit=dictframes[timeframe][1])
if input_tf >= output_tf:
raise ValueError("Output timeframe must be bigger than input timeframe.")
else:
return input_tf
def get_candle_times(time, timeframe, fromtime, totime, fmt):
"""This function will generate the time series for the output candle dataframe"""
time_lst = time.tolist()
fromtime = datetime.strptime(fromtime, fmt)
totime = datetime.strptime(totime, fmt)
for t in time_lst:
t = datetime.strptime(t, fmt)
if t >= fromtime:
if timeframe[1] == 'm' and timeframe[0] < 60: # minutely tfs
if t.minute % timeframe[0] == 0 and t.second == 0:
"""You found the first candle time"""
new_time_lst = []
timedelta = pd.to_timedelta(timeframe[0], unit=timeframe[1])
while t < totime + timedelta:
str_t = datetime.strftime(t, fmt)
new_time_lst.append(str_t)
t = t + timedelta
else:
time_df = pd.Series(new_time_lst)
return time_df
if timeframe[1] == 'm' and timeframe[0] > 60: # hourly tfs
if t.hour % (timeframe[0] / 60) == 0 and t.minute == 0 and t.second == 0:
"""You found the first candle time"""
new_time_lst = []
timedelta = pd.to_timedelta(timeframe[0], unit=timeframe[1])
while t < totime + timedelta:
str_t = datetime.strftime(t, fmt)
new_time_lst.append(str_t)
t = t + timedelta
else:
time_df = pd.Series(new_time_lst)
return time_df
pass
if timeframe[1] == 'd' and timeframe[0] == 1: # daily tf
if t.hour == 0 and t.minute == 0 and t.second == 0:
"""You found the first candle time"""
new_time_lst = []
timedelta = pd.to_timedelta(timeframe[0], unit=timeframe[1])
while t < totime + timedelta:
str_t = datetime.strftime(t, fmt)
new_time_lst.append(str_t)
t = t + timedelta
else:
time_df = pd.Series(new_time_lst)
return time_df
if timeframe[1] == 'w' and timeframe[0] < 60: # weekly tf
if t.weekday() == 0 and t.hour == 0 and t.minute == 0 and t.second == 0:
"""You found the first candle time"""
new_time_lst = []
timedelta = pd.to_timedelta(timeframe[0], unit=timeframe[1])
while t < totime + timedelta:
str_t = datetime.strftime(t, fmt)
new_time_lst.append(str_t)
t = t + timedelta
else:
time_df = pd.Series(new_time_lst)
return time_df
if timeframe[1] == 'd' and timeframe[0] < 40: # monthly tf
if t.day == 1 and t.hour == 0 and t.minute == 0 and t.second == 0:
"""You found the first candle time"""
new_time_lst = []
timedelta = pd.to_timedelta(timeframe[0], unit=timeframe[1])
while t < totime + timedelta:
str_t = datetime.strftime(t, fmt)
new_time_lst.append(str_t)
if t.month < 12:
t = t.replace(month=t.month + 1)
else:
t = t.replace(month=1, year=t.year + 1)
else:
time_df = pd.Series(new_time_lst)
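# --- Added illustration (not part of the original module) ---
# Hedged sketch of the timeframe check performed in check_tfs above: the input
# candle period is inferred from the first two timestamps and must be smaller
# than the requested output period. The timestamps and format are made up.
def _example_timeframe_check():
    fmt = '%Y-%m-%d %H:%M:%S'
    first = datetime.strptime('2021-01-01 00:00:00', fmt)
    second = datetime.strptime('2021-01-01 00:05:00', fmt)
    input_tf = second - first                   # 5-minute input candles
    output_tf = pd.to_timedelta(15, unit='m')   # requested 15-minute output
    return input_tf < output_tf                 # True: conversion is possible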
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems(), aka panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.ix[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0, [0, 1, 3, 5], -2:] = df
out = p.ix[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
# GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
cp = self.panel.copy()
obj = cp.ix[indexer]
obj.values[:] = 0
self.assertTrue((obj.values == 0).all())
comp(cp.ix[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
def test_neg(self):
# what to do?
assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
p1 = tm.makePanel()
p2 = tm.makePanel()
tp = p1.reindex(items=p1.items + ['foo'])
df = p1[p1.items[0]]
def test_comp(func):
# versus same index
result = func(p1, p2)
self.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
self.assertRaises(Exception, func, p1, tp)
# versus different objs
self.assertRaises(Exception, func, p1, df)
# versus scalar
result3 = func(self.panel, 0)
self.assert_numpy_array_equal(result3.values,
func(self.panel.values, 0))
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"):
self.panel.get_value('a')
def test_set_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
self.panel.set_value(item, mjr, mnr, 1.)
assert_almost_equal(self.panel[item][mnr][mjr], 1.)
# resize
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
tm.assertIsInstance(res, Panel)
self.assertIsNot(res, self.panel)
self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
self.assertTrue(com.is_float_dtype(res3['ItemE'].values))
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"
" plus the value provided"):
self.panel.set_value('a')
_panel = tm.makePanel()
tm.add_nans(_panel)
class TestPanel(tm.TestCase, PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
self.panel = _panel.copy()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_panel_warnings(self):
with tm.assert_produces_warning(FutureWarning):
shifted1 = self.panel.shift(lags=1)
with tm.assert_produces_warning(False):
shifted2 = self.panel.shift(periods=1)
tm.assert_panel_equal(shifted1, shifted2)
with tm.assert_produces_warning(False):
shifted3 = self.panel.shift()
tm.assert_panel_equal(shifted1, shifted3)
def test_constructor(self):
# with BlockManager
wp = Panel(self.panel._data)
self.assertIs(wp._data, self.panel._data)
wp = Panel(self.panel._data, copy=True)
self.assertIsNot(wp._data, self.panel._data)
assert_panel_equal(wp, self.panel)
# strings handled properly
wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]])
self.assertEqual(wp.values.dtype, np.object_)
vals = self.panel.values
# no copy
wp = Panel(vals)
self.assertIs(wp.values, vals)
# copy
wp = Panel(vals, copy=True)
self.assertIsNot(wp.values, vals)
# GH #8285, test when scalar data is used to construct a Panel
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
('foo', np.object_)]
for (val, dtype) in value_and_dtype:
wp = Panel(val, items=range(2), major_axis=range(3),
minor_axis=range(4))
vals = np.empty((2, 3, 4), dtype=dtype)
vals.fill(val)
assert_panel_equal(wp, Panel(vals, dtype=dtype))
# test the case when dtype is passed
wp = Panel(1, items=range(2), major_axis=range(3), minor_axis=range(4),
dtype='float32')
vals = np.empty((2, 3, 4), dtype='float32')
vals.fill(1)
assert_panel_equal(wp, Panel(vals, dtype='float32'))
def test_constructor_cast(self):
zero_filled = self.panel.fillna(0)
casted = Panel(zero_filled._data, dtype=int)
casted2 = Panel(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel(zero_filled._data, dtype=np.int32)
casted2 = Panel(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [[['foo', 'bar', 'baz']]]
self.assertRaises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
self.assertEqual(len(empty.items), 0)
self.assertEqual(len(empty.major_axis), 0)
self.assertEqual(len(empty.minor_axis), 0)
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
self.assertEqual(panel.values.dtype, np.object_)
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
self.assertEqual(panel[i].values.dtype.name, dtype)
# only nan holding types allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.random.randn(2, 10, 5), items=lrange(
2), major_axis=lrange(10), minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
_check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
with tm.assertRaisesRegexp(ValueError,
"The number of dimensions required is 3"):
Panel(np.random.randn(10, 2))
def test_consolidate(self):
self.assertTrue(self.panel._data.is_consolidated())
self.panel['foo'] = 1.
self.assertFalse(self.panel._data.is_consolidated())
panel = self.panel.consolidate()
self.assertTrue(panel._data.is_consolidated())
def test_ctor_dict(self):
itema = self.panel['ItemA']
itemb = self.panel['ItemB']
d = {'A': itema, 'B': itemb[5:]}
d2 = {'A': itema._series, 'B': itemb[5:]._series}
d3 = {'A': None,
'B': DataFrame(itemb[5:]._series),
'C': DataFrame(itema._series)}
wp = Panel.from_dict(d)
wp2 = Panel.from_dict(d2) # nested Dict
# TODO: unused?
wp3 = | Panel.from_dict(d3) | pandas.core.panel.Panel.from_dict |
import csv
import logging
from datetime import datetime
from pathlib import Path
import extract_data as ex
import pandas as pd
logger = logging.getLogger(__name__)
def read_dat_as_DataFrame(input_filepath):
logger.info(f"reading {input_filepath}")
converted_count = 0
start_ts = datetime.now()
records = []
with input_filepath.open("r") as fin:
for line in fin:
if not line.startswith("A"):
continue
try:
epicenter = ex.extract_epicenter(line)
except ex.ExtractError as e:
logger.warning("skipped due to ExtractError: %s", line)
continue
except Exception as e:
logger.error("error line: %s", line)
raise e
records.append(epicenter)
converted_count += 1
df = | pd.DataFrame.from_records(records) | pandas.DataFrame.from_records |
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 15 10:59:14 2020
@author: <NAME>
"""
#reproducibility
from numpy.random import seed
seed(1+347823)
import tensorflow as tf
tf.random.set_seed(1+63493)
import numpy as np
from bayes_opt import BayesianOptimization
from bayes_opt.logger import JSONLogger
from bayes_opt.event import Events
# from bayes_opt.util import load_logs #needed if logs are already available
import os
import pandas as pd
import datetime
from scipy import stats
from matplotlib import pyplot
from sklearn.preprocessing import MinMaxScaler
from uncertainties import unumpy
gpus = tf.config.experimental.list_physical_devices('GPU')
# =============================================================================
#### Functions
# =============================================================================
def load_GW_and_HYRAS_Data(i):
#define where to find the data
pathGW = "./GWData"
pathHYRAS = "./HYRAS"
pathconnect = "/"
#load a list of all sites
well_list = pd.read_csv("./list.txt")
Well_ID = well_list.ID[i]
#load and merge the data
GWData = pd.read_csv(pathGW+pathconnect+Well_ID+'_GW-Data.csv',
parse_dates=['Date'],index_col=0, dayfirst = True,
decimal = '.', sep=',')
HYRASData = pd.read_csv(pathHYRAS+pathconnect+Well_ID+'_weeklyData_HYRAS.csv',
parse_dates=['Date'],index_col=0, dayfirst = True,
decimal = '.', sep=',')
data = pd.merge(GWData, HYRASData, how='inner', left_index = True, right_index = True)
return data, Well_ID
def split_data(data, GLOBAL_SETTINGS):
#split the test data from the rest
dataset = data[(data.index < GLOBAL_SETTINGS["test_start"])] #split off the test data
#split remaining time series into three parts 80%-10%-10%
TrainingData = dataset[0:round(0.8 * len(dataset))]
StopData = dataset[round(0.8 * len(dataset))+1:round(0.9 * len(dataset))]
StopData_ext = dataset[round(0.8 * len(dataset))+1-GLOBAL_SETTINGS["seq_length"]:round(0.9 * len(dataset))] #extend data according to delays/sequence length
OptData = dataset[round(0.9 * len(dataset))+1:]
OptData_ext = dataset[round(0.9 * len(dataset))+1-GLOBAL_SETTINGS["seq_length"]:] #extend data according to delays/sequence length
TestData = data[(data.index >= GLOBAL_SETTINGS["test_start"]) & (data.index <= GLOBAL_SETTINGS["test_end"])]
TestData_ext = pd.concat([dataset.iloc[-GLOBAL_SETTINGS["seq_length"]:], TestData], axis=0) # extend Testdata to be able to fill sequence later
return TrainingData, StopData, StopData_ext, OptData, OptData_ext, TestData, TestData_ext
def to_supervised(data, GLOBAL_SETTINGS):
#make the data sequential
#modified after <NAME> and machinelearningmastery.com
X, Y = list(), list()
# step over the entire history one time step at a time
for i in range(len(data)):
# find the end of this pattern
end_idx = i + GLOBAL_SETTINGS["seq_length"]
# check if we are beyond the dataset
if end_idx >= len(data):
break
# gather input and output parts of the pattern
seq_x, seq_y = data[i:end_idx, 1:], data[end_idx, 0]
X.append(seq_x)
Y.append(seq_y)
return np.array(X), np.array(Y)
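# Editorial note (not in the original script): for an input array of shape (n, k)
# whose first column is the target, to_supervised yields
#   X.shape == (n - seq_length, seq_length, k - 1)   # lagged input features
#   Y.shape == (n - seq_length,)                     # target at the step after each window
# e.g. with a hypothetical seq_length of 52 and data.shape == (520, 5):
#   X, Y = to_supervised(data, {"seq_length": 52})   # X: (468, 52, 4), Y: (468,)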
class MCDropout(tf.keras.layers.Dropout):
#define Monte Carlo Dropout Layer, where training state is always true (even during prediction)
def call(self, inputs):
return super().call(inputs, training=True)
def predict_distribution(X, model, n):
preds = [model(X) for _ in range(n)]
return np.hstack(preds)
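# Usage sketch (variable names are illustrative): because MCDropout stays active at
# prediction time, repeated forward passes differ, and predict_distribution stacks
# them column-wise so an uncertainty band can be derived afterwards:
#   preds = predict_distribution(X_test, model, n=100)     # shape: (len(X_test), 100)
#   y_mean, y_std = preds.mean(axis=1), preds.std(axis=1)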
def gwmodel(ini,GLOBAL_SETTINGS,X_train, Y_train,X_stop, Y_stop):
# define model
seed(ini+872527)
tf.random.set_seed(ini+87747)
inp = tf.keras.Input(shape=(GLOBAL_SETTINGS["seq_length"], X_train.shape[2]))
cnn = tf.keras.layers.Conv1D(filters=GLOBAL_SETTINGS["filters"],
kernel_size=GLOBAL_SETTINGS["kernel_size"],
activation='relu',
padding='same')(inp)
cnn = tf.keras.layers.MaxPool1D(padding='same')(cnn)
cnn = MCDropout(0.5)(cnn)
cnn = tf.keras.layers.Flatten()(cnn)
cnn = tf.keras.layers.Dense(GLOBAL_SETTINGS["dense_size"], activation='relu')(cnn)
output1 = tf.keras.layers.Dense(1, activation='linear')(cnn)
# tie together
model = tf.keras.Model(inputs=inp, outputs=output1)
optimizer = tf.keras.optimizers.Adam(learning_rate=GLOBAL_SETTINGS["learning_rate"],
epsilon=10E-3, clipnorm=GLOBAL_SETTINGS["clip_norm"])
model.compile(loss='mse', optimizer=optimizer, metrics=['mse'])
# early stopping
es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', mode='min',
verbose=0, patience=15,restore_best_weights = True)
# fit network
history = model.fit(X_train, Y_train, validation_data=(X_stop, Y_stop),
epochs=GLOBAL_SETTINGS["epochs"], verbose=0,
batch_size=GLOBAL_SETTINGS["batch_size"], callbacks=[es])
return model, history
def bayesOpt_function(pp,densesize, seqlength, batchsize, filters):
#cast the continuous values suggested by the optimizer to discrete (integer) hyperparameters
densesize_int = int(densesize)
seqlength_int = int(seqlength)
batchsize_int = int(batchsize)
filters_int = int(filters)
pp = int(pp)
return bayesOpt_function_with_discrete_params(pp, densesize_int, seqlength_int, batchsize_int, filters_int)
def bayesOpt_function_with_discrete_params(pp,densesize_int, seqlength_int, batchsize_int, filters_int):
assert type(densesize_int) == int
assert type(seqlength_int) == int
assert type(batchsize_int) == int
assert type(filters_int) == int
#[...]
# fixed settings for all experiments
GLOBAL_SETTINGS = {
'pp': pp,
'batch_size': batchsize_int, #16-128
'kernel_size': 3, #must be odd!
'dense_size': densesize_int,
'filters': filters_int,
'seq_length': seqlength_int,
'clip_norm': True,
'clip_value': 1,
'epochs': 100,
'learning_rate': 1e-3,
'test_start': pd.to_datetime('02012012', format='%d%m%Y'),
'test_end': pd.to_datetime('28122015', format='%d%m%Y')
}
## load data
data, Well_ID = load_GW_and_HYRAS_Data(GLOBAL_SETTINGS["pp"])
#modify test period if data ends earlier
if GLOBAL_SETTINGS["test_end"] > data.index[-1]:
GLOBAL_SETTINGS["test_end"] = data.index[-1]
GLOBAL_SETTINGS["test_start"] = GLOBAL_SETTINGS["test_end"] - datetime.timedelta(days=(365*4))
#scale data
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler_gwl = MinMaxScaler(feature_range=(-1, 1))
scaler_gwl.fit(pd.DataFrame(data['GWL']))
data_n = pd.DataFrame(scaler.fit_transform(data), index=data.index, columns=data.columns)
#split data
TrainingData, StopData, StopData_ext, OptData, OptData_ext, TestData, TestData_ext = split_data(data, GLOBAL_SETTINGS)
TrainingData_n, StopData_n, StopData_ext_n, OptData_n, OptData_ext_n, TestData_n, TestData_ext_n = split_data(data_n, GLOBAL_SETTINGS)
#sequence data
X_train, Y_train = to_supervised(TrainingData_n.values, GLOBAL_SETTINGS)
X_stop, Y_stop = to_supervised(StopData_ext_n.values, GLOBAL_SETTINGS)
X_opt, Y_opt = to_supervised(OptData_ext_n.values, GLOBAL_SETTINGS)
X_test, Y_test = to_supervised(TestData_ext_n.values, GLOBAL_SETTINGS)
#build and train model with different initializations
os.chdir(basedir)
inimax = 3
optresults_members = np.zeros((len(X_opt), inimax))
for ini in range(inimax):
print("(pp:{}) BayesOpt-Iteration {} - ini-Ensemblemember {}".format(pp,len(optimizer.res)+1, ini+1))
model,history = gwmodel(ini,GLOBAL_SETTINGS,X_train, Y_train, X_stop, Y_stop)
opt_sim_n = model.predict(X_opt)
opt_sim = scaler_gwl.inverse_transform(opt_sim_n)
optresults_members[:, ini] = opt_sim.reshape(-1,)
opt_sim_median = np.median(optresults_members,axis = 1)
sim = np.asarray(opt_sim_median.reshape(-1,1))
obs = np.asarray(scaler_gwl.inverse_transform(Y_opt.reshape(-1,1)))
err = sim-obs
meanTrainingGWL = np.mean(np.asarray(TrainingData['GWL']))
meanStopGWL = np.mean(np.asarray(StopData['GWL']))
err_nash = obs - np.mean([meanTrainingGWL, meanStopGWL])
r = stats.linregress(sim[:,0], obs[:,0])
print("total elapsed time = {}".format(datetime.datetime.now()-time1))
print("(pp = {}) elapsed time = {}".format(pp,datetime.datetime.now()-time_single))
return (1 - ((np.sum(err ** 2)) / (np.sum((err_nash) ** 2)))) + r.rvalue ** 2 #NSE+R²: (max = 2)
def simulate_testset(pp,densesize_int, seqlength_int, batchsize_int, filters_int):
# fixed settings for all experiments
GLOBAL_SETTINGS = {
'pp': pp,
'batch_size': batchsize_int, #16-128
'kernel_size': 3, #must be odd!
'dense_size': densesize_int,
'filters': filters_int,
'seq_length': seqlength_int,
'clip_norm': True,
'clip_value': 1,
'epochs': 100,
'learning_rate': 1e-3,
'test_start': pd.to_datetime('02012012', format='%d%m%Y'),
'test_end': | pd.to_datetime('28122015', format='%d%m%Y') | pandas.to_datetime |
from datetime import time
import numpy as np
import pytest
from pandas import DataFrame, date_range
import pandas._testing as tm
class TestBetweenTime:
def test_between_time(self, close_open_fixture):
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
inc_start, inc_end = close_open_fixture
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert t >= stime
else:
assert t > stime
if inc_end:
assert t <= etime
else:
assert t < etime
result = ts.between_time("00:00", "01:00")
expected = ts.between_time(stime, etime)
tm.assert_frame_equal(result, expected)
# across midnight
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert (t >= stime) or (t <= etime)
else:
assert (t > stime) or (t <= etime)
if inc_end:
assert (t <= etime) or (t >= stime)
else:
assert (t < etime) or (t >= stime)
def test_between_time_raises(self):
# GH#20725
df = | DataFrame([[1, 2, 3], [4, 5, 6]]) | pandas.DataFrame |
from flask import Blueprint, request, jsonify, make_response, url_for
from flask.views import MethodView
from io import StringIO
from marshmallow import ValidationError
import pandas as pd
from sfa_api import spec
from sfa_api.utils import storage
from sfa_api.schema import (ObservationSchema, ObservationLinksSchema,
ObservationValueSchema, ObservationPostSchema)
class AllObservationsView(MethodView):
def get(self, *args):
"""
---
summary: List observations.
description: List all observations that the user has access to.
tags:
- Observations
responses:
200:
description: A list of observations
content:
application/json:
schema:
type: array
items:
$ref: '#/components/schemas/ObservationMetadata'
401:
$ref: '#/components/responses/401-Unauthorized'
"""
observations = storage.list_observations()
return ObservationSchema(many=True).jsonify(observations)
def post(self, *args):
"""
---
summary: Create observation.
tags:
- Observations
description: Create a new Observation by posting metadata.
requestBody:
description: JSON representation of an observation.
required: True
content:
application/json:
schema:
$ref: '#/components/schemas/ObservationDefinition'
responses:
201:
description: Observation created successfully
content:
application/json:
schema:
$ref: '#/components/schemas/ObservationMetadata'
400:
$ref: '#/components/responses/400-BadRequest'
401:
$ref: '#/components/responses/401-Unauthorized'
"""
data = request.get_json()
try:
observation = ObservationPostSchema().loads(data)
except ValidationError as err:
return jsonify(err.messages), 400
else:
obs_id = storage.store_observation(observation)
response = make_response('Observation created.', 201)
response.headers['Location'] = url_for('observations.single',
obs_id=obs_id)
return response
class ObservationView(MethodView):
def get(self, obs_id, **kwargs):
"""
---
summary: Get Observation options.
description: List options available for Observation.
tags:
- Observations
responses:
200:
description: Observation options retrieved successfully.
content:
application/json:
schema:
$ref: '#/components/schemas/ObservationLinks'
400:
$ref: '#/components/responses/400-BadRequest'
401:
$ref: '#/components/responses/401-Unauthorized'
404:
$ref: '#/components/responses/404-NotFound'
"""
observation = storage.read_observation(obs_id)
if observation is None:
return 404
return ObservationLinksSchema().jsonify(observation)
def delete(self, obs_id, *args):
"""
---
summary: Delete observation.
description: Delete an Observation, including its values and metadata.
tags:
- Observations
parameters:
- $ref: '#/components/parameters/obs_id'
responses:
200:
description: Observation deleted successfully.
401:
$ref: '#/components/responses/401-Unauthorized'
404:
$ref: '#/components/responses/404-NotFound'
"""
deletion_result = storage.delete_observation(obs_id)
return deletion_result
class ObservationValuesView(MethodView):
def get(self, obs_id, *args):
"""
---
summary: Get Observation data.
description: Get the timeseries values from the Observation entry.
tags:
- Observations
parameters:
- $ref: '#/components/parameters/obs_id'
responses:
200:
content:
application/json:
schema:
type: array
items:
$ref: '#/components/schemas/ObservationValue'
401:
$ref: '#/components/responses/401-Unauthorized'
404:
$ref: '#/components/responses/404-NotFound'
"""
errors = []
start = request.args.get('start', None)
end = request.args.get('end', None)
if start is not None:
try:
start = pd.Timestamp(start)
except ValueError:
errors.append('Invalid start date format')
if end is not None:
try:
end = pd.Timestamp(end)
except ValueError:
errors.append('Invalid end date format')
if errors:
return jsonify({'errors': errors}), 400
values = storage.read_observation_values(obs_id, start, end)
return ObservationValueSchema(many=True).jsonify(values)
def post(self, obs_id, *args):
"""
---
summary: Add Observation data.
description: Add new timeseries values to the Observation entry.
tags:
- Observations
parameters:
- $ref: '#/components/parameters/obs_id'
requestBody:
required: True
content:
application/json:
schema:
type: array
items:
$ref: '#/components/schemas/ObservationValue'
text/csv:
schema:
type: string
description: |
Text file with fields separated by ',' and
lines separated by '\\n'. The first line must
be a header with the following fields:
timestamp, value, quality_flag. Timestamp must be
an ISO 8601 datetime, value may be an integer or float,
quality_flag may be 0 or 1 (indicating the value is not
to be trusted).
example: |-
timestamp,value,quality_flag
2018-10-29T12:04:23Z,32.93,0
responses:
201:
$ref: '#/components/responses/201-Created'
400:
$ref: '#/components/responses/400-BadRequest'
401:
$ref: '#/components/responses/401-Unauthorized'
404:
$ref: '#/components/responses/404-NotFound'
"""
# Check content-type and parse data conditionally
if request.content_type == 'application/json':
raw_data = request.get_json()
try:
raw_values = raw_data['values']
except (TypeError, KeyError):
return 'Supplied JSON does not contain "values" field.', 400
try:
observation_df = pd.DataFrame(raw_values)
except ValueError:
return 'Malformed JSON', 400
elif request.content_type == 'text/csv':
raw_data = StringIO(request.get_data(as_text=True))
try:
observation_df = | pd.read_csv(raw_data, comment='#') | pandas.read_csv |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.11.1
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %%
import pandas as pd
import numpy as np
import seaborn as sns
import sqlite3
from IPython.core.display import display, HTML
# %%
con = sqlite3.connect('../data/corpus.sqlite3')
cur = con.cursor()
# %%
#cur.execute("select * from Annotations where instr(Body, ':-*') > 0")
cur.execute("select * from Annotations_consolidated limit 10")
# %% [markdown]
# # Vocabulary
#
# forum: All posts to one article?
#
# thread: ?
#
# First round: Dry run - Not in the dataset
#
# Second round: 1,000 posts (randomly selected) were annotated for each of the nine labels.
#
# Third round:
#
# 1. 2,599 posts from 10 articles were annotated for each of the nine labels (selected to increase the frequency of negative sentiment, inappropriate, discriminating, and off-topic)
# 1. 5,737 posts were annotated regarding personal stories (selected from "share your thoughts" section)
# 1. 2,439 posts were annotated regarding feedback (selected from a sample where moderators had already answered or that were predicted as "needs feedback" by an existing model)
#
# A maximum of 1010 articles has at least one labeled post (for labels excluding feedback and personal stories (there may be more for these two))
# %% [markdown]
# ## Posts
# %%
df_posts = pd.read_sql_query("select * from Posts", con)
df_posts.columns = ["id_post", "id_parent_post", "id_article", "id_user", "created_at_string", "status", "headline", "body", "positive_votes", "negative_votes"]
df_posts["created_at"] = pd.to_datetime(df_posts.created_at_string)
# %%
df_posts
# %%
df_posts.describe(datetime_is_numeric=True)
# %% [markdown]
# There are posts with a date before the dataset's time frame. Let us investigate them further:
# %%
df_early_posts = df_posts.query("created_at < '2015-06-01'").copy()
df_early_posts
# %% [markdown]
# Are posts ordered by creation time? Therefore, what is the maximum id_post?
# %%
df_early_posts.describe().id_post
# %%
df_posts.loc[1842, "body"]
# %% [markdown]
# Posts after the official time frame of the dataset:
# %%
df_late_posts = df_posts.query("created_at > '2016-05-31'").copy()
df_late_posts
# %%
df_late_posts.id_article.nunique()
# %%
df_early_posts.id_post.hist()
# %%
df_late_posts.id_post.hist()
# %%
df_posts.info()
# %%
df_posts.status.unique()
# %%
df_posts.isnull().sum()
# %%
df_posts.query("body == ''").shape
# %%
df_posts.query("headline == ''").shape
# %%
df_posts.query("headline == '' and body == ''")
# %% [markdown]
# Check for posts where Body and Headline are empty string or None/NaN: \
# (`headline != headline` works because NaN never compares equal to itself)
# %%
df_posts.query("(headline == '' or headline != headline) and (body == '' or body != body)")
# %%
# %%
df_posts[df_posts.body.isna()]
# %% [markdown]
# ## Articles
# %%
df_articles = pd.read_sql_query("select * from Articles", con)
df_articles.columns = ['id_article', 'path', 'publishing_date_string', 'title', 'body']
df_articles["publishing_date"] = pd.to_datetime(df_articles.publishing_date_string)
df_articles.head()
# %%
is_newsroom = df_articles.path.str.split("/", n=1, expand=True).loc[:,0]=="Newsroom"
df_articles[is_newsroom]
# %% [markdown]
# How many articles do we have per main category?
# %%
df_articles.path.str.split("/", n=1, expand=True).loc[:,0].value_counts()
# %% [markdown]
# What is Kiaroom???
# %%
is_kiaroom = df_articles.path.str.split("/", n=1, expand=True).loc[:,0] == "Kiaroom"
df_articles[is_kiaroom]
# %%
df_articles.describe(datetime_is_numeric=True)
# %% [markdown]
# ### Time on articles
# %%
df_early_articles = df_articles.query("publishing_date < '2015-06-01'").copy()
df_early_articles.head()
# %%
df_early_articles.shape
# %%
df_early_articles.id_article.nunique()
# %%
# %% [markdown]
# ## Annotations
#
# ### Consolidated
# %%
df_annotations = pd.read_sql_query("select * from Annotations_consolidated", con)
df_annotations.columns = df_annotations.columns.str.lower()
df_annotations.head()
# %%
df_annotations.describe()
# %% [markdown]
# ### Pure annotations
# %%
df_annotations_pure = pd.read_sql_query("select * from Annotations", con)
df_annotations_pure.columns = df_annotations_pure.columns.str.lower()
df_annotations_pure.head(20)
# %%
df_annotations_pure.groupby("id_annotator").category.value_counts()
# %% [markdown]
# Annotator 1 and 2 annotated in all rounds. Annotator 3 only annotated in the second round, whereas annotator 4 only annotated in round three.
#
# Checking annotations for round 3: Annotator 1: 2594-1000 = 1594, Annotator 2: 1513-1000 = 513, Annotator 4: 492; Overall annotations in round 3: 1594 + 513 + 492 = 2599
# %% [markdown]
# ## Categories
# %%
df_categories = | pd.read_sql_query("select * from Categories", con) | pandas.read_sql_query |
import os
import pickle
import numpy as np
import xgboost as xgb
import pandas as pd
from bayes_opt import BayesianOptimization
from .xgb_callbacks import callback_overtraining, early_stop
from .xgboost2tmva import convert_model
import warnings
# Effective RMS evaluation function for xgboost
def evaleffrms(preds, dtrain, c=0.683):
labels = dtrain.get_label()
# return a pair metric_name, result. The metric name must not contain a colon (:) or a space
# since preds are margin(before logistic transformation, cutoff at 0)
x = np.sort(preds / labels, kind="mergesort")
m = int(c * len(x)) + 1
effrms = np.min(x[m:] - x[:-m]) / 2.0
return "effrms", effrms # + 10*(max(np.median(preds/labels), np.median(labels/preds)) - 1)
# The space of hyperparameters for the Bayesian optimization
#hyperparams_ranges = {'min_child_weight': (1, 30),
# 'colsample_bytree': (0.1, 1),
# 'max_depth': (2, 20),
# 'subsample': (0.5, 1),
# 'gamma': (0, 20),
# 'reg_alpha': (0, 10),
# 'reg_lambda': (0, 20)}
# The default xgboost parameters
#xgb_default = {'min_child_weight': 1,
# 'colsample_bytree': 1,
# 'max_depth': 6,
# 'subsample': 1,
# 'gamma': 0,
# 'reg_alpha': 0,
# 'reg_lambda': 1}
def format_params(params):
""" Casts the hyperparameters to the required type and range.
"""
p = dict(params)
p['min_child_weight'] = p["min_child_weight"]
p['colsample_bytree'] = max(min(p["colsample_bytree"], 1), 0)
p['max_depth'] = int(p["max_depth"])
# p['subsample'] = max(min(p["subsample"], 1), 0)
p['gamma'] = max(p["gamma"], 0)
# p['reg_alpha'] = max(p["reg_alpha"], 0)
p['reg_lambda'] = max(p["reg_lambda"], 0)
return p
def merge_two_dicts(x, y):
""" Merge two dictionaries.
Writing such a function is necessary in Python 2.
In Python 3, one can just do:
d_merged = {**d1, **d2}.
"""
z = x.copy() # start with x's keys and values
z.update(y) # modifies z with y's keys and values & returns None
return z
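# Usage sketch (illustrative values): later values overwrite earlier ones on key clashes,
# mirroring the Python 3 idiom {**x, **y}:
#   merge_two_dicts({"max_depth": 6}, {"max_depth": 8, "gamma": 2.0})
#   # -> {"max_depth": 8, "gamma": 2.0}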
class XgboFitter(object):
"""Fits a xgboost classifier/regressor with Bayesian-optimized hyperparameters.
Public attributes:
Private attributes:
_random_state (int): seed for random number generation
"""
def __init__(self, out_dir,
random_state = 2018,
num_rounds_max = 3000,
num_rounds_min = 0,
early_stop_rounds = 100,
nthread = 16,
regression = False,
useEffSigma =True
):
"""The __init__ method for XgboFitter class.
Args:
data (pandas.DataFrame): The data frame containing the features
and target.
X_cols (:obj:`list` of :obj:`str`) : Names of the feature columns.
y_col (str) : Name of the colum containing the target of the binary
classification. This column has to contain zeros and
ones.
"""
self._out_dir = out_dir
pkl_file = open(out_dir+'/param_range.pkl', 'rb')
global hyperparams_ranges
hyperparams_ranges= pickle.load(pkl_file)
pkl_file.close()
pkl_file = open(out_dir+'/param_default.pkl', 'rb')
global xgb_default
xgb_default= pickle.load(pkl_file)
pkl_file.close()
if not os.path.exists(os.path.join(out_dir, "cv_results")):
os.makedirs(os.path.join(out_dir, "cv_results"))
self._random_state = random_state
self._num_rounds_max = num_rounds_max
self._num_rounds_min = num_rounds_min
self._early_stop_rounds = early_stop_rounds
self.params_base = {
'silent' : 1,
'verbose_eval': 0,
'seed' : self._random_state,
'nthread' : nthread,
'objective' : 'reg:linear',
}
if regression:
xgb_default['base_score'] = 1  # for regression the base_score should be 1, not 0.5; with enough iterations this will not matter much
if useEffSigma:
self._cv_cols = ["train-effrms-mean", "train-effrms-std",
"test-effrms-mean", "test-effrms-std"]
else:
self._cv_cols = ["train-rmse-mean", "train-rmse-std",
"test-rmse-mean", "test-rmse-std"]
else:
self._cv_cols = ["train-auc-mean", "train-auc-std", "test-auc-mean", "test-auc-std"]
self.params_base["objective"] = "binary:logitraw"
self.params_base["eval_metric"] = "auc"
self._regression = regression
self._useEffSigma = useEffSigma
# Increment the random state by the number of previously done
# experiments so we don't use the same numbers twice
summary_file = os.path.join(out_dir, "summary.csv")
if os.path.isfile(summary_file):
df = pd.read_csv(summary_file)
self._random_state = self._random_state + len(df)
# Set up the Bayesian optimization
self._bo = BayesianOptimization(self.evaluate_xgb, hyperparams_ranges, random_state=self._random_state)
# This list will memorize the number of rounds that each step in the
# Bayesian optimization was trained for before early stopping gets
# triggered. This way, we can train our final classifier with the
# correct n_estimators matching to the optimal hyperparameters.
self._early_stops = []
# This dictionary will hold the xgboost models created when running
# this training class.
self._models = {}
self._cv_results = []
self._cvi = 0
#
self._callback_status = []
self._tried_default = False
# Load the summary file if it already exists in the out_dir
if os.path.isfile(summary_file):
self._load_data()
def _load_data(self):
summary_file = os.path.join(self._out_dir, "summary.csv")
df = | pd.read_csv(summary_file) | pandas.read_csv |
#realtor_graph.py
#from neo4j_connect_2 import NeoSandboxApp
#import neo4j_connect_2 as neo
#import GoogleServices as google
#from pyspark.sql import SparkSession
#from pyspark.sql.functions import struct
from cgitb import lookup
import code
from dbm import dumb
from doctest import master
from hmac import trans_36
import mimetypes
from platform import node
from pprint import pprint
from pty import master_open
from re import sub
from unittest.util import unorderable_list_difference
from urllib.parse import non_hierarchical
from neomodel import (config, StructuredNode, StringProperty, IntegerProperty,
UniqueIdProperty, RelationshipTo, BooleanProperty, EmailProperty, Relationship,db)
import pandas as pd
#import NeoNodes as nn
#import GoogleServices
import neo4jClasses
#import sparkAPI as spark
import neoModelAPI as neo
import glob
import os
import json
import numpy as np
#from neoModelAPI import NeoNodes as nn
class DataUploadFunctions():
def upload_df(self,df):
#df.apply(lambda x: pprint(str(x) + str(type(x))))
node_list = df.apply(lambda x: neo.neoAPI.update(x))
#pprint(node_list)
return node_list
def map_to_df(self,df1,df2,lookup_value :str, lookup_key: str):
df1[lookup_value] = df1[lookup_key]
#pprint(df1.columns)
#pprint(df1)
val = df1[lookup_value].replace(dict(zip(df2[lookup_key], df2[lookup_value])))
return val
def set_relationships(self,source_node, target_node):
#pprint(self.df.columns)
#pprint(source_node)
rel = neo.neoAPI.create_relationship(source = source_node ,target = target_node)
return rel
class DataPipelineFunctions():
def write_df_to_csv(self,df,path: str):
cwd = os.getcwd()
path = os.sep.join([cwd,path])
with open(path,'w') as f:
df.to_csv(path, index=False)
return path
def create_city_nodes(self,df):
city_nodes = df['city_name'].apply(lambda x :neo.neoAPI.create_city_node(name = x))
return city_nodes
def create_url_nodes(self,df):
url_nodes = df['root_realtor_url'].apply(lambda x :neo.neoAPI.create_realtor_search_url_node(url= x))
return url_nodes
def create_root_nodes(self,df):
root_nodes = df['root_realtor_url'].apply(lambda x :neo.neoAPI.create_root_node(url= x))
return root_nodes
def create_country_nodes(self,df):
country_nodes = df.apply(lambda x :neo.neoAPI.create_country_node(code = x.country_code, name = x.country_name),axis =1)
return country_nodes
def return_unique_country_df(self,df):
df = df.drop_duplicates(subset=['country_name']).copy()
df.drop(df.columns.difference(['country_node','state_node','country_name', 'country_code','state_name']), 1, inplace=True)
#pprint(df)
return df
def create_state_nodes(self,df):
state_nodes = df.apply(lambda x :neo.neoAPI.create_state_node(code = x.state_code, name = x.state_name),axis =1)
return state_nodes
def return_unique_state_df(self,df):
df = df.drop_duplicates(subset=['state_name']).copy()
df.drop(df.columns.difference(['state_node','country_node','country_code','state_name','country_name','state_code']), 1, inplace=True)
#pprint(df)
return df
def rename_columns(self,df, mapper = {'city': 'city_name', 'state': 'state_code','realtor_url': 'root_realtor_url'}):
return df.rename(columns = mapper)
def add_country_code(self,country_code = "USA"):
return country_code
def add_country_name(self,country_name = "United States of America"):
return country_name
def upload_df(self,df):
#df.apply(lambda x: pprint(str(x) + str(type(x))))
node_list = df.apply(lambda x: neo.neoAPI.update(x))
pprint(node_list)
return node_list
#df['server_node'] = node_list
#pprint(df)
def set_url_relationships(self):
#pprint(self.df.columns)
update_list = self.df.apply(lambda x: neo.neoAPI.create_relationship(source = x.url_node.city,target = x.city_node), axis=1)
pprint(update_list)
return update_list
#rel = self.df.url.connect(self.df.city)
def set_city_relationships(self):
#pprint(self.df.columns)
update_list = self.df.apply(lambda x: neo.neoAPI.create_relationship(source = x.city_node.country,target = x.country_node), axis=1)
update_list = self.df.apply(lambda x: neo.neoAPI.create_relationship(source = x.city_node.state,target = x.state_node), axis=1)
pprint(update_list)
#rel = self.df.url.connect(self.df.city)
def set_state_relationships(self):
#pprint(self.df.columns)
neo.neoAPI.create_relationship(source = self.unique_state_nodes.state_node[0].country,target = self.unique_state_nodes.country_node[0])
#update_list = self.unique_state_nodes.apply(lambda x: neo.neoAPI.create_relationship(source = x.state_node.country,target = x.country_node.name), axis=1)
#pprint(update_list)
#rel = self.df.url.connect(self.df.city)
def group_by_state(self):
grouped = self.df.groupby(by = "state_name")
def load_data_to_pandas_df(self,file_path = None):
if file_path != None:
with open (file_path) as file:
df = pd.read_json(file)
return df
def nodify_city_column(self):
self.df['city_node'] = self.df['city'].apply(lambda x : neo.neoAPI.create_city_node(name = x))
#pprint(df.city_nodes)
def nodify_states_column(self):
unique_states = self.df.drop_duplicates(subset=['state']).copy()
#pprint(state_dict)
unique_states['state_node'] = unique_states.apply(lambda x: neo.neoAPI.create_state_node(name = x.state_name, code = x.state), axis=1)
#pprint(unique_states)
#self.df['state_nodes'] = unique_states['state_nodes'] where unique_states[state_name] = self.df_stateName
self.df["state_node"] = self.df['state_name']
#self.df['state_node'] =
#pprint(self.df['state_name'].map(unique_states))
self.df['state_node'] = self.df['state_node'].replace(dict(zip(unique_states.state_name, unique_states.state_node)))
#pprint(self.df)
#mask = dfd['a'].str.startswith('o')
#self.df['state_nodes'] = self.df.apply(lambda x: neo.create_state_node(name = x.state_name, code = x.state) if x not in states_dict else states_dict[x], axis=1)
def nodify_url_column(self):
self.df['url_node'] = self.df['realtor_url'].apply(lambda x : neo.neoAPI.create_url_node(url = x, searched= False))
def get_cwd():
cwd = os.getcwd()
return cwd
def get_files(cwd =os.getcwd(), input_directory = 'extras'):
path = os.sep.join([cwd,input_directory])
file_list= [f for f in glob.glob(path + "**/*.json", recursive=True)]
return file_list
def instantiate_neo_model_api():
uri = "7a92f171.databases.neo4j.io"
user = "neo4j"
psw = 'RF4Gr2IJTNhHlW6HOrLDqz_I2E2Upyh7o8paTwfnCxg'
return neo.neoAPI.instantiate_neo_model_session(uri=uri,user=user,psw=psw)
def prepare_data_pipeline():
pipeline_functions = DataPipelineFunctions()
master_df = pipeline_functions.load_data_to_pandas_df()
master_df['country_name'] = pipeline_functions.add_country_name()
master_df['country_code'] = pipeline_functions.add_country_code()
master_df = pipeline_functions.rename_columns(master_df)
master_df['city_node'] = pipeline_functions.create_city_nodes(master_df)
master_df['url_node'] = pipeline_functions.create_url_nodes(master_df)
master_df['root_node'] = pipeline_functions.create_root_nodes(master_df)
master_df_path = pipeline_functions.write_df_to_csv(master_df,'master_df.csv')
state_df = pipeline_functions.return_unique_state_df(master_df)
state_df['state_node'] = pipeline_functions.create_state_nodes(state_df)
state_df_path = pipeline_functions.write_df_to_csv(state_df,'state_df.csv')
country_df = pipeline_functions.return_unique_country_df(master_df)
country_df['country_node'] = pipeline_functions.create_country_nodes(country_df)
country_df_path = pipeline_functions.write_df_to_csv(country_df,'country.csv')
#upload nodes
return {"master_df" : master_df, 'state_df' : state_df, 'country_df': country_df}
def load_json_data(file):
f = open (file, "r")
# Reading from file
data = json.loads(f.read())
return data
def json_pipeline(file_list, master_subject_table):
case_counter = 0
for file in file_list:
data = load_json_data(file=file)
data = data['results']
#pprint(data)
#pprint(data[0])
#filtered_data = filter_json_data(json_data = data, filter = filter)
# Creating the case nodes transaction nodes and df
data = clean_json_data(data)
case_data = stringify_json_values(data)
case_data = pandify_case_data(case_data)
case_data = nodify_case_data(case_data = case_data)
# Creating the subject nodes transaction nodes and df
subject_list = slice_subject_data(data)
subject_list = identify_unique_subjects(subject_list)
subject_lookup_table = create_subject_lookup_table(subject_list)
master_subject_table = integrate_to_master_table(subject_lookup_table,master_subject_table)
#pprint(master_subject_table.duplicated())
case_counter = case_counter + len(case_data)
master_subject_table = nodify_subjects(master_subject_table)
#pprint(case_data)
#pprint(master_subject_table['transaction'])
# let's save the data to the database
master_subject_table = submit_subjects_to_db(master_subject_table)
case_data = submit_cases_to_db(case_data = case_data)
# Create Relationships
relationship_list= create_relationship_table(case_data=case_data, master_subject_table=master_subject_table)
def submit_cases_to_db(case_data):
#unsubmitted = master_subject_table[master_subject_table.notna()]
### in theory none of the cases would have been submitted because I am pulling them from file. There is no need to check; just submit
#non_submitted_nodes = case_data[case_data['submitted'].isna()].copy()
#pprint(non_submitted_nodes)
##pprint(non_submitted_nodes)
#if non_submitted_nodes.empty:
# return case_data
#else:
case_data['transaction'] = case_data['transaction'].apply(lambda x: neo.neoAPI.update(x))
#Assume all are submitted..
case_data['submitted'] = True
#test = non_submitted_nodes.iloc[32]['transaction']
#return_obj = neo.neoAPI.update(test)
#case_data.update(non_submitted_nodes)
return case_data
# Relationships must be created after saving to the df
#relationships = create_relationship_table(case_data, master_subject_table)
def submit_subjects_to_db(master_subject_table):
#unsubmitted = master_subject_table[master_subject_table.notna()]
#pprint(master_subject_table)
#non_submitted_nodes=master_subject_table[[master_subject_table['submitted'] == np.nan]]
non_submitted_nodes = master_subject_table[master_subject_table['submitted'].isna()].copy()
#pprint(non_submitted_nodes)
if non_submitted_nodes.empty:
return master_subject_table
else:
#pprint(non_submitted_nodes)
non_submitted_nodes['transaction'] = non_submitted_nodes['transaction'].apply(lambda x: neo.neoAPI.update(x))
non_submitted_nodes['submitted'] = True
#test = non_submitted_nodes.iloc[32]['transaction']
#return_obj = neo.neoAPI.update(test)
master_subject_table.update(non_submitted_nodes)
#pprint(master_subject_table)
return master_subject_table
def tester():
return "Hello Dolly"
def create_relationship_table(case_data, master_subject_table):
#pprint(case_data[])
#test = master_subject_table['subject']
# select
relationship_list = []
for row in range(len(case_data)):
unique_dataframe = (master_subject_table[master_subject_table['subject'].isin(case_data['subject_list'][row])])
#pprint(unique_dataframe)
for subject_row in range(len(unique_dataframe)):
case = case_data.iloc[row]['transaction']
subject = unique_dataframe.iloc[subject_row]['transaction']
#create relationship
#pprint(case)
#pprint(subject)
relationship = neo.neoAPI.create_relationship(case.subject_relationship,subject)
#pprint(relationship)
relationship_list.append(relationship)
return relationship_list
#create relationship between the case and each uid in the unique_data_frame_transaction_list
pprint(unique_dataframe)
## Creating the relation table
# Thoughts
# pass subject and case table
# case_subject list column
# where that list is in the master table
# return the subjects
# make a connection between each subject and the case for each uid in the returned table
# return a transaction list
# with the list commit a transaction for each
#
#case_data= filter_case_data(data)
def nodify_case_data(case_data):
#non_submitted_nodes = case_data[case_data.notna()]
non_submitted_nodes = case_data[case_data.notna().any(axis=1)]
#pprint(non_submitted_nodes)
case_nodes = non_submitted_nodes.apply(lambda x :neo.neoAPI.create_case_node(date = x['date'], dates= x['dates'],group = x['group'], name=x['id'], pdf= x['pdf'], shelf_id = x['shelf_id'], subject= x['subject'], title = x['title'], url = x['url'], subject_relationship=True), axis=1)
case_data['transaction'] = case_nodes
return case_data
def filter_case_data(data):
pprint(data[0])
def nodify_subjects(master_subject_table):
non_submitted_nodes = master_subject_table[master_subject_table.isna().any(axis=1)].copy()
#df[df.isna().any(axis=1)]
#pprint(non_submitted_nodes)
non_submitted_nodes['transaction'] = non_submitted_nodes['subject'].apply(lambda x :neo.neoAPI.create_subject_node(name = x))
master_subject_table.update(non_submitted_nodes)
return master_subject_table
def integrate_to_master_table(subject_lookup_table, master_subject_table):
#check_if subject in list is in subject of the table
# if so drop it from the temp table
# append what is left to the master table
#pprint(subject_lookup_table)
test = master_subject_table['subject']
unique_dataframe = (subject_lookup_table[~subject_lookup_table['subject'].isin(test)])
#pprint(unique_dataframe)
#duplicate_list = (master_subject_table[~master_subject_table['subject'].isin(subject_lookup_table['subject'])])
master_subject_table = | pd.concat([master_subject_table,unique_dataframe]) | pandas.concat |
"""Compare different GNSS site velocity Where datasets
Description:
------------
A dictionary with datasets is used as input for this writer. The keys of the dictionary are station names.
Example:
--------
from where import data
from where import writers
# Read a dataset
dset = data.Dataset(rundate=rundate, tech=tech, stage=stage, dataset_name=name, dataset_id=dataset_id)
# Write dataset
writers.write_one('gnss_vel_comparison_report', dset=dset, do_report=False)
"""
# Standard library imports
import copy
from typing import Any, Dict
from pathlib import PosixPath
# External library imports
import numpy as np
import pandas as pd
# Midgard imports
from midgard.dev import plugins
from midgard.plot.matplotlib_extension import plot
# Where imports
from where.lib import config
from where.lib import log
from where.postprocessors.gnss_velocity_fields import gnss_velocity_fields
from where.writers._report import Report
FIGURE_FORMAT = "png"
FILE_NAME = __name__.split(".")[-1]
@plugins.register
def gnss_vel_comparison_report(dset: Dict[str, "Dataset"]) -> None:
"""Compare GNSS site velocity datasets
Args:
dset: Dictionary with station name as keys and the belonging Dataset as value
"""
dset_first = dset[list(dset.keys())[0]]
file_vars = {**dset_first.vars, **dset_first.analysis}
file_vars["solution"] = config.tech.gnss_vel_comparison_report.solution.str.lower()
# Generate figure directory to save figures generated for GNSS report
figure_dir = config.files.path("output_gnss_vel_comparison_report_figure", file_vars=file_vars)
figure_dir.mkdir(parents=True, exist_ok=True)
# Generate plots
_, dfs_day, dfs_month = _generate_dataframes(dset)
_plot_velocity_error(dfs_day, dfs_month, figure_dir, file_vars)
# Generate GNSS comparison report
path = config.files.path("output_gnss_vel_comparison_report", file_vars=file_vars)
with config.files.open_path(path, create_dirs=True, mode="wt") as fid:
rpt = Report(
fid, rundate=dset_first.analysis["rundate"], path=path, description="Comparison of GNSS SPV analyses"
)
rpt.title_page()
_add_to_report(rpt, figure_dir, dfs_day, dfs_month, file_vars)
rpt.markdown_to_pdf()
#
# AUXILIARY FUNCTIONS
#
def _apply(df: pd.core.frame.DataFrame, sample: str, func: str) -> pd.core.frame.DataFrame:
"""Resample dataframe and apply given function
Args:
df: Dataframe.
sample: Sample definition ("D": day, "M": month)
func: Function to be applied ("mean", "percentile", "rms", "std")
Returns:
Resampled dataframe by applying given function
"""
df_sampled = df.set_index("time_gps").resample(sample)
if func == "mean":
df_sampled = df_sampled.mean()
elif func == "percentile":
df_sampled = df_sampled.apply(lambda x: np.nanpercentile(x, q=95))
elif func == "rms":
df_sampled = df_sampled.apply(lambda x: np.sqrt(np.nanmean(np.square(x))))
elif func == "std":
df_sampled = df_sampled.std()
else:
log.fatal(f"Function '{func}' is not defined.")
return df_sampled
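# Usage sketch (hypothetical dataframe): given a station dataframe with a "time_gps"
# column and velocity columns, daily and monthly 95th-percentile series are obtained as
#   df_day = _apply(df, "D", "percentile")
#   df_month = _apply(df, "M", "percentile")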
def _add_to_report(
rpt: "Report",
figure_dir: PosixPath,
dfs_day: Dict[str, pd.core.frame.DataFrame],
dfs_month: Dict[str, pd.core.frame.DataFrame],
file_vars: Dict[str, Any],
) -> None:
"""Add figures and tables to report
Args:
rpt: Report object.
figure_dir: Figure directory.
dfs_day: Dictionary with function type as keys ('mean', 'percentile', 'rms', 'std') and a dictionary as
values. The dictionary has fields as keys (e.g. site_vel_h, site_vel_3d) and the belonging
dataframe as value with DAILY samples of 95th percentile and stations as columns.
dfs_month: Dictionary with function type as keys ('mean', 'percentile', 'rms', 'std') and a dictionary as
values. The dictionary has fields as keys (e.g. site_vel_h, site_vel_3d) and the belonging
dataframe as value with MONTHLY samples of 95th percentile and stations as columns.
file_vars: File variables used for file and plot title naming.
"""
text_def = {
"mean": "average",
"percentile": "95th percentile",
"std": "standard deviation",
"rms": "RMS",
}
field_def = {
"site_vel_east": "East site velocity component of topocentric coordinates",
"site_vel_north": "North site velocity component of topocentric coordinates",
"site_vel_up": "Up site velocity component of topocentric coordinates",
"site_vel_h": "2D site velocity",
"site_vel_3d": "3D site velocity",
}
for type_ in dfs_day.keys():
for sample in config.tech.gnss_vel_comparison_report.samples.list:
sample = sample.capitalize()
rpt.add_text(f"\n# {sample} {text_def[type_]} for given solutions\n\n")
if sample == "Daily":
for field in field_def.keys():
dfs_day[type_][field].index = dfs_day[type_][field].index.strftime("%d-%m-%Y")
rpt.add_text(f"Daily {text_def[type_]} {field.upper()} results in meter/second:")
rpt.write_dataframe_to_markdown(dfs_day[type_][field], format="6.3f", statistic=True)
elif sample == "Monthly":
for field in field_def.keys():
rpt.add_text(f"Monthly {text_def[type_]} {field.upper()} results in meter/second:")
rpt.write_dataframe_to_markdown(dfs_month[type_][field], format="6.3f")
# Add 2D and 3D velocity plots
for field in field_def.keys():
rpt.add_figure(
f"{figure_dir}/plot_{type_}_{field}_{sample.lower()}_{file_vars['date']}_{file_vars['solution'].lower()}.{FIGURE_FORMAT}",
caption=f"{text_def[type_].capitalize()} for {field_def[field]}.",
clearpage=True,
)
def _generate_dataframes(dset: Dict[str, "Dataset"]) -> Dict[str, pd.core.frame.DataFrame]:
"""Generate dataframe based on station datasets
The dataframe for each station in dictionary "dfs" has following columns:
site_vel_h: Horizontal site velocity
site_vel_east: Site velocity east component of topocentric coordinates
site_vel_north: Site velocity north component of topocentric coordinates
site_vel_up: Site velocity up component of topocentric coordinates
site_vel_3d: 3D site velocity
Example for "dfs" dictionary:
'hons': time.gps site_vel_h site_vel_3d
0 2019-03-01 00:00:00 0.301738 0.057244
1 2019-03-01 00:00:00 0.301738 0.057244
'krss': time.gps site_vel_h site_vel_3d
0 2019-03-01 00:00:00 0.710014 0.186791
1 2019-03-01 00:00:00 0.710014 0.186791
Example for "dfs_day" dictionary for "mean" key:
'mean':{
'site_vel_h': nabf vegs hons krss
time.gps
2019-03-01 1.368875 0.935687 1.136763 0.828754
2019-03-02 0.924839 0.728280 0.911677 0.854832
'site_vel_3d': nabf vegs hons krss
time.gps
2019-03-01 1.715893 1.147265 1.600330 0.976541
2019-03-02 1.533437 1.307373 1.476295 1.136991
}
Example for "dfs_month" dictionary for "mean" key:
'mean':{
'site_vel_h': nabf vegs hons krss
Mar-2019 1.186240 0.861718 1.095827 1.021354
Apr-2019 0.891947 0.850343 0.977908 0.971099
'site_vel_3d': nabf vegs hons krss
Mar-2019 1.854684 1.291406 1.450466 1.225467
Apr-2019 1.964404 1.706507 1.687994 1.500742
}
Args:
dset: Dictionary with station name as keys and the belonging Dataset as value
Returns:
Tuple with following entries:
| Element | Description |
|----------------------|--------------------------------------------------------------------------------------|
| dfs | Dictionary with station name as keys and the belonging dataframe as value with |
| | following dataframe columns: site_vel_h, site_vel_3d |
| dfs_day | Dictionary with function type as keys ('mean', 'percentile', 'rms', 'std') and a |
| | dictionary as values. The dictionary has fields as keys (e.g. site_vel_h, |
| | site_vel_3d) and the belonging dataframe as value with DAILY samples of 95th |
| | percentile and stations as columns. |
| dfs_month | Dictionary with function type as keys ('mean', 'percentile', 'rms', 'std') and a |
| | dictionary as values. The dictionary has fields as keys (e.g. site_vel_h, |
| | site_vel_3d) and the belonging dataframe as value with MONTHLY samples of 95th |
| | percentile and stations as columns. |
"""
dsets = dset
dfs = {}
fields = {
"site_vel_east": pd.DataFrame(),
"site_vel_north": pd.DataFrame(),
"site_vel_up": pd.DataFrame(),
"site_vel_h": pd.DataFrame(),
"site_vel_3d": | pd.DataFrame() | pandas.DataFrame |
"""
Ministry of Land, Infrastructure and Transport (MOLIT) Open API
molit (Ministry of Land, Infrastructure and Transport)
1. Transaction class: real estate actual transaction price lookup
- AptTrade: apartment sales transaction data
- AptTradeDetail: detailed apartment sales transaction data
- AptRent: apartment jeonse/monthly rent data
- AptOwnership: apartment pre-sale right resale report data
- OffiTrade: officetel sales report data
- OffiRent: officetel jeonse/monthly rent report data
- RHTrade: row house / multi-family sales transaction data
- RHRent: row house / multi-family jeonse/monthly rent data
- DHTrade: detached / multi-household house sales data
- DHRent: detached / multi-household house jeonse/monthly rent data
- LandTrade: land sales report data
- BizTrade: commercial/business-use real estate sales report data
2. Building class: building register information service
01 building register basic overview lookup: getBrBasisOulnInfo
02 building register consolidated summary lookup: getBrRecapTitleInfo
03 building register title section lookup: getBrTitleInfo
04 building register floor overview lookup: getBrFlrOulnInfo
05 building register attached lot number lookup: getBrAtchJibunInfo
06 building register exclusive/common area lookup: getBrExposPubuseAreaInfo
07 building register sewage treatment facility lookup: getBrWclfInfo
08 building register housing price lookup: getBrHsprcInfo
09 building register exclusive section lookup: getBrExposInfo
10 building register land-use zone/district lookup: getBrJijiguInfo
"""
import datetime
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
class Transaction:
"""
Class for querying real estate actual transaction prices.
"""
def __init__(self, serviceKey):
"""
Initialize with the Service Key issued by the public data portal.
"""
# Initialize the Open API service key
self.serviceKey = serviceKey
# Validate the ServiceKey
self.urlAptTrade = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcAptTrade?serviceKey="
+ self.serviceKey)
self.urlAptTradeDetail = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcAptTradeDev?serviceKey="
+ self.serviceKey)
self.urlAptRent = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcAptRent?serviceKey="
+ self.serviceKey)
self.urlAptOwnership = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcSilvTrade?serviceKey="
+ self.serviceKey)
self.urlOffiTrade = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcOffiTrade?serviceKey="
+ self.serviceKey)
self.urlOffiRent = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcOffiRent?serviceKey="
+ self.serviceKey)
self.urlRHTrade = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcRHTrade?serviceKey="
+ self.serviceKey)
self.urlRHRent = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcRHRent?serviceKey="
+ self.serviceKey)
self.urlDHTrade = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcSHTrade?serviceKey="
+ self.serviceKey)
self.urlDHRent = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcSHRent?serviceKey="
+ self.serviceKey)
self.urlLandTrade = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcLandTrade?serviceKey="
+ self.serviceKey)
self.urlBizTrade = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcNrgTrade?serviceKey="
+ self.serviceKey)
# Open API URL Dict
urlDict = {
"아파트매매 실거래자료 조회": self.urlAptTrade,
"아파트매매 실거래 상세 자료 조회": self.urlAptTradeDetail,
"아파트 전월세 자료 조회": self.urlAptRent,
"아파트 분양권전매 신고 자료 조회": self.urlAptOwnership,
"오피스텔 매매 신고 조회": self.urlOffiTrade,
"오피스텔 전월세 신고 조회": self.urlOffiRent,
"연립다세대 매매 실거래자료 조회": self.urlRHTrade,
"연립다세대 전월세 실거래자료 조회": self.urlRHRent,
"단독/다가구 매매 실거래 조회": self.urlDHTrade,
"단독/다가구 전월세 자료 조회": self.urlDHRent,
"토지 매매 신고 조회": self.urlLandTrade,
"상업업무용 부동산 매매 신고 자료 조회": self.urlBizTrade,
}
# Check that each service responds correctly
for serviceName, url in urlDict.items():
result = requests.get(url, verify=False)
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
te = xmlsoup.findAll("header")
if te[0].find("resultCode").text == "00":
print(f">>> {serviceName} 서비스가 정상 작동합니다.")
else:
print(f">>> {serviceName} 서비스키 미등록 오류입니다.")
# Initialize the region codes
# Source of the legal dong codes: https://code.go.kr
path_code = "https://raw.githubusercontent.com/WooilJeong/PublicDataReader/f14e4de3410cc0f798a83ee5934070d651cbd67b/docs/%EB%B2%95%EC%A0%95%EB%8F%99%EC%BD%94%EB%93%9C%20%EC%A0%84%EC%B2%B4%EC%9E%90%EB%A3%8C.txt"
code = pd.read_csv(path_code, encoding="cp949", sep="\t")
code = code.loc[code["폐지여부"] == "존재"]
code["법정구코드"] = list(map(lambda a: str(a)[:5], list(code["법정동코드"])))
self.code = code
def CodeFinder(self, name):
"""
The MOLIT actual transaction price Open API uses a region code consisting of the first 5 digits of the 10-digit legal dong code, which identify the district (gu).
This method looks up the district code to use with the API: it takes a region name string and returns the matching results as a pandas DataFrame.
"""
result = self.code[self.code["법정동명"].str.contains(name)][[
"법정동명", "법정구코드"
]]
result.index = range(len(result))
return result
def DataCollector(self, service, LAWD_CD, start_date, end_date):
"""
Query a service for a range of months.
Input: service query method, region code, start month (YYYYmm), end month (YYYYmm)
"""
start_date = datetime.datetime.strptime(str(start_date), "%Y%m")
start_date = datetime.datetime.strftime(start_date, "%Y-%m")
end_date = datetime.datetime.strptime(str(end_date), "%Y%m")
end_date = end_date + datetime.timedelta(days=31)
end_date = datetime.datetime.strftime(end_date, "%Y-%m")
ts = pd.date_range(start=start_date, end=end_date, freq="m")
date_list = list(ts.strftime("%Y%m"))
df = pd.DataFrame()
df_sum = pd.DataFrame()
for m in date_list:
print(">>> LAWD_CD :", LAWD_CD, "DEAL_YMD :", m)
DEAL_YMD = m
df = service(LAWD_CD, DEAL_YMD)
df_sum = pd.concat([df_sum, df])
df_sum.index = range(len(df_sum))
return df_sum
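    # Usage sketch (assumed instance name `molit`; LAWD_CD "11110" is used here only
    # as an example district code): collect apartment sale records for Jan-Mar 2019.
    #   df = molit.DataCollector(molit.AptTrade, "11110", 201901, 201903)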
def AptTrade(self, LAWD_CD, DEAL_YMD):
"""
01 아파트매매 실거래자료 조회
입력: 지역코드(법정동코드 5자리), 계약월(YYYYmm)
"""
# URL
url_1 = self.urlAptTrade + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
            df = pd.DataFrame()
'''
Copyright (c) 2021 <NAME>, <NAME>, Technical University of Denmark
'''
# Import modules
import os
import pandas as pd
import freesasa as fs
from Bio.PDB import PDBParser
import pkg_resources
import json
from natsort import natsort_keygen
# Path to resource files
naccess_config = pkg_resources.resource_filename(__name__, 'naccess.config')
with open(pkg_resources.resource_filename(__name__, 'HVdiff_table.json')) as data_file:
HVdiff_table = json.load(data_file)
# Define functions
def calc_RSA(filepath):
'''Runs freesasa on PDB.'''
classifier = fs.Classifier(naccess_config)
structure = fs.Structure(filepath, classifier)
result = fs.calc(structure)
area_list = get_residueAreas(result)
    return pd.DataFrame(area_list, columns=('Chain', 'Number', 'Wild', 'RSA'))
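# Usage sketch (assumes a local PDB file path and that get_residueAreas() is defined
# elsewhere in this module, as calc_RSA above expects):
#   rsa_df = calc_RSA("example.pdb")
#   print(rsa_df.head())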
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy import stats
from sklearn.metrics import mean_squared_error
import numpy as np
import torch
import torch.nn as nn
from copy import deepcopy
from numpy import inf
from math import exp, gamma
from datetime import timedelta
from sklearn.metrics import r2_score
import matplotlib.patheffects as PathEffects
from scipy.special import softmax
import warnings
import os
import math
from scipy.stats import pearsonr, spearmanr
warnings.simplefilter("ignore")
plt.style.use(['science'])
plt.rcParams["text.usetex"] = True
indicators = ['Population ages 65 and above (% of total population)', \
'Population ages 15-64 (% of total population)',\
'Population ages 0-14 (% of total population)', \
'People with basic handwashing facilities including soap and water (% of population)',\
'Average Yearly Temperature (C)',\
'O', 'B', 'B1','B2', 'B4', 'A3', 'A6', 'A7', 'A1a', 'A2', 'A2a',\
'Trade with China Exports + Import US$ billion 2018',\
'Air transport, passenger carried 2018 (million) WB',\
'Stringency Score Avg per day after 100 patients reported']
params = ['peaks diff', 'total cases', 'total deaths', 'cases/pop', 'deaths/pop', 'mortality', 'k new', 'a new', 'b new', 'g new', 'k dead', 'a dead', 'b dead', 'g dead']
df = pd.read_excel('correlation.xlsx', sheet_name='Raw Data (deaths)')
df.replace([np.inf, -np.inf, np.nan, ''], 0, inplace=True)
corrfunc = pearsonr
correlationdata = []; pdata = []
for i in indicators:
result = [corrfunc(df[p],df[i]) for p in params]
correlationdata.append([i] + [res[0] for res in result])
pdata.append([i] + [res[1] for res in result])
df2 = pd.DataFrame(correlationdata,columns=['Indicator']+params)
df2p = pd.DataFrame(pdata, columns=['Indicator']+params)
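# Follow-up sketch (column names as defined above): rank the indicators by the
# absolute Pearson correlation with total deaths and show the strongest ones.
ranked = df2.set_index('Indicator')['total deaths'].abs().sort_values(ascending=False)
print(ranked.head(10))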
from __future__ import division
from datetime import datetime
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
import pandas as pd
import numpy as np
import random
from nose.tools import assert_almost_equal as aae
import bt
import bt.algos as algos
def test_algo_name():
class TestAlgo(algos.Algo):
pass
actual = TestAlgo()
assert actual.name == 'TestAlgo'
class DummyAlgo(algos.Algo):
def __init__(self, return_value=True):
self.return_value = return_value
self.called = False
def __call__(self, target):
self.called = True
return self.return_value
def test_algo_stack():
algo1 = DummyAlgo(return_value=True)
algo2 = DummyAlgo(return_value=False)
algo3 = DummyAlgo(return_value=True)
target = mock.MagicMock()
stack = bt.AlgoStack(algo1, algo2, algo3)
actual = stack(target)
assert not actual
assert algo1.called
assert algo2.called
assert not algo3.called
def test_print_temp_data():
target = mock.MagicMock()
target.temp={}
target.temp['selected'] = ['c1','c2']
target.temp['weights'] = [0.5,0.5]
algo = algos.PrintTempData()
assert algo( target )
algo = algos.PrintTempData( 'Selected: {selected}')
assert algo( target )
def test_print_info():
target = bt.Strategy('s', [])
target.temp={}
algo = algos.PrintInfo()
assert algo( target )
algo = algos.PrintInfo( '{now}: {name}')
assert algo( target )
def test_run_once():
algo = algos.RunOnce()
assert algo(None)
assert not algo(None)
assert not algo(None)
def test_run_period():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunPeriod()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
target.now = None
assert not algo(target)
# run on first date
target.now = dts[0]
assert not algo(target)
# run on first supplied date
target.now = dts[1]
assert algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert not algo(target)
algo = algos.RunPeriod(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
# run on first date
target.now = dts[0]
assert not algo(target)
# first supplied date
target.now = dts[1]
assert not algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert algo(target)
# date not in index
target.now = datetime(2009, 2, 15)
assert not algo(target)
def test_run_daily():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunDaily()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('',[algo]),
data
)
target.data = backtest.data
target.now = dts[1]
assert algo(target)
def test_run_weekly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunWeekly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert not algo(target)
# new week
target.now = dts[3]
assert algo(target)
algo = algos.RunWeekly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert algo(target)
# new week
target.now = dts[3]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8),datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_monthly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunMonthly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert not algo(target)
# new month
target.now = dts[31]
assert algo(target)
algo = algos.RunMonthly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert algo(target)
# new month
target.now = dts[31]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_quarterly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunQuarterly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert not algo(target)
# new quarter
target.now = dts[90]
assert algo(target)
algo = algos.RunQuarterly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert algo(target)
# new quarter
target.now = dts[90]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_yearly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunYearly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert not algo(target)
# new year
target.now = dts[365]
assert algo(target)
algo = algos.RunYearly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert algo(target)
# new year
target.now = dts[365]
assert not algo(target)
def test_run_on_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunOnDate('2010-01-01', '2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
def test_run_if_out_of_bounds():
algo = algos.RunIfOutOfBounds(0.5)
dts = pd.date_range('2010-01-01', periods=3)
s = bt.Strategy('s')
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.temp['selected'] = ['c1', 'c2']
s.temp['weights'] = {'c1': .5, 'c2':.5}
s.update(dts[0])
s.children['c1'] = bt.core.SecurityBase('c1')
s.children['c2'] = bt.core.SecurityBase('c2')
s.children['c1']._weight = 0.5
s.children['c2']._weight = 0.5
assert not algo(s)
s.children['c1']._weight = 0.25
s.children['c2']._weight = 0.75
assert not algo(s)
s.children['c1']._weight = 0.24
s.children['c2']._weight = 0.76
assert algo(s)
s.children['c1']._weight = 0.75
s.children['c2']._weight = 0.25
assert not algo(s)
s.children['c1']._weight = 0.76
s.children['c2']._weight = 0.24
assert algo(s)
def test_run_after_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunAfterDate('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-03')
assert algo(target)
def test_run_after_days():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunAfterDays(3)
assert not algo(target)
assert not algo(target)
assert not algo(target)
assert algo(target)
def test_set_notional():
algo = algos.SetNotional('notional')
s = bt.FixedIncomeStrategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
notional = pd.Series(index=dts[:2], data=[1e6, 5e6])
s.setup( data, notional = notional )
s.update(dts[0])
assert algo(s)
assert s.temp['notional_value'] == 1e6
s.update(dts[1])
assert algo(s)
assert s.temp['notional_value'] == 5e6
s.update(dts[2])
assert not algo(s)
def test_rebalance():
algo = algos.Rebalance()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c1 = s['c1']
assert c1.value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000
assert c2.position == 10
assert c2.weight == 1.
def test_rebalance_with_commissions():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 999
assert s.capital == 99
c1 = s['c1']
assert c1.value == 900
assert c1.position == 9
assert c1.weight == 900 / 999.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 997
assert s.capital == 97
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 900
assert c2.position == 9
assert c2.weight == 900. / 997
def test_rebalance_with_cash():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
# set cash amount
s.temp['cash'] = 0.5
assert algo(s)
assert s.value == 999
assert s.capital == 599
c1 = s['c1']
assert c1.value == 400
assert c1.position == 4
assert c1.weight == 400.0 / 999
s.temp['weights'] = {'c2': 1}
# change cash amount
s.temp['cash'] = 0.25
assert algo(s)
assert s.value == 997
assert s.capital == 297
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 700
assert c2.position == 7
assert c2.weight == 700.0 / 997
def test_rebalance_updatecount():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.use_integer_positions(False)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4','c5'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 0.25, 'c2':0.25, 'c3':0.25, 'c4':0.25}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
assert s.value == 1000
assert s.capital == 0
# Update is called once when each weighted security is created (4)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 8
s.update(dts[1])
s.temp['weights'] = {'c1': 0.5, 'c2':0.5}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
# Update is called once for each weighted security before allocation (4)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 8
s.update(dts[2])
s.temp['weights'] = {'c1': 0.25, 'c2':0.25, 'c3':0.25, 'c4':0.25}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
# Update is called once for each weighted security before allocation (2)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 6
def test_rebalance_fixedincome():
algo = algos.Rebalance()
c1 = bt.Security('c1')
c2 = bt.CouponPayingSecurity('c2')
s = bt.FixedIncomeStrategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
coupons = pd.DataFrame(index=dts, columns=['c2'], data=0)
s.setup(data, coupons=coupons)
s.update(dts[0])
s.temp['notional_value'] = 1000
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 0.
assert s.notional_value == 1000
assert s.capital == -1000
c1 = s['c1']
assert c1.value == 1000
assert c1.notional_value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 0.
assert s.notional_value == 1000
assert s.capital == -1000*100
c2 = s['c2']
assert c1.value == 0
assert c1.notional_value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000*100
assert c2.notional_value == 1000
assert c2.position == 1000
assert c2.weight == 1.
def test_select_all():
algo = algos.SelectAll()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
    # make sure we don't keep nan
s.update(dts[1])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
    # if include_no_data is specified, then 2 are selected
algo2 = algos.SelectAll(include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectAll(include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_randomly_n_none():
algo = algos.SelectRandomly(n=None) # Behaves like SelectAll
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
    # make sure we don't keep nan
s.update(dts[1])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 1
assert 'c2' in selected
    # if include_no_data is specified, then 2 are selected
algo2 = algos.SelectRandomly(n=None, include_no_data=True)
assert algo2(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectRandomly(n=None, include_negative=True)
assert algo3(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_randomly():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100.)
data['c1'][dts[0]] = np.nan
data['c2'][dts[0]] = 95
data['c3'][dts[0]] = -5
s.setup(data)
s.update(dts[0])
algo = algos.SelectRandomly(n=1)
assert algo(s)
assert s.temp.pop('selected') == ['c2']
random.seed(1000)
algo = algos.SelectRandomly(n=1, include_negative=True)
assert algo(s)
assert s.temp.pop('selected') == ['c3']
random.seed(1009)
algo = algos.SelectRandomly(n=1, include_no_data=True)
assert algo(s)
assert s.temp.pop('selected') == ['c1']
random.seed(1009)
# If selected already set, it will further filter it
s.temp['selected'] = ['c2']
algo = algos.SelectRandomly(n=1, include_no_data=True)
assert algo(s)
assert s.temp.pop('selected') == ['c2']
def test_select_these():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
algo = algos.SelectThese( ['c1'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
    # make sure we don't keep nan
s.update(dts[1])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
    # if include_no_data is specified, then 2 are selected
algo2 = algos.SelectThese( ['c1', 'c2'], include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectThese(['c1', 'c2'], include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_where_all():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
s.setup(data, where = where)
s.update(dts[0])
algo = algos.SelectWhere('where')
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
    # make sure we don't keep nan
s.update(dts[1])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
    # if include_no_data is specified, then 2 are selected
algo2 = algos.SelectWhere('where', include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectWhere('where', include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_where():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
where.loc[ dts[1] ] = False
where['c1'].loc[ dts[2] ] = False
algo = algos.SelectWhere('where')
s.setup(data, where=where)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
s.update(dts[1])
assert algo(s)
assert s.temp['selected'] == []
s.update(dts[2])
assert algo(s)
assert s.temp['selected'] == ['c2']
def test_select_where_legacy():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
where.loc[ dts[1] ] = False
where['c1'].loc[ dts[2] ] = False
algo = algos.SelectWhere(where)
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
s.update(dts[1])
assert algo(s)
assert s.temp['selected'] == []
s.update(dts[2])
assert algo(s)
assert s.temp['selected'] == ['c2']
def test_select_regex():
s = bt.Strategy('s')
algo = algos.SelectRegex( 'c1' )
s.temp['selected'] = ['a1', 'c1', 'c2', 'c11', 'cc1']
assert algo( s )
assert s.temp['selected'] == ['c1', 'c11', 'cc1']
algo = algos.SelectRegex( '^c1$' )
assert algo( s )
assert s.temp['selected'] == ['c1']
def test_resolve_on_the_run():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'b1'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c2'][dts[2]] = -5
on_the_run = pd.DataFrame(index=dts, columns=['c'], data='c1')
on_the_run.loc[dts[2], 'c'] = 'c2'
s.setup(data, on_the_run = on_the_run)
s.update(dts[0])
s.temp['selected'] = ['c', 'b1']
algo = algos.ResolveOnTheRun( 'on_the_run' )
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'b1' in selected
    # make sure we don't keep nan
s.update(dts[1])
s.temp['selected'] = ['c', 'b1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'b1' in selected
    # if include_no_data is specified, then 2 are selected
algo2 = algos.ResolveOnTheRun('on_the_run', include_no_data=True)
s.temp['selected'] = ['c', 'b1']
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'b1' in selected
# behavior on negative prices
s.update(dts[2])
s.temp['selected'] = ['c', 'b1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'b1' in selected
algo3 = algos.ResolveOnTheRun('on_the_run', include_negative=True)
s.temp['selected'] = ['c', 'b1']
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c2' in selected
assert 'b1' in selected
def test_select_types():
c1 = bt.Security('c1')
c2 = bt.CouponPayingSecurity('c2')
c3 = bt.HedgeSecurity('c3')
c4 = bt.CouponPayingHedgeSecurity('c4')
c5 = bt.FixedIncomeSecurity('c5')
s = bt.Strategy('p', children = [c1, c2, c3, c4, c5])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4', 'c5'], data=100.)
coupons = pd.DataFrame(index=dts, columns=['c2', 'c4'], data=0.)
s.setup(data, coupons = coupons)
i = 0
s.update(dts[i])
algo = algos.SelectTypes(include_types=(bt.Security, bt.HedgeSecurity), exclude_types=())
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c3'])
algo = algos.SelectTypes(include_types=(bt.core.SecurityBase,), exclude_types=(bt.CouponPayingSecurity,))
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c3', 'c5'])
s.temp['selected'] = ['c1', 'c2', 'c3']
algo = algos.SelectTypes(include_types=(bt.core.SecurityBase,))
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c2', 'c3'])
def test_weight_equally():
algo = algos.WeighEqually()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.update(dts[0])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.5
assert 'c2' in weights
assert weights['c2'] == 0.5
def test_weight_specified():
algo = algos.WeighSpecified(c1=0.6, c2=0.4)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.6
assert 'c2' in weights
assert weights['c2'] == 0.4
def test_scale_weights():
s = bt.Strategy('s')
algo = algos.ScaleWeights( -0.5 )
s.temp['weights'] = {'c1': 0.5, 'c2': -0.4, 'c3':0 }
assert algo( s )
assert s.temp['weights'] == {'c1':-0.25, 'c2':0.2, 'c3':0}
def test_select_has_data():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=10)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[0]] = np.nan
data['c1'].loc[dts[1]] = np.nan
s.setup(data)
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
def test_select_has_data_preselected():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[0]] = np.nan
data['c1'].loc[dts[1]] = np.nan
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 0
@mock.patch('ffn.calc_erc_weights')
def test_weigh_erc(mock_erc):
algo = algos.WeighERC(lookback=pd.DateOffset(days=5))
mock_erc.return_value = pd.Series({'c1': 0.3, 'c2': 0.7})
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
assert mock_erc.called
rets = mock_erc.call_args[0][0]
assert len(rets) == 4
assert 'c1' in rets
assert 'c2' in rets
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.3
assert weights['c2'] == 0.7
def test_weigh_target():
algo = algos.WeighTarget('target')
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
target = pd.DataFrame(index=dts[:2], columns=['c1', 'c2'], data=0.5)
target['c1'].loc[dts[1]] = 1.0
target['c2'].loc[dts[1]] = 0.0
s.setup( data, target = target )
s.update(dts[0])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.5
assert weights['c2'] == 0.5
s.update(dts[1])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 1.0
assert weights['c2'] == 0.0
s.update(dts[2])
assert not algo(s)
def test_weigh_inv_vol():
algo = algos.WeighInvVol(lookback=pd.DateOffset(days=5))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
# high vol c1
data['c1'].loc[dts[1]] = 105
data['c1'].loc[dts[2]] = 95
data['c1'].loc[dts[3]] = 105
data['c1'].loc[dts[4]] = 95
# low vol c2
data['c2'].loc[dts[1]] = 100.1
data['c2'].loc[dts[2]] = 99.9
data['c2'].loc[dts[3]] = 100.1
data['c2'].loc[dts[4]] = 99.9
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c2'] > weights['c1']
aae(weights['c1'], 0.020, 3)
aae(weights['c2'], 0.980, 3)
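# Hand check of the inverse-volatility weights asserted above (sketch, not the
# library implementation): weights are proportional to 1/std of daily returns,
# normalized to sum to one, so the low-volatility series gets almost all the weight.
#   vols = data.pct_change().dropna().std()
#   w = (1 / vols) / (1 / vols).sum()   # approx {'c1': 0.02, 'c2': 0.98}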
@mock.patch('ffn.calc_mean_var_weights')
def test_weigh_mean_var(mock_mv):
algo = algos.WeighMeanVar(lookback=pd.DateOffset(days=5))
mock_mv.return_value = pd.Series({'c1': 0.3, 'c2': 0.7})
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
assert mock_mv.called
rets = mock_mv.call_args[0][0]
assert len(rets) == 4
assert 'c1' in rets
assert 'c2' in rets
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.3
assert weights['c2'] == 0.7
def test_weigh_randomly():
s = bt.Strategy('s')
s.temp['selected'] = ['c1', 'c2', 'c3']
algo = algos.WeighRandomly()
assert algo(s)
weights = s.temp['weights']
assert len( weights ) == 3
assert sum( weights.values() ) == 1.
algo = algos.WeighRandomly( (0.3,0.5), 0.95)
assert algo(s)
weights = s.temp['weights']
assert len( weights ) == 3
aae( sum( weights.values() ), 0.95 )
for c in s.temp['selected']:
assert weights[c] <= 0.5
assert weights[c] >= 0.3
def test_set_stat():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[1]] = 105
data['c2'].loc[dts[1]] = 95
stat = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=4.)
stat['c1'].loc[dts[1]] = 5.
stat['c2'].loc[dts[1]] = 6.
algo = algos.SetStat( 'test_stat' )
s.setup(data, test_stat = stat)
s.update(dts[0])
print()
print(s.get_data('test_stat'))
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 4.
assert stat['c2'] == 4.
s.update(dts[1])
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 5.
assert stat['c2'] == 6.
def test_set_stat_legacy():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[1]] = 105
data['c2'].loc[dts[1]] = 95
stat = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=4.)
stat['c1'].loc[dts[1]] = 5.
stat['c2'].loc[dts[1]] = 6.
algo = algos.SetStat( stat )
s.setup(data)
s.update(dts[0])
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 4.
assert stat['c2'] == 4.
s.update(dts[1])
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 5.
assert stat['c2'] == 6.
def test_stat_total_return():
algo = algos.StatTotalReturn(lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
stat = s.temp['stat']
assert len(stat) == 2
assert stat['c1'] == 105.0 / 100 - 1
assert stat['c2'] == 95.0 / 100 - 1
def test_select_n():
algo = algos.SelectN(n=1, sort_descending=True)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['stat'] = data.calc_total_return()
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
algo = algos.SelectN(n=1, sort_descending=False)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
    # returns the 2 we have if all_or_none is false
algo = algos.SelectN(n=3, sort_descending=False)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
    # returns 0 if all_or_none is true and fewer than n are available
algo = algos.SelectN(n=3, sort_descending=False, all_or_none=True)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 0
def test_select_n_perc():
algo = algos.SelectN(n=0.5, sort_descending=True)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['stat'] = data.calc_total_return()
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
def test_select_momentum():
algo = algos.SelectMomentum(n=1, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
actual = s.temp['selected']
assert len(actual) == 1
assert 'c1' in actual
def test_limit_weights():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.temp['weights'] = {'c1': 0.6, 'c2':0.2, 'c3':0.2}
algo = algos.LimitWeights(0.5)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.5
assert w['c2'] == 0.25
assert w['c3'] == 0.25
algo = algos.LimitWeights(0.3)
assert algo(s)
w = s.temp['weights']
assert w == {}
s.temp['weights'] = {'c1': 0.4, 'c2':0.3, 'c3':0.3}
algo = algos.LimitWeights(0.5)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.4
assert w['c2'] == 0.3
assert w['c3'] == 0.3
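# Hand check of the capping rule exercised above (sketch, not the library code):
# with a 0.5 cap, the 0.1 excess on 'c1' is redistributed to the uncapped names,
# giving c2 = c3 = 0.2 + 0.05 = 0.25; with a 0.3 cap three names can sum to at most
# 3 * 0.3 = 0.9 < 1, and the test shows the algo returns an empty weight dict.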
def test_limit_deltas():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.temp['weights'] = {'c1': 1}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.1
s.temp['weights'] = {'c1': 0.05}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.05
s.temp['weights'] = {'c1': 0.5, 'c2': 0.5}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == 0.1
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == -0.1
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas({'c1': 0.1})
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == -0.5
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas({'c1': 0.1, 'c2': 0.3})
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == -0.3
    # set existing weights
s.children['c1'] = bt.core.SecurityBase('c1')
s.children['c1']._weight = 0.3
s.children['c2'] = bt.core.SecurityBase('c2')
s.children['c2']._weight = -0.7
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.4
assert w['c2'] == -0.6
def test_rebalance_over_time():
target = mock.MagicMock()
rb = mock.MagicMock()
algo = algos.RebalanceOverTime(n=2)
# patch in rb function
algo._rb = rb
target.temp = {}
target.temp['weights'] = {'a': 1, 'b': 0}
a = mock.MagicMock()
a.weight = 0.
b = mock.MagicMock()
b.weight = 1.
target.children = {'a': a, 'b': b}
assert algo(target)
w = target.temp['weights']
assert len(w) == 2
assert w['a'] == 0.5
assert w['b'] == 0.5
assert rb.called
called_tgt = rb.call_args[0][0]
called_tgt_w = called_tgt.temp['weights']
assert len(called_tgt_w) == 2
assert called_tgt_w['a'] == 0.5
assert called_tgt_w['b'] == 0.5
# update weights for next call
a.weight = 0.5
b.weight = 0.5
    # clear out temp - same as Strategy would
target.temp = {}
assert algo(target)
w = target.temp['weights']
assert len(w) == 2
assert w['a'] == 1.
assert w['b'] == 0.
assert rb.call_count == 2
# update weights for next call
# should do nothing now
a.weight = 1
b.weight = 0
    # clear out temp - same as Strategy would
target.temp = {}
assert algo(target)
# no diff in call_count since last time
assert rb.call_count == 2
def test_require():
target = mock.MagicMock()
target.temp = {}
algo = algos.Require(lambda x: len(x) > 0, 'selected')
assert not algo(target)
target.temp['selected'] = []
assert not algo(target)
target.temp['selected'] = ['a', 'b']
assert algo(target)
def test_run_every_n_periods():
target = mock.MagicMock()
target.temp = {}
algo = algos.RunEveryNPeriods(n=3, offset=0)
target.now = pd.to_datetime('2010-01-01')
assert algo(target)
# run again w/ no date change should not trigger
assert not algo(target)
target.now = pd.to_datetime('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
target.now = pd.to_datetime('2010-01-04')
assert algo(target)
    target.now = pd.to_datetime('2010-01-05')
# coding: utf-8
'''
from: examples/tutorial/fifth.cc
to: fifth.py
time: 20101110.1948.
//
// node 0 node 1
// +----------------+ +----------------+
// | ns-3 TCP | | ns-3 TCP |
// +----------------+ +----------------+
// | 10.1.1.1 | | 10.1.1.2 |
// +----------------+ +----------------+
// | point-to-point | | point-to-point |
// +----------------+ +----------------+
// | |
// +---------------------+
// 5 Mbps, 2 ms
//
//
// We want to look at changes in the ns-3 TCP congestion window. We need
// to crank up a flow and hook the CongestionWindow attribute on the socket
// of the sender. Normally one would use an on-off application to generate a
// flow, but this has a couple of problems. First, the socket of the on-off
// application is not created until Application Start time, so we wouldn't be
// able to hook the socket (now) at configuration time. Second, even if we
// could arrange a call after start time, the socket is not public so we
// couldn't get at it.
//
// So, we can cook up a simple version of the on-off application that does what
// we want. On the plus side we don't need all of the complexity of the on-off
// application. On the minus side, we don't have a helper, so we have to get
// a little more involved in the details, but this is trivial.
//
// So first, we create a socket and do the trace connect on it; then we pass
// this socket into the constructor of our simple application which we then
// install in the source node.
'''
import sys
import ns.applications
import ns.core
import ns.internet
import ns.network
import ns.point_to_point
import ns3
import pandas as pd
import numpy as np
import scipy
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import r2_score
import seaborn as sns
import statsmodels as sm
import scipy.stats as stats
import matplotlib.pyplot as plt
import os
import statsmodels.distributions.empirical_distribution as edf
from scipy.interpolate import interp1d
from scipy.stats.distributions import chi2
import random
# Disable warnings
import warnings
warnings.filterwarnings("ignore")
# Generation method: "Trace" or "PD" (Probability Distribution)
mt_RG = "PD"
# Random-number generation method: "tcdf" or "ecdf"
tr_RG = "tcdf"
# Global variables
# Helper for generating packet times in the network
aux_global_time = 0
# Flag indicating whether the trace files are ready to be read
# tr_reader = True
# Network parameter used by the functions
parameter = ""
# Trace data stored as np.arrays()
t_time = np.empty(1)
t_size = np.empty(1)
# Variables holding the fitted probability-distribution parameters
# time
dist_time = ""
arg_time = []
loc_time = 0
scale_time = 0
# size
dist_size = ""
arg_size = []
loc_size = 0
scale_size = 0
# Stop-guard variables for the tcdf function
first_tcdf_time = 0
first_tcdf_size = 0
# Stop-guard variables for the read_trace function
first_trace_time = 0
first_trace_size = 0
# Whether the trace is ".txt" or "xml"
reader = "txt"
size_xml = 0
stop_xml = 0
# Function that reads the xml trace files
def read_xml(parameter):
global size_xml
global stop_xml
ifile = open('scratch/results-http-docker.pdml','r')
print(ifile)
columns = ["length", "time"]
df = pd.DataFrame(columns = columns)
data0 = []
data1 = []
for line in ifile.readlines():
if ("httpSample" in line and "</httpSample>" not in line):
data0.append(line)
if ("httpSample" in line and "</httpSample>" not in line):
data1.append(line)
ifile.close()
# Save parameters in DataFrames and Export to .txt
df = pd.DataFrame(list(zip(data0, data1)), columns=['length', 'time'])
df['length'] = df['length'].str.split('by="').str[-1]
df['time'] = df['time'].str.split('ts="').str[-1]
df['length'] = df['length'].str.split('"').str[0]
df['time'] = df['time'].str.split('"').str[0]
df["length"] = pd.to_numeric(df["length"],errors='coerce')
df["time"] = pd.to_numeric(df["time"],errors='coerce')
print("DF: ", df)
size_xml = len(df["time"])
stop_xml = df["time"]
print("STOP: ", len(stop_xml))
stop_xml = stop_xml[len(stop_xml)-1]
if parameter == "Size":
# Chamando variáveis globais
global t_size
global first_trace_size
# Abrindo arquivos .txt
t_size = np.array(df['length'])
# print("Trace Size: ", t_size)
# Plot histograma de t_size:
plt.hist(t_size)
plt.title("Histogram of trace ("+parameter+")")
plt.show()
# Com ajuda da lib Pandas podemos encontrar algumas estatísticas importantes.
# y_size_df = pd.DataFrame(y_size, columns=['Size'])
# y_size_df.describe()
# Definindo que o parametro size pode ser lido apenas uma vez.
first_trace_size = 1
if parameter == "Time":
# Chamando variáveis globais
global t_time
global first_trace_time
# Abrindo arquivos .txt
t_time = np.array(df['time'])
# Obtendo os tempos entre pacotes do trace
sub = []
i=0
for i in range(len(t_time)-1):
sub.append(t_time[i+1] - t_time[i])
# Passando valores resultantes para a variável padrão t_time
t_time = np.array(sub)
# print("Trace Time: ", t_time)
# Plot histograma t_time:
plt.hist(t_time)
plt.title("Histogram of trace ("+parameter+")")
plt.show()
# Com ajuda da lib Pandas pode-se encontrar algumas estatísticas importantes.
# t_time_df = pd.DataFrame(t_time, columns=['Time'])
# t_time_df.describe()
# Definindo que o parametro time pode ser lido apenas uma vez.
first_trace_time = 1
# Function that reads the traces and assigns the data to the corresponding vectors
def read_txt(parameter):
if parameter == "Size":
# Chamando variáveis globais
global t_size
global first_trace_size
# Abrindo arquivos .txt
t_size = np.loadtxt("scratch/size.txt", usecols=0)
# print("Trace Size: ", t_size)
# Plot histograma de t_size:
plt.hist(t_size)
plt.title("Histogram of trace ("+parameter+")")
plt.show()
# Com ajuda da lib Pandas podemos encontrar algumas estatísticas importantes.
# y_size_df = pd.DataFrame(y_size, columns=['Size'])
# y_size_df.describe()
# Definindo que o parametro size pode ser lido apenas uma vez.
first_trace_size = 1
if parameter == "Time":
# Chamando variáveis globais
global t_time
global first_trace_time
# Abrindo arquivos .txt
t_time = np.loadtxt("scratch/time.txt", usecols=0)
# Obtendo os tempos entre pacotes do trace
sub = []
i=0
for i in range(len(t_time)-1):
sub.append(t_time[i+1] - t_time[i])
# Passando valores resultantes para a variável padrão t_time
t_time = np.array(sub)
# print("Trace Time: ", t_time)
# Plot histograma t_time:
plt.hist(t_time)
plt.title("Histogram of trace ("+parameter+")")
plt.show()
# Com ajuda da lib Pandas pode-se encontrar algumas estatísticas importantes.
# t_time_df = pd.DataFrame(t_time, columns=['Time'])
# t_time_df.describe()
# Definindo que o parametro time pode ser lido apenas uma vez.
first_trace_time = 1
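# Vectorized equivalent of the inter-arrival-time computation above (sketch):
#   t_time = np.diff(np.loadtxt("scratch/time.txt", usecols=0))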
# Function that generates random variates via the ECDF (inverse-transform sampling)
def ecdf(y, parameter):
# Criando listas para os dados utilizados
Fx = []
Fx_ = []
# Realizando ajustes para os vetores que selecionaram os valores gerados
for i in range(len(y)):
Fx.append(i/(len(y)+1))
if i != 0:
Fx_.append(i/(len(y)+1))
# Adicionando 1 no vetor Fx_
Fx_.append(1)
# print ("Fx: ", len(Fx))
# print ("Fx_: ", len(Fx_))
# Organizando o vetor com os dados do trace
y.sort()
# print ("Y: ", len(y))
# Gerando um valor aleatório entre 0 e 1 uniforme
rand = np.random.uniform(0,1)
# print("Rand: ", rand)
# Pecorrer todos os valores do vetor com dados do trace
# para determinar o valor a ser gerado de acordo com o resultado da distribuição uniforme
for i in range(len(y)):
# Condição que define em qual classe o valor é encontrado
if rand > Fx[i] and rand < Fx_[i]:
# Determinando o valor resultante
r_N = y[i]
# Condição para retorno do valor de acordo com o parametro de rede.
if parameter == "Size":
# print ("ECDF SIZE: ", r_N)
return(int(r_N))
if parameter == "Time":
# print ("ECDF TIME: ", r_N)
return(r_N)
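# Standalone sketch of the same inverse-transform idea using np.searchsorted
# (illustrative only; the simulation itself calls ecdf() above):
def ecdf_sample(y, size=1):
    """Draw `size` samples from the empirical CDF of the 1-D array `y`."""
    ys = np.sort(np.asarray(y))
    # Empirical CDF levels 1/n, 2/n, ..., 1 for the sorted sample
    levels = np.arange(1, len(ys) + 1) / len(ys)
    u = np.random.uniform(0, 1, size)
    idx = np.searchsorted(levels, u)
    return ys[np.clip(idx, 0, len(ys) - 1)]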
# Function that selects the probability distribution best matching the trace
# values, used afterwards to generate random variates via the TCDF
def tcdf(y, parameter):
# Indexar o vetor y pelo vetor x
x = np.arange(len(y))
# Definindo o tamanho da massa de dados
size = len(x)
# Definindo a quantidade de bins (classes) dos dados
nbins = int(np.sqrt(size))
# Normalização dos dados
sc=StandardScaler()
yy = y.reshape (-1,1)
sc.fit(yy)
y_std = sc.transform(yy)
y_std = y_std.flatten()
del yy
# O python pode relatar avisos enquanto executa as distribuições
# Mais distribuições podem ser encontradas no site da lib "scipy"
# Veja https://docs.scipy.org/doc/scipy/reference/stats.html para mais detalhes
dist_names = ['erlang',
'expon',
'gamma',
'lognorm',
'norm',
'pareto',
'triang',
'uniform',
'dweibull',
'weibull_min',
'weibull_max']
# Obter os métodos de inferência KS test e Chi-squared
# Configurar listas vazias para receber os resultados
chi_square = []
ks_values = []
#--------------------------------------------------------#
# Chi-square
# Configurar os intervalos de classe (nbins) para o teste qui-quadrado
# Os dados observados serão distribuídos uniformemente em todos os inervalos de classes
percentile_bins = np.linspace(0,100,nbins)
percentile_cutoffs = np.percentile(y, percentile_bins)
observed_frequency, bins = (np.histogram(y, bins=percentile_cutoffs))
cum_observed_frequency = np.cumsum(observed_frequency)
# Repetir para as distribuições candidatas
for distribution in dist_names:
# Configurando a distribuição e obtendo os parâmetros ajustados da distribuição
dist = getattr(scipy.stats, distribution)
param = dist.fit(y)
#
# KS TEST
#
# Criando percentil
percentile = np.linspace(0,100,len(y))
percentile_cut = np.percentile(y, percentile)
# Criando CDF da teórica
Ft = dist.cdf(percentile_cut, *param[:-2], loc=param[-2], scale=param[-1])
# Criando CDF Inversa
Ft_ = dist.ppf(percentile_cut, *param[:-2], loc=param[-2], scale=param[-1])
# Adicionando dados do trace
t_Fe = y
# Criando listas para armazenar as ECDFs
Fe = []
Fe_ = []
# Criando ECDFs
for i in range(len(y)):
# ecdf i-1/n
Fe.append((i-1)/len(y))
# ecdf i/n
Fe_.append(i/len(y))
# Transformando listas em np.arrays()
Fe = np.array(Fe)
Fe_ = np.array(Fe_)
Ft = np.array(Ft)
Ft_ = np.array(Ft_)
# Ordenando dados
t_Fe.sort()
Ft.sort()
Ft_.sort()
Fe.sort()
Fe_.sort()
# Inicio cálculo de rejeição
#
# Ft(t)-FE-(i),FE+(i)-Ft(t)
Ft_Fe_ = np.subtract(Ft, Fe_)
Fe_Ft = np.subtract(Fe, Ft)
# Max(Ft(t)-FE-(i),FE+(i)-Ft(t))
Dobs_max = np.maximum(Ft_Fe_, Fe_Ft)
# Dobs= Max(Max (Ft(t)-FE-(i),FE+(i)-Ft(t)))
Dobs = np.max(Dobs_max)
#
# Fim cálculo de rejeição
# Definir intervalo de confiança
# IC = 99.90 -> alpha = 0.10
# IC = 99.95 -> alpha = 0.05
# IC = 99.975 -> alpha = 0.025
# IC = 99.99 -> alpha = 0.01
# IC = 99.995 -> alpha = 0.005
# IC = 99.999 -> alpha = 0.001
IC = 99.90
# Condição para definir o D_critico de acordo com o tamanho dos dados
if size > 35:
if IC == 99.90:
D_critico = 1.22/np.sqrt(len(y))
if IC == 99.95:
D_critico = 1.36/np.sqrt(len(y))
if IC == 99.975:
D_critico = 1.48/np.sqrt(len(y))
if IC == 99.99:
D_critico = 1.63/np.sqrt(len(y))
if IC == 99.995:
D_critico = 1.73/np.sqrt(len(y))
if IC == 99.999:
D_critico = 1.95/np.sqrt(len(y))
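                # Worked example: for n = 1000 samples at IC = 99.95 %,
                # D_critico = 1.36 / sqrt(1000) ≈ 0.0430.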
# Condição para aceitar a hipótese nula do teste KS
if Dobs > D_critico:
rejects = "Reject the Null Hypothesis"
else:
rejects = "Fails to Reject the Null Hypothesis"
# Imprimindo resultados do KS Test
print(" ")
print("KS TEST:")
print("Confidence degree: ", IC,"%")
print(rejects, " of ", distribution)
print("D observed: ", Dobs)
print("D critical: ", D_critico)
print(" ")
# Obtém a estatística do teste KS e arredonda para 5 casas decimais
Dobs = np.around(Dobs, 5)
ks_values.append(Dobs)
#
# CHI-SQUARE
#
# Obter contagens esperadas nos percentis
# Isso se baseia em uma 'função de distribuição acumulada' (cdf)
cdf_fitted = dist.cdf(percentile_cutoffs, *param[:-2], loc=param[-2], scale=param[-1])
# Definindo a frequência esperada
expected_frequency = []
for bin in range(len(percentile_bins)-1):
expected_cdf_area = cdf_fitted[bin+1] - cdf_fitted[bin]
expected_frequency.append(expected_cdf_area)
# Calculando o qui-quadrado
expected_frequency = np.array(expected_frequency) * size
cum_expected_frequency = np.cumsum(expected_frequency)
ss = sum (((cum_expected_frequency - cum_observed_frequency) ** 2) / cum_observed_frequency)
chi_square.append(ss)
# Set x² with IC
IC = IC/100
x2 = chi2.ppf(IC, nbins-1)
# Imprimindo resultados do teste Chi-square
print(" ")
print("Chi-square test: ")
print("Confidence degree: ", IC,"%")
print("CS: ", ss)
print("X²: ", x2)
# Condição para aceitar a hipótese nula do teste Chi-square
if x2 > ss:
print("Fails to Reject the Null Hipothesis of ", distribution)
else:
print("Rejects the Null Hipothesis of ", distribution)
print(" ")
# Agrupar os resultados e classificar por qualidade de ajuste de acordo com o teste KS (melhor na parte superior)
results = pd.DataFrame()
results['Distribution'] = dist_names
results['ks_value'] = ks_values
results['chi_square'] = chi_square
results.sort_values(['ks_value'], inplace=True, ascending=True)
# Apresentar os resultados em uma tabela
print ('\nDistributions sorted by KS Test:')
print ('----------------------------------------')
print (results)
# Divida os dados observados em N posições para plotagem (isso pode ser alterado)
bin_cutoffs = np.linspace(np.percentile(y,0), np.percentile(y,99), nbins)
# Crie o gráfico
h = plt.hist(y, bins = bin_cutoffs, color='0.75')
# Receba as principais distribuições da fase anterior
# e seleciona a quantidade de distribuições.
number_distributions_to_plot = 1
dist_names = results['Distribution'].iloc[0:number_distributions_to_plot]
# Crie uma lista vazia para armazenar parâmetros de distribuição ajustada
parameters = []
# Faça um loop pelas distribuições para obter o ajuste e os parâmetros da linha
for dist_name in dist_names:
# Chamando variáveis globais
global arg_time
global loc_time
global scale_time
global dist_time
global arg_size
global loc_size
global scale_size
global dist_size
# Obtendo distribuições e seus parametros de acordo com o trace
dist = getattr(scipy.stats, dist_name)
param = dist.fit(y)
parameters.append(param)
arg = param[:-2]
loc = param[-2]
scale = param[-1]
print(parameters)
if parameter == "Time":
dist_time = dist_name
loc_time = loc
scale_time = scale
arg_time = arg
if parameter == "Size":
dist_size = dist_name
loc_size = loc
scale_size = scale
arg_size = arg
# Obter linha para cada distribuição (e dimensionar para corresponder aos dados observados)
pdf_fitted = dist.pdf(x, *param[:-2], loc=param[-2], scale=param[-1])
scale_pdf = np.trapz (h[0], h[1][:-1]) / np.trapz (pdf_fitted, x)
pdf_fitted *= scale_pdf
# Adicione a linha ao gráfico
plt.plot(pdf_fitted, label=dist_name)
# Defina o eixo gráfico x para conter 99% dos dados
# Isso pode ser removido, mas, às vezes, dados fora de padrão tornam o gráfico menos claro
plt.xlim(0,np.percentile(y,99))
plt.title("Histogram of trace (" + parameter + ") + theorical distribuition " + dist_name)
# Adicionar legenda
plt.legend()
plt.show()
# Armazenar parâmetros de distribuição em um quadro de dados (isso também pode ser salvo)
dist_parameters = pd.DataFrame()
dist_parameters['Distribution'] = (
results['Distribution'].iloc[0:number_distributions_to_plot])
dist_parameters['Distribution parameters'] = parameters
# Printar os parâmetros
print ('\nDistribution parameters:')
print ('------------------------')
for row in dist_parameters.iterrows():
print ('\nDistribution:', row[0])
print ('Parameters:', row[1] )
# Plotando gráficos de inferência
data = y_std.copy()
# data = y
data.sort()
# Loop through selected distributions (as previously selected)
for distribution in dist_names:
# Set up distribution
dist = getattr(scipy.stats, distribution)
param = dist.fit(y)
#
# KS TEST
#
# Criando percentil
percentile = np.linspace(0,100,len(y))
percentile_cut = np.percentile(y, percentile)
# Criando CDF da teórica
Ft = dist.cdf(percentile_cut, *param[:-2], loc=param[-2], scale=param[-1])
# Criando CDF Inversa
Ft_ = dist.ppf(percentile_cut, *param[:-2], loc=param[-2], scale=param[-1])
# Adicionando dados do trace
t_Fe = y
# Ordenando dados
t_Fe.sort()
Ft.sort()
Ft_.sort()
# Criando listas para armazenar as ECDFs
Fe = []
Fe_ = []
# Criando ECDFs
for i in range(len(y)):
# ecdf i-1/n
Fe.append((i-1)/len(y))
# ecdf i/n
Fe_.append(i/len(y))
# Transformando listas em np.arrays()
Fe = np.array(Fe)
Fe_ = np.array(Fe_)
Ft = np.array(Ft)
Ft_ = np.array(Ft_)
# Inicio cálculo de rejeição
#
# Ft(t)-FE-(i),FE+(i)-Ft(t)
Ft_Fe_ = np.subtract(Ft, Fe_)
Fe_Ft = np.subtract(Fe, Ft)
# Max(Ft(t)-FE-(i),FE+(i)-Ft(t))
Dobs_max = np.maximum(Ft_Fe_, Fe_Ft)
# Dobs= Max(Max (Ft(t)-FE-(i),FE+(i)-Ft(t)))
Dobs = np.max(Dobs_max)
#
# Fim cálculo de rejeição
# Definir intervalo de confiança
# IC = 99.90 -> alpha = 0.10
# IC = 99.95 -> alpha = 0.05
# IC = 99.975 -> alpha = 0.025
# IC = 99.99 -> alpha = 0.01
# IC = 99.995 -> alpha = 0.005
# IC = 99.999 -> alpha = 0.001
IC = 99.95
# Condição para definir o D_critico de acordo com o tamanho dos dados
if size > 35:
if IC == 99.90:
D_critico = 1.22/np.sqrt(len(y))
if IC == 99.95:
D_critico = 1.36/np.sqrt(len(y))
if IC == 99.975:
D_critico = 1.48/np.sqrt(len(y))
if IC == 99.99:
D_critico = 1.63/np.sqrt(len(y))
if IC == 99.995:
D_critico = 1.73/np.sqrt(len(y))
if IC == 99.999:
D_critico = 1.95/np.sqrt(len(y))
# Condição para aceitar a hipótese nula do teste KS
if Dobs > D_critico:
rejects = "Reject the Null Hypothesis"
else:
rejects = "Fails to Reject the Null Hypothesis"
# Print the KS test results
print("KS TEST:")
print("Confidence degree: ", IC,"%")
print(rejects, " of ", distribution)
print("D observed: ", Dobs)
print("D critical: ", D_critico)
print(" ")
# Plot the KS test results
plt.plot(t_Fe, Ft, 'o', label='Theoretical Distribution')
plt.plot(t_Fe, Fe, 'o', label='Empirical Distribution')
# plt.plot(t_Fe, Fe, 'o', label='Real Trace')
# plt.plot(Ft, Fe, 'o', label='Synthetic Trace')
# Set the title
plt.title("KS Test of Real Trace with " + distribution + " Distribution (" + parameter + ")")
plt.legend()
plt.show()
global first_tcdf_time
global first_tcdf_size
if parameter == "Size":
first_tcdf_size = 1
if parameter == "Time":
first_tcdf_time = 1
# Generates random variables via the fitted theoretical CDF (TCDF)
def tcdf_generate(dist, loc, scale, arg, parameter):
# Select the chosen distribution.
dist_name = getattr(scipy.stats, dist)
# Generate a random number from the chosen distribution and its parameters.
r_N = dist_name.rvs(loc=loc, scale=scale, *arg)
# Return the value according to the network parameter.
if parameter == "Size":
# print("SIZE R_N:", r_N)
return(int(abs(r_N)))
if parameter == "Time":
# print("TIME R_N:", r_N)
return(float(abs(r_N)))
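# Illustrative usage sketch (assumes tcdf() has already been run for the given
# parameter, so the dist_*/loc_*/scale_*/arg_* globals hold the fitted values;
# the variable names below are for illustration only):
#
#   next_size = tcdf_generate(dist_size, loc_size, scale_size, arg_size, "Size")
#   next_interval = tcdf_generate(dist_time, loc_time, scale_time, arg_time, "Time")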
# Generates random variables from fixed probability
# distributions with predefined parameters
def wgwnet_PD(parameter):
# More distributions can be found in the "scipy" library documentation
# See https://docs.scipy.org/doc/scipy/reference/stats.html for more details
if parameter == "Size":
# Select the probability distribution for the Size parameter
dist_name = 'uniform'
# Define the distribution parameters
loc = 500
scale = 500
arg = []
# Instantiate the chosen distribution with its parameters
dist = getattr(scipy.stats, dist_name)
# Generate a random number from the distribution and the defined parameters
r_N = dist.rvs(loc=loc, scale=scale, *arg, size=1)
print("Size: ", r_N)
return(int(r_N))
if parameter == "Time":
# Select the probability distribution for the Time parameter
dist_name = 'uniform'
# Define the distribution parameters
loc = 0.5
scale = 0.8
arg = []
# Instantiate the chosen distribution with its parameters
dist = getattr(scipy.stats, dist_name)
# Generate a random number from the distribution and the defined parameters
r_N = dist.rvs(loc=loc, scale=scale, *arg, size=1)
return(float(r_N))
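# Minimal sketch (helper added for illustration only, not part of the original
# workflow): draws one value per network parameter so the fixed uniform ranges can
# be checked in isolation. scipy's uniform distribution spans [loc, loc + scale),
# so sizes fall in [500, 1000) bytes and times in [0.5, 1.3) seconds.
def wgwnet_PD_demo():
    # Draw and print one Size/Time pair generated by wgwnet_PD().
    demo_size = wgwnet_PD("Size")
    demo_time = wgwnet_PD("Time")
    print("Demo draw -> Size:", demo_size, "bytes, Time:", demo_time, "s")
    return demo_size, demo_time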
# Class that builds the NS3 application
class MyApp(ns3.Application):
# Auxiliary class attributes
tid = ns3.TypeId("MyApp")
tid.SetParent(ns3.Application.GetTypeId())
m_socket = m_packetSize = m_nPackets = m_dataRate = m_packetsSent = 0
m_peer = m_sendEvent = None
m_running = False
count_Setup = count_Start = count_Stop = count_SendPacket = count_ScheduleTx = count_GetSendPacket = count_GetTypeId = 0
# Application initializer
def __init__(self):
super(MyApp, self).__init__()
# def Setup(self, socket, address, packetSize, nPackets, dataRate):
# Application configuration
def Setup(self, socket, address, nPackets):
self.count_Setup = self.count_Setup + 1
self.m_socket = socket
self.m_peer = address
# self.m_packetSize = packetSize
self.m_nPackets = nPackets
# self.m_dataRate = dataRate
# Application start
def StartApplication(self):
self.count_Start = self.count_Start + 1
if self.m_nPackets > 0 and self.m_nPackets > self.m_packetsSent:
self.m_running = True
self.m_packetsSent = 0
self.m_socket.Bind()
self.m_socket.Connect(self.m_peer)
self.SendPacket()
else:
self.StopApplication()
# Application stop
def StopApplication(self):
self.count_Stop = self.count_Stop + 1
self.m_running = False
if self.m_sendEvent != None and self.m_sendEvent.IsRunning() == True:
ns3.Simulator.Cancel(self.m_sendEvent)
if self.m_socket:
self.m_socket.Close()
# Packet sending
def SendPacket(self):
# Count the number of SendPacket calls
self.count_SendPacket = self.count_SendPacket + 1
# Referencing global variables
# Random-number generation method
global mt_RG
# Trace-based random-number generation method
global tr_RG
# Vector with the packet-size data obtained from the trace
global t_size
global parameter
global arg_size
global scale_size
global loc_size
global dist_size
global first_tcdf_size
global first_trace_size
global reader
parameter = "Size"
# Case: generate random variables directly
# from a predefined probability distribution
if mt_RG == "PD":
# Call wgwnet_PD() and keep the generated value in an auxiliary variable
aux_packet = wgwnet_PD(parameter)
# Turn the auxiliary value into packet metadata
packet = ns3.Packet(aux_packet)
# Case: generate random variables
# based on the trace data
if mt_RG == "Trace":
if first_trace_size == 0:
# Choose how to read the trace file
if reader == "txt":
read_txt(parameter)
if reader == "xml":
read_xml(parameter)
# Case: theoretical distributions fitted to the trace data
if tr_RG == "tcdf":
# Call tcdf() only once
if first_tcdf_size == 0:
# Call tcdf() to find the probability distribution that best matches the trace
# and its parameters for random-number generation
tcdf(t_size, parameter)
# Call tcdf_generate() and keep the generated value in an auxiliary variable
aux_packet = tcdf_generate(dist_size, loc_size, scale_size, arg_size, parameter)
# Turn the auxiliary value into packet metadata
packet = ns3.Packet(aux_packet)
# Case: empirical distribution (ECDF) of the trace data
if tr_RG == "ecdf":
# Call ecdf() and keep the generated value in an auxiliary variable
aux_packet = ecdf(t_size, parameter)
# Turn the auxiliary value into packet metadata
packet = ns3.Packet(aux_packet)
# Print the send time and the number of packets sent
print ("SendPacket(): ", str(ns3.Simulator.Now().GetSeconds()), "s,\t send ", str(self.m_packetsSent), " Size ", packet.GetSize(), "#")
# Send the packet through the network socket
self.m_socket.Send(packet, 0)
# Increment the number of packets sent
self.m_packetsSent = self.m_packetsSent + 1
# Stop the application once the maximum number of packets is reached
if self.m_packetsSent < self.m_nPackets:
self.ScheduleTx()
else:
self.StopApplication()
# Schedules the packet-send events
def ScheduleTx(self):
# Count the number of scheduling events in the simulation
self.count_ScheduleTx = self.count_ScheduleTx + 1
# Only schedule if the application is still running
if self.m_running:
# Referencing global variables
# Time auxiliary variable
global aux_global_time
# Random-number generation method
global mt_RG
# Trace-based random-number generation method
global tr_RG
# Vector with the inter-send time data obtained from the trace
global t_time
global parameter
global arg_time
global scale_time
global loc_time
global dist_time
global first_tcdf_time
global first_trace_time
global reader
parameter = "Time"
# Case: generate random variables directly
# from a predefined probability distribution
if mt_RG == "PD":
# Call wgwnet_PD() and keep the generated value in an auxiliary variable
aux_global_time = wgwnet_PD(parameter)
# Case: generate random variables
# based on the trace data
if mt_RG == "Trace":
# Choose how to read the trace file
if first_trace_time == 0:
if reader == "txt":
read_txt(parameter)
if reader == "xml":
read_xml(parameter)
# Case: theoretical distributions fitted to the trace data
if tr_RG == "tcdf":
# Call tcdf() only once
if first_tcdf_time == 0:
# Call tcdf() to find the probability distribution that best matches the trace
# and its parameters for random-number generation
tcdf(t_time, parameter)
# Call tcdf_generate() and keep the generated value in an auxiliary variable
aux_global_time = tcdf_generate(dist_time, loc_time, scale_time, arg_time, parameter)
# Case: empirical distribution (ECDF) of the trace data
if tr_RG == "ecdf":
# Call ecdf() and keep the generated value in an auxiliary variable
aux_global_time = ecdf(t_time, parameter)
# Turn the auxiliary value into a simulation time
tNext = ns3.Seconds(aux_global_time)
# dataRate = "1Mbps"
# packetSize = 1024
# tNext = ns3.Seconds(packetSize * 8.0 / ns3.DataRate(dataRate).GetBitRate())
# print("tNEXT: ", tNext)
# Create the packet-send event
self.m_sendEvent = ns3.Simulator.Schedule(tNext, MyApp.SendPacket, self)
def GetSendPacket(self):
self.count_GetSendPacket = self.count_GetSendPacket + 1
return self.m_packetsSent
def GetTypeId(self):
self.count_GetTypeId = self.count_GetTypeId + 1
return self.tid
# Congestion-window tracking hook
def CwndChange(app):
# CwndChange():
# n = app.GetSendPacket()
# print ('CwndChange(): ' + str(ns3.Simulator.Now().GetSeconds()) + 's, \t sum(send packets) = ' + str(n))
ns3.Simulator.Schedule(ns3.Seconds(1), CwndChange, app)
# def ChangeRate(self, ns3.DataRate newrate):
# newrate = "1Mbps"
# self.m_dataRate = newrate
# def IncRate(self, app):
# app.ChangeRate(self.m_dataRate)
# Prints the NS3 simulation results
def print_stats(os, st):
# os = open("stats.txt", "w")
print (os, " Duration: ", (st.timeLastRxPacket.GetSeconds()-st.timeFirstTxPacket.GetSeconds()))
print (os, " Last Packet Time: ", st.timeLastRxPacket.GetSeconds(), " Seconds")
print (os, " Tx Bytes: ", st.txBytes)
print (os, " Rx Bytes: ", st.rxBytes)
print (os, " Tx Packets: ", st.txPackets)
print (os, " Rx Packets: ", st.rxPackets)
print (os, " Lost Packets: ", st.lostPackets)
if st.rxPackets > 0:
print (os, " Mean{Delay}: ", (st.delaySum.GetSeconds() / st.rxPackets))
print (os, " Mean{Jitter}: ", (st.jitterSum.GetSeconds() / (st.rxPackets)))
print (os, " Throughput ", (st.rxBytes * 8.0 / (st.timeLastRxPacket.GetSeconds()-st.timeFirstTxPacket.GetSeconds())/1024/1024), "MB/S")
print (os, " Mean{Hop Count}: ", float(st.timesForwarded) / st.rxPackets + 1)
# std::cout<<"Duration : "<<()<<std::endl;
# std::cout<<"Last Received Packet : "<< stats->second.timeLastRxPacket.GetSeconds()<<" Seconds"<<std::endl;
# std::cout<<"Throughput: " << stats->second.rxBytes * 8.0 / (stats->second.timeLastRxPacket.GetSeconds()-stats->second.timeFirstTxPacket.GetSeconds())/1024/1024 << " Mbps"<<std::endl;
if st.rxPackets == 0:
print (os, "Delay Histogram")
for i in range(st.delayHistogram.GetNBins()):
print (os, " ", i, "(", st.delayHistogram.GetBinStart(i), "-", st.delayHistogram.GetBinEnd(i), "): ", st.delayHistogram.GetBinCount(i))
print (os, "Jitter Histogram")
for i in range(st.jitterHistogram.GetNBins()):
print (os, " ", i, "(", st.jitterHistogram.GetBinStart(i), "-", st.jitterHistogram.GetBinEnd(i), "): ", st.jitterHistogram.GetBinCount(i))
print (os, "PacketSize Histogram")
for i in range(st.packetSizeHistogram.GetNBins()):
print (os, " ", i, "(", st.packetSizeHistogram.GetBinStart(i), "-", st.packetSizeHistogram.GetBinEnd(i), "): ", st.packetSizeHistogram.GetBinCount(i))
for reason, drops in enumerate(st.packetsDropped):
print (" Packets dropped by reason ", reason ,": ", drops)
# for reason, drops in enumerate(st.bytesDropped):
# print "Bytes dropped by reason %i: %i" % (reason, drops)
# Compares the results obtained with NS3 against the trace data
# This function is only used when the random-variable generation method is "Trace"
def compare(app_protocol):
compare = ""
# Referencing global variables
global t_time
global t_size
# global time_ns3
# global size_ns3
if app_protocol == "tcp":
############################# SIZE #############################
# Open the .txt files
rd_size_ns3 = np.loadtxt("scratch/tcp_size.txt", usecols=0)
rd_tsval_ns3 = np.loadtxt("scratch/tcp_tsval.txt", usecols=0)
# print("Trace Size: ", t_size)
# Plot histogram of t_size:
# plt.hist(size_ns3)
# plt.title("Histogram of trace (size) in NS3")
# plt.show()
# With the help of the Pandas library we can compute some important statistics.
# size_ns3_df = pd.DataFrame(size_ns3, columns=['TSVAL','Size'])
size_ns3_df = pd.DataFrame(list(zip(rd_tsval_ns3,rd_size_ns3)), columns=['TSVAL','Size'])
size_ns3_df = size_ns3_df[size_ns3_df.Size != 0]
size_ns3_df = size_ns3_df.groupby("TSVAL").sum()
size_ns3_df["Size"] = | pd.to_numeric(size_ns3_df["Size"]) | pandas.to_numeric |
# -*- coding: utf-8 -*-
try:
import json
except ImportError:
import simplejson as json
import math
import pytz
import locale
import pytest
import time
import datetime
import calendar
import re
import decimal
import dateutil
from functools import partial
from pandas.compat import range, StringIO, u
from pandas._libs.tslib import Timestamp
import pandas._libs.json as ujson
import pandas.compat as compat
import numpy as np
from pandas import DataFrame, Series, Index, NaT, DatetimeIndex, date_range
import pandas.util.testing as tm
json_unicode = (json.dumps if compat.PY3
else partial(json.dumps, encoding="utf-8"))
def _clean_dict(d):
"""
Sanitize dictionary for JSON by converting all keys to strings.
Parameters
----------
d : dict
The dictionary to convert.
Returns
-------
cleaned_dict : dict
"""
return {str(k): v for k, v in compat.iteritems(d)}
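# For illustration (not an actual test case): _clean_dict({1: "a", (2, 3): "b"})
# returns {"1": "a", "(2, 3)": "b"}, which json.dumps can then serialize directly.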
@pytest.fixture(params=[
None, # Column indexed by default.
"split",
"records",
"values",
"index"])
def orient(request):
return request.param
@pytest.fixture(params=[None, True])
def numpy(request):
return request.param
class TestUltraJSONTests(object):
@pytest.mark.skipif(compat.is_platform_32bit(),
reason="not compliant on 32-bit, xref #15865")
def test_encode_decimal(self):
sut = decimal.Decimal("1337.1337")
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert decoded == 1337.1337
sut = decimal.Decimal("0.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.94")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "0.9"
decoded = ujson.decode(encoded)
assert decoded == 0.9
sut = decimal.Decimal("1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "2.0"
decoded = ujson.decode(encoded)
assert decoded == 2.0
sut = decimal.Decimal("-1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "-2.0"
decoded = ujson.decode(encoded)
assert decoded == -2.0
sut = decimal.Decimal("0.995")
encoded = ujson.encode(sut, double_precision=2)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.9995")
encoded = ujson.encode(sut, double_precision=3)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.99999999999999944")
encoded = ujson.encode(sut, double_precision=15)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
@pytest.mark.parametrize("ensure_ascii", [True, False])
def test_encode_string_conversion(self, ensure_ascii):
string_input = "A string \\ / \b \f \n \r \t </script> &"
not_html_encoded = ('"A string \\\\ \\/ \\b \\f \\n '
'\\r \\t <\\/script> &"')
html_encoded = ('"A string \\\\ \\/ \\b \\f \\n \\r \\t '
'\\u003c\\/script\\u003e \\u0026"')
def helper(expected_output, **encode_kwargs):
output = ujson.encode(string_input,
ensure_ascii=ensure_ascii,
**encode_kwargs)
assert output == expected_output
assert string_input == json.loads(output)
assert string_input == ujson.decode(output)
# Default behavior assumes encode_html_chars=False.
helper(not_html_encoded)
# Make sure explicit encode_html_chars=False works.
helper(not_html_encoded, encode_html_chars=False)
# Make sure explicit encode_html_chars=True does the encoding.
helper(html_encoded, encode_html_chars=True)
@pytest.mark.parametrize("long_number", [
-4342969734183514, -12345678901234.56789012, -528656961.4399388
])
def test_double_long_numbers(self, long_number):
sut = {u("a"): long_number}
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert sut == decoded
def test_encode_non_c_locale(self):
lc_category = locale.LC_NUMERIC
# We just need one of these locales to work.
for new_locale in ("it_IT.UTF-8", "Italian_Italy"):
if tm.can_set_locale(new_locale, lc_category):
with tm.set_locale(new_locale, lc_category):
assert ujson.loads(ujson.dumps(4.78e60)) == 4.78e60
assert ujson.loads("4.78", precise_float=True) == 4.78
break
def test_decimal_decode_test_precise(self):
sut = {u("a"): 4.56}
encoded = ujson.encode(sut)
decoded = ujson.decode(encoded, precise_float=True)
assert sut == decoded
@pytest.mark.skipif(compat.is_platform_windows() and not compat.PY3,
reason="buggy on win-64 for py2")
def test_encode_double_tiny_exponential(self):
num = 1e-40
assert num == ujson.decode(ujson.encode(num))
num = 1e-100
assert num == ujson.decode(ujson.encode(num))
num = -1e-45
assert num == ujson.decode(ujson.encode(num))
num = -1e-145
assert np.allclose(num, ujson.decode(ujson.encode(num)))
@pytest.mark.parametrize("unicode_key", [
u("key1"), u("بن")
])
def test_encode_dict_with_unicode_keys(self, unicode_key):
unicode_dict = {unicode_key: u("value1")}
assert unicode_dict == ujson.decode(ujson.encode(unicode_dict))
@pytest.mark.parametrize("double_input", [
math.pi,
-math.pi # Should work with negatives too.
])
def test_encode_double_conversion(self, double_input):
output = ujson.encode(double_input)
assert round(double_input, 5) == round(json.loads(output), 5)
assert round(double_input, 5) == round(ujson.decode(output), 5)
def test_encode_with_decimal(self):
decimal_input = 1.0
output = ujson.encode(decimal_input)
assert output == "1.0"
def test_encode_array_of_nested_arrays(self):
nested_input = [[[[]]]] * 20
output = ujson.encode(nested_input)
assert nested_input == json.loads(output)
assert nested_input == ujson.decode(output)
nested_input = np.array(nested_input)
tm.assert_numpy_array_equal(nested_input, ujson.decode(
output, numpy=True, dtype=nested_input.dtype))
def test_encode_array_of_doubles(self):
doubles_input = [31337.31337, 31337.31337,
31337.31337, 31337.31337] * 10
output = ujson.encode(doubles_input)
assert doubles_input == json.loads(output)
assert doubles_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(doubles_input),
ujson.decode(output, numpy=True))
def test_double_precision(self):
double_input = 30.012345678901234
output = ujson.encode(double_input, double_precision=15)
assert double_input == json.loads(output)
assert double_input == ujson.decode(output)
for double_precision in (3, 9):
output = ujson.encode(double_input,
double_precision=double_precision)
rounded_input = round(double_input, double_precision)
assert rounded_input == json.loads(output)
assert rounded_input == ujson.decode(output)
@pytest.mark.parametrize("invalid_val", [
20, -1, "9", None
])
def test_invalid_double_precision(self, invalid_val):
double_input = 30.12345678901234567890
expected_exception = (ValueError if isinstance(invalid_val, int)
else TypeError)
with pytest.raises(expected_exception):
ujson.encode(double_input, double_precision=invalid_val)
def test_encode_string_conversion2(self):
string_input = "A string \\ / \b \f \n \r \t"
output = ujson.encode(string_input)
assert string_input == json.loads(output)
assert string_input == ujson.decode(output)
assert output == '"A string \\\\ \\/ \\b \\f \\n \\r \\t"'
@pytest.mark.parametrize("unicode_input", [
"Räksmörgås اسامة بن محمد بن عوض بن لادن",
"\xe6\x97\xa5\xd1\x88"
])
def test_encode_unicode_conversion(self, unicode_input):
enc = ujson.encode(unicode_input)
dec = ujson.decode(enc)
assert enc == json_unicode(unicode_input)
assert dec == json.loads(enc)
def test_encode_control_escaping(self):
escaped_input = "\x19"
enc = ujson.encode(escaped_input)
dec = ujson.decode(enc)
assert escaped_input == dec
assert enc == json_unicode(escaped_input)
def test_encode_unicode_surrogate_pair(self):
surrogate_input = "\xf0\x90\x8d\x86"
enc = ujson.encode(surrogate_input)
dec = ujson.decode(enc)
assert enc == json_unicode(surrogate_input)
assert dec == json.loads(enc)
def test_encode_unicode_4bytes_utf8(self):
four_bytes_input = "\xf0\x91\x80\xb0TRAILINGNORMAL"
enc = ujson.encode(four_bytes_input)
dec = ujson.decode(enc)
assert enc == json_unicode(four_bytes_input)
assert dec == json.loads(enc)
def test_encode_unicode_4bytes_utf8highest(self):
four_bytes_input = "\xf3\xbf\xbf\xbfTRAILINGNORMAL"
enc = ujson.encode(four_bytes_input)
dec = ujson.decode(enc)
assert enc == json_unicode(four_bytes_input)
assert dec == json.loads(enc)
def test_encode_array_in_array(self):
arr_in_arr_input = [[[[]]]]
output = ujson.encode(arr_in_arr_input)
assert arr_in_arr_input == json.loads(output)
assert output == json.dumps(arr_in_arr_input)
assert arr_in_arr_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(arr_in_arr_input),
ujson.decode(output, numpy=True))
@pytest.mark.parametrize("num_input", [
31337,
-31337, # Negative number.
-9223372036854775808 # Large negative number.
])
def test_encode_num_conversion(self, num_input):
output = ujson.encode(num_input)
assert num_input == json.loads(output)
assert output == json.dumps(num_input)
assert num_input == ujson.decode(output)
def test_encode_list_conversion(self):
list_input = [1, 2, 3, 4]
output = ujson.encode(list_input)
assert list_input == json.loads(output)
assert list_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(list_input),
ujson.decode(output, numpy=True))
def test_encode_dict_conversion(self):
dict_input = {"k1": 1, "k2": 2, "k3": 3, "k4": 4}
output = ujson.encode(dict_input)
assert dict_input == json.loads(output)
assert dict_input == ujson.decode(output)
@pytest.mark.parametrize("builtin_value", [None, True, False])
def test_encode_builtin_values_conversion(self, builtin_value):
output = ujson.encode(builtin_value)
assert builtin_value == json.loads(output)
assert output == json.dumps(builtin_value)
assert builtin_value == ujson.decode(output)
def test_encode_datetime_conversion(self):
datetime_input = datetime.datetime.fromtimestamp(time.time())
output = ujson.encode(datetime_input, date_unit="s")
expected = calendar.timegm(datetime_input.utctimetuple())
assert int(expected) == json.loads(output)
assert int(expected) == ujson.decode(output)
def test_encode_date_conversion(self):
date_input = datetime.date.fromtimestamp(time.time())
output = ujson.encode(date_input, date_unit="s")
tup = (date_input.year, date_input.month, date_input.day, 0, 0, 0)
expected = calendar.timegm(tup)
assert int(expected) == json.loads(output)
assert int(expected) == ujson.decode(output)
@pytest.mark.parametrize("test", [
datetime.time(),
datetime.time(1, 2, 3),
datetime.time(10, 12, 15, 343243),
])
def test_encode_time_conversion_basic(self, test):
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
def test_encode_time_conversion_pytz(self):
# see gh-11473: to_json segfaults with timezone-aware datetimes
test = datetime.time(10, 12, 15, 343243, pytz.utc)
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
def test_encode_time_conversion_dateutil(self):
# see gh-11473: to_json segfaults with timezone-aware datetimes
test = datetime.time(10, 12, 15, 343243, dateutil.tz.tzutc())
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
@pytest.mark.parametrize("decoded_input", [
NaT,
np.datetime64("NaT"),
np.nan,
np.inf,
-np.inf
])
def test_encode_as_null(self, decoded_input):
assert ujson.encode(decoded_input) == "null", "Expected null"
def test_datetime_units(self):
val = datetime.datetime(2013, 8, 17, 21, 17, 12, 215504)
stamp = Timestamp(val)
roundtrip = ujson.decode(ujson.encode(val, date_unit='s'))
assert roundtrip == stamp.value // 10**9
roundtrip = ujson.decode(ujson.encode(val, date_unit='ms'))
assert roundtrip == stamp.value // 10**6
roundtrip = ujson.decode(ujson.encode(val, date_unit='us'))
assert roundtrip == stamp.value // 10**3
roundtrip = ujson.decode(ujson.encode(val, date_unit='ns'))
assert roundtrip == stamp.value
pytest.raises(ValueError, ujson.encode, val, date_unit='foo')
def test_encode_to_utf8(self):
unencoded = "\xe6\x97\xa5\xd1\x88"
enc = ujson.encode(unencoded, ensure_ascii=False)
dec = ujson.decode(enc)
assert enc == json_unicode(unencoded, ensure_ascii=False)
assert dec == json.loads(enc)
def test_decode_from_unicode(self):
unicode_input = u("{\"obj\": 31337}")
dec1 = ujson.decode(unicode_input)
dec2 = ujson.decode(str(unicode_input))
assert dec1 == dec2
def test_encode_recursion_max(self):
# 8 is the max recursion depth
class O2(object):
member = 0
pass
class O1(object):
member = 0
pass
decoded_input = O1()
decoded_input.member = O2()
decoded_input.member.member = decoded_input
with pytest.raises(OverflowError):
ujson.encode(decoded_input)
def test_decode_jibberish(self):
jibberish = "fdsa sda v9sa fdsa"
with pytest.raises(ValueError):
ujson.decode(jibberish)
@pytest.mark.parametrize("broken_json", [
"[", # Broken array start.
"{", # Broken object start.
"]", # Broken array end.
"}", # Broken object end.
])
def test_decode_broken_json(self, broken_json):
with pytest.raises(ValueError):
ujson.decode(broken_json)
@pytest.mark.parametrize("too_big_char", [
"[",
"{",
])
def test_decode_depth_too_big(self, too_big_char):
with pytest.raises(ValueError):
ujson.decode(too_big_char * (1024 * 1024))
@pytest.mark.parametrize("bad_string", [
"\"TESTING", # Unterminated.
"\"TESTING\\\"", # Unterminated escape.
"tru", # Broken True.
"fa", # Broken False.
"n", # Broken None.
])
def test_decode_bad_string(self, bad_string):
with pytest.raises(ValueError):
ujson.decode(bad_string)
@pytest.mark.parametrize("broken_json", [
'{{1337:""}}',
'{{"key":"}',
'[[[true',
])
def test_decode_broken_json_leak(self, broken_json):
for _ in range(1000):
with pytest.raises(ValueError):
ujson.decode(broken_json)
@pytest.mark.parametrize("invalid_dict", [
"{{{{31337}}}}", # No key.
"{{{{\"key\":}}}}", # No value.
"{{{{\"key\"}}}}", # No colon or value.
])
def test_decode_invalid_dict(self, invalid_dict):
with pytest.raises(ValueError):
ujson.decode(invalid_dict)
@pytest.mark.parametrize("numeric_int_as_str", [
"31337", "-31337" # Should work with negatives.
])
def test_decode_numeric_int(self, numeric_int_as_str):
assert int(numeric_int_as_str) == ujson.decode(numeric_int_as_str)
@pytest.mark.skipif(compat.PY3, reason="only PY2")
def test_encode_unicode_4bytes_utf8_fail(self):
with pytest.raises(OverflowError):
ujson.encode("\xfd\xbf\xbf\xbf\xbf\xbf")
def test_encode_null_character(self):
wrapped_input = "31337 \x00 1337"
output = ujson.encode(wrapped_input)
assert wrapped_input == json.loads(output)
assert output == json.dumps(wrapped_input)
assert wrapped_input == ujson.decode(output)
alone_input = "\x00"
output = ujson.encode(alone_input)
assert alone_input == json.loads(output)
assert output == json.dumps(alone_input)
assert alone_input == ujson.decode(output)
assert '" \\u0000\\r\\n "' == ujson.dumps(u(" \u0000\r\n "))
def test_decode_null_character(self):
wrapped_input = "\"31337 \\u0000 31337\""
assert ujson.decode(wrapped_input) == json.loads(wrapped_input)
def test_encode_list_long_conversion(self):
long_input = [9223372036854775807, 9223372036854775807,
9223372036854775807, 9223372036854775807,
9223372036854775807, 9223372036854775807]
output = ujson.encode(long_input)
assert long_input == json.loads(output)
assert long_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(long_input),
ujson.decode(output, numpy=True,
dtype=np.int64))
def test_encode_long_conversion(self):
long_input = 9223372036854775807
output = ujson.encode(long_input)
assert long_input == json.loads(output)
assert output == json.dumps(long_input)
assert long_input == ujson.decode(output)
@pytest.mark.parametrize("int_exp", [
"1337E40", "1.337E40", "1337E+9", "1.337e+40", "1.337E-4"
])
def test_decode_numeric_int_exp(self, int_exp):
assert ujson.decode(int_exp) == json.loads(int_exp)
def test_dump_to_file(self):
f = StringIO()
ujson.dump([1, 2, 3], f)
assert "[1,2,3]" == f.getvalue()
def test_dump_to_file_like(self):
class FileLike(object):
def __init__(self):
self.bytes = ''
def write(self, data_bytes):
self.bytes += data_bytes
f = FileLike()
ujson.dump([1, 2, 3], f)
assert "[1,2,3]" == f.bytes
def test_dump_file_args_error(self):
with pytest.raises(TypeError):
ujson.dump([], "")
#!/usr/bin/env python3
import sys
sys.path.extend(['.', '..'])
import argparse
import os
import gensim
import numpy as np
import pandas as pd
from scipy import stats
from Bio import pairwise2
import matplotlib.pyplot as plt
plt.style.use('seaborn-colorblind')
from dna2vec.multi_k_model import MultiKModel
# Helper functions
def get_mers(k, num_mers):
'''
This produces <num_mers> random sequences of length k.
'''
bases = ['A', 'T', 'C', 'G']
temp = np.random.choice(bases, size=(num_mers, k))
return [''.join(x) for x in temp]
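# Illustrative example (output is random, so the exact strings vary):
# get_mers(3, 2) could return something like ['ATG', 'CCA'] -- two 3-mers drawn
# uniformly over the four bases.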
def most_similar(in_model, vocab,
same_k=True, is_vector=False,
topn=10):
# Note this only works for returning k-mers of the same length.
if same_k:
return in_model.data[len(vocab)].model.most_similar(vocab, topn=topn)
else:
# Make vector representation of what we're searching for if needed.
if is_vector == False:
vec = in_model.vector(vocab)
else:
vec = vocab
# These are for sorting the output.
dtype = [('kmer', 'S10'), ('cosine', float)]
scores = []
for model_wrap in in_model.data.values():
temp = model_wrap.model.similar_by_vector(vec, topn=topn)
scores.append(np.array(temp, dtype=dtype))
return np.sort(np.concatenate(scores, axis=0), axis=0, order='cosine')[:-(topn + 1):-1]
# Experiments in preprint
def cosine_alignment_experiment(num_mers=10000):
test_mers0 = get_mers(7, num_mers)
test_mers1 = get_mers(7, num_mers)
ts = []
for i in range(len(test_mers0)):
cos = (mk_model.cosine_distance(test_mers0[i], test_mers1[i]))
align_sim = pairwise2.align.globalxx(test_mers0[i], test_mers1[i], score_only=True)
ts.append([cos, align_sim])
ts = np.array(ts)
cor, pval = stats.spearmanr(ts[:, 1], ts[:, 0])
return ts, cor, pval
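# Illustrative usage sketch (assumes mk_model is a MultiKModel loaded elsewhere in
# the script, e.g. mk_model = MultiKModel(<path to pretrained dna2vec .w2v file>);
# the path is a placeholder):
#
#   ts, cor, pval = cosine_alignment_experiment(num_mers=1000)
#   print('Spearman correlation: %.3f (p = %.2e)' % (cor, pval))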
def arithmetic_experiment(operands=[(3,3), (4,4)],
n_nns=[1, 5, 10],
concatenation='weak',
samples=1000,
use_random_snippet=False):
"""
Searches nearest neighbors of a vector made by concatenating i-mer + j-mer
with the i-mer + j-mer as a concatenated string.
WARNING: As currently implemented this is walk-away-from-your-computer slow for ~1000 samples.
:operands: List of (i,j) k-mers to use
:n_nns: Number of nearest neighbors to search
:samples: Number of searches to perform
:concatenation: Concatenation style. Weak concatenation is order independent. Strong concatenation is in order i->j.
:use_random_snippet: (CHANGES OUTPUT) Also report the result of a random snippet replacing the j-mer.
"""
results_arithmetic = {}
max_topn = np.max(n_nns)
# Short helper function for comparisons
def if_concatenation_is_nn(L_0, L_1, nn_list, concatenation=concatenation):
"""
Returns True if a concatenation of two strings in the list of nearest neighbors
"""
if concatenation == 'strong':
concat_query = (L_0 + L_1).encode("utf-8")
if concat_query in nn_list:
is_nn = True
else:
is_nn = False
elif concatenation == 'weak':
concat_queries = ((L_0 + L_1).encode("utf-8"), (L_1 + L_0).encode("utf-8"))
if len(set(concat_queries).intersection(set(nn_list))) > 0:
is_nn = True
else:
is_nn = False
return is_nn
# Run experiment
for l_operands in operands:
# Initialize data
matches = np.zeros((samples, len(n_nns)))
if use_random_snippet:
snippet_matches = np.zeros((samples, len(n_nns)))
for s in range(samples):
# Make k-mers (equal length)
L_0 = get_mers(k=l_operands[0], num_mers=1)[0]
L_1 = get_mers(k=l_operands[1], num_mers=1)[0]
snippet = get_mers(k=l_operands[1], num_mers=1)[0]
# Generate vector
query_vec = mk_model.vector(L_0) + mk_model.vector(L_1)
# Get top N nearest neighbors of vector
nns = most_similar(mk_model, query_vec, is_vector=True, same_k=False, topn=max_topn)
nn_list = pd.DataFrame(nns)['kmer'].tolist()
for n, topn in enumerate(n_nns):
# Is query string among those top N nearest neighbors of vector?
matches[s, n] = if_concatenation_is_nn(L_0, L_1, nn_list[0:topn], concatenation=concatenation)
if use_random_snippet:
snippet_matches[s, n] = if_concatenation_is_nn(L_0, snippet, nn_list[0:topn], concatenation=concatenation)
matching_fractions = (pd.DataFrame(matches).sum() / pd.DataFrame(matches).shape[0])  # fraction of hits per topn (denominator assumed to be the sample count)
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
# the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categoricals with the same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categoricals with the same ordering information can be compared
cat_unorderd = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unorderd
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both directions, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that unequal comparisons take the categories order into
# account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# unicode option should not affect to Categorical, as it doesn't care
# the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assigments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
# deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
c = c.set_categories(
[4, 3, 2, 1
]) # all "pointers" to '4' must be changed from 3 to 0,...
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3])
) # positions are changed
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1])
) # categories are now in new order
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1])
) # output is the same
self.assertTrue(c.min(), 4)
self.assertTrue(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1, 2, 3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
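# Illustrative note (not part of the original test): with object-dtype
# categories, memory_usage(deep=True) also counts the per-string payload of
# each category, so it exceeds nbytes; sys.getsizeof(cat) is expected to
# delegate to memory_usage(deep=True) and add a small GC-related per-object
# overhead, which is why the check above only asserts the difference stays
# under 100 bytes rather than exact equality.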
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of single item array
# Categorical returns an np.array, like pd.Series does, which differs from
# np.ndarray.searchsorted()
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
exp = np.array([3, 5]
) # eggs after donuts, after switching milk and donuts
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
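# Illustrative note (not part of the original test): `sorter` is the
# permutation that would sort the underlying values, so with
# sorter=[0, 1, 2, 3, 5, 4] the search runs against
# ['apple', 'bread', 'bread', 'cheese', 'donuts', 'milk'], and the returned
# positions [3, 5] refer to slots in that sorted view, not the original order.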
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017,
# whichever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
# TODO: levels is deprecated and should be removed in 0.18 or 2017,
# whichever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0, 1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# comparisons with scalars that are not in the categories should raise
# for ordering comparisons (<, >), but not for equality/inequality
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4, [False, False, False])
self.assert_numpy_array_equal(cat != 4, [True, True, True])
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c',
'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False,
labels=labels)
self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000)])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
self.assertTrue(result.codes.dtype == 'int8')
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype, 'category')
self.assertEqual(len(s), len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': self.factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# GH8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name
) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result, expected)
result = x.person_name[0]
self.assertEqual(result, expected)
result = x.person_name.loc[0]
self.assertEqual(result, expected)
def test_creation_astype(self):
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1, 2, 3, 1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(
l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
def test_construction_series(self):
l = [1, 2, 3, 1]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
l = ["a", "b", "c", "a"]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
# This is a test file intended to be used with pytest
# pytest automatically runs all the function starting with "test_"
# see https://docs.pytest.org for more information
import os
import sys
import numpy as np
import pandas as pd
## Add stuff to the path to enable exec outside of DSS
plugin_root = os.path.dirname(os.path.dirname(os.path.dirname((os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))))
sys.path.append(os.path.join(plugin_root, 'python-lib'))
import dku_timeseries
JUST_BEFORE_SPRING_DST = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
JUST_BEFORE_FALL_DST = pd.Timestamp('20191027 02:59:00').tz_localize('CET',
ambiguous=True) # It's ambiguous because there are 2 instants with these dates! We select the first
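# Illustrative note (not part of the original fixtures): when DST ends, the
# wall-clock hour repeats, so tz_localize raises an AmbiguousTimeError unless
# told which occurrence to pick. A minimal sketch of the two choices:
#   pd.Timestamp('20191027 02:59:00').tz_localize('CET', ambiguous=True)   # first pass, still on summer time (UTC+2)
#   pd.Timestamp('20191027 02:59:00').tz_localize('CET', ambiguous=False)  # second pass, back on standard time (UTC+1)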
TIME_COL = 'time_col'
DATA_COL = 'data_col'
GROUP_COL = 'group_col'
### Helpers to create test data, should be fixtures at some point I guess
def _make_df_with_one_col(column_data, period=pd.DateOffset(seconds=1), start_time=JUST_BEFORE_SPRING_DST):
# build a regularly spaced datetime index covering the test data
time = pd.date_range(start_time, None, len(column_data), period)
df = pd.DataFrame({TIME_COL: time, DATA_COL: column_data})
return df
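# Usage sketch (not part of the original helpers): _make_df_with_one_col([1, 2, 3])
# yields a 3-row frame whose TIME_COL is spaced one second apart starting at
# JUST_BEFORE_SPRING_DST, with DATA_COL holding the supplied values.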
def _make_window_aggregator_params():
params = dku_timeseries.WindowAggregatorParams(window_width=3)
return params
def _make_window_aggregator():
params = _make_window_aggregator_params()
return dku_timeseries.WindowAggregator(params)
def _make_extrema_extraction_params():
window = _make_window_aggregator()
params = dku_timeseries.ExtremaExtractorParams(window)
return params
def _make_extrema_extractor():
params = _make_extrema_extraction_params()
return dku_timeseries.ExtremaExtractor(params)
### Test cases
class TestExtremaExtraction:
def test_empty_df(self):
df = _make_df_with_one_col([])
extrema_extractor = _make_extrema_extractor()
output_df = extrema_extractor.compute(df, TIME_COL, DATA_COL, [GROUP_COL])
assert output_df.shape == (0, 2)
def test_single_row_df(self):
df = _make_df_with_one_col([33])
extrema_extractor = _make_extrema_extractor()
output_df = extrema_extractor.compute(df, TIME_COL, DATA_COL, [GROUP_COL])
assert output_df.shape == (1, 2)
assert output_df[DATA_COL][0] == df[DATA_COL][0]
def test_incremental_df(self):
length = 100
data = [x for x in range(length)]
df = _make_df_with_one_col(data)
print(df.shape)
extrema_extractor = _make_extrema_extractor()
output_df = extrema_extractor.compute(df, TIME_COL, DATA_COL)
assert (output_df[DATA_COL][0]) == 99
assert (output_df[DATA_COL + '_min'][0]) == 96 # window width = 3
def test_extrema_without_neighbors(self):
length = 100
data = [x for x in range(length)]
df = _make_df_with_one_col(data)
window_aggregator = dku_timeseries.WindowAggregator(dku_timeseries.WindowAggregatorParams(window_unit='milliseconds'))
params = dku_timeseries.ExtremaExtractorParams(window_aggregator=window_aggregator)
extrema_extractor = dku_timeseries.ExtremaExtractor(params)
output_df = extrema_extractor.compute(df, TIME_COL, DATA_COL)
# output only has the TIME_COL and DATA_COL of the extremum; no neighbour stats because the window has no neighbors
assert output_df.shape == (1, 2)
assert output_df[DATA_COL][0] == 99
def test_group_extrema_without_neighbors(self):
start_time_1 = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
start_time_2 = pd.Timestamp('20190131 02:00:00').tz_localize('CET')
start_time_list = [start_time_1, start_time_2]
len1 = 100
len2 = 10
data1 = range(len1)
data2 = range(len2)
data_list = [data1, data2]
period1 = pd.DateOffset(seconds=1)
period2 = pd.DateOffset(seconds=1)
period_list = [period1, period2]
df_list = []
for group_id, data, period, start_time in zip(range(len(data_list)), data_list, period_list, start_time_list):
group_name = 'group_{}'.format(group_id)
temp_df = _make_df_with_one_col(data, period=period, start_time=start_time)
temp_df[GROUP_COL] = group_name
df_list.append(temp_df)
df = pd.concat(df_list, axis=0)
window_aggregator = dku_timeseries.WindowAggregator(dku_timeseries.WindowAggregatorParams(window_unit='milliseconds'))
params = dku_timeseries.ExtremaExtractorParams(window_aggregator=window_aggregator)
extrema_extractor = dku_timeseries.ExtremaExtractor(params)
output_df = extrema_extractor.compute(df, TIME_COL, DATA_COL, groupby_columns=[GROUP_COL])
assert output_df.shape == (2, 3)
assert np.array_equal(output_df[DATA_COL], [99, 9])
def test_incremental_group_df(self):
start_time_1 = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
start_time_2 = pd.Timestamp('20190131 02:00:00').tz_localize('CET')
start_time_list = [start_time_1, start_time_2]
len1 = 100
len2 = 10
data1 = range(len1)
data2 = range(len2)
data_list = [data1, data2]
period1 = pd.DateOffset(seconds=1)
period2 = pd.DateOffset(seconds=1)
period_list = [period1, period2]
df_list = []
for group_id, data, period, start_time in zip(range(len(data_list)), data_list, period_list, start_time_list):
group_name = 'group_{}'.format(group_id)
temp_df = _make_df_with_one_col(data, period=period, start_time=start_time)
temp_df[GROUP_COL] = group_name
df_list.append(temp_df)
df = pd.concat(df_list, axis=0)
# -*- coding: utf-8 -*-
"""Find hydrated waters in structure."""
# standard library imports
from pathlib import Path
from typing import List
from typing import Optional
from typing import Tuple
# 3rd-party imports
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from loguru import logger
from statsdict import Stat
# module imports
from . import NAME
from . import VERSION
from .common import APP
from .common import STATS
# global constants
ATOM_REC: str = "ATOM"
ATOM_START_POS: int = 13
ATOM_STOP_POS: int = 15
ATOMS: Tuple[str, ...] = ("CA",)
B_FACTOR_START: int = 61
B_FACTOR_STOP: int = 65
REC_TYPE_START: int = 0
REC_TYPE_STOP: int = 4
DEFAULT_MIN_LENGTH = 20
DEFAULT_MIN_COUNT = 20
DEFAULT_PLDDT_LOWER_BOUND = 80
DEFAULT_PLDDT_UPPER_BOUND = 100
DEFAULT_PLDDT_CRITERION = 91.2
DEFAULT_LDDT_CRITERION = 0.8
DEFAULT_OUT_FILE_TYPE = "png"
DEFAULT_RESIDUE_CRITERION = 80
CRITERION_TYPE = "median"
MODULE_NAME = __name__.split(".")[0]
EMPTY_PATH = Path()
def bin_labels(bin_type, lower_bound, upper_bound=DEFAULT_PLDDT_UPPER_BOUND):
"""Create labels for bins of different quantities."""
if upper_bound == DEFAULT_PLDDT_UPPER_BOUND:
upper_label = ""
else:
upper_label = f"_{upper_bound}"
return f"pLDDT{lower_bound}{upper_label}_{bin_type}"
def extract_b_factors(file_path: Path) -> List[float]:
"""Return an array of B factors from a PDB file specified by file_path."""
if not file_path.exists():
raise ValueError(f"PDB file {file_path} does not exist")
with file_path.open("rU") as f:
# parse b_factors out of PDB file
b_factor_list = [
float(rec[B_FACTOR_START:B_FACTOR_STOP])
for rec in f.readlines()
if (
(len(rec) > B_FACTOR_STOP)
and (rec[REC_TYPE_START:REC_TYPE_STOP] == ATOM_REC)
and (rec[ATOM_START_POS:ATOM_STOP_POS] in ATOMS)
)
]
return b_factor_list
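# Illustrative note (an assumption, not stated in the original module): the
# slices above follow the fixed-column PDB ATOM record layout, where the atom
# name sits near columns 13-16 and the B-factor field near columns 61-66.
# AlphaFold-style models store the per-residue pLDDT in that B-factor field,
# which is why reading the B factors of CA atoms yields one pLDDT per residue.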
def compute_plddt_stats(
file_path,
lower_bound=DEFAULT_PLDDT_LOWER_BOUND,
min_count=DEFAULT_MIN_COUNT,
min_length=DEFAULT_MIN_LENGTH,
upper_bound=DEFAULT_PLDDT_UPPER_BOUND,
):
"""Compute stats on pLDDTs for a PDB file specified by file_path."""
plddts = np.array(extract_b_factors(file_path))
n_pts = len(plddts)
mean = np.nan
median = np.nan
n_trunc_obs = np.nan
trunc_mean = np.nan
trunc_median = np.nan
trunc_frac = np.nan
if n_pts >= min_length:
mean = plddts.mean().round(2)
median = np.median(plddts).round(2)
obs = plddts[(plddts >= lower_bound) &
(plddts <= upper_bound)]
n_trunc_obs = len(obs)
if len(obs) >= min_count:
trunc_mean = obs.mean().round(2)
trunc_median = np.median(obs).round(2)
trunc_frac = round(n_trunc_obs / n_pts, 2)
return (
n_pts,
mean,
median,
n_trunc_obs,
trunc_frac,
trunc_mean,
trunc_median,
str(file_path),
)
@APP.command()
@STATS.auto_save_and_report
def plddt_stats(
pdb_list: Optional[List[Path]],
criterion: Optional[float] = DEFAULT_PLDDT_CRITERION,
min_length: Optional[int] = DEFAULT_MIN_LENGTH,
min_count: Optional[int] = DEFAULT_MIN_COUNT,
lower_bound: Optional[int] = DEFAULT_PLDDT_LOWER_BOUND,
upper_bound: Optional[int] = DEFAULT_PLDDT_UPPER_BOUND,
file_stem: Optional[str] = MODULE_NAME,
) -> None:
"""Calculate stats on bounded pLDDTs from list of PDB model files."""
results = []
criterion_label = bin_labels(CRITERION_TYPE, lower_bound, upper_bound)
stats_file_path = Path(f"{file_stem}_plddt_stats.tsv")
n_models_in = len(pdb_list)
STATS["models_in"] = Stat(n_models_in, desc="models read in")
STATS["min_length"] = Stat(min_length, desc="minimum sequence length")
STATS["min_count"] = Stat(
min_length, desc="minimum # of selected residues"
)
STATS["plddt_lower_bound"] = Stat(
lower_bound, desc="minimum bound per-residue"
)
STATS["plddt_upper_bound"] = Stat(
upper_bound, desc="maximum bound per-residue"
)
STATS["plddt_criterion"] = Stat(
criterion, desc=f"minimum bounded {CRITERION_TYPE} for selection"
)
for file_path in pdb_list:
results.append(
compute_plddt_stats(
file_path,
lower_bound=lower_bound,
min_count=min_count,
min_length=min_length,
upper_bound=upper_bound,
)
)
stats = pd.DataFrame(
results,
columns=(
[
"residues_in_pLDDT",
"pLDDT_mean",
"pLDDT_median",
bin_labels("count", lower_bound, upper_bound),
bin_labels("frac", lower_bound, upper_bound),
bin_labels("mean", lower_bound, upper_bound),
criterion_label,
"file",
]
),
)
logger.info(f"Writing stats to {stats_file_path}")
stats.sort_values(by=criterion_label, inplace=True, ascending=False)
stats = stats.reset_index()
stats.index.name = f"{NAME}-{VERSION}"
del stats["index"]
if ((lower_bound == DEFAULT_PLDDT_LOWER_BOUND) and
(upper_bound == DEFAULT_PLDDT_UPPER_BOUND)):
file_col = stats["file"]
del stats["file"]
stats["LDDT_expect"] = (
1.0
- (
(1.0 - (stats[criterion_label] / 100.0))
* (1.0 - DEFAULT_LDDT_CRITERION)
/ (1.0 - DEFAULT_PLDDT_CRITERION / 100.0)
)
).round(3)
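# Illustrative note (not in the original source): this is a linear rescaling of
# the bounded pLDDT median onto an expected LDDT, anchored so that a median of
# DEFAULT_PLDDT_CRITERION (91.2) maps to DEFAULT_LDDT_CRITERION (0.8) and a
# median of 100 maps to 1.0:
#   LDDT_expect = 1 - (1 - pLDDT/100) * (1 - 0.8) / (1 - 0.912)
# Worked example: a bounded median of 95 gives 1 - 0.05 * 0.2 / 0.088 ~= 0.886.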
stats["passing"] = (stats["LDDT_expect"] >= DEFAULT_LDDT_CRITERION)
stats["file"] = file_col
stats.to_csv(stats_file_path, sep="\t")
total_residues = int(stats["residues_in_pLDDT"].sum())
STATS["total_residues"] = Stat(
total_residues, desc="number of residues in all models"
)
selected_stats = stats[
stats[criterion_label] >= criterion
]
n_models_selected = len(selected_stats)
frac_models_selected = round(n_models_selected * 100.0 / n_models_in, 0)
STATS["models_selected"] = Stat(
n_models_selected,
desc=f"models passing {criterion_label}>={criterion}",
)
STATS["model_selection_pct"] = Stat(
frac_models_selected, desc="fraction of models passing, %"
)
selected_residues = int(selected_stats["residues_in_pLDDT"].sum())
STATS["selected_residues"] = Stat(
selected_residues, desc="residues in passing models"
)
@APP.command()
def plddt_select_residues(
criterion: Optional[float] = DEFAULT_PLDDT_CRITERION,
min_length: Optional[int] = DEFAULT_MIN_LENGTH,
min_count: Optional[int] = DEFAULT_MIN_COUNT,
lower_bound: Optional[int] = DEFAULT_PLDDT_LOWER_BOUND,
upper_bound: Optional[int] = DEFAULT_PLDDT_UPPER_BOUND,
file_stem: Optional[str] = MODULE_NAME,
) -> None:
"""Select residues from files matching criterion."""
stats_file_path = Path(f"{file_stem}_plddt_stats.tsv")
stats = pd.read_csv(stats_file_path, sep="\t")
criterion_label = bin_labels(CRITERION_TYPE, lower_bound, upper_bound)
count_label = bin_labels("count", lower_bound, upper_bound)
plddt_list = []
residue_list = []
file_list = []
for row_num, row in stats.iterrows():
if (
(row["residues_in_pLDDT"] >= min_length)
and (row[criterion_label] >= criterion)
and (row[count_label] >= min_count)
):
plddts = extract_b_factors(Path(row["file"]))
n_res = len(plddts)
plddt_list += plddts
residue_list += [i for i in range(n_res)]
file_list += [row["file"]] * n_res
df = pd.DataFrame(
{"file": file_list, "residue": residue_list, "pLDDT": plddt_list}
)
out_file_path = Path(f"{file_stem}_plddt{lower_bound}_{criterion}.tsv")
logger.info(f"Writing residue file {out_file_path}")
df.to_csv(out_file_path, sep="\t")
@APP.command()
@STATS.auto_save_and_report
def plddt_plot_dists(
criterion: Optional[float] = DEFAULT_PLDDT_CRITERION,
lower_bound: Optional[int] = DEFAULT_PLDDT_LOWER_BOUND,
upper_bound: Optional[int] = DEFAULT_PLDDT_UPPER_BOUND,
file_stem: Optional[str] = MODULE_NAME,
out_file_type: Optional[str] = DEFAULT_OUT_FILE_TYPE,
residue_criterion: Optional[int] = DEFAULT_RESIDUE_CRITERION
) -> None:
"""Plot histograms of per-model and per-residue pLDDT distributions."""
stats_file_path = Path(f"{file_stem}_plddt_stats.tsv")
res_file_path = Path(f"{file_stem}_plddt{lower_bound}_{criterion}.tsv")
fig_file_path = Path(f"{file_stem}_dists.{out_file_type}")
per_model = pd.read_csv(stats_file_path, sep="\t")
per_model = per_model.fillna(0.0)
criterion_label = bin_labels(CRITERION_TYPE, lower_bound, upper_bound)
x_axis = r"$pLDDT$"
plddt_col = bin_labels(CRITERION_TYPE, lower_bound, upper_bound)
per_model[x_axis] = per_model[plddt_col]
select_residues = pd.read_csv(res_file_path, sep="\t")
from unittest.case import TestCase
from pandas import Series
from probability.distributions import Multinomial, Binomial
class TestMultinomial(TestCase):
def setUp(self) -> None:
self.p = Series({'a': 0.4, 'b': 0.3, 'c': 0.2, 'd': 0.1})
self.m_array = Multinomial(n=10, p=self.p.values)
self.m_series = Multinomial(n=10, p=self.p)
self.m_dict = Multinomial(n=10, p=self.p.to_dict())
def test_init_with_array(self):
expected = Series({'p1': 0.4, 'p2': 0.3, 'p3': 0.2, 'p4': 0.1})
actual = self.m_array.p
self.assertTrue(expected.equals(actual))
def test_init_with_series(self):
expected = self.p
actual = self.m_series.p
self.assertTrue(expected.equals(actual))
def test_init_with_dict(self):
expected = self.p
actual = self.m_dict.p
self.assertTrue(expected.equals(actual))
def test_set_alpha_with_array(self):
m = Multinomial(n=10, p=[0.1, 0.2, 0.3, 0.4])
expected = Series({'p1': 0.4, 'p2': 0.3, 'p3': 0.2, 'p4': 0.1})