import sys
import warnings
import importlib
import venusian
import pandas as pd
class PluginScanner(venusian.Scanner):
def __init__(self):
self._registry = pd.DataFrame()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 2 14:26:34 2021
@author: hagen
"""
import pandas as pd
import socket
import pops_gui.data
import serial
class Listen2UDP(object):
def __init__(self,
port = 10080,
verbose = False,
controller = None,
test = False):
self.controller = controller
self.port = port
self._verbose = verbose
self.test = test
self._socket_inbound = None
self.data = pd.DataFrame()
# AUTOGENERATED! DO NOT EDIT! File to edit: 02_WRMSSE_metric.ipynb (unless otherwise specified).
__all__ = ['get_agg', 'get_df_weights', 'combine_cols', 'append_df_unique_id', 'WRMSSE']
# Cell
#export
import os
from time import time
import gc
import pandas as pd
import numpy as np
from scipy.sparse import csr_matrix
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
plt.rcParams['figure.figsize'] = (14,6)
plt.rcParams['font.size'] = 16
# Cell
def get_agg(df_stv):
"""Gets a sparse aggregaion matrix and index to align weights and scales."""
# Take the transpose of each dummy matrix to correctly orient the matrix
dummy_frames = [
pd.DataFrame({'Total': np.ones((df_stv.shape[0],)).astype('int8')}, index=df_stv.index).T,
pd.get_dummies(df_stv.state_id, dtype=np.int8).T,
pd.get_dummies(df_stv.store_id, dtype=np.int8).T,
pd.get_dummies(df_stv.cat_id, dtype=np.int8).T,
pd.get_dummies(df_stv.dept_id, dtype=np.int8).T,
pd.get_dummies(df_stv.state_id + '_' + df_stv.cat_id, dtype=np.int8).T,
pd.get_dummies(df_stv.state_id + '_' + df_stv.dept_id, dtype=np.int8).T,
pd.get_dummies(df_stv.store_id + '_' + df_stv.cat_id, dtype=np.int8).T,
pd.get_dummies(df_stv.store_id + '_' + df_stv.dept_id, dtype=np.int8).T,
pd.get_dummies(df_stv.item_id, dtype=np.int8).T,
pd.get_dummies(df_stv.item_id + '_' + df_stv.state_id, dtype=np.int8).T,
pd.get_dummies(df_stv.item_id + '_' + df_stv.store_id, dtype=np.int8).T
]
agg_matrix = pd.concat(dummy_frames, keys=range(1,13), names=['level', 'id'])
# Save the index for later use
agg_index = agg_matrix.index
# Sparse format will save space and calculation time
agg_matrix_csr = csr_matrix(agg_matrix)
return agg_matrix_csr, agg_index
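# --- Illustrative sketch (added example, not part of the exported module) ---
# With a toy frame shaped like sales_train_evaluation, the sparse matrix returned by
# get_agg() rolls the item/store-level series up to every aggregation level in one product.
def _example_get_agg():
    toy = pd.DataFrame({
        'item_id': ['FOODS_1_001', 'FOODS_1_001', 'HOBBIES_1_001'],
        'dept_id': ['FOODS_1', 'FOODS_1', 'HOBBIES_1'],
        'cat_id': ['FOODS', 'FOODS', 'HOBBIES'],
        'store_id': ['CA_1', 'TX_1', 'CA_1'],
        'state_id': ['CA', 'TX', 'CA'],
        'd_1': [3, 1, 2], 'd_2': [0, 4, 1],
    })
    agg_csr, agg_idx = get_agg(toy)
    # One row per aggregated series; the 'Total' row equals the column sums of d_1/d_2.
    return pd.DataFrame(agg_csr * toy[['d_1', 'd_2']].values, index=agg_idx)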
# Cell
def get_df_weights(df_stv, df_cal, df_prices, agg_index, agg_matrix_csr, start_test=1914):
"""Returns the weight, scale, and scaled weight of all series,
in a dataframe aligned with the agg_index, created in get_agg()
##### Weights steps
We need to convert the sales data into dollar sales
data so that we can correctly weight each series.
To begin, we consider only the last 28 days of
data before START_TEST. We then put the data into
"long" format so we can merge the calendar
and price information.
Now we will get the total dollar sales for each
item/store combination. Be sure to set sort=False
so that our index stays in the proper order.
We don't need df anymore
We want to build a weight, scales,
and scaled weight columns
that are aligned with agg_index. We
will divide dollar_sales by the total
dollar sales to get the weight W
for each series. We don't need dollar_sales anymore.
##### Scaling factor steps
We also need to calculate each series' scaling factor S,
which is the denominator in the WRMSSE calculation. It can
be pulled out of the square root and combined with the
series weight to make a single weight W/sqrt(S),
simplifying our calculations a bit.
S is the average squared day-to-day difference in sales
for a series, excluding leading zeros, over all training
days leading up to START_TEST.
Aggregate all series, and replace leading
zeros with np.nan so that we can do numpy calculations
that will ignore the np.nan.
Now we can finish our weights and scales dataframe by
adding scale and scaled_weight columns.
"""
d_cols = [f'd_{i}' for i in range(start_test - 28, start_test)]
df = df_stv[['store_id', 'item_id'] + d_cols]
df = df.melt(id_vars=['store_id', 'item_id'],
var_name='d',
value_name = 'sales')
df = df.merge(df_cal[['d', 'wm_yr_wk']], on='d', how='left')
df = df.merge(df_prices, on=['store_id', 'item_id', 'wm_yr_wk'], how='left')
df['dollar_sales'] = df.sales * df.sell_price
# Now we will get the total dollar sales
dollar_sales = df.groupby(['store_id', 'item_id'], sort=False)['dollar_sales'].sum()
del df
# Build a weight, scales, and scaled weight columns
# that are aligned with agg_index.
df_weights = pd.DataFrame(index = agg_index)
df_weights['dollar_sales'] = agg_matrix_csr * dollar_sales
df_weights['weight'] = df_weights.dollar_sales / df_weights.dollar_sales.values[0]
del df_weights['dollar_sales']
##################### Scaling factor #######################
df = df_stv.loc[:, :f'd_{start_test - 1}'].iloc[:, 6:]
agg_series = agg_matrix_csr * df.values
no_sale = np.cumsum(agg_series, axis=1) == 0
agg_series = np.where(no_sale, np.nan, agg_series)
scale = np.nanmean(np.diff(agg_series, axis=1) ** 2, axis=1)
df_weights['scale'] = 1 / np.sqrt(scale)
df_weights['scaled_weight'] = df_weights.weight * df_weights.scale
return df_weights
# Cell
def combine_cols(df, cols: list, sep='__', name='id', reverse=False):
"""Returns a copy of `df` with `cols` combined into a single coloumn `name`,
separated by `sep`, or with the `name` column expanded into `cols` if `reverse` is True."""
df = df.copy()
if reverse:
final_cols = cols + df.drop(name, axis=1).columns.tolist()
df[cols] = df[name].str.split(sep).tolist()
else:
final_cols = [name] + df.drop(cols, axis=1).columns.tolist()
df[name] = df[cols].astype(str).apply(sep.join, axis=1)
return df[final_cols]
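# Illustrative sketch (added example): combine_cols round-trips id columns, which is how
# the scores log below stores model_name/level/start_test in a single 'id' column.
def _example_combine_cols():
    df = pd.DataFrame({'store_id': ['CA_1', 'TX_2'],
                       'item_id': ['FOODS_1_001', 'HOBBIES_1_001'],
                       'score': [0.5, 0.7]})
    combined = combine_cols(df, ['store_id', 'item_id'])             # columns: id, score
    restored = combine_cols(combined, ['store_id', 'item_id'], reverse=True)
    return combined, restored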
# Cell
def append_df_unique_id(df, df_new, id_col='id') -> pd.DataFrame:
"""Returns a copy of df with df_new appended to it with '(n)_'
prepended to the id_col if the new column value is already in
the original df. This is used to track scores and ensure there
are not copies of a unique identifier.
`id_col` should be of string type.
"""
if not (id_col in df.columns and id_col in df_new.columns):
return df.append(df_new) # No issues
df = df.copy()
df_new = df_new.copy()
ids = df[id_col].tolist()
new_id = df_new[id_col][0]
if new_id in ids:
x = 1
while f'({x})_' + new_id in ids:
x += 1
new_id = f'({x})_' + new_id
df_new[id_col] = f'({x})_' + df_new[id_col]
return df.append(df_new)
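# Illustrative sketch (added example): appending a run whose id already exists gets a
# '(1)_' prefix, so repeated scoring runs never overwrite each other in the log.
def _example_append_df_unique_id():
    scores = pd.DataFrame({'id': ['lgbm__1__1914'], 'WRMSSE': [0.62]})
    new = pd.DataFrame({'id': ['lgbm__1__1914'], 'WRMSSE': [0.60]})
    return append_df_unique_id(scores, new)  # second row id becomes '(1)_lgbm__1__1914'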
# Cell
class WRMSSE():
def __init__(self, PATH_DATA_RAW: str='data/raw', start_test: int=1914, horizon: int=28, df_stv_trunc: pd.DataFrame=None):
"""The main object that will hold data, weights and scales which are
associated with the forecast horizon starting on `start_test`,
extending horizon `days`.
"""
if type(df_stv_trunc) == pd.DataFrame: # Provided after filtering out certain items
self.df_stv = df_stv_trunc
else:
self.df_stv = pd.read_csv(os.path.join(PATH_DATA_RAW, 'sales_train_evaluation.csv'))
self.df_cal = pd.read_csv(os.path.join(PATH_DATA_RAW, 'calendar.csv'))
self.df_prices = pd.read_csv(os.path.join(PATH_DATA_RAW, 'sell_prices.csv'))
self.df_ss = pd.read_csv(os.path.join(PATH_DATA_RAW, 'sample_submission.csv'))
self.start_test = start_test
self.end_test = start_test + horizon - 1
self.preds, self.actuals = None, None
self.df_series_scores, self.model_name = None, None
path = os.path.join(PATH_DATA_RAW, '..', 'scores.csv')
if os.path.exists(path):
self.scores = pd.read_csv(path)
else:
self.scores = pd.DataFrame()
if f'd_{self.end_test}' in self.df_stv.columns:
self.actuals = self.df_stv.loc[:, f'd_{start_test}': f'd_{self.end_test}'].values
self.agg_matrix_csr, self.agg_index = get_agg(self.df_stv)
self.df_weights = get_df_weights(self.df_stv, self.df_cal, self.df_prices,
self.agg_index, self.agg_matrix_csr, start_test)
self.w_12 = self.df_weights.loc[12]
self.w_12.index += '_evaluation'
def score(self, preds: np.array, fast: bool=True, model_name: str=None) -> float:
"""Scores preds against `self.actuals`. If `fast` is set to True, nothing
will be saved. If `fast` is set to False, `self.df_series_scores` will be
set to a dataframe with the scores for each series (42,840 for the full
M5 dataset). If `model_name` is also passed, `self.model_name` is set and
`self.scores` will be updated with the 12 level scores and the total score, then
saved to csv.
"""
if type(preds) == pd.DataFrame:
preds = preds.values
base_errors = self.actuals - preds
errors = self.agg_matrix_csr * base_errors
rmse = np.sqrt(np.mean(errors ** 2, axis=1))
wrmsse_by_series = rmse * self.df_weights.scaled_weight
wrmsse = np.sum(wrmsse_by_series) / 12
if not fast:
self.preds = preds
self.df_series_scores = pd.DataFrame(wrmsse_by_series).rename(
mapper={'scaled_weight': 'WRMSSE'}, axis=1)
if model_name:
self.model_name = model_name
print(f'Saving level scores with model name: {model_name}')
self._append_level_scores(self.df_series_scores, model_name)
return wrmsse
def feval(self, preds, train_data) -> tuple:
"""For custom metric in lightgbm"""
preds = preds.reshape(self.actuals.shape[1], -1).T
score = self.score(preds)
return 'WRMSSE', score, False
@staticmethod
def get_weighted_mse_feval(w_12_eval, weight_col) -> callable:
"""Returns a weighted root mean squared error metric function for lightgbm.
w_12_eval must be aligned with grid_df like
w_12_eval = w_12.reindex(grid_df[eval_mask].id)
"""
weight = w_12_eval[weight_col] / w_12_eval[weight_col].mean()
def feval(preds, eval_data) -> tuple:
actuals = eval_data.get_label()
diff = preds - actuals
res = np.mean(diff ** 2 * weight)
return f'mse_feval_{weight_col}', res, False
return feval
@staticmethod
def get_weighted_mae_feval(w_12_eval, weight_col) -> callable:
"""Returns a weighted mean absolute error metric function for lightgbm.
w_12_eval must be aligned with grid_df like
w_12_eval = w_12.reindex(grid_df[eval_mask].id)
"""
weight = w_12_eval[weight_col] / w_12_eval[weight_col].mean()
def feval(preds, eval_data) -> tuple:
actuals = eval_data.get_label()
diff = preds - actuals
res = np.mean(np.abs(diff) * weight)
return f'mae_feval_{weight_col}', res, False
return feval
@staticmethod
def get_weighted_mse_fobj(w_12_train, weight_col, weight_hess=True) -> callable:
"""Returns a weighted mean squared error objective function for lightgbm.
w_12_train must be aligned with grid_df like
w_12_train = w_12.reindex(grid_df[train_mask].id)
"""
weight = w_12_train[weight_col] / w_12_train[weight_col].mean()
def fobj(preds, train_data) -> tuple:
actuals = train_data.get_label()
diff = preds - actuals
grad = diff * weight
hess = weight if weight_hess else np.ones_like(diff)
return grad, hess
return fobj
@staticmethod
def get_weighted_mae_fobj(w_12_train, weight_col, weight_hess=True) -> callable:
"""Returns a weighted mean absolute error objective function for lightgbm.
w_12_train must be aligned with grid_df like
w_12_train = w_12.reindex(grid_df[train_mask].id)
"""
weight = w_12_train[weight_col] / w_12_train[weight_col].mean()
def fobj(preds, train_data) -> tuple:
actuals = train_data.get_label()
diff = preds - actuals
grad = np.sign(diff) * weight
hess = weight if weight_hess else np.ones_like(diff)
return grad, hess
return fobj
def _append_level_scores(self, df_series_scores, model_name) -> None:
# level_scores
level_scores = df_series_scores.groupby(level=0).sum()
level_scores.loc[13] = level_scores.mean()
level_scores['model_name'] = model_name
level_scores['start_test'] = self.start_test
level_scores.reset_index(inplace=True)
cols, sep, name = ['model_name', 'level', 'start_test'], '__', 'id'
level_scores = combine_cols(level_scores, cols, sep, name)
self.scores = append_df_unique_id(self.scores, level_scores)
def dump_scores(self, path_dir: str='.') -> None:
"""Saves `self.scores`, which contains scores of each level for
each `model_name` `start_test` combination.
"""
self.scores.to_csv(os.path.join(path_dir, 'scores.csv'), index=False)
def plot_scores(self, df_series_scores=None, model_name: str=None) -> tuple:
"""Returns a tuple: fig, ax with a seaborn plot of the 12 levels of the wrmsse."""
if df_series_scores is None: df_series_scores = self.df_series_scores
if not model_name: model_name = self.model_name
fig, ax = plt.subplots()
level_scores = df_series_scores.groupby(level=0).sum()
sns.barplot(x=level_scores.index, y=level_scores['WRMSSE'])
plt.axhline(level_scores.mean()[0], color='blue', alpha=.5, ls=':')
name_and_days = f'{model_name} test {self.start_test} to {self.end_test}'
title = f'{name_and_days} WRMSSE: {round(level_scores.mean()[0], 4)}'
plt.title(title, fontsize=20, fontweight='bold')
for i in range(12):
ax.text(i, level_scores['WRMSSE'][i+1],
str(round(level_scores['WRMSSE'][i+1], 4)),
color='black', ha='center', fontsize=15)
plt.show()
return fig, ax
def make_sub(self, preds: np.array=None, test=False, model_name='no_name', path_dir='.') -> None:
"""Creates and writes a csv file that is ready for submission. If `test` is
set to True, it will be for the final test set, otherwise, the predictions
are for the validation set.
The file name will be `path_dir`/sub_`model_name`.csv"""
if preds is None: preds = self.preds
model_name = self.model_name if self.model_name else 'no_name'
df_preds = pd.DataFrame(preds, index=df_scores.loc[12].index)
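# Illustrative usage sketch (added example; the path and the zero-forecast baseline are
# assumptions, and it only runs if the M5 csv files are present under `path_data_raw`).
def _example_wrmsse_usage(path_data_raw='data/raw'):
    e = WRMSSE(PATH_DATA_RAW=path_data_raw, start_test=1914, horizon=28)
    preds = np.zeros((e.df_stv.shape[0], 28))  # one 28-day forecast per item/store row
    e.score(preds)                             # fast scoring, nothing saved
    total = e.score(preds, fast=False, model_name='baseline_zero')  # also logs level scores
    e.dump_scores(path_dir='.')                # writes ./scores.csv
    return total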
import time  # needed for the timing calls in generatingQuadsResponse1
import numpy as np
from at import *
from at.load import load_mat
import matplotlib.pyplot as plt
import at.plot
from pylab import *
import pandas as pd
import csv
from random import random
def plot_closedOrbit(ring, refpts):
elements_indexes = get_refpts(ring, refpts)
lindata0, tune, chrom, lindata = ring.linopt(get_chrom=True, refpts=elements_indexes)
closed_orbitx = lindata['closed_orbit'][:, 0]
closed_orbity = lindata['closed_orbit'][:, 2]
s_pos = lindata['s_pos']
closed_orbit = lindata['closed_orbit']
beta_x= lindata['beta'][:, 0]
beta_y= lindata['beta'][:, 1]
dx = lindata['dispersion'][:, 0]
dy = lindata['dispersion'][:, 2]
plt.plot(s_pos, closed_orbitx)
# Label for x-axis
plt.xlabel("s_pos")
# Label for y-axis
plt.ylabel("closed_orbit x")
# for display
i = 0
S_pos2 = []
plt.title("Closed orbit x")
plt.show()
plt.plot(s_pos, closed_orbity)
# Label for x-axis
plt.xlabel("s_pos")
# Label for y-axis
plt.ylabel("closed_orbit y")
# for display
i = 0
S_pos2 = []
plt.title("Closed orbit y")
plt.show()
def correctionType(alpha1, alpha2, alpha3):
correction = "no correction"
if alpha1 == 1:
correction = "optics correction"
if alpha2 == 1:
correction = "dispersion correction"
if alpha3 == 1:
correction = "optics and dispersion correction"
print("This code performs: ", correction)
#return correction
def func(j, mylist):
# dedup, preserving order (dict is insertion-ordered as a language guarantee as of 3.7):
deduped = list(dict.fromkeys(mylist))
# Slice off all but the part you care about:
return deduped[::j]
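# Illustrative sketch (added example): dedup while preserving order, then keep every j-th entry.
example_every_other = func(2, [4, 4, 1, 3, 1, 5])  # deduped to [4, 1, 3, 5], sliced to [4, 3]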
def defineMatrices_w_eta(W, alpha1, alpha2,alpha3, C0x, C0y, C0xy, C0yx, Cxx_err, Cyy_err, Cxy_err, Cyx_err, dCx, dCy, dCxy,dCyx):
Nk = len(dCx) # number of free parameters
Nm = len(dCx) # number of measurements
print('NK:', Nk)
print('Nm:', Nm)
Ax = np.zeros([Nk, Nk])
Ay = np.zeros([Nk, Nk])
Axy = np.zeros([Nk, Nk])
Ayx = np.zeros([Nk, Nk])
A = np.zeros([4 * Nk, Nk])
##
Bx = np.zeros([Nk, 1])
By = np.zeros([Nk, 1])
Bxy = np.zeros([Nk, 1])
Byx = np.zeros([Nk, 1])
B = np.zeros([4 * Nk, 1])
##
Dx = (Cxx_err[:, :] - C0x[:, :] )#- error_variance) ### dk ?
Dy = (Cyy_err[:, :] - C0y[:, :] )
Dxy = (Cxy_err[:, :] - C0xy[:, :])
Dyx = (Cyx_err[:, :] - C0yx[:, :] )
##
for i in range(Nk): ## i represents each quad
# print('done A:', 100.* i ,'%')
for j in range(Nk):
Ax[i, j] = np.sum(np.dot(np.dot(dCx[i][0: -2, :],W*alpha1), dCx[j][0: -2, :].T)) + np.sum(np.dot(np.dot(dCx[i][ -2 ::, :],W*alpha2), dCx[j][ -2 ::, :].T)) + np.sum(np.dot(np.dot(dCx[i],W*alpha3), dCx[j].T))
Ay[i, j] = np.sum(np.dot(np.dot(dCy[i][0: -2, :],W*alpha1), dCy[j][0: -2, :].T)) + np.sum(np.dot(np.dot(dCy[i][ -2 ::, :],W*alpha2), dCy[j][ -2 ::, :].T))+ np.sum(np.dot(np.dot(dCy[i],W*alpha3), dCy[j].T))
Axy[i, j] = np.sum(np.dot(np.dot(dCxy[i][0: -2, :],W*alpha1), dCxy[j][0: -2, :].T)) + np.sum(np.dot(np.dot(dCxy[i][ -2 ::, :],W*alpha2), dCxy[j][ -2 ::, :].T))+ np.sum(np.dot(np.dot(dCxy[i],W*alpha3), dCxy[j].T))
Ayx[i, j] = np.sum(np.dot(np.dot(dCyx[i][0: -2, :],W*alpha1), dCyx[j][0: -2, :].T)) + np.sum(np.dot(np.dot(dCyx[i][ -2 ::, :],W*alpha2), dCyx[j][ -2 ::, :].T))+ np.sum(np.dot(np.dot(dCyx[i],W*alpha3), dCyx[j].T))
A[i, :] = Ax[i, :]
A[i + Nk, :] = Ay[i, :]
A[i + 2 * Nk, :] = Axy[i, :]
A[i + 3 * Nk, :] = Ayx[i, :]
##
for i in range(Nk):
Bx[i] = np.sum(np.dot(np.dot(dCx[i][0: -2, :],W*alpha1), Dx[0: -2, :].T))+ np.sum(np.dot(np.dot(dCx[i][ -2 ::, :],W*alpha2), Dx[ -2 ::, :].T)) + np.sum(np.dot(np.dot(dCx[i],W*alpha3), Dx.T))
By[i] = np.sum(np.dot(np.dot(dCy[i][0: -2, :],W*alpha1), Dy[0: -2, :].T)) + np.sum(np.dot(np.dot(dCy[i][ -2 ::, :],W*alpha2), Dy[ -2 ::, :].T))+np.sum(np.dot(np.dot(dCy[i],W*alpha3), Dy.T))
Bxy[i] = np.sum(np.dot(np.dot(dCxy[i][0: -2, :],W*alpha1), Dxy[0: -2, :].T))+ np.sum(np.dot(np.dot(dCxy[i][ -2 ::, :],W*alpha2), Dxy[ -2 ::, :].T))+np.sum(np.dot(np.dot(dCxy[i],W*alpha3), Dxy.T))
Byx[i] = np.sum(np.dot(np.dot(dCyx[i][0: -2, :],W*alpha1), Dyx[0: -2, :].T))+ np.sum(np.dot(np.dot(dCyx[i][ -2 ::, :],W*alpha2), Dyx[ -2 ::, :].T))+np.sum(np.dot(np.dot(dCyx[i],W*alpha3), Dyx.T))
B[i] = Bx[i]
B[i + Nk] = By[i]
B[i + 2 * Nk] = Bxy[i]
B[i + 3 * Nk] = Byx[i]
return A, B,
def getInverse(A, B,Nk, sCut):
u, s, v = np.linalg.svd(A, full_matrices=True)
smat = 0.0 * A
si = s ** -1
n_sv = sCut
si[n_sv:] *= 0.0
print("number of singular values {}".format(len(si)))
smat[:Nk, :Nk] = np.diag(si)
print('A' + str(A.shape), 'B' + str(B.shape), 'U' + str(u.shape), 'smat' + str(smat.shape), 'v' + str(v.shape))
plt.plot(np.log(s), 'd--')
plt.title('singular value')
plt.show()
plt.plot(si, 'd--')
plt.title('singular value inverse')
plt.show()
Ai = np.dot(v.transpose(), np.dot(smat.transpose(), u.transpose()))
###
r = (np.dot(Ai, B)).reshape(-1)
plot(r, 'd')
plt.show()
# error
e = np.dot(A, r).reshape(-1) - B.reshape(-1)
plt.plot(e)
plt.show()
plt.plot(B)
plt.show()
return Ai, r, e
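# Illustrative sketch (added example; shapes are assumptions): A stacks the four response
# blocks (4*Nk rows by Nk columns), so the truncated-SVD inverse solves for Nk quad strengths.
def example_getInverse():
    Nk = 3
    A_toy = np.random.rand(4 * Nk, Nk)
    B_toy = np.random.rand(4 * Nk, 1)
    Ai, r, e = getInverse(A_toy, B_toy, Nk, sCut=Nk)  # keep all Nk singular values
    return r                                          # fitted correction strengths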
def compare_orm(Cxy, Cxy_err, Cxy_corr, no):
# plot the 3 sets
plt.plot(Cxy[no], label='C')
plt.plot(Cxy_err[no], label='C_err')
plt.plot(Cxy_corr[no], label='C_corr')
# call with no parameters
plt.legend()
plt.show()
def compare_drm(Cxy, Cxy_err, Cxy_corr):
# plot the 3 sets
plt.plot(Cxy, label='$\eta$')
plt.plot(Cxy_err, label='$\eta_{err}$')
plt.plot(Cxy_corr, label='$\eta_{corr}$')
# call with no parameters
plt.legend()
plt.show()
def generatingQuadsResponse1(ring, Cxx, Cyy,Cxy, Cyx , used_correctors):
# %%time
quads_info = quad_info(ring)
quad_dict, quad_vals = getQuadFamilies(quads_info)
quads = [k for k in quad_dict.keys()]
quad_names = quads
dk = 0.0001
qxx = []
qxy = []
qyy = []
qyx = []
quad_names = quads
for qname in quad_names:
print('generating response to {}, n={}'.format(qname, quad_dict[qname]))
t0 = time.time()
nq = quad_dict[qname] + 1
for i in range(0, nq):
Qxx, Qxy, Qyy, Qyx = computeOpticsD1(ring, qname, i, dk, quad_vals, used_correctors)
qxx.append(Qxx)
qxy.append(Qxy)
qyy.append(Qyy)
qyx.append(Qyx)
t1 = time.time()
print(f"Execution time: {t1 - t0} sec")
C0x = Cxx
C0y = Cyy
C0xy = Cxy
C0yx = Cyx
dCx = []
dCy = []
dCxy = []
dCyx = []
quad_names = quads
for qname in quad_names:
# nquad = quad_dict[qname]
print('loading response to:', qname)
i = 0
while (i < len(qxx)):
C1x = qxx[i]
C1y = qyy[i]
C1xy = qxy[i]
C1yx = qyx[i]
dcxx = ((C1x - C0x) / dk)
dcyy = ((C1y - C0y) / dk)
dCxy.append((C1xy - C0xy) / dk)
dCyx.append((C1yx - C0yx) / dk)
dCx.append(dcxx)
dCy.append(dcyy)
i += 1
return C0x, C0y, C0xy, C0yx, dCx, dCy, dCxy,dCyx
def setCorrection(ring, quads_info_error,quad_names, r , quads_info,n_list, used_quads):
quad_dict, quad_vals = getQuadFamilies(quads_info_error)
n_list = len(quads_info_error.s_pos)
# print(n_list)
quad_names = quad_names
iq = 0
frac = 1.0
cor_dict = {}
DK = []
for qname in quad_names:
if qname in used_quads:
cor_dict[qname] = -r[iq] * frac
iq += 1
print("define correction : Done")
quads_indexes = get_refpts(ring, elements.Quadrupole)
for qname in quads_indexes:
if ring[qname].FamName in used_quads:
dk1 = cor_dict[ring[qname].FamName]
DK.append(dk1)
else:
DK.append(0)
quads_indexes = get_refpts(ring, elements.Quadrupole)
i = 0
while (i < len(quads_indexes)):
ring[quads_indexes[i]].K += DK[i]
i += 1
print("set correction : Done")
def setCorrection1(ring, quads_info_error,quad_names, r , quads_info,n_list):
quad_dict, quad_vals = getQuadFamilies(quads_info_error)
n_list = len(quads_info_error.s_pos)
# print(n_list)
quad_names = quad_names
iq = 0
frac = 1.0
cor_dict = {}
for qname in quad_names:
nquad = quad_dict[qname]
# print(qname, quad_dict[qname])
for i in range(0, nquad):
cor_dict[qname, i + 1] = -r[iq] * frac
iq += 1
print("define correction : Done")
DK = []
for idx in range(n_list):
qname_ = quads_info.elements_name[idx] # ElementName
occ = quads_info_error.occ[idx]
dk = cor_dict[qname_, occ]
DK.append(dk)
quads_indexes = get_refpts(ring, elements.Quadrupole)
i = 0
while (i < len(quads_indexes)):
ring[quads_indexes[i]].K += DK[i]
i += 1
print("set correction : Done")
def plotORM(orm):
plt.figure()
imshow(orm)
plt.show()
def getBetaBeat(twiss, twiss_error):
print("getBetaBeat bx and by: ")
bxi =[]
for i in range(len(twiss.betax)):
bxx = (twiss_error.betax[i] - twiss.betax[i]) / twiss.betax[i]
bxi.append(bxx)
byi =[]
for i in range(len(twiss.betay)):
byy = (twiss_error.betay[i] - twiss.betay[i]) / twiss.betay[i]
byi.append(byy)
bxx = np.array((twiss_error.betax - twiss.betax) / twiss.betax)
byy = np.array((twiss_error.betay - twiss.betay) / twiss.betay)
bx = np.sqrt(np.mean(bxx ** 2))
by = np.sqrt(np.mean(byy ** 2))
#bx = np.std((twiss_error.betax - twiss.betax) / twiss.betax)
#by = np.std((twiss_error.betay - twiss.betay) / twiss.betay)
print("Simulated beta beat, x:" + str(bx * 100) + "% y: " + str(by* 100) + "%")
def used_elements_plot(lattice, elements_indexes, used_quad):
elements_indexes = get_refpts(lattice, '*')
lindata0, tune, chrom, lindata = lattice.linopt(get_chrom=True, refpts=elements_indexes)
closed_orbitx = lindata['closed_orbit'][:, 0]
closed_orbity = lindata['closed_orbit'][:, 2]
s_pos = lindata['s_pos']
closed_orbit = lindata['closed_orbit']
beta_x = lindata['beta'][:, 0]
beta_y = lindata['beta'][:, 1]
dx = lindata['dispersion'][:, 0]
dy = lindata['dispersion'][:, 2]
plt.plot(s_pos, closed_orbitx)
#plt.plot(s_pos, closed_orbitx)
# Label for x-axis
plt.xlabel("elements_indexes")
# Label for y-axis
plt.ylabel("closed_orbit_x")
# for display
i = 0
S_pos2 = []
while (i < used_quad.shape[1]):
S_pos1 = used_quad.iloc[:, i]
S_pos_ = df = pd.concat([S_pos1])
S_pos2.append(S_pos_)
i += 1
for i in S_pos_:
scatter(i, 0)
plt.title("used quadrupoles indices")
plt.show()
plt.plot(s_pos, beta_x)
#plt.plot(s_pos, beta_x)
# Label for x-axis
plt.xlabel("elements_indexes")
# Label for y-axis
plt.ylabel("beta_x")
# for display
S_pos2 = []
i = 0
S_pos2 = []
while (i < used_quad.shape[1]):
S_pos1 = used_quad.iloc[:, i]
S_pos_ = df = pd.concat([S_pos1])
S_pos2.append(S_pos_)
i += 1
for i in S_pos_:
scatter(i, 0)
plt.title("used quadrupoles indices")
plt.show()
def used_elements_plot1(lattice, s_poss, used_quad):
elements_indexes = get_refpts(lattice, '*')
lindata0, tune, chrom, lindata = lattice.linopt(get_chrom=True, refpts=elements_indexes)
closed_orbitx = lindata['closed_orbit'][:, 0]
closed_orbity = lindata['closed_orbit'][:, 2]
s_pos = lindata['s_pos']
closed_orbit = lindata['closed_orbit']
beta_x = lindata['beta'][:, 0]
beta_y = lindata['beta'][:, 1]
dx = lindata['dispersion'][:, 0]
dy = lindata['dispersion'][:, 2]
plt.plot(s_pos, closed_orbitx)
#plt.plot(s_pos, closed_orbitx)
# Label for x-axis
plt.xlabel("elements_indexes")
# Label for y-axis
plt.ylabel("closed_orbit_x")
# for display
i = 0
for i in s_poss:
scatter(i, 0)
plt.title("used quadrupoles indices")
plt.show()
plt.plot(s_pos, beta_x)
#plt.plot(s_pos, beta_x)
# Label for x-axis
plt.xlabel("elements_indexes")
# Label for y-axis
plt.ylabel("beta_x")
# for display
S_pos2 = []
i = 0
S_pos2 = []
for i in s_poss:
scatter(i, 0)
plt.title("used quadrupoles indices")
plt.show()
def getDispersion(twiss, twiss_error, twiss_corrected):
plt.plot(twiss.dx, label='$\eta_x$')
plt.plot(twiss_error.dx, label='$\eta_x_err$')
plt.plot(twiss_corrected.dx, label='$\eta_x_corr$')
plt.legend()
plt.show()
plt.plot(twiss.dy, label='$\eta_y$')
plt.plot(twiss_error.dy, label='$\eta_y_err$')
plt.plot(twiss_corrected.dy, label='$\eta_y_corr$')
plt.legend()
plt.show()
def make_plot(twiss, plot_name):
from mpl_toolkits.axes_grid1 import host_subplot
import matplotlib.pyplot as plt
host = host_subplot(111)
par = host.twinx()
host.set_xlabel("s_pos")
host.set_ylabel(r'$\beta_x$')
host.set_ylabel(r'$\beta_y$')
par.set_ylabel("dx")
p1, = host.plot(twiss.s_pos, twiss.betax, label=r'$\beta_x$')
p2, = host.plot(twiss.s_pos, twiss.betay, label=r'$\beta_y$')
p3, = par.plot(twiss.s_pos, twiss.dx, label=r'$\eta_x$')
p4, = par.plot(twiss.s_pos, twiss.dy, label=r'$\eta_y$')
leg = plt.legend()
host.yaxis.get_label().set_color(p1.get_color())
leg.texts[0].set_color(p1.get_color())
host.yaxis.get_label().set_color(p2.get_color())
leg.texts[1].set_color(p2.get_color())
par.yaxis.get_label().set_color(p3.get_color())
leg.texts[2].set_color(p3.get_color())
plt.title(plot_name)
plt.show()
def used_quads_f1(ring, used_correctors_list, quad_dict):
# elements_name = used_correctors_list
correctors_indexes = []
quad_dict_ = []
elements_name = []
quads = pd.DataFrame()
s_pos =[]
for i in used_correctors_list:
# quad_dict_.append(int(quad_dict[i]))
quad_dict_ = int(quad_dict[i])
elements_numbers = quad_dict_
corrector_indexx = get_refpts(ring, i)
# print(corrector_index)
element_name = ring[corrector_indexx[0]].FamName
lindata0, tune, chrom, lindata = ring.linopt(get_chrom=True, refpts=corrector_indexx)
s_poss = lindata['s_pos']
s_pos.append(s_poss)
df1 = {
str(i) + str("=") + str(" ") + str(quad_dict_) + str(" ") + str('quads'): corrector_indexx,
}
df2 = pd.concat([pd.DataFrame(v, columns=[k]) for k, v in df1.items()], axis=1)
quads = pd.concat([quads, df2], axis=1)
for j in range(len(s_pos)):
array1 = np.append(s_pos[0], s_pos[j])
return quads, s_pos
def used_quads_f(ring, used_correctors_list, quad_dict):
#elements_name = used_correctors_list
correctors_indexes = []
quad_dict_ = []
elements_name =[]
quads = pd.DataFrame()
for i in used_correctors_list:
#quad_dict_.append(int(quad_dict[i]))
quad_dict_= int(quad_dict[i])
elements_numbers = quad_dict_
corrector_index = get_refpts(ring, i)
#print(corrector_index)
element_name = ring[corrector_index[0]].FamName
lindata0, tune, chrom, lindata = ring.linopt(get_chrom=True, refpts=corrector_index)
s_poss = lindata['s_pos']
#print(element_name)
df1 = {
str(i) + str("=") + str(" ")+ str( quad_dict_)+ str(" ")+ str('quads'): s_poss,
}
df2 = pd.concat([pd.DataFrame(v, columns=[k]) for k, v in df1.items()], axis=1)
correctors_indexes.append(np.squeeze(corrector_index))
elements_name.append(element_name)
quads = pd.concat([quads, df2], axis=1)
return quads
def used_elements_plot(lattice, used_quad):
elements_indexes = get_refpts(lattice, '*')
lindata0, tune, chrom, lindata = lattice.linopt(get_chrom=True, refpts=elements_indexes)
closed_orbitx = lindata['closed_orbit'][:, 0]
closed_orbity = lindata['closed_orbit'][:, 2]
s_pos = lindata['s_pos']
closed_orbit = lindata['closed_orbit']
beta_x = lindata['beta'][:, 0]
beta_y = lindata['beta'][:, 1]
dx = lindata['dispersion'][:, 0]
dy = lindata['dispersion'][:, 2]
plt.plot(s_pos, closed_orbitx)
#plt.plot(s_pos, closed_orbitx)
# Label for x-axis
plt.xlabel("elements_indexes")
# Label for y-axis
plt.ylabel("closed_orbit_x")
# for display
i = 0
S_pos2 = []
while (i < used_quad.shape[1]):
S_pos1 = used_quad.iloc[:, i]
S_pos_ = df = pd.concat([S_pos1])
S_pos2.append(S_pos_)
i += 1
for i in S_pos_:
scatter(i, 0)
plt.title("used quadrupoles indices")
plt.show()
plt.plot(s_pos, beta_x)
#plt.plot(s_pos, beta_x)
# Label for x-axis
plt.xlabel("elements_indexes")
# Label for y-axis
plt.ylabel("beta_x")
# for display
S_pos2 = []
i = 0
S_pos2 = []
while (i < used_quad.shape[1]):
S_pos1 = used_quad.iloc[:, i]
S_pos_ = df = pd.concat([S_pos1])
S_pos2.append(S_pos_)
i += 1
for i in S_pos_:
scatter(i, 0)
plt.title("used quadrupoles indices")
plt.show()
def used_correctors_f(lattice, used_correctors_list):
elements_name = used_correctors_list
correctors_indexes = []
quad_dict = []
for i in used_correctors_list:
# print(i)
corrector_index = get_refpts(lattice, i)
correctors_indexes.append(np.squeeze(corrector_index))
j = 0
s_pos=[]
Ind= []
while (j < len( corrector_index)):
corrector_indexx = corrector_index[j]
lindata0, tune, chrom, lindata = lattice.linopt(get_chrom=True, refpts=correctors_indexes)
s_poss = lindata['s_pos']
#s_pos.append(s_poss)
s_pos.append(np.squeeze(s_poss))
Ind.append(corrector_indexx)
df1 = {'Used elements names': elements_name, 'S_pos': correctors_indexes}
j += 1
return df1, s_pos, correctors_indexes
def getOptics(ring, refpts):
elements_indexes = get_refpts(ring, refpts)
lindata0, tune, chrom, lindata = ring.linopt(get_chrom=True, refpts=elements_indexes)
closed_orbitx = lindata['closed_orbit'][:, 0]
closed_orbity = lindata['closed_orbit'][:, 2]
s_pos = lindata['s_pos']
closed_orbit = lindata['closed_orbit']
beta_x= lindata['beta'][:, 0]
beta_y= lindata['beta'][:, 1]
dx = lindata['dispersion'][:, 0]
dy = lindata['dispersion'][:, 2]
print("preparing twiss ..")
print(f"Tunes={ring.get_tune()}")
print(f"Chrom={ring.get_chrom()}")
elements_name = []
elements_strength = []
elements_type =[]
i = 0
while (i < len(elements_indexes)):
element_name = ring[i].FamName
elements_name.append(element_name)
i +=1
output = [elements_name[:e].count(v) for e, v in enumerate(elements_name,1)]
twiss1 = {'s_pos': s_pos,'betax': beta_x,
'betay': beta_y, 'dx': dx, 'dy': dy}
twiss = pd.DataFrame(twiss1)
#!/usr/bin/env python3.6
import pandas as pd
from collections import defaultdict, Counter
import argparse
import sys
import os
import subprocess
import re
import numpy as np
from datetime import datetime
from itertools import chain
from pyranges import PyRanges
from SV_modules import *
pd.set_option('display.max_columns', None)
pd.set_option('display.expand_frame_repr', False)
pd.set_option('max_colwidth', None)
pd.options.display.max_rows = 999
class Namespace:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def createGeneSyndromeDict(database_df):
dict = defaultdict(list)
for var, hpo in database_df.itertuples(index=False): # var can either be gene or syndrome
dict[var].append(hpo)
return(dict)
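# Illustrative sketch (added example; the HPO terms are toy values): each gene or syndrome
# maps to the list of phenotype terms annotated to it.
def exampleGeneSyndromeDict():
    toy_db = pd.DataFrame({'gene': ['PAX6', 'PAX6', 'FBN1'],
                           'hpo': ['HP:0000526', 'HP:0000505', 'HP:0001166']})
    return createGeneSyndromeDict(toy_db)  # {'PAX6': ['HP:0000526', 'HP:0000505'], 'FBN1': ['HP:0001166']}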
def createWeightDict(weights):
try:
w_df = pd.read_csv(weights, sep = ' ', names=["HPO_id", "weight"], comment = '#')
except OSError:
print("Count not open/read the input file:" + weights)
sys.exit()
weightDict = dict(zip(w_df.HPO_id, w_df.weight))
return(weightDict)
def getClinicalPhenome(args):
# Get the clinical phenome and store as a set
try:
clinical_phenome = set(open("./results/" + args.sampleid + "/" + args.sampleid + "_hpo_inexact.txt").read().splitlines())
except OSError:
print("Count not open/read the input file:" + "./results/" + args.sampleid + "/" + args.sampleid + "_hpo_inexact.txt")
sys.exit()
return(clinical_phenome)
def calculateGeneSumScore(args, hpo_gene_dict, weightDict, clinical_phenome, omim_gene):
# Go through genes in genelist found in the patients
try:
genes = open("./results/" + args.sampleid + "/" + args.sampleid + "_gene_list.txt", 'r')
except OSError:
print("Count not open/read the input file:" + "./results/" + args.sampleid + "/" + args.sampleid + "_gene_list.txt")
sys.exit()
with genes:
gene = genes.read().splitlines()
gene_sum_score = 0
gene_score_result = pd.DataFrame(columns=['gene', 'score'])
for query in gene:
#print(query)
hpo_pheno = set(hpo_gene_dict[query]) # To get the phenotypic features for a given gene
overlap = hpo_pheno.intersection(clinical_phenome) # overlap all the phenotypic features with the clinical phenomes
for term in overlap:
gene_sum_score += weightDict[term]
gene_score_result = gene_score_result.append({'gene':query, 'score':gene_sum_score}, ignore_index=True)
gene_score_result_r = gene_score_result.iloc[::-1]
gene_score_result_r = pd.concat([gene_score_result_r, omim_gene])
gene_score_result_r = normalizeRawScore(args, gene_score_result_r, 'gene')
return(gene_score_result_r)
def getParentsGeno(filtered_intervar, inheritance_mode, ov_allele):
# Create two new columns and initialize to 0
filtered_intervar[inheritance_mode] = 0
filtered_intervar = filtered_intervar.reset_index(drop=True)
for idx, row in enumerate(filtered_intervar.itertuples(index=False)):
if int(getattr(row, 'Start')) in set(ov_allele['Start']):
#parents_geno = ov_allele.loc[ov_allele['Start'] == getattr(row, 'Start'), 'geno'].head(1)
#print(parents_geno)
parents_geno = ov_allele.loc[ov_allele['Start']==getattr(row,'Start'),'geno'].head(1).item()
filtered_intervar.loc[idx, inheritance_mode] = parents_geno
return(filtered_intervar)
def rerankSmallVariant(df):
df['Clinvar_idx'] = df.Clinvar.str[9:-1]
df['InterVar_idx'] = df.InterVar_InterVarandEvidence.str[10:].str.split('PVS1').str[0]
df[['Clinvar_idx', 'InterVar_idx']] = df[['Clinvar_idx', 'InterVar_idx']].apply(lambda x:x.astype(str).str.lower())
df['Clinvar_score'], df['InterVar_score'] = 3, 3
# Calculate Clinvar score
df.loc[(df['Clinvar_idx'].str.contains('benign')), 'Clinvar_score'] = 1
df.loc[((df['Clinvar_idx'].str.contains('benign')) & (df['Clinvar_idx'].str.contains('likely'))), 'Clinvar_score'] = 2
df.loc[(df['Clinvar_idx'].str.contains('pathogenic')), 'Clinvar_score'] = 5
df.loc[((df['Clinvar_idx'].str.contains('pathogenic')) & (df['Clinvar_idx'].str.contains('likely'))), 'Clinvar_score'] = 4
df.loc[(df['Clinvar_idx'].str.contains('conflicting')), 'Clinvar_score'] = 3
# Calculate Intervar score
df.loc[(df['InterVar_idx'].str.contains('benign')), 'InterVar_score'] = 1
df.loc[((df['InterVar_idx'].str.contains('benign')) & (df['InterVar_idx'].str.contains('likely'))), 'InterVar_score'] = 2
df.loc[(df['InterVar_idx'].str.contains('pathogenic')), 'InterVar_score'] = 5
df.loc[((df['InterVar_idx'].str.contains('pathogenic')) & (df['InterVar_idx'].str.contains('likely'))), 'InterVar_score'] = 4
# Add them up
df['Patho_score'] = df['Clinvar_score'] + df['InterVar_score']
# Sort by the total patho_score
df = df.sort_values(by=['Patho_score', 'score'], ascending=False)
df = df.drop(['Clinvar_idx', 'InterVar_idx', 'Clinvar_score', 'InterVar_score', 'Patho_score'], axis=1)
return df
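# Illustrative sketch (added example; the Clinvar/InterVar string formats are assumptions):
# reported pathogenic calls float to the top even when their gene score is lower.
def exampleRerankSmallVariant():
    toy = pd.DataFrame({'Clinvar': ['Clinvar: (Pathogenic)', 'Clinvar: (Benign)'],
                        'InterVar_InterVarandEvidence': ['InterVar: Pathogenic PVS1=1',
                                                         'InterVar: Benign PVS1=0'],
                        'score': [1.0, 2.0]})
    return rerankSmallVariant(toy)  # pathogenic row ranks first despite its lower score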
def smallVariantGeneOverlapCheckInheritance(args, smallVariantFile, interVarFinalFile, gene_score_result_r, famid):
# Overlap gene_score_result_r with small variants genes found in the proband
gene_score_result_r = gene_score_result_r[gene_score_result_r.gene.isin(smallVariantFile.gene)]
# Subset the intervar files further to store entries relevant to these set of genes
filtered_intervar = pd.merge(interVarFinalFile, gene_score_result_r, left_on='Ref_Gene', right_on='gene',how='inner')
# Remove common artifacts
try:
artifacts = pd.read_csv("./common_artifacts_20.txt", names = ["gene"])
filtered_intervar = filtered_intervar.loc[~filtered_intervar['Ref_Gene'].isin(artifacts['gene'])]
except OSError:
print("Could not open/read the input file: common_artifacts_20.txt")
sys.exit()
# If custom artifact bed file is provided, filter dataframe
if os.path.exists(args.artifact):
#print(filtered_intervar)
custom_artifact = pd.read_csv(args.artifact, sep='\t', usecols=[0, 2] ,names=["Chr", "End"])
keys = list(custom_artifact.columns.values)
i1 = filtered_intervar.set_index(keys).index
i2 = custom_artifact.set_index(keys).index
filtered_intervar = filtered_intervar.loc[~i1.isin(i2)]
# Create a bed file and write it out
pd.DataFrame(filtered_intervar).to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_smallVariant_candidates.txt', index=False, sep='\t',header=False) # Write out a subset of the variant first
filtered_intervar_bed = filtered_intervar[['Chr', 'Start', 'End']]
filtered_intervar_bed.loc[:,'Chr'] = 'chr' + filtered_intervar_bed.loc[:,'Chr'].astype(str)
filtered_intervar_bed.loc[:,'Start'] -= 1
pd.DataFrame(filtered_intervar_bed).to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_target.bed', index=False, sep='\t', header=False)
# Create two new columns and initialize to -1
# will later get overwritten to 0/1/2 if parents vcf files are provided
filtered_intervar['paternal'] = -1
filtered_intervar['maternal'] = -1
if args.type != 'singleton':
# Get overlapping variants from the parents so we know which variants are inherited
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Comparing small variants (SNPs/indels) inheritance')
cmd1 = "bcftools view -R ./results/" + args.sampleid + "/" + args.sampleid + "_target.bed " + args.fathervcf + " > ./results/" + args.sampleid + "/" + args.sampleid + "_paternal_inherited_smallVariants.vcf"
cmd2 = "bcftools view -R ./results/" + args.sampleid + "/" + args.sampleid + "_target.bed " + args.mothervcf + " > ./results/" + args.sampleid + "/" + args.sampleid + "_maternal_inherited_smallVariants.vcf"
if args.type == 'duo':
if args.father_duo:
cmds = [cmd1]
else:
cmds = [cmd2]
else:
cmds = [cmd1, cmd2]
for cmd in cmds:
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise Exception(stderr)
# Go through every row in filtered_intervar and see if the same variant is found in either of the parents
# We will only compare allele start position (we always assume the alt allele is the same)
if args.type=='trio' or args.father_duo:
try:
paternal_ov_allele = pd.read_csv("./results/" + args.sampleid + "/" + args.sampleid + "_paternal_inherited_smallVariants.vcf", sep='\t',usecols=[1,9], names=["Start", "geno"], comment='#')
paternal_ov_allele['geno'] = paternal_ov_allele['geno'].str[:1].astype(int) + paternal_ov_allele['geno'].str[2:3].astype(int)
filtered_intervar = getParentsGeno(filtered_intervar, 'paternal', paternal_ov_allele)
except OSError:
print("Could not open/read the input file: ./results/" + args.sampleid + "/" + args.sampleid + "_paternal_inherited_smallVariants.vcf")
sys.exit()
if args.type=="trio" or args.mother_duo:
try:
maternal_ov_allele = pd.read_csv("./results/" + args.sampleid + "/" + args.sampleid + "_maternal_inherited_smallVariants.vcf", sep='\t',usecols=[1,9], names=["Start", "geno"], comment='#')
maternal_ov_allele['geno'] = maternal_ov_allele['geno'].str[:1].astype(int) + maternal_ov_allele['geno'].str[2:3].astype(int)
filtered_intervar = getParentsGeno(filtered_intervar, 'maternal', maternal_ov_allele)
except OSError:
print("Could not open/read the input file: ./results/" + args.sampleid + "/" + args.sampleid + "_maternal_inherited_smallVariants.vcf")
sys.exit()
# Rerank variants based on reported or predicted pathogeneicity
filtered_intervar = rerankSmallVariant(filtered_intervar)
if args.type=='trio':
# Divide the dataset into recessive, dominant, de novo, compound het
## Recessive
recessive = filtered_intervar[(filtered_intervar['paternal'] == 1) & (filtered_intervar['maternal'] == 1) & (filtered_intervar['Otherinfo'] == 'hom')]
## Dominant
dominant_inherited = filtered_intervar[((filtered_intervar['paternal'] == 1) & (filtered_intervar['maternal'] == 0)) | ((filtered_intervar['maternal'] == 1) & (filtered_intervar['paternal'] == 0))]
## De novo
denovo = filtered_intervar[(filtered_intervar['paternal'] == 0) & (filtered_intervar['maternal'] == 0)]
#Compound het
filtered_intervar_compoundhet = filtered_intervar[(filtered_intervar['Otherinfo'] == 'het')]
filtered_intervar_compoundhet = filtered_intervar_compoundhet[(filtered_intervar_compoundhet['maternal'] != 2) & (filtered_intervar_compoundhet['paternal'] != 2) & ((filtered_intervar_compoundhet['paternal'] == 1) & (filtered_intervar_compoundhet['maternal'] == 0)) | ((filtered_intervar_compoundhet['maternal'] == 1) & (filtered_intervar_compoundhet['paternal'] == 0)) | ((filtered_intervar_compoundhet['maternal'] == 0) & (filtered_intervar_compoundhet['paternal'] == 0))]
count = Counter(filtered_intervar_compoundhet['Ref_Gene'])
compoundhet_genes = [x for x, cnt in count.items() if cnt > 1]
compoundhet = filtered_intervar_compoundhet[filtered_intervar_compoundhet['Ref_Gene'].isin(compoundhet_genes)]
discard = []
for gene in compoundhet_genes:
df = compoundhet[compoundhet['Ref_Gene'].str.contains(gene)]
row_count = len(df.index)
col_list = ['paternal', 'maternal']
res = df[col_list].sum(axis=0)
if ((res[0] == 0) & (res[1] == row_count)) or ((res[1] == 0) & (res[0] == row_count)):
discard.append(gene)
compoundhet = compoundhet[~compoundhet['Ref_Gene'].isin(discard)]
# Print all the variants according to inheritance mode
# Recessive
pd.DataFrame(recessive).to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_smallVariants_recessive_candidates.txt', index=False, sep='\t', header=True)
# Dominant
pd.DataFrame(dominant_inherited).to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_dominant_inherited_smallVariants_candidates.txt', index=False, sep='\t', header=True)
# De novo
pd.DataFrame(denovo).to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_smallVariants_denovo_candidates.txt', index=False, sep='\t', header=True)
# Compound het
pd.DataFrame(compoundhet).to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_smallVariants_compoundhet_candidates.txt', index=False, sep='\t', header=True)
if args.xlink:
xlink = filtered_intervar.loc[(filtered_intervar['maternal']!=2) & (filtered_intervar['paternal']==0) & (filtered_intervar['Chr'] == 'X')]
pd.DataFrame(xlink).to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_smallVariants_xlink_candidates.txt', index=False, sep='\t', header=True)
# All
filtered_intervar = rerankSmallVariant(filtered_intervar)
pd.DataFrame(filtered_intervar).to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_smallVariants_ALL_candidates.txt', index=False, sep='\t', header=True)
if args.type=='trio':
# We want to return everything except recessive variants
filtered_intervar = filtered_intervar.loc[~filtered_intervar['Start'].isin(recessive['Start'])] # don't have recessive if singleton or duo
return filtered_intervar
def differentialDiangosis(hpo_syndrome_dict, weightSyndromeDict, clinical_phenome, args, cyto_10x_del, cyto_10x_del_largeSV, cyto_10x_dup_largeSV, cyto_BN_del, cyto_BN_dup,hpo_syndromes_mim_df):
syndrome_score_result = pd.DataFrame(columns=['syndrome', 'score'])
# Check every syndrome and its overlapping hpo terms
for syndrome in hpo_syndrome_dict:
hpo_terms = set(hpo_syndrome_dict[syndrome])
score = 0
for term in hpo_terms:
if term in clinical_phenome:
score += weightSyndromeDict[term]
if score != 0:
syndrome_score_result = syndrome_score_result.append({'syndrome': syndrome, 'score': score}, ignore_index=True)
syndrome_score_result_r = syndrome_score_result.sort_values(by='score', ascending=False)
syndrome_score_result_r['syndrome'] = syndrome_score_result_r['syndrome'].str.upper()
# Add a normalized score column
syndrome_score_result_r = normalizeRawScore(args, syndrome_score_result_r, 'syndrome')
# Specifically look for deletion/duplication syndrome
delDupSyndrome(syndrome_score_result_r, args, cyto_10x_del, cyto_10x_del_largeSV, cyto_10x_dup_largeSV, cyto_BN_del, cyto_BN_dup, hpo_syndromes_mim_df)
return(syndrome_score_result_r)
def findGenomicLocation(cytoband_key, cytobandDict):
#print(cytoband_key)
keys = [key for key in cytobandDict if key.startswith(cytoband_key)]
#print(keys)
if len(keys)==0:
cytoband_key = cytoband_key[:-1]
keys = [key for key in cytobandDict if key.startswith(cytoband_key)]
genomic_coords_list = []
for key in keys:
genomic_coords_list.append(str(cytobandDict[key]).split('-'))
#print(genomic_coords_list)
genomic_coords_list = list(chain.from_iterable(genomic_coords_list))
min_coords = min(genomic_coords_list)
max_coords = max(genomic_coords_list)
genomic_range = str(min_coords) + '-' + str(max_coords)
return genomic_range
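# Illustrative sketch (added example; assumes cytoband.txt maps band names to 'start-end'
# coordinate strings): a truncated band key falls back to the span of all matching sub-bands.
def exampleFindGenomicLocation():
    toy_cyto = {'1p36.33': '10000-2300000', '1p36.32': '2300000-5300000'}
    return findGenomicLocation('1p36.3', toy_cyto)  # '10000-5300000'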
def parseSyndromeNameToCytoband(df, cytobandDict, type, hpo_syndromes_mim_df,args):
if type=='deldup':
df['cytoband'] = float('Nan')
regex = r'((^|\W)[0-9XY]{1,2}[PQ]{1}[\w\\.\\-]{1,15}[\s$])'
for index, row in df.iterrows():
m = re.search(regex, str(row))
if m is not None:
df.loc[index, 'cytoband'] = m.group(1)
df.dropna(subset=['cytoband'], inplace=True)
if df.empty: # df can be empty after dropping NA
return pd.DataFrame()
if type=='all':
df = df.merge(hpo_syndromes_mim_df, on=['syndrome'])
try:
morbid = pd.read_csv(args.workdir + '/morbidmap.txt', sep='\t', usecols=[2, 3], names=["MIM", "cytoband"], comment='#')
df = df.merge(morbid, on='MIM')
df = df.loc[~df['cytoband'].astype(str).str.contains("Chr")]
end_string = ('p','q')
df = df.loc[~df['cytoband'].str.endswith(end_string)] #Remove cytoband entries that span the whole chromosomal arm like 2p
except OSError:
print("Could not open/read the input file: " + args.workdir + '/morbidmap.txt')
sys.exit()
df['cytoband'] = df['cytoband'].astype(str).str.lower()
df['cytoband'] = df['cytoband'].str.replace('x', 'X')
df['cytoband'] = df['cytoband'].str.replace('y', 'Y')
df['cytoband'] = df['cytoband'].str.strip('\(\)')
df[['Chromosome', 'discard']] = df.cytoband.str.split('p|q', 1, expand=True)
df = df.drop('discard', axis=1)
if df.cytoband.str.contains('-').any():
df[['cytoband_start', 'cytoband_stop']] = df.cytoband.str.split('-', expand=True)
else:
df['cytoband_start'] = df.cytoband
df['cytoband_stop'] = None
df['arm'] = np.where(df['cytoband_start'].str.contains('p'), 'p', 'q')
df['cytoband_stop'] = np.where(df['cytoband_start'].str.count('p|q')>1, df['arm'] + df['cytoband_start'].str.split('p|q').str[2], df['cytoband_stop'])
df['cytoband_start'] = np.where(df['cytoband_start'].str.count('p|q')>1, df['cytoband_start'].str.split('p|q').str[0] + df['arm'] + df['cytoband_start'].str.split('p|q').str[1], df['cytoband_start'])
for idx, row in df.iterrows():
cytoband_start_key = row['cytoband_start'].replace(" ","")
if cytoband_start_key in cytobandDict:
coords_start = cytobandDict[cytoband_start_key]
else:
genomic_range = findGenomicLocation(cytoband_start_key, cytobandDict)
coords_start = genomic_range
if row['cytoband_stop'] is not None: # Fix cytoband_stop column for quick cytobandDict lookup
current_chr = np.where(('p' in str(row['cytoband_stop'])) or ('q' in str(row['cytoband_stop'])), str(row['Chromosome']), str(row['Chromosome']) + str(row['arm']))
edited_cytoband_stop = str(current_chr) + row['cytoband_stop']
edited_cytoband_stop = edited_cytoband_stop.replace(" ", "")
df.at[idx, 'cytoband_stop'] = edited_cytoband_stop
if edited_cytoband_stop in cytobandDict:
coords_stop = cytobandDict[edited_cytoband_stop]
else:
genomic_range = findGenomicLocation(edited_cytoband_stop, cytobandDict)
coords_stop = genomic_range
# New coords will be the the beginning of coords_start and end of coords_stop
df.at[idx, 'Start'] = coords_start.split('-')[0]
df.at[idx, 'End'] = coords_stop.split('-')[1]
else:
df.at[idx, 'Start'] = coords_start.split('-')[0]
df.at[idx, 'End'] = coords_start.split('-')[1]
return df
def createCytobandDict(args):
try:
cyto = pd.read_csv(args.workdir + '/cytoband.txt', sep = '\t', names=["cytoband", "coords"], comment = '#')
except OSError:
print("Count not open/read the input file:" + args.workdir + '/cytoband.txt')
sys.exit()
cytobandDict = dict(zip(cyto.cytoband, cyto.coords))
return(cytobandDict)
def delDupSyndrome(syndrome_score_result_r, args, cyto_10x_del, cyto_10x_del_largeSV, cyto_10x_dup_largeSV, cyto_BN_del, cyto_BN_dup, hpo_syndromes_mim_df):
#print(syndrome_score_result_r)
syndrome_score_result_r.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_syndrome_score_result_r.txt', sep='\t', index=False)
# Create cytoband <-> genomic coordinates dict
cytobandDict = createCytobandDict(args)
del_cond = syndrome_score_result_r['syndrome'].str.contains('DELETION')
dup_cond = syndrome_score_result_r['syndrome'].str.contains('DUPLICATION')
del_df = syndrome_score_result_r[del_cond]
dup_df = syndrome_score_result_r[dup_cond]
del_df = parseSyndromeNameToCytoband(del_df, cytobandDict,'deldup',hpo_syndromes_mim_df, args)
dup_df = parseSyndromeNameToCytoband(dup_df, cytobandDict,'deldup',hpo_syndromes_mim_df, args)
all_omim_syndromes = parseSyndromeNameToCytoband(syndrome_score_result_r, cytobandDict,'all', hpo_syndromes_mim_df, args)
if args.bionano:
cols = ['Chromosome', 'Start', 'End', 'SmapEntryID', 'Confidence', 'Type', 'Zygosity', 'Genotype', 'SV_size', 'Found_in_Father', 'Found_in_Mother', 'syndrome', 'cytoband', 'score', 'normalized_score']
# Overlap with del/dup syndromes
if cyto_BN_dup is not None: # It can be None because old Bionano pipeline doesn't call duplications...
# dup_df.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_input1.txt', sep='\t', index=False)
# cyto_BN_dup.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_input2.txt', sep='\t',index=False)
# cyto_10x_dup_largeSV.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_input2.txt', sep='\t',index=False)
overlap_dup_BN = delDupSyndromeSVOverlap(dup_df, cyto_BN_dup, cols)
overlap_dup_BN.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_Bionano_duplication_syndrome.txt', sep='\t', index=False)
else:
overlap_dup_BN = None
pd.DataFrame().to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_Bionano_duplication_syndrome.txt', sep='\t', index=False)
overlap_del_BN = delDupSyndromeSVOverlap(del_df, cyto_BN_del, cols)
overlap_del_BN.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_Bionano_deletion_syndrome.txt', sep='\t', index=False)
all_BN = pd.concat([cyto_BN_dup, cyto_BN_del], ignore_index=True)
overlap_all_BN = delDupSyndromeSVOverlap(all_omim_syndromes, all_BN, cols)
overlap_all_BN.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_Bionano_all_syndrome.txt', sep='\t', index=False)
if args.linkedreadSV:
cols = ['Chromosome', 'Start', 'End', 'ID', 'REF', 'ALT_1', 'QUAL', 'FILTER_PASS', 'SVLEN', 'Found_in_Father', 'Found_in_Mother', 'syndrome', 'cytoband', 'score', 'normalized_score']
# dup_df.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_input1.txt', sep='\t', index=False)
# cyto_10x_dup_largeSV.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_input2.txt', sep='\t', index=False)
overlap_dup_largeSV_10x = delDupSyndromeSVOverlap(dup_df, cyto_10x_dup_largeSV, cols)
overlap_dup_largeSV_10x.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_10x_duplication_largeSV_syndrome.txt', sep='\t', index=False)
overlap_del_largeSV_10x = delDupSyndromeSVOverlap(del_df, cyto_10x_del_largeSV, cols)
overlap_del_largeSV_10x.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_10x_deletion_largeSV_syndrome.txt', sep='\t', index=False)
overlap_del_10x = delDupSyndromeSVOverlap(del_df, cyto_10x_del, cols)
overlap_del_10x.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_10x_deletion_syndrome.txt', sep='\t', index=False)
all_10x = pd.concat([cyto_10x_dup_largeSV, cyto_10x_del_largeSV, cyto_10x_del], ignore_index=True)
overlap_all_10x = delDupSyndromeSVOverlap(all_omim_syndromes, all_10x, cols)
overlap_all_10x.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_10x_all_syndrome.txt', sep='\t', index=False)
if args.linkedreadSV and args.bionano:
cols = ['Chromosome', 'Start', 'End', 'ID', 'REF', 'ALT_1', 'QUAL', 'FILTER_PASS', 'SVLEN', 'Found_in_Father', 'Found_in_Mother', 'syndrome', 'cytoband', 'SmapEntryID', 'Confidence', 'Type', 'Zygosity', 'Genotype', 'SV_size', 'Found_in_Father_b', 'Found_in_Mother_b', 'score', 'normalized_score',]
# syndrome appearing in both 10x and bionano --> confident set
## for duplications
if ((overlap_dup_BN is not None) and (not overlap_dup_BN.empty) and (not overlap_dup_largeSV_10x.empty)):
overlap_dup_largeSV_10x = overlap_dup_largeSV_10x.loc[overlap_dup_largeSV_10x['SVLEN'] >= 1000]
confident_dup_syndrome = delDupSyndromeSVOverlap(overlap_dup_largeSV_10x, overlap_dup_BN, cols)
if not confident_dup_syndrome.empty:
confident_dup_syndrome.to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_confident_duplication_syndrome.txt', sep='\t',index=False)
else: # Write an empty dataframe
pd.DataFrame().to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_confident_duplication_syndrome.txt', sep='\t',index=False)
## for deletions
del_10x = pd.concat([overlap_del_largeSV_10x, overlap_del_10x])
if ((not overlap_del_BN.empty) and (not del_10x.empty)):
del_10x = del_10x.loc[del_10x['SVLEN'] <= (-1000)]
confidnet_del_syndrome = delDupSyndromeSVOverlap(del_10x, overlap_del_BN, cols)
#confidnet_del_syndrome = pd.merge(del_10x, overlap_del_BN, on='syndrome', how='inner')
if not confidnet_del_syndrome.empty:
confidnet_del_syndrome.to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_confident_deletion_syndrome.txt', sep='\t',index=False)
else:
pd.DataFrame().to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_confident_deletion_syndrome.txt', sep='\t',index=False)
# for all omim syndromes
if ((not overlap_all_BN.empty) and (not overlap_all_10x.empty)):
overlap_all_10x = overlap_all_10x.loc[(overlap_all_10x['SVLEN'] <= (-1000)) | (overlap_all_10x['SVLEN'] >=1000)]
confident_all_syndrome = delDupSyndromeSVOverlap(overlap_all_10x, overlap_all_BN, cols)
if not confident_all_syndrome.empty:
confident_all_syndrome.to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_confident_all_syndrome.txt', sep='\t',index=False)
else:
pd.DataFrame().to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_confident_all_syndrome.txt', sep='\t',index=False)
def delDupSyndromeSVOverlap(del_df, cyto_BN_del, cols):
if del_df.empty:
return pd.DataFrame()
del_df['Chromosome'] = del_df['Chromosome'].str.strip()
if 'cytoband_stop' in list(del_df.columns):
del_df = del_df.drop(['cytoband_start','cytoband_stop'], axis=1)
del_df.dropna( inplace=True)
overlap_del_BN = PyRanges(cyto_BN_del).join(PyRanges(del_df))
if not overlap_del_BN.df.empty:
overlap_del_BN = overlap_del_BN.df
overlap_del_BN['overlap_len'] = np.maximum(0, np.minimum(overlap_del_BN.End, overlap_del_BN.End_b) - np.maximum(overlap_del_BN.Start,overlap_del_BN.Start_b))
#overlap_del_BN = overlap_del_BN.drop(like="_b")
overlap_del_BN = overlap_del_BN.sort_values(by='score', ascending=False)
overlap_del_BN = overlap_del_BN.loc[overlap_del_BN['overlap_len'] > 0]
# print(overlap_del_BN)
#overlap_del_BN = overlap_del_BN.df.sort_values(by='score', ascending=False)
# Rearrange the column
overlap_del_BN = overlap_del_BN[cols].drop_duplicates()
return overlap_del_BN
else:
return overlap_del_BN.df
def normalizeRawScore(args, raw_score, mode):
# Normalize all the scores to 1-100
max_score = max(raw_score['score'])
raw_score.loc[:,'normalized_score'] = raw_score.loc[:,'score']/max_score * 100
return(raw_score)
def compileControlFiles(control_files_path, famid):
full_paths = []
for path in control_files_path:
control_files = os.listdir(path)
for file in control_files:
if not (re.match('BC...0[34]{1}', file) or re.match(rf"BC{famid}..", file)): # Discard trio of interest and all probands
full_paths.append(os.path.join(path, file))
full_paths.append(os.path.join(path, file))
return full_paths
def bionanoSV(args, famid, gene_score_result_r, all_small_variants):
# Generate controls files (1KGP BN samples + CIAPM parents (excluding parents of the proband of interest)
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Generating bionano control file...')
control_files_path = [args.workdir + "/bionano_sv/controls/DLE", args.workdir + "/bionano_sv/controls/BspQI", args.workdir + "/bionano_sv/cases/DLE", args.workdir + "/bionano_sv/cases/BspQI"]
full_paths = compileControlFiles(control_files_path, famid)
## Write an empty file
with open(args.workdir + "/results/" + args.sampleid + "/bionano_control.smap.gz", 'w'): # So it will overwrite the old file
pass
for path in full_paths:
cmd = "cat " + path + "/exp_refineFinal1_merged_filter.smap | gzip >> " + args.workdir + "/results/" + args.sampleid + "/bionano_control.smap.gz"
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise Exception(stderr)
# Create a BN arg object
BN_args = Namespace(sampleID = args.sampleid,
samplepath = args.workdir + "/bionano_sv/cases/" + args.enzyme + "/" + args.sampleid + "/exp_refineFinal1_merged_filter.smap",
fpath = args.workdir + "/bionano_sv/cases/" + args.enzyme + "/BC" + famid + "01/exp_refineFinal1_merged_filter.smap",
mpath = args.workdir + "/bionano_sv/cases/" + args.enzyme + "/BC" + famid + "02/exp_refineFinal1_merged_filter.smap",
referencepath = args.workdir + "/results/" + args.sampleid + "/bionano_control.smap.gz",
outputdirectory = args.workdir + '/results/' + args.sampleid,
exons = args.workdir + '/annotatedExon.bed',
genes=args.workdir + '/annotatedGene.bed',
genelist = gene_score_result_r,
type = args.type,
father_duo = args.father_duo,
mother_duo = args.mother_duo)
# Call bionano translocation
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Detecting bionano translocations on ' + args.sampleid + '...')
BN_translocation(BN_args)
# Call bionano deletion
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Detecting bionano deletions on ' + args.sampleid + '...')
cyto_BN_del, exon_calls_BN_del = BN_deletion(BN_args)
# Call bionano insertion
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Detecting bionano insertions on ' + args.sampleid + '...')
BN_insertion(BN_args)
# Call bionano duplications
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Detecting bionano duplications on ' + args.sampleid + '...')
cyto_BN_dup, exon_calls_BN_dup = BN_duplication(BN_args)
# Call bionano inversions
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Detecting bionano inversions on ' + args.sampleid + '...')
BN_inversion(BN_args)
# Check potential compoundhets with SNPs and indels
BN_exons = pd.concat([exon_calls_BN_del, exon_calls_BN_dup])
if BN_exons.empty:
        pd.DataFrame()
import collections
from datetime import timedelta
from io import StringIO
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import needs_i8_conversion
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
Interval,
IntervalIndex,
Series,
Timedelta,
TimedeltaIndex,
)
import pandas._testing as tm
from pandas.tests.base.common import allow_na_ops
def test_value_counts(index_or_series_obj):
obj = index_or_series_obj
obj = np.repeat(obj, range(1, len(obj) + 1))
result = obj.value_counts()
counter = collections.Counter(obj)
expected = Series(dict(counter.most_common()), dtype=np.int64, name=obj.name)
expected.index = expected.index.astype(obj.dtype)
if isinstance(obj, pd.MultiIndex):
expected.index = Index(expected.index)
# TODO: Order of entries with the same count is inconsistent on CI (gh-32449)
if obj.duplicated().any():
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_value_counts_null(null_obj, index_or_series_obj):
orig = index_or_series_obj
obj = orig.copy()
if not allow_na_ops(obj):
pytest.skip("type doesn't allow for NA operations")
elif len(obj) < 1:
pytest.skip("Test doesn't make sense on empty data")
elif isinstance(orig, pd.MultiIndex):
pytest.skip(f"MultiIndex can't hold '{null_obj}'")
values = obj.values
if needs_i8_conversion(obj.dtype):
values[0:2] = iNaT
else:
values[0:2] = null_obj
klass = type(obj)
repeated_values = np.repeat(values, range(1, len(values) + 1))
obj = klass(repeated_values, dtype=obj.dtype)
# because np.nan == np.nan is False, but None == None is True
# np.nan would be duplicated, whereas None wouldn't
counter = collections.Counter(obj.dropna())
expected = Series(dict(counter.most_common()), dtype=np.int64)
expected.index = expected.index.astype(obj.dtype)
result = obj.value_counts()
if obj.duplicated().any():
# TODO:
# Order of entries with the same count is inconsistent on CI (gh-32449)
expected = expected.sort_index()
result = result.sort_index()
tm.assert_series_equal(result, expected)
# can't use expected[null_obj] = 3 as
# IntervalIndex doesn't allow assignment
new_entry = Series({np.nan: 3}, dtype=np.int64)
expected = expected.append(new_entry)
result = obj.value_counts(dropna=False)
if obj.duplicated().any():
# TODO:
# Order of entries with the same count is inconsistent on CI (gh-32449)
expected = expected.sort_index()
result = result.sort_index()
tm.assert_series_equal(result, expected)
def test_value_counts_inferred(index_or_series):
klass = index_or_series
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
expected = Series([4, 3, 2, 1], index=["b", "a", "d", "c"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(np.unique(np.array(s_values, dtype=np.object_)))
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.unique(np.array(s_values, dtype=np.object_))
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 4
# don't sort, have to sort after the fact as not sorting is
# platform-dep
hist = s.value_counts(sort=False).sort_values()
expected = Series([3, 1, 4, 2], index=list("acbd")).sort_values()
tm.assert_series_equal(hist, expected)
# sort ascending
hist = s.value_counts(ascending=True)
expected = Series([1, 2, 3, 4], index=list("cdab"))
tm.assert_series_equal(hist, expected)
# relative histogram.
hist = s.value_counts(normalize=True)
expected = Series([0.4, 0.3, 0.2, 0.1], index=["b", "a", "d", "c"])
tm.assert_series_equal(hist, expected)
def test_value_counts_bins(index_or_series):
klass = index_or_series
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
# bins
msg = "bins argument only works with numeric data"
with pytest.raises(TypeError, match=msg):
s.value_counts(bins=1)
s1 = Series([1, 1, 2, 3])
res1 = s1.value_counts(bins=1)
exp1 = Series({Interval(0.997, 3.0): 4})
tm.assert_series_equal(res1, exp1)
res1n = s1.value_counts(bins=1, normalize=True)
exp1n = Series({Interval(0.997, 3.0): 1.0})
tm.assert_series_equal(res1n, exp1n)
if isinstance(s1, Index):
tm.assert_index_equal(s1.unique(), Index([1, 2, 3]))
else:
exp = np.array([1, 2, 3], dtype=np.int64)
tm.assert_numpy_array_equal(s1.unique(), exp)
assert s1.nunique() == 3
# these return the same
res4 = s1.value_counts(bins=4, dropna=True)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 1, 3, 2]))
tm.assert_series_equal(res4, exp4)
res4 = s1.value_counts(bins=4, dropna=False)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 1, 3, 2]))
tm.assert_series_equal(res4, exp4)
res4n = s1.value_counts(bins=4, normalize=True)
exp4n = Series([0.5, 0.25, 0.25, 0], index=intervals.take([0, 1, 3, 2]))
    tm.assert_series_equal(res4n, exp4n)
# -*- coding: utf-8 -*-
import os
import pandas as pd
import logging
from src.config import AQ_DATA_DIR, PROC_DATA_DIR
def _strip_string(string):
return string.lstrip().rstrip()
def prepare_station_df(station_df: pd.DataFrame, file_name: str) -> pd.DataFrame:
"""
Prepare the dataframe adding columns with information from file name
:param station_df: pandas dataframe with station data
:param file_name: name of the file needed to extract info
    :return: dataframe with date as index and station info columns prepended
"""
station_name, region = file_name.split(',')[:2]
city = station_name.split('-')[0]
station_df.columns = [_strip_string(c) for c in station_df.columns]
prep_station_df = station_df.set_index('date').sort_index()
station_info = {
'station_name': _strip_string(station_name),
'city': _strip_string(city),
'region': _strip_string(region)
}
for k, v in station_info.items():
prep_station_df[k] = v
quality_cols = [c for c in prep_station_df.columns if c not in list(station_info.keys())]
return prep_station_df[list(station_info.keys()) + quality_cols]
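# Example (hypothetical file name): "Milano-Senato, Lombardia, PM10.csv" is parsed
# into station_name "Milano-Senato", city "Milano" and region "Lombardia", which are
# added as constant columns ahead of the measurement columns.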
def load_single_station_data(path_to_file: str, file_name: str) -> pd.DataFrame:
"""
Read the csv and prepare the data adding info from filename
:param path_to_file: path to the folder
    :param file_name: name of the file to be loaded
:return: station dataframe
"""
logging.info("loading file {s}".format(s=file_name))
path_to_file_name = os.path.join(path_to_file, file_name)
    station_df = pd.read_csv(path_to_file_name, na_values=' ', parse_dates=['date'])
    return prepare_station_df(station_df, file_name)
"""
Main script for the paper
A Comparison of Patient History- and EKG-based Cardiac Risk Scores
<NAME>, <NAME>, <NAME>
Proceedings of the AMIA Summit on Clinical Research Informatics (CRI), 2018
Runs various models, saves prediction outcomes.
"""
import feather, os, sys, pickle
from torch.autograd import Variable
import torch
import numpy as np
import pandas as pd
from collections import OrderedDict
# import my code
from ekgmodels import misc, base, mlp, resnet
import experiment_data as ed
import simple_baseline as sb
#############################################################################
# Runs all models for all outcomes, saves model output #
# to directory called #
# % prediction-output/<outcome>
#
# where <outcome> is one of
# - "future_afib"
# - "stroke_6m"
# - "troponin"
# - "mace"
#############################################################################
run_ehr_baselines = True
run_beatnet = True
run_resnet = True
features = ["simple",
"remark",
#"vae",
#"remark-vae",
#"simple-vae",
"simple-remark"]
#"simple-remark-vae"]
def run_models():
""" run all models for comparison --- takes a while """
# feature sets and combinations to compare
# Non Deep Methods
if run_ehr_baselines:
run_outcome(outcome = "future_afib", features=features)
run_outcome(outcome = "stroke_6m", features=features)
run_outcome(outcome = "troponin", features=features)
run_outcome(outcome = "mace", features=features)
# MLP outcomes
if run_beatnet:
run_mlp_outcome(outcome='troponin')
run_mlp_outcome(outcome='future_afib')
run_mlp_outcome(outcome='mace')
run_mlp_outcome(outcome='stroke_6m')
# full trace Resnet
if run_resnet:
run_resnet_outcome(outcome='future_afib')
run_resnet_outcome(outcome='mace')
run_resnet_outcome(outcome='stroke_6m')
run_resnet_outcome(outcome='troponin')
def make_figures():
""" make all figures from saved files (in "./prediction-output") """
# first figure --- full ekg example
plot_full_ekg_example()
# stitch together results table
outcomes = ['future_afib', "stroke_6m", "mace"] + \
['trop_3d', 'trop_7d' ,'trop_30d', 'trop_180d']
aucdf_no_hist = results_table(outcomes=outcomes, features=features,
do_logreg=True, do_net=True, subset="no_history")
aucdf = results_table(outcomes=outcomes, features=features,
do_logreg=True, do_net=True, subset=None)
# compute "improvement above simple, improvement above remark, etc"
# look at high MACE risk scores from beatnet
plot_age_based_risk(outcome="mace")
plot_age_based_risk(outcome="stroke_6m")
plot_age_based_risk(outcome="trop_30d")
plot_age_based_risk(outcome="trop_180d")
plot_age_based_risk(outcome="future_afib")
for cat in ['gender', 'race']:
plot_aucs_by_category(outcome="mace", category=cat)
plot_aucs_by_category(outcome="stroke_6m", category=cat)
plot_aucs_by_category(outcome="trop_180d", category=cat)
plot_aucs_by_category(outcome="trop_30d", category=cat)
plot_aucs_by_category(outcome="future_afib", category=cat)
plot_predictive_aucs(outcome="mace")
plot_predictive_aucs(outcome="trop_30d")
plot_predictive_aucs(outcome="trop_180d")
plot_predictive_aucs(outcome="future_afib")
plot_predictive_aucs(outcome="stroke_6m")
# construct and save cohort table
tabledf = ed.make_cohort_table()
print(tabledf)
tabledf.to_latex(os.path.join("prediction-output", "data-table.tex"), escape=False)
def run_outcome(outcome, features):
print("\n\n========================================================")
print(" predicting outcome %s for features %s "%(outcome, str(features)))
# predictor results
outcome_dir = os.path.join("prediction-output/%s"%outcome)
if not os.path.exists(outcome_dir):
os.makedirs(outcome_dir)
# logistic regression --- do all features in parallel
from joblib import Parallel, delayed
res_list = Parallel(n_jobs=len(features), verbose=5)(
delayed(sb.run_logistic)(outcome=outcome, features=f)
for f in features)
for reslr, feats in zip(res_list, features):
print(" saving logreg with features %s "%feats)
with open(os.path.join(outcome_dir, "lreg-%s.pkl"%feats), 'wb') as f:
pickle.dump(reslr, f)
def run_mlp_outcome(outcome):
# predictor results
outcome_dir = os.path.join("prediction-output/%s"%outcome)
if not os.path.exists(outcome_dir):
os.makedirs(outcome_dir)
# beat models
beatmod = sb.run_beat_mlp(outcome=outcome, use_features=False)
beatmod.save(os.path.join(outcome_dir, "beatnet-raw-ekg.pkl"))
# beat with simple
feats = "simple"
mod = sb.run_beat_mlp(outcome=outcome, use_features=True, features=feats)
mod.save(os.path.join(outcome_dir, "beatnet-%s.pkl"%feats))
def run_resnet_outcome(outcome):
# predictor results
outcome_dir = os.path.join("prediction-output/%s"%outcome)
if not os.path.exists(outcome_dir):
os.makedirs(outcome_dir)
# run EKG Resnet and EKG Beatnet
mod = sb.run_ekg_mlp(outcome=outcome, use_features=False)
mod.save(os.path.join(outcome_dir, "resnet-raw-ekg.pkl"))
feats = "simple"
smod = sb.run_ekg_mlp(outcome=outcome, use_features=False)
smod.save(os.path.join(outcome_dir, "resnet-%s.pkl"%feats))
def best_logistic_model(resdict):
vdf = resdict['valdf']
vauc = vdf[ vdf['metric'] == 'auc' ]
best_idx = np.argmax(vauc['value'].values)
best_mod = vauc['model'].iloc[best_idx]
return best_mod
def pairwise_compare_aucs(outcome="mace", features=["simple", "remark"]):
if "trop_" in outcome:
outcome_dir = os.path.join("prediction-output/troponin")
Xdf, Ydf, encdf = ed.make_dataset(
outcome='troponin', features='simple', do_split=False)
else:
outcome_dir = os.path.join("prediction-output/%s"%outcome)
Xdf, Ydf, encdf = ed.make_dataset(
outcome=outcome, features='simple', do_split=False)
_, _ , mdata = misc.split_data(Xdf.values, Ydf.values, encdf.split, encdf)
split = "test"
subset = "no_history"
zs_mod, ys_mod = {}, {}
for feats in features:
print(" lreg w/ feats: ", feats)
with open(os.path.join(outcome_dir, "lreg-%s.pkl"%feats), 'rb') as f:
res = pickle.load(f)[outcome]
zs = res['z%s'%split]
Ys = Ydf.loc[zs.index][outcome]
Xs = Xdf.loc[zs.index]
encs = encdf.loc[zs.index]
if subset == "no_history":
has_past_afib = encdf.loc[zs.index]['has_afib_past']
no_idx = (Xs['mi']==0.) & (Xs['diabetes']==0.) & \
(Xs['stroke']==0.) & (Xs['hypertense']==0.) & \
(has_past_afib == 0.) & \
(encs['age'] < 50.)
if outcome == "mace":
untested_idx = ~pd.isnull(encs['has_mace'])
no_idx = no_idx & untested_idx
zs = zs[no_idx]
Ys = Ys[no_idx]
zs_mod[feats] = zs
ys_mod[feats] = Ys
modfiles = ['beatnet-raw-ekg.pkl', 'beatnet-simple.pkl',
'resnet-raw-ekg.pkl', 'resnet-simple.pkl']
modfiles = ['beatnet-raw-ekg.pkl', 'resnet-raw-ekg.pkl', 'beatnet-simple.pkl']
for modfile in modfiles:
# load ekg mlp outcome
print(" ... loading mod file %s"%modfile)
mod = base.load_model(os.path.join(outcome_dir, modfile))
print(" ... has %d params"%mod.num_params())
mdf = mod.fit_res['%sdf-%s'%(split, outcome)]
#mauc = mdf[ mdf['metric']=='auc' ]['string'].iloc[0]
zs = mod.fit_res['z%s-enc-%s'%(split, outcome)]
if not hasattr(zs, 'index'):
split_idx = ['train', 'val', 'test'].index(split)
zs = pd.Series(zs, index=mdata[split_idx].index)
Ys = Ydf.loc[zs.index][outcome]
Xs = Xdf.loc[zs.index]
encs = encdf.loc[zs.index]
if subset == "no_history":
has_past_afib = encdf.loc[zs.index]['has_afib_past']
no_idx = (Xs['mi']==0.) & (Xs['diabetes']==0.) & \
(Xs['stroke']==0.) & (Xs['hypertense']==0.) & \
(has_past_afib == 0.) & \
(encs['age'] < 50.)
if outcome == "mace":
untested_idx = ~pd.isnull(encs['has_mace'])
no_idx = no_idx & untested_idx
zs = zs[no_idx]
Ys = Ys[no_idx]
zs_mod[modfile] = zs
ys_mod[modfile] = Ys
# compare pairs
zsekg = zs_mod['beatnet-raw-ekg.pkl']
zsresnet = zs_mod['resnet-raw-ekg.pkl'].loc[zsekg.index]
zsbase = zs_mod['simple'].loc[zsekg.index]
#zsbase = zs_mod[0].loc[zsekg.index]
    zsrem = zs_mod['remark'].loc[zsekg.index]
ysbase = Ys.loc[zsekg.index]
sa, sb, diff = misc.bootstrap_auc_comparison(
ysbase.values, zsbase.values, zsekg.values, num_samples=1000)
print(" simple => beatnet ", np.percentile(diff, [2.5, 97.5]))
sa, sb, diff = misc.bootstrap_auc_comparison(
ysbase.values, zsrem.values, zsekg.values, num_samples=1000)
print(" rem => beatnet ", np.percentile(diff, [2.5, 97.5]))
sa, sb, diff = misc.bootstrap_auc_comparison(
ysbase.values, zsresnet.values, zsekg.values, num_samples=1000)
print(" resnet => beatnet ", np.percentile(diff, [2.5, 97.5]))
sa, sb, diff = misc.bootstrap_auc_comparison(
ysbase.values, zsrem.values, zsresnet.values, num_samples=1000)
print(" rem => resnet ", np.percentile(diff, [2.5, 97.5]))
def results_table(outcomes=["future_afib"],
features=["simple", "remark"],
split="test",
subset = None,
do_logreg=True,
do_net=False):
# no history subset
auc_cols = OrderedDict()
for outcome in outcomes:
print("\n===== outcome %s ========"%outcome)
if "trop_" in outcome:
outcome_dir = os.path.join("prediction-output/troponin")
Xdf, Ydf, encdf = ed.make_dataset(
outcome='troponin', features='simple', do_split=False)
else:
outcome_dir = os.path.join("prediction-output/%s"%outcome)
Xdf, Ydf, encdf = ed.make_dataset(
outcome=outcome, features='simple', do_split=False)
_, _ , mdata = misc.split_data(Xdf.values, Ydf.values, encdf.split, encdf)
rows = []
for feats in features:
# lreg results
if do_logreg:
print(" lreg w/ feats: ", feats)
with open(os.path.join(outcome_dir, "lreg-%s.pkl"%feats), 'rb') as f:
res = pickle.load(f)[outcome]
#best_mod = best_logistic_model(res)
#tdf = res['%sdf'%split]
#auc = tdf[ (tdf['model']==best_mod) & (tdf['metric']=='auc') ]['string'].iloc[0]
zs = res['z%s'%split]
Ys = Ydf.loc[zs.index][outcome]
Xs = Xdf.loc[zs.index]
encs = encdf.loc[zs.index]
if subset == "no_history":
has_past_afib = encdf.loc[zs.index]['has_afib_past']
no_idx = (Xs['mi']==0.) & (Xs['diabetes']==0.) & \
(Xs['stroke']==0.) & (Xs['hypertense']==0.) & \
(has_past_afib == 0.) & \
(encs['age'] < 50.)
if outcome == "mace":
                        untested_idx = ~pd.isnull(encs['has_mace'])
import sys, pandas, csv, os, pickle, codecs
#from scipy import spatial
import argparse, numpy, os
from sklearn.metrics.pairwise import cosine_similarity
from scipy.spatial.distance import cdist, pdist, squareform
from timeit import default_timer as timer
from scipy.stats.mstats import rankdata
def compute(cos_scores,df_dic_source_target,params,out):
#score #2
total = cos_scores.shape[1]-1
sorted_scores= numpy.argsort(numpy.argsort(cos_scores,axis=1))#rankdata(cos_scores,axis=1)-1
#2nd method
diag = numpy.diagonal(cos_scores)
#max_scores = cos_scores.max(axis=1)
max_index = numpy.where(sorted_scores==total)[1]
max_scores = [cos_scores[idx,d]for idx,d in enumerate(max_index)]
top_index = numpy.where(sorted_scores==total-9)[1]
top_scores = [cos_scores[idx,d]for idx,d in enumerate(top_index)]
dscores = [1 if d==max_scores[idx] else 0 for idx,d in enumerate(diag)] #compares actual value not just rank
dtopscores = [1 if d>=top_scores[idx] else 0 for idx,d in enumerate(diag)]
df_dic_source_target['p1']=dscores
df_dic_source_target['p10']=dtopscores
p1 = df_dic_source_target['p1'].mean()
p10 = df_dic_source_target['p10'].mean()
if params.case:
out_file= "case-sensitive_"+os.path.basename(params.emb)
else:
out_file= "case-insensitive_"+os.path.basename(params.emb)
if "unq" in params.dic:
out_file = "unq_{}".format(out_file)
else:
out_file = "multi_{}".format(out_file)
df_dic_source_target.to_csv(os.path.dirname(params.emb) + "/" + out_file+out+'.out',sep='\t',columns=['source','target','p1','p10'],index=False,encoding='utf-8')
return [p1,p10]
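# Note on compute() above: row i of cos_scores holds the cosine similarities of
# source word i against every target candidate, so 'p1' is the fraction of pairs
# whose diagonal (gold) score equals the row maximum and 'p10' the fraction whose
# diagonal score is at least the 10th-highest score in the row (precision@1/@10).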
def main():
STDERR_OUT = ""
parser = argparse.ArgumentParser(description='Word translation score')
parser.add_argument("--emb", type=str, default="", help="Path to embedding")
parser.add_argument("--dic", type=str, default="", help="Path to dictionary")
parser.add_argument("--prefix", type=str, default="", help="Language")
parser.add_argument("--out",type=str,default="",help="Output directory")
parser.add_argument("--case",type=bool,default=False,help="Case sensitive")
parser.add_argument("--merge_two_embeddings",type=bool,default=False,help="merge_two_embeddings")
parser.add_argument("--embedding_pr",type=str,default="",help="merge_two_embeddings")
parser.add_argument("--embedding_en",type=str,default="",help="merge_two_embeddings")
params = parser.parse_args()
qualifier = "unq" if "unq" in params.dic else "multi"
if(params.case):
IDENTIFIER = "{}:\t{}\t{}\t".format("Iteration", qualifier, "cAsE")
else:
IDENTIFIER = "{}:\t{}\t{}\t".format("Iteration", qualifier, "case")
print(IDENTIFIER, end='\t', file=sys.stdout, flush=True)
print(sys.argv, file=sys.stdout, flush=True)
if(params.merge_two_embeddings):
print(IDENTIFIER + "Merging two embeddings", file=sys.stdout, flush=True)
assert os.path.isfile(params.embedding_pr)
assert os.path.isfile(params.embedding_en)
f_emb = codecs.open(params.emb, 'w', 'utf-8-sig')
### Do line handling better ###
f_emb.write(
str(int(codecs.open(params.embedding_pr, 'r', 'utf-8-sig').readline().strip().split()[0]) + \
int(codecs.open(params.embedding_en, 'r', 'utf-8-sig').readline().strip().split()[0])))
f_emb.write(" " + codecs.open(params.embedding_pr, 'r', 'utf-8-sig').readline().strip().split()[1] + "\n")
### Line handling done ###
cnt_prefix = False
with codecs.open(params.embedding_pr, 'r', 'utf-8-sig') as read_f:
for line in read_f:
if cnt_prefix is False:
cnt_prefix = True
continue;
f_emb.write(params.prefix + ":" + line)
cnt_en = False
with codecs.open(params.embedding_en, 'r', 'utf-8-sig') as read_f:
for line in read_f:
if cnt_en is False:
cnt_en = True
continue;
f_emb.write("en:" + line)
f_emb.close()
assert os.path.isfile(params.emb)
assert os.path.isfile(params.dic)
we_name = os.path.basename(params.emb)
dic_name = os.path.basename(params.dic)
df_we= pandas.read_csv(params.emb, skiprows=1, sep="\s+",header=None, quoting=csv.QUOTE_NONE,encoding='utf-8')
df_di= pandas.read_csv(params.dic, sep="\t",header=None, quoting=csv.QUOTE_NONE,engine='python',encoding='utf-8')#\s+
if params.case: #case sensitive
df_dic=df_di
else:
df_dic = df_di.applymap(str.lower)
df_we[0]=df_we[0].str.lower()
df_we[0].replace('SPACE_', '').replace('_', ' ')
# df_we[0].loc[df_we[0].startswith('SPACE_'), 'my_channel'] =
# df_we[0] = (df_we[0].startswith('SPACE_')).str.replace('SPACE_', '')
STDERR_OUT = STDERR_OUT + "dict_size:\t{}\t".format(len(df_dic))
df_dic.drop_duplicates(inplace=True)
STDERR_OUT = STDERR_OUT + "final_dict_size:\t{}\t".format(len(df_dic))
STDERR_OUT = STDERR_OUT + "we_size:\t{}\t".format(len(df_we))
df_we.drop_duplicates([0],inplace=True)
STDERR_OUT = STDERR_OUT + "final_we_size:\t{}\t".format(len(df_we))
cnames= ['dim'+str(c) for c in df_we.columns]
cnames[0]='word'
col_names=",".join(cnames)
df_we.columns=col_names.split(',')
df_dic.columns=['source','target']
df_dic_source=pandas.merge(df_dic,df_we,left_on='source',right_on='word')
    df_dic_source_target = pandas.merge(df_dic_source, df_we, left_on='target', right_on='word', suffixes=('_src', '_trg'))
import sys
import numpy as np
import os.path as op
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import roc_auc_score, recall_score
sys.path.append(op.abspath(op.dirname(op.dirname(__file__))))
from data_io import DataLoader
from noise_ceiling import compute_noise_ceiling
from metrics import brier_score, tjur_score
subs = [str(s).zfill(2) for s in range(1, 14) if s != 11]
ceilings = np.zeros((len(subs), 6))
y_all = []
for i, sub in enumerate(subs):
dl = DataLoader(sub=sub, log_level=30)
y_doubles = dl.load_y(return_doubles=True)
ceilings[i, :] = compute_noise_ceiling(y_doubles, soft=True, scoring=tjur_score)
dl.log.warning(f"Ceiling sub-{sub}: {ceilings[i, :]}")
# Note to self: between-subject NC only works with 'hard' labels,
# otherwise you need to deal with two sources of "doubles"/inconsistency
dl.load_y(return_doubles=False, strategy_doubles='hard')
y_all.append(dl.y)
# Ceilings per subject
ceilings = pd.DataFrame(ceilings, columns=dl.le.classes_, index=subs)
#!/usr/bin/env python
# coding: utf-8
# # Calculation Template
# ## Client: INTERNAL
# ---
# ## Project: PSV tool example
# ## Calc: 2020-CALC-PSV-001
# ## By: <NAME>
# ## Date: December, 2020
# ---
# ## Authentication
# > Stamp, Permit
# ---
# ## Revision History
# |Revision | Date | Description | By | Reviewer|
# | :-------| :----|:------------|:---|:--------|
# | 1.0 | Dec. 2020 | Demo code | KCD | |
# | 2.0 | feb 13 2020 | Python | KCD | |
#
# ---
# In[5]:
import numpy as np
import pandas as pd
import math
import scipy as sp
from scipy import interpolate
def waterTsat(P_kPa):
# equation for saturated steam, 1/T = A + B*ln P + C/ln P
# I fit this myself, 100 to 20000 kPaa
# pressure is in kPaa
# we need to replace this with IAPWS IFC97 formulas
lnP = math.log(P_kPa)
# constants for fit of 1/T = A + B*ln P + C/ln P
tA = 0.00379302
tB = -0.000220828
tC = -0.000425693
invT = tA + tB*lnP + tC/lnP
return((1.0/invT) - 273.15)
def waterPsat(T_c):
# equation for saturated steam, ln P_Pa = A + B/Tk + C*Tk + D*ln(Tk)
# I fit this myself, 100 to 20000 kPaa
# pressure is in kPaa
T_k = T_c + 273.15
pA = 116.6408494
pB = -8572.035364
pC = 0.013736471
pD = -14.73621925
lnPsat = pA + pB/T_k + pC*T_k + pD*math.log(T_k)
return(math.exp(lnPsat)/1000.0)
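# Sanity check (approximate, since both fits are empirical): waterTsat(101.325)
# should land near 100 degC, and waterPsat(waterTsat(P_kPa)) should recover P_kPa
# to within the fit error over the stated 100-20000 kPaa range.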
def getKsh(PkPa, State):
# State is either a temperature in degC or a string
# if this is a temperature, then look up the value in the table
# if not, just return 1.0 because this is saturated
# throw an error if the tempeature is below saturation
# tables for superheat factors. I had a larger table but it threw weird errors. Smaller seems to be better.
Ksh_table = np.full((26,10), 0.0) # pressure in rows, temperature in columns
# julia code for Ksh temperature values in degC
Ksh_tC = np.array([93.33333333,
148.8888889,
204.4444444,
260.0,
315.5555556,
371.1111111,
426.6666667,
482.2222222,
537.7777778,
565.5555556]);
# julia code for Ksh pressure values in kPa
Ksh_pkPa = np.array([137.8951817 ,
344.7379543 ,
689.4759087 ,
1034.213863 ,
1378.951817 ,
1723.689772 ,
2068.427726 ,
2413.16568 ,
2757.903635 ,
3102.641589 ,
3447.379543 ,
3792.117498 ,
4136.855452 ,
4826.331361 ,
5515.807269 ,
6205.283178 ,
6894.759087 ,
7584.234995 ,
8273.710904 ,
8963.186813 ,
9652.662721 ,
10342.13863 ,
12065.8284 ,
13789.51817 ,
17236.89772 ,
20684.27726 ]);
# Julia code for Ksh, rows are pressure, columns are temperature
# subcooled values are denoted with KSH = 1.
# Interpolation near the saturation temperature could give artificially low value for Ksh
# a better method might be to replace the value 1 for the nearest subcooled temperature
# with the value that gives 1 when interpolated to the saturation tempeature.
# Clumsy, but it should work
Ksh_table = np.array([[ 1 , 0.99455814 , 0.987 , 0.93 , 0.882 , 0.841 , 0.805 , 0.774 , 0.745 , 0.732 ],
[ 1 , 0.997925224 , 0.987 , 0.93 , 0.882 , 0.841 , 0.805 , 0.774 , 0.745 , 0.732 ],
[ 1 , 1 , 0.998 , 0.935 , 0.885 , 0.843 , 0.807 , 0.775 , 0.746 , 0.733 ],
[ 1 , 1 , 0.984 , 0.94 , 0.888 , 0.846 , 0.808 , 0.776 , 0.747 , 0.733 ],
[ 1 , 1 , 0.979 , 0.945 , 0.892 , 0.848 , 0.81 , 0.777 , 0.748 , 0.734 ],
[ 1 , 1 , 1 , 0.951 , 0.895 , 0.85 , 0.812 , 0.778 , 0.749 , 0.735 ],
[ 1 , 1 , 1 , 0.957 , 0.898 , 0.852 , 0.813 , 0.78 , 0.75 , 0.736 ],
[ 1 , 1 , 1 , 0.963 , 0.902 , 0.854 , 0.815 , 0.781 , 0.75 , 0.736 ],
[ 1 , 1 , 1 , 0.963 , 0.906 , 0.857 , 0.816 , 0.782 , 0.751 , 0.737 ],
[ 1 , 1 , 1 , 0.961 , 0.909 , 0.859 , 0.818 , 0.783 , 0.752 , 0.738 ],
[ 1 , 1 , 1 , 0.961 , 0.914 , 0.862 , 0.82 , 0.784 , 0.753 , 0.739 ],
[ 1 , 1 , 1 , 0.962 , 0.918 , 0.864 , 0.822 , 0.785 , 0.754 , 0.74 ],
[ 1 , 1 , 1 , 0.964 , 0.922 , 0.867 , 0.823 , 0.787 , 0.755 , 0.74 ],
[ 1 , 1 , 1 , 1 , 0.931 , 0.872 , 0.827 , 0.789 , 0.757 , 0.742 ],
[ 1 , 1 , 1 , 1 , 0.942 , 0.878 , 0.83 , 0.792 , 0.759 , 0.744 ],
[ 1 , 1 , 1 , 1 , 0.953 , 0.883 , 0.834 , 0.794 , 0.76 , 0.745 ],
[ 1 , 1 , 1 , 1 , 0.959 , 0.89 , 0.838 , 0.797 , 0.762 , 0.747 ],
[ 1 , 1 , 1 , 1 , 0.962 , 0.896 , 0.842 , 0.8 , 0.764 , 0.749 ],
[ 1 , 1 , 1 , 1 , 0.966 , 0.903 , 0.846 , 0.802 , 0.766 , 0.75 ],
[ 1 , 1 , 1 , 1 , 0.973 , 0.91 , 0.85 , 0.805 , 0.768 , 0.752 ],
[ 1 , 1 , 1 , 1 , 0.982 , 0.918 , 0.854 , 0.808 , 0.77 , 0.754 ],
[ 1 , 1 , 1 , 1 , 0.993 , 0.926 , 0.859 , 0.811 , 0.772 , 0.755 ],
[ 1 , 1 , 1 , 1 , 1 , 0.94 , 0.862 , 0.81 , 0.77 , 0.752 ],
[ 1 , 1 , 1 , 1 , 1 , 0.952 , 0.861 , 0.805 , 0.762 , 0.744 ],
[ 1 , 1 , 1 , 1 , 1 , 0.951 , 0.852 , 0.787 , 0.74 , 0.721 ],
[ 1 , 1 , 1 , 1 , 1 , 1 , 0.831 , 0.753 , 0.704 , 0.684 ]])
# we have tables
# we will use linear interpolation with P and T.
# Using ln P and 1/T might be more robust and needs to be investigated.
# I need to find an interpolation routine
# linear_Ksh = reshape(Ksh_table,(10*26,1));
# Ksh_grid = GridInterpolations.RectangleGrid(Ksh_pkPa, Ksh_tC); # rectangular grid
#>>> xx, yy = np.meshgrid(x, y)
#>>> z = np.sin(xx**2+yy**2)
# this is the interpolation function
returnFnct = sp.interpolate.interp2d(Ksh_tC, Ksh_pkPa, Ksh_table, kind='linear')
Ksh = 1.0 # default value
if (isinstance(State,float)):
returnVal = returnFnct(State, PkPa)
Ksh = returnVal[0]
# check if we are subcooled
pSat = waterPsat(State)
if (pSat < PkPa):
raise Exception("Temperature is in subcooled region")
return (Ksh)
# that was easy
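# Illustrative calls (example values only): getKsh(1000.0, 300.0) returns a superheat
# correction factor below 1.0, since 300 degC is above the ~180 degC saturation
# temperature at 1000 kPaa, while a non-float State such as getKsh(1000.0, "saturated")
# falls through to the default Ksh = 1.0.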
def PSVareaOrifice(letter):
# from the PSV designation letter, output the PSV area in MM2
# create the table
data = {"Designation": ["D","E","F","G","H","J","K","L","M","N","P","Q","R","T"],
"typicalFlanges": ["1.5D2", "1.5E2", "1.5F3", "1.5G3", "2H3", "3J4", "3K4", "4L6", "4M6", "4N6", "4P6", "6Q8", "6R10", "8T10"],
"areaIN2": [0.11, 0.20, 0.31, 0.50, 0.79, 1.29, 1.84, 2.85, 3.60, 4.34, 6.38, 11.05, 16.00, 26.00],
"areaMM2": [70.9676, 126.4514, 198.0641, 324.5155, 506.4506, 830.3209, 1185.804, 1840.641, 2322.576, 2799.994, 4116.121, 7129.018, 10322.56, 16774.16] }
    psvTable = pd.DataFrame(data, columns=["Designation", "typicalFlanges", "areaIN2", "areaMM2"])
    # look up and return the orifice area in mm2 for the given designation letter
    return psvTable.loc[psvTable["Designation"] == letter, "areaMM2"].iloc[0]
import matplotlib.pyplot as plt
#import seaborn as sns
import math
import itertools
import numpy as np
import pandas as pd
def preplot(x_df_se,y_se,xlab="Index",ylab="Value"):
if type(x_df_se) is pd.core.series.Series:
        x_df = pd.DataFrame()
from collections import defaultdict
from collections import OrderedDict
import os
from tqdm import tqdm
import glob
import pandas as pd
from riboraptor.utils import mkdir_p
from riboraptor.helpers import path_leaf
def summarize_ribotrocer_orf_project(rootdir):
    srp = path_leaf(rootdir)
samples = glob.glob("{}/ribotricer_results/*_translating_ORFs.tsv".format(rootdir))
summarized_orf_data = []
    summarized_phase_scores_df = pd.DataFrame()
import pandas as pd
from ingestion.standardized_columns import (HISPANIC_COL, RACE_COL,
STATE_FIPS_COL, COUNTY_FIPS_COL,
STATE_NAME_COL, COUNTY_NAME_COL,
POPULATION_COL, AGE_COL, SEX_COL,
Race, RACE_CATEGORY_ID_COL,
RACE_INCLUDES_HISPANIC_COL,
TOTAL_VALUE, POPULATION_PCT_COL,
add_race_columns_from_category_id)
from ingestion import url_file_to_gcs, gcs_to_bq_util, census
from datasources.data_source import DataSource
from ingestion.census import (get_census_params, parse_acs_metadata,
get_vars_for_group, standardize_frame)
from ingestion.dataset_utils import add_sum_of_rows, generate_pct_share_col
# TODO pass this in from message data.
BASE_ACS_URL = "https://api.census.gov/data/2019/acs/acs5"
HISPANIC_BY_RACE_CONCEPT = "HISPANIC OR LATINO ORIGIN BY RACE"
GROUPS = {
# Hispanic/latino separate. When doing it this way, we don't get sex/age
# breakdowns. This is the best way to get cannonical race/ethnicity categories
"B03002": HISPANIC_BY_RACE_CONCEPT,
# By sex and age, for various races.
"B01001": "SEX BY AGE",
"B01001A": "SEX BY AGE (WHITE ALONE)",
"B01001B": "SEX BY AGE (BLACK OR AFRICAN AMERICAN ALONE)",
"B01001C": "SEX BY AGE (AMERICAN INDIAN AND ALASKA NATIVE ALONE)",
"B01001D": "SEX BY AGE (ASIAN ALONE)",
"B01001E": "SEX BY AGE (NATIVE HAWAIIAN AND OTHER PACIFIC ISLANDER ALONE)",
"B01001F": "SEX BY AGE (SOME OTHER RACE ALONE)",
"B01001G": "SEX BY AGE (TWO OR MORE RACES)",
"B01001H": "SEX BY AGE (WHITE ALONE, NOT HISPANIC OR LATINO)",
"B01001I": "SEX BY AGE (HISPANIC OR LATINO)"
}
SEX_BY_AGE_CONCEPTS_TO_RACE = {
# These include Hispanic/Latino, so they're not standardized categories.
"SEX BY AGE": Race.TOTAL.value,
"SEX BY AGE (WHITE ALONE)": Race.WHITE.value,
"SEX BY AGE (BLACK OR AFRICAN AMERICAN ALONE)": Race.BLACK.value,
"SEX BY AGE (AMERICAN INDIAN AND ALASKA NATIVE ALONE)": Race.AIAN.value,
"SEX BY AGE (ASIAN ALONE)": Race.ASIAN.value,
"SEX BY AGE (NATIVE HAWAIIAN AND OTHER PACIFIC ISLANDER ALONE)": Race.NHPI.value,
"SEX BY AGE (SOME OTHER RACE ALONE)": Race.OTHER_STANDARD.value,
"SEX BY AGE (TWO OR MORE RACES)": Race.MULTI.value,
"SEX BY AGE (HISPANIC OR LATINO)": Race.HISP.value,
# Doesn't include Hispanic/Latino
"SEX BY AGE (WHITE ALONE, NOT HISPANIC OR LATINO)": Race.WHITE_NH.value
}
RACE_STRING_TO_CATEGORY_ID_INCLUDE_HISP = {
"American Indian and Alaska Native alone": Race.AIAN.value,
"Asian alone": Race.ASIAN.value,
"Black or African American alone": Race.BLACK.value,
"Native Hawaiian and Other Pacific Islander alone": Race.NHPI.value,
"Some other race alone": Race.OTHER_STANDARD.value,
"Two or more races": Race.MULTI.value,
"White alone": Race.WHITE.value
}
RACE_STRING_TO_CATEGORY_ID_EXCLUDE_HISP = {
"American Indian and Alaska Native alone": Race.AIAN_NH.value,
"Asian alone": Race.ASIAN_NH.value,
"Black or African American alone": Race.BLACK_NH.value,
"Native Hawaiian and Other Pacific Islander alone": Race.NHPI_NH.value,
"Some other race alone": Race.OTHER_STANDARD_NH.value,
"Two or more races": Race.MULTI_NH.value,
"White alone": Race.WHITE_NH.value
}
# This only works for the "Total" race category, because ACS provides more
# granular age buckets when looking at all races than when breaking down by
# race.
def get_decade_age_bucket(age_range):
if age_range in {'0-4', '5-9'}:
return '0-9'
elif age_range in {'10-14', '15-17', '18-19'}:
return '10-19'
elif age_range in {'20-20', '21-21', '22-24', '25-29'}:
return '20-29'
elif age_range in {'30-34', '35-39'}:
return '30-39'
elif age_range in {'40-44', '45-49'}:
return '40-49'
elif age_range in {'50-54', '55-59'}:
return '50-59'
elif age_range in {'60-61', '62-64', '65-66', '67-69'}:
return '60-69'
elif age_range in {'70-74', '75-79'}:
return '70-79'
elif age_range in {'80-84', '85+'}:
return '80+'
elif age_range == 'Total':
return 'Total'
else:
return 'Unknown'
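# Examples from the mapping above: '15-17' -> '10-19', '62-64' -> '60-69',
# '85+' -> '80+', and any bucket not listed falls back to 'Unknown'.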
def get_uhc_age_bucket(age_range):
if age_range in {'18-19', '20-24', '20-20', '21-21', '22-24', '25-29', '30-34', '35-44', '35-39', '40-44'}:
return '18-44'
elif age_range in {'45-54', '45-49', '50-54', '55-64', '55-59', '60-61', '62-64'}:
return '45-64'
elif age_range in {'65-74', '65-66', '67-69', '70-74', '75-84', '75-79', '80-84', '85+'}:
return '65+'
elif age_range == 'Total':
return 'Total'
def rename_age_bracket(bracket):
"""Converts ACS age bracket label to standardized bracket format of "a-b",
where a is the lower end of the bracket and b is the upper end,
inclusive.
bracket: ACS age bracket."""
parts = bracket.split()
if len(parts) == 3 and parts[0] == "Under":
return "0-" + str(int(parts[1]) - 1)
elif len(parts) == 4 and parts[1] == "to" and parts[3] == "years":
return parts[0] + "-" + parts[2]
elif len(parts) == 4 and parts[1] == "and" and parts[3] == "years":
return parts[0] + "-" + parts[2]
elif len(parts) == 2 and parts[1] == "years":
return parts[0] + "-" + parts[0]
elif len(parts) == 4 and " ".join(parts[1:]) == "years and over":
return parts[0] + "+"
else:
return bracket
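# Illustrative conversions (mirroring the branches above):
#   "Under 5 years" -> "0-4", "25 to 29 years" -> "25-29",
#   "21 years" -> "21-21", "85 years and over" -> "85+".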
def update_col_types(frame):
"""Returns a new DataFrame with the column types replaced with int64 for
population columns and string for other columns.
frame: The original DataFrame"""
colTypes = {}
for col in frame.columns:
if col != "NAME" and col != "state" and col != "county":
colTypes[col] = "int64"
else:
            colTypes[col] = "string"
frame = frame.astype(colTypes)
return frame
class ACSPopulationIngester():
"""American Community Survey population data in the United States from the
US Census."""
def __init__(self, county_level, base_acs_url):
# The base ACS url to use for API calls.
self.base_acs_url = base_acs_url
# Whether the data is at the county level. If false, it is at the state
# level
self.county_level = county_level
# The base columns that are always used to group by.
self.base_group_by_cols = (
[STATE_FIPS_COL, COUNTY_FIPS_COL, COUNTY_NAME_COL] if county_level
else [STATE_FIPS_COL, STATE_NAME_COL])
# The base columns that are always used to sort by
self.base_sort_by_cols = (
[STATE_FIPS_COL, COUNTY_FIPS_COL] if county_level
else [STATE_FIPS_COL])
def upload_to_gcs(self, gcs_bucket):
"""Uploads population data from census to GCS bucket."""
metadata = census.fetch_acs_metadata(self.base_acs_url)
var_map = parse_acs_metadata(metadata, list(GROUPS.keys()))
concepts = list(SEX_BY_AGE_CONCEPTS_TO_RACE.keys())
concepts.append(HISPANIC_BY_RACE_CONCEPT)
file_diff = False
for concept in concepts:
group_vars = get_vars_for_group(concept, var_map, 2)
cols = list(group_vars.keys())
url_params = get_census_params(cols, self.county_level)
concept_file_diff = url_file_to_gcs.url_file_to_gcs(
self.base_acs_url, url_params, gcs_bucket,
self.get_filename(concept))
file_diff = file_diff or concept_file_diff
return file_diff
def write_to_bq(self, dataset, gcs_bucket):
"""Writes population data to BigQuery from the provided GCS bucket
dataset: The BigQuery dataset to write to
gcs_bucket: The name of the gcs bucket to read the data from"""
# TODO change this to have it read metadata from GCS bucket
metadata = census.fetch_acs_metadata(self.base_acs_url)
var_map = parse_acs_metadata(metadata, list(GROUPS.keys()))
race_and_hispanic_frame = gcs_to_bq_util.load_values_as_dataframe(
gcs_bucket, self.get_filename(HISPANIC_BY_RACE_CONCEPT))
race_and_hispanic_frame = update_col_types(race_and_hispanic_frame)
race_and_hispanic_frame = standardize_frame(
race_and_hispanic_frame,
get_vars_for_group(HISPANIC_BY_RACE_CONCEPT, var_map, 2),
[HISPANIC_COL, RACE_COL],
self.county_level,
POPULATION_COL)
sex_by_age_frames = {}
for concept in SEX_BY_AGE_CONCEPTS_TO_RACE:
sex_by_age_frame = gcs_to_bq_util.load_values_as_dataframe(
gcs_bucket, self.get_filename(concept))
sex_by_age_frame = update_col_types(sex_by_age_frame)
sex_by_age_frames[concept] = sex_by_age_frame
frames = {
self.get_table_name_by_race(): self.get_all_races_frame(
race_and_hispanic_frame),
self.get_table_name_by_sex_age_race(): self.get_sex_by_age_and_race(
var_map, sex_by_age_frames)
}
frames['by_sex_age_%s' % self.get_geo_name()] = self.get_by_sex_age(
frames[self.get_table_name_by_sex_age_race()], get_decade_age_bucket)
by_sex_age_uhc = None
if not self.county_level:
by_sex_age_uhc = self.get_by_sex_age(frames[self.get_table_name_by_sex_age_race()], get_uhc_age_bucket)
frames['by_age_%s' % self.get_geo_name()] = self.get_by_age(
frames['by_sex_age_%s' % self.get_geo_name()],
by_sex_age_uhc)
frames['by_sex_%s' % self.get_geo_name()] = self.get_by_sex(
frames[self.get_table_name_by_sex_age_race()])
for table_name, df in frames.items():
# All breakdown columns are strings
column_types = {c: 'STRING' for c in df.columns}
column_types[POPULATION_COL] = 'INT64'
if RACE_INCLUDES_HISPANIC_COL in df.columns:
column_types[RACE_INCLUDES_HISPANIC_COL] = 'BOOL'
if POPULATION_PCT_COL in df.columns:
column_types[POPULATION_PCT_COL] = 'FLOAT'
gcs_to_bq_util.add_dataframe_to_bq(
df, dataset, table_name, column_types=column_types)
def get_table_geo_suffix(self):
return "_county" if self.county_level else "_state"
def get_geo_name(self):
return 'county' if self.county_level else 'state'
def get_fips_col(self):
return COUNTY_FIPS_COL if self.county_level else STATE_FIPS_COL
def get_geo_name_col(self):
return COUNTY_NAME_COL if self.county_level else STATE_NAME_COL
def get_table_name_by_race(self):
return "by_race" + self.get_table_geo_suffix() + "_std"
def get_table_name_by_sex_age_race(self):
return "by_sex_age_race" + self.get_table_geo_suffix() + "_std"
def get_filename(self, concept):
"""Returns the name of a file for the given ACS concept
concept: The ACS concept description, eg 'SEX BY AGE'"""
return self.add_filename_suffix(concept.replace(" ", "_"))
def add_filename_suffix(self, root_name):
"""Adds geography and file type suffix to the root name.
root_name: The root file name."""
return root_name + self.get_table_geo_suffix() + ".json"
def sort_race_frame(self, df):
sort_cols = self.base_sort_by_cols.copy()
sort_cols.append(RACE_CATEGORY_ID_COL)
return df.sort_values(sort_cols).reset_index(drop=True)
def sort_sex_age_race_frame(self, df):
sort_cols = self.base_sort_by_cols.copy()
# Note: This sorts alphabetically, which isn't ideal for the age column.
# However, it doesn't matter how these are sorted in the backend, this
# is just for convenience when looking at the data in BigQuery.
sort_cols.extend([RACE_CATEGORY_ID_COL, SEX_COL, AGE_COL])
return df.sort_values(sort_cols).reset_index(drop=True)
def standardize_race_exclude_hispanic(self, df):
"""Standardized format using mutually exclusive groups by excluding
Hispanic or Latino from other racial groups. Summing across all race
categories equals the total population."""
def get_race_category_id_exclude_hispanic(row):
if (row[HISPANIC_COL] == 'Hispanic or Latino'):
return Race.HISP.value
else:
return RACE_STRING_TO_CATEGORY_ID_EXCLUDE_HISP[row[RACE_COL]]
standardized_race = df.copy()
standardized_race[RACE_CATEGORY_ID_COL] = standardized_race.apply(
get_race_category_id_exclude_hispanic, axis=1)
standardized_race.drop(HISPANIC_COL, axis=1, inplace=True)
group_by_cols = self.base_group_by_cols.copy()
group_by_cols.append(RACE_CATEGORY_ID_COL)
standardized_race = standardized_race.groupby(
group_by_cols).sum().reset_index()
return standardized_race
def standardize_race_include_hispanic(self, df):
"""Alternative format where race categories include Hispanic/Latino.
Totals are also included because summing over the column will give a
larger number than the actual total."""
by_hispanic = df.copy()
group_by_cols = self.base_group_by_cols.copy()
group_by_cols.append(HISPANIC_COL)
by_hispanic = by_hispanic.groupby(group_by_cols).sum().reset_index()
by_hispanic[RACE_CATEGORY_ID_COL] = by_hispanic.apply(
lambda r: (Race.HISP.value
if r[HISPANIC_COL] == 'Hispanic or Latino'
else Race.NH.value),
axis=1)
by_hispanic.drop(HISPANIC_COL, axis=1, inplace=True)
by_race = df.copy()
group_by_cols = self.base_group_by_cols.copy()
group_by_cols.append(RACE_COL)
by_race = by_race.groupby(group_by_cols).sum().reset_index()
by_race[RACE_CATEGORY_ID_COL] = by_race.apply(
lambda r: RACE_STRING_TO_CATEGORY_ID_INCLUDE_HISP[r[RACE_COL]],
axis=1)
        return pd.concat([by_hispanic, by_race])
import asyncio
import os
import os.path
import pandas as pd # type: ignore
import pyEX # type: ignore
from collections import deque
from datetime import datetime, timedelta
from tqdm import tqdm # type: ignore
from typing import AsyncGenerator, Any, Deque, List
from aat.exchange import Exchange
from aat.config import InstrumentType, EventType, Side, TradingType
from aat.core import ExchangeType, Instrument, Event, Trade, Order
_iex_instrument_types = {
"ad": InstrumentType.EQUITY, # ad - ADR
"gdr": InstrumentType.EQUITY, # gdr - GDR
"re": InstrumentType.OTHER, # re - REIT
"ce": InstrumentType.MUTUALFUND, # ce - Closed end fund
"si": InstrumentType.EQUITY, # si - Secondary Issue
"lp": InstrumentType.OTHER, # lp - Limited Partnerships
"cs": InstrumentType.EQUITY, # cs - Common Stock
"et": InstrumentType.EQUITY, # et - ETF
"wt": InstrumentType.OTHER, # wt - Warrant
"rt": InstrumentType.OTHER, # rt – Right
"oef": InstrumentType.MUTUALFUND, # oef - Open Ended Fund
"cef": InstrumentType.MUTUALFUND, # cef - Closed Ended Fund
"ps": InstrumentType.EQUITY, # ps - Preferred Stock
"ut": InstrumentType.OTHER, # ut - Unit
"struct": InstrumentType.OTHER, # struct - Structured Product
}
class IEX(Exchange):
"""Investor's Exchange"""
def __init__(
self,
trading_type: TradingType,
verbose: bool,
api_key: str,
is_sandbox: bool,
timeframe: str = "1y",
start_date: str = "",
end_date: str = "",
cache_data: bool = True,
) -> None:
super().__init__(ExchangeType("iex"))
self._trading_type = trading_type
self._verbose = verbose
self._api_key = api_key
self._is_sandbox = is_sandbox
self._cache_data = cache_data
if trading_type == TradingType.LIVE:
assert not is_sandbox
self._timeframe = timeframe
if timeframe == "live":
assert trading_type != TradingType.BACKTEST
if timeframe == "1d":
# intraday testing
# TODO if today is weekend/holiday, pick last day with data
self._start_date = (
datetime.strptime(start_date, "%Y%m%d")
if start_date
else datetime.today()
)
self._end_date = (
datetime.strptime(end_date, "%Y%m%d") if end_date else datetime.today()
)
self._subscriptions: List[Instrument] = []
# "Order" management
self._queued_orders: Deque[Order] = deque()
self._order_id = 1
# *************** #
# General methods #
# *************** #
async def connect(self) -> None:
"""connect to exchange. should be asynchronous.
For OrderEntry-only, can just return None
"""
self._client = pyEX.Client(
self._api_key, "sandbox" if self._is_sandbox else "stable"
)
# ******************* #
# Market Data Methods #
# ******************* #
async def instruments(self) -> List[Instrument]:
"""get list of available instruments"""
instruments = []
symbols = self._client.symbols()
for record in symbols:
if (
not record["isEnabled"]
or not record["type"]
or record["type"] == "temp"
):
continue
symbol = record["symbol"]
brokerExchange = record["exchange"]
type = _iex_instrument_types[record["type"]]
currency = Instrument(type=InstrumentType.CURRENCY, name=record["currency"])
try:
inst = Instrument(
name=symbol,
type=type,
exchange=self.exchange(),
brokerExchange=brokerExchange,
currency=currency,
)
except AssertionError:
# Happens sometimes on sandbox
continue
instruments.append(inst)
return instruments
async def subscribe(self, instrument: Instrument) -> None:
self._subscriptions.append(instrument)
async def tick(self) -> AsyncGenerator[Any, Event]: # type: ignore[override]
"""return data from exchange"""
if self._timeframe == "live":
data: Deque[dict] = deque()
def _callback(record: dict) -> None:
data.append(record)
self._client.tradesSSE(
symbols=",".join([i.name for i in self._subscriptions]),
on_data=_callback,
)
while True:
while data:
record = data.popleft()
volume = record["volume"]
price = record["price"]
instrument = Instrument(record["symbol"], InstrumentType.EQUITY)
o = Order(
volume=volume,
price=price,
side=Side.BUY,
instrument=instrument,
exchange=self.exchange(),
)
t = Trade(
volume=volume, price=price, taker_order=o, maker_orders=[]
)
yield Event(type=EventType.TRADE, target=t)
await asyncio.sleep(0)
else:
dfs = []
insts = set()
if self._timeframe != "1d":
for i in tqdm(self._subscriptions, desc="Fetching data..."):
if i.name in insts:
# already fetched the data, multiple subscriptions
continue
if self._cache_data:
# first, check if we have this data and its cached already
os.makedirs("_aat_data", exist_ok=True)
data_filename = os.path.join(
"_aat_data",
"iex_{}_{}_{}_{}.pkl".format(
i.name,
self._timeframe,
datetime.now().strftime("%Y%m%d"),
"sand" if self._is_sandbox else "",
),
)
if os.path.exists(data_filename):
print("using cached IEX data for {}".format(i.name))
df = pd.read_pickle(data_filename)
else:
df = self._client.chartDF(i.name, timeframe=self._timeframe)
df.to_pickle(data_filename)
else:
df = self._client.chartDF(i.name, timeframe=self._timeframe)
df = df[["close", "volume"]]
df.columns = ["close:{}".format(i.name), "volume:{}".format(i.name)]
dfs.append(df)
insts.add(i.name)
data_frame = pd.concat(dfs, axis=1)
data_frame.sort_index(inplace=True)
data_frame = data_frame.groupby(data_frame.index).last()
data_frame.drop_duplicates(inplace=True)
data_frame.fillna(method="ffill", inplace=True)
else:
for i in tqdm(self._subscriptions, desc="Fetching data..."):
if i.name in insts:
# already fetched the data, multiple subscriptions
continue
date = self._start_date
subdfs = []
while date <= self._end_date:
if self._cache_data:
# first, check if we have this data and its cached already
os.makedirs("_aat_data", exist_ok=True)
data_filename = os.path.join(
"_aat_data",
"iex_{}_{}_{}_{}.pkl".format(
i.name,
self._timeframe,
date,
"sand" if self._is_sandbox else "",
),
)
if os.path.exists(data_filename):
print(
"using cached IEX data for {} - {}".format(
i.name, date
)
)
df = pd.read_pickle(data_filename)
else:
df = self._client.chartDF(
i.name, timeframe="1d", date=date.strftime("%Y%m%d")
)
df.to_pickle(data_filename)
else:
df = self._client.chartDF(
i.name, timeframe="1d", date=date.strftime("%Y%m%d")
)
if not df.empty:
df = df[["average", "volume"]]
df.columns = [
"close:{}".format(i.name),
"volume:{}".format(i.name),
]
subdfs.append(df)
date += timedelta(days=1)
dfs.append(pd.concat(subdfs))
insts.add(i.name)
                data_frame = pd.concat(dfs, axis=1)
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import scipy.stats as ss
import os
#import matplotlib.pyplot as plt
import matplotlib
#matplotlib.get_backend()
from matplotlib import pyplot as plt
import seaborn as sns
#import matplotlib.pyplot as plt
#import matplotlib
#matplotlib.use('TkAgg')
#import matplotlib.pyplot as plt
import json
import os
import ast
from scipy.spatial import distance
import argparse
from collections import Counter, OrderedDict
from operator import itemgetter
#from sklearn.metrics import jaccard_similarity_score
#from sklearn.metrics import jaccard_score
parser = argparse.ArgumentParser()#help="--fields_path , --data_folder_name --proto ")
parser.add_argument('--proto', type=str, default="dns")#, required=True)
parser.add_argument('--proto_folder', default=None)#, required=True)
parser.add_argument('--plot_root_dir', type=str, default="./qp_plots")#, required=True)
parser.add_argument('--qp_dir', type=str, default="./qps/out_DNS_10k_query_searchout_May11dns_sec")#, required=True)
parser.add_argument('--depth_pq_file', type=str, default="dnssec_DAG_QPS_depth_median.npy")#, required=True)
#parser.add_argument('--depths', type=str, default="dnssec_DAG_QPS_depth_median.npy")#, required=True)
parser.add_argument('--depth', nargs='+', type=int, help='<Required> depth flag', required=True)
parser.add_argument('--max_plot_similarity', action='store_true', default=False)
parser.add_argument('--all_plot_similarity', action='store_true', default=False)
parser.add_argument('--DNSSEC_True', action='store_true', default=False)
#parser.add_argument('--intermediate_data_folder', type=str, default="./intermediate_data")
#parser.add_argument('--aggregate_summary', default=False, action='store_true')
args = parser.parse_args()
print(args)
# CHANGE these names when generating new data/protocol/signature
# Queries_filename = 'out_dns1kdns_sec-1.csv'
plot_root_dir = args.plot_root_dir# "./qp_plots"
#qp_dir = "."#"/Users/soojin/Google Drive/Research/AmpMap/Eval_Current/MeasurementOut/QueryPattern/out_DNS_10k_query_searchout_May11dns_sec"
qp_dir = args.qp_dir # "./qps/out_DNS_10k_query_searchout_May11dns_sec"
#qp_dir = "./qps/out_dns1kdns_sec/"# "./qps/out_DNS_10k_query_searchout_May11dns_sec"
# PERCENTILE = 98
#######Relevant file
Queries_filename = os.path.join( qp_dir, "ALL_Queries.csv")
sig_filename = os.path.join(qp_dir, 'sigs.npz')
#depth_pq_file = os.path.join(qp_dir,"dnssec_DAG_QPS_depth_median.npy" )
depth_pq_file = os.path.join(qp_dir, args.depth_pq_file )
domain_dnssec = ['berkeley.edu', 'energy.gov', 'aetna.com', 'Nairaland.com']
depth_minus1_file = os.path.join( qp_dir, "Hamming_2.csv")
# Queries_filename = os.path.join( qp_dir, 'ALL_Queries.csv')
# sig_filename = os.path.join(qp_dir, 'sigs.npz')
# depth_pq_file = os.path.join(qp_dir,"dnssec_DAG_QPS_depth_median.npy" )
# domain_dnssec = ['berkeley.edu', 'energy.gov', 'aetna.com', 'Nairaland.com']
# depth_minus1_file = os.path.join( qp_dir, "Hamming_2.csv")
topK = 10
####flag
all_plot_similarity = args.all_plot_similarity
max_plot_similarity = args.max_plot_similarity
DNSSEC_True= args.DNSSEC_True # True
PROTO = args.proto
# if PROTO.lower() == "dns" and DNSSEC_True == True:
# PROTO = "dns-dnssec"
# elif PROTO.lower() == "dns" and DNSSEC_True == False:
# PROTO = "dns-nodnssec"
if args.proto_folder == None:
args.proto_folder = PROTO
print(" ", args.proto_folder )
proto_dir = os.path.join(plot_root_dir, args.proto_folder ) #"yucheng_plots/"+PROTO
SetCover_True = True
# load QPs
# depths = [-1] #6,5,4,3,2,1] #[-1]
depths = args.depth # [0,1,2,3,4,5,6,7,8,9]
########### Hamming QP ######################################
# out_dir = "yucheng_plots/"+PROTO+"/hamming"
# if not os.path.exists(out_dir):
# os.makedirs(out_dir)
# QPs = pd.read_csv(QP_filename)
# QPs.sort_values(by=['amp_fac'], ascending=False,inplace=True)
#############################################################
#plt.clf()
#sys.exit(1)
def compute_percentile(QP_AFs, PERCENTILE, outfile):
QP_AFs_percentile = {}
for key, value in QP_AFs.items():
QP_AFs_percentile[key] = np.percentile(value, PERCENTILE)
QP_AFs_percentile = OrderedDict(sorted(QP_AFs_percentile.items(), key = itemgetter(1), reverse = True))
fp = open(os.path.join(out_dir, outfile+"_PERCENTILE_"+str(PERCENTILE)+".csv"), 'w')
for key, value in QP_AFs_percentile.items():
fp.write("{},{}\n".format(key, value))
fp.close()
return QP_AFs_percentile
# In[6]:
# map one query (currow) to a list of QPs
# Input: one query
# Output: a list of matching QPs index
def map_query_to_QP(currow):
newrow=[]
for row_sig in signatures:
e1=row_sig[0]
if e1 in ['url']:
continue
e2=hc[e1]
# print(e1,e2)
if(e2!=2):
newrow.append(currow[e1])
else:
sigvals=col_info[e1]
# print(sigvals)
curval = currow[e1]
for i,rr in enumerate(sigvals):
if curval in rr[0]:
newrow.append(i)
break
# print(currow,newrow,new_sigs[0,:])
newrow=np.array(newrow)
# print(newrow.shape)
curmatches=[]
for signum,sig in enumerate(new_sigs):
match = 1
for k in range(0,newrow.shape[0]):
sigin=k
v1=newrow[k]
cur_col=sig[sigin]
# print(v1,cur_col)
if '-1' in cur_col:
continue
# match=1
# else:
if str(v1) not in cur_col:
match=0
break
if(match==1):
curmatches.append(signum)
# aux_array.
curAF=currow[0]
# print("ROW : ",newrow)
# print("curmatches: ", curmatches)
# for matches in new_sigs[curmatches]:
# print("matches: ", matches)
return curmatches
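# Summary of map_query_to_QP above: the query row is re-encoded field by field
# (fields with hc[field] == 2 are mapped to the index of their value bucket in
# col_info), then compared against every signature in new_sigs, treating '-1'
# entries as wildcards; the indices of all matching signatures are returned.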
# In[7]:
# convert a list of tuples to dict
def merge_tuples(tuple_list):
results = {}
for item in tuple_list:
key = item[0]
value = item[1]
if key not in results:
results[key] = []
results[key].append(value)
return results
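# Example: merge_tuples([(1, 10.0), (2, 3.5), (1, 12.0)]) -> {1: [10.0, 12.0], 2: [3.5]}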
def read_from_json(filename):
with open(filename, 'r') as fp:
dict_ = json.load( fp )
return dict_
def output_dict_to_json(dict_, filename):
results = {}
results["children"] = []
for key, value in dict_.items():
result = {"Name": "QP "+str(key), "Count": round(value, 2)}
results["children"].append(result)
with open(filename, 'w') as fp:
json.dump(results, fp)
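# Resulting JSON shape: {"children": [{"Name": "QP 3", "Count": 12.34}, ...]},
# i.e. the structure the bubble_plot.html template reads via d3.json below.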
def output_dict_to_json_v2(dict_, filename):
with open(filename, 'w') as fp:
json.dump(dict_, fp)
# AForCount : 0: AF, 1: Count
def output_dict_to_csv(dict_, filename, AForCount):
with open(filename, 'w') as fp:
if AForCount == 0:
fp.write("QP_index,meanAF\n")
elif AForCount == 1:
fp.write("QP_index,count\n")
elif AForCount == 2:
fp.write("QP_index,medianAF\n")
for key, value in dict_.items():
fp.write("{},{}\n".format(key, value))
def output_json_to_html(infile, outfile, AForCount):
fr = open("bubble_plot.html", 'r')
fw = open(os.path.join(proto_dir, "depth_"+str(DEPTH)+"_"+outfile), 'w')
print(os.path.join(proto_dir, "depth_"+str(DEPTH)+"_"+outfile))
infile = "depth_"+str(DEPTH)+"/"+infile
for line in fr:
if (line.strip().startswith("d3.json")):
fw.write("\t\td3.json(\"%s\", function(dataset) {\n"%infile)
elif (line.strip().startswith("var diameter")):
if AForCount == 0:
fw.write("\t\t\tvar diameter = 800\n")
elif AForCount == 1:
fw.write("\t\t\tvar diameter = 600\n")
else:
fw.write(line)
fr.close()
fw.close()
def output_QP_stats(QP_AFs):
QP_mean_AF = {}
QP_occ = {}
QP_percent = {}
total_len = 0
for key, value in QP_AFs.items():
QP_mean_AF[key] = np.mean(value)
QP_occ[key] = len(value)
total_len += len(value)
for key, value in QP_occ.items():
QP_percent[key] = float(value)/float(total_len)
QP_mean_AF = OrderedDict(sorted(QP_mean_AF.items(), key = itemgetter(1), reverse = True))
QP_occ = OrderedDict(sorted(QP_occ.items(), key = itemgetter(1), reverse = True))
QP_percent = OrderedDict(sorted(QP_percent.items(), key = itemgetter(1), reverse = True))
return QP_mean_AF, QP_occ, QP_percent
# In[24]:
# box plot for top FIVE QPs
# pick TOP by MEAN AF
# box plot for top FIVE QPs
# pick TOP by MEAN AF
def QP_boxplot(QP_AFs, QP_mean_AF, topK, outfile, title, rank_by):
assert(len(QP_AFs) == len(QP_mean_AF))
top_index_num = min(len(QP_mean_AF), topK)
#print("top index num" , top_index_num)
#print("list ",list(QP_mean_AF.keys()))
top_index = list(QP_mean_AF.keys())[:top_index_num]
#print(top_index)
data = []
xlabels = []
nll=[]
plt.style.use(['seaborn-whitegrid', 'seaborn-paper'])
df = pd.DataFrame(columns=['QPs', 'value'])
rowlist=[]
# dict={}
for index in top_index:
values=QP_AFs[index]
for e1 in values:
curd={}
curd['QP']="QP"+str(index)
curd['AF'] = e1
rowlist.append(curd)
# print(rowlist)
df = pd.DataFrame(rowlist)
# print(df.head())
# ()+1
# data.append(QP_AFs[index])
# xlabels.append("QP "+str(index))
# curd
# nll.append
# print(xlabels)
# print(data)
plt.clf()
plt.figure(figsize=(20, 5))
ax = sns.boxplot(x="QP", y="AF", data=df, linewidth=4, palette="Set2",dodge=True,showmeans=True ) # figsize=(15,6))
#ax = sns.boxplot(x='mode',y='count',hue='design',data=df1,linewidth=4, palette="Set2",dodge=True,showmeans=True )
# plt.boxplot(data)
    ax.set_xticks([i for i in range(top_index_num)])  # tick labels are already set from the 'QP' column by seaborn
#ax.set_xticklabels([i for i in range(top_index_num)], xlabels, fontsize=18)
ax.set_ylabel("Amplification Factor", fontsize=24)
ax.set_xlabel("Query Patterns (QP) ranked by {}".format(rank_by), fontsize=25, labelpad=20)
ax.tick_params(axis='x', labelsize=21)
ax.tick_params(axis='y', labelsize=23)
#plt.title(title)
plt.savefig(outfile,bbox_inches='tight')
for DEPTH in depths:
print("DEPTH: ", DEPTH)
########### DEPTH QP ######################################
proto_dir = os.path.join(plot_root_dir , args.proto_folder) # "yucheng_plots/"+ args.proto
if not os.path.exists(proto_dir):
os.makedirs(proto_dir)
#out_dir = plot_root_dir + "/" + PROTO + "/depth_"+str(DEPTH) # "yucheng_plots/"+ args.proto +"/depth_"+str(DEPTH)
out_dir = proto_dir + "/depth_"+str(DEPTH) # "yucheng_plots/"+ args.proto +"/depth_"+str(DEPTH)
#out_dir = plot_root_dir + "/" + args.proto_folder + "/depth_"+str(DEPTH)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
if SetCover_True == True:
print(depth_pq_file)
depth_QPs_dict = np.load( depth_pq_file, allow_pickle=True )
QPs_set = depth_QPs_dict.item()
QPs = QPs_set[DEPTH]
QPs = QPs.applymap(str)
else:
depth_QPs_dict = np.load( depth_pq_file , allow_pickle=True )
QPs = pd.read_csv(depth_minus1_file)
QPs.sort_values(by=['amp_fac'], ascending=False,inplace=True)
print(QPs)
#depth_QPs_dict = np.load("DAG_QPS_depth.npy")
##############################################################
# load queries
all_queries= | pd.read_csv(Queries_filename) | pandas.read_csv |
import pandas as pd
import itertools as it
import seaborn as sns
import numpy as np
from pymea import matlab_compatibility as mc
from matplotlib import pyplot as plt
from matplotlib import mlab as mlab
import random
from datetime import datetime, timedelta
def filter_neurons_homeostasis(cat_table, baseline_table, stim_table, ind_filter = True, var=10, minHz = 0.001, maxHz = 100000, foldMin = 0.001, filter_wells = False, data_col = 'spike_freq'):
'''
Returns a cat_table only including neurons that pass the filters for min/maxHz, baseline "var", staying alive
throughout the experiment, and responding to drug.
'''
c_filter = pd.DataFrame()
b_filter = pd.DataFrame()
count_real = 0
count_live = 0
count_final = 0
last_time = max(cat_table['time'])
#filter individual neurons based on baseline firing, whether they stay alive, and whether they respond to stim
for cond in cat_table['condition'].unique():
c = cat_table.query('condition == "%s"'%cond)
b = baseline_table.query('condition == "%s"'%cond)
s = stim_table.query('condition == "%s"'%cond)
c_filter_cond = pd.DataFrame()
b_filter_cond = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
from pandas import Categorical, DataFrame, Series, Timestamp, date_range
import pandas._testing as tm
class TestDataFrameDescribe:
def test_describe_bool_in_mixed_frame(self):
df = DataFrame(
{
"string_data": ["a", "b", "c", "d", "e"],
"bool_data": [True, True, False, False, False],
"int_data": [10, 20, 30, 40, 50],
}
)
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame(
{"int_data": [5, 30, df.int_data.std(), 10, 20, 30, 40, 50]},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=["bool"])
expected = DataFrame(
{"bool_data": [5, 2, False, 3]}, index=["count", "unique", "top", "freq"]
)
tm.assert_frame_equal(result, expected)
def test_describe_empty_object(self):
# GH#27183
df = pd.DataFrame({"A": [None, None]}, dtype=object)
result = df.describe()
expected = pd.DataFrame(
{"A": [0, 0, np.nan, np.nan]},
dtype=object,
index=["count", "unique", "top", "freq"],
)
tm.assert_frame_equal(result, expected)
result = df.iloc[:0].describe()
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH#13891
df = pd.DataFrame(
{
"bool_data_1": [False, False, True, True],
"bool_data_2": [False, True, True, True],
}
)
result = df.describe()
expected = DataFrame(
{"bool_data_1": [4, 2, True, 2], "bool_data_2": [4, 2, True, 3]},
index=["count", "unique", "top", "freq"],
)
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(
{
"bool_data": [False, False, True, True, False],
"int_data": [0, 1, 2, 3, 4],
}
)
result = df.describe()
expected = DataFrame(
{"int_data": [5, 2, df.int_data.std(), 0, 1, 2, 3, 4]},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(
{"bool_data": [False, False, True, True], "str_data": ["a", "b", "c", "a"]}
)
result = df.describe()
expected = DataFrame(
{"bool_data": [4, 2, True, 2], "str_data": [4, 3, "a", 2]},
index=["count", "unique", "top", "freq"],
)
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({"value": np.random.randint(0, 10000, 100)})
labels = [f"{i} - {i + 499}" for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=["value"], ascending=True)
df["value_group"] = pd.cut(
df.value, range(0, 10500, 500), right=False, labels=cat_labels
)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(
["a", "b", "b", "b"], categories=["a", "b", "c"], ordered=True
)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3], index=["count", "unique", "top", "freq"])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_empty_categorical_column(self):
# GH#26397
        # Ensure the index of an empty categorical DataFrame column
# also contains (count, unique, top, freq)
df = pd.DataFrame({"empty_col": Categorical([])})
result = df.describe()
expected = DataFrame(
{"empty_col": [0, 0, np.nan, np.nan]},
index=["count", "unique", "top", "freq"],
dtype="object",
)
tm.assert_frame_equal(result, expected)
# ensure NaN, not None
assert np.isnan(result.iloc[2, 0])
assert np.isnan(result.iloc[3, 0])
def test_describe_categorical_columns(self):
# GH#11558
columns = pd.CategoricalIndex(["int1", "int2", "obj"], ordered=True, name="XXX")
df = DataFrame(
{
"int1": [10, 20, 30, 40, 50],
"int2": [10, 20, 30, 40, 50],
"obj": ["A", 0, None, "X", 1],
},
columns=columns,
)
result = df.describe()
exp_columns = pd.CategoricalIndex(
["int1", "int2"],
categories=["int1", "int2", "obj"],
ordered=True,
name="XXX",
)
expected = DataFrame(
{
"int1": [5, 30, df.int1.std(), 10, 20, 30, 40, 50],
"int2": [5, 30, df.int2.std(), 10, 20, 30, 40, 50],
},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
columns=exp_columns,
)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values, expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(
["2011-01-01", "2011-02-01", "2011-03-01"],
freq="MS",
tz="US/Eastern",
name="XXX",
)
df = DataFrame(
{
0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ["A", 0, None, "X", 1],
}
)
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(
["2011-01-01", "2011-02-01"], freq="MS", tz="US/Eastern", name="XXX"
)
expected = DataFrame(
{
0: [5, 30, df.iloc[:, 0].std(), 10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(), 10, 20, 30, 40, 50],
},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == "MS"
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH#6145
t1 = pd.timedelta_range("1 days", freq="D", periods=5)
t2 = pd.timedelta_range("1 hours", freq="H", periods=5)
df = pd.DataFrame({"t1": t1, "t2": t2})
expected = DataFrame(
{
"t1": [
5,
pd.Timedelta("3 days"),
df.iloc[:, 0].std(),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
pd.Timedelta("4 days"),
pd.Timedelta("5 days"),
],
"t2": [
5,
pd.Timedelta("3 hours"),
df.iloc[:, 1].std(),
pd.Timedelta("1 hours"),
| pd.Timedelta("2 hours") | pandas.Timedelta |
"""Unit tests for csv_importer.py."""
# standard library
import unittest
from unittest.mock import MagicMock
# third party
import pandas
# py3tester coverage target
__test_target__ = 'delphi.epidata.acquisition.covidcast.csv_importer'
class UnitTests(unittest.TestCase):
"""Basic unit tests."""
def test_is_sane_day(self):
"""Sanity check some dates."""
self.assertTrue(CsvImporter.is_sane_day(20200418))
self.assertFalse(CsvImporter.is_sane_day(22222222))
self.assertFalse(CsvImporter.is_sane_day(20200001))
self.assertFalse(CsvImporter.is_sane_day(20200199))
self.assertFalse(CsvImporter.is_sane_day(202015))
def test_is_sane_week(self):
"""Sanity check some weeks."""
self.assertTrue(CsvImporter.is_sane_week(202015))
self.assertFalse(CsvImporter.is_sane_week(222222))
self.assertFalse(CsvImporter.is_sane_week(202000))
self.assertFalse(CsvImporter.is_sane_week(202054))
self.assertFalse(CsvImporter.is_sane_week(20200418))
def test_find_csv_files(self):
"""Recursively explore and find CSV files."""
path_prefix = 'prefix/to/the/data/'
glob_paths = [
# valid weekly
path_prefix + 'fb_survey/weekly_202015_county_cli.csv',
# valid daily
path_prefix + 'ght/20200408_state_rawsearch.csv',
# valid national
path_prefix + 'valid/20200408_nation_sig.csv',
# invalid
path_prefix + 'invalid/hello_world.csv',
# invalid day
path_prefix + 'invalid/22222222_b_c.csv',
# invalid week
path_prefix + 'invalid/weekly_222222_b_c.csv',
# invalid geography
path_prefix + 'invalid/20200418_province_c.csv',
# ignored
path_prefix + 'ignored/README.md',
]
mock_glob = MagicMock()
mock_glob.glob.return_value = glob_paths
found = set(CsvImporter.find_csv_files(path_prefix, glob=mock_glob))
expected_issue_day=int(date.today().strftime("%Y%m%d"))
expected_issue_week=int(str(epi.Week.fromdate(date.today())))
time_value_day = 20200408
expected = set([
(glob_paths[0], ('fb_survey', 'cli', 'week', 'county', 202015, expected_issue_week, delta_epiweeks(202015, expected_issue_week))),
(glob_paths[1], ('ght', 'rawsearch', 'day', 'state', time_value_day, expected_issue_day, (date.today() - date(year=time_value_day // 10000, month=(time_value_day // 100) % 100, day=time_value_day % 100)).days)),
(glob_paths[2], ('valid', 'sig', 'day', 'nation', time_value_day, expected_issue_day, (date.today() - date(year=time_value_day // 10000, month=(time_value_day // 100) % 100, day=time_value_day % 100)).days)),
(glob_paths[3], None),
(glob_paths[4], None),
(glob_paths[5], None),
(glob_paths[6], None),
])
self.assertEqual(found, expected)
def test_is_header_valid_allows_extra_columns(self):
"""Allow and ignore extra columns in the header."""
columns = CsvImporter.REQUIRED_COLUMNS
self.assertTrue(CsvImporter.is_header_valid(columns))
self.assertTrue(CsvImporter.is_header_valid(columns | {'foo', 'bar'}))
def test_is_header_valid_does_not_depend_on_column_order(self):
"""Allow columns to appear in any order."""
# sorting changes the order of the columns
columns = sorted(CsvImporter.REQUIRED_COLUMNS)
self.assertTrue(CsvImporter.is_header_valid(columns))
def test_floaty_int(self):
"""Parse ints that may look like floats."""
self.assertEqual(CsvImporter.floaty_int('-1'), -1)
self.assertEqual(CsvImporter.floaty_int('-1.0'), -1)
with self.assertRaises(ValueError):
CsvImporter.floaty_int('-1.1')
def test_maybe_apply(self):
"""Apply a function to a value as long as it's not null-like."""
self.assertEqual(CsvImporter.maybe_apply(float, '3.14'), 3.14)
self.assertEqual(CsvImporter.maybe_apply(int, '1'), 1)
self.assertIsNone(CsvImporter.maybe_apply(int, 'NA'))
self.assertIsNone(CsvImporter.maybe_apply(int, 'NaN'))
self.assertIsNone(CsvImporter.maybe_apply(float, ''))
self.assertIsNone(CsvImporter.maybe_apply(float, None))
def test_extract_and_check_row(self):
"""Apply various sanity checks to a row of data."""
def make_row(
geo_type='state',
geo_id='vi',
val='1.23',
se='4.56',
sample_size='100.5'):
row = MagicMock(
geo_id=geo_id,
val=val,
se=se,
sample_size=sample_size)
return geo_type, row
# cases to test each failure mode
failure_cases = [
(make_row(geo_type='county', geo_id='1234'), 'geo_id'),
(make_row(geo_type='county', geo_id='00000'), 'geo_id'),
(make_row(geo_type='hrr', geo_id='600'), 'geo_id'),
(make_row(geo_type='msa', geo_id='1234'), 'geo_id'),
(make_row(geo_type='msa', geo_id='01234'), 'geo_id'),
(make_row(geo_type='dma', geo_id='400'), 'geo_id'),
(make_row(geo_type='state', geo_id='48'), 'geo_id'),
(make_row(geo_type='state', geo_id='iowa'), 'geo_id'),
(make_row(geo_type='nation', geo_id='0000'), 'geo_id'),
(make_row(geo_type='province', geo_id='ab'), 'geo_type'),
(make_row(se='-1'), 'se'),
(make_row(geo_type=None), 'geo_type'),
(make_row(geo_id=None), 'geo_id'),
(make_row(val=None), 'val'),
(make_row(val='nan'), 'val'),
(make_row(val='NaN'), 'val'),
(make_row(geo_type='hrr', geo_id='hrr001'), 'geo_id'),
(make_row(val='val'), 'val'),
(make_row(se='se'), 'se'),
(make_row(sample_size='sample_size'), 'sample_size'),
]
for ((geo_type, row), field) in failure_cases:
values, error = CsvImporter.extract_and_check_row(row, geo_type)
self.assertIsNone(values)
self.assertEqual(error, field)
# a nominal case without missing values
geo_type, row = make_row()
values, error = CsvImporter.extract_and_check_row(row, geo_type)
self.assertIsInstance(values, CsvImporter.RowValues)
self.assertEqual(str(values.geo_value), row.geo_id)
self.assertEqual(str(values.value), row.val)
self.assertEqual(str(values.stderr), row.se)
self.assertEqual(str(values.sample_size), row.sample_size)
self.assertIsNone(error)
# a nominal case with missing values
geo_type, row = make_row(se='', sample_size='NA')
values, error = CsvImporter.extract_and_check_row(row, geo_type)
self.assertIsInstance(values, CsvImporter.RowValues)
self.assertEqual(str(values.geo_value), row.geo_id)
self.assertEqual(str(values.value), row.val)
self.assertIsNone(values.stderr)
self.assertIsNone(values.sample_size)
self.assertIsNone(error)
def test_load_csv_with_invalid_header(self):
"""Bail loading a CSV when the header is invalid."""
data = {'foo': [1, 2, 3]}
mock_pandas = MagicMock()
mock_pandas.read_csv.return_value = pandas.DataFrame(data=data)
filepath = 'path/name.csv'
geo_type = 'state'
rows = list(CsvImporter.load_csv(filepath, geo_type, pandas=mock_pandas))
self.assertTrue(mock_pandas.read_csv.called)
self.assertTrue(mock_pandas.read_csv.call_args[0][0], filepath)
self.assertEqual(rows, [None])
def test_load_csv_with_valid_header(self):
"""Yield sanity checked `RowValues` from a valid CSV file."""
# one invalid geo_id, but otherwise valid
data = {
'geo_id': ['ca', 'tx', 'fl', '123'],
'val': ['1.1', '1.2', '1.3', '1.4'],
'se': ['2.1', '2.2', '2.3', '2.4'],
'sample_size': ['301', '302', '303', '304'],
}
mock_pandas = MagicMock()
mock_pandas.read_csv.return_value = | pandas.DataFrame(data=data) | pandas.DataFrame |
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import (
assert_frame_equal,
assert_index_equal,
assert_series_equal,
)
from evalml.pipelines.components import LabelEncoder
def test_label_encoder_init():
encoder = LabelEncoder()
assert encoder.parameters == {"positive_label": None}
assert encoder.random_seed == 0
def test_label_encoder_fit_transform_y_is_None():
X = pd.DataFrame({})
y = pd.Series(["a", "b"])
encoder = LabelEncoder()
with pytest.raises(ValueError, match="y cannot be None"):
encoder.fit(X)
encoder.fit(X, y)
with pytest.raises(ValueError, match="y cannot be None"):
encoder.inverse_transform(None)
def test_label_encoder_transform_y_is_None():
X = pd.DataFrame({})
y = pd.Series(["a", "b"])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X)
assert_frame_equal(X, X_t)
assert y_t is None
def test_label_encoder_fit_transform_with_numeric_values_does_not_encode():
X = pd.DataFrame({})
# binary
y = pd.Series([0, 1, 1, 1, 0])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y, y_t)
# multiclass
X = pd.DataFrame({})
y = pd.Series([0, 1, 1, 2, 0, 2])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y, y_t)
def test_label_encoder_fit_transform_with_numeric_values_needs_encoding():
X = pd.DataFrame({})
# binary
y = pd.Series([2, 1, 2, 1])
y_expected = pd.Series([1, 0, 1, 0])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y_expected, y_t)
# multiclass
y = pd.Series([0, 1, 1, 3, 0, 3])
y_expected = pd.Series([0, 1, 1, 2, 0, 2])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y_expected, y_t)
def test_label_encoder_fit_transform_with_categorical_values():
X = pd.DataFrame({})
# binary
y = pd.Series(["b", "a", "b", "b"])
y_expected = pd.Series([1, 0, 1, 1])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y_expected, y_t)
# multiclass
y = pd.Series(["c", "a", "b", "c", "d"])
y_expected = pd.Series([2, 0, 1, 2, 3])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y_expected, y_t)
def test_label_encoder_fit_transform_equals_fit_and_transform():
X = pd.DataFrame({})
y = pd.Series(["a", "b", "c", "a"])
encoder = LabelEncoder()
X_fit_transformed, y_fit_transformed = encoder.fit_transform(X, y)
encoder_duplicate = LabelEncoder()
encoder_duplicate.fit(X, y)
X_transformed, y_transformed = encoder_duplicate.transform(X, y)
assert_frame_equal(X_fit_transformed, X_transformed)
assert_series_equal(y_fit_transformed, y_transformed)
def test_label_encoder_inverse_transform():
X = pd.DataFrame({})
y = pd.Series(["a", "b", "c", "a"])
y_expected = ww.init_series(y)
encoder = LabelEncoder()
_, y_fit_transformed = encoder.fit_transform(X, y)
y_inverse_transformed = encoder.inverse_transform(y_fit_transformed)
assert_series_equal(y_expected, y_inverse_transformed)
y_encoded = | pd.Series([1, 0, 2, 1]) | pandas.Series |
# AUTOGENERATED! DO NOT EDIT! File to edit: 20_data.ipynb (unless otherwise specified).
__all__ = ['read_csv', 'concat', 'add_tables', 'prepare_labrie_data', 'prepare_braverman_data',
'prepare_philander_data', 'generate_bets', 'check_data', 'summarise_app', 'summarise_providers',
'plot_player_career', 'plot_player_career_split', 'plot_provider_dates', 'plot_provider_bets']
# Cell
import pandas as pd
def read_csv(file, parse_dates=[], index_col=None, delimiter=",", dummy_data=False):
"Read csv files into a pandas dataframe."
df = pd.read_csv(
file, parse_dates=parse_dates, index_col=index_col, delimiter=delimiter
)
return df
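# Usage sketch (file name and columns are illustrative, not from a bundled dataset):
#     df = read_csv("raw_2008.txt", parse_dates=["Date"], delimiter="\t")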
# Cell
def concat(dfs, axis=0):
"This method is a wrapper of pandas' **concat** function. See [here](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.concat.html) for details."
df = pd.concat(dfs, axis=axis)
return df
# Cell
def add_tables(t1, t2, same_columns=False):
"Joins two tables (the second to the right hand side of the first), adding '_2' to column names if same_columns parameter is True."
if same_columns:
t2.columns = [name + "_2" for name in t2.columns]
combined = pd.concat([t1, t2.reindex(t1.index)], axis=1)
return combined
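# Usage sketch (hypothetical frames): t2 is joined to the right of t1, aligned on t1's index;
# with same_columns=True the second table's columns get a '_2' suffix so duplicate names survive.
#     combined = add_tables(daily_2008, daily_2009, same_columns=True)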
# Cell
def prepare_labrie_data(filename, savedir="labrie_individuals/", loud=False, year=2008):
"Splits the original labrie data into CSV files for each individual's transactions and renames the columns to be compatable with the rest of the gamba library."
labrie_data = None
if year == 2008:
labrie_data = | pd.read_csv(filename, delimiter="\t", parse_dates=["Date"]) | pandas.read_csv |
# Importing Libraries
import time
import nltk
import pandas as pnda
nltk.download('punkt')
nltk.download('stopwords')
from nltk.tokenize import WhitespaceTokenizer as tknizer
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer as stemmer
import pickle as pckl
COUNT = 6810 # number of documents in the database.csv file
class ProcessData:
def __init__(self):
"""
This Class is used to process the Dataset, followed by Normalization and Tokenization.
The goal is to create an indexing list and store it in a pckl file.
"""
self.tokenizer_w = tknizer()
self.stop = stopwords.words('english')
self.ps = stemmer()
def read(self):
'''
Reads the dataset which is in csv format and stores it as a dataframe in df.
Returns the dataframe df.
'''
df = pnda.read_csv('database.csv')
filehandler = open("book_description.obj" , "wb")
pckl.dump(df , filehandler)
filehandler.close()
return df
def LowerCase(self, df):
'''
Converts the text to lower case and replaces NA with ''.
Takes parameters as the dataframe and returns the processed dataframe 'df'
'''
print("Time required for Preprocessing and Tokenizing")
self.start_time = time.time()
# Remove NA values
df = df.fillna('')
# 'data' variable stores column names used to form the corpus
data = ['isbn13', 'isbn10','title','subtitle','authors','categories','thumbnail','description','published_year','average_rating','num_pages','ratings_count']
# Convert all text to Lower Case
for item in data:
df[item] = df[item].astype(str).str.lower()
df = df.fillna('')
return df
def preprocess(self, text):
'''
Removes punctuations and escape sequences.
Takes the text to be modified as a parameter and returns the modified text.
'''
text = text.replace("\n"," ").replace("\r"," ")
text = text.replace("'s"," ")
        punctuationList = '!"#$%&\\()*+,-./:;<=>?@[\\]^_{|}~'
x = str.maketrans(dict.fromkeys(punctuationList," "))
text = text.translate(x)
return text
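    # Illustrative example: punctuation, "'s" and newline/carriage-return characters become spaces,
    # e.g. self.preprocess("Hello, World!\n") -> "Hello  World  " (spacing approximate)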
def tokenizeHelper(self, text):
'''
Calls the nltk tknizer to tokenize.
Takes parameter as the text and returns the tokenized text.
'''
text = self.preprocess(text)
return self.tokenizer_w.tokenize(text)
def Tokenizer(self, df):
'''
Adds Columns to the dataframe containing the tokens.
Takes parameter as the dataframe and returns the dataframe with columns containing the tokens.
'''
df['TokensDescription'] = df['description'].apply(self.tokenizeHelper)
df['TokensSubtitle'] = df['subtitle'].apply(self.tokenizeHelper)
df['TokensTitle'] = df['title'].apply(self.tokenizeHelper)
df['TokensAuthor'] = df['authors'].apply(self.tokenizeHelper)
df['TokensCategory'] = df['categories'].apply(self.tokenizeHelper)
df['Tokenspublished_year'] = df['published_year'].apply(self.tokenizeHelper)
df['Tokensaverage_rating'] = df['average_rating'].apply(self.tokenizeHelper)
df['Tokensnum_pages'] = df['num_pages'].apply(self.tokenizeHelper)
df['Tokensratings_count'] = df['ratings_count'].apply(self.tokenizeHelper)
# Tokens column stores the tokens for the corresponding document
df['Tokens'] = df['TokensDescription'] + df['TokensSubtitle'] + df['TokensTitle'] + df['TokensAuthor'] + df['TokensCategory'] + df['Tokenspublished_year'] + df['Tokensaverage_rating'] + df['Tokensnum_pages'] + df['Tokensratings_count']
df['Length'] = df.Tokens.apply(len)
df['TitleLength'] = df.TokensTitle.apply(len)
print("--- %s seconds ---" % (time.time() - self.start_time))
return df
def RemoveStopWords(self, df):
'''
This Function removes the stopwords from the Tokens Column in the DataFrame.
Takes the dataframe as a parameter. The changed dataframe is returned.
'''
print("Time required to Remove Stop Words")
self.start_time = time.time()
df['Tokens'] = df['Tokens'].apply(lambda x: [item for item in x if item not in self.stop])
print("--- %s seconds ---" % (time.time() - self.start_time))
return df
def Stemmer(self, df, x):
'''
This Function uses Porter's Stemmer for Stemming.
Takes the dataframe and the column name as parameters.
Stemming is done on the column name x.
Function returns the dataframe 'df'.
'''
print("Time required for Stemming")
self.start_time = time.time()
df['stemmed'] = df[x].apply(lambda x: [self.ps.stem(word) for word in x])
print("--- %s seconds ---" % (time.time() - self.start_time))
return df
def BagOfWords(self, uniqueWords, tokens):
'''
Creates a Dictionary with Keys as words and Values as the word-frequency in the document.
Function parameter : the dataframe column 'Unique_Words' (which stores the unique words per document).
: the dataframe column 'Tokens' (which stores the tokens per document).
Function returns a dictionary called numOfWords.
'''
unique = tuple(uniqueWords)
numOfWords = dict.fromkeys(unique, 0)
for word in tokens:
numOfWords[word] += 1
return numOfWords
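    # Illustrative example: self.BagOfWords({'a', 'b'}, ['a', 'a', 'b']) -> {'a': 2, 'b': 1}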
def TermFrequency(self, df_tokenized):
'''
Calculates the term frequency of each word document-wise.
Function takes the dataframe as parameters and returns a dataframe with extra columns.
'Unique _Words' column stores unique words per document by using set.
'Frequency' column stores the frequency of each word per document(i.e term frequency).
'''
print("Time required to create the Term Frequency")
self.start_time = time.time()
df_tokenized['Unique_Words'] = df_tokenized['stemmed'].apply(set)
df_tokenized['Frequency'] = df_tokenized.apply(lambda x: self.BagOfWords(x.Unique_Words, x.stemmed), axis = 1)
print("--- %s seconds ---" % (time.time() - self.start_time))
return df_tokenized
def Vocabulary(self, df_tokenized):
'''
Creates Vocabulary for all the documents. i.e Stores all the unique tokens.
Takes dataframe as a parameter and returns the unique words in the entire dataset(stored as Inverted_Index).
Uses a set to calculate unique words for the entire dataset.
'''
print("Time required to create the Inverted Index")
self.start_time = time.time()
Inverted_Index = | pnda.DataFrame() | pandas.DataFrame |
#Analyze statistics
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import alphapept.io
import os
import alphapept.io
import seaborn as sns
from tqdm.notebook import tqdm as tqdm
import warnings
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import Normalize
from scipy.interpolate import interpn
COLOR_1 = 'r'
COLOR_2 = 'b'
def density_scatter( x , y, ax = None, sort = True, bins = 30, cmap = 'turbo', **kwargs ) :
"""
Scatter plot colored by 2d histogram
Adapted from https://stackoverflow.com/questions/20105364/how-can-i-make-a-scatter-plot-colored-by-density-in-matplotlib
"""
data , x_e, y_e = np.histogram2d( x, y, bins = bins, density = True )
z = interpn( ( 0.5*(x_e[1:] + x_e[:-1]) , 0.5*(y_e[1:]+y_e[:-1]) ) , data , np.vstack([x,y]).T , method = "splinef2d", bounds_error = False)
#To be sure to plot all data
z[np.where(np.isnan(z))] = 0.0
# Sort the points by density, so that the densest points are plotted last
if sort :
idx = z.argsort()
x, y, z = x[idx], y[idx], z[idx]
ax.scatter( x, y, c=z, cmap=cmap, **kwargs )
return ax
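# Usage sketch (random data for illustration; extra kwargs are forwarded to ax.scatter).
# Note: despite the ax=None default, an existing axis must be passed in.
#     fig, ax = plt.subplots()
#     x, y = np.random.randn(1000), np.random.randn(1000)
#     density_scatter(x, y, ax=ax, bins=30, s=5)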
def prepare_files(path1, path2):
df1 = pd.read_hdf(path1, 'protein_fdr')
# add sequence charge
df1['missed_cleavages'] = df1['sequence'].str[:-1].str.count('K') + df1['sequence'].str[:-1].str.count('R')
#Convert maxquant
df2 = pd.read_csv(path2, sep='\t')
df2['charge'] = df2['Charge']
list_2 = df2['Modified sequence'].values
list_2 = [alphapept.io.parse_mq_seq(_) for _ in list_2]
df2['sequence'] = list_2
df2['precursor'] = ['_'.join(_) for _ in zip(list_2, df2['Charge'].values.astype('int').astype('str'))]
df2['protein'] = df2['Leading razor protein']
df2['decoy'] = df2['Reverse'] == '+'
df2['score'] = df2['Score']
df2['ms1_int_sum'] = df2['Intensity']
df2['missed_cleavages'] = df2['sequence'].str[:-1].str.count('K') + df2['sequence'].str[:-1].str.count('R')
return df1, df2
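# Usage sketch (paths are placeholders): df1 is the AlphaPept 'protein_fdr' table read from the
# results HDF file, df2 the MaxQuant evidence.txt, both normalised to shared column names.
#     df1, df2 = prepare_files('results.ms_data.hdf', 'evidence.txt')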
def compare_field(df1, df2, software_1, software_2, field, exclude_decoy=True):
title_dict = {'protein':'Number of unique proteins',
'sequence': 'Number of unique peptide sequences',
'precursor':'Number of unique sequence/charge combinations',
                  'charge':'Occurrence of charge states',
                  'digestion':'Occurrence of last AA in sequence',
                  'total_missed_cleavages':'Total number of missed cleavages',
                  'missed_cleavages':'Ratio of number of missed cleavages'}  # nicer descriptions for the plots
if exclude_decoy:
df1 = df1[~df1['decoy']]
df2 = df2[~df2['decoy']]
#Some pre-defined boundaries
plt.figure(figsize=(5,5))
if field == 'charge':
ratios = df1[field].value_counts() / df2[field].value_counts()
plt.axhline(1, color='k', linestyle=':')
plt.bar(ratios.index, ratios.values, label='Ratio {}/{}'.format(software_1, software_2))
plt.legend()
#bins = np.arange(1,6,0.5)
#plt.hist(df1[field].values, bins=bins,label=software_1)
#plt.hist(df2[field].values-0.5, bins=bins, label=software_2)
plt.legend()
elif (field == 'protein') or (field == 'sequence') or (field == 'precursor'):
plt.bar(software_1, len(set(df1[field])))
plt.bar(software_2, len(set(df2[field])))
elif (field == 'digestion'):
ratios = df1['sequence'].str[-1].value_counts() / df2['sequence'].str[-1].value_counts()
plt.axhline(1, color='k', linestyle=':')
plt.bar(ratios.index, ratios.values, label='Ratio {}/{}'.format(software_1, software_2))
plt.legend()
elif (field == 'total_missed_cleavages'):
field_ = 'missed_cleavages'
plt.bar(software_1, df1[field_].sum())
plt.bar(software_2, df2[field_].sum())
elif (field == 'missed_cleavages'):
ratios = df1[field].value_counts() / df2[field].value_counts()
plt.axhline(1, color='k', linestyle=':')
plt.bar(ratios.index, ratios.values, label='Ratio {}/{}'.format(software_1, software_2))
else:
raise NotImplementedError
plt.title(title_dict[field])
plt.show()
from matplotlib_venn import venn2
def compare_populations(df1, df2, software_1, software_2, field, exclude_decoy=True):
"""
    Compare two lists of peptides / proteins
Convention: all should be uppercase
ToDo: check this maybe
"""
title_dict = {'protein':'Shared proteins',
'sequence': 'Shared peptide sequences',
'precursor':'Shared sequence/charge combinations',
}
if exclude_decoy:
df1 = df1[~df1['decoy']]
df2 = df2[~df2['decoy']]
list_1 = df1[field].values
list_2 = df2[field].values
peptides_1 = set(list_1)
peptides_2 = set(list_2)
n_1 = len(peptides_1 - peptides_2)
n_2 = len(peptides_2 - peptides_1)
shared = len(peptides_1.intersection(peptides_2))
venn2(subsets = (n_1, n_2, shared), set_labels = (software_1, software_2))
plt.title(title_dict[field])
plt.show()
def compare_intensities(df1, df2,software_1, software_2):
ref_df1 = df1.copy()
ref_df2 = df2.copy()
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True,sharex=True, figsize=(10,5))
axes = [ax1, ax2]
for idx, _ in enumerate(['protein','precursor']):
ax = axes[idx]
d1 = np.log(ref_df1[[_,'fragments_int_sum']].groupby(_).sum())
d2 = np.log(ref_df2[[_,'ms1_int_sum']].groupby(_).sum())
d2 = d2[~np.isinf(d2['ms1_int_sum'].values)]
shared = set(d1.index.values).intersection(set(d2.index.values))
ax = density_scatter(d1.loc[shared]['fragments_int_sum'].values, d2.loc[shared]['ms1_int_sum'].values, ax = ax, bins=30)
ax.set_xlabel(software_1)
ax.set_ylabel(software_2)
ax.set_title(f"{_} intensity n={len(shared):,}")
mins_ = []
maxs_ = []
for idx, _ in enumerate(['protein','precursor']):
ax = axes[idx]
ylim = ax.get_ylim()
xlim = ax.get_xlim()
mins_.append(ylim[0])
maxs_.append(ylim[1])
mins_.append(xlim[0])
maxs_.append(xlim[1])
min_ = np.min(mins_)
max_ = np.max(maxs_)
for idx, _ in enumerate(['protein','precursor']):
ax = axes[idx]
ax.plot([min_, max_], [min_, max_], 'k:', alpha=0.7)
plt.show()
def protein_rank(df1, df2, software_1, software_2):
data_1 = df1[['protein','fragments_int_sum']].groupby('protein').sum()
data_1 = data_1.sort_values(by='fragments_int_sum', ascending=False) #.head(20)
data_1 = data_1.reset_index()
data_1 = data_1[data_1['fragments_int_sum']>0]
data_2 = df2[['Leading proteins','Intensity']].groupby('Leading proteins').sum()
data_2 = data_2.sort_values(by='Intensity', ascending=False) #.head(20)
data_2 = data_2.reset_index()
data_2 = data_2[data_2['Intensity']>0]
fig, axes = plt.subplots(1, 1, figsize=(5,5), sharex=True,sharey=True)
ax1 = axes
ax1.plot(data_1['fragments_int_sum'].values, label=software_1, color='r')
ax1.axhline(data_1['fragments_int_sum'].min(), color='r', linestyle=':')
ax1.axhline(data_1['fragments_int_sum'].max(), color='r', linestyle=':')
ax1.plot(data_2['Intensity'].values, label=software_2, color='b')
ax1.axhline(data_2['Intensity'].min(), color='b', linestyle=':')
ax1.axhline(data_2['Intensity'].max(), color='b', linestyle=':')
ax1.set_yscale('log')
ax1.legend()
ax1.set_ylabel('Protein Intensity')
plt.show()
def get_plot_df(ref, base_columns, ratio_columns, ax, id_, valid_filter = True):
to_plot = pd.DataFrame()
ref[base_columns] = ref[base_columns].replace(0, np.nan)
ref[ratio_columns] = ref[ratio_columns].replace(0, np.nan)
to_plot['Species'] = ref['Species']
to_plot['base'] = ref[base_columns].median(axis=1)
to_plot['ratio'] = ref[ratio_columns].median(axis=1)
to_plot['base_cnt'] = ref[base_columns].notna().sum(axis=1)
to_plot['ratio_cnt'] = ref[ratio_columns].notna().sum(axis=1)
to_plot['ratio_'] = np.log2(to_plot['base'] / to_plot['ratio'])
to_plot['sum_'] = np.log2(to_plot['ratio'])
if valid_filter:
valid = to_plot.query(f'ratio_cnt >= 2 and base_cnt >=2')
else:
valid = to_plot.query(f'ratio_cnt >0 and base_cnt >0')
homo = valid[valid['Species'] == 'Homo sapiens']
e_coli = valid[valid['Species'] == 'Escherichia coli']
homo_ratio = homo['ratio_'].values
e_coli_ratio = e_coli['ratio_'].values
ax = density_scatter(homo['ratio_'].values, homo['sum_'].values, ax, bins=20, cmap='Reds', alpha=0.5)
ax = density_scatter(e_coli['ratio_'].values, e_coli['sum_'].values, ax, bins=20, cmap='Blues', alpha=0.5)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
homo_ratio_median = np.nanmedian(homo_ratio[~np.isinf(homo_ratio)])
e_coli_ratio_median = np.nanmedian(e_coli_ratio[~np.isinf(e_coli_ratio)])
homo_ratio_std = np.nanstd(homo_ratio[~np.isinf(homo_ratio)])
e_coli_ratio_std = np.nanstd(e_coli_ratio[~np.isinf(e_coli_ratio)])
nl = '\n'
ax.set_title(f'{id_} {nl} Homo (median, std) {homo_ratio_median:.2f}, {homo_ratio_std:.2f} {nl} EColi (median, std) {e_coli_ratio_median:.2f}, {e_coli_ratio_std:.2f} {nl} {valid["Species"].value_counts().to_dict()}')
def get_plot_df_single(ref, base_columns, ratio_columns, ax, id_, valid_filter = True):
to_plot = pd.DataFrame()
ref[base_columns] = ref[base_columns].replace(0, np.nan)
ref[ratio_columns] = ref[ratio_columns].replace(0, np.nan)
to_plot['Species'] = ref['Species']
to_plot['base'] = ref[base_columns].median(axis=1)
to_plot['ratio'] = ref[ratio_columns].median(axis=1)
to_plot['base_cnt'] = ref[base_columns].notna().sum(axis=1)
to_plot['ratio_cnt'] = ref[ratio_columns].notna().sum(axis=1)
to_plot['ratio_'] = np.log2(to_plot['base'] / to_plot['ratio'])
to_plot['sum_'] = np.log2(to_plot['ratio'])
if valid_filter:
valid = to_plot.query(f'ratio_cnt >= 2 and base_cnt >=2')
else:
valid = to_plot.query(f'ratio_cnt >0 and base_cnt >0')
homo = valid[valid['Species'] == 'Homo sapiens']
e_coli = valid[valid['Species'] == 'Escherichia coli']
homo_ratio = homo['ratio_'].values
e_coli_ratio = e_coli['ratio_'].values
ax = density_scatter(valid['ratio_'].values, valid['sum_'].values, ax, bins=30)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
homo_ratio_median = np.nanmedian(homo_ratio[~np.isinf(homo_ratio)])
e_coli_ratio_median = np.nanmedian(e_coli_ratio[~np.isinf(e_coli_ratio)])
homo_ratio_std = np.nanstd(homo_ratio[~np.isinf(homo_ratio)])
e_coli_ratio_std = np.nanstd(e_coli_ratio[~np.isinf(e_coli_ratio)])
nl = '\n'
ax.set_title(f'{id_} {nl} Homo (median, std) {homo_ratio_median:.2f}, {homo_ratio_std:.2f} {nl} EColi (median, std) {e_coli_ratio_median:.2f}, {e_coli_ratio_std:.2f} {nl} {valid["Species"].value_counts().to_dict()}')
def algorithm_test(evd, ref, base_columns, ratio_columns, base_columns2, ratio_columns2, test_id, software_1, software_2):
spec_dict = {}
all_points = []
species_ = []
experiments = evd['Raw file'].unique().tolist()
protein_idx = []
for i in tqdm(range(len(ref))):
investigate = ref.iloc[i]
evd_ids = [int(_) for _ in investigate['Evidence IDs'].split(';')]
species = investigate['Species']
subset = evd.loc[evd_ids].copy()
field_ = 'Intensity'
subset['protein'] = 'X'
subset['filename'] = subset['Raw file']
subset['precursor'] = ['_'.join(_) for _ in zip(subset['Modified sequence'].values, subset['Charge'].values.astype('str'))]
protein = 'X'
from alphapept.quantification import protein_profile
profile, pre_lfq, experiment_ids, protein = protein_profile(subset, experiments, field_, protein)
xx = pd.DataFrame([profile, pre_lfq], columns=experiment_ids).T
xx.columns = ['lfq_ap', 'int_ap']
all_points.append(xx[['lfq_ap']].T)
protein_idx.append(i)
species_.append(species)
df = | pd.concat(all_points) | pandas.concat |
import datetime as dt
import os
import pickle
from typing import Dict, List
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import activations
from dl_portfolio.logger import LOGGER
from dl_portfolio.data import get_features
from dl_portfolio.pca_ae import build_model
from dl_portfolio.regularizers import WeightsOrthogonality
from dl_portfolio.regressors.nonnegative_linear.ridge import NonnegativeRidge
from dl_portfolio.regressors.nonnegative_linear.base import NonnegativeLinear
from dl_portfolio.constant import BASE_FACTOR_ORDER_DATASET2, BASE_FACTOR_ORDER_DATASET1
from sklearn.linear_model import LinearRegression, Lasso
LOG_BASE_DIR = './dl_portfolio/log'
def build_linear_model(ae_config, reg_type: str, **kwargs):
if reg_type == 'nn_ridge':
if ae_config.l_name == 'l2':
alpha = kwargs.get('alpha', ae_config.l)
kwargs['alpha'] = alpha
else:
alpha = kwargs.get('alpha')
assert alpha is not None
model = NonnegativeRidge(**kwargs)
elif reg_type == 'nn_ls_custom':
model = NonnegativeLinear()
elif reg_type == 'nn_ls':
model = LinearRegression(positive=True, fit_intercept=False, **kwargs)
elif reg_type == 'nn_lasso':
if ae_config.l_name == 'l1':
alpha = kwargs.get('alpha', ae_config.l)
kwargs['alpha'] = alpha
else:
alpha = kwargs.get('alpha')
assert alpha is not None
model = Lasso(positive=True, fit_intercept=False, **kwargs)
else:
raise NotImplementedError(reg_type)
return model
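# Usage sketch (the alpha value is illustrative, not taken from any config):
#     reg = build_linear_model(ae_config, 'nn_lasso', alpha=1e-3)
#     reg.fit(X_centered, factors)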
def fit_nnls_one_cv(cv: int, test_set: str, data: pd.DataFrame, assets: List[str], base_dir: str,
ae_config, reg_type: str = 'nn_ridge', **kwargs):
model, scaler, dates, test_data, test_features, prediction, embedding, decoding = load_result(ae_config,
test_set,
data,
assets,
base_dir,
cv)
prediction -= scaler['attributes']['mean_']
prediction /= np.sqrt(scaler['attributes']['var_'])
mse_or = np.mean((test_data - prediction) ** 2, 0)
relu_activation_layer = tf.keras.Model(inputs=model.input, outputs=model.get_layer('encoder').output)
relu_activation = relu_activation_layer.predict(test_data)
relu_activation = pd.DataFrame(relu_activation, index=prediction.index)
# Fit linear encoder to the factors
# input_dim = model.layers[0].input_shape[0][-1]
# encoding_dim = model.layers[1].output_shape[-1]
# vlin_encoder = create_linear_encoder_with_constraint(input_dim, encoding_dim)
# lin_encoder.fit(test_data_i, relu_activation_i, batch_size = 1, epochs=500, verbose=2,
# max_queue_size=20, workers=2*os.cpu_count()-1, use_multiprocessing=True)
# factors_nnls_i = lin_encoder.predict(test_data_i)
# lin_embedding = pd.DataFrame(encoder.layers[1].weights[0].numpy(), index=embed.index)
# # Fit non-negative linear least square to the factor
reg_nnls = build_linear_model(ae_config, reg_type, **kwargs)
x = test_data.copy()
mean_ = np.mean(x, 0)
# Center the data as we do not fit intercept
x = x - mean_
reg_nnls.fit(x, relu_activation)
# Now compute intercept: it is just the mean of the dependent variable
intercept_ = np.mean(relu_activation).values
factors_nnls = reg_nnls.predict(x) + intercept_
factors_nnls = pd.DataFrame(factors_nnls, index=prediction.index)
# Get reconstruction error based on nnls embedding
if ae_config.model_type == "pca_ae_model":
# For PCA AE model encoder and decoder share weights
weights = reg_nnls.coef_.copy()
# Compute bias (reconstruction intercept)
bias = mean_ - np.dot(np.mean(factors_nnls, 0), weights)
elif ae_config.model_type == "ae_model":
weights = model.get_layer('decoder').get_weights()[0]
bias = model.get_layer('decoder').get_weights()[1]
else:
raise NotImplementedError(ae_config.model_type)
# Reconstruction
pred_nnls_model = np.dot(factors_nnls, weights) + bias
mse_nnls_model = np.mean((test_data - pred_nnls_model) ** 2, 0)
# pred_nnls_factors = pd.concat([pred_nnls_factors, pd.DataFrame(pred_nnls_factors_i,
# columns=pred.columns,
# index=pred.index)])
pred_nnls_model = pd.DataFrame(pred_nnls_model, columns=prediction.columns, index=prediction.index)
test_data = pd.DataFrame(test_data, columns=prediction.columns, index=prediction.index)
reg_coef = pd.DataFrame(weights.T, index=embedding.index)
return test_data, embedding, decoding, reg_coef, relu_activation, factors_nnls, prediction, pred_nnls_model, mse_or, mse_nnls_model
def get_nnls_analysis(test_set: str, data: pd.DataFrame, assets: List[str], base_dir: str, ae_config,
reg_type: str = 'nn_ridge', **kwargs):
"""
:param test_set:
:param data:
:param assets:
:param base_dir:
:param ae_config:
    :param reg_type: regression type to fit: "nn_ridge" for non-negative ridge or "nn_ls" for non-negative least squares
:return:
"""
test_data = pd.DataFrame()
prediction = pd.DataFrame()
# pred_nnls_factors = pd.DataFrame()
pred_nnls_model = pd.DataFrame()
factors_nnls = pd.DataFrame()
relu_activation = pd.DataFrame()
embedding = {}
decoding = {}
reg_coef = {}
mse = {
'original': [],
'nnls_factors': [],
'nnls_model': []
}
# cv = 0
for cv in ae_config.data_specs:
LOGGER.info(f'CV: {cv}')
test_data_i, embedding_i, decoding_i, reg_coef_i, relu_activation_i, factors_nnls_i, pred, pred_nnls_model_i, mse_or, mse_nnls_model = fit_nnls_one_cv(
cv,
test_set,
data,
assets,
base_dir,
ae_config,
reg_type=reg_type,
**kwargs)
embedding[cv] = embedding_i
decoding[cv] = decoding_i
reg_coef[cv] = reg_coef_i
relu_activation = pd.concat([relu_activation, relu_activation_i])
factors_nnls = pd.concat([factors_nnls, factors_nnls_i])
prediction = pd.concat([prediction, pred])
pred_nnls_model = pd.concat([pred_nnls_model, pred_nnls_model_i])
test_data = pd.concat([test_data, test_data_i])
mse['original'].append(mse_or)
mse['nnls_model'].append(mse_nnls_model)
results = {
'test_data': test_data,
'prediction': prediction,
# 'pred_nnls_factors': pred_nnls_factors,
'pred_nnls_model': pred_nnls_model,
'factors_nnls': factors_nnls,
'relu_activation': relu_activation,
'mse': mse,
'embedding': embedding,
'decoding': decoding,
'reg_coef': reg_coef
}
return results
def reorder_columns(data, new_order):
return data.iloc[:, new_order]
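# e.g. reorder_columns(df, [2, 0, 1]) puts the third column first (illustrative)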
def load_result_wrapper(config, test_set: str, data: pd.DataFrame, assets: List[str], base_dir: str,
reorder_features: bool = True, first_cv=None):
test_data = pd.DataFrame()
prediction = pd.DataFrame()
features = pd.DataFrame()
relu_activation = pd.DataFrame()
residuals = pd.DataFrame()
embedding = {}
decoding = {}
cvs = list(config.data_specs.keys())
if first_cv:
cvs = [cv for cv in cvs if cv >= first_cv]
for cv in cvs:
embedding[cv] = {}
model, scaler, dates, t_data, f, pred, embed, decod, relu_act = load_result(config,
test_set,
data,
assets,
base_dir,
cv,
reorder_features)
t_data = pd.DataFrame(t_data, columns=pred.columns, index=pred.index)
t_data *= scaler["attributes"]["scale_"]
t_data += scaler["attributes"]["mean_"]
test_data = pd.concat([test_data, t_data])
prediction = pd.concat([prediction, pred])
features = pd.concat([features, f])
if relu_act is not None:
relu_activation = pd.concat([relu_activation, relu_act])
residuals = pd.concat([residuals, t_data - pred])
embedding[cv] = embed
decoding[cv] = decod
return test_data, prediction, features, residuals, embedding, decoding, relu_activation
def get_linear_encoder(config, test_set: str, data: pd.DataFrame, assets: List[str], base_dir: str, cv: str,
reorder_features=True):
"""
:param model_type: 'ae' or 'nmf'
:param test_set:
:param data:
:param assets:
:param base_dir:
:param cv:
:param ae_config:
:return:
"""
model_type = config.model_type
assert model_type in ["pca_ae_model", "ae_model", "convex_nmf", "semi_nmf"]
assert test_set in ["train", "val", "test"]
scaler = pickle.load(open(f'{base_dir}/{cv}/scaler.p', 'rb'))
input_dim = len(assets)
model, encoder, extra_features = build_model(config.model_type,
input_dim,
config.encoding_dim,
n_features=None,
extra_features_dim=1,
activation=config.activation,
batch_normalization=config.batch_normalization,
kernel_initializer=config.kernel_initializer,
kernel_constraint=config.kernel_constraint,
kernel_regularizer=config.kernel_regularizer,
activity_regularizer=config.activity_regularizer,
loss=config.loss,
uncorrelated_features=config.uncorrelated_features,
weightage=config.weightage)
model.load_weights(f'{base_dir}/{cv}/model.h5')
layer_name = list(filter(lambda x: 'uncorrelated_features_layer' in x, [l.name for l in model.layers]))[0]
encoder = tf.keras.Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
dense_layer = tf.keras.Model(inputs=model.input, outputs=model.get_layer('encoder').output)
dense_layer.layers[-1].activation = activations.linear
assert dense_layer.layers[-1].activation == activations.linear
assert encoder.layers[1].activation == activations.linear
data_spec = config.data_specs[cv]
if test_set == 'test':
_, _, test_data, _, dates, _ = get_features(data,
data_spec['start'],
data_spec['end'],
assets,
val_start=data_spec['val_start'],
test_start=data_spec.get('test_start'),
scaler=scaler)
elif test_set == 'val':
_, test_data, _, _, dates, _ = get_features(data,
data_spec['start'],
data_spec['end'],
assets,
val_start=data_spec['val_start'],
test_start=data_spec.get('test_start'),
scaler=scaler)
elif test_set == 'train':
# For first cv: predict on train data then for the others used previous validation data for prediction
test_data, _, _, _, dates, _ = get_features(data,
data_spec['start'],
data_spec['end'],
assets,
val_start=data_spec['val_start'],
test_start=data_spec.get('test_start'),
scaler=scaler)
else:
raise NotImplementedError(test_set)
# Prediction
test_features = encoder.predict(test_data)
lin_activation = dense_layer.predict(test_data)
index = dates[test_set]
test_features = pd.DataFrame(test_features, index=index)
lin_activation = pd.DataFrame(lin_activation, index=index)
if reorder_features:
embedding = | pd.read_pickle(f'{base_dir}/{cv}/encoder_weights.p') | pandas.read_pickle |
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :6], df)
tm.assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
tm.assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
tm.assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
tm.assert_frame_equal(result, expected)
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
| tm.assert_frame_equal(result, df) | pandas._testing.assert_frame_equal |
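# Standalone sketch (separate from the test class above, not part of the pandas test
# suite) of the concat-with-keys behaviour these tests exercise: the keys become the
# outer level of a MultiIndex on the result.
import pandas as pd

_left = pd.DataFrame({"A": [1, 2]}, index=["x", "y"])
_right = pd.DataFrame({"A": [3, 4]}, index=["x", "y"])
_stacked = pd.concat([_left, _right], keys=["one", "two"], names=["source", None])
# _stacked.index -> MultiIndex [("one", "x"), ("one", "y"), ("two", "x"), ("two", "y")], names ("source", None)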
from wonambi import Dataset
from wonambi.detect import DetectSpindle, DetectSlowWave
from mednickdb_pysleep import pysleep_utils
from mednickdb_pysleep import pysleep_defaults
from mednickdb_pysleep.error_handling import EEGError
from typing import List, Tuple, Dict, Union
import pandas as pd
import numpy as np
import warnings
import datetime
import os
import contextlib
import sys
import logging
import inspect
try:
logger = inspect.currentframe().f_back.f_globals['logger']
except KeyError:
logger = logging.getLogger('errorlog')
    logger.info = print
class DummyFile(object):
def write(self, x): pass
@contextlib.contextmanager
def nostdout():
save_stdout = sys.stdout
sys.stdout = DummyFile()
yield
sys.stdout = save_stdout
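# Hypothetical usage sketch (not part of the original module): nostdout() temporarily
# swaps sys.stdout for DummyFile so a noisy call prints nothing while it is active.
def _silence_demo():
    with nostdout():
        print('this line is swallowed while the context manager is active')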
if 'skip_rem' not in os.environ:
if pysleep_defaults.load_matlab_detectors:
try:
import yetton_rem_detector
yetton_rem_detector.initialize_runtime(['-nojvm', '-nodisplay'])
rem_detector = yetton_rem_detector.initialize()
except ModuleNotFoundError:
pysleep_defaults.load_matlab_detectors = False
def extract_features(edf_filepath: str,
epochstages: List[str],
epochoffset_secs: Union[float, None]=None,
end_offset: Union[float, None]=None,
do_slow_osc: bool=True,
do_spindles: bool=True,
chans_for_spindles: List[str]=None,
chans_for_slow_osc: List[str]=None,
epochs_with_artifacts: List[int]=None,
do_rem: bool=False,
spindle_algo: str='Wamsley2012',
do_overlap=True,
timeit=False):
"""
Run full feature extraction (rem, spindles and SO) on an edf
:param edf_filepath: path to edf file
:param epochstages: epochstages list e.g. ['waso', 'waso', 'n1', 'n2', 'n2', etc]
:param epochoffset_secs: difference between the start of the epochstages and the edf in seconds
:param end_offset: end time to stop extraction for (seconds since start of edf)
:param do_slow_osc: whether to extract slow oscillations or not
:param do_spindles: whether to extract spindles or not
:param do_rem: whether to extract rem or not, note that matlab detectors must be turned on in pysleep_defaults
:param chans_for_spindles: which channels to extract for, can be empty list (no channels), None or all (all channels) or a list of channel names
:param chans_for_slow_osc: which channels to extract for, can be empty list (no channels), None or all (all channels) or a list of channel names
:param epochs_with_artifacts: idx of epochstages that are bad or should be skipped
:param spindle_algo: which spindle algo to run, see the list in Wonambi docs
:return: dataframe of all events, with description, stage, onset (seconds since epochstages start), duration, and feature properties
"""
features_detected = []
start_offset = epochoffset_secs
chans_for_slow_osc = None if chans_for_slow_osc == 'all' else chans_for_slow_osc
chans_for_spindles = None if chans_for_spindles == 'all' else chans_for_spindles
if do_spindles:
if timeit:
starttime = datetime.datetime.now()
logger.info('Spindle Extraction starting for ' + edf_filepath)
data = load_and_slice_data_for_feature_extraction(edf_filepath=edf_filepath,
epochstages=epochstages,
epochoffset_secs=start_offset,
bad_segments=epochs_with_artifacts,
end_offset=end_offset,
chans_to_consider=chans_for_spindles)
spindles = detect_spindles(data, start_offset=start_offset, algo=spindle_algo)
if spindles is None or spindles.shape[0]==0:
logger.warning('No Spindles detected for ' + edf_filepath)
else:
n_spindles = spindles.shape[0]
logger.info('Detected '+ str(n_spindles) + ' spindles on ' + edf_filepath)
if timeit:
logger.info('Spindle extraction took '+str(datetime.datetime.now()-starttime))
donetime = datetime.datetime.now()
spindles = assign_stage_to_feature_events(spindles, epochstages)
            assert all(spindles['stage'].isin(pysleep_defaults.nrem_stages)), "All stages must be nrem; if there is a mismatch, epochoffset may be incorrect."
if spindles.shape[0]:
features_detected.append(spindles)
if timeit:
                logger.info('Bundling extraction took '+str(datetime.datetime.now()-donetime))
if do_slow_osc:
if timeit:
starttime = datetime.datetime.now()
logger.info('Slow Osc Extraction starting for '+edf_filepath)
if not do_spindles or chans_for_slow_osc != chans_for_spindles:
data = load_and_slice_data_for_feature_extraction(edf_filepath=edf_filepath,
epochstages=epochstages,
epochoffset_secs=start_offset,
bad_segments=epochs_with_artifacts,
end_offset=end_offset,
chans_to_consider=chans_for_slow_osc)
sos = detect_slow_oscillation(data, start_offset=start_offset)
if sos is None:
logger.warning('No SO detected for ' + edf_filepath)
else:
n_sos = sos.shape[0]
logger.info('Detected '+str(n_sos)+ ' slow osc for ' + edf_filepath)
sos = assign_stage_to_feature_events(sos, epochstages)
            assert all(sos['stage'].isin(pysleep_defaults.nrem_stages)), "All stages must be nrem; if there is a mismatch, epochoffset may be incorrect."
if sos.shape[0]:
features_detected.append(sos)
if timeit:
logger.info('Slow Osc extraction took '+str(datetime.datetime.now()-starttime))
if do_rem:
if not pysleep_defaults.load_matlab_detectors:
warnings.warn('Requested REM, but matlab detectors are turned off. Turn on in pysleep defaults.')
else:
if timeit:
starttime = datetime.datetime.now()
try:
logger.info('REM Extraction starting for '+ edf_filepath)
data = load_and_slice_data_for_feature_extraction(edf_filepath=edf_filepath,
epochstages=epochstages,
epochoffset_secs=start_offset,
bad_segments = epochs_with_artifacts,
end_offset=end_offset,
chans_to_consider=['LOC','ROC'],
stages_to_consider=['rem'])
except ValueError:
warnings.warn('LOC and ROC must be present in the record. Cannot do REM')
rems = None
else:
rems = detect_rems(edf_filepath=edf_filepath, data=data, start_time=start_offset)
if rems is None:
logger.warning('No REM detected for ' + edf_filepath)
else:
rems = assign_stage_to_feature_events(rems, epochstages)
                assert all(rems['stage'] == 'rem'), "All stages for rem must be rem; if there is a mismatch, epochoffset may be incorrect."
logger.info('Detected '+ str(rems.shape[0]) + ' REMs for ' + edf_filepath)
if rems.shape[0]:
features_detected.append(rems)
if timeit:
                logger.info('REM extraction took '+ str(datetime.datetime.now() - starttime))
if features_detected:
sleep_features_df = | pd.concat(features_detected, axis=0, sort=False) | pandas.concat |
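# Hypothetical usage sketch of extract_features(): the file path, epoch stages and
# channel names below are made-up examples, not values from the original project.
if __name__ == '__main__':
    example_features = extract_features(
        edf_filepath='/path/to/subject1.edf',        # assumed path
        epochstages=['waso', 'waso', 'n1', 'n2', 'n2', 'rem'],
        epochoffset_secs=0.0,
        do_spindles=True,
        do_slow_osc=True,
        do_rem=False,
        chans_for_spindles=['C3', 'C4'],             # assumed channel names
        chans_for_slow_osc=['F3', 'F4'],
        spindle_algo='Wamsley2012',
    )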
import numpy as np
import pandas as pd
import streamlit as st
import importlib
import os
import sys
import time
def file_selector(folder_path='.'):
filenames = os.listdir(folder_path)
filenames_ = [f for f in filenames if f[-3:] == "txt"]
selected_filename = st.selectbox('Select a file', filenames_)
return os.path.join(folder_path, selected_filename)
st.header("Rocking Data Bytes")
modo = st.sidebar.radio("Modo", options=["Buscar contenido", "Subir contenido", "Configuración"], index=0)
if "METADATA.csv" in os.listdir(".") and "TAGS.csv" in os.listdir("."):
METADATA = pd.read_csv("./METADATA.csv", index_col=0)
TAGS = pd.read_csv("./TAGS.csv", index_col=0)
else:
METADATA = pd.DataFrame(np.zeros((1, 5)), index=["INIT"], columns=["TAG_{}".format(i) for i in range(1,6)])
METADATA.to_csv("./METADATA.csv")
TAGS = | pd.DataFrame({"TAGS":["funciones", "machine learning", "visualizacion", "estadistica"]}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 7 09:40:49 2018
@author: yuwei
"""
import pandas as pd
import numpy as np
import math
import random
import time
import scipy as sp
import xgboost as xgb
def loadData():
    "Load the data"
trainSet = pd.read_table('round1_ijcai_18_train_20180301.txt',sep=' ')
testSet = pd.read_table('round1_ijcai_18_test_a_20180301.txt',sep=' ')
return trainSet,testSet
def splitData(trainSet,testSet):
    "Split the validation set by time"
    # convert the test-set timestamps to standard datetime strings
time_local = testSet.context_timestamp.map(lambda x :time.localtime(x))
time_local = time_local.map(lambda x :time.strftime("%Y-%m-%d %H:%M:%S",x))
testSet['context_timestamp'] = time_local
    # convert the training-set timestamps to standard datetime strings
time_local = trainSet.context_timestamp.map(lambda x :time.localtime(x))
time_local = time_local.map(lambda x :time.strftime("%Y-%m-%d %H:%M:%S",x))
trainSet['context_timestamp'] = time_local
del time_local
    # process the item_category_list field of the training set
trainSet['item_category_list'] = trainSet.item_category_list.map(lambda x :x.split(';'))
trainSet['item_category_list_2'] = trainSet.item_category_list.map(lambda x :x[1])
trainSet['item_category_list_3'] = trainSet.item_category_list.map(lambda x :x[2] if len(x) >2 else -1)
trainSet['item_category_list_2'] = list(map(lambda x,y : x if (y == -1) else y,trainSet['item_category_list_2'],trainSet['item_category_list_3']))
    # process the item_category_list field of the test set
testSet['item_category_list'] = testSet.item_category_list.map(lambda x :x.split(';'))
testSet['item_category_list_2'] = testSet.item_category_list.map(lambda x :x[1])
testSet['item_category_list_3'] = testSet.item_category_list.map(lambda x :x[2] if len(x) >2 else -1)
testSet['item_category_list_2'] = list(map(lambda x,y : x if (y == -1) else y,testSet['item_category_list_2'],testSet['item_category_list_3']))
del trainSet['item_category_list_3'];del testSet['item_category_list_3'];
    # compute the rank of the item category within predict_category_property
trainSet['predict_category'] = trainSet['predict_category_property'].map(lambda x :[y.split(':')[0] for y in x.split(';')])
trainSet['predict_category_property_rank'] = list(map(lambda x,y:y.index(x) if x in y else -1,trainSet['item_category_list_2'],trainSet['predict_category']))
testSet['predict_category'] = testSet['predict_category_property'].map(lambda x :[y.split(':')[0] for y in x.split(';')])
testSet['predict_category_property_rank'] = list(map(lambda x,y:y.index(x) if x in y else -1,testSet['item_category_list_2'],testSet['predict_category']))
    # count the categories shared by item_category_list and predict_category
trainSet['item_category_count'] = list(map(lambda x,y:len(set(x)&set(y)),trainSet.item_category_list,trainSet.predict_category))
testSet['item_category_count'] = list(map(lambda x,y:len(set(x)&set(y)),testSet.item_category_list,testSet.predict_category))
    # count the categories that differ
trainSet['item_category_count'] = list(map(lambda x,y:len(set(x)) - len(set(x)&set(y)),trainSet.item_category_list,trainSet.predict_category))
testSet['item_category_count'] = list(map(lambda x,y:len(set(x)) - len(set(x)&set(y)),testSet.item_category_list,testSet.predict_category))
    del trainSet['predict_category']; del testSet['predict_category']
    "Split the datasets"
    # test set: features extracted from the 23rd-24th, labels from the 25th
test = testSet
testFeat = trainSet[trainSet['context_timestamp']>'2018-09-23']
    # validation set: features extracted from the 22nd-23rd, labels from the 24th
validate = trainSet[trainSet['context_timestamp']>'2018-09-24']
validateFeat = trainSet[(trainSet['context_timestamp']>'2018-09-22') & (trainSet['context_timestamp']<'2018-09-24')]
    # training set: features from the 21st-22nd labeled on the 23rd; 20th-21st -> 22nd; 19th-20th -> 21st; 18th-19th -> 20th
    # label windows
train1 = trainSet[(trainSet['context_timestamp']>'2018-09-23') & (trainSet['context_timestamp']<'2018-09-24')]
train2 = trainSet[(trainSet['context_timestamp']>'2018-09-22') & (trainSet['context_timestamp']<'2018-09-23')]
train3 = trainSet[(trainSet['context_timestamp']>'2018-09-21') & (trainSet['context_timestamp']<'2018-09-22')]
train4 = trainSet[(trainSet['context_timestamp']>'2018-09-20') & (trainSet['context_timestamp']<'2018-09-21')]
    # feature windows
trainFeat1 = trainSet[(trainSet['context_timestamp']>'2018-09-21') & (trainSet['context_timestamp']<'2018-09-23')]
trainFeat2 = trainSet[(trainSet['context_timestamp']>'2018-09-20') & (trainSet['context_timestamp']<'2018-09-22')]
trainFeat3 = trainSet[(trainSet['context_timestamp']>'2018-09-19') & (trainSet['context_timestamp']<'2018-09-21')]
trainFeat4 = trainSet[(trainSet['context_timestamp']>'2018-09-18') & (trainSet['context_timestamp']<'2018-09-20')]
return test,testFeat,validate,validateFeat,train1,trainFeat1,train2,trainFeat2,train3,trainFeat3,train4,trainFeat4
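# Illustrative sketch (not from the original script) of the sliding-window scheme used
# above: each label day is paired with the two preceding days of feature data.
import datetime

def _feature_label_windows(label_days, year=2018, month=9):
    windows = []
    for day in label_days:
        label_day = datetime.date(year, month, day)
        feature_start = label_day - datetime.timedelta(days=2)
        feature_end = label_day - datetime.timedelta(days=1)
        windows.append((feature_start, feature_end, label_day))
    return windows

# Training labels fall on the 20th-23rd, validation on the 24th, test on the 25th:
# _feature_label_windows([20, 21, 22, 23, 24, 25])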
def modelXgb(train,test):
    "XGBoost model"
train_y = train['is_trade'].values
# train_x = train.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade'
# ],axis=1).values
# test_x = test.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade'
# ],axis=1).values
# test_x = test.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property'
# ],axis=1).values
    # based on the Pearson correlation coefficient, drop attributes whose correlation is below -0.2
train_x = train.drop(['item_brand_id',
'item_city_id','user_id','shop_id','context_id',
'instance_id', 'item_id','item_category_list',
'item_property_list', 'context_timestamp',
'predict_category_property','is_trade',
'item_price_level','user_rank_down',
'item_category_list_2_not_buy_count',
'item_category_list_2_count',
'user_first'
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service',
],axis=1).values
# test_x = test.drop(['item_brand_id',
# 'item_city_id','user_id','shop_id','context_id',
# 'instance_id', 'item_id','item_category_list',
# 'item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade',
# 'item_price_level','user_rank_down',
# 'item_category_list_2_not_buy_count',
# 'item_category_list_2_count',
# 'user_first',
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service'
# ],axis=1).values
test_x = test.drop(['item_brand_id',
'item_city_id','user_id','shop_id','context_id',
'instance_id', 'item_id','item_category_list',
'item_property_list', 'context_timestamp',
'predict_category_property',
'item_price_level','user_rank_down',
'item_category_list_2_not_buy_count',
'item_category_list_2_count',
'user_first',
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service'
],axis=1).values
dtrain = xgb.DMatrix(train_x, label=train_y)
dtest = xgb.DMatrix(test_x)
    # model parameters
params = {'booster': 'gbtree',
'objective':'binary:logistic',
'eval_metric':'logloss',
'eta': 0.03,
'max_depth': 5, # 6
'colsample_bytree': 0.8,#0.8
'subsample': 0.8,
'scale_pos_weight': 1,
'min_child_weight': 18 # 2
}
    # training
watchlist = [(dtrain,'train')]
bst = xgb.train(params, dtrain, num_boost_round=700,evals=watchlist)
    # prediction
predict = bst.predict(dtest)
# test_xy = test[['instance_id','is_trade']]
test_xy = test[['instance_id']]
test_xy['predicted_score'] = predict
return test_xy
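# Sketch of the Pearson-correlation filter mentioned in the comment above ("drop
# attributes whose correlation is below -0.2"); the helper below is an assumption
# about how such a filter could be computed, not code from the original pipeline.
def low_corr_columns(train_df, label_col='is_trade', threshold=-0.2):
    "Return numeric columns whose Pearson correlation with the label is below threshold."
    corr = train_df.corr()[label_col]
    low = corr[(corr < threshold) & (corr.index != label_col)]
    return list(low.index)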
def get_item_feat(data,dataFeat):
    "Feature extraction for item"
result = pd.DataFrame(dataFeat['item_id'])
    result = result.drop_duplicates(['item_id'],keep='first')
    "1. count how many times the item appears"
dataFeat['item_count'] = dataFeat['item_id']
feat = pd.pivot_table(dataFeat,index=['item_id'],values='item_count',aggfunc='count').reset_index()
del dataFeat['item_count']
    result = pd.merge(result,feat,on=['item_id'],how='left')
    "2. count how many times the item was purchased historically"
dataFeat['item_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_id'],values='item_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_buy_count']
    result = pd.merge(result,feat,on=['item_id'],how='left')
    "3. item conversion-rate feature"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_buy_count,result.item_count))
    result['item_buy_ratio'] = buy_ratio
    "4. count how many times the item appeared without being purchased"
result['item_not_buy_count'] = result['item_count'] - result['item_buy_count']
return result
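# The count / buy-count / conversion-ratio / not-buy-count pattern above repeats for
# many keys, so it can be factored into one helper; this refactoring sketch is an
# assumption, not part of the original feature pipeline.
def conversion_stats(dataFeat, keys, prefix):
    "Aggregate occurrence count, purchase count, conversion ratio and non-purchase count per key."
    result = dataFeat.groupby(keys)['is_trade'].agg(['count', 'sum']).reset_index()
    result = result.rename(columns={'count': prefix + '_count', 'sum': prefix + '_buy_count'})
    result[prefix + '_buy_ratio'] = result[prefix + '_buy_count'] / result[prefix + '_count']
    result[prefix + '_not_buy_count'] = result[prefix + '_count'] - result[prefix + '_buy_count']
    return result

# e.g. conversion_stats(dataFeat, 'item_id', 'item') mirrors the aggregates built in get_item_feat above.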
def get_user_feat(data,dataFeat):
"user的特征提取"
result = pd.DataFrame(dataFeat['user_id'])
result = result.drop_duplicates(['user_id'],keep='first')
"1.统计user出现次数"
dataFeat['user_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id'],values='user_count',aggfunc='count').reset_index()
del dataFeat['user_count']
result = pd.merge(result,feat,on=['user_id'],how='left')
"2.统计user历史被购买的次数"
dataFeat['user_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id'],values='user_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_buy_count']
result = pd.merge(result,feat,on=['user_id'],how='left')
"3.统计user转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_buy_count,result.user_count))
result['user_buy_ratio'] = buy_ratio
"4.统计user历史未被够买的次数"
result['user_not_buy_count'] = result['user_count'] - result['user_buy_count']
return result
def get_context_feat(data,dataFeat):
"context的特征提取"
result = pd.DataFrame(dataFeat['context_id'])
result = result.drop_duplicates(['context_id'],keep='first')
"1.统计context出现次数"
dataFeat['context_count'] = dataFeat['context_id']
feat = pd.pivot_table(dataFeat,index=['context_id'],values='context_count',aggfunc='count').reset_index()
del dataFeat['context_count']
result = pd.merge(result,feat,on=['context_id'],how='left')
"2.统计context历史被购买的次数"
dataFeat['context_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['context_id'],values='context_buy_count',aggfunc='sum').reset_index()
del dataFeat['context_buy_count']
result = pd.merge(result,feat,on=['context_id'],how='left')
"3.统计context转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.context_buy_count,result.context_count))
result['context_buy_ratio'] = buy_ratio
"4.统计context历史未被够买的次数"
result['context_not_buy_count'] = result['context_count'] - result['context_buy_count']
return result
def get_shop_feat(data,dataFeat):
"shop的特征提取"
result = pd.DataFrame(dataFeat['shop_id'])
result = result.drop_duplicates(['shop_id'],keep='first')
"1.统计shop出现次数"
dataFeat['shop_count'] = dataFeat['shop_id']
feat = pd.pivot_table(dataFeat,index=['shop_id'],values='shop_count',aggfunc='count').reset_index()
del dataFeat['shop_count']
result = pd.merge(result,feat,on=['shop_id'],how='left')
"2.统计shop历史被购买的次数"
dataFeat['shop_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['shop_id'],values='shop_buy_count',aggfunc='sum').reset_index()
del dataFeat['shop_buy_count']
result = pd.merge(result,feat,on=['shop_id'],how='left')
"3.统计shop转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.shop_buy_count,result.shop_count))
result['shop_buy_ratio'] = buy_ratio
"4.统计shop历史未被够买的次数"
result['shop_not_buy_count'] = result['shop_count'] - result['shop_buy_count']
return result
def get_timestamp_feat(data,dataFeat):
"context_timestamp的特征提取"
result = pd.DataFrame(dataFeat['context_timestamp'])
result = result.drop_duplicates(['context_timestamp'],keep='first')
"1.统计context_timestamp出现次数"
dataFeat['context_timestamp_count'] = dataFeat['context_timestamp']
feat = pd.pivot_table(dataFeat,index=['context_timestamp'],values='context_timestamp_count',aggfunc='count').reset_index()
del dataFeat['context_timestamp_count']
result = pd.merge(result,feat,on=['context_timestamp'],how='left')
"2.统计context_timestamp历史被购买的次数"
dataFeat['context_timestamp_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['context_timestamp'],values='context_timestamp_buy_count',aggfunc='sum').reset_index()
del dataFeat['context_timestamp_buy_count']
result = pd.merge(result,feat,on=['context_timestamp'],how='left')
"3.统计context_timestamp转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.context_timestamp_buy_count,result.context_timestamp_count))
result['context_timestamp_buy_ratio'] = buy_ratio
"4.统计context_timestamp历史未被够买的次数"
result['context_timestamp_not_buy_count'] = result['context_timestamp_count'] - result['context_timestamp_buy_count']
return result
def get_item_brand_feat(data,dataFeat):
"item_brand的特征提取"
result = pd.DataFrame(dataFeat['item_brand_id'])
result = result.drop_duplicates(['item_brand_id'],keep='first')
"1.统计item_brand出现次数"
dataFeat['item_brand_count'] = dataFeat['item_brand_id']
feat = pd.pivot_table(dataFeat,index=['item_brand_id'],values='item_brand_count',aggfunc='count').reset_index()
del dataFeat['item_brand_count']
result = pd.merge(result,feat,on=['item_brand_id'],how='left')
"2.统计item_brand历史被购买的次数"
dataFeat['item_brand_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_brand_id'],values='item_brand_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_brand_buy_count']
result = pd.merge(result,feat,on=['item_brand_id'],how='left')
"3.统计item_brand转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_brand_buy_count,result.item_brand_count))
result['item_brand_buy_ratio'] = buy_ratio
"4.统计item_brand历史未被够买的次数"
result['item_brand_not_buy_count'] = result['item_brand_count'] - result['item_brand_buy_count']
return result
def get_item_city_feat(data,dataFeat):
"item_city的特征提取"
result = pd.DataFrame(dataFeat['item_city_id'])
result = result.drop_duplicates(['item_city_id'],keep='first')
"1.统计item_city出现次数"
dataFeat['item_city_count'] = dataFeat['item_city_id']
feat = pd.pivot_table(dataFeat,index=['item_city_id'],values='item_city_count',aggfunc='count').reset_index()
del dataFeat['item_city_count']
result = pd.merge(result,feat,on=['item_city_id'],how='left')
"2.统计item_city历史被购买的次数"
dataFeat['item_city_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_city_id'],values='item_city_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_city_buy_count']
result = pd.merge(result,feat,on=['item_city_id'],how='left')
"3.统计item_city转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_city_buy_count,result.item_city_count))
result['item_city_buy_ratio'] = buy_ratio
"4.统计item_city历史未被够买的次数"
result['item_city_not_buy_count'] = result['item_city_count'] - result['item_city_buy_count']
return result
def get_user_gender_feat(data,dataFeat):
"user_gender的特征提取"
result = pd.DataFrame(dataFeat['user_gender_id'])
result = result.drop_duplicates(['user_gender_id'],keep='first')
"1.统计user_gender出现次数"
dataFeat['user_gender_count'] = dataFeat['user_gender_id']
feat = pd.pivot_table(dataFeat,index=['user_gender_id'],values='user_gender_count',aggfunc='count').reset_index()
del dataFeat['user_gender_count']
result = pd.merge(result,feat,on=['user_gender_id'],how='left')
"2.统计user_gender历史被购买的次数"
dataFeat['user_gender_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_gender_id'],values='user_gender_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_gender_buy_count']
result = pd.merge(result,feat,on=['user_gender_id'],how='left')
"3.统计user_gender转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_gender_buy_count,result.user_gender_count))
result['user_gender_buy_ratio'] = buy_ratio
"4.统计user_gender历史未被够买的次数"
result['user_gender_not_buy_count'] = result['user_gender_count'] - result['user_gender_buy_count']
return result
def get_user_occupation_feat(data,dataFeat):
"user_occupation的特征提取"
result = pd.DataFrame(dataFeat['user_occupation_id'])
result = result.drop_duplicates(['user_occupation_id'],keep='first')
"1.统计user_occupation出现次数"
dataFeat['user_occupation_count'] = dataFeat['user_occupation_id']
feat = pd.pivot_table(dataFeat,index=['user_occupation_id'],values='user_occupation_count',aggfunc='count').reset_index()
del dataFeat['user_occupation_count']
result = pd.merge(result,feat,on=['user_occupation_id'],how='left')
"2.统计user_occupation历史被购买的次数"
dataFeat['user_occupation_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_occupation_id'],values='user_occupation_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_occupation_buy_count']
result = pd.merge(result,feat,on=['user_occupation_id'],how='left')
"3.统计user_occupation转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_occupation_buy_count,result.user_occupation_count))
result['user_occupation_buy_ratio'] = buy_ratio
"4.统计user_occupation历史未被够买的次数"
result['user_occupation_not_buy_count'] = result['user_occupation_count'] - result['user_occupation_buy_count']
return result
def get_context_page_feat(data,dataFeat):
"context_page的特征提取"
result = pd.DataFrame(dataFeat['context_page_id'])
result = result.drop_duplicates(['context_page_id'],keep='first')
"1.统计context_page出现次数"
dataFeat['context_page_count'] = dataFeat['context_page_id']
feat = pd.pivot_table(dataFeat,index=['context_page_id'],values='context_page_count',aggfunc='count').reset_index()
del dataFeat['context_page_count']
result = pd.merge(result,feat,on=['context_page_id'],how='left')
"2.统计context_page历史被购买的次数"
dataFeat['context_page_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['context_page_id'],values='context_page_buy_count',aggfunc='sum').reset_index()
del dataFeat['context_page_buy_count']
result = pd.merge(result,feat,on=['context_page_id'],how='left')
"3.统计context_page转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.context_page_buy_count,result.context_page_count))
result['context_page_buy_ratio'] = buy_ratio
"4.统计context_page历史未被够买的次数"
result['context_page_not_buy_count'] = result['context_page_count'] - result['context_page_buy_count']
return result
def get_shop_review_num_level_feat(data,dataFeat):
"context_page的特征提取"
result = pd.DataFrame(dataFeat['shop_review_num_level'])
result = result.drop_duplicates(['shop_review_num_level'],keep='first')
"1.统计shop_review_num_level出现次数"
dataFeat['shop_review_num_level_count'] = dataFeat['shop_review_num_level']
feat = pd.pivot_table(dataFeat,index=['shop_review_num_level'],values='shop_review_num_level_count',aggfunc='count').reset_index()
del dataFeat['shop_review_num_level_count']
result = pd.merge(result,feat,on=['shop_review_num_level'],how='left')
"2.统计shop_review_num_level历史被购买的次数"
dataFeat['shop_review_num_level_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['shop_review_num_level'],values='shop_review_num_level_buy_count',aggfunc='sum').reset_index()
del dataFeat['shop_review_num_level_buy_count']
result = pd.merge(result,feat,on=['shop_review_num_level'],how='left')
"3.统计shop_review_num_level转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.shop_review_num_level_buy_count,result.shop_review_num_level_count))
result['shop_review_num_level_buy_ratio'] = buy_ratio
"4.统计shop_review_num_level历史未被够买的次数"
result['shop_review_num_level_not_buy_count'] = result['shop_review_num_level_count'] - result['shop_review_num_level_buy_count']
return result
def get_item_category_list_2_feat(data,dataFeat):
"item_category_list_2的特征提取"
result = pd.DataFrame(dataFeat['item_category_list_2'])
result = result.drop_duplicates(['item_category_list_2'],keep='first')
"1.统计item_category_list_2出现次数"
dataFeat['item_category_list_2_count'] = dataFeat['item_category_list_2']
feat = pd.pivot_table(dataFeat,index=['item_category_list_2'],values='item_category_list_2_count',aggfunc='count').reset_index()
del dataFeat['item_category_list_2_count']
result = pd.merge(result,feat,on=['item_category_list_2'],how='left')
"2.统计item_category_list_2历史被购买的次数"
dataFeat['item_category_list_2_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_category_list_2'],values='item_category_list_2_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_category_list_2_buy_count']
result = pd.merge(result,feat,on=['item_category_list_2'],how='left')
"3.统计item_category_list_2转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_category_list_2_buy_count,result.item_category_list_2_count))
result['item_category_list_2_buy_ratio'] = buy_ratio
"4.统计item_category_list_2历史未被够买的次数"
result['item_category_list_2_not_buy_count'] = result['item_category_list_2_count'] - result['item_category_list_2_buy_count']
return result
def get_user_item_feat(data,dataFeat):
"user-item的特征提取"
result = pd.DataFrame(dataFeat[['user_id','item_id']])
result = result.drop_duplicates(['user_id','item_id'],keep='first')
"1.统计user-item出现次数"
dataFeat['user_item_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','item_id'],values='user_item_count',aggfunc='count').reset_index()
del dataFeat['user_item_count']
result = pd.merge(result,feat,on=['user_id','item_id'],how='left')
"2.统计user-item历史被购买的次数"
dataFeat['user_item_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','item_id'],values='user_item_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_item_buy_count']
result = pd.merge(result,feat,on=['user_id','item_id'],how='left')
"3.统计user-item转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_item_buy_count,result.user_item_count))
result['user_item_buy_ratio'] = buy_ratio
"4.统计user-item历史未被够买的次数"
result['user_item_not_buy_count'] = result['user_item_count'] - result['user_item_buy_count']
return result
def get_user_shop_feat(data,dataFeat):
"user-shop的特征提取"
result = pd.DataFrame(dataFeat[['user_id','shop_id']])
result = result.drop_duplicates(['user_id','shop_id'],keep='first')
"1.统计user-shop出现次数"
dataFeat['user_shop_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','shop_id'],values='user_shop_count',aggfunc='count').reset_index()
del dataFeat['user_shop_count']
result = pd.merge(result,feat,on=['user_id','shop_id'],how='left')
"2.统计user-shop历史被购买的次数"
dataFeat['user_shop_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','shop_id'],values='user_shop_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_shop_buy_count']
result = pd.merge(result,feat,on=['user_id','shop_id'],how='left')
"3.统计user-shop转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_shop_buy_count,result.user_shop_count))
result['user_shop_buy_ratio'] = buy_ratio
"4.统计user-shop历史未被够买的次数"
result['user_shop_not_buy_count'] = result['user_shop_count'] - result['user_shop_buy_count']
return result
def get_user_context_feat(data,dataFeat):
"user-context的特征提取"
result = pd.DataFrame(dataFeat[['user_id','context_id']])
result = result.drop_duplicates(['user_id','context_id'],keep='first')
"1.统计user-context出现次数"
dataFeat['user_context_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','context_id'],values='user_context_count',aggfunc='count').reset_index()
del dataFeat['user_context_count']
result = pd.merge(result,feat,on=['user_id','context_id'],how='left')
"2.统计user-context历史被购买的次数"
dataFeat['user_context_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','context_id'],values='user_context_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_context_buy_count']
result = pd.merge(result,feat,on=['user_id','context_id'],how='left')
"3.统计user-context转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_context_buy_count,result.user_context_count))
result['user_context_buy_ratio'] = buy_ratio
"4.统计user-context历史未被够买的次数"
result['user_context_not_buy_count'] = result['user_context_count'] - result['user_context_buy_count']
return result
def get_user_timestamp_feat(data,dataFeat):
"user-context_timestamp的特征提取"
result = pd.DataFrame(dataFeat[['user_id','context_timestamp']])
result = result.drop_duplicates(['user_id','context_timestamp'],keep='first')
"1.统计user-context_timestamp出现次数"
dataFeat['user_context_timestamp_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','context_timestamp'],values='user_context_timestamp_count',aggfunc='count').reset_index()
del dataFeat['user_context_timestamp_count']
result = pd.merge(result,feat,on=['user_id','context_timestamp'],how='left')
"2.统计user-context_timestamp历史被购买的次数"
dataFeat['user_context_timestamp_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','context_timestamp'],values='user_context_timestamp_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_context_timestamp_buy_count']
result = pd.merge(result,feat,on=['user_id','context_timestamp'],how='left')
"3.统计user-context_timestamp转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_context_timestamp_buy_count,result.user_context_timestamp_count))
result['user_context_timestamp_buy_ratio'] = buy_ratio
"4.统计user-context_timestamp历史未被够买的次数"
result['user_context_timestamp_not_buy_count'] = result['user_context_timestamp_count'] - result['user_context_timestamp_buy_count']
return result
def get_user_item_brand_feat(data,dataFeat):
"user-item_brand的特征提取"
result = pd.DataFrame(dataFeat[['user_id','item_brand_id']])
result = result.drop_duplicates(['user_id','item_brand_id'],keep='first')
"1.统计user-item_brand_id出现次数"
dataFeat['user_item_brand_id_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','item_brand_id'],values='user_item_brand_id_count',aggfunc='count').reset_index()
del dataFeat['user_item_brand_id_count']
result = pd.merge(result,feat,on=['user_id','item_brand_id'],how='left')
"2.统计user-item_brand_id历史被购买的次数"
dataFeat['user_item_brand_id_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','item_brand_id'],values='user_item_brand_id_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_item_brand_id_buy_count']
result = pd.merge(result,feat,on=['user_id','item_brand_id'],how='left')
"3.统计user-item_brand_id转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_item_brand_id_buy_count,result.user_item_brand_id_count))
result['user_item_brand_id_buy_ratio'] = buy_ratio
"4.统计user-item_brand_id历史未被够买的次数"
result['user_item_brand_id_not_buy_count'] = result['user_item_brand_id_count'] - result['user_item_brand_id_buy_count']
return result
def get_user_user_gender_feat(data,dataFeat):
"user-user_gender的特征提取"
result = pd.DataFrame(dataFeat[['user_id','user_gender_id']])
result = result.drop_duplicates(['user_id','user_gender_id'],keep='first')
"1.统计user-user_gender_id出现次数"
dataFeat['user_user_gender_id_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','user_gender_id'],values='user_user_gender_id_count',aggfunc='count').reset_index()
del dataFeat['user_user_gender_id_count']
result = pd.merge(result,feat,on=['user_id','user_gender_id'],how='left')
"2.统计user-user_gender_id历史被购买的次数"
dataFeat['user_user_gender_id_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','user_gender_id'],values='user_user_gender_id_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_user_gender_id_buy_count']
result = pd.merge(result,feat,on=['user_id','user_gender_id'],how='left')
"3.统计user-user_gender_id转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_user_gender_id_buy_count,result.user_user_gender_id_count))
result['user_user_gender_id_buy_ratio'] = buy_ratio
"4.统计user-user_gender_id历史未被够买的次数"
result['user_user_gender_id_not_buy_count'] = result['user_user_gender_id_count'] - result['user_user_gender_id_buy_count']
return result
def get_user_item_city_feat(data,dataFeat):
"user-item_city的特征提取"
result = pd.DataFrame(dataFeat[['user_id','item_city_id']])
result = result.drop_duplicates(['user_id','item_city_id'],keep='first')
"1.统计user-item_city_id出现次数"
dataFeat['user_item_city_id_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','item_city_id'],values='user_item_city_id_count',aggfunc='count').reset_index()
del dataFeat['user_item_city_id_count']
result = pd.merge(result,feat,on=['user_id','item_city_id'],how='left')
"2.统计user-item_city_id历史被购买的次数"
dataFeat['user_item_city_id_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','item_city_id'],values='user_item_city_id_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_item_city_id_buy_count']
result = pd.merge(result,feat,on=['user_id','item_city_id'],how='left')
"3.统计user-item_city_id转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_item_city_id_buy_count,result.user_item_city_id_count))
result['user_item_city_id_buy_ratio'] = buy_ratio
"4.统计user-item_city_id历史未被够买的次数"
result['user_item_city_id_not_buy_count'] = result['user_item_city_id_count'] - result['user_item_city_id_buy_count']
return result
def get_user_context_page_feat(data,dataFeat):
"user-context_page的特征提取"
result = pd.DataFrame(dataFeat[['user_id','context_page_id']])
result = result.drop_duplicates(['user_id','context_page_id'],keep='first')
"1.统计user-context_page_id出现次数"
dataFeat['user_context_page_id_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','context_page_id'],values='user_context_page_id_count',aggfunc='count').reset_index()
del dataFeat['user_context_page_id_count']
result = pd.merge(result,feat,on=['user_id','context_page_id'],how='left')
"2.统计user-context_page_id历史被购买的次数"
dataFeat['user_context_page_id_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','context_page_id'],values='user_context_page_id_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_context_page_id_buy_count']
result = pd.merge(result,feat,on=['user_id','context_page_id'],how='left')
"3.统计user-context_page_id转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_context_page_id_buy_count,result.user_context_page_id_count))
result['user_context_page_id_buy_ratio'] = buy_ratio
"4.统计user-context_page_id历史未被够买的次数"
result['user_context_page_id_not_buy_count'] = result['user_context_page_id_count'] - result['user_context_page_id_buy_count']
return result
def get_user_user_occupation_feat(data,dataFeat):
"user-user_occupation的特征提取"
result = pd.DataFrame(dataFeat[['user_id','user_occupation_id']])
result = result.drop_duplicates(['user_id','user_occupation_id'],keep='first')
"1.统计user-user_occupation_id出现次数"
dataFeat['user_user_occupation_id_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','user_occupation_id'],values='user_user_occupation_id_count',aggfunc='count').reset_index()
del dataFeat['user_user_occupation_id_count']
result = pd.merge(result,feat,on=['user_id','user_occupation_id'],how='left')
"2.统计user-user_occupation_id历史被购买的次数"
dataFeat['user_user_occupation_id_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','user_occupation_id'],values='user_user_occupation_id_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_user_occupation_id_buy_count']
result = pd.merge(result,feat,on=['user_id','user_occupation_id'],how='left')
"3.统计user-user_occupation_id转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_user_occupation_id_buy_count,result.user_user_occupation_id_count))
result['user_user_occupation_id_buy_ratio'] = buy_ratio
"4.统计user-user_occupation_id历史未被够买的次数"
result['user_user_occupation_id_not_buy_count'] = result['user_user_occupation_id_count'] - result['user_user_occupation_id_buy_count']
return result
def get_user_shop_review_num_level_feat(data,dataFeat):
"user-shop_review_num_level的特征提取"
result = pd.DataFrame(dataFeat[['user_id','shop_review_num_level']])
result = result.drop_duplicates(['user_id','shop_review_num_level'],keep='first')
"1.统计user-shop_review_num_level出现次数"
dataFeat['user_shop_review_num_level_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','shop_review_num_level'],values='user_shop_review_num_level_count',aggfunc='count').reset_index()
del dataFeat['user_shop_review_num_level_count']
result = pd.merge(result,feat,on=['user_id','shop_review_num_level'],how='left')
"2.统计user-shop_review_num_level历史被购买的次数"
dataFeat['user_shop_review_num_level_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','shop_review_num_level'],values='user_shop_review_num_level_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_shop_review_num_level_buy_count']
result = pd.merge(result,feat,on=['user_id','shop_review_num_level'],how='left')
"3.统计user-shop_review_num_level转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_shop_review_num_level_buy_count,result.user_shop_review_num_level_count))
result['user_shop_review_num_level_buy_ratio'] = buy_ratio
"4.统计user-shop_review_num_level历史未被够买的次数"
result['user_shop_review_num_level_not_buy_count'] = result['user_shop_review_num_level_count'] - result['user_shop_review_num_level_buy_count']
return result
def get_user_item_category_list_2_feat(data,dataFeat):
"user-item_category_list_2的特征提取"
result = pd.DataFrame(dataFeat[['user_id','item_category_list_2']])
result = result.drop_duplicates(['user_id','item_category_list_2'],keep='first')
"1.统计user-item_category_list_2出现次数"
dataFeat['user_item_category_list_2_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','item_category_list_2'],values='user_item_category_list_2_count',aggfunc='count').reset_index()
del dataFeat['user_item_category_list_2_count']
result = pd.merge(result,feat,on=['user_id','item_category_list_2'],how='left')
"2.统计user-item_category_list_2历史被购买的次数"
dataFeat['user_item_category_list_2_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','item_category_list_2'],values='user_item_category_list_2_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_item_category_list_2_buy_count']
result = pd.merge(result,feat,on=['user_id','item_category_list_2'],how='left')
"3.统计user-item_category_list_2转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_item_category_list_2_buy_count,result.user_item_category_list_2_count))
result['user_item_category_list_2_buy_ratio'] = buy_ratio
"4.统计user-item_category_list_2历史未被够买的次数"
result['user_item_category_list_2_not_buy_count'] = result['user_item_category_list_2_count'] - result['user_item_category_list_2_buy_count']
return result
def merge_feat(data,dataFeat):
    "Merge the features"
    # generate the per-field features
item = get_item_feat(data,dataFeat)
user = get_user_feat(data,dataFeat)
context = get_context_feat(data,dataFeat)
shop = get_shop_feat(data,dataFeat)
timestamp = get_timestamp_feat(data,dataFeat)
item_brand = get_item_brand_feat(data,dataFeat)
user_gender = get_user_gender_feat(data,dataFeat)
item_city = get_item_city_feat(data,dataFeat)
context_page = get_context_page_feat(data,dataFeat)
user_occupation = get_user_occupation_feat(data,dataFeat)
shop_review_num_level = get_shop_review_num_level_feat(data,dataFeat)
item_category_list_2 = get_item_category_list_2_feat(data,dataFeat)
    # interaction features
user_item = get_user_item_feat(data,dataFeat)
user_shop = get_user_shop_feat(data,dataFeat)
user_context = get_user_context_feat(data,dataFeat)
user_timestamp = get_user_timestamp_feat(data,dataFeat)
user_item_brand = get_user_item_brand_feat(data,dataFeat)
user_user_gender = get_user_user_gender_feat(data,dataFeat)
user_item_city = get_user_item_city_feat(data,dataFeat)
user_context_page = get_user_context_page_feat(data,dataFeat)
user_user_occupation = get_user_user_occupation_feat(data,dataFeat)
user_shop_review_num_level = get_user_shop_review_num_level_feat(data,dataFeat)
user_item_category_list_2 = get_user_item_category_list_2_feat(data,dataFeat)
    # merge the features
data = pd.merge(data,item,on='item_id',how='left')
data = pd.merge(data,user,on='user_id',how='left')
data = pd.merge(data,context,on='context_id',how='left')
data = pd.merge(data,timestamp,on='context_timestamp',how='left')
data = pd.merge(data,shop,on='shop_id',how='left')
data = pd.merge(data,item_brand,on='item_brand_id',how='left')
data = pd.merge(data,user_gender,on='user_gender_id',how='left')
data = pd.merge(data,item_city,on='item_city_id',how='left')
data = pd.merge(data,context_page,on='context_page_id',how='left')
data = pd.merge(data,user_occupation,on='user_occupation_id',how='left')
data = pd.merge(data,shop_review_num_level,on='shop_review_num_level',how='left')
data = pd.merge(data,item_category_list_2,on='item_category_list_2',how='left')
    # interaction features
data = pd.merge(data,user_item,on=['user_id','item_id'],how='left')
data = pd.merge(data,user_shop,on=['user_id','shop_id'],how='left')
data = pd.merge(data,user_context,on=['user_id','context_id'],how='left')
data = pd.merge(data,user_timestamp,on=['user_id','context_timestamp'],how='left')
data = pd.merge(data,user_item_brand,on=['user_id','item_brand_id'],how='left')
data = pd.merge(data,user_user_gender,on=['user_id','user_gender_id'],how='left')
data = pd.merge(data,user_item_city,on=['user_id','item_city_id'],how='left')
data = pd.merge(data,user_context_page,on=['user_id','context_page_id'],how='left')
data = pd.merge(data,user_user_occupation,on=['user_id','user_occupation_id'],how='left')
data = | pd.merge(data,user_shop_review_num_level,on=['user_id','shop_review_num_level'],how='left') | pandas.merge |
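# Hypothetical end-to-end wiring of the helpers above; the original driver code is not
# shown here, so the argument pairing and output path below are assumptions.
if __name__ == '__main__':
    trainSet, testSet = loadData()
    (test, testFeat, validate, validateFeat,
     train1, trainFeat1, train2, trainFeat2,
     train3, trainFeat3, train4, trainFeat4) = splitData(trainSet, testSet)
    train_merged = merge_feat(train1, trainFeat1)
    test_merged = merge_feat(test, testFeat)
    submission = modelXgb(train_merged, test_merged)
    submission.to_csv('submission_example.txt', sep=' ', index=False)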
"""Compute the Delta Regulatory potential upon In silico deletion of TF binding sites"""
import h5py
import os
import sys
import numpy as np
import pandas as pd
from pkg_resources import resource_filename
from .regpotential import regpotential
from . import Config
class EpigenomeData(object):
""" interface for loading Lisa data with configuration file"""
def __init__(self, species, epigenome):
self.config = Config(resource_filename("lisa", "lisa.ini"), species)
self.epigenome = epigenome
self.own_data_h5 = None
self.covariates_h5 = None
@property
def tr_high_quality_ids(self):
""" load ChiLin quality metrics and filter by cutoff
"""
quality = pd.read_table(self.config.get_meta, encoding="ISO-8859-1", index_col=0)
selector = (quality['UniquelyMappedRatio'] > 0.3) \
& (quality['MappedReadsNumber'] > 3e6) \
& (quality['AllPeaksNumber'] > 50) \
& (quality['PBC'] > 0.5) \
& (quality['FRiP'] > 0.003) \
#& (quality['UnionDHSRatio'] > 0.3)
sids = quality.loc[selector, 'X']
# print(sids.shape)
return list(set(map(str, list(sids))))
@property
def high_quality_ids(self):
""" load ChiLin quality metrics and filter by cutoff
"""
meta = | pd.read_csv(self.config.get_meta, sep='\t', encoding="ISO-8859-1", index_col=0) | pandas.read_csv |
from math import *
import requests
import pandas as pd
import re
import urllib.parse
import pickle
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import json
import smtplib, ssl
from email.mime.text import MIMEText
def get_data():
# available locations
print('Scraping data from CFB website')
reqget = requests.get(
url = 'https://www.chicagosfoodbank.org/find-food/covid-19-neighborhood-sites/',
headers = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_2_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36'})
html = reqget.content
df_list = pd.read_html(html)
df = df_list[-1]
df.columns = df.iloc[1]
df = df[2:]
df.to_csv('data/raw.csv',index=False)
# Google Form Responses
print('Gathering responses from google form')
scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
creds = ServiceAccountCredentials.from_json_keyfile_name('_ags/google_creds.json', scope)
client = gspread.authorize(creds)
sheet = client.open("Greater Chicago Food Depository (Responses)").sheet1
list_of_hashes = sheet.get_all_values()
df = pd.DataFrame(list_of_hashes[1:],columns=['time','email','name','email2','address','radius'])
df = df[['name','email','address','radius']]
df.to_csv('data/users.csv',index=False)
def clean():
print('Cleaning CFB location data')
df = | pd.read_csv('data/raw.csv') | pandas.read_csv |
from logbook import Logger
from mock import patch, create_autospec, MagicMock, Mock
import pandas as pd
from ccxt.base.errors import RequestTimeout
from catalyst.exchange.exchange_errors import ExchangeRequestError
from .base import BaseExchangeTestCase
from catalyst.exchange.ccxt.ccxt_exchange import CCXT
from catalyst.exchange.exchange_execution import ExchangeLimitOrder
from catalyst.exchange.utils.exchange_utils import get_exchange_auth
from catalyst.finance.order import Order
log = Logger('test_ccxt')
class TestCCXT(BaseExchangeTestCase):
@classmethod
def setup(self):
exchange_name = 'bittrex'
auth = get_exchange_auth(exchange_name)
self.exchange = CCXT(
exchange_name=exchange_name,
key=auth['key'],
secret=auth['secret'],
password='',
quote_currency='usdt',
)
self.exchange.init()
def create_orders_dict(self, asset, last_order):
"""
create an orders dict which mocks the .orders object
:param asset: TradingPair
:param last_order: bool, adds another order to the dict.
mocks the functionality of the fetchOrder methods
:return: dict(Order)
"""
orders = dict()
orders['208612980769'] = Order(
dt= | pd.to_datetime('2018-05-01 17:34', utc=True) | pandas.to_datetime |
# Project: fuelmeter-tools
# Created by: # Created on: 5/7/2020
from pandas.tseries.offsets import MonthEnd
from puma.Report import Report
import pandas as pd
import numpy as np
import puma.plot as pplot
import puma.tex as ptex
import datetime
import os
class MultiMonthReport(Report):
def __init__(self,start,end,title,nc,houses,monthly_fuel_price):
super(MultiMonthReport, self).__init__(start,end,title,nc,houses,monthly_fuel_price)
def getAveCostPerDay(self):
        '''calculates the average cost of fuel per day. If the attribute gpd_hdd
        is available it is used to calculate costs; otherwise the attribute
        fuel_by_day is used.'''
if 'gpd_hdd' not in self.__dict__:
self.cost_per_day = self.getCostPerDay(self.fuel_by_day)
else:
self.cost_per_day = self.getCostPerDay(self.gpd_hdd)
return self.cost_per_day.mean()
def getCostPerDay(self,fuel_by_day):
'''calculate cost for each day based on a fuel price for each day and fuel consumption for each day'''
self.fuel_price.name = 'fuel_price'
df = pd.concat([fuel_by_day, self.fuel_price.groupby(pd.Grouper(freq='D')).mean()], axis=1)
df.fuel_price = df.fuel_price.ffill() # filled for days that did not match
return df.fuel_consumption * df.fuel_price
# def getEstimatedTotalGallons(self):
# '''calculates the total gallons used each month and sets the attribute gallons_by_month
# :return float total gallons for the entire report period'''
# self.estimated_gallons_by_month = self.calculateTotalGallonsByMonth()
# return self.gallons_by_month.sum()
def getCostPerMonth(self):
'''calculates the total cost of consumed fuel per month by summing cost per day for every day within a month'''
        if self.cost_per_day is None:
if 'gpd_hdd' in self.__dict__:
self.cost_per_day = self.getCostPerDay(self.gpd_hdd)
else:
self.cost_per_day = self.getCostPerDay(self.fuel_by_day)
self.cost_per_month = self.cost_per_day.groupby(pd.Grouper(freq="M")).sum()
return
def getTotalCost(self):
'''uses hdd corrected estimate of fuel consumption to estimate cost per day and aggregate to the entire report period.'''
costPerDay = self.getCostPerDay(self.gpd_hdd)
return costPerDay.sum()
def calculateMeanDailyGallonsPerMonth(self):
'''Calculates the total gallons consumed by month based on an average daily consumption rate for each month'''
        # actual measured total by day; we use a count of 5 records as our cutoff for producing a legitimate average
groupedDaily = self.filtered_df['fuel_consumption'].groupby( | pd.Grouper(freq="D") | pandas.Grouper |
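# Tiny numeric illustration (made-up numbers) of the cost-per-day calculation used by
# getCostPerDay() above: daily gallons are aligned with a forward-filled daily fuel
# price and multiplied element-wise.
import pandas as pd

_days = pd.date_range('2020-01-01', periods=3, freq='D')
_fuel_by_day = pd.Series([1.5, 2.0, 1.0], index=_days, name='fuel_consumption')
_fuel_price = pd.Series([3.10, None, None], index=_days, name='fuel_price').ffill()
_cost_per_day = _fuel_by_day * _fuel_price   # 4.65, 6.20, 3.10 dollars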
import json
from zoltpy import util
import pandas as pd
import pymmwr as pm
import datetime
import warnings
import requests
import io
warnings.simplefilter(action='ignore')
def get_epi_data(date):
format_str = '%m/%d/%y' # The format
dt = datetime.datetime.strptime(date, format_str).date()
epi = pm.date_to_epiweek(dt)
return epi.year, epi.week, epi.day
def get_epi_data_TZ(date):
format_str = '%Y-%m-%d' # The format
dt = datetime.datetime.strptime(date, format_str).date()
epi = pm.date_to_epiweek(dt)
epi_week = epi.week
epi_day = epi.day
if epi_day >= 3: # cut off is Tuesday
epi_week = epi_week + 1
return epi.year, epi_week, epi.day
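# Quick illustration (dates are arbitrary examples) of the Tuesday cut-off above: a
# Monday keeps its epi week, while a Tuesday is counted toward the following week.
def _epiweek_cutoff_demo():
    monday = get_epi_data_TZ('2020-05-04')   # epi day 2 -> week unchanged
    tuesday = get_epi_data_TZ('2020-05-05')  # epi day 3 -> week + 1
    return monday, tuesday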
def get_available_timezeros(project_name):
conn = util.authenticate()
project = [project for project in conn.projects if project.name == project_name][0]
project_timezeros = project.timezeros
timezero = []
for timezero_array in project_timezeros:
timezero += [timezero_array.timezero_date]
return timezero
def configure_JHU_data(df, target):
# convert matrix to repeating row format
df_truth = df.unstack()
df_truth = df_truth.reset_index()
# get epi data from date
df_truth['year'], df_truth['week'], df_truth['day'] = \
zip(*df_truth['level_0'].map(get_epi_data))
# rename columns
df_truth = df_truth.rename(columns={0: "value",
"level_1": "location_long"})
# Get state IDs
df_truth = df_truth.merge(fips_codes, left_on='location_long', right_on='state_name', how='left')
df_truth.loc[df_truth["location_long"] == "US", "state_code"] = "US"
df_truth["state_code"].replace({"US": 1000}, inplace=True) # so that can be converted to int
# convert FIPS code to int
df_truth = df_truth.dropna(subset=['state_code'])
df_truth["state_code"] = df_truth["state_code"].astype(int)
# add leading zeros to state code
df_truth['state_code'] = df_truth['state_code'].apply(lambda x: '{0:0>2}'.format(x))
# convert 1000 back to US
df_truth["state_code"].replace({"1000": "US"}, inplace=True)
df_truth.loc[df_truth["location_long"] == "US", "state"] = "nat"
# Observed data on the seventh day
# or group by week for incident deaths
if target == 'Incident Deaths':
df_vis = df_truth.groupby(['week', 'location_long'], as_index=False).agg({'level_0': 'last',
'value': 'sum',
'year': 'last',
'day': 'last',
'state_code': 'last',
'state': 'last',
'state_name': 'last'})
else:
df_vis = df_truth
df_vis['week'] = df_vis['week'] + 1 # shift epiweek on axis
# add leading zeros to epi week
df_vis['week'] = df_vis['week'].apply(lambda x: '{0:0>2}'.format(x))
# define epiweek
df_vis['epiweek'] = df_vis['year'].astype(str) + df_vis['week']
# rename columns
df_truth_long = df_vis.rename(columns={"state": "location",
"week": "epiweek",
"state_code": "unit",
"level_0": "date"})
# get timezero
df_truth_long['date'] = pd.to_datetime(df_truth_long['date'])
# initialize df_targets
df_targets = pd.DataFrame(columns=list(df_truth_long.columns) + ['target'])
# use Saturday truth values
df_truth_values = df_truth_long[df_truth_long['day'] == 7]
# find week-ahead targets
for i in range(4):
weeks_ahead = i + 1 # add one to [0,3]
days_back = 5 + ((weeks_ahead - 1) * 7) # timezero is on Mondays
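# e.g. weeks_ahead=1 -> 5 days back (Saturday truth date -> Monday timezero of
# the same epi week); weeks_ahead=2 -> 12 days back, and so on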
df_calc = df_truth_values # initialize df
# find timezero and target
df_calc['timezero'] = df_calc['date'] - datetime.timedelta(days=days_back)
if target == "Cumulative Deaths":
df_calc['target'] = "%i wk ahead cum death" % weeks_ahead
else:
df_calc['target'] = "%i wk ahead inc death" % weeks_ahead
# concatenate truth
df_targets = pd.concat([df_targets, df_calc])
# get epi data from Timezero
df_targets['timezero'] = df_targets['timezero'].astype(str)
df_targets['tz_year'], df_targets['tz_week'], df_targets['tz_day'] = \
zip(*df_targets['timezero'].map(get_epi_data_TZ))
# truth targets by timezero week
df_targets = df_targets[["tz_week", "unit", "target", "value"]]
# Map all timezeros in Zoltar to Corresponding weeks
df_map_wk_to_tz = pd.DataFrame(columns=['timezero'])
df_map_wk_to_tz['timezero'] = get_available_timezeros("COVID-19 Forecasts")
df_map_wk_to_tz['tz_year'], df_map_wk_to_tz['tz_week'], df_map_wk_to_tz['tz_day'] = \
zip(*df_map_wk_to_tz['timezero'].map(get_epi_data_TZ))
# Merge timezeros with truth values and targets
df_final = pd.merge(df_targets, df_map_wk_to_tz, how='right', on=['tz_week'])
# select columns
df_final = df_final[["timezero", "unit", "target", "value"]]
# drop empty rows
nan_value = float("NaN")
df_final.replace("", nan_value, inplace=True)
df_final.dropna(inplace=True)
return df_final
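# df_final carries one row per (timezero, unit, target) with its observed value,
# which is what gets concatenated into zoltar_truth below.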
url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_US.csv"
url_req = requests.get(url).content
df = pd.read_csv(io.StringIO(url_req.decode('utf-8')))
fips_codes = pd.read_csv('../template/state_fips_codes.csv')
# aggregate by state and nationally
state_agg = df.groupby(['Province_State']).sum()
us_nat = df.groupby(['Country_Region']).sum()
df_state_nat = state_agg.append(us_nat)
# drop unnecessary columns
cols = list(range(0, 6))
df_truth = df_state_nat.drop(df_state_nat.columns[cols], axis=1)
# calculate incidents from cumulative
df_truth_cumulative = df_truth
df_truth_incident = df_truth - df_truth.shift(periods=1, axis='columns')
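# e.g. a cumulative row of 0, 3, 7 becomes NaN, 3, 4 after differencing along
# the date columns (the first date has nothing to difference against)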
# re-format files
df_cum_death = configure_JHU_data(df_truth_cumulative, "Cumulative Deaths")
df_inc_death = configure_JHU_data(df_truth_incident, "Incident Deaths")
# concatenate targets
zoltar_truth = pd.concat([df_cum_death, df_inc_death])
from scipy.optimize import minimize
import numpy as np
import argparse
import pandas as pd
import subprocess
import os
from repli1d.analyse_RFD import smooth
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--init', type=str, default="K562")
parser.add_argument('--alpha', type=float,default=0.1)
parser.add_argument('--root', type=str, default="./results/scipy_opti/")
parser.add_argument('--n', type=int, default=10)
parser.add_argument('--extension', type=int, default=5)
parser.add_argument('--command',type=str)
args = parser.parse_args()
root = args.root
os.makedirs(root,exist_ok=True)
whole_info = pd.read_csv(args.init)
x0 = np.array(whole_info.signal)
init_x0 = x0.copy()
x0[np.isnan(x0)] = 0
where = np.where(x0 != 0)
x0 = x0[where]
x0 /= np.sum(x0)
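# normalise so the free parameters (the signal at non-zero positions) sum to 1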
command = args.command
iter = 0
gscore = 0
def fun(x, alpha):
global iter
global gscore
signal = init_x0
signal[where] = x
if np.sum(x < 0) > 0:
return 2
filen = root + "/tmp.csv"
d = pd.DataFrame({"chrom": whole_info.chrom,
"chromStart": whole_info.chromStart,
"chromEnd": whole_info.chromStart,
"signalValue": signal})
d.to_csv(filen, index=False)
process = subprocess.Popen(command + " --signal %s --name %s" % (filen, root + "/tmp"), shell=True,
stdout=subprocess.PIPE)
process.wait()
scored = pd.read_csv(root + "/tmpglobal_corre.csv")
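# A minimal sketch (an assumption, not the original code) of how `fun` could be
# passed to the imported `minimize`, assuming it goes on to return a scalar score:
#   res = minimize(fun, x0, args=(args.alpha,), method="Nelder-Mead",
#                  options={"maxiter": args.n})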
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
def test_constructors(self, closed, name):
left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_arrays(
left.values, right.values, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
lzip(left, right), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = Index(ivs, name=name)
assert isinstance(result, IntervalIndex)
tm.assert_index_equal(result, expected)
# idempotent
tm.assert_index_equal(Index(expected), expected)
tm.assert_index_equal(IntervalIndex(expected), expected)
result = IntervalIndex.from_intervals(expected)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(
expected.values, name=expected.name)
tm.assert_index_equal(result, expected)
left, right = expected.left, expected.right
result = IntervalIndex.from_arrays(
left, right, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
expected.to_tuples(), closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
breaks = expected.left.tolist() + [expected.right[-1]]
result = IntervalIndex.from_breaks(
breaks, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [[np.nan], [np.nan] * 2, [np.nan] * 50])
def test_constructors_nan(self, closed, data):
# GH 18421
expected_values = np.array(data, dtype=object)
expected_idx = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_idx.closed == closed
tm.assert_numpy_array_equal(expected_idx.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks([np.nan] + data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
@pytest.mark.parametrize('data', [
[],
np.array([], dtype='int64'),
np.array([], dtype='float64'),
np.array([], dtype=object)])
def test_constructors_empty(self, data, closed):
# GH 18421
expected_dtype = data.dtype if isinstance(data, np.ndarray) else object
expected_values = np.array([], dtype=object)
expected_index = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_index.empty
assert expected_index.closed == closed
assert expected_index.dtype.subtype == expected_dtype
tm.assert_numpy_array_equal(expected_index.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
def test_constructors_errors(self):
# scalar
msg = ('IntervalIndex\(...\) must be called with a collection of '
'some kind, 5 was passed')
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex(5)
# not an interval
msg = ("type <(class|type) 'numpy.int64'> with value 0 "
"is not an interval")
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex([0, 1])
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex.from_intervals([0, 1])
# invalid closed
msg = "invalid options for 'closed': invalid"
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed within intervals
msg = 'intervals must all be closed on the same side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with tm.assert_raises_regex(ValueError, msg):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# mismatched closed inferred from intervals vs constructor.
msg = 'conflicting values for closed'
with tm.assert_raises_regex(ValueError, msg):
iv = [Interval(0, 1, closed='both'), Interval(1, 2, closed='both')]
IntervalIndex(iv, closed='neither')
# no point in nesting periods in an IntervalIndex
msg = 'Period dtypes are not supported, use a PeriodIndex instead'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(
pd.period_range('2000-01-01', periods=3))
# decreasing breaks/arrays
msg = 'left side of interval must be <= right side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(range(10, -1, -1))
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays(range(10, -1, -1), range(9, -2, -1))
def test_constructors_datetimelike(self, closed):
# DTI / TDI
for idx in [pd.date_range('20130101', periods=5),
pd.timedelta_range('1 day', periods=5)]:
result = IntervalIndex.from_breaks(idx, closed=closed)
expected = IntervalIndex.from_breaks(idx.values, closed=closed)
tm.assert_index_equal(result, expected)
expected_scalar_type = type(idx[0])
i = result[0]
assert isinstance(i.left, expected_scalar_type)
assert isinstance(i.right, expected_scalar_type)
def test_constructors_error(self):
# non-intervals
def f():
IntervalIndex.from_intervals([0.997, 4.0])
pytest.raises(TypeError, f)
def test_properties(self, closed):
index = self.create_index(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
tm.assert_index_equal(index.left, Index(np.arange(10)))
tm.assert_index_equal(index.right, Index(np.arange(1, 11)))
tm.assert_index_equal(index.mid, Index(np.arange(0.5, 10.5)))
assert index.closed == closed
ivs = [Interval(l, r, closed) for l, r in zip(range(10), range(1, 11))]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
# with nans
index = self.create_index_with_nan(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
expected_left = Index([0, np.nan, 2, 3, 4, 5, 6, 7, 8, 9])
expected_right = expected_left + 1
expected_mid = expected_left + 0.5
tm.assert_index_equal(index.left, expected_left)
tm.assert_index_equal(index.right, expected_right)
tm.assert_index_equal(index.mid, expected_mid)
assert index.closed == closed
ivs = [Interval(l, r, closed) if notna(l) else np.nan
for l, r in zip(expected_left, expected_right)]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
def test_with_nans(self, closed):
index = self.create_index(closed=closed)
assert not index.hasnans
result = index.isna()
expected = np.repeat(False, len(index))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.repeat(True, len(index))
tm.assert_numpy_array_equal(result, expected)
index = self.create_index_with_nan(closed=closed)
assert index.hasnans
result = index.isna()
expected = np.array([False, True] + [False] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.array([True, False] + [True] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
def test_copy(self, closed):
expected = self.create_index(closed=closed)
result = expected.copy()
assert result.equals(expected)
result = expected.copy(deep=True)
assert result.equals(expected)
assert result.left is not expected.left
def test_ensure_copied_data(self, closed):
# exercise the copy flag in the constructor
# not copying
index = self.create_index(closed=closed)
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='same')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='same')
# by-definition make a copy
result = IntervalIndex.from_intervals(index.values, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='copy')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='copy')
def test_equals(self, closed):
expected = IntervalIndex.from_breaks(np.arange(5), closed=closed)
assert expected.equals(expected)
assert expected.equals(expected.copy())
assert not expected.equals(expected.astype(object))
assert not expected.equals(np.array(expected))
assert not expected.equals(list(expected))
assert not expected.equals([1, 2])
assert not expected.equals(np.array([1, 2]))
assert not expected.equals(pd.date_range('20130101', periods=2))
expected_name1 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='foo')
expected_name2 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='bar')
assert expected.equals(expected_name1)
assert expected_name1.equals(expected_name2)
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
expected_other_closed = IntervalIndex.from_breaks(
np.arange(5), closed=other_closed)
assert not expected.equals(expected_other_closed)
def test_astype(self, closed):
idx = self.create_index(closed=closed)
for dtype in [np.int64, np.float64, 'datetime64[ns]',
'datetime64[ns, US/Eastern]', 'timedelta64',
'period[M]']:
pytest.raises(ValueError, idx.astype, dtype)
result = idx.astype(object)
tm.assert_index_equal(result, Index(idx.values, dtype='object'))
assert not idx.equals(result)
assert idx.equals(IntervalIndex.from_intervals(result))
result = idx.astype('interval')
tm.assert_index_equal(result, idx)
assert result.equals(idx)
result = idx.astype('category')
expected = pd.Categorical(idx, ordered=True)
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series])
def test_where(self, closed, klass):
idx = self.create_index(closed=closed)
cond = [True] * len(idx)
expected = idx
result = expected.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(idx[1:])
expected = IntervalIndex([np.nan] + idx[1:].tolist())
result = idx.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_delete(self, closed):
expected = IntervalIndex.from_breaks(np.arange(1, 11), closed=closed)
result = self.create_index(closed=closed).delete(0)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [
interval_range(0, periods=10, closed='neither'),
interval_range(1.7, periods=8, freq=2.5, closed='both'),
interval_range(Timestamp('20170101'), periods=12, closed='left'),
interval_range(Timedelta('1 day'), periods=6, closed='right'),
IntervalIndex.from_tuples([('a', 'd'), ('e', 'j'), ('w', 'z')]),
IntervalIndex.from_tuples([(1, 2), ('a', 'z'), (3.14, 6.28)])])
def test_insert(self, data):
item = data[0]
idx_item = IntervalIndex([item])
# start
expected = idx_item.append(data)
result = data.insert(0, item)
tm.assert_index_equal(result, expected)
# end
expected = data.append(idx_item)
result = data.insert(len(data), item)
tm.assert_index_equal(result, expected)
# mid
expected = data[:3].append(idx_item).append(data[3:])
result = data.insert(3, item)
tm.assert_index_equal(result, expected)
# invalid type
msg = 'can only insert Interval objects and NA into an IntervalIndex'
with tm.assert_raises_regex(ValueError, msg):
data.insert(1, 'foo')
# invalid closed
msg = 'inserted item must be closed on the same side as the index'
for closed in {'left', 'right', 'both', 'neither'} - {item.closed}:
with tm.assert_raises_regex(ValueError, msg):
bad_item = Interval(item.left, item.right, closed=closed)
data.insert(1, bad_item)
# GH 18295 (test missing)
na_idx = IntervalIndex([np.nan], closed=data.closed)
for na in (np.nan, pd.NaT, None):
expected = data[:1].append(na_idx).append(data[1:])
result = data.insert(1, na)
tm.assert_index_equal(result, expected)
def test_take(self, closed):
index = self.create_index(closed=closed)
result = index.take(range(10))
tm.assert_index_equal(result, index)
result = index.take([0, 0, 1])
expected = IntervalIndex.from_arrays(
[0, 0, 1], [1, 1, 2], closed=closed)
tm.assert_index_equal(result, expected)
def test_unique(self, closed):
# unique non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_unique
# unique overlapping - distinct endpoints
idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed=closed)
assert idx.is_unique
# unique overlapping - shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_unique
# unique nested
idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], closed=closed)
assert idx.is_unique
# duplicate
idx = IntervalIndex.from_tuples(
[(0, 1), (0, 1), (2, 3)], closed=closed)
assert not idx.is_unique
# unique mixed
idx = IntervalIndex.from_tuples([(0, 1), ('a', 'b')], closed=closed)
assert idx.is_unique
# duplicate mixed
idx = IntervalIndex.from_tuples(
[(0, 1), ('a', 'b'), (0, 1)], closed=closed)
assert not idx.is_unique
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_unique
def test_monotonic(self, closed):
# increasing non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing non-overlapping
idx = IntervalIndex.from_tuples(
[(4, 5), (2, 3), (1, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (4, 5), (2, 3)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping
idx = IntervalIndex.from_tuples(
[(0, 2), (0.5, 2.5), (1, 3)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping
idx = IntervalIndex.from_tuples(
[(1, 3), (0.5, 2.5), (0, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered overlapping
idx = IntervalIndex.from_tuples(
[(0.5, 2.5), (0, 2), (1, 3)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(2, 3), (1, 3), (1, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# stationary
idx = IntervalIndex.from_tuples([(0, 1), (0, 1)], closed=closed)
assert idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr(self):
i = IntervalIndex.from_tuples([(0, 1), (1, 2)], closed='right')
expected = ("IntervalIndex(left=[0, 1],"
"\n right=[1, 2],"
"\n closed='right',"
"\n dtype='interval[int64]')")
assert repr(i) == expected
i = IntervalIndex.from_tuples([(Timestamp('20130101'),
Timestamp('20130102')),
(Timestamp('20130102'),
Timestamp('20130103'))],
closed='right')
expected = ("IntervalIndex(left=['2013-01-01', '2013-01-02'],"
"\n right=['2013-01-02', '2013-01-03'],"
"\n closed='right',"
"\n dtype='interval[datetime64[ns]]')")
assert repr(i) == expected
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_max_seq_item_setting(self):
super(TestIntervalIndex, self).test_repr_max_seq_item_setting()
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_roundtrip(self):
super(TestIntervalIndex, self).test_repr_roundtrip()
def test_get_item(self, closed):
i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan),
closed=closed)
assert i[0] == Interval(0.0, 1.0, closed=closed)
assert i[1] == Interval(1.0, 2.0, closed=closed)
assert isna(i[2])
result = i[0:1]
expected = IntervalIndex.from_arrays((0.,), (1.,), closed=closed)
tm.assert_index_equal(result, expected)
result = i[0:2]
expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed=closed)
tm.assert_index_equal(result, expected)
result = i[1:3]
expected = IntervalIndex.from_arrays((1., np.nan), (2., np.nan),
closed=closed)
tm.assert_index_equal(result, expected)
def test_get_loc_value(self):
pytest.raises(KeyError, self.index.get_loc, 0)
assert self.index.get_loc(0.5) == 0
assert self.index.get_loc(1) == 0
assert self.index.get_loc(1.5) == 1
assert self.index.get_loc(2) == 1
pytest.raises(KeyError, self.index.get_loc, -1)
pytest.raises(KeyError, self.index.get_loc, 3)
idx = IntervalIndex.from_tuples([(0, 2), (1, 3)])
assert idx.get_loc(0.5) == 0
assert idx.get_loc(1) == 0
tm.assert_numpy_array_equal(idx.get_loc(1.5),
np.array([0, 1], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(idx.get_loc(2)),
np.array([0, 1], dtype='int64'))
assert idx.get_loc(3) == 1
pytest.raises(KeyError, idx.get_loc, 3.5)
idx = IntervalIndex.from_arrays([0, 2], [1, 3])
pytest.raises(KeyError, idx.get_loc, 1.5)
def slice_locs_cases(self, breaks):
# TODO: same tests for more index types
index = IntervalIndex.from_breaks([0, 1, 2], closed='right')
assert index.slice_locs() == (0, 2)
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(0, 0.5) == (0, 1)
assert index.slice_locs(start=1) == (0, 2)
assert index.slice_locs(start=1.2) == (1, 2)
assert index.slice_locs(end=1) == (0, 1)
assert index.slice_locs(end=1.1) == (0, 2)
assert index.slice_locs(end=1.0) == (0, 1)
assert index.slice_locs(-1, -1) == (0, 0)
index = IntervalIndex.from_breaks([0, 1, 2], closed='neither')
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(1, 1) == (1, 1)
assert index.slice_locs(1, 2) == (1, 2)
index = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)],
closed='both')
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(1, 2) == (0, 2)
def test_slice_locs_int64(self):
self.slice_locs_cases([0, 1, 2])
def test_slice_locs_float64(self):
self.slice_locs_cases([0.0, 1.0, 2.0])
def slice_locs_decreasing_cases(self, tuples):
index = IntervalIndex.from_tuples(tuples)
assert index.slice_locs(1.5, 0.5) == (1, 3)
assert index.slice_locs(2, 0) == (1, 3)
assert index.slice_locs(2, 1) == (1, 3)
assert index.slice_locs(3, 1.1) == (0, 3)
assert index.slice_locs(3, 3) == (0, 2)
assert index.slice_locs(3.5, 3.3) == (0, 1)
assert index.slice_locs(1, -3) == (2, 3)
slice_locs = index.slice_locs(-1, -1)
assert slice_locs[0] == slice_locs[1]
def test_slice_locs_decreasing_int64(self):
self.slice_locs_decreasing_cases([(2, 4), (1, 3), (0, 2)])
def test_slice_locs_decreasing_float64(self):
self.slice_locs_decreasing_cases([(2., 4.), (1., 3.), (0., 2.)])
def test_slice_locs_fails(self):
index = IntervalIndex.from_tuples([(1, 2), (0, 1), (2, 3)])
with pytest.raises(KeyError):
index.slice_locs(1, 2)
def test_get_loc_interval(self):
assert self.index.get_loc(Interval(0, 1)) == 0
assert self.index.get_loc(Interval(0, 0.5)) == 0
assert self.index.get_loc(Interval(0, 1, 'left')) == 0
pytest.raises(KeyError, self.index.get_loc, Interval(2, 3))
pytest.raises(KeyError, self.index.get_loc,
Interval(-1, 0, 'left'))
def test_get_indexer(self):
actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(self.index)
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
index = IntervalIndex.from_breaks([0, 1, 2], closed='left')
actual = index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, 0, 0, 1, 1, -1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index[:1])
expected = np.array([0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index)
expected = np.array([-1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_get_indexer_subintervals(self):
# TODO: is this right?
# return indexers for wholly contained subintervals
target = IntervalIndex.from_breaks(np.linspace(0, 2, 5))
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='p')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.67, 1.33, 2])
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(target[[0, -1]])
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.33, 0.67, 1], closed='left')
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_contains(self):
# Only endpoints are valid.
i = IntervalIndex.from_arrays([0, 1], [1, 2])
# Invalid
assert 0 not in i
assert 1 not in i
assert 2 not in i
# Valid
assert Interval(0, 1) in i
assert Interval(0, 2) in i
assert Interval(0, 0.5) in i
assert Interval(3, 5) not in i
assert Interval(-1, 0, closed='left') not in i
def testcontains(self):
# can select values that are IN the range of a value
i = IntervalIndex.from_arrays([0, 1], [1, 2])
assert i.contains(0.1)
assert i.contains(0.5)
assert i.contains(1)
assert i.contains(Interval(0, 1))
assert i.contains(Interval(0, 2))
# these overlap completely
assert i.contains(Interval(0, 3))
assert i.contains(Interval(1, 3))
assert not i.contains(20)
assert not i.contains(-20)
def test_dropna(self, closed):
expected = IntervalIndex.from_tuples(
[(0.0, 1.0), (1.0, 2.0)], closed=closed)
ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
ii = IntervalIndex.from_arrays(
[0, 1, np.nan], [1, 2, np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
def test_non_contiguous(self, closed):
index = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed)
target = [0.5, 1.5, 2.5]
actual = index.get_indexer(target)
expected = np.array([0, -1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
assert 1.5 not in index
def test_union(self, closed):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(13), closed=closed)
result = index.union(other)
tm.assert_index_equal(result, expected)
result = other.union(index)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index.union(index), index)
tm.assert_index_equal(index.union(index[:1]), index)
def test_intersection(self, closed):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(5, 11), closed=closed)
result = index.intersection(other)
tm.assert_index_equal(result, expected)
result = other.intersection(index)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index.intersection(index), index)
def test_difference(self, closed):
index = self.create_index(closed=closed)
tm.assert_index_equal(index.difference(index[:1]), index[1:])
def test_symmetric_difference(self, closed):
idx = self.create_index(closed=closed)
result = idx[1:].symmetric_difference(idx[:-1])
expected = IntervalIndex([idx[0], idx[-1]])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('op_name', [
'union', 'intersection', 'difference', 'symmetric_difference'])
def test_set_operation_errors(self, closed, op_name):
index = self.create_index(closed=closed)
set_op = getattr(index, op_name)
# test errors
msg = ('can only do set operations between two IntervalIndex objects '
'that are closed on the same side')
with tm.assert_raises_regex(ValueError, msg):
set_op(Index([1, 2, 3]))
for other_closed in {'right', 'left', 'both', 'neither'} - {closed}:
other = self.create_index(closed=other_closed)
with tm.assert_raises_regex(ValueError, msg):
set_op(other)
def test_isin(self, closed):
index = self.create_index(closed=closed)
expected = np.array([True] + [False] * (len(index) - 1))
result = index.isin(index[:1])
tm.assert_numpy_array_equal(result, expected)
result = index.isin([index[0]])
tm.assert_numpy_array_equal(result, expected)
other = IntervalIndex.from_breaks(np.arange(-2, 10), closed=closed)
expected = np.array([True] * (len(index) - 1) + [False])
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
for other_closed in {'right', 'left', 'both', 'neither'}:
other = self.create_index(closed=other_closed)
expected = np.repeat(closed == other_closed, len(index))
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
def test_comparison(self):
actual = Interval(0, 1) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = Interval(0.5, 1.5) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > Interval(0.5, 1.5)
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index
expected = np.array([True, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index <= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index >= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index < self.index
expected = np.array([False, False])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == IntervalIndex.from_breaks([0, 1, 2], 'left')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index.values == self.index
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index <= self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index != self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index > self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index.values > self.index
tm.assert_numpy_array_equal(actual, np.array([False, False]))
# invalid comparisons
actual = self.index == 0
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index == self.index.left
tm.assert_numpy_array_equal(actual, np.array([False, False]))
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index > 0
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index <= 0
with pytest.raises(TypeError):
self.index > np.arange(2)
with pytest.raises(ValueError):
self.index > np.arange(3)
def test_missing_values(self, closed):
idx = Index([np.nan, Interval(0, 1, closed=closed),
Interval(1, 2, closed=closed)])
idx2 = IntervalIndex.from_arrays(
[np.nan, 0, 1], [np.nan, 1, 2], closed=closed)
assert idx.equals(idx2)
with pytest.raises(ValueError):
IntervalIndex.from_arrays(
[np.nan, 0, 1], np.array([0, 1, 2]), closed=closed)
tm.assert_numpy_array_equal(isna(idx),
np.array([True, False, False]))
def test_sort_values(self, closed):
index = self.create_index(closed=closed)
result = index.sort_values()
tm.assert_index_equal(result, index)
result = index.sort_values(ascending=False)
tm.assert_index_equal(result, index[::-1])
# with nan
index = IntervalIndex([Interval(1, 2), np.nan, Interval(0, 1)])
result = index.sort_values()
expected = IntervalIndex([Interval(0, 1), Interval(1, 2), np.nan])
tm.assert_index_equal(result, expected)
result = index.sort_values(ascending=False)
expected = IntervalIndex([np.nan, Interval(1, 2), Interval(0, 1)])
tm.assert_index_equal(result, expected)
def test_datetime(self):
dates = date_range('2000', periods=3)
idx = IntervalIndex.from_breaks(dates)
tm.assert_index_equal(idx.left, dates[:2])
tm.assert_index_equal(idx.right, dates[-2:])
expected = date_range('2000-01-01T12:00', periods=2)
tm.assert_index_equal(idx.mid, expected)
assert Timestamp('2000-01-01T12') not in idx
target = date_range('1999-12-31T12:00', periods=7, freq='12H')
actual = idx.get_indexer(target)
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_append(self, closed):
index1 = IntervalIndex.from_arrays([0, 1], [1, 2], closed=closed)
index2 = IntervalIndex.from_arrays([1, 2], [2, 3], closed=closed)
result = index1.append(index2)
expected = IntervalIndex.from_arrays(
[0, 1, 1, 2], [1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
result = index1.append([index1, index2])
expected = IntervalIndex.from_arrays(
[0, 1, 0, 1, 1, 2], [1, 2, 1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
msg = ('can only append two IntervalIndex objects that are closed '
'on the same side')
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
index_other_closed = IntervalIndex.from_arrays(
[0, 1], [1, 2], closed=other_closed)
with tm.assert_raises_regex(ValueError, msg):
index1.append(index_other_closed)
def test_is_non_overlapping_monotonic(self, closed):
# Should be True in all cases
tpls = [(0, 1), (2, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is True
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is True
# Should be False in all cases (overlapping)
tpls = [(0, 2), (1, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
# Should be False in all cases (non-monotonic)
tpls = [(0, 1), (2, 3), (6, 7), (4, 5)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
# Should be False for closed='both', otherwise True (GH16560)
if closed == 'both':
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is False
else:
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is True
class TestIntervalRange(object):
def test_construction_from_numeric(self, closed, name):
# combinations of start/end/periods without freq
expected = IntervalIndex.from_breaks(
np.arange(0, 6), name=name, closed=closed)
result = interval_range(start=0, end=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=0, periods=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=5, periods=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with freq
expected = IntervalIndex.from_tuples([(0, 2), (2, 4), (4, 6)],
name=name, closed=closed)
result = interval_range(start=0, end=6, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=0, periods=3, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=6, periods=3, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
expected = IntervalIndex.from_tuples([(0.0, 1.5), (1.5, 3.0)],
name=name, closed=closed)
result = interval_range(start=0, end=4, freq=1.5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
def test_construction_from_timestamp(self, closed, name):
# combinations of start/end/periods without freq
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-06')
breaks = date_range(start=start, end=end)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with fixed freq
freq = '2D'
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-07')
breaks = date_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
end = Timestamp('2017-01-08')
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with non-fixed freq
freq = 'M'
start, end = Timestamp('2017-01-01'), Timestamp('2017-12-31')
breaks = date_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=11, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=11, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
end = Timestamp('2018-01-15')
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
def test_construction_from_timedelta(self, closed, name):
# combinations of start/end/periods without freq
start, end = Timedelta('1 day'), Timedelta('6 days')
breaks = timedelta_range(start=start, end=end)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with fixed freq
freq = '2D'
start, end = Timedelta('1 day'), Timedelta('7 days')
breaks = timedelta_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
import os
import pandas as pd
files = []
train_cell_lines = []
test_cell_lines = []
aucs = []
for file in os.listdir("results"):
if file.endswith(".txt"):
files.append(file)
files = sorted(files)
for filename in files:
f = open('results/' + filename, "r")
auc = f.read().split('AUC = ')[1]
f.close()
train_cell_lines.append(filename.split('_')[0])
test_cell_lines.append(filename.split('_')[1])
aucs.append(float(auc))
data = {
'train_cell_line': train_cell_lines,
'test_cell_line': test_cell_lines,
'auc': aucs
}
df = pd.DataFrame(data)
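# One row per (train, test) pair; for an overview the frame could, for example,
# be pivoted into a train-by-test AUC matrix:
#   auc_matrix = df.pivot(index='train_cell_line', columns='test_cell_line', values='auc')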
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: pd.Timestamp("2013-04-12 00:00:00"),
346: pd.Timestamp("2013-04-13 00:00:00"),
347: pd.Timestamp("2013-04-14 00:00:00"),
348: pd.Timestamp("2013-04-15 00:00:00"),
349: pd.Timestamp("2013-04-16 00:00:00"),
350: pd.Timestamp("2013-04-17 00:00:00"),
351: pd.Timestamp("2013-04-18 00:00:00"),
352: pd.Timestamp("2013-04-19 00:00:00"),
353: pd.Timestamp("2013-04-20 00:00:00"),
354: pd.Timestamp("2013-04-21 00:00:00"),
355: pd.Timestamp("2013-04-22 00:00:00"),
356: pd.Timestamp("2013-04-23 00:00:00"),
357: pd.Timestamp("2013-04-24 00:00:00"),
358: pd.Timestamp("2013-04-25 00:00:00"),
359: pd.Timestamp("2013-04-26 00:00:00"),
360: pd.Timestamp("2013-04-27 00:00:00"),
361: pd.Timestamp("2013-04-28 00:00:00"),
362: pd.Timestamp("2013-04-29 00:00:00"),
363: pd.Timestamp("2013-04-30 00:00:00"),
364: pd.Timestamp("2013-05-01 00:00:00"),
365: pd.Timestamp("2013-05-02 00:00:00"),
366: pd.Timestamp("2013-05-03 00:00:00"),
367: pd.Timestamp("2013-05-04 00:00:00"),
368: pd.Timestamp("2013-05-05 00:00:00"),
369: pd.Timestamp("2013-05-06 00:00:00"),
370: pd.Timestamp("2013-05-07 00:00:00"),
371: pd.Timestamp("2013-05-08 00:00:00"),
372: pd.Timestamp("2013-05-09 00:00:00"),
373: pd.Timestamp("2013-05-10 00:00:00"),
374: pd.Timestamp("2013-05-11 00:00:00"),
375: pd.Timestamp("2013-05-12 00:00:00"),
376: pd.Timestamp("2013-05-13 00:00:00"),
377: pd.Timestamp("2013-05-14 00:00:00"),
378: pd.Timestamp("2013-05-15 00:00:00"),
379: pd.Timestamp("2013-05-16 00:00:00"),
380: pd.Timestamp("2013-05-17 00:00:00"),
381: pd.Timestamp("2013-05-18 00:00:00"),
382: pd.Timestamp("2013-05-19 00:00:00"),
383: pd.Timestamp("2013-05-20 00:00:00"),
384: pd.Timestamp("2013-05-21 00:00:00"),
385: pd.Timestamp("2013-05-22 00:00:00"),
386: pd.Timestamp("2013-05-23 00:00:00"),
387: pd.Timestamp("2013-05-24 00:00:00"),
388: pd.Timestamp("2013-05-25 00:00:00"),
389: pd.Timestamp("2013-05-26 00:00:00"),
390: pd.Timestamp("2013-05-27 00:00:00"),
391: pd.Timestamp("2013-05-28 00:00:00"),
392: pd.Timestamp("2013-05-29 00:00:00"),
393: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.348604308646497,
1: 8.348964254851197,
2: 8.349324201055898,
3: 8.349684147260598,
4: 8.350044093465298,
5: 8.350404039669998,
6: 8.3507639858747,
7: 8.3511239320794,
8: 8.3514838782841,
9: 8.351843824488801,
10: 8.352203770693501,
11: 8.352563716898201,
12: 8.352923663102903,
13: 8.353283609307603,
14: 8.353643555512303,
15: 8.354003501717003,
16: 8.354363447921704,
17: 8.354723394126404,
18: 8.355083340331104,
19: 8.355443286535806,
20: 8.355803232740506,
21: 8.356163178945206,
22: 8.356523125149906,
23: 8.356883071354607,
24: 8.357243017559307,
25: 8.357602963764007,
26: 8.357962909968709,
27: 8.358322856173409,
28: 8.358682802378109,
29: 8.35904274858281,
30: 8.35940269478751,
31: 8.35976264099221,
32: 8.36012258719691,
33: 8.360482533401612,
34: 8.360842479606312,
35: 8.361202425811012,
36: 8.361562372015714,
37: 8.361922318220413,
38: 8.362282264425113,
39: 8.362642210629813,
40: 8.363002156834515,
41: 8.363362103039215,
42: 8.363722049243915,
43: 8.364081995448617,
44: 8.364441941653316,
45: 8.364801887858016,
46: 8.365161834062716,
47: 8.365521780267418,
48: 8.365881726472118,
49: 8.366241672676818,
50: 8.36660161888152,
51: 8.36696156508622,
52: 8.36732151129092,
53: 8.367681457495621,
54: 8.368041403700321,
55: 8.368401349905021,
56: 8.36876129610972,
57: 8.369121242314423,
58: 8.369481188519122,
59: 8.369841134723822,
60: 8.370201080928524,
61: 8.370561027133224,
62: 8.370920973337924,
63: 8.371280919542624,
64: 8.371640865747326,
65: 8.372000811952026,
66: 8.372360758156725,
67: 8.372720704361427,
68: 8.373080650566127,
69: 8.373440596770827,
70: 8.373800542975529,
71: 8.374160489180229,
72: 8.374520435384929,
73: 8.374880381589628,
74: 8.37524032779433,
75: 8.37560027399903,
76: 8.37596022020373,
77: 8.376320166408432,
78: 8.376680112613132,
79: 8.377040058817832,
80: 8.377400005022531,
81: 8.377759951227233,
82: 8.378119897431933,
83: 8.378479843636633,
84: 8.378839789841335,
85: 8.379199736046035,
86: 8.379559682250735,
87: 8.379919628455436,
88: 8.380279574660136,
89: 8.380639520864836,
90: 8.380999467069536,
91: 8.381359413274238,
92: 8.381719359478938,
93: 8.382079305683638,
94: 8.38243925188834,
95: 8.38279919809304,
96: 8.38315914429774,
97: 8.383519090502439,
98: 8.38387903670714,
99: 8.38423898291184,
100: 8.38459892911654,
101: 8.384958875321242,
102: 8.385318821525942,
103: 8.385678767730642,
104: 8.386038713935344,
105: 8.386398660140044,
106: 8.386758606344744,
107: 8.387118552549444,
108: 8.387478498754145,
109: 8.387838444958845,
110: 8.388198391163545,
111: 8.388558337368247,
112: 8.388918283572947,
113: 8.389278229777647,
114: 8.389638175982347,
115: 8.389998122187048,
116: 8.390358068391748,
117: 8.390718014596448,
118: 8.39107796080115,
119: 8.39143790700585,
120: 8.39179785321055,
121: 8.392157799415251,
122: 8.392517745619951,
123: 8.392877691824651,
124: 8.393237638029351,
125: 8.393597584234053,
126: 8.393957530438753,
127: 8.394317476643453,
128: 8.394677422848154,
129: 8.395037369052854,
130: 8.395397315257554,
131: 8.395757261462254,
132: 8.396117207666956,
133: 8.396477153871656,
134: 8.396837100076356,
135: 8.397197046281057,
136: 8.397556992485757,
137: 8.397916938690457,
138: 8.398276884895157,
139: 8.398636831099859,
140: 8.398996777304559,
141: 8.399356723509259,
142: 8.39971666971396,
143: 8.40007661591866,
144: 8.40043656212336,
145: 8.400796508328062,
146: 8.401156454532762,
147: 8.401516400737462,
148: 8.401876346942162,
149: 8.402236293146863,
150: 8.402596239351563,
151: 8.402956185556263,
152: 8.403316131760965,
153: 8.403676077965665,
154: 8.404036024170365,
155: 8.404395970375065,
156: 8.404755916579767,
157: 8.405115862784466,
158: 8.405475808989166,
159: 8.405835755193868,
160: 8.406195701398568,
161: 8.406555647603268,
162: 8.40691559380797,
163: 8.40727554001267,
164: 8.40763548621737,
165: 8.40799543242207,
166: 8.408355378626771,
167: 8.408715324831471,
168: 8.409075271036171,
169: 8.409435217240873,
170: 8.409795163445573,
171: 8.410155109650272,
172: 8.410515055854972,
173: 8.410875002059674,
174: 8.411234948264374,
175: 8.411594894469074,
176: 8.411954840673776,
177: 8.412314786878476,
178: 8.412674733083175,
179: 8.413034679287877,
180: 8.413394625492577,
181: 8.413754571697277,
182: 8.414114517901977,
183: 8.414474464106679,
184: 8.414834410311379,
185: 8.415194356516078,
186: 8.41555430272078,
187: 8.41591424892548,
188: 8.41627419513018,
189: 8.41663414133488,
190: 8.416994087539582,
191: 8.417354033744282,
192: 8.417713979948982,
193: 8.418073926153683,
194: 8.418433872358383,
195: 8.418793818563083,
196: 8.419153764767785,
197: 8.419513710972485,
198: 8.419873657177185,
199: 8.420233603381885,
200: 8.420593549586586,
201: 8.420953495791286,
202: 8.421313441995986,
203: 8.421673388200688,
204: 8.422033334405388,
205: 8.422393280610088,
206: 8.422753226814788,
207: 8.42311317301949,
208: 8.42347311922419,
209: 8.423833065428889,
210: 8.42419301163359,
211: 8.42455295783829,
212: 8.42491290404299,
213: 8.42527285024769,
214: 8.425632796452392,
215: 8.425992742657092,
216: 8.426352688861792,
217: 8.426712635066494,
218: 8.427072581271194,
219: 8.427432527475894,
220: 8.427792473680595,
221: 8.428152419885295,
222: 8.428512366089995,
223: 8.428872312294695,
224: 8.429232258499397,
225: 8.429592204704097,
226: 8.429952150908797,
227: 8.430312097113498,
228: 8.430672043318198,
229: 8.431031989522898,
230: 8.431391935727598,
231: 8.4317518819323,
232: 8.432111828137,
233: 8.4324717743417,
234: 8.432831720546401,
235: 8.433191666751101,
236: 8.433551612955801,
237: 8.433911559160503,
238: 8.434271505365203,
239: 8.434631451569903,
240: 8.434991397774603,
241: 8.435351343979304,
242: 8.435711290184004,
243: 8.436071236388704,
244: 8.436431182593406,
245: 8.436791128798106,
246: 8.437151075002806,
247: 8.437511021207506,
248: 8.437870967412207,
249: 8.438230913616907,
250: 8.438590859821607,
251: 8.438950806026309,
252: 8.439310752231009,
253: 8.439670698435709,
254: 8.44003064464041,
255: 8.44039059084511,
256: 8.44075053704981,
257: 8.44111048325451,
258: 8.441470429459212,
259: 8.441830375663912,
260: 8.442190321868612,
261: 8.442550268073314,
262: 8.442910214278013,
263: 8.443270160482713,
264: 8.443630106687413,
265: 8.443990052892115,
266: 8.444349999096815,
267: 8.444709945301515,
268: 8.445069891506217,
269: 8.445429837710916,
270: 8.445789783915616,
271: 8.446149730120318,
272: 8.446509676325018,
273: 8.446869622529718,
274: 8.447229568734418,
275: 8.44758951493912,
276: 8.44794946114382,
277: 8.44830940734852,
278: 8.448669353553221,
279: 8.449029299757921,
280: 8.449389245962621,
281: 8.449749192167321,
282: 8.450109138372023,
283: 8.450469084576723,
284: 8.450829030781422,
285: 8.451188976986124,
286: 8.451548923190824,
287: 8.451908869395524,
288: 8.452268815600226,
289: 8.452628761804926,
290: 8.452988708009626,
291: 8.453348654214325,
292: 8.453708600419027,
293: 8.454068546623727,
294: 8.454428492828427,
295: 8.454788439033129,
296: 8.455148385237829,
297: 8.455508331442529,
298: 8.455868277647228,
299: 8.45622822385193,
300: 8.45658817005663,
301: 8.45694811626133,
302: 8.457308062466032,
303: 8.457668008670732,
304: 8.458027954875432,
305: 8.458387901080131,
306: 8.458747847284833,
307: 8.459107793489533,
308: 8.459467739694233,
309: 8.459827685898935,
310: 8.460187632103635,
311: 8.460547578308335,
312: 8.460907524513036,
313: 8.461267470717736,
314: 8.461627416922436,
315: 8.461987363127136,
316: 8.462347309331838,
317: 8.462707255536538,
318: 8.463067201741238,
319: 8.46342714794594,
320: 8.46378709415064,
321: 8.46414704035534,
322: 8.464506986560039,
323: 8.46486693276474,
324: 8.46522687896944,
325: 8.46558682517414,
326: 8.465946771378842,
327: 8.466306717583542,
328: 8.466666663788242,
329: 8.467026609992944,
330: 8.467386556197644,
331: 8.467746502402344,
332: 8.468106448607044,
333: 8.468466394811745,
334: 8.468826341016445,
335: 8.469186287221145,
336: 8.469546233425847,
337: 8.469906179630547,
338: 8.470266125835247,
339: 8.470626072039947,
340: 8.470986018244648,
341: 8.471345964449348,
342: 8.471705910654048,
343: 8.47206585685875,
344: 8.47242580306345,
345: 8.47278574926815,
346: 8.473145695472851,
347: 8.473505641677551,
348: 8.473865587882251,
349: 8.474225534086951,
350: 8.474585480291653,
351: 8.474945426496353,
352: 8.475305372701053,
353: 8.475665318905754,
354: 8.476025265110454,
355: 8.476385211315154,
356: 8.476745157519854,
357: 8.477105103724556,
358: 8.477465049929256,
359: 8.477824996133956,
360: 8.478184942338657,
361: 8.478544888543357,
362: 8.478904834748057,
363: 8.479264780952759,
364: 8.479624727157459,
365: 8.479984673362159,
366: 8.480344619566859,
367: 8.48070456577156,
368: 8.48106451197626,
369: 8.48142445818096,
370: 8.481784404385662,
371: 8.482144350590362,
372: 8.482504296795062,
373: 8.482864242999762,
374: 8.483224189204464,
375: 8.483584135409163,
376: 8.483944081613863,
377: 8.484304027818565,
378: 8.484663974023265,
379: 8.485023920227965,
380: 8.485383866432667,
381: 8.485743812637367,
382: 8.486103758842066,
383: 8.486463705046766,
384: 8.486823651251468,
385: 8.487183597456168,
386: 8.487543543660868,
387: 8.48790348986557,
388: 8.48826343607027,
389: 8.48862338227497,
390: 8.48898332847967,
391: 8.489343274684371,
392: 8.489703220889071,
393: 8.490063167093771,
},
"fcst_lower": {
0: -np.inf,
1: -np.inf,
2: -np.inf,
3: -np.inf,
4: -np.inf,
5: -np.inf,
6: -np.inf,
7: -np.inf,
8: -np.inf,
9: -np.inf,
10: -np.inf,
11: -np.inf,
12: -np.inf,
13: -np.inf,
14: -np.inf,
15: -np.inf,
16: -np.inf,
17: -np.inf,
18: -np.inf,
19: -np.inf,
20: -np.inf,
21: -np.inf,
22: -np.inf,
23: -np.inf,
24: -np.inf,
25: -np.inf,
26: -np.inf,
27: -np.inf,
28: -np.inf,
29: -np.inf,
30: -np.inf,
31: -np.inf,
32: -np.inf,
33: -np.inf,
34: -np.inf,
35: -np.inf,
36: -np.inf,
37: -np.inf,
38: -np.inf,
39: -np.inf,
40: -np.inf,
41: -np.inf,
42: -np.inf,
43: -np.inf,
44: -np.inf,
45: -np.inf,
46: -np.inf,
47: -np.inf,
48: -np.inf,
49: -np.inf,
50: -np.inf,
51: -np.inf,
52: -np.inf,
53: -np.inf,
54: -np.inf,
55: -np.inf,
56: -np.inf,
57: -np.inf,
58: -np.inf,
59: -np.inf,
60: -np.inf,
61: -np.inf,
62: -np.inf,
63: -np.inf,
64: -np.inf,
65: -np.inf,
66: -np.inf,
67: -np.inf,
68: -np.inf,
69: -np.inf,
70: -np.inf,
71: -np.inf,
72: -np.inf,
73: -np.inf,
74: -np.inf,
75: -np.inf,
76: -np.inf,
77: -np.inf,
78: -np.inf,
79: -np.inf,
80: -np.inf,
81: -np.inf,
82: -np.inf,
83: -np.inf,
84: -np.inf,
85: -np.inf,
86: -np.inf,
87: -np.inf,
88: -np.inf,
89: -np.inf,
90: -np.inf,
91: -np.inf,
92: -np.inf,
93: -np.inf,
94: -np.inf,
95: -np.inf,
96: -np.inf,
97: -np.inf,
98: -np.inf,
99: -np.inf,
100: -np.inf,
101: -np.inf,
102: -np.inf,
103: -np.inf,
104: -np.inf,
105: -np.inf,
106: -np.inf,
107: -np.inf,
108: -np.inf,
109: -np.inf,
110: -np.inf,
111: -np.inf,
112: -np.inf,
113: -np.inf,
114: -np.inf,
115: -np.inf,
116: -np.inf,
117: -np.inf,
118: -np.inf,
119: -np.inf,
120: -np.inf,
121: -np.inf,
122: -np.inf,
123: -np.inf,
124: -np.inf,
125: -np.inf,
126: -np.inf,
127: -np.inf,
128: -np.inf,
129: -np.inf,
130: -np.inf,
131: -np.inf,
132: -np.inf,
133: -np.inf,
134: -np.inf,
135: -np.inf,
136: -np.inf,
137: -np.inf,
138: -np.inf,
139: -np.inf,
140: -np.inf,
141: -np.inf,
142: -np.inf,
143: -np.inf,
144: -np.inf,
145: -np.inf,
146: -np.inf,
147: -np.inf,
148: -np.inf,
149: -np.inf,
150: -np.inf,
151: -np.inf,
152: -np.inf,
153: -np.inf,
154: -np.inf,
155: -np.inf,
156: -np.inf,
157: -np.inf,
158: -np.inf,
159: -np.inf,
160: -np.inf,
161: -np.inf,
162: -np.inf,
163: -np.inf,
164: -np.inf,
165: -np.inf,
166: -np.inf,
167: -np.inf,
168: -np.inf,
169: -np.inf,
170: -np.inf,
171: -np.inf,
172: -np.inf,
173: -np.inf,
174: -np.inf,
175: -np.inf,
176: -np.inf,
177: -np.inf,
178: -np.inf,
179: -np.inf,
180: -np.inf,
181: -np.inf,
182: -np.inf,
183: -np.inf,
184: -np.inf,
185: -np.inf,
186: -np.inf,
187: -np.inf,
188: -np.inf,
189: -np.inf,
190: -np.inf,
191: -np.inf,
192: -np.inf,
193: -np.inf,
194: -np.inf,
195: -np.inf,
196: -np.inf,
197: -np.inf,
198: -np.inf,
199: -np.inf,
200: -np.inf,
201: -np.inf,
202: -np.inf,
203: -np.inf,
204: -np.inf,
205: -np.inf,
206: -np.inf,
207: -np.inf,
208: -np.inf,
209: -np.inf,
210: -np.inf,
211: -np.inf,
212: -np.inf,
213: -np.inf,
214: -np.inf,
215: -np.inf,
216: -np.inf,
217: -np.inf,
218: -np.inf,
219: -np.inf,
220: -np.inf,
221: -np.inf,
222: -np.inf,
223: -np.inf,
224: -np.inf,
225: -np.inf,
226: -np.inf,
227: -np.inf,
228: -np.inf,
229: -np.inf,
230: -np.inf,
231: -np.inf,
232: -np.inf,
233: -np.inf,
234: -np.inf,
235: -np.inf,
236: -np.inf,
237: -np.inf,
238: -np.inf,
239: -np.inf,
240: -np.inf,
241: -np.inf,
242: -np.inf,
243: -np.inf,
244: -np.inf,
245: -np.inf,
246: -np.inf,
247: -np.inf,
248: -np.inf,
249: -np.inf,
250: -np.inf,
251: -np.inf,
252: -np.inf,
253: -np.inf,
254: -np.inf,
255: -np.inf,
256: -np.inf,
257: -np.inf,
258: -np.inf,
259: -np.inf,
260: -np.inf,
261: -np.inf,
262: -np.inf,
263: -np.inf,
264: -np.inf,
265: -np.inf,
266: -np.inf,
267: -np.inf,
268: -np.inf,
269: -np.inf,
270: -np.inf,
271: -np.inf,
272: -np.inf,
273: -np.inf,
274: -np.inf,
275: -np.inf,
276: -np.inf,
277: -np.inf,
278: -np.inf,
279: -np.inf,
280: -np.inf,
281: -np.inf,
282: -np.inf,
283: -np.inf,
284: -np.inf,
285: -np.inf,
286: -np.inf,
287: -np.inf,
288: -np.inf,
289: -np.inf,
290: -np.inf,
291: -np.inf,
292: -np.inf,
293: -np.inf,
294: -np.inf,
295: -np.inf,
296: -np.inf,
297: -np.inf,
298: -np.inf,
299: -np.inf,
300: -np.inf,
301: -np.inf,
302: -np.inf,
303: -np.inf,
304: -np.inf,
305: -np.inf,
306: -np.inf,
307: -np.inf,
308: -np.inf,
309: -np.inf,
310: -np.inf,
311: -np.inf,
312: -np.inf,
313: -np.inf,
314: -np.inf,
315: -np.inf,
316: -np.inf,
317: -np.inf,
318: -np.inf,
319: -np.inf,
320: -np.inf,
321: -np.inf,
322: -np.inf,
323: -np.inf,
324: -np.inf,
325: -np.inf,
326: -np.inf,
327: -np.inf,
328: -np.inf,
329: -np.inf,
330: -np.inf,
331: -np.inf,
332: -np.inf,
333: -np.inf,
334: -np.inf,
335: -np.inf,
336: -np.inf,
337: -np.inf,
338: -np.inf,
339: -np.inf,
340: -np.inf,
341: -np.inf,
342: -np.inf,
343: -np.inf,
344: -np.inf,
345: -np.inf,
346: -np.inf,
347: -np.inf,
348: -np.inf,
349: -np.inf,
350: -np.inf,
351: -np.inf,
352: -np.inf,
353: -np.inf,
354: -np.inf,
355: -np.inf,
356: -np.inf,
357: -np.inf,
358: -np.inf,
359: -np.inf,
360: -np.inf,
361: -np.inf,
362: -np.inf,
363: -np.inf,
364: -np.inf,
365: -np.inf,
366: -np.inf,
367: -np.inf,
368: -np.inf,
369: -np.inf,
370: -np.inf,
371: -np.inf,
372: -np.inf,
373: -np.inf,
374: -np.inf,
375: -np.inf,
376: -np.inf,
377: -np.inf,
378: -np.inf,
379: -np.inf,
380: -np.inf,
381: -np.inf,
382: -np.inf,
383: -np.inf,
384: -np.inf,
385: -np.inf,
386: -np.inf,
387: -np.inf,
388: -np.inf,
389: -np.inf,
390: -np.inf,
391: -np.inf,
392: -np.inf,
393: -np.inf,
},
"fcst_upper": {
0: np.inf,
1: np.inf,
2: np.inf,
3: np.inf,
4: np.inf,
5: np.inf,
6: np.inf,
7: np.inf,
8: np.inf,
9: np.inf,
10: np.inf,
11: np.inf,
12: np.inf,
13: np.inf,
14: np.inf,
15: np.inf,
16: np.inf,
17: np.inf,
18: np.inf,
19: np.inf,
20: np.inf,
21: np.inf,
22: np.inf,
23: np.inf,
24: np.inf,
25: np.inf,
26: np.inf,
27: np.inf,
28: np.inf,
29: np.inf,
30: np.inf,
31: np.inf,
32: np.inf,
33: np.inf,
34: np.inf,
35: np.inf,
36: np.inf,
37: np.inf,
38: np.inf,
39: np.inf,
40: np.inf,
41: np.inf,
42: np.inf,
43: np.inf,
44: np.inf,
45: np.inf,
46: np.inf,
47: np.inf,
48: np.inf,
49: np.inf,
50: np.inf,
51: np.inf,
52: np.inf,
53: np.inf,
54: np.inf,
55: np.inf,
56: np.inf,
57: np.inf,
58: np.inf,
59: np.inf,
60: np.inf,
61: np.inf,
62: np.inf,
63: np.inf,
64: np.inf,
65: np.inf,
66: np.inf,
67: np.inf,
68: np.inf,
69: np.inf,
70: np.inf,
71: np.inf,
72: np.inf,
73: np.inf,
74: np.inf,
75: np.inf,
76: np.inf,
77: np.inf,
78: np.inf,
79: np.inf,
80: np.inf,
81: np.inf,
82: np.inf,
83: np.inf,
84: np.inf,
85: np.inf,
86: np.inf,
87: np.inf,
88: np.inf,
89: np.inf,
90: np.inf,
91: np.inf,
92: np.inf,
93: np.inf,
94: np.inf,
95: np.inf,
96: np.inf,
97: np.inf,
98: np.inf,
99: np.inf,
100: np.inf,
101: np.inf,
102: np.inf,
103: np.inf,
104: np.inf,
105: np.inf,
106: np.inf,
107: np.inf,
108: np.inf,
109: np.inf,
110: np.inf,
111: np.inf,
112: np.inf,
113: np.inf,
114: np.inf,
115: np.inf,
116: np.inf,
117: np.inf,
118: np.inf,
119: np.inf,
120: np.inf,
121: np.inf,
122: np.inf,
123: np.inf,
124: np.inf,
125: np.inf,
126: np.inf,
127: np.inf,
128: np.inf,
129: np.inf,
130: np.inf,
131: np.inf,
132: np.inf,
133: np.inf,
134: np.inf,
135: np.inf,
136: np.inf,
137: np.inf,
138: np.inf,
139: np.inf,
140: np.inf,
141: np.inf,
142: np.inf,
143: np.inf,
144: np.inf,
145: np.inf,
146: np.inf,
147: np.inf,
148: np.inf,
149: np.inf,
150: np.inf,
151: np.inf,
152: np.inf,
153: np.inf,
154: np.inf,
155: np.inf,
156: np.inf,
157: np.inf,
158: np.inf,
159: np.inf,
160: np.inf,
161: np.inf,
162: np.inf,
163: np.inf,
164: np.inf,
165: np.inf,
166: np.inf,
167: np.inf,
168: np.inf,
169: np.inf,
170: np.inf,
171: np.inf,
172: np.inf,
173: np.inf,
174: np.inf,
175: np.inf,
176: np.inf,
177: np.inf,
178: np.inf,
179: np.inf,
180: np.inf,
181: np.inf,
182: np.inf,
183: np.inf,
184: np.inf,
185: np.inf,
186: np.inf,
187: np.inf,
188: np.inf,
189: np.inf,
190: np.inf,
191: np.inf,
192: np.inf,
193: np.inf,
194: np.inf,
195: np.inf,
196: np.inf,
197: np.inf,
198: np.inf,
199: np.inf,
200: np.inf,
201: np.inf,
202: np.inf,
203: np.inf,
204: np.inf,
205: np.inf,
206: np.inf,
207: np.inf,
208: np.inf,
209: np.inf,
210: np.inf,
211: np.inf,
212: np.inf,
213: np.inf,
214: np.inf,
215: np.inf,
216: np.inf,
217: np.inf,
218: np.inf,
219: np.inf,
220: np.inf,
221: np.inf,
222: np.inf,
223: np.inf,
224: np.inf,
225: np.inf,
226: np.inf,
227: np.inf,
228: np.inf,
229: np.inf,
230: np.inf,
231: np.inf,
232: np.inf,
233: np.inf,
234: np.inf,
235: np.inf,
236: np.inf,
237: np.inf,
238: np.inf,
239: np.inf,
240: np.inf,
241: np.inf,
242: np.inf,
243: np.inf,
244: np.inf,
245: np.inf,
246: np.inf,
247: np.inf,
248: np.inf,
249: np.inf,
250: np.inf,
251: np.inf,
252: np.inf,
253: np.inf,
254: np.inf,
255: np.inf,
256: np.inf,
257: np.inf,
258: np.inf,
259: np.inf,
260: np.inf,
261: np.inf,
262: np.inf,
263: np.inf,
264: np.inf,
265: np.inf,
266: np.inf,
267: np.inf,
268: np.inf,
269: np.inf,
270: np.inf,
271: np.inf,
272: np.inf,
273: np.inf,
274: np.inf,
275: np.inf,
276: np.inf,
277: np.inf,
278: np.inf,
279: np.inf,
280: np.inf,
281: np.inf,
282: np.inf,
283: np.inf,
284: np.inf,
285: np.inf,
286: np.inf,
287: np.inf,
288: np.inf,
289: np.inf,
290: np.inf,
291: np.inf,
292: np.inf,
293: np.inf,
294: np.inf,
295: np.inf,
296: np.inf,
297: np.inf,
298: np.inf,
299: np.inf,
300: np.inf,
301: np.inf,
302: np.inf,
303: np.inf,
304: np.inf,
305: np.inf,
306: np.inf,
307: np.inf,
308: np.inf,
309: np.inf,
310: np.inf,
311: np.inf,
312: np.inf,
313: np.inf,
314: np.inf,
315: np.inf,
316: np.inf,
317: np.inf,
318: np.inf,
319: np.inf,
320: np.inf,
321: np.inf,
322: np.inf,
323: np.inf,
324: np.inf,
325: np.inf,
326: np.inf,
327: np.inf,
328: np.inf,
329: np.inf,
330: np.inf,
331: np.inf,
332: np.inf,
333: np.inf,
334: np.inf,
335: np.inf,
336: np.inf,
337: np.inf,
338: np.inf,
339: np.inf,
340: np.inf,
341: np.inf,
342: np.inf,
343: np.inf,
344: np.inf,
345: np.inf,
346: np.inf,
347: np.inf,
348: np.inf,
349: np.inf,
350: np.inf,
351: np.inf,
352: np.inf,
353: np.inf,
354: np.inf,
355: np.inf,
356: np.inf,
357: np.inf,
358: np.inf,
359: np.inf,
360: np.inf,
361: np.inf,
362: np.inf,
363: np.inf,
364: np.inf,
365: np.inf,
366: np.inf,
367: np.inf,
368: np.inf,
369: np.inf,
370: np.inf,
371: np.inf,
372: np.inf,
373: np.inf,
374: np.inf,
375: np.inf,
376: np.inf,
377: np.inf,
378: np.inf,
379: np.inf,
380: np.inf,
381: np.inf,
382: np.inf,
383: np.inf,
384: np.inf,
385: np.inf,
386: np.inf,
387: np.inf,
388: np.inf,
389: np.inf,
390: np.inf,
391: np.inf,
392: np.inf,
393: np.inf,
},
}
)
PEYTON_FCST_LINEAR_INVALID_NEG_ONE = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
            100: pd.Timestamp("2012-08-10 00:00:00"),
import pytest
from datetime import datetime, timedelta
import pytz
import numpy as np
from pandas import (NaT, Index, Timestamp, Timedelta, Period,
DatetimeIndex, PeriodIndex,
TimedeltaIndex, Series, isna)
from pandas.util import testing as tm
from pandas._libs.tslib import iNaT
@pytest.mark.parametrize('nat, idx', [(Timestamp('NaT'), DatetimeIndex),
(Timedelta('NaT'), TimedeltaIndex),
(Period('NaT', freq='M'), PeriodIndex)])
def test_nat_fields(nat, idx):
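    # every datetime-like field accessor should yield NaN on NaT and on the
    # NaT-valued scalar, and every boolean accessor should yield False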
for field in idx._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == 'weekday':
continue
result = getattr(NaT, field)
assert np.isnan(result)
result = getattr(nat, field)
assert np.isnan(result)
for field in idx._bool_ops:
result = getattr(NaT, field)
assert result is False
result = getattr(nat, field)
assert result is False
def test_nat_vector_field_access():
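    # vectorised access through DatetimeIndex and Series.dt should propagate
    # NaN for the NaT entries, matching element-wise attribute access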
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
for field in DatetimeIndex._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == 'weekday':
continue
result = getattr(idx, field)
expected = Index([getattr(x, field) for x in idx])
tm.assert_index_equal(result, expected)
s = Series(idx)
for field in DatetimeIndex._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == 'weekday':
continue
result = getattr(s.dt, field)
expected = [getattr(x, field) for x in idx]
tm.assert_series_equal(result, Series(expected))
for field in DatetimeIndex._bool_ops:
result = getattr(s.dt, field)
expected = [getattr(x, field) for x in idx]
tm.assert_series_equal(result, Series(expected))
@pytest.mark.parametrize('klass', [Timestamp, Timedelta, Period])
def test_identity(klass):
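    # every NaN-like input (None, np.nan, iNaT, float('nan'), NaT, 'NaT')
    # should construct the NaT singleton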
assert klass(None) is NaT
result = klass(np.nan)
assert result is NaT
result = klass(None)
assert result is NaT
result = klass(iNaT)
assert result is NaT
result = klass(np.nan)
assert result is NaT
result = klass(float('nan'))
assert result is NaT
result = klass(NaT)
assert result is NaT
result = klass('NaT')
assert result is NaT
assert isna(klass('nat'))
@pytest.mark.parametrize('klass', [Timestamp, Timedelta, Period])
def test_equality(klass):
# nat
if klass is not Period:
        assert klass('').value == iNaT
        assert klass('nat').value == iNaT
        assert klass('NAT').value == iNaT
        assert klass(None).value == iNaT
        assert klass(np.nan).value == iNaT
assert isna(klass('nat'))
@pytest.mark.parametrize('klass', [Timestamp, Timedelta])
def test_round_nat(klass):
# GH14940
ts = klass('nat')
for method in ["round", "floor", "ceil"]:
round_method = getattr(ts, method)
for freq in ["s", "5s", "min", "5min", "h", "5h"]:
assert round_method(freq) is ts
def test_NaT_methods():
# GH 9513
raise_methods = ['astimezone', 'combine', 'ctime', 'dst',
'fromordinal', 'fromtimestamp', 'isocalendar',
'strftime', 'strptime', 'time', 'timestamp',
'timetuple', 'timetz', 'toordinal', 'tzname',
'utcfromtimestamp', 'utcnow', 'utcoffset',
'utctimetuple']
nat_methods = ['date', 'now', 'replace', 'to_datetime', 'today',
'tz_convert', 'tz_localize']
nan_methods = ['weekday', 'isoweekday']
for method in raise_methods:
if hasattr(NaT, method):
with pytest.raises(ValueError):
getattr(NaT, method)()
for method in nan_methods:
if hasattr(NaT, method):
assert np.isnan(getattr(NaT, method)())
for method in nat_methods:
if hasattr(NaT, method):
# see gh-8254
exp_warning = None
if method == 'to_datetime':
exp_warning = FutureWarning
with tm.assert_produces_warning(
exp_warning, check_stacklevel=False):
assert getattr(NaT, method)() is NaT
# GH 12300
assert NaT.isoformat() == 'NaT'
@pytest.mark.parametrize('klass', [Timestamp, Timedelta])
def test_isoformat(klass):
result = klass('NaT').isoformat()
expected = 'NaT'
assert result == expected
def test_nat_arithmetic():
# GH 6873
i = 2
f = 1.5
for (left, right) in [(NaT, i), (NaT, f), (NaT, np.nan)]:
assert left / right is NaT
assert left * right is NaT
assert right * left is NaT
with pytest.raises(TypeError):
right / left
# Timestamp / datetime
    t = Timestamp('2014-01-01')
""" test indexing with ix """
from warnings import catch_warnings
import numpy as np
import pandas as pd
from pandas.types.common import is_scalar
from pandas.compat import lrange
from pandas import Series, DataFrame, option_context, MultiIndex
from pandas.util import testing as tm
from pandas.core.common import PerformanceWarning
class TestIX(tm.TestCase):
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
        tm.assert_series_equal(result1, result3)
from datetime import datetime
from http import HTTPStatus
from types import SimpleNamespace
import pytest
import pandas as pd
from fastapi import HTTPException
from osiris.core.enums import TimeResolution
from tests.conftest import client
def test_connection(mocker):
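    # the connection check should respond 200 and probe the dataset directory
    # using the credentials taken from the request headers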
directory_client = mocker.patch('app.routers.grafana_json.__get_directory_client')
check_directory_exist = mocker.patch('app.routers.grafana_json.check_directory_exist')
response = client.get(
'v1/grafana/12345',
headers={'client-id': 'mr_test', 'client-secret': 'secret'}
)
assert response.status_code == HTTPStatus.OK
assert directory_client.called
assert directory_client.call_args.args == ('12345', 'mr_test', 'secret')
assert check_directory_exist.called
def test_connection_missing_headers(mocker):
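    # without client-id/client-secret headers the request is rejected before
    # any directory client is created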
directory_client = mocker.patch('app.routers.grafana_json.__get_directory_client')
check_directory_exist = mocker.patch('app.routers.grafana_json.check_directory_exist')
response = client.get(
'v1/grafana/12345'
)
assert response.status_code == HTTPStatus.BAD_REQUEST
assert not directory_client.called
assert not check_directory_exist.called
def test_search(mocker):
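    # /search should return the metrics configured in the Grafana settings,
    # sorted alphabetically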
directory_client = mocker.patch('app.routers.grafana_json.__get_directory_client')
get_grafana_settings = mocker.patch('app.routers.grafana_json.__get_grafana_settings')
get_grafana_settings.return_value = {'metrics': ['c', 'a', 'b']}
response = client.post(
'v1/grafana/12345/search',
headers={'client-id': 'mr_test', 'client-secret': 'secret'}
)
assert response.status_code == HTTPStatus.OK
assert directory_client.called
assert directory_client.call_args.args == ('12345', 'mr_test', 'secret')
assert get_grafana_settings.called
assert response.json() == ['a', 'b', 'c']
def test_query_without_targets(mocker):
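    # /query with no targets, or with an empty target name, should short-circuit
    # and never touch the storage account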
directory_client = mocker.patch('app.routers.grafana_json.__get_directory_client')
response = client.post(
'v1/grafana/12345/query',
headers={'client-id': 'mr_test', 'client-secret': 'secret'},
json={'maxDataPoints': 31,
'adhocFilters': [],
'intervalMs': 47,
'targets': [],
'range': {'from': '2021-01-01',
'to': '2021-02-01'
}
}
)
assert response.status_code == HTTPStatus.OK
assert not directory_client.called
response = client.post(
'v1/grafana/12345/query',
headers={'client-id': 'mr_test', 'client-secret': 'secret'},
json={'maxDataPoints': 31,
'adhocFilters': [],
'intervalMs': 47,
'targets': [{'refId': 'id', 'target': '', 'type': 'atype', 'data': {}}],
'range': {'from': '2021-01-01', 'to': '2021-02-01'}
}
)
assert response.status_code == HTTPStatus.OK
assert not directory_client.called
def test_query_with_targets(mocker):
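    # a fully specified target should trigger the whole pipeline: settings lookup,
    # data retrieval for the requested time range, ad-hoc filtering and serialisation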
directory_client = mocker.patch('app.routers.grafana_json.__get_directory_client')
get_grafana_settings = mocker.patch('app.routers.grafana_json.__get_grafana_settings')
retrieve_data = mocker.patch('app.routers.grafana_json.__retrieve_data')
filter_with_adhoc_filters = mocker.patch('app.routers.grafana_json.__filter_with_adhoc_filters')
dataframe_to_response = mocker.patch('app.routers.grafana_json.__dataframe_to_response')
response = client.post(
'v1/grafana/12345/query',
headers={'client-id': 'mr_test', 'client-secret': 'secret'},
json={'maxDataPoints': 31,
'adhocFilters': [],
'intervalMs': 47,
'targets': [{'refId': 'id', 'target': 'atarget', 'type': 'atype', 'data': {}}],
'range': {'from': '2021-01-01T10:01:02', 'to': '2021-02-01T08:23:20'}
}
)
assert response.status_code == HTTPStatus.OK
assert directory_client.called
assert get_grafana_settings.called
assert retrieve_data.called
assert retrieve_data.await_args.args[0] == datetime(2021, 1, 1, 10, 1, 2)
assert retrieve_data.await_args.args[1] == datetime(2021, 2, 1, 8, 23, 20)
assert filter_with_adhoc_filters.called
assert dataframe_to_response.called
assert response.json() == []
def test_annotations():
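    # the annotations endpoint is not wired to any data and should respond
    # with an empty list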
response = client.post(
'v1/grafana/12345/annotations',
headers={'client-id': 'mr_test', 'client-secret': 'secret'}
)
assert response.status_code == HTTPStatus.OK
assert response.json() == []
def test_tag_keys(mocker):
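    # /tag-keys should expose the tag keys stored in the dataset's Grafana settings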
directory_client = mocker.patch('app.routers.grafana_json.__get_directory_client')
get_grafana_settings = mocker.patch('app.routers.grafana_json.__get_grafana_settings')
    get_grafana_settings.return_value = {'tag_keys': ['a', 'b', 'c']}
response = client.post(
'v1/grafana/12345/tag-keys',
headers={'client-id': 'mr_test', 'client-secret': 'secret'}
)
assert response.status_code == HTTPStatus.OK
assert directory_client.called
assert directory_client.call_args.args == ('12345', 'mr_test', 'secret')
assert get_grafana_settings.called
assert response.json() == ['a', 'b', 'c']
def test_tag_values(mocker):
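    # /tag-values should return the configured values for a known key and an
    # empty list for an unknown key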
directory_client = mocker.patch('app.routers.grafana_json.__get_directory_client')
get_grafana_settings = mocker.patch('app.routers.grafana_json.__get_grafana_settings')
    get_grafana_settings.return_value = {'tag_values': {'test_key': ['a', 'b', 'c']}}
response = client.post(
'v1/grafana/12345/tag-values',
headers={'client-id': 'mr_test', 'client-secret': 'secret'},
json={'key': 'test_key'}
)
assert response.status_code == HTTPStatus.OK
assert directory_client.called
assert directory_client.call_args.args == ('12345', 'mr_test', 'secret')
assert get_grafana_settings.called
assert response.json() == ['a', 'b', 'c']
response = client.post(
'v1/grafana/12345/tag-values',
headers={'client-id': 'mr_test', 'client-secret': 'secret'},
json={'key': 'unknown_key'}
)
assert response.status_code == HTTPStatus.OK
assert directory_client.called
assert get_grafana_settings.called
assert response.json() == []
def test_is_targets_set_for_all():
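    # the helper is only true when every target in the request has a non-empty name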
from app.routers.grafana_json import __is_targets_set_for_all
targets = []
result = __is_targets_set_for_all(targets)
assert not result
targets = [SimpleNamespace(**{'refId': 'id', 'target': '', 'type': 'atype', 'data': 'adata'}),
SimpleNamespace(**{'refId': 'id', 'target': '323', 'type': 'atype', 'data': 'adata'})]
result = __is_targets_set_for_all(targets)
assert not result
targets = [SimpleNamespace(**{'refId': 'id', 'target': '23', 'type': 'atype', 'data': 'adata'}),
SimpleNamespace(**{'refId': 'id', 'target': '323', 'type': 'atype', 'data': 'adata'})]
result = __is_targets_set_for_all(targets)
assert result
def test_filter_with_additional_filters():
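    # rows must match every additional filter, and the filter values are
    # prefixed to the target name used in the response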
from app.routers.grafana_json import __filter_with_additional_filters
data = [{'metric_1': 'value_1', 'metric_2': 'value_2'},
{'metric_1': 'value_2', 'metric_2': 'value_1'},
{'metric_1': 'value_6', 'metric_2': 'value_4'},
{'metric_1': 'value_2', 'metric_2': 'value_2'}]
data_df = pd.DataFrame(data)
additional_filters = {'metric_1': 'value_2', 'metric_2': 'value_1'}
target = 'test_query'
target_return_name, res_data_df = __filter_with_additional_filters(data_df, target, additional_filters)
assert target_return_name == 'value_2_value_1_test_query'
assert res_data_df.shape[0] == 1
assert res_data_df.to_dict(orient='records') == [{'metric_1': 'value_2', 'metric_2': 'value_1'}]
def test_dataframe_to_response(mocker):
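    # an empty dataframe should serialise to an empty response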
from app.routers.grafana_json import __dataframe_to_response
resample_timeframe = mocker.patch('app.routers.grafana_json.__resample_timeframe')
dataframe_to_timeserie_response = mocker.patch('app.routers.grafana_json.__dataframe_to_timeserie_response')
dataframe_to_table_response = mocker.patch('app.routers.grafana_json.__dataframe_to_table_response')
data_df = pd.DataFrame([])
additional_filters = {}
target = 'Raw'
target_type = 'timeseries'
freq = '1000ms'
grafana_settings = {}
result = __dataframe_to_response(data_df, target_type, target, additional_filters, freq, grafana_settings)
assert result == []
data = [{'metric_1': 'value_1', 'metric_2': 'value_2'},
{'metric_1': 'value_2', 'metric_2': 'value_1'},
{'metric_1': 'value_6', 'metric_2': 'value_4'},
{'metric_1': 'value_2', 'metric_2': 'value_2'}]
    data_df = pd.DataFrame(data)
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from .base import BaseExtensionTests
class BaseGetitemTests(BaseExtensionTests):
"""Tests for ExtensionArray.__getitem__."""
def test_iloc_series(self, data):
ser = pd.Series(data)
result = ser.iloc[:4]
expected = pd.Series(data[:4])
self.assert_series_equal(result, expected)
result = ser.iloc[[0, 1, 2, 3]]
self.assert_series_equal(result, expected)
def test_iloc_frame(self, data):
df = pd.DataFrame({"A": data, 'B':
np.arange(len(data), dtype='int64')})
expected = pd.DataFrame({"A": data[:4]})
# slice -> frame
result = df.iloc[:4, [0]]
self.assert_frame_equal(result, expected)
# sequence -> frame
result = df.iloc[[0, 1, 2, 3], [0]]
self.assert_frame_equal(result, expected)
expected = pd.Series(data[:4], name='A')
# slice -> series
result = df.iloc[:4, 0]
self.assert_series_equal(result, expected)
# sequence -> series
        result = df.iloc[[0, 1, 2, 3], 0]
self.assert_series_equal(result, expected)
def test_loc_series(self, data):
ser = pd.Series(data)
result = ser.loc[:3]
expected = pd.Series(data[:4])
self.assert_series_equal(result, expected)
result = ser.loc[[0, 1, 2, 3]]
self.assert_series_equal(result, expected)
def test_loc_frame(self, data):
df = pd.DataFrame({"A": data,
'B': np.arange(len(data), dtype='int64')})
expected = pd.DataFrame({"A": data[:4]})
# slice -> frame
result = df.loc[:3, ['A']]
self.assert_frame_equal(result, expected)
# sequence -> frame
result = df.loc[[0, 1, 2, 3], ['A']]
self.assert_frame_equal(result, expected)
expected = pd.Series(data[:4], name='A')
# slice -> series
result = df.loc[:3, 'A']
self.assert_series_equal(result, expected)
# sequence -> series
        result = df.loc[[0, 1, 2, 3], 'A']
self.assert_series_equal(result, expected)
def test_getitem_scalar(self, data):
result = data[0]
assert isinstance(result, data.dtype.type)
result = pd.Series(data)[0]
assert isinstance(result, data.dtype.type)
def test_getitem_scalar_na(self, data_missing, na_cmp, na_value):
result = data_missing[0]
assert na_cmp(result, na_value)
def test_getitem_mask(self, data):
# Empty mask, raw array
mask = np.zeros(len(data), dtype=bool)
result = data[mask]
assert len(result) == 0
assert isinstance(result, type(data))
# Empty mask, in series
mask = np.zeros(len(data), dtype=bool)
result = pd.Series(data)[mask]
assert len(result) == 0
assert result.dtype == data.dtype
# non-empty mask, raw array
mask[0] = True
result = data[mask]
assert len(result) == 1
assert isinstance(result, type(data))
# non-empty mask, in series
result = pd.Series(data)[mask]
assert len(result) == 1
assert result.dtype == data.dtype
def test_getitem_slice(self, data):
# getitem[slice] should return an array
result = data[slice(0)] # empty
assert isinstance(result, type(data))
result = data[slice(1)] # scalar
assert isinstance(result, type(data))
def test_take_sequence(self, data):
result = pd.Series(data)[[0, 1, 3]]
assert result.iloc[0] == data[0]
assert result.iloc[1] == data[1]
assert result.iloc[2] == data[3]
def test_take(self, data, na_value, na_cmp):
result = data.take([0, -1])
assert result.dtype == data.dtype
assert result[0] == data[0]
na_cmp(result[1], na_value)
with tm.assert_raises_regex(IndexError, "out of bounds"):
data.take([len(data) + 1])
def test_take_empty(self, data, na_value, na_cmp):
empty = data[:0]
result = empty.take([-1])
na_cmp(result[0], na_value)
with tm.assert_raises_regex(IndexError, "cannot do a non-empty take"):
empty.take([0, 1])
@pytest.mark.xfail(reason="Series.take with extension array buggy for -1")
def test_take_series(self, data):
        s = pd.Series(data)
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
        for s in [Series(), Series(index=list('abc'))]:
            result = s.get(None)
            self.assertIsNone(result)
""" I/O functions of the aecg package: tools for annotated ECG HL7 XML files
This module implements helper functions to parse and read annotated
electrocardiogram (ECG) stored in XML files following HL7
specification.
See authors, license and disclaimer at the top level directory of this project.
"""
# Imports =====================================================================
from typing import Dict, Tuple
from lxml import etree
from aecg import validate_xpath, new_validation_row, VALICOLS, \
TIME_CODES, SEQUENCE_CODES, \
Aecg, AecgLead, AecgAnnotationSet
import copy
import logging
import pandas as pd
import re
import zipfile
# Python logging ==============================================================
logger = logging.getLogger(__name__)
def parse_annotations(xml_filename: str,
zip_filename: str,
aecg_doc: etree._ElementTree,
aecgannset: AecgAnnotationSet,
path_prefix: str,
annsset_xmlnode_path: str,
valgroup: str = "RHYTHM",
log_validation: bool = False) -> Tuple[
AecgAnnotationSet, pd.DataFrame]:
"""Parses `aecg_doc` XML document and extracts annotations
Args:
xml_filename (str): Filename of the aECG XML file.
        zip_filename (str): Filename of zip file containing the aECG XML file.
If '', then xml file is not stored in a zip file.
aecg_doc (etree._ElementTree): XML document of the aECG XML file.
aecgannset (AecgAnnotationSet): Annotation set to which append found
annotations.
path_prefix (str): Prefix of xml path from which start searching for
annotations.
annsset_xmlnode_path (str): Path to xml node of the annotation set
containing the annotations.
valgroup (str, optional): Indicates whether to search annotations in
rhythm or derived waveform. Defaults to "RHYTHM".
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Tuple[AecgAnnotationSet, pd.DataFrame]: Annotation set updated with
found annotations and dataframe with results of validation.
"""
anngrpid = 0
# Annotations stored within a beat
beatnodes = aecg_doc.xpath((
path_prefix +
"/component/annotation/code[@code=\'MDC_ECG_BEAT\']").replace(
'/', '/ns:'), namespaces={'ns': 'urn:hl7-org:v3'})
beatnum = 0
valpd = pd.DataFrame()
if len(beatnodes) > 0:
logger.info(
f'{xml_filename},{zip_filename},'
f'{valgroup} {len(beatnodes)} annotated beats found')
for beatnode in beatnodes:
for rel_path in ["../component/annotation/"
"code[contains(@code, \"MDC_ECG_\")]"]:
annsnodes = beatnode.xpath(rel_path.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
rel_path2 = "../value"
for annsnode in annsnodes:
ann = {"anngrpid": anngrpid, "beatnum": "", "code": "",
"codetype": "",
"wavecomponent": "", "wavecomponent2": "",
"timecode": "",
"value": "", "value_unit": "",
"low": "", "low_unit": "",
"high": "", "high_unit": "",
"lead": ""}
# Annotation code
valrow2 = validate_xpath(
annsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
ann["code"] = valrow2["VALUE"]
# Annotation type from top level value
valrow2 = validate_xpath(annsnode,
"../value",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename, valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/value"
if log_validation:
valpd = valpd.append(pd.DataFrame(
[valrow2], columns=VALICOLS), ignore_index=True)
if valrow2["VALIOUT"] == "PASSED":
ann["codetype"] = valrow2["VALUE"]
# Annotations type
valrow2 = validate_xpath(
annsnode,
rel_path2,
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path + \
"/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["beatnum"] = beatnum
ann["codetype"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
subannsnodes = annsnode.xpath(
rel_path.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
if len(subannsnodes) == 0:
subannsnodes = [annsnode]
else:
subannsnodes += [annsnode]
# Exclude annotations reporting interval values only
subannsnodes = [
sa for sa in subannsnodes
if not sa.get("code").startswith("MDC_ECG_TIME_PD_")]
for subannsnode in subannsnodes:
# Annotations type
valrow2 = validate_xpath(subannsnode,
rel_path2,
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["wavecomponent"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value
valrow2 = validate_xpath(subannsnode,
rel_path2,
"urn:hl7-org:v3",
"value",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value units
valrow2 = validate_xpath(subannsnode,
rel_path2,
"urn:hl7-org:v3",
"unit",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value_unit"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotations info from supporting ROI
rel_path3 = "../support/supportingROI/component/"\
"boundary/value"
for n in ["", "low", "high"]:
if n != "":
rp = rel_path3 + "/" + n
else:
rp = rel_path3
valrow3 = validate_xpath(
subannsnode,
rp,
"urn:hl7-org:v3",
"value",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n] = valrow3["VALUE"]
else:
ann["value"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
valrow3 = validate_xpath(
subannsnode,
rp,
"urn:hl7-org:v3",
"unit",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n + "_unit"] = valrow3["VALUE"]
else:
ann["value_unit"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
# annotations time encoding, lead and other info used
# by value and supporting ROI
rel_path4 = "../support/supportingROI/component/"\
"boundary/code"
roinodes = subannsnode.xpath(
rel_path4.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
for roinode in roinodes:
valrow4 = validate_xpath(
roinode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow4["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path4
if valrow4["VALIOUT"] == "PASSED":
if valrow4["VALUE"] in ["TIME_ABSOLUTE",
"TIME_RELATIVE"]:
ann["timecode"] = valrow4["VALUE"]
else:
ann["lead"] = valrow4["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow4], columns=VALICOLS),
ignore_index=True)
aecgannset.anns.append(copy.deepcopy(ann))
else:
# Annotations type
valrow2 = validate_xpath(annsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path +\
"/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["beatnum"] = beatnum
ann["codetype"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value
valrow2 = validate_xpath(annsnode,
rel_path2,
"urn:hl7-org:v3",
"value",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value units
valrow2 = validate_xpath(annsnode,
rel_path2,
"urn:hl7-org:v3",
"unit",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value_unit"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotations time encoding, lead and other info used
# by value and supporting ROI
rel_path4 = "../support/supportingROI/component/" \
"boundary/code"
roinodes = annsnode.xpath(
rel_path4.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
for roinode in roinodes:
valrow4 = validate_xpath(roinode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow4["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path4
if valrow4["VALIOUT"] == "PASSED":
if valrow4["VALUE"] in ["TIME_ABSOLUTE",
"TIME_RELATIVE"]:
ann["timecode"] = valrow4["VALUE"]
else:
ann["lead"] = valrow4["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow4],
columns=VALICOLS),
ignore_index=True)
aecgannset.anns.append(copy.deepcopy(ann))
else:
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
anngrpid = anngrpid + 1
beatnum = beatnum + 1
if len(beatnodes) > 0:
logger.info(
f'{xml_filename},{zip_filename},'
f'{valgroup} {beatnum} annotated beats and {anngrpid} '
f'annotations groups found')
anngrpid_from_beats = anngrpid
# Annotations stored without an associated beat
for codetype_path in ["/component/annotation/code["
"(contains(@code, \"MDC_ECG_\") and"
" not (@code=\'MDC_ECG_BEAT\'))]"]:
annsnodes = aecg_doc.xpath(
(path_prefix + codetype_path).replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
rel_path2 = "../value"
for annsnode in annsnodes:
ann = {"anngrpid": anngrpid, "beatnum": "", "code": "",
"codetype": "",
"wavecomponent": "", "wavecomponent2": "",
"timecode": "",
"value": "", "value_unit": "",
"low": "", "low_unit": "",
"high": "", "high_unit": "",
"lead": ""}
# Annotations code
valrow2 = validate_xpath(annsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename, valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
if valrow2["VALIOUT"] == "PASSED":
ann["code"] = valrow2["VALUE"]
# Annotation type from top level value
valrow2 = validate_xpath(annsnode,
"../value",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename, valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/value"
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
if valrow2["VALIOUT"] == "PASSED":
ann["codetype"] = valrow2["VALUE"]
subannsnodes = annsnode.xpath(
(".." + codetype_path).replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
if len(subannsnodes) == 0:
subannsnodes = [annsnode]
for subannsnode in subannsnodes:
subsubannsnodes = subannsnode.xpath(
(".." + codetype_path).replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
tmpnodes = [subannsnode]
if len(subsubannsnodes) > 0:
tmpnodes = tmpnodes + subsubannsnodes
for subsubannsnode in tmpnodes:
ann["wavecomponent"] = ""
ann["wavecomponent2"] = ""
ann["timecode"] = ""
ann["value"] = ""
ann["value_unit"] = ""
ann["low"] = ""
ann["low_unit"] = ""
ann["high"] = ""
ann["high_unit"] = ""
roi_base = "../support/supportingROI/component/boundary"
rel_path3 = roi_base + "/value"
valrow2 = validate_xpath(
subsubannsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/code"
if valrow2["VALIOUT"] == "PASSED":
if not ann["codetype"].endswith("WAVE"):
ann["codetype"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations type
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["wavecomponent"] = valrow2["VALUE"]
# if ann["wavecomponent"] == "":
# ann["wavecomponent"] = valrow2["VALUE"]
# else:
# ann["wavecomponent2"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value as attribute
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"value",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value units
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"unit",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value_unit"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotations info from supporting ROI
for n in ["", "low", "high"]:
if n != "":
rp = rel_path3 + "/" + n
else:
rp = rel_path3
valrow3 = validate_xpath(
subsubannsnode,
rp,
"urn:hl7-org:v3",
"value",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n] = valrow3["VALUE"]
else:
ann["value"] = valrow3["VALUE"]
else:
roi_base = "../component/annotation/support/"\
"supportingROI/component/boundary"
# Annotations type
valrow2 = validate_xpath(subsubannsnode,
"../component/annotation/"
"value",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + \
"../component/annotation/value"
if valrow2["VALIOUT"] == "PASSED":
ann["wavecomponent2"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotation values
if n != "":
rp = roi_base + "/value/" + n
else:
rp = roi_base + "/value"
valrow3 = validate_xpath(subsubannsnode,
rp,
"urn:hl7-org:v3",
"value",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n] = valrow3["VALUE"]
else:
ann["value"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
valrow3 = validate_xpath(
subsubannsnode,
rp,
"urn:hl7-org:v3",
"unit",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT"
"_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n + "_unit"] = valrow3["VALUE"]
else:
ann["value_unit"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
# annotations time encoding, lead and other info used by
# value and supporting ROI
for rel_path4 in ["../support/supportingROI/component/"
"boundary",
"../component/annotation/support/"
"supportingROI/component/boundary"]:
roinodes = subsubannsnode.xpath(
rel_path4.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
for roinode in roinodes:
valrow4 = validate_xpath(roinode,
"./code",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow4["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path4
if valrow4["VALIOUT"] == "PASSED":
if valrow4["VALUE"] in ["TIME_ABSOLUTE",
"TIME_RELATIVE"]:
ann["timecode"] = valrow4["VALUE"]
else:
ann["lead"] = valrow4["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow4], columns=VALICOLS),
ignore_index=True)
aecgannset.anns.append(copy.deepcopy(ann))
anngrpid = anngrpid + 1
logger.info(
f'{xml_filename},{zip_filename},'
f'{valgroup} {anngrpid-anngrpid_from_beats} annotations groups'
f' without an associated beat found')
return aecgannset, valpd
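# Illustrative sketch (hypothetical, not asserting the package's real call flow):
# driving parse_annotations for the rhythm annotation set of an aECG file that
# has already been parsed with lxml. The XPath below and the no-argument
# AecgAnnotationSet() construction are assumptions made only for illustration.
def _example_rhythm_annotations(xml_filename: str) -> pd.DataFrame:
    aecg_doc = etree.parse(xml_filename)
    annset = AecgAnnotationSet()
    xmlnode_path = "./component/series/subjectOf/annotationSet"
    annset, _ = parse_annotations(
        xml_filename, "", aecg_doc, annset,
        xmlnode_path, xmlnode_path,
        valgroup="RHYTHM", log_validation=False)
    # each found annotation is a flat dict, so a DataFrame view is immediate
    return pd.DataFrame(annset.anns)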
def parse_generalinfo(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts general information
    This function parses the `aecg_doc` xml document searching for general
    information, which it stores in the returned `Aecg`: unique identifier
    (UUID), ECG date and time of collection (EGDTC), and device information.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
# =======================================
# UUID
# =======================================
valrow = validate_xpath(aecg_doc,
"./*[local-name() = \"id\"]",
"",
"root",
new_validation_row(aecg.filename,
"GENERAL",
"UUID"))
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID found: {valrow["VALUE"]}')
aecg.UUID = valrow["VALUE"]
else:
logger.critical(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID not found')
valrow = validate_xpath(aecg_doc,
"./*[local-name() = \"id\"]",
"",
"extension",
new_validation_row(aecg.filename,
"GENERAL",
"UUID"))
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
if valrow["VALIOUT"] == "PASSED":
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID extension found: {valrow["VALUE"]}')
aecg.UUID += valrow["VALUE"]
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID updated to: {aecg.UUID}')
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID extension not found')
# =======================================
# EGDTC
# =======================================
valpd = pd.DataFrame()
egdtc_found = False
for n in ["low", "center", "high"]:
valrow = validate_xpath(aecg_doc,
"./*[local-name() = \"effectiveTime\"]/"
"*[local-name() = \"" + n + "\"]",
"",
"value",
new_validation_row(aecg.filename, "GENERAL",
"EGDTC_" + n),
"WARNING")
if valrow["VALIOUT"] == "PASSED":
egdtc_found = True
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'EGDTC {n} found: {valrow["VALUE"]}')
aecg.EGDTC[n] = valrow["VALUE"]
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if not egdtc_found:
logger.critical(
f'{aecg.filename},{aecg.zipContainer},'
f'EGDTC not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(valpd,
ignore_index=True)
# =======================================
# DEVICE
# =======================================
# DEVICE = {"manufacturer": "", "model": "", "software": ""}
valrow = validate_xpath(aecg_doc,
"./component/series/author/"
"seriesAuthor/manufacturerOrganization/name",
"urn:hl7-org:v3",
"",
new_validation_row(aecg.filename, "GENERAL",
"DEVICE_manufacturer"),
"WARNING")
if valrow["VALIOUT"] == "PASSED":
tmp = valrow["VALUE"].replace("\n", "|")
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE manufacturer found: {tmp}')
aecg.DEVICE["manufacturer"] = valrow["VALUE"]
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE manufacturer not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./component/series/author/"
"seriesAuthor/manufacturedSeriesDevice/"
"manufacturerModelName",
"urn:hl7-org:v3",
"",
new_validation_row(aecg.filename, "GENERAL",
"DEVICE_model"),
"WARNING")
if valrow["VALIOUT"] == "PASSED":
tmp = valrow["VALUE"].replace("\n", "|")
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE model found: {tmp}')
aecg.DEVICE["model"] = valrow["VALUE"]
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE model not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./component/series/author/"
"seriesAuthor/manufacturedSeriesDevice/"
"softwareName",
"urn:hl7-org:v3",
"",
new_validation_row(aecg.filename, "GENERAL",
"DEVICE_software"),
"WARNING")
if valrow["VALIOUT"] == "PASSED":
tmp = valrow["VALUE"].replace("\n", "|")
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE software found: {tmp}')
aecg.DEVICE["software"] = valrow["VALUE"]
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE software not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
return aecg
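# Hypothetical usage sketch: parse_subjectinfo, parse_trtainfo, parse_studyinfo
# and parse_timepoints below all follow the same (aecg_doc, aecg, log_validation)
# -> Aecg calling pattern shown here. Constructing Aecg() without arguments and
# assigning filename directly are assumptions made only for illustration.
def _example_general_info(xml_filename: str) -> Aecg:
    aecg = Aecg()
    aecg.filename = xml_filename
    aecg_doc = etree.parse(xml_filename)
    return parse_generalinfo(aecg_doc, aecg, log_validation=True)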
def parse_subjectinfo(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts subject information
    This function parses the `aecg_doc` xml document searching for subject
    information, which it stores in the returned `Aecg`: subject unique
    identifier (USUBJID), gender, birthtime, and race.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
# =======================================
# USUBJID
# =======================================
valpd = pd.DataFrame()
for n in ["root", "extension"]:
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/subject/trialSubject/id",
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename,
"SUBJECTINFO",
"USUBJID_" + n))
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.USUBJID ID {n} found: {valrow["VALUE"]}')
aecg.USUBJID[n] = valrow["VALUE"]
else:
            logger.warning(
                f'{aecg.filename},{aecg.zipContainer},'
                f'DM.USUBJID ID {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if (aecg.USUBJID["root"] == "") and (aecg.USUBJID["extension"] == ""):
logger.error(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.USUBJID cannot be established.')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(valpd,
ignore_index=True)
# =======================================
# SEX / GENDER
# =======================================
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/subject/trialSubject/"
"subjectDemographicPerson/"
"administrativeGenderCode",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "SUBJECTINFO",
"SEX"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.SEX found: {valrow["VALUE"]}')
aecg.SEX = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.SEX not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
# =======================================
# BIRTHTIME
# =======================================
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/subject/trialSubject/"
"subjectDemographicPerson/birthTime",
"urn:hl7-org:v3",
"value",
new_validation_row(aecg.filename, "SUBJECTINFO",
"BIRTHTIME"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.BIRTHTIME found.')
aecg.BIRTHTIME = valrow["VALUE"]
# age_in_years = aecg.subject_age_in_years()
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.BIRTHTIME not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
# =======================================
# RACE
# =======================================
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/subject/trialSubject/"
"subjectDemographicPerson/raceCode",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "SUBJECTINFO",
"RACE"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.RACE found: {valrow["VALUE"]}')
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.RACE not found')
aecg.RACE = valrow["VALUE"]
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
return aecg
def parse_trtainfo(aecg_doc: etree._ElementTree,
aecg: Aecg,
                   log_validation: bool = False) -> Aecg:
    """Parses `aecg_doc` XML document and extracts treatment information
    This function parses the `aecg_doc` xml document searching for treatment
    information, which it stores in the returned `Aecg`.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/definition/"
"treatmentGroupAssignment/code",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "STUDYINFO",
"TRTA"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'TRTA information found: {valrow["VALUE"]}')
aecg.TRTA = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'TRTA information not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
return aecg
def parse_studyinfo(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts study information
    This function parses the `aecg_doc` xml document searching for study
    information, which it stores in the returned `Aecg`: study unique
    identifier (STUDYID) and study title.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
valpd = pd.DataFrame()
for n in ["root", "extension"]:
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/componentOf/"
"clinicalTrial/id",
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename,
"STUDYINFO",
"STUDYID_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'STUDYID {n} found: {valrow["VALUE"]}')
aecg.STUDYID[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'STUDYID {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/componentOf/"
"clinicalTrial/title",
"urn:hl7-org:v3",
"",
new_validation_row(aecg.filename, "STUDYINFO",
"STUDYTITLE"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
tmp = valrow["VALUE"].replace("\n", "")
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'STUDYTITLE found: {tmp}')
aecg.STUDYTITLE = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'STUDYTITLE not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
return aecg
def parse_timepoints(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts timepoints information
    This function parses the `aecg_doc` xml document searching for timepoints
    information, which it stores in the returned `Aecg`: absolute timepoint or
    study event information (TPT), relative timepoint or study event relative
    to a reference event (RTPT), and protocol timepoint information (PTPT).
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
# =======================================
# TPT
# =======================================
valpd = pd.DataFrame()
for n in ["code", "displayName"]:
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/code",
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename,
"STUDYINFO",
"TPT_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT {n} found: {valrow["VALUE"]}')
aecg.TPT[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/reasonCode",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "STUDYINFO",
"TPT_reasonCode"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT reasonCode found: {valrow["VALUE"]}')
aecg.TPT["reasonCode"] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT reasonCode not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valpd = pd.DataFrame()
for n in ["low", "high"]:
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/"
"effectiveTime/" + n,
"urn:hl7-org:v3",
"value",
new_validation_row(aecg.filename,
"STUDYINFO",
"TPT_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT {n} found: {valrow["VALUE"]}')
aecg.TPT[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
# =======================================
# RTPT
# =======================================
valpd = pd.DataFrame()
for n in ["code", "displayName"]:
        valrow = validate_xpath(aecg_doc,
                                "./definition/relativeTimepoint/code",
                                "urn:hl7-org:v3",
                                n,
new_validation_row(aecg.filename,
"STUDYINFO",
"RTPT_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RTPT {n} found: {valrow["VALUE"]}')
aecg.RTPT[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RTPT {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./definition/relativeTimepoint/componentOf/"
"pauseQuantity",
"urn:hl7-org:v3",
"value",
new_validation_row(aecg.filename, "STUDYINFO",
"RTPT_pauseQuantity"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RTPT pauseQuantity value found: {valrow["VALUE"]}')
aecg.RTPT["pauseQuantity"] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RTPT pauseQuantity value not found')
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(pd.DataFrame([valrow],
columns=VALICOLS),
ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./definition/relativeTimepoint/componentOf/"
"pauseQuantity",
"urn:hl7-org:v3",
"unit",
new_validation_row(aecg.filename, "STUDYINFO",
"RTPT_pauseQuantity_unit"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RTPT pauseQuantity unit found: {valrow["VALUE"]}')
aecg.RTPT["pauseQuantity_unit"] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RTPT pauseQuantity unit not found')
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(pd.DataFrame([valrow],
columns=VALICOLS),
ignore_index=True)
# =======================================
# PTPT
# =======================================
valpd = pd.DataFrame()
for n in ["code", "displayName"]:
valrow = validate_xpath(aecg_doc,
"./definition/relativeTimepoint/"
"componentOf/protocolTimepointEvent/code",
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename,
"STUDYINFO",
"PTPT_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'PTPT {n} found: {valrow["VALUE"]}')
aecg.PTPT[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'PTPT {n} not found')
if log_validation:
            valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
                                 ignore_index=True)
import matplotlib.pyplot as plt
import pandas as pd
import re
import os
import numpy as np
from collections import namedtuple
from template import Template
ParticleProperties = namedtuple('ParticleProperties', 'd k density m gamma I')
def NewParticleProperties(d=1.0, k=1e6, rho=1.0, gamma=0.0):
volume = 4./3. * np.pi * (d/2)**3
m = volume * rho
I = 2./5. * m * (d/2)**2
return ParticleProperties(
d=d,
m=m,
k=k,
density=rho,
gamma=gamma,
I=I,
)
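# Quick illustrative check (hypothetical values): a unit-diameter, unit-density
# sphere has volume pi/6, so its mass is pi/6 and its moment of inertia is
# 2/5 * m * (d/2)**2 = 0.1 * m.
def _check_particle_properties():
    p = NewParticleProperties(d=1.0, k=1e6, rho=1.0, gamma=0.0)
    assert np.isclose(p.m, np.pi / 6.0)
    assert np.isclose(p.I, 0.1 * p.m)
    return p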
def CacheMyDataFrame(cache_name):
def decorator(func):
def wrapper(*args, **kwargs):
arg_hash = str(args)
filename = cache_name + arg_hash + '.pkl'
if os.path.exists(filename):
print('[CACHE]: loading {0}'.format(filename))
                data = pd.read_pickle(filename)
            else:
                # not cached yet: compute the frame and persist it for next time
                data = func(*args, **kwargs)
                data.to_pickle(filename)
            return data
        return wrapper
    return decorator
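# Hypothetical usage of the caching decorator above; the function name and the
# frame it builds are illustrative only. The first call computes and writes
# 'demo_frame(5,).pkl'; later calls with the same argument reload that pickle.
@CacheMyDataFrame('demo_frame')
def _demo_frame(n):
    return pd.DataFrame({'x': np.arange(n), 'x_squared': np.arange(n) ** 2})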
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 28 11:37:28 2020
@author: Dripta
"""
from flask import Flask, jsonify, request, render_template, url_for, send_file, send_from_directory, safe_join, abort, redirect
from covidindia import *
from flask import Response
import json
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import os
from hideFile import hider, saveFile
import shutil
from platform import system
import pickle
from flask_cachebuster import CacheBuster
file_path = os.path.dirname(os.path.abspath(__file__))
if system() == 'Windows':
if os.path.exists(os.path.join(file_path, 'static', 'files')):
shutil.rmtree(os.path.join(file_path, 'static', 'files'))
if not os.path.exists(os.path.join(file_path, 'static', 'server_data')):
os.mkdir(os.path.join(file_path, 'static', 'server_data'))
print('Gathering required data....')
init = initializer(silent=True)
with open(os.path.join(file_path, 'static', 'server_data', 'init.pkl'), 'wb') as init_file:
pickle.dump(init, init_file)
filter_data = Data(init)
with open(os.path.join(file_path, 'static', 'server_data', 'filter_data.pkl'), 'wb') as filter_file:
pickle.dump(filter_data, filter_file)
print('Gathering Demographic data....')
demo = Demographic_overview(init, silent=True)
with open(os.path.join(file_path, 'static', 'server_data', 'demo.pkl'), 'wb') as demo_file:
pickle.dump(demo, demo_file)
print('Gathering test data.....')
tested_df = filter_data.tested_subject_data()
tested_df.to_csv(os.path.join(file_path, 'static',
'server_data', 'tested_data.csv'), index=False)
else:
if os.path.exists(os.path.join(file_path, 'static', '.files')):
shutil.rmtree(os.path.join(file_path, 'static', '.files'))
if not os.path.exists(os.path.join(file_path, 'static', 'server_data')):
os.mkdir(os.path.join(file_path, 'static', 'server_data'))
print('Gathering required data....')
init = initializer(silent=True)
with open(os.path.join(file_path, 'static', 'server_data', 'init.pkl'), 'wb') as init_file:
pickle.dump(init, init_file)
filter_data = Data(init)
with open(os.path.join(file_path, 'static', 'server_data', 'filter_data.pkl'), 'wb') as filter_file:
pickle.dump(filter_data, filter_file)
print('Gathering Demographic data....')
demo = Demographic_overview(init, silent=True)
with open(os.path.join(file_path, 'static', 'server_data', 'demo.pkl'), 'wb') as demo_file:
pickle.dump(demo, demo_file)
print('Gathering test data.....')
tested_df = filter_data.tested_subject_data()
tested_df.to_csv(os.path.join(file_path, 'static',
'server_data', 'tested_data.csv'), index=False)
try:
os.mkdir(os.path.join(file_path, 'static', 'files'))
except:
    print("Could not create the files directory.")
try:
hider(os.path.join(file_path, 'static', 'files'))
except:
    print("Warning: can't hide files")
print('Building server')
app = Flask(__name__, static_url_path='/public')
app.config["ENV"] = 'development'
app.config['SERVER_DATA'] = os.path.join(file_path, 'static', 'server_data')
config = {'extensions': ['.js', '.css', '.csv',
'.jpg', '.png', '.gif'], 'hash_size': 10}
cache_buster = CacheBuster(config=config)
cache_buster.init_app(app)
if system() == 'Windows':
app.config["CLIENT_DATA"] = os.path.join(file_path, 'static', 'files')
else:
app.config["CLIENT_DATA"] = os.path.join(file_path, 'static', '.files')
def test_df(dataset):
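    # For each state, keep only the most recent row with complete test figures,
    # then add F = 100 * positive / totaltested (test positivity in percent).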
state_list = np.unique(dataset['state'])
test_table = pd.DataFrame()
for i in state_list:
try:
table = dataset[dataset['state']
== i].replace('', pd.NaT).dropna()
values = table.tail(1)
test_table = pd.concat([test_table, values])
except:
pass
test_table['F'] = (test_table['positive'].astype(
'int')/test_table['totaltested'].astype('int'))*100
return test_table
@app.route('/', methods=['GET'])
def home():
tested_df = pd.read_csv(os.path.join(
file_path, 'static', 'server_data', 'tested_data.csv'))
# with open(os.path.join(file_path, 'static', 'server_data', 'demo.pkl'), 'rb') as file:
#demo = pickle.load(file)
with open(os.path.join(file_path, 'static', 'server_data', 'init.pkl'), 'rb') as file:
init = pickle.load(file)
with open(os.path.join(file_path, 'static', 'server_data', 'filter_data.pkl'), 'rb') as file:
filter_data = pickle.load(file)
date = datetime.strptime(init.csv_Confirmed.columns[-1], '%m/%d/%Y')
date = datetime.strftime(date, '%d/%m/%Y')
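    # Cumulative national totals come from the overall-total (last) row of the
    # latest date column; day-over-day change compares the last two date columns.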
conf_count = init.csv_Confirmed[init.csv_Confirmed.columns[-1]].tolist()[-1]
change_conf = init.csv_Confirmed[init.csv_Confirmed.columns[-1]].tolist(
)[-1]-init.csv_Confirmed[init.csv_Confirmed.columns[-2]].tolist()[-1]
recover_count = init.csv_recovered[init.csv_recovered.columns[-1]].tolist()[-1]
change_recover = init.csv_recovered[init.csv_recovered.columns[-1]].tolist(
)[-1]-init.csv_recovered[init.csv_recovered.columns[-2]].tolist()[-1]
death_count = init.csv_Death[init.csv_Death.columns[-1]].tolist()[-1]
change_death = init.csv_Death[init.csv_Death.columns[-1]].tolist(
)[-1]-init.csv_Death[init.csv_Death.columns[-2]].tolist()[-1]
active_count = conf_count-recover_count-death_count
change_active = change_conf-change_recover-change_death
rank_df = filter_data.rank(1, 'Total Confirmed', cumulative=True)
rank_df_rec = filter_data.rank(1, 'Total Recovered', cumulative=True)
rank_df_death = filter_data.rank(1, 'Total Death', cumulative=True)
f_ratio = test_df(tested_df)
f_ratio_highest = f_ratio.sort_values(by='F', ascending=False).iloc[0, :]
state_data = filter_data.get_dataset_state()
state_data['recovery_rate'] = (
state_data['Total Recovered']/state_data['Total Confirmed'])*100
state_data['Death_rate'] = (
state_data['Total Death']/state_data['Total Confirmed'])*100
state_data = state_data.iloc[:-1, :].dropna()
rec_rate_highest = state_data.sort_values(
by='recovery_rate', ascending=False).iloc[0, :]
death_rate_highest = state_data.sort_values(
by='Death_rate', ascending=False).iloc[0, :]
spike_df_conf = init.count_conf.iloc[-1, 1:]
spike_df_rec = init.count_recover.iloc[-1, 1:]
spike_df_death = init.count_death.iloc[-1, 1:]
return render_template('index_dashboard.html', conf_number=conf_count, recover_number=recover_count,
death_number=death_count, active_number=active_count, date_1=date,
number_high_conf=rank_df['Total Confirmed'].values[0], state_high_conf=rank_df['STATE/UT'].values[0],
number_high_death=rank_df_death['Total Death'].values[
0], state_high_death=rank_df_death['STATE/UT'].values[0],
number_high_recover=rank_df_rec['Total Recovered'].values[
0], state_high_recover=rank_df_rec['STATE/UT'].values[0],
number_high_conftest=float('{0:.2f}'.format(f_ratio_highest['F'])), state_high_conftest=f_ratio_highest['state'],
number_high_deathrate=float('{0:.2f}'.format(death_rate_highest['Death_rate'])), state_high_deathrate=death_rate_highest['STATE/UT'],
number_high_recoverrate=float('{0:.2f}'.format(rec_rate_highest['recovery_rate'])), state_high_recoverrate=rec_rate_highest['STATE/UT'],
high_occur=max(spike_df_conf), high_occur_date=spike_df_conf[spike_df_conf == max(spike_df_conf)].index[0],
high_occur_rec=max(spike_df_rec), high_occur_date_rec=spike_df_rec[spike_df_rec == max(spike_df_rec)].index[0],
high_occur_death=max(spike_df_death), high_occur_death_date=spike_df_death[spike_df_death == max(spike_df_death)].index[0],
up_text_conf=change_conf, up_text_active=change_active, up_text_recover=change_recover,
up_text_death=change_death)
@app.route('/data', methods=['GET', 'POST'])
def data():
with open(os.path.join(file_path, 'static', 'server_data', 'init.pkl'), 'rb') as file:
init = pickle.load(file)
if request.method == 'GET':
'''for i in ['Confirmed','Recovered','Death']:
df = init.show_data(of=i)
saveFile(df, i)'''
return render_template('index_home.html')
elif request.method == 'POST':
value = request.form['result_data']
daily = request.form['daily_data']
if daily == 'No':
df = init.show_data(of=value.split(' ')[1])
saveFile(df, f'{value}-{daily}')
df = df.to_json(orient='records')
else:
df = init.show_data(of=value.split(' ')[1], daily=True)
saveFile(df, f'{value}-{daily}')
df = df.to_json(orient='records')
return df
@app.route('/State', methods=['GET', 'POST'])
def state():
with open(os.path.join(file_path, 'static', 'server_data', 'filter_data.pkl'), 'rb') as file:
filter_data = pickle.load(file)
if request.method == 'GET':
return render_template('index_state.html')
elif request.method == 'POST':
state_name = request.form['state_data']
district_name = request.form['district_data']
date_name = request.form['date_data']
daily = request.form['daily_data']
if state_name == 'All':
if date_name == 'All':
if daily == 'Yes':
df = filter_data.get_dataset_state(
state='Whole', daily=True)
saveFile(
df, f'{state_name}-{district_name}-{date_name}-{daily}')
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
else:
df = filter_data.get_dataset_state(
state='Whole', daily=False)
saveFile(
df, f'{state_name}-{district_name}-{date_name}-{daily}')
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
else:
if daily == 'Yes':
df_confirmed = filter_data.get_count_by_date(
by='confirmed', date=date_name)
df_recovered = filter_data.get_count_by_date(
by='recovered', date=date_name)
df_death = filter_data.get_count_by_date(
by='death', date=date_name)
df = pd.merge(df_confirmed, df_recovered, on='STATE/UT')
df = pd.merge(df, df_death, on='STATE/UT')
df.columns = ['STATE/UT',
'Confirmed', 'Recovered', 'Death']
saveFile(
df, f'{state_name}-{district_name}-({date_name.replace("/","-")})-{daily}')
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
else:
df = filter_data.get_dataset_by_date(date=date_name)
saveFile(
df, f'{state_name}-{district_name}-({date_name.replace("/","-")})-{daily}')
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
else:
if district_name == 'All':
if date_name == 'All':
if daily == 'Yes':
df = filter_data.get_dataset_state(
state=state_name, daily=True).reset_index()
df.columns = ['Date', 'Confirmed',
'Recovered', 'Death']
saveFile(
df, f'{state_name}-{district_name}-{date_name}-{daily}')
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
else:
df = filter_data.get_dataset_state(
state=state_name, daily=False)
saveFile(
df, f'{state_name}-{district_name}-{date_name}-{daily}')
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
else:
if daily == 'No':
df = filter_data.get_district_data_by_date(
state_name, date=date_name)
saveFile(
df, f'{state_name}-{district_name}-({date_name.replace("/","-")})-{daily}')
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
else:
df = filter_data.get_district_data_by_date(
state_name, date=date_name, daily=True)
saveFile(
df, f'{state_name}-{district_name}-({date_name.replace("/","-")})-{daily}')
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
else:
if date_name == 'All':
if daily == 'Yes':
df = filter_data.get_district_data_by_date(
district_name, daily=True)
saveFile(
df, f'{state_name}-{district_name}-{date_name}-{daily}')
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
else:
df = filter_data.get_district_data_by_date(
district_name, daily=False)
saveFile(
df, f'{state_name}-{district_name}-{date_name}-{daily}')
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
else:
if daily == 'Yes':
df = filter_data.get_district_data_by_date(
district_name, date=date_name, daily=True)
saveFile(
df, f'{state_name}-{district_name}-({date_name.replace("/","-")})-{daily}')
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
else:
df = filter_data.get_district_data_by_date(
district_name, date=date_name)
saveFile(
df, f'{state_name}-{district_name}-({date_name.replace("/","-")})-{daily}')
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
@app.route('/Demography', methods=['GET', 'POST'])
def demography():
with open(os.path.join(file_path, 'static', 'server_data', 'demo.pkl'), 'rb') as file:
demo = pickle.load(file)
if request.method == 'GET':
return render_template('index_demo.html')
elif request.method == 'POST':
place = request.form['place_data']
date = request.form['date_day']
try:
df = demo.demography(place=place.lower(),
date=date.lower()).reset_index()
for i, j in enumerate(df.dateannounced):
df.dateannounced[i] = datetime.strftime(j, '%d/%m/%Y')
if date != 'All':
tag = date.replace('/', '-')
saveFile(df, f'{place}-({tag})')
else:
saveFile(df, f'{place}-{date}')
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
except:
return 'None'
@app.route('/filter', methods=['POST'])
def disfilter():
with open(os.path.join(file_path, 'static', 'server_data', 'demo.pkl'), 'rb') as file:
demo = pickle.load(file)
place_name = request.form['place_data']
df = demo.raw[demo.raw['detectedstate'] == place_name]
if df.empty == False:
result_list = list(
np.unique([i for i in df['detecteddistrict']]))
return Response(json.dumps(result_list), mimetype='application/json')
else:
df = demo.raw[demo.raw['detecteddistrict'] == place_name]
result_list = list(np.unique([i for i in df['detectedcity']]))
return Response(json.dumps(result_list), mimetype='application/json')
@app.route('/Rank', methods=['GET', 'POST'])
def rank():
with open(os.path.join(file_path, 'static', 'server_data', 'filter_data.pkl'), 'rb') as file:
filter_data = pickle.load(file)
if request.method == 'GET':
return render_template('index_rank.html')
elif request.method == 'POST':
kind = request.form['kind_data']
num = int(request.form['number_data'])
by = request.form['by_data']
cumulative = request.form['cumulative_data']
date = request.form['date_data']
if date == 'None':
if cumulative == 'False':
state = request.form['state_data']
df = filter_data.rank(
num=num, by=by, kind=kind.lower(), cumulative=False)
df = pd.DataFrame(df[state]).reset_index()
df.columns = ['Date', f'{state}']
saveFile(df, f'{kind}-{num}-{by}-Daily-AllDate-{state}')
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
else:
df = filter_data.rank(
num=num, by=by, kind=kind.lower(), cumulative=True)
saveFile(df, f'{kind}-{num}-{by}-Cumulative-AllDate')
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
else:
if cumulative == 'False':
df = filter_data.rank(
num=num, by=by, kind=kind.lower(), cumulative=False, date=date)
saveFile(
df, f'{kind}-{num}-{by}-Daily-({date.replace("/","-")})')
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
else:
df = filter_data.rank(
num=num, by=by, kind=kind.lower(), cumulative=True, date=date)
saveFile(
df, f'{kind}-{num}-{by}-Cumulative-({date.replace("/","-")})')
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
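    # Hedged usage sketch for /Rank (field names match the handler above; the concrete
    # values are illustrative guesses, not the only accepted ones):
    #   requests.post('http://localhost:5000/Rank',
    #                 data={'kind_data': 'Confirmed', 'number_data': 5, 'by_data': 'Total',
    #                       'cumulative_data': 'False', 'date_data': 'None',
    #                       'state_data': 'Kerala'})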
@app.route('/analysis', methods=['GET'])
def analysis():
global init, demo, filter_data, tested_df
tested_df = pd.read_csv(os.path.join(
file_path, 'static', 'server_data', 'tested_data.csv'))
with open(os.path.join(file_path, 'static', 'server_data', 'demo.pkl'), 'rb') as file:
demo = pickle.load(file)
with open(os.path.join(file_path, 'static', 'server_data', 'init.pkl'), 'rb') as file:
init = pickle.load(file)
with open(os.path.join(file_path, 'static', 'server_data', 'filter_data.pkl'), 'rb') as file:
filter_data = pickle.load(file)
if request.method == 'GET':
return render_template('index_analysis.html')
@app.route('/tested', methods=['POST'])
def tested():
global init, demo, filter_data, tested_df
ratio = request.form['ratio_data']
state_list = init.csv_Confirmed['STATE/UT']
test_table = pd.DataFrame()
for i in state_list:
try:
table = tested_df[tested_df['state']
== i].replace('', pd.NaT).dropna()
values = table.tail(1)
test_table = pd.concat([test_table, values])
except:
pass
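        # 'F' below is the share of tests that came back positive, as a percentage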
test_table['F'] = (test_table['positive'].astype(
'int')/test_table['totaltested'].astype('int'))*100
tested_population = [init.csv_Confirmed[init.csv_Confirmed['STATE/UT']
== i]['POPULATION'].values[0] for i in test_table['state']]
tested_population = [int(i.replace(',', ''))
for i in tested_population]
test_table['Population'] = tested_population
if ratio == 'true':
dataset = test_table.sort_values(by='F', ascending=False)
dataset = dataset.to_json(orient='records')
return Response(dataset, mimetype='application/json')
else:
test_table['totaltested'] = test_table['totaltested'].astype(
'int')
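            # tests administered per 10,000 residents of the state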
test_table['test_pop_ratio'] = (
test_table['totaltested']/test_table['Population'])*10000
dataset = test_table.sort_values(
by='test_pop_ratio', ascending=False)
dataset = dataset.to_json(orient='records')
return Response(dataset, mimetype='application/json')
@app.route('/date/<endpoint>', methods=['GET', 'POST'])
def date(endpoint):
with open(os.path.join(file_path, 'static', 'server_data', 'init.pkl'), 'rb') as file:
init = pickle.load(file)
if endpoint == 'rank' or endpoint == 'demo':
if request.method == 'GET':
csv = init.csv_Confirmed
lastDate = csv.columns[-1]
return lastDate
elif endpoint == 'state':
if request.method == 'POST':
state_data = request.form['state']
df = init.district_data.district_data
state_df = df[df['State'] == state_data]
startdate = list(state_df.sort_values('Date')['Date'])[0]
enddate = datetime.strftime(datetime.strptime(list(state_df.sort_values('Date')[
'Date'])[-1], '%Y-%m-%d')-timedelta(1), '%Y-%m-%d')
# enddate=list(state_df.sort_values('Date')['Date'])[-1]
returnlist = [startdate, enddate]
return Response(json.dumps(returnlist), mimetype='application/json')
@app.route('/rec_dec_rate', methods=['POST'])
def rate():
with open(os.path.join(file_path, 'static', 'server_data', 'filter_data.pkl'), 'rb') as file:
filter_data = pickle.load(file)
rate_data = request.form['rate']
state_data = filter_data.get_dataset_state()
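        # rates below are percentages of each state's total confirmed cases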
state_data['recovery_rate'] = (
state_data['Total Recovered']/state_data['Total Confirmed'])*100
state_data['Death_rate'] = (
state_data['Total Death']/state_data['Total Confirmed'])*100
state_data = state_data.iloc[:-1, :].dropna()
if rate_data == 'recovered':
dataset = state_data.sort_values(
by='recovery_rate', ascending=False)
dataset = dataset.to_json(orient='records')
return Response(dataset, mimetype='application/json')
elif rate_data == 'deceased':
dataset = state_data.sort_values(
by='Death_rate', ascending=False)
dataset = dataset.to_json(orient='records')
return Response(dataset, mimetype='application/json')
@app.route('/statistics/<gtype>', methods=['GET', 'POST'])
def graphtype(gtype):
global init, demo, filter_data
if gtype == 'age_bar_chart':
age_data = demo.raw[demo.raw['agebracket'] != 'Unknown'].reset_index()
age_data = age_data.drop(columns=['index']).tail(1000)
age_series = list(age_data['agebracket'])
print(len(age_series))
for i, j in enumerate(age_series):
if '-' in j:
l = j.split('-')
age_series[i] = (float(l[0])+float(l[1]))/2
else:
try:
age_series[i] = float(j)
except:
age_series[i] = float(j.split(' ')[0])
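        # bucket the parsed ages into 10-year bins; each bin counts patients with age in [i, i+10)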
age_bar_dict = {'age': [], 'count': []}
for i in np.arange(0, max(age_series) + 1, 10):
temp_list = []
age_bar_dict['age'].append(f'({i},{i+10})')
for j in age_series:
if j >= i:
if j < i+10:
temp_list.append(j)
age_bar_dict['count'].append(
len(temp_list))
age_bar = pd.DataFrame(age_bar_dict)
age_bar = age_bar.to_json(orient='records')
return Response(age_bar, mimetype='application/json')
elif gtype == 'rolling_growth':
if request.method == 'POST':
rolling_value = int(request.form['rolling'])
state_data = init.count_conf
state_data = state_data[state_data['STATE/UT']
!= "Unassigned State"]
state_rolling_data = state_data.iloc[:-1,
1:].rolling(rolling_value, axis=1).mean()
state_rolling_data = pd.concat(
[state_data['STATE/UT'][:-1], state_rolling_data], axis=1)
state_rolling_data = state_rolling_data.fillna(0)
state_rolling_data = state_rolling_data.to_json(orient='records')
return Response(state_rolling_data, mimetype='application/json')
elif gtype == 'corona_graph':
if request.method == 'POST':
state_name = request.form['state_data'].replace('"', '')
daily = request.form['daily_data'].replace('"', '')
condition = request.form['condition_data'].replace('"', '')
last = int(request.form['silder_data'])
if last == 0:
if state_name == 'All':
if daily == 'Yes':
if condition == 'Confirmed':
df = init.count_conf
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
elif condition == 'Recovered':
df = init.count_recover
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
elif condition == 'Deceased':
df = init.count_death
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
elif condition == 'Together':
df_1 = init.count_conf.to_json(
orient='records')
df_2 = init.count_recover.to_json(
orient='records')
df_3 = init.count_death.to_json(
orient='records')
df_list = [df_1, df_2, df_3]
return Response(json.dumps(df_list), mimetype='application/json')
else:
if condition == 'Confirmed':
df = init.csv_Confirmed
df = df.drop(columns=['POPULATION', 'PER CAPITA INCOME (INR)', 'LONGITUDE',
'LATITUDE', 'CODE', 'AVERAGE TEMPERATURE (°C)'])
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
elif condition == 'Recovered':
df = init.csv_recovered
df = df.drop(columns=['POPULATION', 'LONGITUDE', 'PER CAPITA INCOME (INR)',
'LATITUDE', 'CODE', 'AVERAGE TEMPERATURE (°C)'])
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
elif condition == 'Deceased':
df = init.csv_Death
df = df.drop(columns=['POPULATION', 'PER CAPITA INCOME (INR) ', 'LONGITUDE',
'LATITUDE', 'CODE', 'AVERAGE TEMPERATURE (°C)'])
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
elif condition == 'Together':
df_1 = init.csv_Confirmed.drop(columns=['POPULATION', 'PER CAPITA INCOME (INR)', 'LONGITUDE',
'LATITUDE', 'CODE', 'AVERAGE TEMPERATURE (°C)'])
df_1 = df_1.to_json(orient='records')
df_2 = init.csv_recovered.drop(columns=['POPULATION', 'PER CAPITA INCOME (INR)', 'LONGITUDE',
'LATITUDE', 'CODE', 'AVERAGE TEMPERATURE (°C)'])
df_2 = df_2.to_json(orient='records')
df_3 = init.csv_Death.drop(columns=['POPULATION', 'PER CAPITA INCOME (INR) ', 'LONGITUDE',
'LATITUDE', 'CODE', 'AVERAGE TEMPERATURE (°C)'])
df_3 = df_3.to_json(orient='records')
df_list = [df_1, df_2, df_3]
return Response(json.dumps(df_list), mimetype='application/json')
else:
if daily == 'Yes':
if condition == 'Confirmed':
df = init.count_conf[init.count_conf['STATE/UT']
== state_name]
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
elif condition == 'Recovered':
df = init.count_recover[init.count_recover['STATE/UT']
== state_name]
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
elif condition == 'Deceased':
df = init.count_death[init.count_death['STATE/UT']
== state_name]
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
elif condition == 'Together':
df_1 = init.count_conf[init.count_conf['STATE/UT']
== state_name].to_json(orient='records')
df_2 = init.count_recover[init.count_recover['STATE/UT']
== state_name].to_json(orient='records')
df_3 = init.count_death[init.count_death['STATE/UT']
== state_name].to_json(orient='records')
df_list = [df_1, df_2, df_3]
return Response(json.dumps(df_list), mimetype='application/json')
else:
if condition == 'Confirmed':
df = init.csv_Confirmed[init.csv_Confirmed['STATE/UT']
== state_name]
df = df.drop(columns=['POPULATION', 'PER CAPITA INCOME (INR)', 'LONGITUDE',
'LATITUDE', 'CODE', 'AVERAGE TEMPERATURE (°C)'])
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
elif condition == 'Recovered':
df = init.csv_recovered[init.csv_recovered['STATE/UT']
== state_name]
df = df.drop(columns=['POPULATION', 'LONGITUDE', 'PER CAPITA INCOME (INR)',
'LATITUDE', 'CODE', 'AVERAGE TEMPERATURE (°C)'])
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
elif condition == 'Deceased':
df = init.csv_Death[init.csv_Death['STATE/UT']
== state_name]
df = df.drop(columns=['POPULATION', 'PER CAPITA INCOME (INR) ', 'LONGITUDE',
'LATITUDE', 'CODE', 'AVERAGE TEMPERATURE (°C)'])
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
elif condition == 'Together':
df_1 = init.csv_Confirmed[init.csv_Confirmed['STATE/UT'] == state_name].drop(columns=['POPULATION', 'PER CAPITA INCOME (INR)', 'LONGITUDE',
'LATITUDE', 'CODE', 'AVERAGE TEMPERATURE (°C)'])
df_1 = df_1.to_json(orient='records')
df_2 = init.csv_recovered[init.csv_recovered['STATE/UT'] == state_name].drop(columns=['POPULATION', 'PER CAPITA INCOME (INR)', 'LONGITUDE',
'LATITUDE', 'CODE', 'AVERAGE TEMPERATURE (°C)'])
df_2 = df_2.to_json(orient='records')
df_3 = init.csv_Death[init.csv_Death['STATE/UT'] == state_name].drop(columns=['POPULATION', 'PER CAPITA INCOME (INR) ', 'LONGITUDE',
'LATITUDE', 'CODE', 'AVERAGE TEMPERATURE (°C)'])
df_3 = df_3.to_json(orient='records')
df_list = [df_1, df_2, df_3]
return Response(json.dumps(df_list), mimetype='application/json')
else:
endDate = datetime.strptime(
init.csv_Confirmed.columns[-1], '%m/%d/%Y')
lastdate = datetime.strftime(endDate, '%d/%m/%Y')
startDate = datetime.strftime(
endDate-timedelta(last), '%d/%m/%Y')
if state_name == 'All':
if daily == 'Yes':
if condition == 'Confirmed':
df = filter_data.get_count_between_date(
startDate, lastdate, condition)
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
elif condition == 'Recovered':
df = filter_data.get_count_between_date(
startDate, lastdate, condition)
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
elif condition == 'Deceased':
df = filter_data.get_count_between_date(
startDate, lastdate, 'Death')
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
elif condition == 'Together':
df_1 = filter_data.get_count_between_date(
startDate, lastdate, 'Confirmed').to_json(orient='records')
df_2 = filter_data.get_count_between_date(
startDate, lastdate, 'Recovered').to_json(orient='records')
df_3 = filter_data.get_count_between_date(
startDate, lastdate, 'Death').to_json(orient='records')
df_list = [df_1, df_2, df_3]
return Response(json.dumps(df_list), mimetype='application/json')
else:
if condition == 'Confirmed':
df = filter_data.get_cum_dataset_between_date(
startDate, lastdate, 'Total Confirmed')
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
elif condition == 'Recovered':
df = filter_data.get_cum_dataset_between_date(
startDate, lastdate, 'Total Recovered')
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
elif condition == 'Deceased':
df = filter_data.get_cum_dataset_between_date(
startDate, lastdate, 'Total Death')
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
elif condition == 'Together':
df_1 = filter_data.get_cum_dataset_between_date(
startDate, lastdate, 'Total Confirmed').to_json(orient='records')
df_2 = filter_data.get_cum_dataset_between_date(
startDate, lastdate, 'Total Recovered').to_json(orient='records')
df_3 = filter_data.get_cum_dataset_between_date(
startDate, lastdate, 'Total Death').to_json(orient='records')
df_list = [df_1, df_2, df_3]
return Response(json.dumps(df_list), mimetype='application/json')
else:
if daily == 'Yes':
if condition == 'Confirmed':
df = filter_data.get_count_between_date(
startDate, lastdate, condition)
df = df[df['STATE/UT']
== state_name]
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
elif condition == 'Recovered':
df = filter_data.get_count_between_date(
startDate, lastdate, condition)
df = df[df['STATE/UT']
== state_name]
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
elif condition == 'Deceased':
df = filter_data.get_count_between_date(
startDate, lastdate, 'Death')
df = df[df['STATE/UT']
== state_name]
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
elif condition == 'Together':
df_1 = filter_data.get_count_between_date(
startDate, lastdate, 'Confirmed')
df_1 = df_1[df_1['STATE/UT']
== state_name].to_json(orient='records')
df_2 = filter_data.get_count_between_date(
startDate, lastdate, 'Recovered')
df_2 = df_2[df_2['STATE/UT']
== state_name].to_json(orient='records')
df_3 = filter_data.get_count_between_date(
startDate, lastdate, 'Death')
df_3 = df_3[df_3['STATE/UT']
== state_name].to_json(orient='records')
df_list = [df_1, df_2, df_3]
return Response(json.dumps(df_list), mimetype='application/json')
else:
if condition == 'Confirmed':
df = filter_data.get_cum_dataset_between_date(
startDate, lastdate, 'Total Confirmed')
df = df[df['STATE/UT']
== state_name]
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
elif condition == 'Recovered':
df = filter_data.get_cum_dataset_between_date(
startDate, lastdate, 'Total Recovered')
df = df[df['STATE/UT']
== state_name]
                            df = df.to_json(orient='records')
                            return Response(df, mimetype='application/json')
elif condition == 'Deceased':
df = filter_data.get_cum_dataset_between_date(
startDate, lastdate, 'Total Death')
df = df[df['STATE/UT']
== state_name]
df = df.to_json(orient='records')
return Response(df, mimetype='application/json')
elif condition == 'Together':
df_1 = filter_data.get_cum_dataset_between_date(
startDate, lastdate, 'Total Confirmed')
df_1 = df_1[df_1['STATE/UT'] == state_name]
df_1 = df_1.to_json(orient='records')
df_2 = filter_data.get_cum_dataset_between_date(
startDate, lastdate, 'Total Recovered')
df_2 = df_2[df_2['STATE/UT'] == state_name]
df_2 = df_2.to_json(orient='records')
df_3 = filter_data.get_cum_dataset_between_date(
startDate, lastdate, 'Total Death')
df_3 = df_3[df_3['STATE/UT'] == state_name]
df_3 = df_3.to_json(orient='records')
df_list = [df_1, df_2, df_3]
return Response(json.dumps(df_list), mimetype='application/json')
elif gtype == 'mapdata':
if request.method == 'POST':
with open(os.path.join(file_path, 'static', 'server_data', 'init.pkl'), 'rb') as file:
init = pickle.load(file)
reject_list = ['la', 'ld', 'tg', 'ut', 'un', 'tt']
dtype = request.form['dtype']
if dtype == 'confirmed':
conf = init.show_data(of='confirmed', daily=False)
df = conf[['CODE', f'{conf.columns[-1]}']]
df = pd.concat([df, pd.DataFrame({'CODE': [
'IN-UT'], f'{conf.columns[-1]}':[0]})]).reset_index().drop(columns=['index'])
elif dtype == 'recovered':
recover = init.show_data(of='recovered', daily=False)
df = recover[['CODE', f'{recover.columns[-1]}']]
df = pd.concat([df, pd.DataFrame({'CODE': [
'IN-UT'], f'{recover.columns[-1]}':[0]})]).reset_index().drop(columns=['index'])
elif dtype == 'death':
death = init.show_data(of='death', daily=False)
df = death[['CODE', f'{death.columns[-1]}']]
df = pd.concat([df, pd.DataFrame({'CODE': [
'IN-UT'], f'{death.columns[-1]}':[0]})]).reset_index().drop(columns=['index'])
elif dtype == 'active':
conf = init.show_data(of='confirmed', daily=False)
recover = init.show_data(of='recovered', daily=False)
death = init.show_data(of='death', daily=False)
active_series = conf[f'{conf.columns[-1]}'] - \
recover[f'{recover.columns[-1]}'] - \
death[f'{death.columns[-1]}']
                df = pd.concat([pd.DataFrame(conf['CODE'])
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import pickle
import sys
from distutils.version import LooseVersion
import pytest
import numpy as np
import pyarrow as pa
import pyarrow.tests.util as test_util
def test_schema_constructor_errors():
msg = ("Do not call Schema's constructor directly, use `pyarrow.schema` "
"instead")
with pytest.raises(TypeError, match=msg):
pa.Schema()
def test_type_integers():
dtypes = ['int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64']
for name in dtypes:
factory = getattr(pa, name)
t = factory()
assert str(t) == name
def test_type_to_pandas_dtype():
M8_ns = np.dtype('datetime64[ns]')
cases = [
(pa.null(), np.float64),
(pa.bool_(), np.bool_),
(pa.int8(), np.int8),
(pa.int16(), np.int16),
(pa.int32(), np.int32),
(pa.int64(), np.int64),
(pa.uint8(), np.uint8),
(pa.uint16(), np.uint16),
(pa.uint32(), np.uint32),
(pa.uint64(), np.uint64),
(pa.float16(), np.float16),
(pa.float32(), np.float32),
(pa.float64(), np.float64),
(pa.date32(), M8_ns),
(pa.date64(), M8_ns),
(pa.timestamp('ms'), M8_ns),
(pa.binary(), np.object_),
(pa.binary(12), np.object_),
(pa.string(), np.object_),
(pa.list_(pa.int8()), np.object_),
# (pa.list_(pa.int8(), 2), np.object_), # TODO needs pandas conversion
]
for arrow_type, numpy_type in cases:
assert arrow_type.to_pandas_dtype() == numpy_type
@pytest.mark.pandas
def test_type_to_pandas_dtype_check_import():
# ARROW-7980
test_util.invoke_script('arrow_7980.py')
def test_type_list():
value_type = pa.int32()
list_type = pa.list_(value_type)
assert str(list_type) == 'list<item: int32>'
field = pa.field('my_item', pa.string())
l2 = pa.list_(field)
assert str(l2) == 'list<my_item: string>'
def test_type_comparisons():
val = pa.int32()
assert val == pa.int32()
assert val == 'int32'
assert val != 5
def test_type_for_alias():
cases = [
('i1', pa.int8()),
('int8', pa.int8()),
('i2', pa.int16()),
('int16', pa.int16()),
('i4', pa.int32()),
('int32', pa.int32()),
('i8', pa.int64()),
('int64', pa.int64()),
('u1', pa.uint8()),
('uint8', pa.uint8()),
('u2', pa.uint16()),
('uint16', pa.uint16()),
('u4', pa.uint32()),
('uint32', pa.uint32()),
('u8', pa.uint64()),
('uint64', pa.uint64()),
('f4', pa.float32()),
('float32', pa.float32()),
('f8', pa.float64()),
('float64', pa.float64()),
('date32', pa.date32()),
('date64', pa.date64()),
('string', pa.string()),
('str', pa.string()),
('binary', pa.binary()),
('time32[s]', pa.time32('s')),
('time32[ms]', pa.time32('ms')),
('time64[us]', pa.time64('us')),
('time64[ns]', pa.time64('ns')),
('timestamp[s]', pa.timestamp('s')),
('timestamp[ms]', pa.timestamp('ms')),
('timestamp[us]', pa.timestamp('us')),
('timestamp[ns]', pa.timestamp('ns')),
('duration[s]', pa.duration('s')),
('duration[ms]', pa.duration('ms')),
('duration[us]', pa.duration('us')),
('duration[ns]', pa.duration('ns')),
]
for val, expected in cases:
assert pa.type_for_alias(val) == expected
def test_type_string():
t = pa.string()
assert str(t) == 'string'
def test_type_timestamp_with_tz():
tz = 'America/Los_Angeles'
t = pa.timestamp('ns', tz=tz)
assert t.unit == 'ns'
assert t.tz == tz
def test_time_types():
t1 = pa.time32('s')
t2 = pa.time32('ms')
t3 = pa.time64('us')
t4 = pa.time64('ns')
assert t1.unit == 's'
assert t2.unit == 'ms'
assert t3.unit == 'us'
assert t4.unit == 'ns'
assert str(t1) == 'time32[s]'
assert str(t4) == 'time64[ns]'
with pytest.raises(ValueError):
pa.time32('us')
with pytest.raises(ValueError):
pa.time64('s')
def test_from_numpy_dtype():
cases = [
(np.dtype('bool'), pa.bool_()),
(np.dtype('int8'), pa.int8()),
(np.dtype('int16'), pa.int16()),
(np.dtype('int32'), pa.int32()),
(np.dtype('int64'), pa.int64()),
(np.dtype('uint8'), pa.uint8()),
(np.dtype('uint16'), pa.uint16()),
(np.dtype('uint32'), pa.uint32()),
(np.dtype('float16'), pa.float16()),
(np.dtype('float32'), pa.float32()),
(np.dtype('float64'), pa.float64()),
(np.dtype('U'), pa.string()),
(np.dtype('S'), pa.binary()),
(np.dtype('datetime64[s]'), pa.timestamp('s')),
(np.dtype('datetime64[ms]'), pa.timestamp('ms')),
(np.dtype('datetime64[us]'), pa.timestamp('us')),
(np.dtype('datetime64[ns]'), pa.timestamp('ns')),
(np.dtype('timedelta64[s]'), pa.duration('s')),
(np.dtype('timedelta64[ms]'), pa.duration('ms')),
(np.dtype('timedelta64[us]'), pa.duration('us')),
(np.dtype('timedelta64[ns]'), pa.duration('ns')),
]
for dt, pt in cases:
result = pa.from_numpy_dtype(dt)
assert result == pt
# Things convertible to numpy dtypes work
assert pa.from_numpy_dtype('U') == pa.string()
assert pa.from_numpy_dtype(np.unicode) == pa.string()
assert pa.from_numpy_dtype('int32') == pa.int32()
assert pa.from_numpy_dtype(bool) == pa.bool_()
with pytest.raises(NotImplementedError):
pa.from_numpy_dtype(np.dtype('O'))
with pytest.raises(TypeError):
pa.from_numpy_dtype('not_convertible_to_dtype')
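def test_from_numpy_dtype_roundtrip_sketch():
    # Minimal round-trip sketch, assuming only the conversions exercised above:
    # to_pandas_dtype() and from_numpy_dtype() should invert each other for
    # plain fixed-width types.
    for t in [pa.int32(), pa.float64(), pa.bool_()]:
        assert pa.from_numpy_dtype(t.to_pandas_dtype()) == t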
def test_schema():
fields = [
pa.field('foo', pa.int32()),
pa.field('bar', pa.string()),
pa.field('baz', pa.list_(pa.int8()))
]
sch = pa.schema(fields)
assert sch.names == ['foo', 'bar', 'baz']
assert sch.types == [pa.int32(), pa.string(), pa.list_(pa.int8())]
assert len(sch) == 3
assert sch[0].name == 'foo'
assert sch[0].type == fields[0].type
assert sch.field('foo').name == 'foo'
assert sch.field('foo').type == fields[0].type
assert repr(sch) == """\
foo: int32
bar: string
baz: list<item: int8>
child 0, item: int8"""
with pytest.raises(TypeError):
pa.schema([None])
def test_schema_to_string_with_metadata():
lorem = """\
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla accumsan vel
turpis et mollis. Aliquam tincidunt arcu id tortor blandit blandit. Donec
eget leo quis lectus scelerisque varius. Class aptent taciti sociosqu ad
litora torquent per conubia nostra, per inceptos himenaeos. Praesent
faucibus, diam eu volutpat iaculis, tellus est porta ligula, a efficitur
turpis nulla facilisis quam. Aliquam vitae lorem erat. Proin a dolor ac libero
dignissim mollis vitae eu mauris. Quisque posuere tellus vitae massa
pellentesque sagittis. Aenean feugiat, diam ac dignissim fermentum, lorem
sapien commodo massa, vel volutpat orci nisi eu justo. Nulla non blandit
sapien. Quisque pretium vestibulum urna eu vehicula."""
# ARROW-7063
my_schema = pa.schema([pa.field("foo", "int32", False,
metadata={"key1": "value1"}),
pa.field("bar", "string", True,
metadata={"key3": "value3"})],
metadata={"lorem": lorem})
assert my_schema.to_string() == """\
foo: int32 not null
-- field metadata --
key1: 'value1'
bar: string
-- field metadata --
key3: 'value3'
-- schema metadata --
lorem: '""" + lorem[:65] + "' + " + str(len(lorem) - 65)
# Metadata that exactly fits
result = pa.schema([('f0', 'int32')],
metadata={'key': 'value' + 'x' * 62}).to_string()
assert result == """\
f0: int32
-- schema metadata --
key: 'valuexxxxxxxxxxxxxxxxxxxxxxxxxxxxx\
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'"""
assert my_schema.to_string(truncate_metadata=False) == """\
foo: int32 not null
-- field metadata --
key1: 'value1'
bar: string
-- field metadata --
key3: 'value3'
-- schema metadata --
lorem: '{}'""".format(lorem)
assert my_schema.to_string(truncate_metadata=False,
show_field_metadata=False) == """\
foo: int32 not null
bar: string
-- schema metadata --
lorem: '{}'""".format(lorem)
assert my_schema.to_string(truncate_metadata=False,
show_schema_metadata=False) == """\
foo: int32 not null
-- field metadata --
key1: 'value1'
bar: string
-- field metadata --
key3: 'value3'"""
assert my_schema.to_string(truncate_metadata=False,
show_field_metadata=False,
show_schema_metadata=False) == """\
foo: int32 not null
bar: string"""
def test_schema_from_tuples():
fields = [
('foo', pa.int32()),
('bar', pa.string()),
('baz', pa.list_(pa.int8())),
]
sch = pa.schema(fields)
assert sch.names == ['foo', 'bar', 'baz']
assert sch.types == [pa.int32(), pa.string(), pa.list_(pa.int8())]
assert len(sch) == 3
assert repr(sch) == """\
foo: int32
bar: string
baz: list<item: int8>
child 0, item: int8"""
with pytest.raises(TypeError):
pa.schema([('foo', None)])
def test_schema_from_mapping():
fields = OrderedDict([
('foo', pa.int32()),
('bar', pa.string()),
('baz', pa.list_(pa.int8())),
])
sch = pa.schema(fields)
assert sch.names == ['foo', 'bar', 'baz']
assert sch.types == [pa.int32(), pa.string(), pa.list_(pa.int8())]
assert len(sch) == 3
assert repr(sch) == """\
foo: int32
bar: string
baz: list<item: int8>
child 0, item: int8"""
fields = OrderedDict([('foo', None)])
with pytest.raises(TypeError):
pa.schema(fields)
def test_schema_duplicate_fields():
fields = [
pa.field('foo', pa.int32()),
pa.field('bar', pa.string()),
pa.field('foo', pa.list_(pa.int8())),
]
sch = pa.schema(fields)
assert sch.names == ['foo', 'bar', 'foo']
assert sch.types == [pa.int32(), pa.string(), pa.list_(pa.int8())]
assert len(sch) == 3
assert repr(sch) == """\
foo: int32
bar: string
foo: list<item: int8>
child 0, item: int8"""
assert sch[0].name == 'foo'
assert sch[0].type == fields[0].type
with pytest.warns(FutureWarning):
assert sch.field_by_name('bar') == fields[1]
with pytest.warns(FutureWarning):
assert sch.field_by_name('xxx') is None
with pytest.warns((UserWarning, FutureWarning)):
assert sch.field_by_name('foo') is None
def test_field_flatten():
f0 = pa.field('foo', pa.int32()).with_metadata({b'foo': b'bar'})
assert f0.flatten() == [f0]
f1 = pa.field('bar', pa.float64(), nullable=False)
ff = pa.field('ff', pa.struct([f0, f1]), nullable=False)
assert ff.flatten() == [
pa.field('ff.foo', pa.int32()).with_metadata({b'foo': b'bar'}),
pa.field('ff.bar', pa.float64(), nullable=False)] # XXX
# Nullable parent makes flattened child nullable
ff = pa.field('ff', pa.struct([f0, f1]))
assert ff.flatten() == [
pa.field('ff.foo', pa.int32()).with_metadata({b'foo': b'bar'}),
pa.field('ff.bar', pa.float64())]
fff = pa.field('fff', pa.struct([ff]))
assert fff.flatten() == [pa.field('fff.ff', pa.struct([f0, f1]))]
def test_schema_add_remove_metadata():
fields = [
pa.field('foo', pa.int32()),
pa.field('bar', pa.string()),
pa.field('baz', pa.list_(pa.int8()))
]
s1 = pa.schema(fields)
assert s1.metadata is None
metadata = {b'foo': b'bar', b'pandas': b'badger'}
s2 = s1.with_metadata(metadata)
assert s2.metadata == metadata
s3 = s2.remove_metadata()
assert s3.metadata is None
# idempotent
s4 = s3.remove_metadata()
assert s4.metadata is None
def test_schema_equals():
fields = [
pa.field('foo', pa.int32()),
pa.field('bar', pa.string()),
pa.field('baz', pa.list_(pa.int8()))
]
metadata = {b'foo': b'bar', b'pandas': b'badger'}
sch1 = pa.schema(fields)
sch2 = pa.schema(fields)
sch3 = pa.schema(fields, metadata=metadata)
sch4 = pa.schema(fields, metadata=metadata)
assert sch1.equals(sch2, check_metadata=True)
assert sch3.equals(sch4, check_metadata=True)
assert sch1.equals(sch3)
assert not sch1.equals(sch3, check_metadata=True)
assert not sch1.equals(sch3, check_metadata=True)
del fields[-1]
sch3 = pa.schema(fields)
assert not sch1.equals(sch3)
def test_schema_equals_propagates_check_metadata():
# ARROW-4088
schema1 = pa.schema([
pa.field('foo', pa.int32()),
pa.field('bar', pa.string())
])
schema2 = pa.schema([
pa.field('foo', pa.int32()),
pa.field('bar', pa.string(), metadata={'a': 'alpha'}),
])
assert not schema1.equals(schema2, check_metadata=True)
assert schema1.equals(schema2)
def test_schema_equals_invalid_type():
# ARROW-5873
schema = pa.schema([pa.field("a", pa.int64())])
for val in [None, 'string', pa.array([1, 2])]:
with pytest.raises(TypeError):
schema.equals(val)
def test_schema_equality_operators():
fields = [
pa.field('foo', pa.int32()),
pa.field('bar', pa.string()),
pa.field('baz', pa.list_(pa.int8()))
]
metadata = {b'foo': b'bar', b'pandas': b'badger'}
sch1 = pa.schema(fields)
sch2 = pa.schema(fields)
sch3 = pa.schema(fields, metadata=metadata)
sch4 = pa.schema(fields, metadata=metadata)
assert sch1 == sch2
assert sch3 == sch4
# __eq__ and __ne__ do not check metadata
assert sch1 == sch3
assert not sch1 != sch3
assert sch2 == sch4
# comparison with other types doesn't raise
assert sch1 != []
assert sch3 != 'foo'
def test_schema_get_fields():
fields = [
pa.field('foo', pa.int32()),
pa.field('bar', pa.string()),
pa.field('baz', pa.list_(pa.int8()))
]
schema = pa.schema(fields)
assert schema.field('foo').name == 'foo'
assert schema.field(0).name == 'foo'
assert schema.field(-1).name == 'baz'
with pytest.raises(KeyError):
schema.field('other')
with pytest.raises(TypeError):
schema.field(0.0)
with pytest.raises(IndexError):
schema.field(4)
def test_schema_negative_indexing():
fields = [
pa.field('foo', pa.int32()),
pa.field('bar', pa.string()),
pa.field('baz', pa.list_(pa.int8()))
]
schema = pa.schema(fields)
assert schema[-1].equals(schema[2])
assert schema[-2].equals(schema[1])
assert schema[-3].equals(schema[0])
with pytest.raises(IndexError):
schema[-4]
with pytest.raises(IndexError):
schema[3]
def test_schema_repr_with_dictionaries():
fields = [
pa.field('one', pa.dictionary(pa.int16(), pa.string())),
pa.field('two', pa.int32())
]
sch = pa.schema(fields)
expected = (
"""\
one: dictionary<values=string, indices=int16, ordered=0>
two: int32""")
assert repr(sch) == expected
def test_type_schema_pickling():
cases = [
pa.int8(),
pa.string(),
pa.binary(),
pa.binary(10),
pa.list_(pa.string()),
pa.map_(pa.string(), pa.int8()),
pa.struct([
pa.field('a', 'int8'),
pa.field('b', 'string')
]),
pa.union([
pa.field('a', pa.int8()),
pa.field('b', pa.int16())
], pa.lib.UnionMode_SPARSE),
pa.union([
pa.field('a', pa.int8()),
pa.field('b', pa.int16())
], pa.lib.UnionMode_DENSE),
pa.time32('s'),
pa.time64('us'),
pa.date32(),
pa.date64(),
pa.timestamp('ms'),
pa.timestamp('ns'),
pa.decimal128(12, 2),
pa.field('a', 'string', metadata={b'foo': b'bar'})
]
for val in cases:
roundtripped = pickle.loads(pickle.dumps(val))
assert val == roundtripped
fields = []
for i, f in enumerate(cases):
if isinstance(f, pa.Field):
fields.append(f)
else:
fields.append(pa.field('_f{}'.format(i), f))
schema = pa.schema(fields, metadata={b'foo': b'bar'})
roundtripped = pickle.loads(pickle.dumps(schema))
assert schema == roundtripped
def test_empty_table():
schema = pa.schema([
pa.field('f0', pa.int64()),
pa.field('f1', pa.dictionary(pa.int32(), pa.string())),
pa.field('f2', pa.list_(pa.list_(pa.int64()))),
])
table = schema.empty_table()
assert isinstance(table, pa.Table)
assert table.num_rows == 0
assert table.schema == schema
@pytest.mark.pandas
def test_schema_from_pandas():
import pandas as pd
inputs = [
list(range(10)),
pd.Categorical(list(range(10))),
['foo', 'bar', None, 'baz', 'qux'],
np.array([
'2007-07-13T01:23:34.123456789',
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'
], dtype='datetime64[ns]'),
]
if LooseVersion(pd.__version__) >= '1.0.0':
inputs.append(pd.array([1, 2, None], dtype=pd.Int32Dtype()))
for data in inputs:
        df = pd.DataFrame({'a': data})
class resource_database():
import pandas as pd
import ujson as json
from io import StringIO
from multiprocessing import Pool
from functools import partial
import ast
import os
import re
import glob
import textwrap
from contextlib import suppress
from pandas.errors import EmptyDataError
from selenium.common.exceptions import WebDriverException
import gnureadline
from prompt_toolkit import PromptSession
global tag_aliases,db,families,cat_files,wrapper,suppress,directory,id_to_cat,ps
global pd,json,StringIO,Pool,partial,ast,os,re,textwrap,WebDriverException,glob,EmptyDataError,suppress
#global open_cat,close_cat,close_all_cats,add_cat,add_cat_attributes
#global get_tag_aliases,add_alias,find,add_family,add_ref,save,end,show
ps = PromptSession()
wrapper = textwrap.TextWrapper(initial_indent=" ")
directory = os.path.dirname(os.path.realpath(__file__)) + '/'
with open(directory+'ID_to_cat.txt') as file:
id_to_cat = ast.literal_eval(file.read())
#print(var)
with open(directory+'tag_aliases.csv', 'r') as file:
tag_aliases = [set(line[:-1].split(',')) for line in file.readlines()]
with open(directory+'families.txt', 'r') as file:
families = json.loads(file.read())
#for key,lst in families.items():
# families[key] = set(lst)
cat_files = {}
for file_name in os.listdir(directory+"categories"):
if not file_name.startswith('.'):
cat_name = file_name[:-4]
cat_files[cat_name] = None
@classmethod
def get_ID_to_cat(self,ID):
global id_to_cat
if id_to_cat is None:
with open(directory+"ID_to_cat.txt","r") as file:
id_to_cat = ast.literal_eval(file.read())
try:
return id_to_cat[str(ID)]
except KeyError:
print("No ref with specified ID was found!")
return []
@classmethod
def add_ref_to_id_to_cat(self,ID,cats):
global id_to_cat
if id_to_cat is None:
with open(directory+"ID_to_cat.txt","r") as file:
id_to_cat = ast.literal_eval(file.read())
id_to_cat[str(ID)] = cats
def is_a_cat(cat):
return cat in cat_files
def get_input(query):
while True:
user_input = ps.prompt(query).lower()
lst_input = re.split("[, ]+",user_input)
if lst_input[0] == "show":
print()
attr = lst_input[1] if len(lst_input) > 1 else re.split("[, ]+",ps.prompt("Attribute to show: "))[0]
if attr == "tag":
cats = ""
while True:
cats = ps.prompt("Categories to search for tags (type 'all' to include all tags): ")
if cats == "show":
resource_database.show(["cats"])
else:
break
resource_database.show(["tags",re.split("[, ]+", cats)])
elif attr == "alias":
resource_database.show(["aliases"])
elif attr == "cat":
resource_database.show(["cats"])
elif attr == "fam":
resource_database.show(["families"])
else:
print("Field '"+attr+"' does not exist.")
"""
if lst_input[1] == "key":
query = ["keys",re.split("[, ]+",input(
"Categories to search for keys (type 'all' to include all keys): "))]
resource_database.show(query)
"""
print()
else:
return user_input.lower()
@classmethod
def SetParser(self,data):
return ast.literal_eval(data)
@classmethod
    def load_tags(self):
        # without a global declaration the assignment below would only bind a local name
        global tag_aliases
        with open(directory+'tag_aliases.csv', 'r') as file:
            tag_aliases = [set(line.split(',')) for line in file.readlines()]
@classmethod
    def load_families(self):
        global families
        with open(directory+'families.txt', 'r') as file:
            families = json.loads(file.read())
@classmethod
def open_cat(self,cat_name):
if cat_name in cat_files and cat_files[cat_name] is not None:
return True
try:
converters = {s: (lambda data : None if data=="" else ast.literal_eval(data)) for s in
['keys','tags']}
cat_files[cat_name] = pd.read_csv(directory + "categories/"+cat_name+".csv",
converters=converters,index_col=0)
return True
except (FileNotFoundError,EmptyDataError):
temp = self.get_input("Category does not exist. Create a new category? ")
if temp.lower() == "yes":
open(directory+"categories/"+cat_name+".csv","w+").close()
cat_files[cat_name] = pd.DataFrame()#columns=["tags","keys","summary",
#"family","ref type","date","ref"])
return True
else:
print("Okay, category not created.")
return False
@classmethod
def close_cat(self,cat_name):
        # only write back categories that are actually loaded in memory
        if cat_files[cat_name] is not None:
            cat_files[cat_name].to_csv(directory+"categories/"+cat_name+".csv")
            cat_files[cat_name] = None
@classmethod
def close_all_cats(self):
for cat_name in cat_files.keys():
            self.close_cat(cat_name)
@classmethod
def add_cat(self,cat_name,cat_attr=None):
if cat_name in cat_files:
return False
f = open(cat_name +".txt","w+")
f.write("{}")
cat_files[cat_name] = None
@classmethod
def edit_cat_attributes(self,cat_name,cat_attr):
self.open_cat(cat_name)
if isinstance(cat_attr, list):
cat_files[cat_name].extend(cat_attr)
else:
cat_files[cat_name].append(cat_attr)
@classmethod
def get_tag_aliases(self,tag):
tag = tag.lower()
for equiv in tag_aliases:
if tag in equiv:
return equiv
@classmethod
def add_alias(self,lst):
final ={i.lower() for i in lst}
for equiv in tag_aliases:
for l in lst:
if l in equiv:
final.update(equiv)
tag_aliases.remove(equiv)
break
tag_aliases.append(final)
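    # Hedged example (tag names are only illustrative):
    #   resource_database.add_alias(['ml', 'machine learning'])
    #   resource_database.get_tag_aliases('ML')   # -> {'ml', 'machine learning'}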
@classmethod
def query(self,cats=None,tags=None,families=None,ref_types=None):
if cats == None:
cats = cat_files.keys()
if tags != None:
tags = set(tags)
if ref_types != None:
ref_types = set(ref_types)
hit_ID = []
hits = []
hit_cat_names = []
for cat_name in cats:
if cat_name not in cat_files:
print("\nWarning: "+cat_name+" is not the name of a category.")
continue
if cat_files[cat_name] is None:
self.open_cat(cat_name)
for ID,ref_info in cat_files[cat_name].iterrows():
if ID not in hit_ID:
if tags == None or len(tags.intersection(ref_info['tags'])) > 0:
if families == None or ref_info['family'] in families:
if ref_types == None or ref_info['ref type'] in ref_types:
hit_ID.append(int(ID))
hit_cat_names.append(cat_name)
hits.append(ref_info)
return hits,hit_ID
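    # Note on query semantics above: a ref matches when it shares at least one tag with
    # `tags` (OR across tags) and, when given, its family / ref type appear in
    # `families` / `ref_types`; `cats=None` searches every category.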
@classmethod
def add_family(self,family_name,cats=[]):
#families[family_name] = set(cats)
families[family_name] = list(cats)
@classmethod
def add_ref(self,ref,cats=[],tags=None,keys=None,summary=None,family=None,ref_type=None):
if ref in ["download","downloads"]:
old_path = max(glob.iglob(os.path.expanduser('~/Downloads/*')), key=lambda a:os.stat(a).st_birthtime)
new_path = os.path.expanduser("~/resources/downloads/")+ os.path.basename(old_path)
os.rename(old_path,new_path)
ref = new_path
if ref_type == None:
if len(ref) > 3 and (ref[0:4] == "http" or ref[0:4] == "www."):
ref_type = "url"
elif " " not in ref and "/" in ref:
ref_type = "file"
else:
ref_type = "note"
if ref_type == "url":
if ref[0:4] != "www." and ref[0:4] != "http":
ref = "www." + ref
import datetime
t = datetime.date.today().strftime("%B %d, %Y")
if family != None:
if family not in families:
families[family] = list(cats)
else:
for c in cats:
if c not in families[family]:
families[family].append(c)
series = pd.Series({"tags":tags,"keys":keys,"summary":summary,"family":family,
"ref type":ref_type,"date":t,"ref":ref})
with open(directory+"max_ID.txt","r+") as file:
#a = "wow"
curr_max_ID = int(file.read().replace('\x00',''))
curr_max_ID += 1
file.truncate(0)
file.write(str(curr_max_ID))
series.name = str(curr_max_ID)
#with open("resources/ref_ID","a") as file:
# file.write("\n"+ID + ":" + cats)
for cat_name in cats:
self.open_cat(cat_name)
cat_files[cat_name] = cat_files[cat_name].append(series)
#cat_files[cat_name] = pd.DataFrame(series).transpose()#pd.DataFrame(series,columns=["tags","keys","summary",
# "family","type","date","ref"])
self.close_cat(cat_name)
self.add_ref_to_id_to_cat(curr_max_ID,cats)
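    # Hedged usage sketch (category/tag names are illustrative; passing 'download' as the
    # ref instead would move the newest file out of ~/Downloads as implemented above):
    #   resource_database.add_ref('www.example.com', cats=['papers'],
    #                             tags=['pandas'], summary='docs landing page')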
@classmethod
def save(self):
with open(directory+'tag_aliases.csv', 'w') as file:
for i in tag_aliases:
file.write(",".join(i) + "\n")
with open(directory+'families.txt','w') as file:
#file.truncate()
file.write(json.dumps(families))
for cat_name,df in cat_files.items():
if df is not None:
df.to_csv(directory+"categories/" + cat_name+".csv")
if id_to_cat is not None:
with open(directory+'ID_to_cat.txt','w') as file:
#file.truncate()
file.write(json.dumps(id_to_cat))
"""
with open('resources/resources.txt', 'w') as file:
file.truncate()
file.write("{")
for key,df in db.items():
file.write("\""+key+ "\":" + df.to_csv(sep="`"))
file.write("}")
"""
@classmethod
def end(self):
self.save()
exit()
@classmethod
def show(self,query):
#query = [q.lower() for q in query]
if query[0] in ["cats","cat","categories","category"]:
print(self.get_contents(list(cat_files.keys())))
elif query[0] == "alias" or query[0] == "aliases":
for t in tag_aliases:
print(t)
elif query[0] == "tags":
if query[1] == ["all"]:
query[1] = cat_files.keys()
tags = set()
failed_cats = []
for cat in query[1]:
self.open_cat(cat)
try:
tags.update({t for ref_tags in cat_files[cat].loc[:,"tags"] for t in ref_tags})
except KeyError:
failed_cats.append(cat)
self.close_cat(cat)
print("\n" + self.get_contents(tags))
if len(failed_cats) > 0:
print("\n Note that the following were not valid categories, and thus were skipped:")
print(wrapper.fill(self.get_contents(failed_cats)))
elif query[0] == "family" or query[0] == "families":
print(self.get_contents(families))
@classmethod
def get(self,num_hits="all",features=None,cats=None,tags=None,families=None,ref_types=None):
ordered_cols = ["date","family","keys","ref type","summary","tags","ref"]
display_columns = []
if features is None:
features = ["keys","tags","family","summary","ref"]
for i in ordered_cols:
if features == "all" or i in features:
display_columns.append(i)
hits,hit_IDs = self.query(cats,tags,families,ref_types)
#df = pd.concat(hits, axis=1, keys=[hit.name for hit in hits])
#df["cat"] = hit_cat_names
if len(hits) == 0:
return pd.DataFrame(),[]
df = pd.DataFrame.from_records(hits)
if len(df.index) > 0:
if len(df.index) == 1:
df = df.loc[:,display_columns].iloc[:len(display_columns)]
else:
df = df.loc[:,display_columns].iloc[:,:len(display_columns)]
#df = df.reindex(columns=ordered_cols)
pd.set_option('display.width', 200)
pd.set_option('display.max_columns',100)
pd.set_option('display.max_colwidth',60)
if num_hits == "all" or num_hits > len(df.index):
return df,hit_IDs
return df.head(num_hits),hit_IDs[:num_hits]
else:
return pd.DataFrame(),[]
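    # Hedged example (names are illustrative): returns a trimmed DataFrame plus the
    # matching ref IDs.
    #   df, ids = resource_database.get(num_hits=5, cats=['papers'], tags=['pandas'])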
@classmethod
def scroll(self,page_size=10,features=None,cats=None,tags=None,families=None,ref_types=None):
all_hits,all_hits_IDs = self.get(num_hits="all",features="all",cats=cats,tags=tags,families=families,ref_types=ref_types)
if len(all_hits.index) == 0:
print("\n\nNo matching refs.")
return
row_num = 0
driver = None
if features is None:
features = ["keys","tags","family","ref type"]
pd.set_option('display.width', 175)
pd.set_option('display.max_columns',100)
        pd.set_option('display.max_colwidth',1000)
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
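        # np.asarray(dr) yields datetime64 values, which TimedeltaIndex reinterprets as
        # offsets from the Unix epoch: 2016-01-15 is 16815 days after 1970-01-01 and
        # 2016-01-20 is 16820, hence the expected min/max below.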
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
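    def test_to_timedelta_basic_sketch(self):
        # Minimal sketch assuming only the imported to_timedelta helper: string and
        # integer inputs should agree once the unit is fixed.
        self.assertEqual(to_timedelta('1 days'), Timedelta(days=1))
        self.assertEqual(to_timedelta(86400, unit='s'), Timedelta(days=1))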
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
        # don't allow division by NaT (maybe we could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def test_ops(self):
td = Timedelta(10, unit='d')
self.assertEqual(-td, Timedelta(-10, unit='d'))
self.assertEqual(+td, Timedelta(10, unit='d'))
self.assertEqual(td - td, Timedelta(0, unit='ns'))
self.assertTrue((td - pd.NaT) is pd.NaT)
self.assertEqual(td + td, Timedelta(20, unit='d'))
self.assertTrue((td + pd.NaT) is pd.NaT)
self.assertEqual(td * 2, Timedelta(20, unit='d'))
self.assertTrue((td * pd.NaT) is pd.NaT)
self.assertEqual(td / 2, Timedelta(5, unit='d'))
self.assertEqual(abs(td), td)
self.assertEqual(abs(-td), td)
self.assertEqual(td / td, 1)
self.assertTrue((td / pd.NaT) is np.nan)
# invert
self.assertEqual(-td, Timedelta('-10d'))
self.assertEqual(td * -1, Timedelta('-10d'))
self.assertEqual(-1 * td, Timedelta('-10d'))
self.assertEqual(abs(-td), Timedelta('10d'))
# invalid
self.assertRaises(TypeError, lambda: Timedelta(11, unit='d') // 2)
# invalid multiply with another timedelta
self.assertRaises(TypeError, lambda: td * td)
# can't operate with integers
self.assertRaises(TypeError, lambda: td + 2)
self.assertRaises(TypeError, lambda: td - 2)
def test_ops_offsets(self):
td = Timedelta(10, unit='d')
self.assertEqual(Timedelta(241, unit='h'), td + pd.offsets.Hour(1))
self.assertEqual(Timedelta(241, unit='h'), pd.offsets.Hour(1) + td)
self.assertEqual(240, td / pd.offsets.Hour(1))
self.assertEqual(1 / 240.0, pd.offsets.Hour(1) / td)
self.assertEqual(Timedelta(239, unit='h'), td - pd.offsets.Hour(1))
self.assertEqual(Timedelta(-239, unit='h'), pd.offsets.Hour(1) - td)
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
self.assertRaises(TypeError, lambda: td + np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
self.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(-other + td, expected)
self.assertRaises(TypeError, lambda: td - np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td * np.array([2]), expected)
self.assert_numpy_array_equal(np.array([2]) * td, expected)
self.assertRaises(TypeError, lambda: td * other)
self.assertRaises(TypeError, lambda: other * td)
self.assert_numpy_array_equal(td / other,
np.array([1], dtype=np.float64))
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other / td,
np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
self.assert_numpy_array_equal(-td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other - td, expected)
def test_ops_series(self):
# regression test for GH8813
td = Timedelta('1 day')
other = pd.Series([1, 2])
expected = pd.Series(pd.to_timedelta(['1 day', '2 days']))
tm.assert_series_equal(expected, td * other)
tm.assert_series_equal(expected, other * td)
def test_ops_series_object(self):
# GH 13043
s = pd.Series([pd.Timestamp('2015-01-01', tz='US/Eastern'),
pd.Timestamp('2015-01-01', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timestamp('2015-01-02', tz='US/Eastern'),
pd.Timestamp('2015-01-02', tz='Asia/Tokyo')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('1 days'), exp)
tm.assert_series_equal(pd.Timedelta('1 days') + s, exp)
# object series & object series
s2 = pd.Series([pd.Timestamp('2015-01-03', tz='US/Eastern'),
pd.Timestamp('2015-01-05', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s2.dtype, object)
exp = pd.Series([pd.Timedelta('2 days'), pd.Timedelta('4 days')],
name='xxx')
tm.assert_series_equal(s2 - s, exp)
tm.assert_series_equal(s - s2, -exp)
s = pd.Series([pd.Timedelta('01:00:00'), pd.Timedelta('02:00:00')],
name='xxx', dtype=object)
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timedelta('01:30:00'), pd.Timedelta('02:30:00')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('00:30:00'), exp)
tm.assert_series_equal(pd.Timedelta('00:30:00') + s, exp)
def test_ops_notimplemented(self):
class Other:
pass
other = Other()
td = Timedelta('1 day')
self.assertTrue(td.__add__(other) is NotImplemented)
self.assertTrue(td.__sub__(other) is NotImplemented)
self.assertTrue(td.__truediv__(other) is NotImplemented)
self.assertTrue(td.__mul__(other) is NotImplemented)
self.assertTrue(td.__floordiv__(td) is NotImplemented)
def test_ops_error_str(self):
# GH 13624
tdi = TimedeltaIndex(['1 day', '2 days'])
for l, r in [(tdi, 'a'), ('a', tdi)]:
with tm.assertRaises(TypeError):
l + r
with tm.assertRaises(TypeError):
l > r
with tm.assertRaises(TypeError):
l == r
with tm.assertRaises(TypeError):
l != r
def test_timedelta_ops(self):
# GH4984
# make sure ops return Timedelta
s = Series([Timestamp('20130101') + timedelta(seconds=i * i)
for i in range(10)])
td = s.diff()
result = td.mean()
expected = to_timedelta(timedelta(seconds=9))
self.assertEqual(result, expected)
result = td.to_frame().mean()
self.assertEqual(result[0], expected)
result = td.quantile(.1)
expected = Timedelta(np.timedelta64(2600, 'ms'))
self.assertEqual(result, expected)
result = td.median()
expected = to_timedelta('00:00:09')
self.assertEqual(result, expected)
result = td.to_frame().median()
self.assertEqual(result[0], expected)
# GH 6462
# consistency in returned values for sum
result = td.sum()
expected = to_timedelta('00:01:21')
self.assertEqual(result, expected)
result = td.to_frame().sum()
self.assertEqual(result[0], expected)
# std
result = td.std()
expected = to_timedelta(Series(td.dropna().values).std())
self.assertEqual(result, expected)
result = td.to_frame().std()
self.assertEqual(result[0], expected)
# invalid ops
for op in ['skew', 'kurt', 'sem', 'prod']:
self.assertRaises(TypeError, getattr(td, op))
# GH 10040
# make sure NaT is properly handled by median()
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07')])
self.assertEqual(s.diff().median(), timedelta(days=4))
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07'),
Timestamp('2015-02-15')])
self.assertEqual(s.diff().median(), timedelta(days=6))
def test_timedelta_ops_scalar(self):
# GH 6808
base = pd.to_datetime('20130101 09:01:12.123456')
expected_add = pd.to_datetime('20130101 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta(10, unit='s'), timedelta(seconds=10),
np.timedelta64(10, 's'),
np.timedelta64(10000000000, 'ns'),
pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
base = pd.to_datetime('20130102 09:01:12.123456')
expected_add = pd.to_datetime('20130103 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta('1 day, 00:00:10'),
pd.to_timedelta('1 days, 00:00:10'),
timedelta(days=1, seconds=10),
np.timedelta64(1, 'D') + np.timedelta64(10, 's'),
pd.offsets.Day() + pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = DataFrame(['00:00:02']).apply(pd.to_timedelta)
dfn = DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
NA = np.nan
actual = scalar1 + scalar1
self.assertEqual(actual, scalar2)
actual = scalar2 - scalar1
self.assertEqual(actual, scalar1)
actual = s1 + s1
assert_series_equal(actual, s2)
actual = s2 - s1
assert_series_equal(actual, s1)
actual = s1 + scalar1
assert_series_equal(actual, s2)
actual = scalar1 + s1
assert_series_equal(actual, s2)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright Toolkit Authors
"""Base Statistic Calculation module."""
import pandas as pd
from abc import ABCMeta
from pydtk.statistics import calculator
class BaseStatisticCalculation(metaclass=ABCMeta):
"""Base Statistic Calculation."""
def __init__(self, target_span=60.0, sync_timestamps=False):
"""Initialize Base Statistics Calculation class.
Args:
target_span (float): interval of statistics calculation
sync_timestamps (bool): if True, the output timestamps will
start from 'timestamp // span * span'
"""
self.target_span = target_span
self.sync_timestamps = sync_timestamps
def _get_calculator(self, dtype):
"""Get calculator by data type.
Args:
dtype (string): dtype of ndarray
Returns:
dtype_calculator (object): calculator class
"""
kwargs = {'target_span': self.target_span, 'sync_timestamps': self.sync_timestamps}
if "bool" in dtype:
dtype_calculator = getattr(calculator, "BoolCalculator")(**kwargs)
else:
dtype_calculator = getattr(calculator, "FloatCalculator")(**kwargs)
return dtype_calculator
def calculate(self, timestamps, data, operation):
"""Divide and calculate statistics of divided data.
Args:
timestamps (ndarray): timestamps [sec]
data (ndarray): input data
operation (str): operation
Returns:
index_timestamps (ndarray): timestamps [sec]
stat_data (ndarray): mean of input data
"""
self.calculator = self._get_calculator(str(data.dtype))
index_timestamps, stat_data = getattr(self.calculator, operation)(timestamps, data)
return index_timestamps, stat_data
def mean(self, timestamps, data):
"""Divide and return means of divided data."""
return self.calculate(timestamps, data, "mean")
def max(self, timestamps, data):
"""Divide and return maximum of divided data."""
return self.calculate(timestamps, data, "max")
def min(self, timestamps, data):
"""Divide and return minimum of divided data."""
return self.calculate(timestamps, data, "min")
def count(self, timestamps, data):
"""Divide and return count of True in divided data."""
return self.calculate(timestamps, data, "count")
def statistic_tables(self, timestamps, data, columns):
"""Make statistic tables.
Args:
timestamps (ndarray): timestamps [sec]
data (ndarray): input data
columns (str): columns of table
Returns:
df_dict (dict): dict includes statistic DataFrames
"""
self.calculator = self._get_calculator(str(data.dtype))
for i, operation in enumerate(self.calculator.operations):
index_timestamps, stat_data = self.calculate(timestamps, data, operation)
time_df = pd.DataFrame(data=index_timestamps, columns=["timestamp"])
if i == 0:
stat_df = time_df
if stat_data.ndim == 1:
stat_data = stat_data.reshape(-1, 1)
ope_columns = [column + "/" + operation for column in columns]
_stat_df = pd.DataFrame(data=stat_data, columns=ope_columns)
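# Usage sketch for the class above — a minimal example only, assuming
# pydtk.statistics.calculator exposes FloatCalculator/BoolCalculator with the
# mean/max/min/count operations referenced in _get_calculator().
import numpy as np

def _example_base_statistic_calculation():
    calc = BaseStatisticCalculation(target_span=60.0, sync_timestamps=True)
    timestamps = np.arange(0.0, 300.0, 1.0)                     # 5 minutes at 1 Hz
    values = np.sin(timestamps / 30.0).astype(np.float64)
    span_starts, span_means = calc.mean(timestamps, values)     # one value per 60 s span
    _, span_maxima = calc.max(timestamps, values)
    return span_starts, span_means, span_maxima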
import pandas as pd
from math import sqrt
from scipy.stats import norm
from copy import deepcopy
import utils
import global_vars as gv
import weights_corr2_4 as wnc
from agg_sensitivities import k_delta, k_vega, k_curvature
class MarginByRiskClass:
def __init__(self, crif, calculation_currency):
self.crif = crif
self.results = gv.dict_margin_by_risk_class
self.calculation_currency = calculation_currency
self.list_risk_types = utils.unique_list(self.crif, 'RiskType')
# Delta Margin for Rates Risk Classes Only (Risk_IRCurve, Risk_Inflation, Risk_XCcyBasis)
def IRDeltaMargin(self):
updates = deepcopy(gv.dict_margin_by_risk_class)
# Skip any risk types other than rates
if ('Risk_IRCurve' not in self.list_risk_types) and \
('Risk_Inflation' not in self.list_risk_types) and \
('Risk_XCcyBasis' not in self.list_risk_types):
return pd.DataFrame(updates)
else:
dict_CR = {}
list_K = []
list_S = []
crif = self.crif[(self.crif['RiskType'].isin(['Risk_IRCurve', 'Risk_Inflation', 'Risk_XCcyBasis']))]
currency_list = utils.unique_list(crif, 'Qualifier')
for currency in currency_list:
list_WS = []
tenor_K = []
index = []
# CRIF by currency
crif_currency = crif[(crif['Qualifier'] == currency)]
# Risk_XCcyBasis is not considered for the concentration risk factor(CR) calculation
crif_wo_xccybasis = crif_currency.drop(crif_currency[(crif_currency.RiskType == 'Risk_XCcyBasis')].index)
# Concentration Thresholds
T = wnc.T('Rates','Delta',currency=currency)
CR = utils.concentration_threshold(utils.sum_sensitivities(crif_wo_xccybasis), T)
dict_CR[currency] = CR
# Iteration over the rates risk type existing in the CRIF
list_rates_risk_types = utils.unique_list([risk_class for risk_class in crif_currency['RiskType'] if risk_class in ['Risk_IRCurve', 'Risk_Inflation', 'Risk_XCcyBasis']])
for risk_class in list_rates_risk_types:
# CRIF by risk type
crif_risk_class = crif_currency[crif_currency['RiskType'] == risk_class]
# Sensitivities Sum
sensitivities = utils.sum_sensitivities(crif_risk_class)
if risk_class == 'Risk_Inflation':
RW = wnc.inflation_rw
WS = RW * sensitivities * CR
list_WS.append(WS)
tenor_K.append('Inf')
index.append('Inf')
elif risk_class == 'Risk_XCcyBasis':
RW = wnc.ccy_basis_swap_spread_rw
WS = RW * sensitivities
list_WS.append(WS)
tenor_K.append('XCcy')
index.append('XCcy')
elif risk_class == 'Risk_IRCurve':
# Mapping sensitivity to tenor k
# e.g. 3m: 10000
dict_sensitivities = {}
# Curve types such as LIBOR3M, OIS, etc
subcurve_list = utils.unique_list(crif_risk_class, 'Label2')
for subcurve in subcurve_list:
# CRIF by curve
crif_subcurve = crif_risk_class[crif_risk_class['Label2']==subcurve]
dict_sensitivities_tenor = {}
# Iteration over tenors
for tenor in utils.tenor_list(crif_subcurve):
# CRIF by tenor
crif_tenor = crif_subcurve[crif_subcurve['Label1']==tenor]
# Sensitivities by tenor
dict_sensitivities_tenor[tenor] = utils.sum_sensitivities(crif_tenor)
dict_sensitivities[subcurve] = dict_sensitivities_tenor
# Ultimately, it is stored like,
# {'Libor3m': {'1m': 32, '3m': 64},
# 'OIS': {'2Y': 128, '5Y': 256}}
#Regular Volatility
if currency in wnc.reg_vol_ccy_bucket:
RW = wnc.reg_vol_rw[tenor]
#Low Volatility
elif currency in wnc.low_vol_ccy_bucket:
RW = wnc.low_vol_rw[tenor]
#High Volatility
else:
RW = wnc.high_vol_rw[tenor]
s = dict_sensitivities[subcurve][tenor]
WS = RW * s * CR
list_WS.append(WS)
tenor_K.append(tenor)
index.append(subcurve)
K = k_delta('Rates',list_WS,tenor=tenor_K,index=index,calculation_currency=self.calculation_currency)
list_K.append(K)
S_b = max(min(sum(list_WS),K),-K)
list_S.append(S_b)
K_squared_sum = sum([x**2 for x in list_K])
for i in range(len(currency_list)):
for j in range(len(currency_list)):
if i == j:
continue
else:
currency_b = currency_list[i]
currency_c = currency_list[j]
g = min(dict_CR[currency_b], dict_CR[currency_c]) / max(dict_CR[currency_b], dict_CR[currency_c])
if len(currency_list) > 1:
gamma = wnc.ir_gamma_diff_ccy
else:
gamma = 1
S1 = list_S[i]
S2 = list_S[j]
K_squared_sum += gamma * S1 * S2 * g
updates['Rates']['Delta'] += sqrt(K_squared_sum)
return pd.DataFrame(updates)
def DeltaMargin(self):
updates = deepcopy(gv.dict_margin_by_risk_class)
if (('Risk_FX' not in self.list_risk_types) and \
('Risk_CreditQ' not in self.list_risk_types) and \
('Risk_CreditNonQ' not in self.list_risk_types) and \
('Risk_Equity' not in self.list_risk_types) and \
('Risk_Commodity' not in self.list_risk_types)):
return pd.DataFrame(updates)
else:
allowed_risk_classes = ['Risk_FX','Risk_CreditQ','Risk_CreditNonQ','Risk_Equity','Risk_Commodity']
list_risk_classes = [risk_class for risk_class in self.list_risk_types if risk_class in allowed_risk_classes]
for risk_class in list_risk_classes:
K_Res = 0
list_K = []
list_S = []
# FX
if risk_class == 'Risk_FX':
list_WS = []
list_CR = []
crif_fx = self.crif[(self.crif['RiskType'] == risk_class)]
currency_list = utils.unique_list(crif_fx, 'Qualifier')
for currency in currency_list:
crif_currency = crif_fx[crif_fx['Qualifier'] == currency]
T = wnc.T(risk_class,'Delta',currency=currency)
sensitivities = utils.sum_sensitivities(crif_currency)
CR = utils.concentration_threshold(sensitivities,T)
list_CR.append(CR)
is_given_currency = currency in wnc.high_vol_currency_group
is_calc_currency = self.calculation_currency in wnc.high_vol_currency_group
if currency == self.calculation_currency:
RW = 0
elif (is_given_currency==True) and (is_calc_currency==True):
RW = wnc.fx_rw['High']['High']
elif (is_given_currency==True) and (is_calc_currency==False):
RW = wnc.fx_rw['High']['Regular']
elif (is_given_currency==False) and (is_calc_currency==True):
RW = wnc.fx_rw['Regular']['High']
elif (is_given_currency==False) and (is_calc_currency==False):
RW = wnc.fx_rw['Regular']['Regular']
list_WS.append(sensitivities * CR * RW)
K = k_delta(risk_class,list_WS,list_CR=list_CR,bucket=currency_list,calculation_currency=self.calculation_currency)
updates['FX']['Delta'] += K
# CreditQ, CreditNonQ, Equity, Commodity
elif risk_class in ['Risk_CreditQ','Risk_CreditNonQ','Risk_Equity','Risk_Commodity']:
crif_others = self.crif[(self.crif['RiskType'] == risk_class)]
bucket_list = utils.bucket_list(crif_others)
for bucket in bucket_list:
if bucket == 0:
crif_bucket = crif_others[(crif_others['RiskType'] == risk_class) & (crif_others['Bucket'] == 'Residual')]
else:
crif_bucket = crif_others[(crif_others['RiskType'] == risk_class) & (crif_others['Bucket'].isin([bucket, str(bucket)]))]
# Risk Weight
RW = wnc.RW(risk_class, bucket)
# Concentration Thresholds
T = wnc.T(risk_class,'Delta',bucket=bucket)
list_WS = []
list_CR = []
index = []
qualifier_list = utils.unique_list(crif_bucket, 'Qualifier')
for qualifier in qualifier_list:
crif_qualifier = crif_bucket[crif_bucket['Qualifier'] == qualifier]
# Credit
if risk_class in ['Risk_CreditQ','Risk_CreditNonQ']:
sensitivities_CR = utils.sum_sensitivities(crif_qualifier)
CR = max(1,sqrt(abs(sensitivities_CR)/T))
list_label2 = utils.unique_list(crif_qualifier, 'Label2')
for label2 in list_label2:
crif_label2 = crif_qualifier[crif_qualifier['Label2'] == label2]
for tenor in utils.tenor_list(crif_qualifier):
crif_tenor = crif_label2[crif_label2['Label1'] == tenor]
sensitivities = utils.sum_sensitivities(crif_tenor)
list_WS.append(RW * sensitivities * CR)
list_CR.append(CR)
if bucket == 0:
index.append('Res')
else:
if risk_class == 'Risk_CreditQ':
index.append(qualifier)
elif risk_class == 'Risk_CreditNonQ':
index.append(label2)
# Equity, Commodity
elif risk_class in ['Risk_Equity','Risk_Commodity']:
sensitivities_EQCO = utils.sum_sensitivities(crif_qualifier)
CR = max(1,sqrt(abs(sensitivities_EQCO)/T))
list_CR.append(CR)
list_WS.append(RW * sensitivities_EQCO * CR)
K = k_delta(risk_class,list_WS,list_CR=list_CR,bucket=bucket,index=index,calculation_currency=self.calculation_currency)
if bucket == 0:
K_Res += K
else:
list_K.append(K)
S_b = max(min(sum(list_WS),K),-K)
list_S.append(S_b)
if 0 in bucket_list:
bucket_list.remove(0)
if risk_class == 'Risk_FX':
pass
else:
K_squared_sum = sum([x**2 for x in list_K])
for i, _ in enumerate(bucket_list):
for j, _ in enumerate(bucket_list):
if i == j:
continue
else:
bucket1 = bucket_list[i]
bucket2 = bucket_list[j]
if risk_class in gv.list_rates:
g = min(list_CR)/max(list_CR)
if len(self.currency_list()) > 1:
gamma = wnc.ir_gamma_diff_ccy
else:
gamma = 1
elif risk_class in gv.list_fx:
g = 1
gamma = wnc.FX_Corr[4]
elif risk_class in gv.list_credit_nonQ:
g = 1
gamma = wnc.gamma(risk_class)
else:
g = 1
gamma = wnc.gamma(risk_class,str(bucket1),str(bucket2))
S1 = list_S[i]
S2 = list_S[j]
K_squared_sum += gamma * S1 * S2 * g
if risk_class in gv.list_creditQ:
updates['CreditQ']['Delta'] += sqrt(K_squared_sum) + K_Res
elif risk_class in gv.list_credit_nonQ:
updates['CreditNonQ']['Delta'] += sqrt(K_squared_sum) + K_Res
elif risk_class in gv.list_equity:
updates['Equity']['Delta'] += sqrt(K_squared_sum) + K_Res
elif risk_class in gv.list_commodity:
updates['Commodity']['Delta'] += sqrt(K_squared_sum)
return pd.DataFrame(updates)
def IRVegaMargin(self):
updates = deepcopy(gv.dict_margin_by_risk_class)
list_K = []
dict_S = {}
dict_VCR = {}
allowed_risk_classes = [risk_class for risk_class in self.list_risk_types if risk_class in ['Risk_IRVol', 'Risk_InflationVol']]
if ('Risk_IRVol' not in self.list_risk_types) and \
('Risk_InflationVol' not in self.list_risk_types):
return pd.DataFrame(updates)
else:
for risk_class in allowed_risk_classes:
VRW = wnc.ir_vrw
currency_list = utils.unique_list(self.crif, 'Qualifier')
for currency in currency_list:
crif_currency = self.crif[(self.crif['RiskType'].isin(['Risk_IRVol','Risk_InflationVol'])) & (self.crif['Qualifier'] == currency)]
VR = []
index = []
sensitivities_CR = utils.sum_sensitivities(crif_currency)
VT = wnc.T('Rates','Vega',currency=currency)
VCR = max(1, sqrt(abs(sensitivities_CR)/VT))
dict_VCR[currency] = VCR
list_rates_risk_types = utils.unique_list([risk_class for risk_class in crif_currency['RiskType'] if risk_class in ['Risk_IRVol','Risk_InflationVol']])
for risk_class in list_rates_risk_types:
crif_riskClass = crif_currency[crif_currency['RiskType'] == risk_class]
tenor_list = utils.tenor_list(crif_riskClass)
for tenor in tenor_list:
crif_tenor = crif_riskClass[crif_riskClass['Label1'] == tenor]
sensitivities = utils.sum_sensitivities(crif_tenor)
VR.append(VRW * sensitivities * VCR)
if risk_class == 'Risk_IRVol':
index.append(tenor)
elif risk_class == 'Risk_InflationVol':
index.append('Inf')
K = k_vega('Rates',VR,index=index)
list_K.append(K)
S = max(min(sum(VR), K), -K)
dict_S[currency] = S
K_squared_sum = sum([K**2 for K in list_K])
for b in range(len(currency_list)):
for c in range(len(currency_list)):
if b == c:
continue
else:
currency_b = currency_list[b]
currency_c = currency_list[c]
g = min(dict_VCR[currency_b], dict_VCR[currency_c]) / max(dict_VCR[currency_b], dict_VCR[currency_c])
gamma = wnc.ir_gamma_diff_ccy
K_squared_sum += gamma * dict_S[currency_b] * dict_S[currency_c] * g
updates['Rates']['Vega'] += sqrt(K_squared_sum)
return pd.DataFrame(updates)
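# Usage sketch for MarginByRiskClass — a hedged example only: the CRIF column
# layout (RiskType, Qualifier, Bucket, Label1, Label2) and the utils /
# weights_corr2_4 / global_vars helpers are taken on faith from the code above,
# not from a verified SIMM reference implementation.
def _example_margin_by_risk_class(crif_df):
    engine = MarginByRiskClass(crif_df, calculation_currency='USD')
    ir_delta = engine.IRDeltaMargin()     # rates delta margin
    other_delta = engine.DeltaMargin()    # FX / credit / equity / commodity delta margin
    ir_vega = engine.IRVegaMargin()       # rates vega margin
    return ir_delta, other_delta, ir_vega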
from unittest import TestCase
import boto
import pandas as pd
from pandas.util.testing import assert_frame_equal
from dis_ds import parsing
import tempfile
import os
lines = ['bakerloo',
'central',
'circle',
'district',
'hammersmith-city',
'jubilee',
'metropolitan',
'northern',
'piccadilly',
'victoria',
'waterloo-city']
class TestStatusSeverities(TestCase):
def test_empty_file(self):
empty_file = ""
result = parsing.get_status_severities(empty_file)
self.assertEqual(result, {})
return
def test_single_severity(self):
single_severity_file = '[{"lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"}]}]'
result = parsing.get_status_severities(single_severity_file)
expected = {6: "Severe Delays"}
self.assertEqual(expected, result)
return
def test_multiple_severity(self):
multiple_severity_file = """
[{"lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"}]},
{"lineStatuses":[{"statusSeverity":10, "statusSeverityDescription":"Good Service"}]}]
"""
result = parsing.get_status_severities(multiple_severity_file)
expected = {6: 'Severe Delays', 10: 'Good Service'}
self.assertEqual(expected, result)
return
def test_multiple_severities_for_a_single_line(self):
multiple_severity_file = """
[{"lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"},
{"statusSeverity":9, "statusSeverityDescription":"Minor Delays"}]}]
"""
result = parsing.get_status_severities(multiple_severity_file)
expected = {6: 'Severe Delays', 9: 'Minor Delays'}
self.assertEqual(expected, result)
return
def test_real_file(self):
file = tempfile.NamedTemporaryFile(delete=False)
file_name = file.name
file.write(b'[{"lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"}]}]')
file.close()
result = parsing.get_status_severities(file_name)
expected = {6: "Severe Delays"}
self.assertEqual(expected, result)
os.unlink(file_name)
return
def test_multiple_files(self):
file1 = tempfile.NamedTemporaryFile(delete=False)
file2 = tempfile.NamedTemporaryFile(delete=False)
file_names = [file1.name, file2.name]
file1.write(b'[{"lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"}]}]')
file2.write(b'[{"lineStatuses":[{"statusSeverity":9, "statusSeverityDescription":"Minor Delays"}]}]')
file1.close()
file2.close()
result = parsing.get_severities_from_files(file_names)
expected = {6: "Severe Delays", 9: "Minor Delays"}
self.assertEqual(expected, result)
for fname in file_names:
os.unlink(fname)
return
class TestDateParsing(TestCase):
def test_parse_date(self):
filename = 'tfl_api_line_mode_status_tube_2015-02-24_12:03:14.json'
result = parsing.get_datetime_from_filename(filename)
expected = pd.datetime(2015, 2, 24, 12, 3, 14)
self.assertEqual(result, expected)
return
def test_parse_date_from_path(self):
filename = '/tmp/tfl_api_line_mode_status_tube_2015-02-24_12:03:14.json'
result = parsing.get_datetime_from_filename(filename)
expected = pd.datetime(2015, 2, 24, 12, 3, 14)
self.assertEqual(result, expected)
return
class TestParseFile(TestCase):
def setUp(self):
tempdir = tempfile.gettempdir()
filename = 'tfl_api_line_mode_status_tube_2015-02-24_12:03:14.json'
self.filepath = os.path.join(tempdir, filename)
self.file_datetime = pd.datetime(2015, 2, 24, 12, 3, 14)
self.empty_df = pd.DataFrame({l: None for l in lines}, index=[self.file_datetime]).astype(float)
def tearDown(self):
try:
os.unlink(self.filepath)
except FileNotFoundError:
pass
def test_empty_file(self):
with open(self.filepath, "w") as f:
f.write('')
result = parsing.parse_file(self.filepath)
self.assertTrue(result.equals(self.empty_df))
return
def test_single_line(self):
with open(self.filepath, "w") as f:
disruption = """
[{"id": "bakerloo", "lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"}]}]
"""
f.write(disruption)
result = parsing.parse_file(self.filepath)
line_values = self.empty_df
line_values['bakerloo'] = 6.0
assert_frame_equal(result, line_values)
return
def test_multiple_statuses_for_single_line(self):
with open(self.filepath, "w") as f:
disruption = """
[{"id": "bakerloo", "lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"},
{"statusSeverity":10, "statusSeverityDescription":"Good Service"}]}]
"""
f.write(disruption)
result = parsing.parse_file(self.filepath)
line_values = self.empty_df
line_values['bakerloo'] = 6.0
assert_frame_equal(result, line_values)
return
def test_multiple_statuses_reverse_order(self):
with open(self.filepath, "w") as f:
disruption = """
[{"id": "bakerloo", "lineStatuses":[{"statusSeverity":10, "statusSeverityDescription":"Good Service"},
{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"}]}]
"""
f.write(disruption)
result = parsing.parse_file(self.filepath)
line_values = self.empty_df
line_values['bakerloo'] = 6.0
assert_frame_equal(result, line_values)
return
def test_multiple_lines(self):
with open(self.filepath, "w") as f:
disruption = """
[{"id": "bakerloo", "lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"}]},
{"id": "circle", "lineStatuses":[{"statusSeverity":10, "statusSeverityDescription":"Good Service"}]}]
"""
f.write(disruption)
result = parsing.parse_file(self.filepath)
line_values = self.empty_df
line_values['bakerloo'] = 6.0
line_values['circle'] = 10.0
assert_frame_equal(result, line_values)
return
class TestParseMultipleFiles(TestCase):
def setUp(self):
tempdir = tempfile.gettempdir()
filename1 = 'tfl_api_line_mode_status_tube_2015-02-24_12:03:14.json'
filename2 = 'tfl_api_line_mode_status_tube_2015-02-25_12:00:00.json'
self.filepath1 = os.path.join(tempdir, filename1)
self.file_datetime1 = pd.datetime(2015, 2, 24, 12, 3, 14)
self.filepath2 = os.path.join(tempdir, filename2)
self.file_datetime2 = pd.datetime(2015, 2, 25, 12, 0, 0)
self.default_lines = pd.DataFrame({l: 6 for l in lines}, index=[self.file_datetime1, self.file_datetime2]).astype(float)
def tearDown(self):
try:
os.unlink(self.filepath1)
os.unlink(self.filepath2)
except FileNotFoundError:
pass
def test_multiple_files(self):
with open(self.filepath1, "w") as f:
disruption = """
[{"id": "bakerloo", "lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"}]},
{"id": "central", "lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"}]},
{"id": "circle", "lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"}]},
{"id": "district", "lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"}]},
{"id": "hammersmith-city", "lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"}]},
{"id": "jubilee", "lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"}]},
{"id": "metropolitan", "lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"}]},
{"id": "northern", "lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"}]},
{"id": "piccadilly", "lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"}]},
{"id": "victoria", "lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"}]},
{"id": "waterloo-city", "lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"}]}]
"""
f.write(disruption)
with open(self.filepath2, "w") as f:
disruption = """
[{"id": "bakerloo", "lineStatuses":[{"statusSeverity":10, "statusSeverityDescription":"Good Service"}]},
{"id": "central", "lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"}]},
{"id": "circle", "lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"}]},
{"id": "district", "lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"}]},
{"id": "hammersmith-city", "lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"}]},
{"id": "jubilee", "lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"}]},
{"id": "metropolitan", "lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"}]},
{"id": "northern", "lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"}]},
{"id": "piccadilly", "lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"}]},
{"id": "victoria", "lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"}]},
{"id": "waterloo-city", "lineStatuses":[{"statusSeverity":6, "statusSeverityDescription":"Severe Delays"}]}]
"""
f.write(disruption)
result = parsing.parse_file_list([self.filepath1, self.filepath2])
line_values = self.default_lines
line_values.ix[1]['bakerloo'] = 10
print('result = {}'.format(result.dtypes))
print('line_values = {}'.format(line_values.dtypes))
assert_frame_equal(result, line_values)
return
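# Usage sketch of the dis_ds.parsing API as exercised by the tests above; the
# function names and return shapes are inferred from the assertions, not from
# the parsing module's own documentation.
def _example_parse_disruption_files(json_paths):
    severities = parsing.get_severities_from_files(json_paths)   # {severity_code: description}
    per_line = parsing.parse_file_list(json_paths)                # DataFrame indexed by file datetime
    return severities, per_line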
class TestAWSParsing(TestCase):
testfile = 's3://pivotal-london-dis/tfl_api_line_mode_status_tube_2015-02-24_11:51:45.json'
def test_aws_connectivity(self):
newfile = pd.read_json(self.testfile)
import warnings
import pandas as pd
from sklearn import cluster, metrics, preprocessing
from sklearn.decomposition import PCA
import visualizer
ftr_full_name = 'full_name'
ftr_stars_count = 'stars_count'
ftr_forks_count = 'forks_count'
ftr_contributors_count = 'contributors_count'
ftr_commits_count = 'commits_count'
ftr_days_count = 'days_count'
ftr_is_org = 'is_org'
ftr_readme_path = 'readme_path'
ftr_topics = 'topics'
ftr_readme_topics = 'readme_topics'
remove_columns = [
# ftr_readme_topics,
# ftr_is_org,
ftr_full_name,
ftr_readme_path,
ftr_topics]
numeric_columns = [
ftr_stars_count,
ftr_forks_count,
ftr_contributors_count,
ftr_commits_count,
ftr_days_count]
CATEGORICAL_COLUMNS = [ftr_readme_topics]
random_state = 360
def encode_data(data):
encoder = preprocessing.LabelEncoder()
for category in CATEGORICAL_COLUMNS:
data[category] = encoder.fit_transform(data[category])
return data
def preprocess(data):
data.drop(remove_columns, axis=1, inplace=True)
data = encode_data(data)
# Linux has 'infinite' contributors, some sources estimate ~10k
data.replace('∞', 10000, inplace=True)
return data
def normalize(data):
scaler = preprocessing.MinMaxScaler()
with warnings.catch_warnings():
warnings.simplefilter('ignore')
# Fit on train and transform both
train_scaled = scaler.fit_transform(data)
data = pd.DataFrame(train_scaled, columns=data.columns)
return data
def silhouette_score(estimator, X):
labels = estimator.fit_predict(X)
score = metrics.silhouette_score(X, labels, metric='euclidean')
return score
def calculate_sse(data):
sse = {}
for k in range(2, 15):
model = cluster.KMeans(n_clusters=k, random_state=random_state).fit(data)
# Inertia: Sum of distances of samples to their closest cluster center
sse[k] = model.inertia_
visualizer.visualize_sse(sse)
def print_silhouette_score(data, labels, model_name):
score = metrics.silhouette_score(data, labels, metric='euclidean')
print(f'{model_name} Silhouette Coefficient: {score}')
return score
def clusterize(data, n_clusters=2):
calculate_sse(data)
# Hierarchical
model = cluster.AgglomerativeClustering(n_clusters=n_clusters)
predictions = model.fit_predict(data)
print_silhouette_score(data, predictions, 'hierarchical')
# K-means
model = cluster.KMeans(n_clusters=n_clusters, random_state=random_state)
predictions = model.fit_predict(data)
print_silhouette_score(data, predictions, 'kmeans')
return predictions
def reduce_dimensionality(data, n_components=3):
pca = PCA(random_state=random_state, svd_solver='full', whiten=True, n_components=n_components)
reduced = pca.fit_transform(data)
return pd.DataFrame(reduced)
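# Usage sketch chaining the helpers above — an illustrative pipeline only: the
# CSV layout (one repository per row with the ftr_* columns) is an assumption,
# and clusterize() will also render SSE plots through the visualizer module.
def _example_cluster_repositories(csv_path):
    raw = pd.read_csv(csv_path)
    prepared = normalize(preprocess(raw))
    reduced = reduce_dimensionality(prepared, n_components=3)
    labels = clusterize(reduced, n_clusters=4)
    return reduced.assign(cluster=labels)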
import numpy as np
import pandas as pd
from scipy import stats
from prostate_cancer_nomograms.statistical_analysis.base.base_statistics import BaseStatistics
class DescriptiveStatistics(BaseStatistics):
def __init__(self, dataframe: pd.DataFrame):
super().__init__(dataframe)
def get_descriptive_stats_dataframe_from_given_columns(self, list_of_columns: list) -> pd.DataFrame:
reduced_dataset = self.dataframe[list_of_columns]
descriptive_stats_dataframe = reduced_dataset.describe().transpose().round(decimals=1)
descriptive_stats_dataframe.insert(loc=0, column="Variable", value=descriptive_stats_dataframe.index)
return descriptive_stats_dataframe
@staticmethod
def _get_p_value_from_mann_whitney_u_test(
column_name: str,
negative_outcome_dataframe: pd.DataFrame,
positive_outcome_dataframe: pd.DataFrame
) -> float:
_, p_value = stats.mannwhitneyu(
x=negative_outcome_dataframe[column_name].dropna(),
y=positive_outcome_dataframe[column_name].dropna()
)
return p_value
@staticmethod
def _get_dataframe_with_strings_converted_to_numbers_in_given_column(
column_name: str,
dataframe: pd.DataFrame
) -> pd.DataFrame:
if dataframe[column_name].dtype == object:
numeric_value_mask = [value.replace(".", "", 1).isdigit() for value in dataframe[column_name].values]
if any(numeric_value_mask):
# element-wise negation: plain `not` on a list collapses to a single bool
dataframe[column_name].values[np.logical_not(numeric_value_mask)] = np.nan
dataframe[column_name].values[numeric_value_mask] = [
float(value) for value in dataframe[column_name].values[numeric_value_mask]
]
dataframe[column_name] = pd.to_numeric(dataframe[column_name], errors='coerce')
else:
pass
else:
pass
return dataframe
def _get_dataframes_subset_from_given_columns(self, list_of_columns: list, outcome: str):
self.outcome = outcome
negative_outcome_dataframe_subset = self.outcome_specific_dataframes.negative_outcome_dataframe[list_of_columns]
positive_outcome_dataframe_subset = self.outcome_specific_dataframes.positive_outcome_dataframe[list_of_columns]
for column in list_of_columns:
negative_outcome_dataframe_subset = self._get_dataframe_with_strings_converted_to_numbers_in_given_column(
column_name=column,
dataframe=negative_outcome_dataframe_subset
)
positive_outcome_dataframe_subset = self._get_dataframe_with_strings_converted_to_numbers_in_given_column(
column_name=column,
dataframe=positive_outcome_dataframe_subset
)
outcome_specific_dataframes = self.OutcomeDataFrames(
negative_outcome_dataframe=negative_outcome_dataframe_subset,
positive_outcome_dataframe=positive_outcome_dataframe_subset,
)
return outcome_specific_dataframes
def get_descriptive_stats_dataframe_from_specific_outcome(
self,
list_of_columns: list,
outcome: str
) -> pd.DataFrame:
outcome_specific_dataframes = self._get_dataframes_subset_from_given_columns(
list_of_columns=list_of_columns,
outcome=outcome
)
negative_outcome_dataframe = outcome_specific_dataframes.negative_outcome_dataframe
positive_outcome_dataframe = outcome_specific_dataframes.positive_outcome_dataframe
stats_negative_outcome = negative_outcome_dataframe.describe().transpose().round(decimals=2).reset_index()
stats_positive_outcome = positive_outcome_dataframe.describe().transpose().round(decimals=2).reset_index()
stats_negative_outcome.insert(
loc=0,
column='Level',
value=self.outcome_specific_dataframes_information.value_of_negative_outcome
)
stats_positive_outcome.insert(
loc=0,
column='Level',
value=self.outcome_specific_dataframes_information.value_of_positive_outcome
)
concat_df = pd.concat([stats_negative_outcome, stats_positive_outcome]).sort_index().set_index('index')
concat_df.index = ["" if idx % 2 != 0 else label for idx, label in enumerate(concat_df.index)]
concat_df.insert(loc=0, column="Variable", value=concat_df.index)
p_values = []
for idx, label in enumerate(concat_df.index):
if idx % 2 != 0:
p_value = ""
else:
p_value = self._get_p_value_from_mann_whitney_u_test(
column_name=label,
negative_outcome_dataframe=negative_outcome_dataframe,
positive_outcome_dataframe=positive_outcome_dataframe
)
p_values.append(p_value)
concat_df["p-value"] = p_values
return concat_df
def _get_count_dataframe(self, variable_name, outcome_specific: bool = False) -> pd.DataFrame:
if outcome_specific:
data = [
self.negative_outcome_dataframe[variable_name].value_counts(),
self.positive_outcome_dataframe[variable_name].value_counts()
]
count_dataframe_int: pd.DataFrame(dtype=int) = pd.concat(data, axis=1).fillna(0).applymap(int)
count_dataframe_str: pd.DataFrame(dtype=str) = count_dataframe_int.applymap(str)
for column_idx, _ in enumerate(count_dataframe_int.columns):
column_sum = count_dataframe_int.iloc[:, column_idx].sum()
count_dataframe_str.iloc[:, column_idx] = count_dataframe_str.iloc[:, column_idx] + f"/{column_sum}"
else:
count_dataframe_int = self.dataframe[variable_name].value_counts().fillna(0).apply(int)
count_dataframe_str: pd.DataFrame(dtype=str) = count_dataframe_int.apply(str)
column_sum = count_dataframe_int.sum()
count_dataframe_str = count_dataframe_str + f"/{column_sum}"
return count_dataframe_str
def _get_percentage_dataframe(self, variable_name, outcome_specific: bool = False) -> pd.DataFrame:
if outcome_specific:
data = [
round(self.negative_outcome_dataframe[variable_name].value_counts(normalize=True)*100, ndigits=1),
round(self.positive_outcome_dataframe[variable_name].value_counts(normalize=True)*100, ndigits=1)
]
percentage_dataframe: pd.DataFrame(dtype=int) = pd.concat(data, axis=1).fillna(0)
else:
percentage_dataframe = round(self.dataframe[variable_name].value_counts(normalize=True)*100, ndigits=1)
return percentage_dataframe
def _get_count_and_percentage_dataframe_from_variable_name(
self,
variable_name: str,
outcome_specific: bool = False
) -> pd.DataFrame:
count_and_percentage_dataframe = pd.merge(
left=self._get_count_dataframe(variable_name=variable_name, outcome_specific=outcome_specific),
right=self._get_percentage_dataframe(variable_name=variable_name, outcome_specific=outcome_specific),
left_index=True,
right_index=True
)
return count_and_percentage_dataframe
@staticmethod
def _get_frequency_table_with_concatenated_list(
frequency_table: pd.DataFrame,
values: list,
first_column: bool = False
) -> pd.DataFrame:
series = pd.Series(data=values, index=frequency_table.index)
if first_column:
data = [series, frequency_table]
else:
data = [frequency_table, series]
frequency_table = pd.concat(data, axis=1, ignore_index=True)
return frequency_table
def get_frequency_table(self, list_of_columns: list) -> pd.DataFrame:
dataframes = []
for variable_idx, variable_name in enumerate(list_of_columns):
frequency_table = self._get_count_and_percentage_dataframe_from_variable_name(
variable_name=variable_name,
outcome_specific=False
)
frequency_table = self._get_frequency_table_with_concatenated_list(
frequency_table=frequency_table,
values=list(frequency_table.index),
first_column=True
)
number_of_levels = len(frequency_table.index)
variable = [""] * number_of_levels
variable[0] = variable_name
frequency_table = self._get_frequency_table_with_concatenated_list(
frequency_table=frequency_table,
values=variable,
first_column=True
)
dataframes.append(frequency_table)
dataframe = pd.concat(dataframes)
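# Usage sketch for DescriptiveStatistics — hedged: the outcome handling lives in
# the BaseStatistics parent (not shown here), and the column/outcome names below
# are placeholders rather than fields guaranteed by the real dataset.
def _example_descriptive_statistics(patients_df):
    stats_calculator = DescriptiveStatistics(patients_df)
    summary = stats_calculator.get_descriptive_stats_dataframe_from_given_columns(["Age", "PSA"])
    by_outcome = stats_calculator.get_descriptive_stats_dataframe_from_specific_outcome(
        list_of_columns=["Age", "PSA"],
        outcome="LymphNodeInvolvement",
    )
    frequencies = stats_calculator.get_frequency_table(["GleasonGrade"])
    return summary, by_outcome, frequencies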
import pandas as pd
import numpy as np
import datetime
import sys
import time
import xgboost as xgb
from add_feture import *
FEATURE_EXTRACTION_SLOT = 10
LabelDay = datetime.datetime(2014,12,18,0,0,0)
Data = pd.read_csv("../../../../data/fresh_comp_offline/drop1112_sub_item.csv")
Data['daystime'] = Data['days'].map(lambda x: time.strptime(x, "%Y-%m-%d")).map(lambda x: datetime.datetime(*x[:6]))
def get_train(train_user,end_time):
# 取出label day 前一天的记录作为打标记录
data_train = train_user[(train_user['daystime'] == (end_time-datetime.timedelta(days=1)))]#&((train_user.behavior_type==3)|(train_user.behavior_type==2))
# 训练样本中,删除重复的样本
data_train = data_train.drop_duplicates(['user_id', 'item_id'])
data_train_ui = data_train['user_id'] / data_train['item_id']
# print(len(data_train))
# 使用label day 的实际购买情况进行打标
data_label = train_user[train_user['daystime'] == end_time]
data_label_buy = data_label[data_label['behavior_type'] == 4]
data_label_buy_ui = data_label_buy['user_id'] / data_label_buy['item_id']
# mark the previous day's interactions: 1 if bought on the label day, else 0
data_train_labeled = data_train_ui.isin(data_label_buy_ui)
dict = {True: 1, False: 0}
data_train_labeled = data_train_labeled.map(dict)
data_train['label'] = data_train_labeled
return data_train[['user_id', 'item_id','item_category', 'label']]
def get_label_testset(train_user,LabelDay):
# the test set is all interactions from the previous day
data_test = train_user[(train_user['daystime'] == LabelDay)]#&((train_user.behavior_type==3)|(train_user.behavior_type==2))
data_test = data_test.drop_duplicates(['user_id', 'item_id'])
return data_test[['user_id', 'item_id','item_category']]
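# Usage sketch for the two helpers above — illustrative only: it relies on the
# module-level Data / LabelDay objects loaded at the top of this script.
def _example_build_labelled_sets():
    train_labelled = get_train(Data, LabelDay)            # features from LabelDay-1, labels from LabelDay
    test_candidates = get_label_testset(Data, LabelDay)   # candidate pairs interacted with on LabelDay
    return train_labelled, test_candidates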
def item_category_feture(data,end_time,beforeoneday):
# data = Data[(Data['daystime']<LabelDay) & (Data['daystime']>LabelDay-datetime.timedelta(days=FEATURE_EXTRACTION_SLOT))]
item_count = pd.crosstab(data.item_category,data.behavior_type)
item_count_before5=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5+2)]
item_count_before5 = pd.crosstab(beforefiveday.item_category,beforefiveday.behavior_type)
else:
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5)]
item_count_before5 = pd.crosstab(beforefiveday.item_category,beforefiveday.behavior_type)
item_count_before_3=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3+2)]
item_count_before_3 = pd.crosstab(beforethreeday.item_category,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3)]
item_count_before_3 = pd.crosstab(beforethreeday.item_category,beforethreeday.behavior_type)
item_count_before_2=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7+2)]
item_count_before_2 = pd.crosstab(beforethreeday.item_category,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7)]
item_count_before_2 = pd.crosstab(beforethreeday.item_category,beforethreeday.behavior_type)
# beforeoneday = Data[Data['daystime'] == LabelDay-datetime.timedelta(days=1)]
beforeonedayitem_count = pd.crosstab(beforeoneday.item_category,beforeoneday.behavior_type)
countAverage = item_count/FEATURE_EXTRACTION_SLOT
buyRate = pd.DataFrame()
buyRate['click'] = item_count[1]/item_count[4]
buyRate['skim'] = item_count[2]/item_count[4]
buyRate['collect'] = item_count[3]/item_count[4]
buyRate.index = item_count.index
buyRate_2 = pd.DataFrame()
buyRate_2['click'] = item_count_before5[1]/item_count_before5[4]
buyRate_2['skim'] = item_count_before5[2]/item_count_before5[4]
buyRate_2['collect'] = item_count_before5[3]/item_count_before5[4]
buyRate_2.index = item_count_before5.index
buyRate_3 = pd.DataFrame()
buyRate_3['click'] = item_count_before_3[1]/item_count_before_3[4]
buyRate_3['skim'] = item_count_before_3[2]/item_count_before_3[4]
buyRate_3['collect'] = item_count_before_3[3]/item_count_before_3[4]
buyRate_3.index = item_count_before_3.index
buyRate = buyRate.replace([np.inf, -np.inf], 0)
buyRate_2 = buyRate_2.replace([np.inf, -np.inf], 0)
buyRate_3 = buyRate_3.replace([np.inf, -np.inf], 0)
item_category_feture = pd.merge(item_count,beforeonedayitem_count,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,countAverage,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,buyRate,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,item_count_before5,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,item_count_before_3,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,item_count_before_2,how='left',right_index=True,left_index=True)
# item_category_feture = pd.merge(item_category_feture,buyRate_2,how='left',right_index=True,left_index=True)
# item_category_feture = pd.merge(item_category_feture,buyRate_3,how='left',right_index=True,left_index=True)
item_category_feture.fillna(0,inplace=True)
return item_category_feture
def item_id_feture(data,end_time,beforeoneday):
# data = Data[(Data['daystime']<LabelDay) & (Data['daystime']>LabelDay-datetime.timedelta(days=FEATURE_EXTRACTION_SLOT))]
item_count = pd.crosstab(data.item_id,data.behavior_type)
item_count_before5=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5+2)]
item_count_before5 = pd.crosstab(beforefiveday.item_id,beforefiveday.behavior_type)
else:
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5)]
item_count_before5 = | pd.crosstab(beforefiveday.item_id,beforefiveday.behavior_type) | pandas.crosstab |
# %% [markdown]
# # THE MIND OF A MAGGOT
# %% [markdown]
# ## Imports
import os
import time
import warnings
from itertools import chain
import colorcet as cc
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import networkx as nx
import numpy as np
import pandas as pd
import seaborn as sns
from anytree import LevelOrderGroupIter, NodeMixin
from joblib import Parallel, delayed
from mpl_toolkits.mplot3d import Axes3D
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.linalg import orthogonal_procrustes
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import squareform
from sklearn.cluster import AgglomerativeClustering
from sklearn.exceptions import ConvergenceWarning
from sklearn.manifold import MDS, TSNE, Isomap
from sklearn.metrics import adjusted_rand_score, pairwise_distances
from sklearn.utils.testing import ignore_warnings
from tqdm.autonotebook import tqdm
from graspy.cluster import AutoGMMCluster, GaussianCluster
from graspy.embed import (
AdjacencySpectralEmbed,
ClassicalMDS,
LaplacianSpectralEmbed,
select_dimension,
selectSVD,
)
from graspy.models import DCSBMEstimator, RDPGEstimator, SBMEstimator
from graspy.plot import heatmap, pairplot
from graspy.simulations import rdpg
from graspy.utils import augment_diagonal, binarize, pass_to_ranks
from src.cluster import get_paired_inds
from src.data import load_metagraph
from src.graph import preprocess
from src.hierarchy import signal_flow
from src.io import savecsv, savefig
from src.traverse import (
Cascade,
RandomWalk,
TraverseDispatcher,
to_markov_matrix,
to_path_graph,
to_transmission_matrix,
)
from src.visualization import (
CLASS_COLOR_DICT,
adjplot,
barplot_text,
draw_networkx_nice,
gridmap,
matrixplot,
palplot,
screeplot,
set_axes_equal,
stacked_barplot,
)
# from tqdm import tqdm
warnings.filterwarnings(action="ignore", category=ConvergenceWarning)
FNAME = os.path.basename(__file__)[:-3]
print(FNAME)
rc_dict = {
"axes.spines.right": False,
"axes.spines.top": False,
"axes.formatter.limits": (-3, 3),
"figure.figsize": (6, 3),
"figure.dpi": 100,
}
for key, val in rc_dict.items():
mpl.rcParams[key] = val
context = sns.plotting_context(context="talk", font_scale=1, rc=rc_dict)
sns.set_context(context)
np.random.seed(8888)
def stashfig(name, **kws):
savefig(name, foldername=FNAME, save_on=True, **kws)
def stashcsv(df, name, **kws):
savecsv(df, name)
def invert_permutation(p):
"""The argument p is assumed to be some permutation of 0, 1, ..., len(p)-1.
Returns an array s, where s[i] gives the index of i in p.
"""
p = np.asarray(p)
s = np.empty(p.size, p.dtype)
s[p] = np.arange(p.size)
return s
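# Worked example (added for clarity, not in the original script):
# invert_permutation([2, 0, 1]) returns array([1, 2, 0]), since value 0 sits at
# index 1 of p, value 1 at index 2, and value 2 at index 0.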
# %% [markdown]
# ##
from graspy.simulations import sbm
def get_feedforward_B(low_p, diag_p, feedforward_p, n_blocks=5):
B = np.zeros((n_blocks, n_blocks))
B += low_p
B -= np.diag(np.diag(B))
B -= np.diag(np.diag(B, k=1), k=1)
B += np.diag(diag_p * np.ones(n_blocks))
B += np.diag(feedforward_p * np.ones(n_blocks - 1), k=1)
return B
low_p = 0.01
diag_p = 0.1
feedforward_p = 0.3
n_blocks = 6
max_hops = 15
n_init = 100
basename = f"-{feedforward_p}-{diag_p}-{low_p}-{n_blocks}-{max_hops}-{n_init}"
block_probs = get_feedforward_B(low_p, diag_p, feedforward_p, n_blocks=n_blocks)
block_probs[1, 4] = 0.3
block_probs[2, 3] = 0.01
block_probs[2, 5] = 0.3
fig, axs = plt.subplots(1, 2, figsize=(20, 10))
sns.heatmap(block_probs, annot=True, cmap="Reds", cbar=False, ax=axs[0], square=True)
axs[0].xaxis.tick_top()
axs[0].set_title("Block probability matrix", pad=25)
community_sizes = np.empty(2 * n_blocks, dtype=int)
n_per_block = 100
community_sizes = n_blocks * [n_per_block]
np.random.seed(88)
adj, labels = sbm(
community_sizes, block_probs, directed=True, loops=False, return_labels=True
)
n_verts = adj.shape[0]
matrixplot(
adj,
row_sort_class=labels,
col_sort_class=labels,
cbar=False,
ax=axs[1],
square=True,
)
axs[1].set_title("Adjacency matrix", pad=25)
plt.tight_layout()
stashfig("sbm" + basename)
# %% [markdown]
# ## Run paths
print(f"Running {n_init} random walks from each source node...")
transition_probs = to_markov_matrix(adj)
out_inds = np.where(labels == n_blocks - 1)[0]
source_inds = np.where(labels == 0)[0]
def rw_from_node(s):
paths = []
rw = RandomWalk(
transition_probs, stop_nodes=out_inds, max_hops=10, allow_loops=False
)
for n in range(n_init):
rw.start(s)
paths.append(rw.traversal_)
return paths
par = Parallel(n_jobs=-1, verbose=10)
paths_by_node = par(delayed(rw_from_node)(s) for s in source_inds)
paths = []
for p in paths_by_node:
paths += p
print(len(paths))
# %% [markdown]
# ## Look at distribution of path lengths
path_lens = []
for p in paths:
path_lens.append(len(p))
sns.distplot(path_lens, kde=False)
paths_by_len = {i: [] for i in range(1, max_hops + 1)}
for p in paths:
paths_by_len[len(p)].append(p)
# %% [markdown]
# ##
embedder = AdjacencySpectralEmbed(n_components=None, n_elbows=2)
embed = embedder.fit_transform(pass_to_ranks(adj))
embed = np.concatenate(embed, axis=-1)
pdist = pairwise_distances(embed, metric="cosine")
triu_inds = np.triu_indices_from(pdist, k=1)
all_path_dists = pdist[triu_inds]
med = np.median(all_path_dists)
# %% [markdown]
# ##
# from skbio.sequence import Sequence
from alignment.sequence import Sequence
from alignment.vocabulary import Vocabulary
seqs = []
for p in paths:
s = Sequence(p)
seqs.append(s)
v = Vocabulary()
encoded_seqs = [v.encodeSequence(s) for s in seqs]
class SimpleScoring:
def __init__(self, matchScore, mismatchScore):
self.matchScore = matchScore
self.mismatchScore = mismatchScore
def __call__(self, firstElement, secondElement):
if firstElement == secondElement:
return self.matchScore
else:
return self.mismatchScore
# triu_inds = np.triu_indices_from(dist_mat, k=1)
# all_dists = dist_mat[triu_inds]
# med = np.median(all_dists)
# self. = med
class DistScoring:
def __init__(self, dist_mat):
dist_mat = 1000 - dist_mat * 1000
dist_mat = dist_mat.astype(int)
self.dist_mat = dist_mat
def __call__(self, first, second):
return self.dist_mat[first, second]
from alignment.sequencealigner import GlobalSequenceAligner
choice_inds = np.random.choice(len(seqs), int(1e3), replace=False)
new_seqs = []
for i, s in enumerate(seqs):
if i in choice_inds:
new_seqs.append(s)
seqs = new_seqs
nw_scores = np.zeros((len(seqs), len(seqs)))
aligner = GlobalSequenceAligner(DistScoring(pdist), 1000 - med * 1000)
for i in tqdm(range(len(seqs))):
for j in range(i, len(seqs)):
score, encodeds = aligner.align(seqs[i], seqs[j], backtrace=True)
s = score / (1000 * max(len(seqs[i]), len(seqs[j])))
nw_scores[i, j] = s
# %% [markdown]
# ##
from graspy.utils import symmetrize
sns.heatmap(nw_scores)
nw_scores = symmetrize(nw_scores, "triu")
nw_dists = 1 - nw_scores
# %% [markdown]
# ##
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
sns.heatmap(nw_dists)
Z = linkage(squareform(nw_dists), method="average")
sns.clustermap(nw_dists, row_linkage=Z, col_linkage=Z)
# %% [markdown]
# ##
pal = sns.color_palette("husl", n_colors=max(map(len, seqs)))
# %% [markdown]
# ##
manifold = TSNE(metric="precomputed")
# manifold = ClassicalMDS(n_components=2, dissimilarity="precomputed")
cos_embed = manifold.fit_transform(pdist)
# %% [markdown]
# ##
paths = seqs
plot_df = pd.DataFrame(data=cos_embed)
plot_df["labels"] = labels
fig, axs = plt.subplots(1, 2, figsize=(20, 10))
ax = axs[0]
sns.scatterplot(
data=plot_df,
x=0,
y=1,
hue="labels",
palette="Set1",
# legend="full",
ax=ax,
s=25,
linewidth=0.5,
alpha=0.8,
)
ax.get_legend().remove()
ax.legend(bbox_to_anchor=(1, 1), loc="upper left")
ax.get_legend().get_texts()[0].set_text("Block")
ax.axis("off")
for b in np.unique(labels):
mean_series = plot_df[plot_df["labels"] == b].mean()
x = mean_series[0] + 4
y = mean_series[1] + 4
ax.text(x, y, b, fontsize=20)
ax = axs[1]
sns.scatterplot(
data=plot_df,
x=0,
y=1,
# hue="labels",
color="grey",
palette="Set1",
ax=ax,
s=25,
linewidth=0.5,
alpha=0.8,
)
# ax.get_legend().remove()
# ax.legend(bbox_to_anchor=(1, 1), loc="upper left")
# ax.get_legend().get_texts()[0].set_text("Block")
# ax.axis("equal")
ax.axis("off")
for b in np.unique(labels):
mean_series = plot_df[plot_df["labels"] == b].mean()
x = mean_series[0] + 4
y = mean_series[1] + 4
ax.text(x, y, b, fontsize=20)
# pal = sns.color_palette("husl", n_colors=path_len)
# pal = [pal[0], pal[2], pal[4], pal[6], pal[1], pal[3], pal[5], pal[7], (0, 0, 0)]
# pal = pal[:path_len]
plot_path_inds = np.random.choice(len(paths), size=500, replace=False)
for i, p in enumerate(paths):
if i in plot_path_inds:
pal = sns.color_palette("husl", n_colors=len(p))
for t, (start, end) in enumerate(nx.utils.pairwise(p)):
x1, y1 = plot_df.loc[start, [0, 1]]
x2, y2 = plot_df.loc[end, [0, 1]]
ax.plot(
[x1, x2],
[y1, y2],
color=pal[t],
linewidth=0.2,
alpha=0.6,
label=t + 1 if i == plot_path_inds[0] else "",
)
leg = ax.legend(bbox_to_anchor=(1, 1), loc="upper left", title="Link order")
for lh in leg.legendHandles:
lh.set_alpha(1)
lh.set_linewidth(3)
stashfig("embed-sbm-nodes")
# %% [markdown]
# ##
path_len = 10
path_indicator_mat = np.zeros((len(paths), len(adj)), dtype=int)
for i, p in enumerate(paths):
for j, visit in enumerate(p):
path_indicator_mat[i, visit] = j + 1
pal = sns.color_palette("husl", path_len)
node_meta = | pd.DataFrame() | pandas.DataFrame |
import jax.numpy as np
import qtensornetwork.components
import qtensornetwork.circuit
import qtensornetwork.ansatz
import qtensornetwork.util
import qtensornetwork.optimizer
from qtensornetwork.gate import *
from jax.config import config
config.update("jax_enable_x64", True)
import tensorflow as tf
from tensorflow import keras
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
def generate_binary_mnist(f_label, s_label, train_num, test_num, width, height):
mnist = keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
df = pd.DataFrame(columns=["label"])
df["label"] = y_train.reshape([-1])
list_f = df.loc[df.label==f_label].sample(n=train_num)
list_s = df.loc[df.label==s_label].sample(n=train_num)
label_list = pd.concat([list_f, list_s])
label_list = label_list.sort_index()
label_idx = label_list.index.values
train_label = label_list.label.values
x_train = x_train[label_idx]
y_train= train_label
y_train = np.array([[0, 1] if i==f_label else [1, 0] for i in y_train])
df = pd.DataFrame(columns=["label"])
df["label"] = y_test.reshape([-1])
list_f = df.loc[df.label==f_label].sample(n=test_num)
list_s = df.loc[df.label==s_label].sample(n=test_num)
label_list = | pd.concat([list_f, list_s]) | pandas.concat |
import pandas as pd
codes = pd.read_csv("./data/London_District_codes.csv")
socio = | pd.read_spss("./data/London_ward_data_socioeconomic.sav") | pandas.read_spss |
"""
Sigproc results are generated in parallel by field.
At save time, the radmats for all fields are composited into one big radmat.
"""
import itertools
from collections import OrderedDict
import numpy as np
import pandas as pd
from plaster.run.base_result import BaseResult, disk_memoize
from plaster.run.sigproc_v2.sigproc_v2_params import SigprocV2Params
from plaster.run.priors import Priors
from plaster.tools.image.coord import HW, ROI, YX
from plaster.tools.schema import check
from plaster.tools.utils import stats, utils
from plaster.tools.utils.fancy_indexer import FancyIndexer
from plumbum import local
class SigprocV2Result(BaseResult):
"""
Understanding alignment coordinates
Each field has n_channels and n_cycles
The channels are all aligned already (stage doesn't move between channels)
But the stage does move between cycles and therefore an alignment is needed.
The stack of cycle images are aligned in coordinates relative to the 0th cycles.
The fields are stacked into a composite image large enough to hold the worst-case shift.
Each field in the field_df has a shift_x, shift_y.
The maximum absolute value of all of those shifts is called the border.
    The border is the amount added around all edges to accommodate all images.
"""
name = "sigproc_v2"
filename = "sigproc_v2.pkl"
# fmt: off
required_props = OrderedDict(
# Note that these do not include props in the save_field
params=SigprocV2Params,
n_channels=(type(None), int),
n_cycles=(type(None), int),
n_fields=(type(None), int),
focus_per_field_per_channel=(type(None), list),
)
peak_df_schema = OrderedDict(
peak_i=int,
field_i=int,
field_peak_i=int,
aln_y=float,
aln_x=float,
)
peak_fit_df_schema = OrderedDict(
peak_i=int,
field_i=int,
field_peak_i=int,
amp=float,
std_x=float,
std_y=float,
pos_x=float,
pos_y=float,
rho=float,
const=float,
mea=float,
)
field_df_schema = OrderedDict(
field_i=int,
channel_i=int,
cycle_i=int,
aln_y=float,
aln_x=float,
)
radmat_df_schema = OrderedDict(
peak_i=int,
channel_i=int,
cycle_i=int,
signal=float,
noise=float,
snr=float,
bg_med=float,
bg_std=float,
)
loc_df_schema = OrderedDict(
peak_i=int,
loc_x=float,
loc_y=float,
loc_ch_0_x=float,
loc_ch_0_y=float,
ambiguous=int,
loc_ch_1_x=float,
loc_ch_1_y=float,
)
# mask_rects_df_schema = dict(
# field_i=int,
# channel_i=int,
# cycle_i=int,
# l=int,
# r=int,
# w=int,
# h=int,
# )
# fmt: on
def __hash__(self):
return hash(id(self))
def _field_filename(self, field_i, is_debug):
return self._folder / f"{'_debug_' if is_debug else ''}field_{field_i:03d}.ipkl"
def save_field(self, field_i, _save_debug=True, **kwargs):
"""
        When using parallel field maps we cannot save into the result
because that will not be serialized back to the main thread.
Rather, use temporary files and gather at save()
Note that there is no guarantee of the order these are created.
"""
# CONVERT raw_mask_rects to a DataFrame
# rows = [
# (field_i, ch, cy, rect[0], rect[1], rect[2], rect[3])
# for ch, cy_rects in enumerate(kwargs.pop("raw_mask_rects"))
# for cy, rects in enumerate(cy_rects)
# for i, rect in enumerate(rects)
# ]
# kwargs["mask_rects_df"] = pd.DataFrame(
# rows, columns=["field_i", "channel_i", "cycle_i", "l", "r", "w", "h"]
# )
non_debug_kwargs = {k: v for k, v in kwargs.items() if not k.startswith("_")}
utils.indexed_pickler_dump(
non_debug_kwargs, self._field_filename(field_i, is_debug=False)
)
if _save_debug:
debug_kwargs = {k: v for k, v in kwargs.items() if k.startswith("_")}
utils.indexed_pickler_dump(
debug_kwargs, self._field_filename(field_i, is_debug=True)
)
def save(self, save_full_signal_radmat_npy=False):
"""
Extract the radmat from the fields and stack them in one giant mat
"""
self.field_files = [i.name for i in sorted(self._folder // "field*.ipkl")]
self.debug_field_files = [
i.name for i in sorted(self._folder // "_debug_field*.ipkl")
]
if save_full_signal_radmat_npy:
radmat = self.sig()
np.save(
str(self._folder / "full_signal_radmat.npy"), radmat, allow_pickle=False
)
if self.calib_priors is not None:
check.t(self.calib_priors, Priors)
utils.yaml_save(
str(self._folder / "calib.yaml"), self.calib_priors.serialize()
)
super().save()
def __init__(self, folder=None, is_loaded_result=False, **kwargs):
super().__init__(folder, is_loaded_result=is_loaded_result, **kwargs)
self._cache_ims = {}
def __repr__(self):
try:
return f"SigprocV2Result with files in {self._folder} with {self.n_fields} fields"
except Exception as e:
return "SigprocV2Result"
def limit(self, field_i_start=0, n_field_files=1):
self.field_files = self.field_files[
field_i_start : field_i_start + n_field_files
]
def _cache(self, prop, val=None):
# TASK: This might be better done with a yielding context
cache_key = f"_load_prop_cache_{prop}"
if val is not None:
self[cache_key] = val
return val
cached = self.get(cache_key)
if cached is not None:
return cached
return None
@property
def n_cols(self):
return self.n_cycles * self.n_channels
@property
def n_frames(self):
return (
self.n_fields
* self.params.n_output_channels
* np.max(self.fields().cycle_i)
+ 1
)
def fl_ch_cy_iter(self):
return itertools.product(
range(self.n_fields), range(self.n_channels), range(self.n_cycles)
)
def _has_prop(self, prop):
# Assume field 0 is representative of all fields
field_i = 0
name = local.path(self.field_files[field_i]).name
props = utils.indexed_pickler_load(
self._folder / name, prop_list=[prop], skip_missing_props=True
)
return prop in props.keys()
def _load_field_prop(self, field_i, prop):
"""Mockpoint"""
if prop.startswith("_"):
name = local.path(self.debug_field_files[field_i]).name
else:
name = local.path(self.field_files[field_i]).name
return utils.indexed_pickler_load(self._folder / name, prop_list=prop)
def _load_df_prop_from_fields(self, prop, field_iz=None):
"""
Stack the DF that is in prop along all fields
"""
if field_iz is None:
field_iz = tuple(range(self.n_fields))
cache_key = f"{prop}_field_iz_{field_iz}"
val = self._cache(cache_key)
if val is None:
dfs = [self._load_field_prop(field_i, prop) for field_i in field_iz]
# If you concat an empty df with others, it will wreak havoc
# on your column dtypes (e.g. int64->float64)
non_empty_dfs = [df for df in dfs if len(df) > 0]
if len(non_empty_dfs) > 0:
val = | pd.concat(non_empty_dfs, sort=False) | pandas.concat |
# To add data (bills, elevator, etc.) as input, call append() in the Python console
# l1 = list(a[a['related unit'] != 'All']['related unit'].str.split(','))
# l2 = [eval(i) for i in l1[0]]
#df1 = df.iloc[n:m]
#df.index = np.arange(len(df1))
#& user_input_df['category'] == ......
def append():
""" This function accepts inputs from the user. """
import pandas as pd
import datetime as dt
d = {'amount': [], 'time':[], 'category': [] , 'subcategory': [],
'responsible unit': [], 'related unit': [[]],
'div': [], 'description': []}
amount = int(input('amount:'))
d['amount'].append(amount)
time = input('time( Example: 1399/09/21 ) : ')
d['time'].append(dt.date(int(time[0:4]),int(time[5:7]), int(time[8:])))
category = input("category: 1) bill 2) cleaning 3) elevator 4) parking 5) repairs 6) charge 7) other [1/2/3/4/5/6/7] :")
if category == '1':
d['category'].append('bill')
elif category == '2':
d['category'].append('cleaning')
elif category == '3':
d['category'].append('elevator')
elif category == '4':
d['category'].append('parking')
elif category == '5':
d['category'].append('repairs')
elif category == '6':
d['category'].append('charge')
elif category == '7':
d['category'].append('other')
if category == '1':
subcategory = input('subcategory: 1) water 2) gas 3) electricity 4) tax [1/2/3/4] :')
if subcategory == '1':
subcategory = 'water'
elif subcategory == '2':
subcategory = 'gas'
elif subcategory == '3':
subcategory = 'electricity'
elif subcategory == '4':
subcategory = 'tax'
else:
subcategory = 'undefind'
d['subcategory'].append(subcategory)
responsible_unit = input('responsible unit:')
d['responsible unit'].append(responsible_unit)
    related_unit = input('related unit: (please enter the related units in the form: first unit number, second unit number, ... Note that if you want to include all units, you should enter the numbers of all units)').split(',')
for e in related_unit:
d['related unit'][0].append(eval(e))
div = input('div: 1) -e 2) -r 3) -d 4) -a 5) -p [1/2/3/4/5] :(Note that if you have selected charge as a category, -d must be chosen as the division type.)')
if div == '1':
div = 'equal'
d['div'].append(div)
elif div == '2':
div = 'number'
d['div'].append(div)
elif div == '3':
div = 'default'
d['div'].append(div)
elif div == '4':
div = 'area'
d['div'].append(div)
elif div == '5':
div = 'parking'
d['div'].append(div)
description = input('description:')
d['description'].append(description)
i = input('Is there anything left? A)yes B)no [A/B] :')
if i == 'B':
pd.DataFrame(d).to_csv(r'C:\Users\ASUS\Desktop\مبانی برنامه سازی\پروژه\user input data.csv',mode = 'a', header= False, index = False)
return
else:
pd.DataFrame(d).to_csv(r'C:\Users\ASUS\Desktop\مبانی برنامه سازی\پروژه\user input data.csv',mode = 'a', header = False, index = False)
append()
def equal():
""" This function divides expenses evenly between tenants. """
import pandas as pd
user_input_df = pd.read_csv('C:/Users/ASUS/Desktop/مبانی برنامه سازی/پروژه/user input data.csv', names=['amount','time','category','subcategory','responsible unit','related unit','div'],index_col =False)
final_df = user_input_df[user_input_df['div'] == 'equal'][['amount','time','category','subcategory','related unit']]
final_df['related unit count'] = ((len(user_input_df['related unit'][0]) - 2) // 4) + 2
final_df['cost for each unit'] = final_df['amount'] // final_df['related unit count']
final_df['related unit'] = final_df['related unit'].str.replace('[','')
final_df['related unit'] = final_df['related unit'].str.replace(']','')
final_df['related unit'] = list(final_df['related unit'].str.split(','))
final_df = final_df.explode('related unit')
final_df['related unit'] = final_df['related unit'].str.strip()
del final_df['amount']
final_df.to_csv(r'C:\Users\ASUS\Desktop\مبانی برنامه سازی\پروژه\Taraz.csv',mode = 'a', header = False, index = False)
return
def number():
""" This function divides expenses according to the number of people living in each apartment. """
import pandas as pd
user_input_df = pd.read_csv(r'C:\Users\ASUS\Desktop\مبانی برنامه سازی\پروژه\user input data.csv', names=['amount','time','category','subcategory','responsible unit','related unit','div'],index_col=False)
resident_info = pd.read_excel(r'C:\Users\ASUS\Desktop\مبانی برنامه سازی\پروژ''ه\data1.xlsx')
final_df = user_input_df[user_input_df['div'] == 'number'][['amount','time','category','subcategory','related unit']]
final_df['related unit'] = final_df['related unit'].str.replace('[','')
final_df['related unit'] = final_df['related unit'].str.replace(']','')
final_df['related unit'] = list(final_df['related unit'].str.split(','))
final_df = final_df.explode('related unit',ignore_index = True)
final_df['related unit'] = final_df['related unit'].str.strip()
final_df['related unit'] = final_df['related unit'].astype(int)
final_df['residents'] = resident_info[resident_info['number'].isin(final_df['related unit'])]['residents']
final_df['cost for each unit'] = (final_df['amount'] * final_df['residents']) // final_df['residents'].sum()
del final_df['amount']
final_df.to_csv(r'C:\Users\ASUS\Desktop\مبانی برنامه سازی\پروژه\Taraz.csv',mode = 'a', header = False, index = False)
return
def area():
""" This function divides expenses according to the area of each apartment. """
import pandas as pd
user_input_df = pd.read_csv(r'C:\Users\ASUS\Desktop\مبانی برنامه سازی\پروژه\user input data.csv', names=['amount','time','category','subcategory','responsible unit','related unit','div'],index_col=False)
resident_info = | pd.read_excel(r'C:\Users\ASUS\Desktop\مبانی برنامه سازی\پروژ''ه\data1.xlsx') | pandas.read_excel |
from SPARQLWrapper import SPARQLWrapper, JSON
import pandas as pd
import pickle, hashlib
class QTLSEARCH:
def __init__(self, search, qtls, go_annotations):
self.qtls = qtls
self.search = search
self.go_annotations = go_annotations
self.p_uniprot_reviewed = 1.00
self.p_uniprot_unreviewed = 0.95
self.loss_up_ortholog = 0.85
self.loss_down_ortholog = 0.85
self.loss_up_paralog = 0.7225
self.loss_down_paralog = 0.7225
#actions
print("\033[1m" + "=== GET DATA ===" + "\033[0m")
self.qtl_gene_roots, self.qtl_gene_protein, self.hog_group_trees, self.hog_group_genes = self.__collect_data()
print("\033[1m" + "=== COMPUTATIONS ===" + "\033[0m")
self.__do_computations()
print("\033[1m" + "=== CREATED QTLSEARCH OBJECT ===" + "\033[0m")
def report(self):
reports = []
for i in range(0,len(self.qtls)):
report = []
for gene in self.qtls[i]:
if gene in self.qtl_gene_roots.keys():
if self.qtl_gene_protein[gene] in self.hog_group_genes[self.qtl_gene_roots[gene]].index :
p_initial = self.hog_group_genes[self.qtl_gene_roots[gene]].loc[self.qtl_gene_protein[gene],"p_initial"]
p_final = self.hog_group_genes[self.qtl_gene_roots[gene]].loc[self.qtl_gene_protein[gene],"p_final"]
report.append([gene, p_initial, p_final, self.qtl_gene_protein[gene]])
df = pd.DataFrame(report)
df.columns = ["gene", "p_initial", "p_final", "protein" ]
df = df.set_index("gene")
df.sort_values(by=["p_final","p_initial","gene"], ascending=[0, 0, 1], inplace=True)
reports.append(df)
return(reports)
def __collect_data(self):
#define variables
hog_group_trees = pd.Series()
hog_group_genes = pd.Series()
#root in hog tree for each gene
qtl_gene_roots = pd.Series()
qtl_gene_protein = pd.Series()
gene_p_initial = pd.Series()
#start collecting
for qtl in self.qtls:
for gene in qtl:
if not(gene in qtl_gene_protein.keys()):
p_qtl_initial = 1.0/len(qtl)
#start searching
print("\033[1m"+"Search for "+gene+"\033[0m")
#first, go up in the hog-tree
parent_groups = self.search.get_parent_groups(gene)
if len(parent_groups)>0:
#define the root of the tree
qtl_gene_roots[gene] = parent_groups.index[0]
qtl_gene_protein[gene] = parent_groups.loc[parent_groups.index[0]].protein
print("- root is "+qtl_gene_roots[gene])
if qtl_gene_roots[gene] in hog_group_trees.index:
print("- tree already created")
if qtl_gene_protein[gene] in gene_p_initial.index:
hog_group_genes[qtl_gene_roots[gene]].loc[qtl_gene_protein[gene],"p_initial"] = max(gene_p_initial[qtl_gene_protein[gene]], p_qtl_initial)
else:
hog_group_genes[qtl_gene_roots[gene]].loc[qtl_gene_protein[gene],"p_initial"] = p_qtl_initial
else:
#go down the hog-tree, just to find tree structure
hog_group_trees[qtl_gene_roots[gene]] = self.search.get_child_groups(qtl_gene_roots[gene])
hog_group_trees[qtl_gene_roots[gene]].loc[:,"p_initial"] = pd.Series(0.0, index=hog_group_trees[qtl_gene_roots[gene]].index)
hog_group_trees[qtl_gene_roots[gene]].loc[:,"p_up"] = pd.Series(0.0, index=hog_group_trees[qtl_gene_roots[gene]].index)
hog_group_trees[qtl_gene_roots[gene]].loc[:,"p_down"] = pd.Series(0.0, index=hog_group_trees[qtl_gene_roots[gene]].index)
print("- tree of groups fetched: "+str(len(hog_group_trees[qtl_gene_roots[gene]])))
#go down again, now to find proteins
tree_proteins = self.search.get_child_proteins(qtl_gene_roots[gene])
tree_proteins_uniprot = self.search.get_child_proteins_uniprot(qtl_gene_roots[gene])
print("- proteins within tree fetched: "+str(len(tree_proteins)))
print("- uniprot proteins within tree fetched: "+str(len(tree_proteins_uniprot)))
#create final list of checked proteins
hog_group_genes[qtl_gene_roots[gene]] = tree_proteins
hog_group_genes[qtl_gene_roots[gene]].loc[:,"reviewed"] = | pd.Series("unknown", index=hog_group_genes[qtl_gene_roots[gene]].index) | pandas.Series |
import pandas as pd, numpy as np
import os,sys,random,pickle,time,glob,gc,shutil
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm.auto import tqdm
import tsai
from tsai.all import *
from sklearn.model_selection import KFold,StratifiedKFold
import sklearn.metrics as skm
from sklearn import preprocessing
DATA_DIR = sys.argv[1]
RANDOM_STATE = FREQ = 5
SUB_ID = f'd{FREQ}c4'
SUB_DIR = f'results/{SUB_ID}'
os.makedirs(SUB_DIR,exist_ok=True)
N_FOLDS = 5
MODEL_DIR = f'models/assets_freq_{FREQ}'
MODEL_NAME = 'XceptionTime'
print(f'running inference for model D{FREQ}')
def fix_seed(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# torch.set_num_threads(2)
try:
dls.rng.seed(seed)
except NameError:
pass
fix_seed(RANDOM_STATE)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
dfc1 = pd.read_hdf(f'{DATA_DIR}/{FREQ}D/c1_{FREQ}D.h5',key='df')
df_test = pd.read_hdf(f'{DATA_DIR}/{FREQ}D/test_{FREQ}D.h5',key='df')
FEATURES5 = ['B01', 'B03', 'B06', 'B08','B09', 'B11', 'B12', 'BSI', 'CLM',
'GBNDV2', 'GCI', 'GRNDVI', 'MSAVI', 'MYVI', 'NBR', 'NDMI', 'NDRE',
'NDSI', 'NDVI', 'NGRDI', 'NPCRI', 'PSRI', 'RDVI', 'VARIG']
def get_features_s2(df,versions=[0,1,2]):
eps=0
##v0
if 0 in versions:
df['NDVI']=(df['B08']-df['B04'])/ (df['B08']+df['B04']+eps)
df['BNDVI']=(df['B08']-df['B02'])/ (df['B08']+df['B02']+eps)
df['GNDVI']=(df['B08']-df['B03'])/ (df['B08']+df['B03']+eps)
df['GBNDVI'] = (df['B8A']-df['B03']-df['B02'])/(df['B8A']+df['B03']+df['B02']+eps)
df['GRNDVI'] = (df['B8A']-df['B03']-df['B04'])/(df['B8A']+df['B03']+df['B04']+eps)
df['RBNDVI'] = (df['B8A']-df['B04']-df['B02'])/(df['B8A']+df['B04']+df['B02']+eps)
df['GARI'] = (df['B08']-df['B03']+df['B02']-df['B04'])/(df['B08']-df['B03']-df['B02']+df['B04']+eps)
df['NBR'] = (df['B08']-df['B12'])/ (df['B08']+df['B12']+eps)
df['NDMI'] = (df['B08']-df['B11'])/ (df['B08']+df['B11']+eps)
df['NPCRI'] =(df['B04']-df['B02'])/ (df['B04']+df['B02']+eps)
a = (df['B08'] * (256-df['B04']) * (df['B08']-df['B04']))
df['AVI'] = np.sign(a) * np.abs(a)**(1/3)
df['BSI'] = ((df['B04']+df['B11']) - (df['B08']+df['B02']))/((df['B04']+df['B11']) + (df['B08']+df['B02']) +eps)
##v1
if 1 in versions:
a = ((256-df['B04'])*(256-df['B03'])*(256-df['B02']))
df['SI'] = np.sign(a) * np.abs(a)**(1/3)
df['BRI'] = ((1/(df['B03']+eps)) - (1/(df['B05']+eps)) )/ (df['B06']+eps)
df['MSAVI'] = ((2*df['B08']) + 1- np.sqrt( ((2*df['B08']+1)**2) - 8*(df['B08']-df['B04']) )) /2
df['NDSI'] = (df['B11'] - df['B12'])/(df['B11']+df['B12']+eps)
df['NDRE'] = (df['B8A'] - df['B05'])/ (df['B8A']+df['B05'] + eps)
df['NGRDI'] = (df['B03'] - df['B05'])/ (df['B03']+df['B05'] + eps)
df['RDVI'] = (df['B08']-df['B04'])/ np.sqrt(df['B08']+df['B04']+eps)
df['SIPI'] = (df['B08']-df['B02'])/ (df['B08']-df['B04']+eps)
df['PSRI'] = (df['B04']-df['B03'])/(df['B08']+eps)
df['GCI'] = (df['B08']/(df['B03']+eps))-1
df['GBNDV2'] = (df['B03']-df['B02'])/ (df['B03']+df['B02']+eps)
df['GRNDV2'] = (df['B03']-df['B04'])/ (df['B03']+df['B04']+eps)
##v2
if 2 in versions:
df['REIP'] = 700+(40* ( ( ((df['B04']+df['B07'])/2)-df['B05'])/ (df['B06']-df['B05']+eps)))
df['SLAVI'] = df['B08']/ (df['B04']+df['B12']+eps)
df['TCARI'] = 3*((df['B05']-df['B04'])-(0.2*(df['B05']-df['B03']))*(df['B05']/(df['B04']+eps)))
df['TCI'] = (1.2*(df['B05']-df['B03']))-(1.5*(df['B04']-df['B03']))*np.sqrt(df['B05']/(df['B04']+eps))
df['WDRVI'] = ((0.1*df['B8A'])-df['B05'])/((0.1*df['B8A'])+df['B05']+eps)
df['ARI'] = (1/(df['B03']+eps))-(1/(df['B05']+eps))
df['MYVI'] = (0.723 * df['B03']) - (0.597 * df['B04']) + (0.206 * df['B06']) - (0.278 * df['B8A'])
df['FE2'] = (df['B12']/ (df['B08']+eps)) + (df['B03']/ (df['B04']+eps))
df['CVI'] = (df['B08']* df['B04'])/ ((df['B03']**2)+eps)
df['VARIG'] = (df['B03'] - df['B04'])/ (df['B03']+df['B04']-df['B02'] + eps)
feat_cols = sorted([c for c in df if c not in ['tile_id','field_id','sub_id','date','label']])
drop_feats = [f for f in feat_cols if f not in FEATURES5]
df = df.drop(columns=drop_feats)
for c in FEATURES5:
df[c] = df[c].replace([-np.inf, np.inf], np.nan).astype(np.float32)
df = df.fillna(0)
return df
dfc1 = get_features_s2(dfc1,versions=[0,1,2])
df_test = get_features_s2(df_test,versions=[0,1,2])
feat_cols = sorted([c for c in dfc1 if c not in ['tile_id','field_id','sub_id','date','label']])
def get_dls(fold_id,assets_dir='assets'):
df_fields = dfc1[['field_id','label']].drop_duplicates().reset_index(drop=True)
folds = StratifiedKFold(n_splits=N_FOLDS, random_state=RANDOM_STATE, shuffle=True)
indices= [(train_index, val_index) for (train_index, val_index) in folds.split(df_fields.index,df_fields.label)]
train_index, val_index = indices[fold_id]
fields_valid = df_fields.loc[val_index].field_id.values
df_val = dfc1[dfc1.field_id.isin(fields_valid)].copy()
df_test_copy = df_test.copy()
for c in feat_cols:
with open(f"{assets_dir}/scaler_{fold_id}_{c}.pkl", "rb") as f:
scaler = pickle.load(f)
df_val[c] = scaler.transform(df_val[c].values.reshape(-1, 1))
df_test_copy[c] = scaler.transform(df_test_copy[c].values.reshape(-1, 1))
X_valid = []
y_valid=[]
gbs = ['field_id']
for field_id,grp in tqdm(df_val.groupby(gbs)):
vals = grp[feat_cols].values
X_valid.append(vals)
y_valid.append(grp['label'].values[0])
X_valid = np.asarray(X_valid)
y_valid = np.asarray(y_valid)
X_test = []
for field_id,grp in tqdm(df_test_copy.groupby('field_id')):
vals = grp[feat_cols].values
X_test.append(vals)
X_test = np.asarray(X_test)
y_test = np.zeros_like(X_test[:,0,0])
# print('fold: ',fold_id, X_train.shape, X_valid.shape)
assert(sorted(np.unique(y_valid))==[i for i in range(9)])
X, y, splits = combine_split_data([X_valid,X_test], [y_valid,y_test])
X = X.transpose(0,2,1)
bs = 128
tfms = [None, [Categorize()]]
batch_tfms=None
batch_tfms=[TSStandardize(by_sample=True, by_var=True)]
dsets = TSDatasets(X, y, tfms=tfms, splits=splits)
dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=[bs, bs],num_workers=0,batch_tfms=batch_tfms)
assert(dls.c==9)
# assert(dls.len==38)
print('dls: ',dls.c,dls.vars,dls.len)
return dls,df_val
OOFS = []
PREDS = []
PREDS_TEST = []
for fold_id in range(N_FOLDS):
fix_seed(RANDOM_STATE)
dls,df_val = get_dls(fold_id,MODEL_DIR)
model = build_model(eval(MODEL_NAME),dls=dls)
learn = Learner(dls, model, metrics=[accuracy])
learn.model_dir = MODEL_DIR
shutil.rmtree(f'{MODEL_DIR}/model.pth',ignore_errors=True)
model_fn = f'{MODEL_DIR}/{MODEL_NAME}_F{fold_id}.pth'
shutil.copyfile(model_fn,f'{MODEL_DIR}/model.pth')
learn.model.load_state_dict(torch.load(model_fn))
OOFS.append(df_val)
with learn.no_bar():
preds = learn.get_preds(ds_idx=0)[0].numpy()
PREDS.append(preds)
preds = learn.get_preds(ds_idx=1)[0].numpy()
PREDS_TEST.append(preds)
os.makedirs(SUB_DIR,exist_ok=True)
df_oof = pd.concat([o.drop_duplicates(subset=['field_id']) for o in OOFS])[['field_id','label']]
preds = np.concatenate(PREDS)
df_oof['pred'] = np.argmax(preds,axis=1)
df_oof.to_csv(f'{SUB_DIR}/oof.csv')
np.savez_compressed(f'{SUB_DIR}/preds.npz',preds)
loss,acc = skm.log_loss(df_oof.label,preds), skm.accuracy_score(df_oof.label,df_oof.pred)
print(f'OOF loss: {loss} acc: {acc}')
PREDS_TEST = np.asarray(PREDS_TEST)
np.savez_compressed(f'{SUB_DIR}/test_preds.npz',PREDS_TEST)
test_preds = np.mean(PREDS_TEST,axis=0)
assert(sorted(np.unique(np.argmax(test_preds,axis=1)))==[i for i in range(9)])
label_map = {0:'Crop_Lucerne/Medics',1:'Crop_Planted pastures (perennial)',2:'Crop_Fallow',3:'Crop_Wine grapes',
4:'Crop_Weeds',5:'Crop_Small grain grazing',6:'Crop_Wheat',7:'Crop_Canola',8:'Crop_Rooibos'}
df_preds = | pd.DataFrame(test_preds) | pandas.DataFrame |
from unittest import TestCase
from parameterized import parameterized
from collections import OrderedDict
import os
import gzip
from pandas import Series, DataFrame, date_range, Timestamp, read_csv
from pandas.testing import assert_frame_equal
from numpy import (
arange,
zeros_like,
nan,
)
import warnings
from pyfolio.utils import (
to_utc,
to_series,
check_intraday,
detect_intraday,
estimate_intraday,
)
from pyfolio.pos import (
get_percent_alloc,
extract_pos,
get_sector_exposures,
get_max_median_position_concentration,
)
class PositionsTestCase(TestCase):
dates = date_range(start="2015-01-01", freq="D", periods=20)
def test_get_percent_alloc(self):
raw_data = arange(15, dtype=float).reshape(5, 3)
# Make the first column negative to test absolute magnitudes.
raw_data[:, 0] *= -1
frame = DataFrame(
raw_data,
index=date_range("01-01-2015", freq="D", periods=5),
columns=["A", "B", "C"],
)
result = get_percent_alloc(frame)
expected_raw = zeros_like(raw_data)
for idx, row in enumerate(raw_data):
expected_raw[idx] = row / row.sum()
expected = DataFrame(
expected_raw,
index=frame.index,
columns=frame.columns,
)
assert_frame_equal(result, expected)
def test_extract_pos(self):
index_dup = [
Timestamp("2015-06-08", tz="UTC"),
Timestamp("2015-06-08", tz="UTC"),
Timestamp("2015-06-09", tz="UTC"),
Timestamp("2015-06-09", tz="UTC"),
]
index = [
Timestamp("2015-06-08", tz="UTC"),
Timestamp("2015-06-09", tz="UTC"),
]
positions = DataFrame(
{
"amount": [100.0, 200.0, 300.0, 400.0],
"last_sale_price": [10.0, 20.0, 30.0, 40.0],
"sid": [1, 2, 1, 2],
},
index=index_dup,
)
cash = Series([100.0, 200.0], index=index)
result = extract_pos(positions, cash)
expected = DataFrame(
OrderedDict(
[
(1, [100.0 * 10.0, 300.0 * 30.0]),
(2, [200.0 * 20.0, 400.0 * 40.0]),
("cash", [100.0, 200.0]),
]
),
index=index,
)
expected.index.name = "index"
expected.columns.name = "sid"
assert_frame_equal(result, expected)
@parameterized.expand(
[
(
DataFrame(
[[1.0, 2.0, 3.0, 10.0]] * len(dates),
columns=[0, 1, 2, "cash"],
index=dates,
),
{0: "A", 1: "B", 2: "A"},
DataFrame(
[[4.0, 2.0, 10.0]] * len(dates),
columns=["A", "B", "cash"],
index=dates,
),
False,
),
(
DataFrame(
[[1.0, 2.0, 3.0, 10.0]] * len(dates),
columns=[0, 1, 2, "cash"],
index=dates,
),
Series(index=[0, 1, 2], data=["A", "B", "A"]),
DataFrame(
[[4.0, 2.0, 10.0]] * len(dates),
columns=["A", "B", "cash"],
index=dates,
),
False,
),
(
DataFrame(
[[1.0, 2.0, 3.0, 10.0]] * len(dates),
columns=[0, 1, 2, "cash"],
index=dates,
),
{0: "A", 1: "B"},
DataFrame(
[[1.0, 2.0, 10.0]] * len(dates),
columns=["A", "B", "cash"],
index=dates,
),
True,
),
]
)
def test_sector_exposure(
self, positions, mapping, expected_sector_exposure, warning_expected
):
"""
Tests sector exposure mapping and rollup.
"""
with warnings.catch_warnings(record=True) as w:
result_sector_exposure = get_sector_exposures(positions, mapping)
assert_frame_equal(
result_sector_exposure, expected_sector_exposure
)
# avoids test failure due to DeprecationWarning for pandas>=1.0, <1.1
w_ = [warn for warn in w if issubclass(warn.category, UserWarning)]
if warning_expected:
self.assertEqual(len(w_), 1)
else:
self.assertEqual(len(w_), 0)
@parameterized.expand(
[
(
DataFrame(
[[1.0, 2.0, 3.0, 14.0]] * len(dates),
columns=[0, 1, 2, "cash"],
index=dates,
),
DataFrame(
[[0.15, 0.1, nan, nan]] * len(dates),
columns=[
"max_long",
"median_long",
"median_short",
"max_short",
],
index=dates,
),
),
(
DataFrame(
[[1.0, -2.0, -13.0, 15.0]] * len(dates),
columns=[0, 1, 2, "cash"],
index=dates,
),
DataFrame(
[[1.0, 1.0, -7.5, -13.0]] * len(dates),
columns=[
"max_long",
"median_long",
"median_short",
"max_short",
],
index=dates,
),
),
(
DataFrame(
[[nan, 2.0, nan, 8.0]] * len(dates),
columns=[0, 1, 2, "cash"],
index=dates,
),
DataFrame(
[[0.2, 0.2, nan, nan]] * len(dates),
columns=[
"max_long",
"median_long",
"median_short",
"max_short",
],
index=dates,
),
),
]
)
def test_max_median_exposure(self, positions, expected):
alloc_summary = get_max_median_position_concentration(positions)
| assert_frame_equal(expected, alloc_summary) | pandas.testing.assert_frame_equal |
import pandas as pd
import numpy as np2
def build(args):
# Get medians
def get_medians(df_p, last):
df_res = df_p.iloc[-last:].groupby(["param"]).median().reset_index()["median"][0]
return df_res
def medians_params(df_list, age_group, last):
params_def = ["age", "beta", "IFR", "RecPeriod", "alpha", "sigma"]
params_val = [
age_group,
get_medians(df_list[0], last),
get_medians(df_list[1], last),
get_medians(df_list[2], last),
get_medians(df_list[3], last),
get_medians(df_list[4], last),
]
res = dict(zip(params_def, params_val))
return res
params_data_BOG = pd.read_csv(args.params_data_path, encoding="unicode_escape", delimiter=",")
# Ages 0-19
young_ages_params = pd.DataFrame(params_data_BOG[params_data_BOG["age_group"] == "0-19"])
young_ages_beta = | pd.DataFrame(young_ages_params[young_ages_params["param"] == "contact_rate"]) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pickle
import shutil
import sys
import tempfile
import numpy as np
from numpy import arange, nan
import pandas.testing as pdt
from pandas import DataFrame, MultiIndex, Series, to_datetime
# dependencies testing specific
import pytest
import recordlinkage
from recordlinkage.base import BaseCompareFeature
STRING_SIM_ALGORITHMS = [
'jaro', 'q_gram', 'cosine', 'jaro_winkler', 'dameraulevenshtein',
'levenshtein', 'lcs', 'smith_waterman'
]
NUMERIC_SIM_ALGORITHMS = ['step', 'linear', 'squared', 'exp', 'gauss']
FIRST_NAMES = [
u'Ronald', u'Amy', u'Andrew', u'William', u'Frank', u'Jessica', u'Kevin',
u'Tyler', u'Yvonne', nan
]
LAST_NAMES = [
u'Graham', u'Smith', u'Holt', u'Pope', u'Hernandez', u'Gutierrez',
u'Rivera', nan, u'Crane', u'Padilla'
]
STREET = [
u'<NAME>', nan, u'<NAME>', u'<NAME>', u'<NAME>',
u'<NAME>', u'Williams Trail', u'Durham Mountains', u'Anna Circle',
u'<NAME>'
]
JOB = [
u'Designer, multimedia', u'Designer, blown glass/stained glass',
u'Chiropractor', u'Engineer, mining', u'Quantity surveyor',
u'Phytotherapist', u'Teacher, English as a foreign language',
u'Electrical engineer', u'Research officer, government', u'Economist'
]
AGES = [23, 40, 70, 45, 23, 57, 38, nan, 45, 46]
# Run all tests in this file with:
# nosetests tests/test_compare.py
class TestData(object):
@classmethod
def setup_class(cls):
N_A = 100
N_B = 100
cls.A = DataFrame({
'age': np.random.choice(AGES, N_A),
'given_name': np.random.choice(FIRST_NAMES, N_A),
'lastname': np.random.choice(LAST_NAMES, N_A),
'street': np.random.choice(STREET, N_A)
})
cls.B = DataFrame({
'age': np.random.choice(AGES, N_B),
'given_name': np.random.choice(FIRST_NAMES, N_B),
'lastname': np.random.choice(LAST_NAMES, N_B),
'street': np.random.choice(STREET, N_B)
})
cls.A.index.name = 'index_df1'
cls.B.index.name = 'index_df2'
cls.index_AB = MultiIndex.from_arrays(
[arange(len(cls.A)), arange(len(cls.B))],
names=[cls.A.index.name, cls.B.index.name])
# Create a temporary directory
cls.test_dir = tempfile.mkdtemp()
@classmethod
def teardown_class(cls):
# Remove the test directory
shutil.rmtree(cls.test_dir)
class TestCompareApi(TestData):
"""General unittest for the compare API."""
def test_repr(self):
comp = recordlinkage.Compare()
comp.exact('given_name', 'given_name')
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
c_str = str(comp)
c_repr = repr(comp)
assert c_str == c_repr
start_str = '<{}'.format(comp.__class__.__name__)
assert c_str.startswith(start_str)
def test_instance_linking(self):
comp = recordlinkage.Compare()
comp.exact('given_name', 'given_name')
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
result = comp.compute(self.index_AB, self.A, self.B)
# returns a Series
assert isinstance(result, DataFrame)
# resulting series has a MultiIndex
assert isinstance(result.index, MultiIndex)
        # index names are OK
assert result.index.names == [self.A.index.name, self.B.index.name]
assert len(result) == len(self.index_AB)
def test_instance_dedup(self):
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
result = comp.compute(self.index_AB, self.A)
# returns a Series
assert isinstance(result, DataFrame)
# resulting series has a MultiIndex
assert isinstance(result.index, MultiIndex)
        # index names are OK
assert result.index.names == [self.A.index.name, self.B.index.name]
assert len(result) == len(self.index_AB)
def test_label_linking(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'given_name',
'given_name',
label='my_feature_label')
result = comp.compute(self.index_AB, self.A, self.B)
assert "my_feature_label" in result.columns.tolist()
def test_label_dedup(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'given_name',
'given_name',
label='my_feature_label')
result = comp.compute(self.index_AB, self.A)
assert "my_feature_label" in result.columns.tolist()
def test_multilabel_none_linking(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name')
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name')
result = comp.compute(self.index_AB, self.A, self.B)
assert [0, 1, 2, 3, 4, 5, 6, 7, 8] == \
result.columns.tolist()
def test_multilabel_linking(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name',
label=['a', ['b', 'c', 'd']])
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name',
label=['e', ['f', 'g', 'h']])
result = comp.compute(self.index_AB, self.A, self.B)
assert [0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] == \
result.columns.tolist()
def test_multilabel_dedup(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name',
label=['a', ['b', 'c', 'd']])
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name',
label=['e', ['f', 'g', 'h']])
result = comp.compute(self.index_AB, self.A)
assert [0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] == \
result.columns.tolist()
def test_multilabel_none_dedup(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name')
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name')
result = comp.compute(self.index_AB, self.A)
assert [0, 1, 2, 3, 4, 5, 6, 7, 8] == \
result.columns.tolist()
def test_multilabel_error_dedup(self):
def ones(s1, s2):
return np.ones((len(s1), 2))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones, 'given_name', 'given_name', label=['a', 'b', 'c'])
with pytest.raises(ValueError):
comp.compute(self.index_AB, self.A)
def test_incorrect_collabels_linking(self):
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
"given_name", "not_existing_label")
with pytest.raises(KeyError):
comp.compute(self.index_AB, self.A, self.B)
def test_incorrect_collabels_dedup(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
"given_name", "not_existing_label")
with pytest.raises(KeyError):
comp.compute(self.index_AB, self.A)
def test_compare_custom_vectorized_linking(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col', 'col')
result = comp.compute(ix, A, B)
expected = DataFrame([1, 1, 1, 1, 1], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col',
'col',
label='my_feature_label')
result = comp.compute(ix, A, B)
expected = DataFrame(
[1, 1, 1, 1, 1], index=ix, columns=['my_feature_label'])
pdt.assert_frame_equal(result, expected)
# def test_compare_custom_nonvectorized_linking(self):
# A = DataFrame({'col': [1, 2, 3, 4, 5]})
# B = DataFrame({'col': [1, 2, 3, 4, 5]})
# ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# def custom_func(a, b):
# return np.int64(1)
# # test without label
# comp = recordlinkage.Compare()
# comp.compare_single(
# custom_func,
# 'col',
# 'col'
# )
# result = comp.compute(ix, A, B)
# expected = DataFrame([1, 1, 1, 1, 1], index=ix)
# pdt.assert_frame_equal(result, expected)
# # test with label
# comp = recordlinkage.Compare()
# comp.compare_single(
# custom_func,
# 'col',
# 'col',
# label='test'
# )
# result = comp.compute(ix, A, B)
# expected = DataFrame([1, 1, 1, 1, 1], index=ix, columns=['test'])
# pdt.assert_frame_equal(result, expected)
def test_compare_custom_instance_type(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
def call(s1, s2):
# this should raise on incorrect types
assert isinstance(s1, np.ndarray)
assert isinstance(s2, np.ndarray)
return np.ones(len(s1), dtype=np.int)
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col', 'col')
result = comp.compute(ix, A, B)
expected = DataFrame([1, 1, 1, 1, 1], index=ix)
pdt.assert_frame_equal(result, expected)
def test_compare_custom_vectorized_arguments_linking(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x, 'col', 'col',
5)
result = comp.compute(ix, A, B)
expected = DataFrame([5, 5, 5, 5, 5], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x,
'col',
'col',
5,
label='test')
result = comp.compute(ix, A, B)
expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
# test with kwarg
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x,
'col',
'col',
x=5,
label='test')
result = comp.compute(ix, A, B)
expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
def test_compare_custom_vectorized_dedup(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
ix = MultiIndex.from_arrays([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col', 'col')
result = comp.compute(ix, A)
expected = DataFrame([1, 1, 1, 1, 1], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col',
'col',
label='test')
result = comp.compute(ix, A)
expected = DataFrame([1, 1, 1, 1, 1], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
def test_compare_custom_vectorized_arguments_dedup(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
ix = MultiIndex.from_arrays([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x, 'col', 'col',
5)
result = comp.compute(ix, A)
expected = DataFrame([5, 5, 5, 5, 5], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x,
'col',
'col',
5,
label='test')
result = comp.compute(ix, A)
expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test'])
| pdt.assert_frame_equal(result, expected) | pandas.testing.assert_frame_equal |
import os
import tempfile
import pandas as pd
from data_describe.compat import _compat, _requires
from data_describe.misc.file_ext import _FileExtensionTypes, is_filetype
def load_data(filepath, all_folders=False, **kwargs):
"""Create pandas data frame from filepath.
Args:
filepath: The file path. Can be either a local filepath or Google Cloud Storage URI filepath
all_folders: If True, searches for text files in nested folders. If False, looks for text files in the current folder
**kwargs: Keyword arguments to pass to the reader
Raises:
ImportError: gcsfs not installed.
FileNotFoundError: File doesn't exist.
Returns:
A pandas data frame
"""
if os.path.isfile(filepath):
df = _read_file_type(filepath, **kwargs)
elif "gs://" in filepath:
if _compat.check_install("gcsfs"):
df = _read_file_type(filepath, **kwargs)
else:
raise ImportError("Package gcsfs required to load from GCS")
elif os.path.isdir(filepath):
text = []
encoding = kwargs.pop("encoding", "utf-8")
if not all_folders:
for file in os.listdir(filepath):
if os.path.isfile(os.path.join(filepath, file)) and file.endswith(
".txt"
):
with open(
os.path.join(filepath, file), "r", encoding=encoding
) as f:
text.append(f.read())
else:
for root, _, files in os.walk(filepath):
for file in files:
if file.endswith(".txt"):
with open(
os.path.join(root, file), "r", encoding=encoding
) as f:
text.append(f.read())
df = pd.DataFrame(text)
else:
raise FileNotFoundError("{} not a valid path".format(filepath))
return df
def _read_file_type(filepath, **kwargs):
"""Read the file based on file extension.
Currently supports the following filetypes:
    csv, json, excel, txt, shp
Args:
filepath: The filepath to open
**kwargs: Keyword arguments to pass to the reader
Returns:
A Pandas data frame
"""
extension = os.path.splitext(filepath)[1]
if is_filetype(_FileExtensionTypes.CSV, extension):
return pd.read_csv(filepath, **kwargs)
elif is_filetype(_FileExtensionTypes.JSON, extension):
lines = kwargs.pop("lines", True)
return pd.read_json(filepath, lines=lines, **kwargs)
elif is_filetype(_FileExtensionTypes.EXCEL, extension):
return | pd.read_excel(filepath, **kwargs) | pandas.read_excel |
# -*- coding: utf-8 -*-
"""
Tests that apply specifically to the Python parser. Unless specifically
stated as a Python-specific issue, the goal is to eventually move as many of
these tests out of this module as soon as the C parser can accept further
arguments when parsing.
"""
import csv
import sys
import pytest
import pandas.compat as compat
from pandas.compat import BytesIO, StringIO, u
from pandas.errors import ParserError
from pandas import DataFrame, Index
import pandas.util.testing as tm
class PythonParserTests(object):
def test_default_separator(self):
# GH17333
# csv.Sniffer in Python treats 'o' as separator.
text = 'aob\n1o2\n3o4'
expected = DataFrame({'a': [1, 3], 'b': [2, 4]})
result = self.read_csv(StringIO(text), sep=None)
tm.assert_frame_equal(result, expected)
def test_invalid_skipfooter(self):
text = "a\n1\n2"
# see gh-15925 (comment)
msg = "skipfooter must be an integer"
with pytest.raises(ValueError, match=msg):
self.read_csv(StringIO(text), skipfooter="foo")
with pytest.raises(ValueError, match=msg):
self.read_csv(StringIO(text), skipfooter=1.5)
with pytest.raises(ValueError, match=msg):
self.read_csv(StringIO(text), skipfooter=True)
msg = "skipfooter cannot be negative"
with pytest.raises(ValueError, match=msg):
self.read_csv( | StringIO(text) | pandas.compat.StringIO |
from torch.utils.data import DataLoader, random_split
from torchvision.utils import save_image
from dataset_creator import RiVAEDataset
from torch import optim
from torch import nn
from tqdm import tqdm
import pandas as pd
import torch
import model
import os
# get image path
path = os.path.abspath(os.path.dirname(__file__))
img_path = f"{path}/data/images"
# parameters
img_shape = [280, 280, 3] # [h, w, c]
latent_dim = 1587
epochs = 1000
batch_size = 1
lr = 0.0001
# use gpu if available
cuda_available = torch.cuda.is_available()
device = torch.device('cuda' if cuda_available else 'cpu')
print("PyTorch CUDA:", cuda_available)
# create a model from LinearVAE autoencoder class
# load it to the specified device, either gpu or cpu
model = model.RiVAE(latent_dim=latent_dim, batch_size=batch_size, img_shape=img_shape).to(device)
# create an optimizer object
# Adam optimizer with learning rate 1e-4
optimizer = optim.Adam(model.parameters(), lr=lr)
# Binary Cross Entropy loss
criterion = nn.BCELoss(reduction='sum')
# loading the dataset using DataLoader
dataset = RiVAEDataset(img_dir=img_path, img_shape=img_shape)
lengths = [round(len(dataset)*0.8), round(len(dataset)*0.2)]
train_data, val_data = random_split(dataset, lengths, generator=torch.Generator())
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_data, batch_size=batch_size, shuffle=True)
def kl_loss(mu, logvar):
"""
KL-Divergence = 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
:param mu: the mean from the latent vector
:param logvar: log variance from the latent vector
"""
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
return KLD
def fit(model, dataloader):
model.train()
running_loss = 0.0
for i, data in tqdm(enumerate(dataloader), total=int(len(train_data)/dataloader.batch_size)):
data = data.to(device)
data = data.reshape((1, 3, data.shape[3], data.shape[2]))
optimizer.zero_grad()
reconstruction, mu, logvar = model(data)
criterion_loss = criterion(reconstruction, data)
kl_div = kl_loss(mu, logvar)
loss = criterion_loss + kl_div
running_loss += loss.item()
loss.backward()
optimizer.step()
train_loss = running_loss/len(dataloader.dataset)
return train_loss
def validate(model, dataloader):
model.eval()
running_loss = 0.0
with torch.no_grad():
for i, data in tqdm(enumerate(dataloader), total=int(len(val_data) / dataloader.batch_size)):
data = data.to(device)
data = data.view((1, 3, data.shape[3], data.shape[2]))
reconstruction, mu, logvar = model(data)
criterion_loss = criterion(reconstruction, data)
kl_div = kl_loss(mu, logvar)
loss = criterion_loss + kl_div
running_loss += loss.item()
# save the last batch input and output of every epoch
if i == int(len(val_data) / dataloader.batch_size) - 1:
both = torch.cat((data.view(batch_size, img_shape[2], img_shape[0], img_shape[1]),
reconstruction.view(batch_size, img_shape[2], img_shape[0], img_shape[1])))
save_image(both.cpu(), f"{path}/data/outputs/output_{epoch}.png")
val_loss = running_loss / len(dataloader.dataset)
return val_loss
train_loss = []
val_loss = []
for epoch in range(epochs):
print(f"Epoch {epoch+1} of {epochs}")
train_epoch_loss = fit(model, train_loader)
val_epoch_loss = validate(model, val_loader)
train_loss.append(train_epoch_loss)
val_loss.append(val_epoch_loss)
df_loss = | pd.DataFrame({'train_loss': train_loss, 'val_loss': val_loss}) | pandas.DataFrame |
"""
Module for static data retrieval. These functions were performed once during the initial project creation. Resulting
data is now provided in bulk at the url above.
"""
import datetime
import json
from math import sin, cos, sqrt, atan2, radians
import re
import requests
import pandas as pd
from riverrunner import settings
from riverrunner.context import StationRiverDistance
from riverrunner.repository import Repository
def scrape_rivers_urls():
"""scrape river run data from Professor Paddle
generates URLs from the array of strings below. Each element represents a unique river. Each page is
requested with the entire HTML contents being saved to disk. The parsed river data is saved to 'data/rivers.csv'
"""
# copied from jquery selection in chrome dev tools on main prof paddle run table
river_links = pd.read_csv('riverrunner/data/static_river_urls.csv').columns.values
river_ids = [r[r.find("=")+1:] for r in river_links]
url = "http://www.professorpaddle.com/rivers/riverdetails.asp?riverid="
for id in river_ids:
r = requests.get(url + id)
if r.status_code == 200:
with open("river_%s.html" % id, 'w+') as f:
f.write(str(r.content))
rivers = []
for rid in river_ids:
with open('data/river_%s.html' % rid) as f:
river = f.readlines()
r = river[0]
row = {}
# title and river name
r = r[r.find('<font size="+2">'):]
run_name = r[r.find(">") + 1:r.find('<a')]
run_name = re.sub(r'<[^>]*>| ', ' ', run_name)
river_name = run_name[:run_name.find(' ')]
run_name = run_name[len(river_name):]
run_name = re.sub(r''', "'", run_name)
run_name = re.sub(r'—', "", run_name).strip()
row['run_name'] = re.sub(r'( )+', ' ', run_name)
row['river_name'] = river_name
# chunk off the class
r = r[r.find('Class'):]
rating = r[6:r.find('</strong>')]
row['class_rating'] = rating
# river length
r = r[r.find('<strong>')+8:]
length = r[:r.find("<")]
row['river_length'] = length
# zip code
r = r[r.find('Zip Code'):]
r = r[r.find('path')+6:]
row['zip'] = r[:r.find("<")]
# put in long
r = r[r.find("Put In Longitude"):]
r = r[r.find('path')+6:]
row['put_in_long'] = r[:r.find("<")]
# put in lat
r = r[r.find("Put In Latitude"):]
r = r[r.find('path')+6:]
row['put_in_lat'] = r[:r.find("<")]
# take out long
r = r[r.find("Take Out Longitude"):]
r = r[r.find('path')+6:]
row['take_out_long'] = r[:r.find("<")]
# take out lat
r = r[r.find("Take Out Latitude"):]
r = r[r.find('path')+6:]
row['take_out_lat'] = r[:r.find("<")]
# county
r = r[r.find("County"):]
r = r[r.find('path')+6:]
row['county'] = r[:r.find("<")]
# min level
r = r[r.find("Minimum Recomended Level"):]
r = r[r.find(" ")+6:]
row['min_level'] = r[:r.find("&")]
# min level units
r = r[r.find(';')+1:]
row['min_level_units'] = r[:r.find('&')]
# Maximum Recomended Level
r = r[r.find("Maximum Recomended Level"):]
r = r[r.find(" ")+6:]
row['max_level'] = r[:r.find("&")]
# max level units
r = r[r.find(';')+1:]
row['max_level_units'] = r[:r.find('&')]
row['id'] = rid
row['url'] = url + rid
rivers.append(row)
pd.DataFrame(rivers).to_csv('data/rivers.csv')
def parse_location_components(components, lat, lon):
"""parses location data from a Goggle address component list"""
location = {'latitude': lat, 'longitude': lon}
for component in components:
component_type = component['types']
if 'route' in component_type:
location['address'] = component['long_name']
elif 'locality' in component_type:
location['city'] = component['long_name']
elif 'administrative_area_level_2' in component_type:
location['route'] = re.sub(r'County', '', component['long_name'])
elif 'administrative_area_level_1' in component_type:
location['state'] = component['short_name']
elif 'postal_code' in component_type:
location['zip'] = component['long_name']
print(location)
return location
def parse_addresses_from_rivers():
"""parses river geolocation data and retrieves associated address information from Google geolocation services"""
df = pd.read_csv('data/rivers.csv').fillna('null')
addresses = []
# put in addresses
for name, group in df.groupby(['put_in_lat', 'put_in_long']):
if name[0] == 0 or name[1] == 0:
continue
r = requests.get('https://maps.googleapis.com/maps/api/geocode/json?latlng=%s,%s&key=%s' %
(name[0], name[1], settings.GEOLOCATION_API_KEY))
components = json.loads(r.content)['results'][0]['address_components']
addresses.append(parse_location_components(components, name[0], name[1]))
# take out addresses
for name, group in df.groupby(['take_out_lat', 'take_out_long']):
if name[0] == 0 or name[1] == 0:
continue
r = requests.get('https://maps.googleapis.com/maps/api/geocode/json?latlng=%s,%s&key=%s' %
(name[0], name[1], settings.GEOLOCATION_API_KEY))
if r.status_code == 200 and len(r.content) > 10:
components = json.loads(r.content)['results'][0]['address_components']
addresses.append(parse_location_components(components, name[0], name[1]))
pd.DataFrame(addresses).to_csv('data/addresses_takeout.csv', index=False)
def scrape_snowfall():
"""scrapes daily snowfall data from NOAA"""
base_url = 'https://www.ncdc.noaa.gov/snow-and-ice/daily-snow/WA-snow-depth-'
snowfall = []
for year in [2016, 2017, 2018]:
for month in range(1, 13):
for day in range(1, 32):
try:
date = '%s%02d%02d' % (year, month, day)
r = requests.get(base_url + date + '.json')
if r.status_code == 200 and len(r.content) > 0:
snf = json.loads(r.content)
for row in snf['rows']:
lat = row['c'][0]['v']
lon = row['c'][1]['v']
location_name = row['c'][2]['v'].strip().lower()
depth = row['c'][3]['v']
this_row = (datetime.datetime.strptime(str(date), '%Y%m%d').date(), lat, lon, location_name, depth)
snowfall.append(this_row)
print(this_row)
except Exception as e:
print([str(a) for a in e.args])
df = | pd.DataFrame(snowfall) | pandas.DataFrame |
import os
import pandas as pd
import numpy as np
from collections import Counter
from imblearn.datasets import make_imbalance
from imblearn.over_sampling import SMOTE, ADASYN
from sklearn.utils import shuffle
os.chdir('/content/gdrive/My Drive/training_testing_data/')
train = pd.read_csv('train_data_rp_3_IMBALANCED.csv')
X_train = train.iloc[:, :-1]
X_train = X_train.values
Y_train = train.iloc[:, -1:]
Y_train = Y_train.values
oversample = SMOTE()
X_train_SMOTE, Y_train_SMOTE = oversample.fit_resample(X_train, Y_train)
print('SMOTE:', sorted(Counter(Y_train_SMOTE).items()))
X_train_SMOTE, Y_train_SMOTE = shuffle(X_train_SMOTE, Y_train_SMOTE, random_state=42)
X_train_SMOTE = pd.DataFrame(X_train_SMOTE)
Y_train_SMOTE = pd.DataFrame(Y_train_SMOTE)
train_SMOTE = pd.concat([X_train_SMOTE, Y_train_SMOTE], axis=1, ignore_index=True)
train_SMOTE.to_csv('train_data_rp_3_SMOTE.csv', index=False)
oversample = ADASYN()
X_train_ADASYN, Y_train_ADASYN = oversample.fit_resample(X_train, Y_train)
print('ADASYN:', sorted(Counter(Y_train_ADASYN).items()))
X_train_ADASYN, Y_train_ADASYN = shuffle(X_train_ADASYN, Y_train_ADASYN, random_state=42)
X_train_ADASYN = pd.DataFrame(X_train_ADASYN)
Y_train_ADASYN = pd.DataFrame(Y_train_ADASYN)
train_ADASYN = | pd.concat([X_train_ADASYN, Y_train_ADASYN], axis=1, ignore_index=True) | pandas.concat |
#Author: <NAME>
import numpy as np
import os
import h5py
import pandas as pd
from AxonImaging import signal_processing as sp
def get_processed_running_speed (vsig,vref,sample_freq, smooth_filter_sigma = 0.05, wheel_diameter = 16.51, positive_speed_threshold= 70, negative_speed_threshold= -5):
    ''' Returns the running speed given voltage changes from an encoder wheel. Speeds are smoothed and outlier
    values above or below arbitrarily defined thresholds are set to NaN and interpolated over.
    :param vsig: voltage signal which changes as a function of wheel movement (running)
    :param vref: reference voltage (typically 5V +/- a small, encoder-dependent offset)
    :param sample_freq: sampling frequency at which vsig and vref are acquired
    :param smooth_filter_sigma: sigma (in seconds) used for gaussian filtering
    :param wheel_diameter: diameter of the running wheel
    :param positive_speed_threshold: maximum allowed positive speed (impossibly high running speeds are set to NaN)
    :param negative_speed_threshold: maximum allowed negative (backwards) speed (impossibly fast backwards speeds are set to NaN)
    :return: smoothed trace of running speed in cm/s per sample, with outliers set to NaN and interpolated over
    '''
from scipy.ndimage import gaussian_filter1d
vref_mean = np.median(vref[np.abs(vref)<20])
position_arc = vsig*(2.*np.pi)/vref_mean
position_arc_smooth = gaussian_filter1d(position_arc, int(smooth_filter_sigma*sample_freq))
speed_arc = np.append(np.diff(position_arc_smooth),0) * sample_freq
speed = speed_arc * wheel_diameter
speed_smooth = np.copy(speed)
speed_smooth[np.logical_or(speed>=positive_speed_threshold,speed<=negative_speed_threshold)]=np.nan
mask = np.isnan(speed_smooth)
mask2 = np.zeros(mask.shape, dtype=np.bool)
for n,p in enumerate(mask):
if p:
mask2[(n-(2*int(smooth_filter_sigma*sample_freq))):(n+int((2*smooth_filter_sigma*sample_freq+1)))] = True # extend mask 2 filter widths to extend interpolation
speed_smooth[mask2] = np.interp(np.flatnonzero(mask2), np.flatnonzero(~mask2), speed[~mask2])
return speed_smooth
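# Example call (hypothetical values -- an encoder channel sampled at 30 kHz,
# using the default wheel diameter and thresholds defined above):
#
#   speed = get_processed_running_speed(vsig, vref, sample_freq=30000.)
#   # 'speed' is a per-sample trace in cm/s with implausible values interpolated over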
def get_auditory_onset_times(microphone, sample_freq, threshold=1, stdev_samples=10,filter_width=20):
'''
    Finds the onset of an auditory event by first calculating a standard deviation across user-defined chunks of samples and then thresholding those standard deviations to find the onset times.
    :param microphone: an analog microphone signal
    :param sample_freq: the sampling frequency at which the auditory signal was acquired
    :param threshold: threshold value, in units of standard deviation, for finding onset times (values above this are marked as a valid onset)
    :param stdev_samples: number of samples used to calculate each standard deviation
    :param filter_width: width of the boxcar filter applied to the standard-deviation trace
    :return: the onset sound_times in units of seconds
'''
from scipy.signal import convolve, boxcar
#get the standard deviation across user-defined number of samples
step=int(stdev_samples)
stdev=[]
for ii in range(0,microphone.shape[0],step):
chunk=microphone[ii:ii+step]
stdev.append(np.std(chunk))
stdev_filtered=convolve(stdev, boxcar(M=filter_width))
#get the up samples #s through thresholding
stamps=sp.threshold_greater(np.array(stdev_filtered),threshold)
    #multiply these sample numbers by the user-defined number of stdev_samples to account for the downsampling that occurred when the standard deviation was calculated
stamps=np.multiply(stamps,stdev_samples)
sound_times = np.divide(stamps,sample_freq)
print ('total number of sound presentations found = '+ str(len(sound_times)))
return sound_times
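# Example call (hypothetical numbers): with a microphone channel acquired at 30 kHz,
#
#   onsets = get_auditory_onset_times(mic, sample_freq=30000., threshold=1.5)
#
# returns onset times in seconds; raising 'threshold' makes detection stricter,
# while larger 'stdev_samples' trades temporal resolution for noise robustness.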
def microphone_to_dB (signal, sensitivity=250, pre_amp_gain=12):
''' Converts microphone voltage to decibels given the microphone sensitivity and pre amp gain.
:param signal: the analog microphone voltage (in V)
:param sensitivity: the sensitivity of the microphone in mv/Pa
:param pre_amp_gain: gain setting on the microphone pre amp (in dB)
'''
#reference is "threshold for hearing" 20 micropascals at 1 kHz, also called SPL
reference=20E-6
baseline_v=reference*sensitivity
db=np.log10((signal/baseline_v))*20
db_nogain=db-pre_amp_gain
return db_nogain
#convert signal to pascals
#divide by the preamp gain, multiply by 1000 to convert from volts to mV
#divide by the microphone sensitivity in mV/Pa
#dB equation from voltage is 20 * log ()
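# Worked example with the default settings (sensitivity 250 mV/Pa, 12 dB pre-amp gain),
# assuming the signal is expressed in the same millivolt units as the sensitivity:
# baseline_v = 20e-6 * 250 = 0.005, so a signal value of 0.05 gives
# 20*log10(0.05/0.005) - 12 = 20 - 12 = 8 dB.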
def shift_aud_frames_by_mic_delay(mic_onsets, aud_frames, vsync):
'''
Time aligns auditory stimulation onset times that are given in terms relative to monitor frames (ie auditory stimulation was
presented on frame 50-100) into accurate times for which they are played/heard (detected on a microphone).
Requires that the number of sound times presented is the same quantity as the number detected by the microphone
:param mic_onsets: auditory onsets detected by a microphone (see get_auditory_onset_times function) (seconds)
:param aud_frames: frames when the auditory stimulation was initiated (typically from pikl file) (frame #'s)
:param vsync: times of each monitor frame presentation on the same time base as the microphone (seconds)
:return: array of frame numbers that correspond to onset of the auditory stimulation being played
'''
#compare total number of auditory stims with the expected number of presentations.
if len(mic_onsets)==len(aud_frames):
#get the auditory stimulation time from the pickle file and convert it to a Vsync time
sound_frames=[]
for ii in range(len(aud_frames)):
#calculate the difference in time between the detection (microphone) and the presentation, in terms of seconds
dif=mic_onsets[ii]-vsync[aud_frames[ii]].astype(np.float32)
presented_time=vsync[aud_frames[ii]]+dif
#find the vysnc time that most closely matches the stimulation
index=np.argmin(np.abs(vsync-presented_time))
sound_frames.append(index)
#print ('time of presentation '+ str(vsync[aud_onsets[ii]]) + ' time of detection ' + str(sound_times[ii]))
sound_frames=np.array(sound_frames)
        print ('mean number of visual frames between presentation and detection is ' + str(np.mean(sound_frames-aud_frames)) + ' frames or ' + str(np.median(np.diff(vsync)) * 1000 * np.mean(sound_frames-aud_frames)) + ' milliseconds')
return sound_frames
else:
print ('Number of known auditory presentations '+str(len(aud_frames))+ ' does not equal those detected by microphone '+ str(len(mic_onsets)))
return
def stimulus_thresh_df (paths,data_key, thresh_signal, thresh, min_l, min_t,
before, after, baseline_period,response_period,min_time_between=False,use_dff=True,
other_signals=[],dff_baseline_dur=1., exclusion_sig='null',exclusion_thresh=0.,exclusion_dur=0.,exclusion_logic='exclude',
override_ends=False, use_per_thresh=False, sample_freq=30. ):
"""
:param paths: path to HDF5 files
:param data_key: key for the HDF5 to access the data type of interest
---Thresholding parameters
:param thresh_signal: the signal to threshold on
:param thresh: the threshold
:param min_l: the minimum amount of time the signal must go below the threshold to end a period
:param min_t: minimum time for a threshold period
:param min_time_between: the minimum amount that must be between the start of two epochs. Useful for finding epochs that occur in isolation from nearby other epochs.
---trace extraction parameters
:param before: amount of time before the threshold time to extract
:param after: amount of time after the threshold time to extract
:param baseline: how many seconds in the the 'before' period to calculate baseline periods from (used in DF/F calculations and others)
:param baseline: where the "baseline" should be calculated from in the trace (used in DF/F calculations and others) . Tuple of start time and end time for the baseline.
:param sample_t_after_thresh: when sampling the "response" start this far after the threshold crossing (0 = at the threshold). Set to string 'half' to sample 50% through the epoch's duration.
:param sample_dur_after_thresh:when sampling the "response" start from sample_t_after_thresh and go this many seconds ahead
"""
import os
import h5py
import pandas as pd
#create dataframe of all ROI responses for every running epoch
total_roi_counter=0
responses=[]
meaned_responses=[]
#check to make sure that the baseline is specified as a tuple and deal with instances where it isn't
if isinstance(baseline_period, (int, float)):
print ('the baseline period was specified as a single number, not a start and end time. Assuming start time is time 0 and end time of hte baseline is what is specified.')
baseline_period=(0,baseline_period)
for path in paths:
mouse_id=os.path.basename(path)[0:7]
print ('\n processing ' + str(mouse_id) + '\n')
data_f=h5py.File(path,'r')
data=data_f.get(data_key)
if use_per_thresh==True:
#first lowpass filter and calculate the median of the trace
median=np.nanmedian(sp.butter_lowpass_filter(data[thresh_signal], cutoff=1., analog=True))
threshold_per=median+(thresh*median)
thresh=threshold_per
if exclusion_sig=='null':
runs=sp.threshold_period(signal=data[thresh_signal], threshold=thresh,
min_low=min_l, sample_freq=30., min_time=min_t)
else:
print (exclusion_logic+' epochs where the '+ str(exclusion_sig) + ' is greater than '+ str(exclusion_thresh))
runs=sp.threshold_period(signal=data[thresh_signal], threshold=thresh,min_time_between=min_time_between,
min_low=min_l, sample_freq=30., min_time=min_t,exclusion_signal=data[exclusion_sig],
exclusion_dur=exclusion_dur,exclusion_logic=exclusion_logic,
exclusion_thresh=exclusion_thresh)
#check if no threshold crossing are found. If so, go to next file
if runs.size==0:
print (' No periods found for id '+ str(mouse_id))
continue
        #get the start times from the threshold_period output
starts=runs[:,0]
#take into account times where you want to get traces that start relative to the onset and you don't want to be concerned with their duration
if override_ends==False:
starts=runs[:,0]
ends=runs[:,1]
durs=runs[:,2]
elif isinstance(override_ends, (int, float)):
#if a number is passed to override the ends, determine the end of the periods by adding this number to the beginning
print ('Overiding detected durations and using USER-DEFINED durations')
starts=runs[:,0]
ends=starts+override_ends
durs=ends-starts
elif override_ends=='starts':
print ('setting the start times equal to the detected END TIMES!')
starts=runs[:,1]
ends=runs[:,1]
durs=(ends-starts)+1.
error_counter=0
#calculate the stimulus evoked dff for each roi
#loop for each ROI
for roi in range(len(data['axon_traces'])):
mean_onset_list=[]
mean_end_list=[]
mean_speed_list=[]
mean_delta_speed_list=[]
            #create a list to store the first portion of each trace, where there is always an epoch period
traces_onset=[]
            #create a more inclusive list to store the entire baseline, onset, and after periods for arbitrarily selecting regions for analysis
before_after_traces=[]
#determine unique ids for each roi and calculate area
roi_unique_id=mouse_id[-6::]+'_'+ str(0)+str(roi)
mask=data_f['masks']['axon_masks'][roi]
pixels=np.where(np.logical_and(mask!=0, ~np.isnan(mask)))
roi_area=np.shape(pixels)[0]*np.shape(pixels)[1]
#loop for each epoch
for xx in range(len(starts)):
runnings=sp.get_event_trig_avg_samples(data[thresh_signal],event_onset_times=starts[xx],
event_end_times=ends[xx],
sample_freq=sample_freq,
time_before=before,
time_after=after, verbose=False)
if response_period[1]=='half':
if override_ends==False:
response_period_end=response_period[0]+durs[xx]/2.
elif isinstance(override_ends, (int, float)):
# print ('Half duration is passed for end, but overriding durations: calculating duration from half the time of after')
response_period_end=response_period[0]+(after/2.)
else:
print ('major error')
else:
response_period_end=response_period[1]
baseline_indices=(int((baseline_period[0]*sample_freq)), int((baseline_period[1]*sample_freq)))
response_indices=(int((response_period[0]*sample_freq)), int((response_period_end*sample_freq)))
#get mean running_speed
baseline_speed=np.nanmean(runnings[baseline_indices[0]:baseline_indices[1]],axis=0)
mean_speed=np.nanmean(runnings[response_indices[0]:response_indices[1]],axis=0)
delta_speed=mean_speed-baseline_speed
                #produce an array that is composed of each ROI's DF/F epoch
axon_responses=sp.get_event_trig_avg_samples(data['axon_traces'][roi],event_onset_times=starts[xx],
event_end_times=ends[xx],
sample_freq=sample_freq,
time_before=before,
time_after=after, dff=use_dff,dff_baseline=(baseline_period[0], baseline_period[1]), verbose=False)
#check to make sure expected durations match returned trace durations
expected_dur=((ends[xx]-starts[xx])+before+after)
trace_dur_run=int(round(len(runnings)/30.))
trace_dur_axon=int(round(len(axon_responses)/30.))
dur_check=int( round( (ends[xx]-starts[xx]+before+after)*30.))
if len(axon_responses)!=dur_check:
if error_counter==0:
print ('Epoch # ' + str(xx) + ' Trace durations do not match expected duration: Likely due to not enough samples to grab. Skipping')
error_counter+=1
continue
if ((trace_dur_run!=int(round(expected_dur))) or (trace_dur_axon!= int(round(expected_dur))) ) :
if error_counter==0:
print ('Epoch # ' + str(xx) +'. Epoch length mismatch warning: Expected duration: ' + str(int(expected_dur)) + ' and trace duration '+ str(int(trace_dur_run)) + ' do not match ')
print ('skipping event/epoch')
error_counter+=1
continue
#get any other signals the user may want
others=[]
others_means=[]
for extras in other_signals:
#get the associated running trace
sig=sp.get_event_trig_avg_samples(data[extras],event_onset_times=starts[xx],
event_end_times=ends[xx],
sample_freq=sample_freq,
time_before=before,
time_after=after, verbose=False)
baseline_sig=np.nanmean(sig[baseline_indices[0]:baseline_indices[1]],axis=0)
mean_sig=np.nanmean(sig[response_indices[0]:response_indices[1]],axis=0)
#calculate in terms of percent change of baseline
delta_sig=(mean_sig-baseline_sig)/baseline_sig*100
onset_sig=sig[int(before*sample_freq)+1]
others.append(sig)
others_means.append([baseline_sig, onset_sig, mean_sig, delta_sig])
                #calculate a trace that MUST include the region between start and end. This is performed to allow for averaging of epochs that have different durations.
                #it will always produce a trace that contains the MINIMAL length response
end_of_eval_period_for_sig= int(round(((before+min_t)*sample_freq)))
onset_trace=axon_responses[0:end_of_eval_period_for_sig+1]
traces_onset.append(onset_trace)
                #calculate a trace that includes the baseline period, the onset, and the amount of time after. used in calculation of significance for an ROI
before_after_trace=axon_responses[0:int((before+after)*sample_freq)]
before_after_traces.append(before_after_trace)
#get the DF at the threshold crossing
onset_df=axon_responses[int(before*sample_freq)+1]
#end_index=int(ends[xx]*sample_freq)
end_index=int((before*sample_freq)+(durs[xx]*sample_freq)-1)
end_df=axon_responses[end_index]
mean_df=np.nanmean(axon_responses[response_indices[0]:response_indices[1]],axis=0)
#append to list: roi number, mouse_id, epoch number,
#start_time, end_time, duration, axon response array (DF),
#mean df_f responses at user-define time, running array, mean_speed
sublist=[roi_unique_id,mouse_id, xx, starts[xx],ends[xx],durs[xx],
axon_responses, onset_df, mean_df,end_df,
runnings,mean_speed,delta_speed,roi_area,total_roi_counter]
for yy in range(len(others)):
sublist.append(others[yy])
#baseline_sig
sublist.append(others_means[yy][0])
                    #onset_sig
sublist.append(others_means[yy][1])
#mean_sig
sublist.append(others_means[yy][2])
                    #delta_sig
                    sublist.append(others_means[yy][3])
responses.append(sublist)
mean_onset_list.append(onset_df)
mean_end_list.append(end_df)
mean_speed_list.append(mean_speed)
mean_delta_speed_list.append(delta_speed)
#get the mean trace from the onset and beginning of thresholded region
mean_onset_trace=np.nanmean(traces_onset,axis=0)
#determine if the average response for the ROI is significant
            #12_6 change: allow significance to be calculated from arbitrary regions across the entire baseline and after periods, not just the consistent response window
            #therefore use the mean of the full before/after traces rather than only the onset-aligned traces
before_after_mean=np.nanmean(before_after_traces,axis=0)
pvalue=sp.significant_response(before_after_mean, base_period=(baseline_period[0],baseline_period[1]), stim_period=(response_period[0],response_period_end), sample_freq=30.)
if pvalue < 0.05:
significant=True
else:
significant=False
mean_onset_df_roi=np.nanmean(np.asarray(mean_onset_list),axis=0)
mean_end_df_roi=np.nanmean(np.asarray(mean_end_list), axis=0)
mean_speed_roi=np.nanmean(np.asarray(mean_speed_list),axis=0)
mean_delta_speed_roi=np.nanmean(np.asarray(mean_delta_speed_list),axis=0)
meaned_responses.append([roi_unique_id, mouse_id,pvalue,significant, mean_onset_df_roi,mean_end_df_roi,
mean_speed_roi,mean_delta_speed_roi,total_roi_counter,before_after_mean,mean_onset_trace])
total_roi_counter+=1
column_names=['roi id','mouse_ID','epoch number', 'start time', 'end time', 'duration',
'axon trace', 'onset df', 'peak df', 'end df',
'threshold signal trace', 'peak thresh value', 'delta of thresh trace', 'ROI area','roi number']
for names in other_signals:
column_names.append(names)
column_names.append(str(names) + ' baseline sig')
column_names.append(str(names) + ' onset sig')
column_names.append(str(names) + ' peak sig')
column_names.append(str(names) + ' delta % sig')
df=pd.DataFrame(responses,columns=column_names)
df_mean=pd.DataFrame(meaned_responses,columns=['roi id','mouse_ID','p value', 'significant mean resp', 'mean onset df', 'mean end df',
'mean thresh signal', 'mean delta thresh signal', 'roi number','mean trace', 'mean baseline and onset trace'])
#add whether the mean response is significant to the df mean
mean_sigs=[]
for index, row in df.iterrows():
roi_num=df['roi number'][index]
#get whether it is significant on average
mean_p=float(df_mean.loc[(df_mean['roi number']==roi_num)]['p value'])
if mean_p < 0.05:
significant=True
else:
significant=False
mean_sigs.append([mean_p, bool(significant)])
df_sig_responses= | pd.DataFrame(mean_sigs, columns=['mean p value', 'mean sig']) | pandas.DataFrame |
from pythcat import pythcat
import pandas as pd
import numpy as np
import pytest
def test_repwithna():
"""
tests whether exceptions are correctly raised
and outputs are correctly returned.
"""
# test cases
df_1 = pd.DataFrame([['to? ', '%@#$!!']])
df_2 = | pd.DataFrame([['to? ', ' ']]) | pandas.DataFrame |
import os
import tempfile
import time
import pandas as pd
from tinytable.serialization.experiment_constants import *
class Serializer:
def __init__(self, loaders=dict()):
self._loaders = loaders
def register_loader(self, name, loader_class):
self._loaders[name] = loader_class
def serialize(self, df, n_trials=1, sample_fraction=None, show_result=False):
if sample_fraction is not None:
df = df.sample(frac=sample_fraction)
results = []
for trial in range(n_trials):
for loader_name, loader_class in self._loaders.items():
print(f"Serializing {loader_name}...")
size, write, load = self._serialize_dataset(df, loader_class)
metric_map = {
METRIC_FILE_SIZE: size,
METRIC_WRITE_TIME: write,
METRIC_LOAD_TIME: load
}
for metric, score in metric_map.items():
result = {COL_TRIAL: trial, COL_FILE_FORMAT: loader_name,
COL_METRIC: metric, COL_SCORE: score}
if show_result:
print(f"\tResult: {result}")
results.append(result)
return | pd.DataFrame(results) | pandas.DataFrame |
import pandas as pd
import requests
import os
from PIL import Image
import numpy as np
class CognicityLoader:
""" Loads data from the cognicity database defined in
config constructor argument
"""
def __init__(self, configObj):
""" Creates a data loader for cognicity data
Args:
            configObj (dict):
                database_engine: a sqlalchemy database engine
                database_name: name of the cognicity schema to query
location: a location string, one of "id" or "ch"
data_folder_prefix: path for folder to store downloaded images
logger: a python logging object
Returns:
None
"""
self.config = configObj
self.database = configObj["database_engine"]
self.database_name = configObj["database_name"]
self.location = configObj["location"]
self.data_folder_prefix = configObj["data_folder_prefix"]
self.logger = configObj["logger"]
self.logger.debug("CognicityLoader constructed")
if not os.path.exists(self.data_folder_prefix):
os.makedirs(self.data_folder_prefix)
self.logger.debug(
"data folder doesn't exist, created path:" +
self.data_folder_prefix)
# make the img directory as well
folder_path = os.path.join(self.data_folder_prefix, self.location)
self.image_path = folder_path
if not os.path.exists(self.image_path):
self.logger.debug("Creating img folder: " + folder_path)
os.mkdir(self.image_path)
def get_image_urls(self):
"""
returns dictionary of {pkey: image_url} for
all rows in db that have an image url
"""
# TODO: this does an unsafe string concatenation and the
# configObj.database_name is vulnerable to SQL injection attacks,
# as such this code should only be run with trusted config files
# on trusted hardware.
connection = self.database.connect()
rows = pd.read_sql_query(
"""
SELECT pkey, image_url FROM """ + self.database_name + """.all_reports
WHERE image_url IS NOT null
ORDER BY created_at
""",
con=connection,
params={"database_name": self.database_name},
index_col="pkey")
return rows.to_dict()["image_url"]
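    # Note on the injection warning above: schema/table identifiers cannot be bound
    # as SQL parameters, so a common mitigation (sketched here as an assumption, not
    # something this loader currently does) is to validate the configured name before
    # interpolating it:
    #
    #   import re
    #   if not re.fullmatch(r"[A-Za-z_][A-Za-z0-9_]*", self.database_name):
    #       raise ValueError("unsafe database name: %r" % self.database_name)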
def get_texts(self):
"""
Returns:
pandas dataframe of pkey to all the reports in the database
"""
rows = | pd.read_sql_query('''
SELECT pkey, text from ''' + self.database_name + '''.all_reports
WHERE text IS NOT null
AND LENGTH(text) > 0
AND text NOT SIMILAR To '%%(T|t)(E|e)(S|s)(T|t)%%'
ORDER BY created_at
''', con=self.database, index_col="pkey") | pandas.read_sql_query |
# -*- coding: utf-8 -*-
import sys
import os
import pandas as pd
PROJECT_ID = "dots-stock" # @param {type:"string"}
REGION = "us-central1" # @param {type:"string"}
USER = "shkim01" # <---CHANGE THIS
BUCKET_NAME = "gs://pipeline-dots-stock" # @param {type:"string"}
PIPELINE_ROOT = f"{BUCKET_NAME}/pipeline_root/{USER}"
from typing import NamedTuple
from kfp import dsl
from kfp.v2 import compiler
from kfp.v2.dsl import (Artifact,
Dataset,
Input,
Model,
Output,
Metrics,
ClassificationMetrics,
component)
from kfp.v2.google.client import AIPlatformClient
@component(
base_image="gcr.io/dots-stock/python-img-v5.2",
)
def get_market_info(
# top30_univ_dataset: Output[Dataset],
market_info_dataset: Output[Dataset],
today: str,
n_days: int
) -> str:
import pandas as pd
from pandas.tseries.offsets import CustomBusinessDay
from trading_calendars import get_calendar
import functools
import pickle
import logging
import networkx as nx
import os
from sqlalchemy import create_engine
# today = pd.Timestamp.now('Asia/Seoul').strftime('%Y%m%d')
# today = '20210809'
cal_KRX = get_calendar('XKRX')
custombd_KRX = CustomBusinessDay(holidays=cal_KRX.precomputed_holidays)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# console handler
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
# Preference
#-----------------------------------------------------------------------------
AWS_DB_ID = 'gb_master'
AWS_DB_PWD = 'qwert12345'
AWS_DB_ADDRESS = 'kwdb-daily.cf6e7v8fhede.ap-northeast-2.rds.amazonaws.com'
AWS_DB_PORT = '3306'
DB_DATABASE_NAME_daily_naver = 'daily_naver'
PROJECT_ID = 'dots-stock'
db_daily_naver_con = create_engine('mysql+pymysql://{0}:{1}@{2}:{3}/{4}?charset=utf8'
.format(AWS_DB_ID, AWS_DB_PWD, AWS_DB_ADDRESS, AWS_DB_PORT, DB_DATABASE_NAME_daily_naver),
encoding='utf8',
echo=False)
# @functools.lru_cache()
def get_market_from_naver_aws(date_ref):
'''
        Fetch the parsed daily all-stock market data, as stored, from the daily_naver DB
'''
with db_daily_naver_con.connect() as conn:
table_name = f'{date_ref}_daily_allstock_naver'
str_sql = f'select * from {table_name} order by 등락률 DESC'
df = pd.read_sql_query(str_sql, conn) # self.get_db_daily_naver_con())
df = df.reset_index().rename(columns={'index':'순위_상승률', 'N':'순위_시가총액'})
df['순위_상승률'] = df.순위_상승률 + 1
return df
def get_krx_on_dates_n_days_ago(date_ref, n_days=20):
return [date.strftime('%Y%m%d')
for date in pd.bdate_range(
end=date_ref, freq='C', periods=n_days,
holidays=cal_KRX.precomputed_holidays) ]
# 1. Market data
#------------------------------------------------------------------------------
def get_markets_aws(date_ref, n_days):
dates_n_days_ago = get_krx_on_dates_n_days_ago(date_ref, n_days)
df_market = pd.DataFrame()
for date in dates_n_days_ago:
df_ = get_market_from_naver_aws(date)
# logger.debug(f'date : {date} and df_.shape {df_.shape}' )
df_market = df_market.append(df_)
return df_market
df_market = get_markets_aws(date_ref=today, n_days=n_days)
df_market.to_csv(market_info_dataset.path)
return today
@component(
base_image="gcr.io/dots-stock/python-img-v5.2"
)
def get_base_item(
market_info_dataset: Input[Dataset],
base_item_dataset: Output[Dataset]
):
import pandas as pd
# helper function
def get_top30_list(df_market):
cols_out = ['날짜','종목코드','종목명']
return (df_market
.sort_values(['날짜','등락률'], ascending=False)
.groupby('날짜')
.head(30)[cols_out])
df_market = pd.read_csv(market_info_dataset.path)
df_base_item = get_top30_list(df_market)
df_base_item.to_csv(base_item_dataset.path)
@component(
base_image="gcr.io/dots-stock/python-img-v5.2"
)
def get_bros(
today: str,
n_days: int,
bros_univ_dataset: Output[Dataset]
) -> str :
'''
Returns:
        str: 'OK' once the bros edge list has been written to bros_univ_dataset
'''
import pandas as pd
import pandas_gbq
import networkx as nx
from trading_calendars import get_calendar
PROJECT_ID = 'dots-stock'
cal_KRX = get_calendar('XKRX')
# helper functions
#-----------------------------------------------------------------------------
def get_krx_on_dates_n_days_ago(date_ref, n_days=20):
return [date.strftime('%Y%m%d')
for date in pd.bdate_range(
end=date_ref, freq='C', periods=n_days,
holidays=cal_KRX.precomputed_holidays) ]
def get_corr_pairs_gbq(date_ref, period):
date_ref_ = pd.Timestamp(date_ref).strftime('%Y-%m-%d')
sql = f'''
SELECT
DISTINCT source,
target,
corr_value,
period,
date
FROM
`dots-stock.krx_dataset.corr_ohlc_part1`
WHERE
date = "{date_ref_}"
AND period = {period}
ORDER BY
corr_value DESC
LIMIT
1000'''
df = pandas_gbq.read_gbq(sql, project_id=PROJECT_ID)
return df
def find_bros(date_ref, period):
        '''return the edge list covering cliques with 3 or more nodes'''
df_edgelist = get_corr_pairs_gbq(date_ref, period)
g = nx.from_pandas_edgelist(df_edgelist, edge_attr=True)
bros_ = nx.find_cliques(g)
bros_3 = [bros for bros in bros_ if len(bros) >=3]
set_bros = set([i for l_i in bros_3 for i in l_i])
g_gang = g.subgraph(set_bros)
df_gangs_edgelist = nx.to_pandas_edgelist(g_gang)
return df_gangs_edgelist
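    # Tiny illustration of the clique filter above (hypothetical tickers): with
    # correlated pairs A-B, B-C, A-C and C-D, the maximal cliques are {A, B, C}
    # and {C, D}; only the clique with 3+ members survives, so the returned edge
    # list covers A, B and C while D is dropped.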
def find_gang(date_ref):
df_gang = pd.DataFrame()
for period in [20, 40, 60, 90, 120]:
            df_ = find_bros(date_ref, period=period)
df_gang = df_gang.append(df_)
return df_gang
# jobs
dates = get_krx_on_dates_n_days_ago(date_ref=today, n_days=n_days)
df_bros = pd.DataFrame()
for date in dates:
df = find_gang(date_ref=date)
df_bros = df_bros.append(df)
df_bros.to_csv(bros_univ_dataset.path)
return 'OK'
@component(
base_image="amancevice/pandas:1.3.2-slim"
)
def get_univ_for_price(
# date_ref: str,
base_item_dataset: Input[Dataset],
bros_dataset: Input[Dataset],
univ_dataset: Output[Dataset],
):
import pandas as pd
import logging
import json
logger = logging.getLogger(__name__)
FORMAT = "[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s"
logging.basicConfig(format=FORMAT)
logger.setLevel(logging.DEBUG)
# base item
df_top30s = pd.read_csv(base_item_dataset.path,
index_col=0,
dtype={'날짜': str}).reset_index(drop=True)
# load edge_list to make bros
df_ed = pd.read_csv(bros_dataset.path, index_col=0).reset_index(drop=True)
df_ed_r = df_ed.copy()
df_ed_r.rename(columns={'target':'source', 'source':'target'}, inplace=True)
df_ed2 = df_ed.append(df_ed_r, ignore_index=True)
df_ed2['date'] = pd.to_datetime(df_ed2.date).dt.strftime('%Y%m%d')
dic_univ = {}
for date, df in df_top30s.groupby('날짜'):
logger.debug(f'date: {date}')
l_top30 = df.종목코드.to_list()
l_bro = df_ed2[(df_ed2.date == date) &
(df_ed2.source.isin(l_top30))].target.unique().tolist()
dic_univ[date] = list(set(l_top30 + l_bro ))
with open(univ_dataset.path, 'w', encoding='utf8') as f:
json.dump(dic_univ, f)
@component(
base_image="gcr.io/dots-stock/python-img-v5.2",
# packages_to_install = ["tables", "pandas_gbq", "finance-datareader", "bs4", "pickle5"] # add 20210715 FIX pipeline
)
def get_adj_prices(
today: str,
dic_univ_dataset: Input[Dataset],
adj_price_dataset: Output[Dataset]
) -> str:
import json
import FinanceDataReader as fdr
from ae_module.ae_logger import ae_log
import pandas as pd
# with open(dic_univ_dataset.path, 'rb') as f:
# dic_univ = pickle.load(f)
with open(dic_univ_dataset.path, 'r') as f:
dic_univ = json.load(f)
codes_stock = []
for v in dic_univ.values():
codes_stock.extend(v)
# drop duplicates
codes_stock = list(set(codes_stock))
def get_price_adj(code, start, end):
return fdr.DataReader(code, start=start, end=end)
def get_price(l_univ, date_start, date_end):
df_price = pd.DataFrame()
for code in l_univ :
df_ = get_price_adj(code, date_start, date_end)
df_['code'] = code
# df_['price'] = df_['Close'] / df_.Close.iloc[0]
df_price = df_price.append(df_)
return df_price
ae_log.debug(f'codes_stock {codes_stock.__len__()}')
date_start = '20210101'
date_end = today
df_adj_price = get_price(codes_stock, date_start=date_start, date_end=date_end)
df_adj_price.to_csv(adj_price_dataset.path)
ae_log.debug(df_adj_price.shape)
return 'good'
@component(
# base_image="gcr.io/deeplearning-platform-release/sklearn-cpu"
base_image="amancevice/pandas:1.3.2-slim"
)
def get_target(
df_price_dataset: Input[Dataset],
df_target_dataset: Output[Dataset]
):
import pandas as pd
import numpy as np
def make_target(df):
df_ = df.copy()
df_.sort_values(by='date', inplace=True)
df_['high_p1'] = df_.high.shift(-1)
df_['high_p2'] = df_.high.shift(-2)
df_['high_p3'] = df_.high.shift(-3)
df_['close_p1'] = df_.close.shift(-1)
df_['close_p2'] = df_.close.shift(-2)
df_['close_p3'] = df_.close.shift(-3)
df_['change_p1'] = (df_.close_p1 - df_.close) / df_.close
df_['change_p2'] = (df_.close_p2 - df_.close) / df_.close
df_['change_p3'] = (df_.close_p3 - df_.close) / df_.close
df_['change_p1_over5'] = df_['change_p1'] > 0.05
df_['change_p2_over5'] = df_['change_p2'] > 0.05
df_['change_p3_over5'] = df_['change_p3'] > 0.05
df_['change_p1_over10'] = df_['change_p1'] > 0.1
df_['change_p2_over10'] = df_['change_p2'] > 0.1
df_['change_p3_over10'] = df_['change_p3'] > 0.1
df_['close_high_1'] = (df_.high_p1 - df_.close) / df_.close
df_['close_high_2'] = (df_.high_p2 - df_.close) / df_.close
df_['close_high_3'] = (df_.high_p3 - df_.close) / df_.close
df_['close_high_1_over10'] = df_['close_high_1'] > 0.1
df_['close_high_2_over10'] = df_['close_high_2'] > 0.1
df_['close_high_3_over10'] = df_['close_high_3'] > 0.1
df_['close_high_1_over5'] = df_['close_high_1'] > 0.05
df_['close_high_2_over5'] = df_['close_high_2'] > 0.05
df_['close_high_3_over5'] = df_['close_high_3'] > 0.05
df_['target_over10'] = np.logical_or.reduce([
df_.close_high_1_over10,
df_.close_high_2_over10,
df_.close_high_3_over10])
df_['target_over5'] = np.logical_or.reduce([
df_.close_high_1_over5,
df_.close_high_2_over5,
df_.close_high_3_over5])
df_['target_close_over_10'] = np.logical_or.reduce([
df_.change_p1_over10,
df_.change_p2_over10,
df_.change_p3_over10])
df_['target_close_over_5'] = np.logical_or.reduce([
df_.change_p1_over5,
df_.change_p2_over5,
df_.change_p3_over5])
df_['target_mclass_close_over10_under5'] = \
np.where(df_['change_p1'] > 0.1,
1, np.where(df_['change_p1'] > -0.05, 0, -1))
df_['target_mclass_close_p2_over10_under5'] = \
np.where(df_['change_p2'] > 0.1,
1, np.where(df_['change_p2'] > -0.05, 0, -1))
df_['target_mclass_close_p3_over10_under5'] = \
np.where(df_['change_p3'] > 0.1,
1, np.where(df_['change_p3'] > -0.05, 0, -1))
df_.dropna(subset=['high_p3'], inplace=True)
return df_
def get_target_df(df_price):
df_price.reset_index(inplace=True)
df_price.columns = df_price.columns.str.lower()
df_target = df_price.groupby('code').apply(lambda df: make_target(df))
df_target = df_target.reset_index(drop=True)
# df_target['date'] = df_target.date.str.replace('-', '')
return df_target
df_price = pd.read_csv(df_price_dataset.path)
df_target = get_target_df(df_price=df_price)
df_target.to_csv(df_target_dataset.path)
@component(
base_image="gcr.io/deeplearning-platform-release/sklearn-cpu",
packages_to_install=["stockstats"]
)
def get_techindi(
df_price_dataset: Input[Dataset],
df_techini_dataset: Output[Dataset]
):
TECHNICAL_INDICATORS_LIST = ['macd',
'boll_ub',
'boll_lb',
'rsi_30',
'dx_30',
'close_30_sma',
'close_60_sma']
from stockstats import StockDataFrame as Sdf
from sklearn.preprocessing import MaxAbsScaler
import pandas as pd
class FeatureEngineer:
"""Provides methods for preprocessing the stock price data
Attributes
----------
use_technical_indicator : boolean
            whether to use technical indicators or not
tech_indicator_list : list
a list of technical indicator names (modified from config.py)
use_turbulence : boolean
use turbulence index or not
        user_defined_feature : boolean
            whether to add user defined features or not
Methods
-------
preprocess_data()
main method to do the feature engineering
"""
def __init__(
self,
use_technical_indicator=True,
tech_indicator_list=TECHNICAL_INDICATORS_LIST,
user_defined_feature=False,
):
self.use_technical_indicator = use_technical_indicator
self.tech_indicator_list = tech_indicator_list
self.user_defined_feature = user_defined_feature
def preprocess_data(self, df):
"""main method to do the feature engineering
            :param df: source dataframe
            :return: the dataframe with the requested features added
"""
#clean data
df = self.clean_data(df)
# add technical indicators using stockstats
if self.use_technical_indicator == True:
df = self.add_technical_indicator(df)
print("Successfully added technical indicators")
# add user defined feature
if self.user_defined_feature == True:
df = self.add_user_defined_feature(df)
print("Successfully added user defined features")
# fill the missing values at the beginning and the end
df = df.fillna(method="bfill").fillna(method="ffill")
return df
def clean_data(self, data):
"""
clean the raw data
deal with missing values
reasons: stocks could be delisted, not incorporated at the time step
:param data: (df) pandas dataframe
:return: (df) pandas dataframe
"""
df = data.copy()
df=df.sort_values(['date','tic'],ignore_index=True)
df.index = df.date.factorize()[0]
merged_closes = df.pivot_table(index = 'date',columns = 'tic', values = 'close')
merged_closes = merged_closes.dropna(axis=1)
tics = merged_closes.columns
df = df[df.tic.isin(tics)]
return df
def add_technical_indicator(self, data):
"""
calculate technical indicators
            use stockstats package to add technical indicators
:param data: (df) pandas dataframe
:return: (df) pandas dataframe
"""
df = data.copy()
df = df.sort_values(by=['tic','date'])
stock = Sdf.retype(df.copy())
unique_ticker = stock.tic.unique()
for indicator in self.tech_indicator_list:
indicator_df = pd.DataFrame()
for i in range(len(unique_ticker)):
try:
temp_indicator = stock[stock.tic == unique_ticker[i]][indicator]
temp_indicator = pd.DataFrame(temp_indicator)
temp_indicator['tic'] = unique_ticker[i]
temp_indicator['date'] = df[df.tic == unique_ticker[i]]['date'].to_list()
indicator_df = indicator_df.append(
temp_indicator, ignore_index=True
)
except Exception as e:
print(e)
df = df.merge(indicator_df[['tic','date',indicator]],on=['tic','date'],how='left')
df = df.sort_values(by=['date','tic'])
return df
def add_user_defined_feature(self, data):
"""
add user defined features
:param data: (df) pandas dataframe
:return: (df) pandas dataframe
"""
df = data.copy()
df["daily_return"] = df.close.pct_change(1)
df['bb_u_ratio'] = df.boll_ub / df.close
df['bb_l_ratio'] = df.boll_lb / df.close
df['max_scale_MACD'] = MaxAbsScaler().fit_transform(df[['macd']])
# df['return_lag_1']=df.close.pct_change(2)
# df['return_lag_2']=df.close.pct_change(3)
# df['return_lag_3']=df.close.pct_change(4)
# df['return_lag_4']=df.close.pct_change(5)
return df
df_price = pd.read_csv(df_price_dataset.path)
df_price.columns = df_price.columns.str.lower()
df_price.rename(columns={'code':'tic'}, inplace=True)
fe = FeatureEngineer(user_defined_feature=True)
df_process = fe.preprocess_data(df_price)
df_process.rename(columns={'tic':'code'}, inplace=True)
df_process.to_csv(df_techini_dataset.path)
@component(
base_image="gcr.io/dots-stock/python-img-v5.2",
# packages_to_install = ["tables", "pandas_gbq", "finance-datareader", "bs4", "pickle5"] # add 20210715 FIX pipeline
)
def get_features(
# today: str,
dic_univ_dataset: Input[Dataset],
market_info_dataset: Input[Dataset],
bros_dataset: Input[Dataset],
base_item_dataset : Input[Dataset],
features_dataset: Output[Dataset]
):
import json
# import FinanceDataReader as fdr
# from ae_module.ae_logger import ae_log
import pandas as pd
import numpy as np
from collections import Counter
from pandas.tseries.offsets import CustomBusinessDay
from trading_calendars import get_calendar
cal_KRX = get_calendar('XKRX')
custombd_KRX = | CustomBusinessDay(holidays=cal_KRX.precomputed_holidays) | pandas.tseries.offsets.CustomBusinessDay |
from ui.SplitUI import *
import pathlib
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog, QMessageBox
import pandas as pd
import tqdm
import math
import traceback
from loguru import logger
import arrow
class Split(QMainWindow):
def showBox(self, msg):
QMessageBox.information(self, '提示', msg, QMessageBox.Yes, QMessageBox.Yes)
def showErrorBox(self, msg):
        QMessageBox.warning(self, '有问题', msg, QMessageBox.Yes, QMessageBox.Yes)
def __init__(self):
super().__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.setWindowTitle('Excel Utils for choose v3.5 by zhuolx')
self.ui.loadButton.clicked.connect(self.__loadExcel)
self.ui.sheetComboBox.activated[str].connect(self.__loadSheets)
self.ui.splitButton.clicked.connect(self.__splitFields)
self.ui.openButton.clicked.connect(self.__openFileDialog)
        self.ui.openButton_2.clicked.connect(self.__openFeeFile)  # open the shipping-fee folder dialog
self.ui.feeLoadButton.clicked.connect(self.__loadFeeExcel)
self.ui.totalCalculate.clicked.connect(self.__calTotalFee)
        self.ui.mergeNumberButton.clicked.connect(self.__mergeDh)  # merge product codes by tracking number
self.__weighField = ''
self.__addressField = ''
self.__df = None
self.__dfs = None
        self.__feeDfs = None  # shipping fee schedule
        self.__paperBoxFeeDfs = None  # packaging material fees
        self.__packFeeDfs = None  # packing (labour) fees
        self.__valueAddDfs = None  # value-added surcharges
self.INF = 9999999
def __openFileDialog(self):
files, _ = QFileDialog.getOpenFileName(self, '选择打开文件', '.', 'All Files (*);;Excel (*.xls)')
self.ui.lineEdit.setText(files)
def __openFeeFile(self):
dire = QFileDialog.getExistingDirectory(self, '打开文件夹', '.')
self.ui.feeFilePathEdit.setText(dire)
def __splitFields(self):
total = pd.DataFrame()
        dhName = self.ui.snComboBox.currentText()  # tracking-number column name
        zyName = self.ui.thingComboBox.currentText()  # item-summary column name
with tqdm.tqdm(total=self.__df.shape[0]) as countBar:
for idx, row in self.__df.iterrows():
dh = row[dhName]
zy = row[zyName]
lis = zy.split(',')
curRes = []
for l in lis:
curRes.append({'原始单号': dh, '货品': l[:l.rfind('*')], '数量': l[l.rfind('*') + 1:]})
total = total.append(curRes, ignore_index=True)
countBar.update(1)
total.to_excel('res.xlsx', index=False)
QMessageBox.information(self, '成功', '文件已生成', QMessageBox.Yes, QMessageBox.Yes)
def __loadSheets(self):
sht = self.ui.sheetComboBox.currentText()
self.__df = self.__dfs[sht].fillna('')
self.ui.snComboBox.clear()
self.ui.thingComboBox.clear()
self.ui.weighComboBox.clear()
self.ui.addressComboBox.clear()
self.ui.snComboBox.addItems(self.__df.columns)
self.ui.thingComboBox.addItems(self.__df.columns)
self.ui.weighComboBox.addItems(self.__df.columns)
self.ui.addressComboBox.addItems(self.__df.columns)
def __loadExcel(self):
try:
path = self.ui.lineEdit.text()
self.__dfs: dict = pd.read_excel(path, dtype={'订单编号': str, '原始单号': str, '纸箱型号': str}, sheet_name=None)
self.ui.sheetComboBox.addItems(list(self.__dfs.keys()))
except FileNotFoundError as e:
QMessageBox.information(self, '错误', '文件不存在!', QMessageBox.Yes, QMessageBox.Yes)
def __loadFeeExcel(self):
'''
        Load the shipping, packaging-material, packing and value-added fee workbooks
:return:
'''
path = self.ui.feeFilePathEdit.text()
self.__feeDfs = pd.read_excel(pathlib.Path(path).joinpath('运费.xlsx'),sheet_name=None, dtype=str)
self.__paperBoxFeeDfs = pd.read_excel(pathlib.Path(path).joinpath('包材费.xlsx'), sheet_name=None, dtype=str)
self.__packFeeDfs = pd.read_excel(pathlib.Path(path).joinpath('打包费.xlsx'), sheet_name=None, dtype=str)
self.__valueAddDfs = pd.read_excel(pathlib.Path(path).joinpath('增值服务费.xlsx'), sheet_name=None, dtype=str)
QMessageBox.information(self, '成功', '四个费用已导入', QMessageBox.Yes, QMessageBox.Yes)
logger.success('费用导入成功...')
def __calPaperBoxFee(self, row):
'''
        Calculate the packaging-material (box) fee
        :param row: the row being processed
:return:
'''
try:
curFeeDf = self.__paperBoxFeeDfs[row['仓库']]
ntype = row['纸箱型号']
return float(curFeeDf[curFeeDf['类型'] == ntype]['费用'].values[0])
except Exception as e:
traceback.print_exc()
self.showBox('可能存在型号为空的数据')
return self.INF
def __calDeliveryFee(self, row) -> float:
'''
        Value-added delivery surcharge: e.g. Beijing adds 1 yuan, Shanghai adds 0.5 yuan
:return:
'''
try:
curFeeDf = self.__valueAddDfs[row['仓库']]
fee = 0
add = row[self.__addressField].split()[0]
if add in curFeeDf.columns:
fee = curFeeDf[add].values[0]
return float(fee)
except Exception as e:
traceback.print_exc()
return self.INF
def __calPackFee(self, row):
'''
打包费
0-1 0.2
1-2 0.3
2-3 0.4
3-20 0.5
4件以上 每件加0.1
:return:
'''
try:
curFeeDf = self.__packFeeDfs[row['仓库']]
cur = 0
wei = math.ceil(float(row[self.ui.weighComboBox.currentText()]))
quantity = int(row['货品数量'])
for gap in curFeeDf.columns:
if eval(gap):
info = eval(curFeeDf[gap].values[0])
cur += info
return cur
except Exception as e:
traceback.print_exc()
return self.INF
def __calTransFee(self, row, addressField, weighField) -> int:
'''
        Calculate the courier (shipping) fee
        :param row: the row currently being processed
        :param addressField: name of the address column
        :param weighField: name of the weight column
        :return: shipping fee
'''
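        # The fee workbook layout is inferred from the eval() calls below (an
        # assumption, not documented anywhere): each sheet is named after a
        # warehouse, its '地区' column holds the destination region (or the courier
        # name on the Dongguan sheet), the remaining column headers are Python
        # boolean expressions over the weight variable 'wei' (e.g. "wei<=1"), and
        # each cell holds an expression that evaluates to the fee in yuan.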
if not self.__feeDfs:
self.showBox('运费文件为空,是不是忘了点载入了?')
raise Exception('运费为空')
add = row[addressField].split()
        if not add:  # some rows have an empty address
return self.INF
else:
add = add[0][:2]
# logger.info(row[addressField])
area = row['仓库']
expressCompany = row['物流公司']
if '中通' in expressCompany:
expressCompany = '中通'
elif '极兔' in expressCompany:
expressCompany = '极兔'
else:
expressCompany = '百世'
curFeeDf = self.__feeDfs[area].set_index('地区')
try:
wei = float(row[weighField])
            # Dongguan warehouse: Xinjiang is a flat 12 yuan
if math.isnan(wei):
fee = 0
else:
for gap in curFeeDf:
if eval(gap):
if add in curFeeDf.index:
info = curFeeDf.loc[add, gap]
elif expressCompany in curFeeDf.index:
                            info = curFeeDf.loc[expressCompany, gap]  # the Dongguan sheet charges by courier company
else:
self.showErrorBox('找不到地址对应的运费')
raise Exception('计算出错')
fee = eval(info)
return fee
except Exception as e:
QMessageBox.information(self, '失败', '计算运费出现问题', QMessageBox.Yes, QMessageBox.Yes)
traceback.print_exc()
return self.INF
def __calTotalFee(self):
'''
        Calculate the total fees; the detailed pricing rules live in the Excel workbooks
:return:
'''
self.__weighField = self.ui.weighComboBox.currentText()
self.__addressField = self.ui.addressComboBox.currentText()
l1 = []
l2 = []
l3 = []
l4 = []
ntotal = self.__df.shape[0]
bar = tqdm.tqdm(total=ntotal, ncols=200)
try:
for _, row in self.__df.iterrows():
transFee = paperBoxFee = packFee = valueAddFee = '-'
if self.ui.isCalTransFee.isChecked():
transFee = self.__calTransFee(row, self.__addressField, self.__weighField)
if self.ui.isCalPaperBoxFee.isChecked():
paperBoxFee = self.__calPaperBoxFee(row)
if self.ui.isCalPackFee.isChecked():
packFee = self.__calPackFee(row)
if self.ui.isCalValueAddFee.isChecked():
valueAddFee = self.__calDeliveryFee(row)
l1.append(transFee)
l2.append(paperBoxFee)
l3.append(packFee)
l4.append(valueAddFee)
bar.update(1)
apDf = pd.DataFrame({'运费计算': l1, '包材费计算': l2, '打包费计算': l3, '增值服务计算': l4})
self.__df = pd.concat([self.__df, apDf], axis=1)
except Exception as e:
traceback.print_exc()
return
finally:
bar.close()
try:
self.__df.to_excel(f'终极生成-{arrow.now().minute}.xlsx', index=False)
except Exception as e:
QMessageBox.information(self, '失败', '文件写入失败', QMessageBox.Yes, QMessageBox.Yes)
return
QMessageBox.information(self, '成功', '运费已计算完成', QMessageBox.Yes, QMessageBox.Yes)
def __mergeDh(self):
'''
        Merge item names and product codes per tracking number
:return:
'''
try:
c = self.__df.columns
except Exception as e:
QMessageBox.information(self, '错误', '要点一下需要加载的 Sheet!', QMessageBox.Yes, QMessageBox.Yes)
return
if '商品名称' not in c or '商品编码' not in c or '数量' not in c or '快递单号' not in c:
QMessageBox.information(self, '错误', '请检查表内是否有「商品名称」、「商品编码」、「快递单号」、「数量」字段!', QMessageBox.Yes, QMessageBox.Yes)
return
gps = self.__df.groupby('快递单号')
totalDf = []
for dh, curDf in gps:
            # cast the quantity to str so the concatenation also works when the
            # column was read in as a numeric dtype
            curDf['合计'] = curDf['商品名称'] + '*' + curDf['数量'].astype(str)
            curDf['合计编码'] = curDf['商品编码'] + '*' + curDf['数量'].astype(str)
names = ','.join(curDf['合计'].values)
no = ','.join(curDf['合计编码'].values)
totalDf.append({'快递单号': dh, '货品摘要': names, '商家编码': no})
        return pd.DataFrame(totalDf)  # assumption: return the merged summary frame to the caller
import pandas as pd
import numpy as np
import re
from datetime import timedelta as timedelta_t
from typing import Union, List, Set, Dict
from dataclasses import dataclass
from qlearn.core.utils import infer_series_frequency, _check_frame_columns
@dataclass
class DataType:
# data type: multi, ticks, ohlc
type: str
symbols: List[str]
freq: str
subtypes: Set[str]
def frequency(self):
return pd.Timedelta(self.freq)
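# A minimal construction sketch for DataType; the concrete field values below are
# purely hypothetical.
def _example_datatype() -> pd.Timedelta:
    dt = DataType(type='ohlc', symbols=['EURUSD'], freq='5Min', subtypes={'quotes'})
    # frequency() just wraps the freq string in a pandas Timedelta
    return dt.frequency()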
_S1 = pd.Timedelta('1S')
_D1 = pd.Timedelta('1D')
def pre_close_time_delta(freq):
"""
    Minimal time delta before a bar's close.
    Returns 1S for all timeframes greater than 1S and freq / 10 otherwise.
TODO: take in account session start/stop times for daily freq
"""
if freq >= _D1:
raise ValueError('Data with daily frequency is not supported properly yet !')
return _S1 if freq > _S1 else freq / 10
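# Usage sketch for pre_close_time_delta above (hypothetical frequencies), kept in a
# helper so nothing executes at import time.
def _example_pre_close_time_delta():
    # 5-minute bars -> 1 second before the close
    assert pre_close_time_delta(pd.Timedelta('5Min')) == pd.Timedelta('1S')
    # sub-second bars -> one tenth of the bar length
    assert pre_close_time_delta(pd.Timedelta('100ms')) == pd.Timedelta('10ms')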
def pre_close_time_shift(bars):
"""
    Interval from a bar's open to the last moment before its close.
TODO: take in account session start/stop times for daily freq
"""
_tshift = pd.Timedelta(infer_series_frequency(bars[:100]))
return _tshift - pre_close_time_delta(_tshift)
def time_delta_to_str(d: Union[int, timedelta_t, pd.Timedelta]):
"""
Convert timedelta object to pretty print format
:param d:
:return:
"""
seconds = d.total_seconds() if isinstance(d, (pd.Timedelta, timedelta_t)) else int(d)
days, seconds = divmod(seconds, 86400)
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
r = ''
if days > 0:
r += '%dD' % days
if hours > 0:
r += '%dH' % hours
if minutes > 0:
r += '%dMin' % minutes
if seconds > 0:
r += '%dS' % seconds
return r
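# Usage sketch for time_delta_to_str above (hypothetical delta).
def _example_time_delta_to_str() -> str:
    # 1 day, 2 hours and 30 seconds -> '1D2H30S' (zero components are skipped)
    return time_delta_to_str(pd.Timedelta(days=1, hours=2, seconds=30))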
def shift_for_timeframe(signals: pd.Series, data: pd.DataFrame, tf: Union[str, pd.Timedelta]) -> pd.Series:
"""
    Shift signals into the future by (timeframe - timeframe(data)) so that a signal
    built on a larger bar only takes effect once that bar has closed.
"""
t = pd.Timedelta(infer_series_frequency(data[:100]))
tf = pd.Timedelta(tf)
return signals.shift(1, freq=tf - t) if tf > t else signals
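# A rough usage sketch for shift_for_timeframe above; the 1-minute data and the
# 5-minute signal series are hypothetical.
def _example_shift_for_timeframe() -> pd.Series:
    idx = pd.date_range('2021-01-01', periods=200, freq='1Min')
    data = pd.DataFrame({'close': np.arange(200)}, index=idx)
    signals = pd.Series(1, index=idx[::5])
    # signals are pushed 4 minutes forward (5Min - 1Min), i.e. they only become
    # actionable on the last minute bar of their 5-minute bar
    return shift_for_timeframe(signals, data, '5Min')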
def timeseries_density(dx, period='1Min'):
"""
Detect average records density per period
:param dx:
:param period:
:return:
"""
    return dx.groupby(pd.Grouper(freq=period)).count().mean()  # assumption: mean record count per period
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
# ts.ix[mask_shifted]
# ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
self.assertEqual(result, expected)
result = s.iloc[0]
self.assertEqual(result, expected)
result = s['a']
self.assertEqual(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series( | pd.date_range("2011-01-01", periods=3, tz="US/Eastern") | pandas.date_range |
"""
This is the main class for the NARPS analysis
There are three classes defined here:
Narps: this is a class that wraps the entire dataset
NarpsTeam: this class is instantiated for each team
NarpsDirs: This class contains info about all
of the directories that are needed for this and
subsequent analyses
The code under the main loop at the bottom
runs all of the image preprocessing that is
needed for subsequent analyses
"""
import numpy
import pandas
import nibabel
import json
import os
import sys
import time
import glob
import datetime
import nilearn.image
import nilearn.input_data
import nilearn.plotting
import shutil
import warnings
import pickle
from nipype.interfaces.fsl.model import SmoothEstimate
import wget
import tarfile
from urllib.error import HTTPError
import hashlib
import inspect
from utils import get_metadata, TtoZ, get_map_metadata,\
log_to_file, stringify_dict
from ValueDiagnostics import compare_thresh_unthresh_values
# # set up data url - COMMENTING NOW, WILL REMOVE
# # this is necessary for now because the data are still private
# # once the data are public we can share the info.json file
# Hypotheses:
#
# Parametric effect of gain:
#
# 1. Positive effect in ventromedial PFC - equal indifference group
# 2. Positive effect in ventromedial PFC - equal range group
# 3. Positive effect in ventral striatum - equal indifference group
# 4. Positive effect in ventral striatum - equal range group
#
# Parametric effect of loss:
# - 5: Negative effect in VMPFC - equal indifference group
# - 6: Negative effect in VMPFC - equal range group
# - 7: Positive effect in amygdala - equal indifference group
# - 8: Positive effect in amygdala - equal range group
#
# Equal range vs. equal indifference:
#
# - 9: Greater positive response to losses in amygdala for equal range
# condition vs. equal indifference condition.
hypotheses = {1: '+gain: equal indiff',
2: '+gain: equal range',
3: '+gain: equal indiff',
4: '+gain: equal range',
5: '-loss: equal indiff',
6: '-loss: equal range',
7: '+loss: equal indiff',
8: '+loss: equal range',
9: '+loss:ER>EI'}
hypnums = [1, 2, 5, 6, 7, 8, 9]
# separate class to store base directories,
# since we need them in multiple places
class NarpsDirs(object):
"""
class defining directories for project
"""
def __init__(self, basedir, dataurl=None,
force_download=False, testing=False):
# set up a dictionary to contain all of the
# directories
self.dirs = {}
self.testing = testing
# check to make sure home of basedir exists
assert os.path.exists(os.path.dirname(basedir))
self.dirs['base'] = basedir
if not os.path.exists(basedir):
os.mkdir(basedir)
self.force_download = force_download
self.data_url = dataurl
dirs_to_add = ['output', 'metadata', 'templates',
'cached', 'figures', 'logs', 'orig',
'image_diagnostics_orig',
'image_diagnostics_zstat']
for d in dirs_to_add:
self.dirs[d] = os.path.join(self.dirs['base'], d)
self.dirs['fsl_templates'] = os.path.join(
os.environ['FSLDIR'],
'data/standard')
# autogenerate all of the directories
# except for the orig dir
for d in dirs_to_add:
if d != 'orig' and not os.path.exists(self.dirs[d]):
os.mkdir(self.dirs[d])
self.logfile = os.path.join(self.dirs['logs'], 'narps.txt')
if not self.testing:
log_to_file(
self.logfile,
'Running Narps main class',
flush=True)
output_dirs = ['resampled', 'rectified', 'zstat',
'thresh_mask_orig']
for o in output_dirs:
self.get_output_dir(o)
# if raw data don't exist, download them
if self.force_download and os.path.exists(self.dirs['orig']):
shutil.rmtree(self.dirs['orig'])
if not os.path.exists(self.dirs['orig']):
self.get_orig_data()
assert os.path.exists(self.dirs['orig'])
# make sure the necessary templates are present
# these should be downloaded with the raw data
self.MNI_mask = os.path.join(self.dirs['fsl_templates'],
'MNI152_T1_2mm_brain_mask.nii.gz')
assert os.path.exists(self.MNI_mask)
self.MNI_template = os.path.join(self.dirs['fsl_templates'],
'MNI152_T1_2mm.nii.gz')
assert os.path.exists(self.MNI_template)
self.full_mask_img = os.path.join(self.dirs['templates'],
'MNI152_all_voxels.nii.gz')
def get_output_dir(self, dirID, base='output'):
"""get the directory path for a particular ID. if it doesn't
exist then create it and save to the dirs list
dir names always match the dir ID exactly
"""
if dirID in self.dirs:
return(self.dirs[dirID])
else:
self.dirs[dirID] = os.path.join(
self.dirs[base],
dirID
)
if not os.path.exists(self.dirs[dirID]):
os.mkdir(self.dirs[dirID])
return(self.dirs[dirID])
def get_orig_data(self):
"""
download original data from repository
"""
log_to_file(
self.logfile,
'get_orig_data',
headspace=2)
log_to_file(self.logfile, 'DATA_URL: %s' % self.data_url)
MAX_TRIES = 5
if self.data_url is None:
raise Exception('no URL for original data, cannot download')
print('orig data do not exist, downloading...')
output_directory = self.dirs['base']
no_dl = True
ntries = 0
# try several times in case of http error
while no_dl:
try:
filename = wget.download(self.data_url, out=output_directory)
no_dl = False
except HTTPError:
ntries += 1
time.sleep(1) # wait a second
if ntries > MAX_TRIES:
raise Exception('Problem downloading original data')
# save a hash of the tarball for data integrity
filehash = hashlib.md5(open(filename, 'rb').read()).hexdigest()
log_to_file(self.logfile, 'hash of tar file: %s' % filehash)
tarfile_obj = tarfile.open(filename)
tarfile_obj.extractall(path=self.dirs['base'])
os.remove(filename)
class NarpsTeam(object):
"""
class defining team information
"""
def __init__(self, teamID, NV_collection_id, dirs, verbose=False):
self.dirs = dirs
self.teamID = teamID
self.NV_collection_id = NV_collection_id
self.datadir_label = '%s_%s' % (NV_collection_id, teamID)
# directory for the original maps
self.input_dir = os.path.join(self.dirs.dirs['orig'],
'%s_%s' % (NV_collection_id, teamID))
if not os.path.exists(self.input_dir):
print("Warning: Input dir (%s) does not exist" % self.input_dir)
self.verbose = verbose
self.image_json = None
self.jsonfile = None
self.has_all_images = None
self.logs = {}
# create image directory structure
output_dirs = {'thresh': ['orig', 'resampled', 'thresh_mask_orig'],
'unthresh': ['orig', 'resampled', 'rectified', 'zstat']}
self.images = {}
for imgtype in ['thresh', 'unthresh']:
self.images[imgtype] = {}
for o in output_dirs[imgtype]:
self.images[imgtype][o] = {}
self.n_nan_inmask_values = {}
self.n_zero_inmask_values = {}
self.has_resampled = None
self.has_binarized_masks = None
# populate the image data structure
self.get_orig_images()
# check whether image needs to be rectified
logfile = os.path.join(
self.dirs.dirs['logs'],
'image_diagnostics.log')
collection_string = '%s_%s' % (self.NV_collection_id, self.teamID)
if not os.path.exists(self.dirs.dirs['image_diagnostics_orig']):
os.mkdir(self.dirs.dirs['image_diagnostics_orig'])
self.image_diagnostics_file = os.path.join(
self.dirs.dirs['image_diagnostics_orig'],
'%s.csv' % collection_string
)
if not os.path.exists(self.image_diagnostics_file):
self.image_diagnostics = compare_thresh_unthresh_values(
dirs, collection_string, logfile)
self.image_diagnostics.to_csv(self.image_diagnostics_file)
else:
self.image_diagnostics = pandas.read_csv(
self.image_diagnostics_file)
# create a dict with the rectified values
# use answers from spreadsheet
self.rectify = {}
for i in self.image_diagnostics.index:
self.rectify[
self.image_diagnostics.loc[
i, 'hyp']] = self.image_diagnostics.loc[
i, 'reverse_contrast']
# manual fixes to rectify status per spreadsheet answers for hyp 9
if self.teamID in ['46CD']:
self.rectify[9] = True
def get_orig_images(self):
"""
find orig images
"""
self.has_all_images = {
'thresh': True,
'unthresh': True}
for hyp in hypotheses:
for imgtype in self.images:
imgfile = os.path.join(
self.input_dir,
'hypo%d_%s.nii.gz' % (hyp, imgtype))
if os.path.exists(imgfile):
self.images[imgtype]['orig'][hyp] = imgfile
else:
self.images[imgtype]['orig'][hyp] = None
self.has_all_images[imgtype] = False
def create_binarized_thresh_masks(self, thresh=1e-4,
overwrite=False,
replace_na=True):
"""
create binarized version of thresholded maps
"""
self.has_binarized_masks = True
if self.verbose:
print('creating binarized masks for', self.teamID)
for hyp in self.images['thresh']['orig']:
img = self.images['thresh']['orig'][hyp]
maskimg = os.path.join(
self.dirs.dirs['thresh_mask_orig'],
self.datadir_label,
os.path.basename(img))
self.images['thresh']['thresh_mask_orig'][hyp] = maskimg
if not os.path.exists(os.path.dirname(
maskimg)):
os.mkdir(os.path.dirname(maskimg))
if overwrite or not os.path.exists(maskimg):
# load the image and threshold/binarize it
threshimg = nibabel.load(img)
threshdata = threshimg.get_data()
# some images use nan instead of zero for the non-excursion
# voxels, so we need to replace with zeros
if replace_na:
threshdata = numpy.nan_to_num(threshdata)
threshdata_bin = numpy.zeros(threshdata.shape)
# if the team reported using a negative contrast,
# then we use the negative direction, otherwise
# use the positive direction.
# we use a small number instead of zero to address
# numeric issues
if self.rectify[hyp]:
# use negative
threshdata_bin[threshdata < -1*thresh] = 1
else:
# use positive
threshdata_bin[threshdata > thresh] = 1
# save back to a nifti image with same geometry
# as original
bin_img = nibabel.Nifti1Image(threshdata_bin,
affine=threshimg.affine)
bin_img.to_filename(maskimg)
else:
# if it already exists, just use existing
                bin_img = nibabel.load(maskimg)
                if self.verbose:
                    print('using existing binary mask for',
self.teamID)
def get_resampled_images(self, imgtype,
overwrite=False, replace_na=False):
"""
resample images into common space using nilearn
"""
self.has_resampled = True
# use linear interpolation for binarized maps, then threshold at 0.5
# this avoids empty voxels that can occur with NN interpolation
interp_type = {'thresh': 'linear', 'unthresh': 'continuous'}
data_dirname = {'thresh': 'thresh_mask_orig',
'unthresh': 'orig'}
resampled_dir = self.dirs.get_output_dir('resampled')
for hyp in hypotheses:
infile = os.path.join(
self.dirs.dirs[data_dirname[imgtype]],
self.datadir_label,
'hypo%d_%s.nii.gz' % (hyp, imgtype))
outfile = os.path.join(
resampled_dir,
self.datadir_label,
'hypo%d_%s.nii.gz' % (hyp, imgtype))
self.images[imgtype]['resampled'][hyp] = outfile
if not os.path.exists(os.path.dirname(outfile)):
os.mkdir(os.path.dirname(outfile))
if not os.path.exists(outfile) or overwrite:
if self.verbose:
print("resampling", infile)
# create resampled file
# ignore nilearn warnings
# these occur on some of the unthresholded images
# that contains NaN values
# we probably don't want to set those to zero
# because those would enter into interpolation
# and then would be treated as real zeros later
# rather than "missing data" which is the usual
# intention
with warnings.catch_warnings():
warnings.simplefilter("ignore")
resampled = nilearn.image.resample_to_img(
infile,
self.dirs.MNI_template,
interpolation=interp_type[imgtype])
if imgtype == 'thresh':
resampled = nilearn.image.math_img(
'img>0.5',
img=resampled)
resampled.to_filename(outfile)
else:
if self.verbose:
print('using existing resampled image for',
self.teamID)
class Narps(object):
"""
main class for NARPS analysis
"""
def __init__(self, basedir, metadata_file=None,
verbose=False, overwrite=False,
dataurl=None, testing=False):
self.basedir = basedir
self.dirs = NarpsDirs(basedir, dataurl=dataurl,
testing=testing)
self.verbose = verbose
self.teams = {}
self.overwrite = overwrite
self.started_at = datetime.datetime.now()
self.testing = testing
# create the full mask image if it doesn't already exist
if not os.path.exists(self.dirs.full_mask_img):
print('making full image mask')
self.mk_full_mask_img(self.dirs)
assert os.path.exists(self.dirs.full_mask_img)
# get input dirs for orig data
self.image_jsons = None
self.input_dirs = self.get_input_dirs(self.dirs)
# check images for each team
self.complete_image_sets = {}
self.get_orig_images(self.dirs)
for imgtype in ['thresh', 'unthresh']:
log_to_file(
self.dirs.logfile,
'found %d teams with complete original %s datasets' % (
len(self.complete_image_sets[imgtype]), imgtype))
# set up metadata
if metadata_file is None:
self.metadata_file = os.path.join(
self.dirs.dirs['orig'],
'analysis_pipelines_for_analysis.xlsx')
else:
self.metadata_file = metadata_file
self.metadata = get_metadata(self.metadata_file)
self.hypothesis_metadata = pandas.DataFrame(
columns=['teamID', 'hyp', 'n_na', 'n_zero'])
self.all_maps = {'thresh': {'resampled': None},
'unthresh': {'resampled': None}}
self.rectified_list = []
def mk_full_mask_img(self, dirs):
"""
create a mask image with ones in all voxels
"""
# make full image mask (all voxels)
mi = nibabel.load(self.dirs.MNI_mask)
d = numpy.ones(mi.shape)
full_mask = nibabel.Nifti1Image(d, affine=mi.affine)
full_mask.to_filename(self.dirs.full_mask_img)
def get_input_dirs(self, dirs, verbose=True, load_json=True):
"""
get orig dirs
- assumes that images.json is present for each valid dir
"""
input_files = glob.glob(
os.path.join(dirs.dirs['orig'], '*/hypo1_*thresh.nii.gz'))
input_dirs = [os.path.dirname(i) for i in input_files]
input_dirs = list(set(input_dirs)) # get unique dirs
log_to_file(
self.dirs.logfile,
'found %d input directories' % len(input_dirs))
for i in input_dirs:
collection_id = os.path.basename(i)
NV_collection_id, teamID = collection_id.split('_')
if teamID not in self.teams:
self.teams[teamID] = NarpsTeam(
teamID, NV_collection_id, dirs, verbose=self.verbose)
if os.path.exists(os.path.join(i, 'images.json')):
self.teams[teamID].jsonfile = os.path.join(
i, 'images.json')
with open(self.teams[teamID].jsonfile) as f:
self.teams[teamID].image_json = json.load(f)
def get_orig_images(self, dirs):
"""
load orig images
"""
self.complete_image_sets = {
'thresh': [],
'unthresh': []}
for teamID in self.teams:
self.teams[teamID].get_orig_images()
for imgtype in self.teams[teamID].images:
if self.teams[teamID].has_all_images[imgtype]:
self.complete_image_sets[imgtype].append(teamID)
# sort the teams - this is the order that will be used
for imgtype in self.teams[teamID].images:
self.complete_image_sets[imgtype].sort()
def get_binarized_thresh_masks(self):
"""
create binarized thresholded maps for each team
"""
log_to_file(
self.dirs.logfile,
sys._getframe().f_code.co_name,
headspace=2)
for teamID in self.complete_image_sets['thresh']:
self.teams[teamID].create_binarized_thresh_masks()
def get_resampled_images(self, overwrite=None):
"""
resample all images into FSL MNI space
"""
log_to_file(
self.dirs.logfile,
sys._getframe().f_code.co_name,
headspace=2)
if overwrite is None:
overwrite = self.overwrite
for imgtype in ['thresh', 'unthresh']:
for teamID in self.complete_image_sets[imgtype]:
self.teams[teamID].get_resampled_images(imgtype=imgtype)
def check_image_values(self, overwrite=None):
"""
get # of nonzero and NA voxels for each image
"""
log_to_file(
self.dirs.logfile,
sys._getframe().f_code.co_name,
headspace=2)
if overwrite is None:
overwrite = self.overwrite
image_metadata_file = os.path.join(
self.dirs.dirs['metadata'], 'image_metadata_df.csv')
if os.path.exists(image_metadata_file) and not overwrite:
            print('using cached image metadata')
image_metadata_df = pandas.read_csv(image_metadata_file)
return(image_metadata_df)
        # otherwise compute from scratch
image_metadata = []
masker = nilearn.input_data.NiftiMasker(mask_img=self.dirs.MNI_mask)
for teamID in self.complete_image_sets['thresh']:
for hyp in self.teams[teamID].images['thresh']['resampled']:
threshfile = self.teams[teamID].images[
'thresh']['resampled'][hyp]
threshdata = masker.fit_transform(threshfile)
image_metadata.append(
[teamID, hyp, numpy.sum(numpy.isnan(threshdata)),
numpy.sum(threshdata == 0.0)])
image_metadata_df = pandas.DataFrame(
image_metadata, columns=['teamID', 'hyp', 'n_na', 'n_nonzero'])
image_metadata_df.to_csv(image_metadata_file)
return(image_metadata_df)
def create_concat_images(self, datatype='resampled',
create_voxel_map=False,
imgtypes=None,
overwrite=None):
"""
create images concatenated across teams
ordered by self.complete_image_sets
create_voxel_map: will create a map showing
proportion of nonzero teams at each voxel
"""
log_to_file(
self.dirs.logfile,
sys._getframe().f_code.co_name,
headspace=2)
func_args = inspect.getargvalues(
inspect.currentframe()).locals
log_to_file(
self.dirs.logfile,
stringify_dict(func_args))
if imgtypes is None:
imgtypes = ['thresh', 'unthresh']
if overwrite is None:
overwrite = self.overwrite
for imgtype in imgtypes:
concat_dir = self.dirs.get_output_dir(
'%s_concat_%s' % (imgtype, datatype))
for hyp in range(1, 10):
outfile = os.path.join(
concat_dir,
'hypo%d.nii.gz' % hyp)
if self.verbose:
print(outfile)
if not os.path.exists(outfile) or overwrite:
if self.verbose:
print('%s - hypo %d: creating concat file' % (
imgtype, hyp))
concat_teams = [
teamID for teamID in self.complete_image_sets[imgtype]
if os.path.exists(
self.teams[teamID].images[imgtype][datatype][hyp])]
self.all_maps[imgtype][datatype] = [
self.teams[teamID].images[imgtype][datatype][hyp]
for teamID in concat_teams]
# use nilearn NiftiMasker to load data
# and save to a new file
masker = nilearn.input_data.NiftiMasker(
mask_img=self.dirs.MNI_mask)
concat_data = masker.fit_transform(
self.all_maps[imgtype][datatype])
concat_img = masker.inverse_transform(concat_data)
concat_img.to_filename(outfile)
if create_voxel_map:
concat_data = nibabel.load(outfile).get_data()
voxel_map = numpy.mean(
numpy.abs(concat_data) > 1e-6, 3)
voxel_img = nibabel.Nifti1Image(
voxel_map, affine=concat_img.affine)
mapfile = outfile.replace(
'.nii.gz', '_voxelmap.nii.gz'
)
assert mapfile != outfile
voxel_img.to_filename(mapfile)
# save team ID and files to a label file for provenance
labelfile = outfile.replace('.nii.gz', '.labels')
with open(labelfile, 'w') as f:
for i, team in enumerate(concat_teams):
f.write('%s\t%s%s' % (
team,
self.all_maps[imgtype][datatype][i],
os.linesep))
else:
if self.verbose:
print('%s - hypo %d: using existing file' % (
imgtype, hyp))
return(self.all_maps)
def create_mean_thresholded_images(self, datatype='resampled',
overwrite=None, thresh=1e-5):
"""
create overlap maps for thresholded images
"""
log_to_file(
self.dirs.logfile,
sys._getframe().f_code.co_name,
headspace=2)
func_args = inspect.getargvalues(
inspect.currentframe()).locals
log_to_file(
self.dirs.logfile,
stringify_dict(func_args))
imgtype = 'thresh'
if overwrite is None:
overwrite = self.overwrite
output_dir = self.dirs.get_output_dir('overlap_binarized_thresh')
concat_dir = self.dirs.get_output_dir(
'%s_concat_%s' % (imgtype, datatype))
for hyp in range(1, 10):
outfile = os.path.join(
output_dir,
'hypo%d.nii.gz' % hyp)
if not os.path.exists(outfile) or overwrite:
if self.verbose:
print('%s - hypo %d: creating overlap file' % (
imgtype, hyp))
concat_file = os.path.join(
concat_dir,
'hypo%d.nii.gz' % hyp)
concat_img = nibabel.load(concat_file)
concat_data = concat_img.get_data()
concat_data = (concat_data > thresh).astype('float')
concat_mean = numpy.mean(concat_data, 3)
concat_mean_img = nibabel.Nifti1Image(concat_mean,
affine=concat_img.affine)
concat_mean_img.to_filename(outfile)
else:
if self.verbose:
print('%s - hypo %d: using existing file' % (
imgtype, hyp))
def create_rectified_images(self, map_metadata_file=None,
overwrite=None):
"""
create rectified images
- contrasts 5 and 6 were negative contrasts
some teams uploaded images where negative values
provided evidence in favor of the contrast
using metadata provided by teams, we identify these
images and flip their valence so that all maps
present positive evidence for each contrast
"""
log_to_file(
self.dirs.logfile,
sys._getframe().f_code.co_name,
headspace=2)
func_args = inspect.getargvalues(
inspect.currentframe()).locals
log_to_file(
self.dirs.logfile,
stringify_dict(func_args))
if overwrite is None:
overwrite = self.overwrite
for teamID in self.complete_image_sets['unthresh']:
if not hasattr(self.teams[teamID], 'rectify'):
print('no rectification data for %s, skipping' % teamID)
continue
for hyp in range(1, 10):
if hyp not in self.teams[teamID].rectify:
print('no rectification data for %s hyp%d, skipping' % (
teamID, hyp))
continue
rectify = self.teams[teamID].rectify[hyp]
# load data from unthresh map within
# positive voxels of thresholded mask
unthresh_file = self.teams[
teamID].images['unthresh']['resampled'][hyp]
self.teams[
teamID].images[
'unthresh']['rectified'][hyp] = os.path.join(
self.dirs.dirs['rectified'],
self.teams[teamID].datadir_label,
'hypo%d_unthresh.nii.gz' % hyp)
if not os.path.exists(
os.path.dirname(
self.teams[
teamID].images['unthresh']['rectified'][hyp])):
os.mkdir(os.path.dirname(
self.teams[teamID].images[
'unthresh']['rectified'][hyp]))
if overwrite or not os.path.exists(
self.teams[
teamID].images['unthresh']['rectified'][hyp]):
# if values were flipped for negative contrasts
if rectify:
print('rectifying hyp', hyp, 'for', teamID)
img = nibabel.load(unthresh_file)
img_rectified = nilearn.image.math_img(
'img*-1', img=img)
img_rectified.to_filename(
self.teams[
teamID].images['unthresh']['rectified'][hyp])
self.rectified_list.append((teamID, hyp))
else: # just copy original
shutil.copy(
unthresh_file,
self.teams[
teamID].images['unthresh']['rectified'][hyp])
# write list of rectified teams to disk
if len(self.rectified_list) > 0:
with open(os.path.join(self.dirs.dirs['metadata'],
'rectified_images_list.txt'), 'w') as f:
for l in self.rectified_list:
f.write('%s\t%s%s' % (l[0], l[1], os.linesep))
def compute_image_stats(self, datatype='zstat', overwrite=None):
"""
compute std and range on statistical images
"""
log_to_file(
self.dirs.logfile,
sys._getframe().f_code.co_name,
headspace=2)
func_args = inspect.getargvalues(
inspect.currentframe()).locals
log_to_file(
self.dirs.logfile,
stringify_dict(func_args))
if overwrite is None:
overwrite = self.overwrite
# set up directories
unthresh_concat_dir = self.dirs.get_output_dir(
'unthresh_concat_%s' % datatype)
unthresh_range_dir = self.dirs.get_output_dir(
'unthresh_range_%s' % datatype)
unthresh_std_dir = self.dirs.get_output_dir(
'unthresh_std_%s' % datatype)
for hyp in range(1, 10):
unthresh_file = os.path.join(
unthresh_concat_dir,
'hypo%d.nii.gz' % hyp)
range_outfile = os.path.join(
unthresh_range_dir,
'hypo%d.nii.gz' % hyp)
std_outfile = os.path.join(
unthresh_std_dir,
'hypo%d.nii.gz' % hyp)
if not os.path.exists(range_outfile) \
or not os.path.exists(std_outfile) \
or overwrite:
unthresh_img = nibabel.load(unthresh_file)
unthresh_data = unthresh_img.get_data()
concat_data = numpy.nan_to_num(unthresh_data)
# compute range
datarange = numpy.max(concat_data, axis=3) \
- numpy.min(concat_data, axis=3)
range_img = nibabel.Nifti1Image(
datarange,
affine=unthresh_img.affine)
range_img.to_filename(range_outfile)
# compute standard deviation
datastd = numpy.std(concat_data, axis=3)
std_img = nibabel.Nifti1Image(
datastd,
affine=unthresh_img.affine)
std_img.to_filename(std_outfile)
def convert_to_zscores(self, map_metadata_file=None, overwrite=None):
"""
convert rectified images to z scores
- unthresholded images could be either t or z images
- if they are already z then just copy
- use metadata supplied by teams to determine image type
"""
log_to_file(
self.dirs.logfile,
sys._getframe().f_code.co_name,
headspace=2)
func_args = inspect.getargvalues(
inspect.currentframe()).locals
log_to_file(
self.dirs.logfile,
stringify_dict(func_args))
if overwrite is None:
overwrite = self.overwrite
if map_metadata_file is None:
map_metadata_file = os.path.join(
self.dirs.dirs['orig'],
'narps_neurovault_images_details_responses_corrected.csv')
print('using map_metadata_file:', map_metadata_file)
unthresh_stat_type = get_map_metadata(map_metadata_file)
metadata = get_metadata(self.metadata_file)
n_participants = metadata[['n_participants', 'NV_collection_string']]
n_participants.index = metadata.teamID
unthresh_stat_type = unthresh_stat_type.merge(
n_participants, left_index=True, right_index=True)
for teamID in self.complete_image_sets['unthresh']:
if teamID not in unthresh_stat_type.index:
print('no map metadata for', teamID)
continue
# this is a bit of a kludge
# since some contrasts include all subjects
# but others only include some
# we don't have the number of participants in each
# group so we just use the entire number
n = unthresh_stat_type.loc[teamID, 'n_participants']
for hyp in range(1, 10):
infile = self.teams[
teamID].images['unthresh']['rectified'][hyp]
if not os.path.exists(infile):
print('skipping', infile)
continue
self.teams[
teamID].images['unthresh']['zstat'][hyp] = os.path.join(
self.dirs.dirs['zstat'],
self.teams[teamID].datadir_label,
'hypo%d_unthresh.nii.gz' % hyp)
if not overwrite and os.path.exists(
self.teams[teamID].images['unthresh']['zstat'][hyp]):
continue
if unthresh_stat_type.loc[
teamID, 'unthresh_type'].lower() == 't':
if not os.path.exists(
os.path.dirname(
self.teams[
teamID].images['unthresh']['zstat'][hyp])):
os.mkdir(os.path.dirname(
self.teams[
teamID].images['unthresh']['zstat'][hyp]))
print("converting %s (hyp %d) to z - %d participants" % (
teamID, hyp, n))
TtoZ(infile,
self.teams[teamID].images['unthresh']['zstat'][hyp],
n-1)
elif unthresh_stat_type.loc[teamID, 'unthresh_type'] == 'z':
if not os.path.exists(os.path.dirname(
self.teams[
teamID].images['unthresh']['zstat'][hyp])):
os.mkdir(os.path.dirname(
self.teams[
teamID].images['unthresh']['zstat'][hyp]))
if not os.path.exists(
self.teams[
teamID].images['unthresh']['zstat'][hyp]):
print('copying', teamID)
shutil.copy(
infile,
os.path.dirname(
self.teams[
teamID].images['unthresh']['zstat'][hyp]))
else:
# if it's not T or Z then we skip it as it's not usable
print('skipping %s - other data type' % teamID)
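    # A hedged sketch of the voxelwise t-to-z mapping that the TtoZ utility used
    # above is assumed to perform for a single value (real implementations work
    # with survival functions / log p-values for numerical stability at large t):
    #
    #     from scipy import stats
    #     t_value, dof = 3.2, 107          # hypothetical t statistic and n - 1
    #     z_value = stats.norm.ppf(stats.t.cdf(t_value, dof))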
def estimate_smoothness(self, overwrite=None, imgtype='zstat'):
"""
estimate smoothness of Z maps using FSL's smoothness estimation
"""
log_to_file(
self.dirs.logfile,
sys._getframe().f_code.co_name,
headspace=2)
func_args = inspect.getargvalues(
inspect.currentframe()).locals
log_to_file(
self.dirs.logfile,
stringify_dict(func_args))
if overwrite is None:
overwrite = self.overwrite
output_file = os.path.join(self.dirs.dirs['metadata'],
'smoothness_est.csv')
if os.path.exists(output_file) and not overwrite:
if self.verbose:
print('using existing smoothness file')
            smoothness_df = pd.read_csv(output_file)
            return(smoothness_df)  # assumption: return the cached estimates, mirroring check_image_values
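    # A hedged sketch of how FSL smoothness estimation is typically driven through
    # nipype's SmoothEstimate (imported at the top of this module); the file names
    # below are hypothetical:
    #
    #     est = SmoothEstimate(zstat_file='hypo1_unthresh.nii.gz',
    #                          mask_file='MNI152_T1_2mm_brain_mask.nii.gz')
    #     result = est.run()
    #     # result.outputs carries the smoothness estimates (dlh, volume, resels)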
import pandas as pd
import numpy as np
import plotly.graph_objects as go
import matplotlib as mpl
from matplotlib.lines import Line2D
from ..reportity import reportity
from matplotlib import pyplot as plt
from .plotly_styled_figure import StayledFigures, Colors
pd.options.display.float_format = '{:,}'.format
def main():
dataframe = get_dataframe()
simple_matplot_fig = get_simple_matplot_fig()
complicated_matplot_fig = get_complicated_matplot()
plotly_fig = get_plotly_figure()
report = reportity.Reportity(
title='Reportity Example',
include_plotly_js=False,
)
report.print_header(
text='Description',
level=1,
)
report.print_paragraph(
text='This is an example of Reportity<br>Look how easy it is'
)
report.print_header(
text='Data',
level=1,
)
report.print_paragraph(
text='This is a dataframe with some data. You can limit it with max row'
)
report.print_dataframe(
dataframe=dataframe,
max_rows=5,
)
report.print_header(
text='Figures',
level=1,
)
report.print_paragraph(
text='Printing graphs is easy!<br>We strongly recommend to use Plotly but you can use matplotlib if you want<br>You can print one or two figures in a row'
)
report.print_header(
text='Plotly Figures',
level=2,
)
report.print_figure(
figure=plotly_fig
)
report.print_2_figures(
figure_left=plotly_fig,
figure_right=plotly_fig,
figure_name='Two Figures'
)
report.print_header(
text='Matplotlib Figures',
level=2,
)
report.print_paragraph(
text='mpld3 converts matplot figures to interactive figures'
)
report.print_figure(
figure=simple_matplot_fig,
)
report.print_header(
text='Complicated Figures',
level=2,
)
report.print_paragraph(
text='Here we have a complicated figure that mpld3 doesn\'t know how to convert properly. In this case we will use an image insted of an interactive figure'
)
report.print_figure(
figure=complicated_matplot_fig,
figure_name='Bad randering'
)
report.print_figure(
figure=complicated_matplot_fig,
figure_name='Figure as image',
as_image=True,
)
report.save_as_html(
path='example_report.html',
)
report.show()
def get_dataframe():
raw_data = {
'first_name': ['Jason', 'Molly', 'Tina', 'Jake', 'Amy'],
'last_name': ['Miller', 'Jacobson', 'Ali', 'Milner', 'Cooze'],
'age': [42, 52, 36, 24, 73],
'preTestScore': [4, 24, 31, 2, 3],
'postTestScore': [25, 94, 57, 62, 70],
'cost': [2523423.423432, 923423423.44, 243457, 4343462, 74343210],
}
return | pd.DataFrame(raw_data) | pandas.DataFrame |
import operator
import re
import warnings
import numpy as np
import pytest
from pandas._libs.sparse import IntIndex
import pandas.util._test_decorators as td
import pandas as pd
from pandas import isna
from pandas.core.sparse.api import SparseArray, SparseDtype, SparseSeries
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
@pytest.fixture(params=["integer", "block"])
def kind(request):
return request.param
class TestSparseArray:
def setup_method(self, method):
self.arr_data = np.array([np.nan, np.nan, 1, 2, 3,
np.nan, 4, 5, np.nan, 6])
self.arr = SparseArray(self.arr_data)
self.zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
def test_constructor_dtype(self):
arr = SparseArray([np.nan, 1, 2, np.nan])
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert arr.dtype.subtype == np.float64
assert np.isnan(arr.fill_value)
arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0)
assert arr.dtype == SparseDtype(np.float64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=np.float64)
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert np.isnan(arr.fill_value)
arr = SparseArray([0, 1, 2, 4], dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
def test_constructor_dtype_str(self):
result = SparseArray([1, 2, 3], dtype='int')
expected = SparseArray([1, 2, 3], dtype=int)
tm.assert_sp_array_equal(result, expected)
def test_constructor_sparse_dtype(self):
result = SparseArray([1, 0, 0, 1], dtype=SparseDtype('int64', -1))
expected = SparseArray([1, 0, 0, 1], fill_value=-1, dtype=np.int64)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int64')
def test_constructor_sparse_dtype_str(self):
result = SparseArray([1, 0, 0, 1], dtype='Sparse[int32]')
expected = SparseArray([1, 0, 0, 1], dtype=np.int32)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int32')
def test_constructor_object_dtype(self):
# GH 11856
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object)
assert arr.dtype == SparseDtype(np.object)
assert np.isnan(arr.fill_value)
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object,
fill_value='A')
assert arr.dtype == SparseDtype(np.object, 'A')
assert arr.fill_value == 'A'
# GH 17574
data = [False, 0, 100.0, 0.0]
arr = SparseArray(data, dtype=np.object, fill_value=False)
assert arr.dtype == SparseDtype(np.object, False)
assert arr.fill_value is False
arr_expected = np.array(data, dtype=np.object)
it = (type(x) == type(y) and x == y for x, y in zip(arr, arr_expected))
assert np.fromiter(it, dtype=np.bool).all()
@pytest.mark.parametrize("dtype", [SparseDtype(int, 0), int])
def test_constructor_na_dtype(self, dtype):
with pytest.raises(ValueError, match="Cannot convert"):
SparseArray([0, 1, np.nan], dtype=dtype)
def test_constructor_spindex_dtype(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]))
# XXX: Behavior change: specifying SparseIndex no longer changes the
# fill_value
expected = SparseArray([0, 1, 2, 0], kind='integer')
tm.assert_sp_array_equal(arr, expected)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=np.int64, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=np.int64, fill_value=0)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=np.int64)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=None, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize("sparse_index", [
None, IntIndex(1, [0]),
])
def test_constructor_spindex_dtype_scalar(self, sparse_index):
# scalar input
arr = SparseArray(data=1, sparse_index=sparse_index, dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=1, sparse_index=IntIndex(1, [0]), dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
def test_constructor_spindex_dtype_scalar_broadcasts(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=None)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize('data, fill_value', [
(np.array([1, 2]), 0),
(np.array([1.0, 2.0]), np.nan),
([True, False], False),
([pd.Timestamp('2017-01-01')], pd.NaT),
])
def test_constructor_inferred_fill_value(self, data, fill_value):
result = SparseArray(data).fill_value
if pd.isna(fill_value):
assert pd.isna(result)
else:
assert result == fill_value
@pytest.mark.parametrize('format', ['coo', 'csc', 'csr'])
@pytest.mark.parametrize('size', [
pytest.param(0,
marks=td.skip_if_np_lt("1.16",
reason='NumPy-11383')),
10
])
@td.skip_if_no_scipy
def test_from_spmatrix(self, size, format):
import scipy.sparse
mat = scipy.sparse.random(size, 1, density=0.5, format=format)
result = SparseArray.from_spmatrix(mat)
result = np.asarray(result)
expected = mat.toarray().ravel()
tm.assert_numpy_array_equal(result, expected)
@td.skip_if_no_scipy
def test_from_spmatrix_raises(self):
import scipy.sparse
mat = scipy.sparse.eye(5, 4, format='csc')
with pytest.raises(ValueError, match="not '4'"):
SparseArray.from_spmatrix(mat)
@pytest.mark.parametrize('scalar,dtype', [
(False, SparseDtype(bool, False)),
(0.0, SparseDtype('float64', 0)),
(1, SparseDtype('int64', 1)),
('z', SparseDtype('object', 'z'))])
def test_scalar_with_index_infer_dtype(self, scalar, dtype):
# GH 19163
arr = SparseArray(scalar, index=[1, 2, 3], fill_value=scalar)
exp = SparseArray([scalar, scalar, scalar], fill_value=scalar)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == dtype
assert exp.dtype == dtype
@pytest.mark.parametrize("fill", [1, np.nan, 0])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip(self, kind, fill):
# see gh-13999
arr = SparseArray([np.nan, 1, np.nan, 2, 3],
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
arr = SparseArray([0, 0, 0, 1, 1, 2], dtype=np.int64,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr), dtype=np.int64)
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
@pytest.mark.parametrize("fill", [True, False, np.nan])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip2(self, kind, fill):
# see gh-13999
arr = SparseArray([True, False, True, True], dtype=np.bool,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
def test_get_item(self):
assert np.isnan(self.arr[1])
assert self.arr[2] == 1
assert self.arr[7] == 5
assert self.zarr[0] == 0
assert self.zarr[2] == 1
assert self.zarr[7] == 5
errmsg = re.compile("bounds")
with pytest.raises(IndexError, match=errmsg):
self.arr[11]
with pytest.raises(IndexError, match=errmsg):
self.arr[-11]
assert self.arr[-1] == self.arr[len(self.arr) - 1]
def test_take_scalar_raises(self):
msg = "'indices' must be an array, not a scalar '2'."
with pytest.raises(ValueError, match=msg):
self.arr.take(2)
def test_take(self):
exp = SparseArray(np.take(self.arr_data, [2, 3]))
tm.assert_sp_array_equal(self.arr.take([2, 3]), exp)
exp = SparseArray(np.take(self.arr_data, [0, 1, 2]))
tm.assert_sp_array_equal(self.arr.take([0, 1, 2]), exp)
def test_take_fill_value(self):
data = np.array([1, np.nan, 0, 3, 0])
sparse = SparseArray(data, fill_value=0)
exp = SparseArray(np.take(data, [0]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([0]), exp)
exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp)
def test_take_negative(self):
exp = SparseArray(np.take(self.arr_data, [-1]))
tm.assert_sp_array_equal(self.arr.take([-1]), exp)
exp = SparseArray(np.take(self.arr_data, [-4, -3, -2]))
tm.assert_sp_array_equal(self.arr.take([-4, -3, -2]), exp)
@pytest.mark.parametrize('fill_value', [0, None, np.nan])
def test_shift_fill_value(self, fill_value):
# GH #24128
sparse = SparseArray(np.array([1, 0, 0, 3, 0]),
fill_value=8.0)
res = sparse.shift(1, fill_value=fill_value)
if isna(fill_value):
fill_value = res.dtype.na_value
exp = SparseArray(np.array([fill_value, 1, 0, 0, 3]),
fill_value=8.0)
tm.assert_sp_array_equal(res, exp)
def test_bad_take(self):
with pytest.raises(IndexError, match="bounds"):
self.arr.take([11])
def test_take_filling(self):
# similar tests as GH 12631
sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4])
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
# XXX: test change: fill_value=True -> allow_fill=True
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
expected = SparseArray([np.nan, np.nan, np.nan])
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
msg = "Invalid value in 'indices'"
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), allow_fill=True)
def test_take_filling_fill_value(self):
# same tests as GH 12631
sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0)
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# fill_value
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
# XXX: behavior change.
# the old way of filling self.fill_value doesn't follow EA rules.
# It's supposed to be self.dtype.na_value (nan in this case)
expected = SparseArray([0, np.nan, np.nan], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
msg = ("Invalid value in 'indices'.")
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_take_filling_all_nan(self):
sparse = SparseArray([np.nan, np.nan, np.nan, np.nan, np.nan])
# XXX: did the default kind from take change?
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
tm.assert_sp_array_equal(result, expected)
result = sparse.take(np.array([1, 0, -1]), fill_value=True)
expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
tm.assert_sp_array_equal(result, expected)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_set_item(self):
def setitem():
self.arr[5] = 3
def setslice():
self.arr[1:5] = 2
with pytest.raises(TypeError, match="assignment via setitem"):
setitem()
with pytest.raises(TypeError, match="assignment via setitem"):
setslice()
def test_constructor_from_too_large_array(self):
with pytest.raises(TypeError, match="expected dimension <= 1 data"):
SparseArray(np.arange(10).reshape((2, 5)))
def test_constructor_from_sparse(self):
res = SparseArray(self.zarr)
assert res.fill_value == 0
assert_almost_equal(res.sp_values, self.zarr.sp_values)
def test_constructor_copy(self):
cp = SparseArray(self.arr, copy=True)
cp.sp_values[:3] = 0
assert not (self.arr.sp_values[:3] == 0).any()
not_copy = SparseArray(self.arr)
not_copy.sp_values[:3] = 0
assert (self.arr.sp_values[:3] == 0).all()
def test_constructor_bool(self):
# GH 10648
data = np.array([False, False, True, True, False, False])
arr = SparseArray(data, fill_value=False, dtype=bool)
assert arr.dtype == SparseDtype(bool)
tm.assert_numpy_array_equal(arr.sp_values, np.array([True, True]))
# Behavior change: np.asarray densifies.
# tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices,
np.array([2, 3], np.int32))
dense = arr.to_dense()
assert dense.dtype == bool
        tm.assert_numpy_array_equal(dense, data)
# -*- coding: utf-8 -*-
"""Supports OMNI Combined, Definitive, IMF and Plasma Data, and Energetic
Proton Fluxes, Time-Shifted to the Nose of the Earth's Bow Shock, plus Solar
and Magnetic Indices. Downloads data from the NASA Coordinated Data Analysis
Web (CDAWeb). Supports both 5 and 1 minute files.
Properties
----------
platform
'omni'
name
'hro'
tag
Select time between samples, one of {'1min', '5min'}
inst_id
None supported
Note
----
Files are stored by the first day of each month. When downloading use
omni.download(start, stop, freq='MS') to only download days that could possibly
have data. 'MS' gives a monthly start frequency.
This material is based upon work supported by the
National Science Foundation under Grant Number 1259508.
Any opinions, findings, and conclusions or recommendations expressed in this
material are those of the author(s) and do not necessarily reflect the views
of the National Science Foundation.
Warnings
--------
- Currently no cleaning routine. Though the CDAWEB description indicates that
these level-2 products are expected to be ok.
- Module not written by OMNI team.
Custom Functions
----------------
time_shift_to_magnetic_poles
Shift time from bowshock to intersection with one of the magnetic poles
calculate_clock_angle
Calculate the clock angle and IMF mag in the YZ plane
calculate_imf_steadiness
Calculate the IMF steadiness using clock angle and magnitude in the YZ plane
calculate_dayside_reconnection
Calculate the dayside reconnection rate
"""
import datetime as dt
import functools
import numpy as np
import pandas as pds
import scipy.stats as stats
import warnings
from pysat import logger
from pysat.instruments.methods import general as mm_gen
from pysatNASA.instruments.methods import cdaweb as cdw
# ----------------------------------------------------------------------------
# Instrument attributes
platform = 'omni'
name = 'hro'
tags = {'1min': '1-minute time averaged data',
'5min': '5-minute time averaged data'}
inst_ids = {'': [tag for tag in tags.keys()]}
# ----------------------------------------------------------------------------
# Instrument test attributes
_test_dates = {'': {'1min': dt.datetime(2009, 1, 1),
'5min': dt.datetime(2009, 1, 1)}}
# ----------------------------------------------------------------------------
# Instrument methods
def init(self):
"""Initializes the Instrument object with instrument specific values.
Runs once upon instantiation.
"""
ackn_str = ''.join(('For full acknowledgement info, please see: ',
'https://omniweb.gsfc.nasa.gov/html/citing.html'))
self.acknowledgements = ackn_str
self.references = ' '.join(('<NAME> and <NAME>, Solar',
'wind spatial scales in and comparisons',
'of hourly Wind and ACE plasma and',
'magnetic field data, J. Geophys. Res.,',
'Vol. 110, No. A2, A02209,',
'10.1029/2004JA010649.'))
logger.info(ackn_str)
return
def clean(self):
""" Cleaning function for OMNI data
Note
----
'clean' - Replace default fill values with NaN
'dusty' - Same as clean
'dirty' - Same as clean
'none' - Preserve original fill values
"""
for key in self.data.columns:
if key != 'Epoch':
fill = self.meta[key, self.meta.labels.fill_val][0]
idx, = np.where(self[key] == fill)
# Set the fill values to NaN
self[idx, key] = np.nan
# Replace the old fill value with NaN and add this to the notes
fill_notes = "".join(["Replaced standard fill value with NaN. ",
"Standard value was: {:}".format(
self.meta[key,
self.meta.labels.fill_val])])
notes = '\n'.join([str(self.meta[key, self.meta.labels.notes]),
fill_notes])
self.meta[key, self.meta.labels.notes] = notes
self.meta[key, self.meta.labels.fill_val] = np.nan
return
# ----------------------------------------------------------------------------
# Instrument functions
#
# Use the default CDAWeb and pysat methods
# Set the list_files routine
fname = ''.join(['omni_hro_{tag:s}_{{year:4d}}{{month:02d}}{{day:02d}}_',
'v{{version:02d}}.cdf'])
supported_tags = {inst_id: {tag: fname.format(tag=tag) for tag in tags.keys()}
for inst_id in inst_ids.keys()}
list_files = functools.partial(mm_gen.list_files,
supported_tags=supported_tags,
file_cadence=pds.DateOffset(months=1))
# Set the load routine
load = functools.partial(cdw.load, file_cadence=pds.DateOffset(months=1))
# Set the download routine
remote_dir = '/pub/data/omni/omni_cdaweb/hro_{tag:s}/{{year:4d}}/'
download_tags = {inst_id: {tag: {'remote_dir': remote_dir.format(tag=tag),
'fname': supported_tags[inst_id][tag]}
for tag in inst_ids[inst_id]}
for inst_id in inst_ids.keys()}
download = functools.partial(cdw.download, supported_tags=download_tags)
# Set the list_remote_files routine
list_remote_files = functools.partial(cdw.list_remote_files,
supported_tags=download_tags)
# ----------------------------------------------------------------------------
# Local functions
def time_shift_to_magnetic_poles(inst):
""" OMNI data is time-shifted to bow shock. Time shifted again
to intersections with magnetic pole.
Parameters
----------
inst : Instrument class object
Instrument with OMNI HRO data
Note
----
Time shift calculated using distance to bow shock nose (BSN)
and velocity of solar wind along x-direction.
Warnings
--------
Use at own risk.
"""
# need to fill in Vx to get an estimate of what is going on
inst['Vx'] = inst['Vx'].interpolate('nearest')
inst['Vx'] = inst['Vx'].fillna(method='backfill')
inst['Vx'] = inst['Vx'].fillna(method='pad')
inst['BSN_x'] = inst['BSN_x'].interpolate('nearest')
inst['BSN_x'] = inst['BSN_x'].fillna(method='backfill')
inst['BSN_x'] = inst['BSN_x'].fillna(method='pad')
# make sure there are no gaps larger than a minute
inst.data = inst.data.resample('1T').interpolate('time')
time_x = inst['BSN_x'] * 6371.2 / -inst['Vx']
idx, = np.where(np.isnan(time_x))
if len(idx) > 0:
logger.info(time_x[idx])
logger.info(time_x)
time_x_offset = [pds.DateOffset(seconds=time)
for time in time_x.astype(int)]
new_index = []
for i, time in enumerate(time_x_offset):
new_index.append(inst.data.index[i] + time)
inst.data.index = new_index
inst.data = inst.data.sort_index()
return
def calculate_clock_angle(inst):
""" Calculate IMF clock angle and magnitude of IMF in GSM Y-Z plane
Parameters
-----------
inst : pysat.Instrument
Instrument with OMNI HRO data
"""
# Calculate clock angle in degrees
clock_angle = np.degrees(np.arctan2(inst['BY_GSM'], inst['BZ_GSM']))
clock_angle[clock_angle < 0.0] += 360.0
inst['clock_angle'] = pds.Series(clock_angle, index=inst.data.index)
# Calculate magnitude of IMF in Y-Z plane
inst['BYZ_GSM'] = pds.Series(np.sqrt(inst['BY_GSM']**2
+ inst['BZ_GSM']**2),
index=inst.data.index)
return
def calculate_imf_steadiness(inst, steady_window=15, min_window_frac=0.75,
max_clock_angle_std=(90.0 / np.pi),
max_bmag_cv=0.5):
""" Calculate IMF steadiness using clock angle standard deviation and
the coefficient of variation of the IMF magnitude in the GSM Y-Z plane
Parameters
----------
inst : pysat.Instrument
Instrument with OMNI HRO data
steady_window : int
Window for calculating running statistical moments in min (default=15)
min_window_frac : float
Minimum fraction of points in a window for steadiness to be calculated
(default=0.75)
max_clock_angle_std : float
        Maximum standard deviation of the clock angle in degrees (default=90.0/np.pi)
max_bmag_cv : float
Maximum coefficient of variation of the IMF magnitude in the GSM
Y-Z plane (default=0.5)
"""
# We are not going to interpolate through missing values
rates = {'': 1, '1min': 1, '5min': 5}
sample_rate = int(rates[inst.tag])
max_wnum = np.floor(steady_window / sample_rate)
if max_wnum != steady_window / sample_rate:
steady_window = max_wnum * sample_rate
logger.warning("sample rate is not a factor of the statistical window")
logger.warning("new statistical window is {:.1f}".format(steady_window))
min_wnum = int(np.ceil(max_wnum * min_window_frac))
# Calculate the running coefficient of variation of the BYZ magnitude
byz_mean = inst['BYZ_GSM'].rolling(min_periods=min_wnum, center=True,
window=steady_window).mean()
byz_std = inst['BYZ_GSM'].rolling(min_periods=min_wnum, center=True,
window=steady_window).std()
inst['BYZ_CV'] = pds.Series(byz_std / byz_mean, index=inst.data.index)
# Calculate the running circular standard deviation of the clock angle
circ_kwargs = {'high': 360.0, 'low': 0.0, 'nan_policy': 'omit'}
try:
ca_std = \
inst['clock_angle'].rolling(min_periods=min_wnum,
window=steady_window,
center=True).apply(stats.circstd,
kwargs=circ_kwargs,
raw=True)
except TypeError:
warnings.warn(' '.join(['To automatically remove NaNs from the',
'calculation, please upgrade to scipy 1.4 or',
'newer']))
circ_kwargs.pop('nan_policy')
ca_std = \
inst['clock_angle'].rolling(min_periods=min_wnum,
window=steady_window,
center=True).apply(stats.circstd,
kwargs=circ_kwargs,
raw=True)
    inst['clock_angle_std'] = pds.Series(ca_std, index=inst.data.index)
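# ----------------------------------------------------------------------------
# Usage sketch: how this instrument module and the helper functions above can
# be exercised through pysat. Assumes pysat >= 3.0 and pysatNASA are installed,
# a pysat data directory is configured, and network access is available; the
# freq='MS' download pattern follows the module docstring above.
if __name__ == "__main__":
    import pysat
    import pysatNASA

    # Instantiate the 1-minute OMNI HRO instrument through pysat
    omni = pysat.Instrument(inst_module=pysatNASA.instruments.omni_hro,
                            tag='1min')
    # Files are stored by month, so only request month-start dates
    omni.download(start=dt.datetime(2009, 1, 1),
                  stop=dt.datetime(2009, 2, 1), freq='MS')
    # Load one day of data, then attach the derived IMF quantities
    omni.load(2009, 1)
    calculate_clock_angle(omni)
    calculate_imf_steadiness(omni)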
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # e.g. 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
            if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
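    # For reference, a section written by frameToINI has roughly this shape
    # (section, keys and values below are illustrative):
    #
    #     [time-series]
    #     temperature = ['38', '39', '37']
    #     poweronhoursl = ['1000', '1001', '1002']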
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
        Calculates the standard deviation in a multiplication method since division produces an infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
sd = numpy.float128(mean)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
            # Clean out anomaly due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):
                    meanValue = numpy.float128(1)
                sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(float(sigmaValue)):
                    sigmaValue = numpy.float128(1)
                multiplier = numpy.float128(multiplierSigma)  # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7%
                sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(float(sigmaRangeValue)):
                    sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
            # Clean out anomaly due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            logicVector = (dataAnalysisFrame[columnName] >= 1)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": | pandas.StringDtype() | pandas.StringDtype |
from itertools import combinations
from abc import ABCMeta, abstractmethod
from skmob.utils import constants
from skmob.core.trajectorydataframe import TrajDataFrame
from tqdm import tqdm
import pandas as pd
from ..utils.utils import frequency_vector, probability_vector, date_time_precision
class Attack(object):
"""
Abstract class for a generic attack. Defines a series of functions common to all attacks.
Provides basic functions to compute risk for all users in a trajectory dataframe.
    Requires the implementation of both a matching function and an assessment function, which are attack dependent.
:param knowledge_length: int
the length of the background knowledge that we want to simulate.
"""
__metaclass__ = ABCMeta
def __init__(self, knowledge_length):
self.knowledge_length = knowledge_length
@property
def knowledge_length(self):
return self._knowledge_length
@knowledge_length.setter
def knowledge_length(self, val):
if val < 1:
raise ValueError("Parameter knowledge_length should not be less than 1")
self._knowledge_length = val
def _all_risks(self, traj, targets=None, force_instances=False, show_progress=False):
"""
Computes risk for all the users in the data. It applies the risk function to every individual in the data.
If it is not required to compute the risk for the entire data, the targets parameter can be used to select
a portion of users to perform the calculation on.
:param traj: TrajectoryDataFrame
the dataframe against which to calculate risk.
:param targets: TrajectoryDataFrame or list, default None
            the users_id target of the attack. They must be compatible with the trajectory data. Default value is None,
in which case risk is computed on all users in traj
:param force_instances: boolean, default False
if True, returns all possible instances of background knowledge
with their respective probability of reidentification
        :param show_progress: boolean, default False
if True, shows the progress of the computation
:return: Pandas DataFrame
a DataFrame in the form (user_id, risk)
"""
if targets is None:
targets = traj
else:
if isinstance(targets, list):
targets = traj[traj[constants.UID].isin(targets)]
if isinstance(targets, TrajDataFrame) or isinstance(targets, pd.DataFrame):
targets = traj[traj[constants.UID].isin(targets[constants.UID])]
if show_progress:
tqdm.pandas(desc="computing risk")
risks = targets.groupby(constants.UID).progress_apply(lambda x: self._risk(x, traj, force_instances))
else:
risks = targets.groupby(constants.UID).apply(lambda x: self._risk(x, traj, force_instances))
if force_instances:
risks = risks.droplevel(1)
risks = risks.reset_index(drop=True)
else:
risks = risks.reset_index(name=constants.PRIVACY_RISK)
return risks
def _generate_instances(self, single_traj):
"""
Return a generator to all the possible background knowledge of length k for a single user_id.
:param single_traj: TrajectoryDataFrame
the dataframe of the trajectory of a single individual
:return: generator
a generator to all the possible instances of length k. Instances are tuples with the values of the actual
records in the combination.
"""
size = len(single_traj.index)
if self.knowledge_length > size:
return combinations(single_traj.values, size)
else:
return combinations(single_traj.values, self.knowledge_length)
def _risk(self, single_traj, traj, force_instances=False):
"""
Computes the risk of reidentification of an individual with respect to the entire population in the data.
:param single_traj: TrajectoryDataFrame
the dataframe of the trajectory of a single individual
:param traj: TrajectoryDataFrame
the dataframe with the complete data
:param force_instances: boolean, default False
if True, returns all possible instances of background knowledge
with their respective probability of reidentification
:return: float
the risk for the individual, expressed as a float between 0 and 1
"""
instances = self._generate_instances(single_traj)
risk = 0
if force_instances:
inst_data = {constants.LATITUDE: list(), constants.LONGITUDE: list(),
constants.DATETIME: list(), constants.UID: list(),
constants.INSTANCE: list(), constants.INSTANCE_ELEMENT: list(),
constants.PROBABILITY: list()}
inst_id = 1
for instance in instances:
prob = 1.0 / traj.groupby(constants.UID).apply(lambda x: self._match(x, instance)).sum()
elem_count = 1
for elem in instance:
inst_data[constants.LATITUDE].append(elem[0])
inst_data[constants.LONGITUDE].append(elem[1])
inst_data[constants.DATETIME].append(elem[2])
inst_data[constants.UID].append(elem[3])
inst_data[constants.INSTANCE].append(inst_id)
inst_data[constants.INSTANCE_ELEMENT].append(elem_count)
inst_data[constants.PROBABILITY].append(prob)
elem_count += 1
inst_id += 1
return pd.DataFrame(inst_data)
else:
for instance in instances:
prob = 1.0 / traj.groupby(constants.UID).apply(lambda x: self._match(x, instance)).sum()
if prob > risk:
risk = prob
if risk == 1.0:
break
return risk
@abstractmethod
def assess_risk(self, traj, targets=None, force_instances=False, show_progress=False):
"""
Abstract function to assess privacy risk for a whole dataframe of trajectories.
An attack must implement an assessing strategy. This could involve some preprocessing, for example
transforming the original data, and calls to the risk function.
If it is not required to compute the risk for the entire data, the targets parameter can be used to select
a portion of users to perform the assessment on.
:param traj: TrajectoryDataFrame
the dataframe on which to assess privacy risk
:param targets: TrajectoryDataFrame or list, default None
            the users_id target of the attack. They must be compatible with the trajectory data. Default value is None,
in which case risk is computed on all users in traj
:param force_instances: boolean, default False
if True, returns all possible instances of background knowledge
with their respective probability of reidentification
:param show_progress: boolean, default False
if True, shows the progress of the computation
:return: Pandas DataFrame
a DataFrame in the form (user_id, risk)
"""
pass
@abstractmethod
def _match(self, single_traj, instance):
"""
Matching function for the attack. It is used to decide if an instance of background knowledge matches a certain
        trajectory. The internal logic of an attack is represented by this function; therefore, it must be implemented
        according to the kind of attack.
:param single_traj: TrajectoryDataFrame
the dataframe of the trajectory of a single individual
:param instance: tuple
an instance of background knowledge
:return: int
1 if the instance matches the trajectory, 0 otherwise.
"""
pass
class LocationAttack(Attack):
"""
In a location attack the adversary knows the coordinates of the locations visited by an individual and matches them
against trajectories.
:param knowledge_length: int
the length of the background knowledge that we want to simulate. For this attack, it is the number of
locations known to the adversary.
"""
def __init__(self, knowledge_length):
super(LocationAttack, self).__init__(knowledge_length)
def assess_risk(self, traj, targets=None, force_instances=False, show_progress=False):
"""
Assess privacy risk for a whole dataframe of trajectories.
:param traj: TrajectoryDataFrame
the dataframe on which to assess privacy risk
:param targets: TrajectoryDataFrame or list, default None
            the users_id target of the attack. They must be compatible with the trajectory data. Default value is None,
in which case risk is computed on all users in traj
:param force_instances: boolean, default False
if True, returns all possible instances of background knowledge
with their respective probability of reidentification
:param show_progress: boolean, default False
if True, shows the progress of the computation
:return: Pandas DataFrame
a DataFrame in the form (user_id, risk)
"""
traj = traj.sort_values(by=[constants.UID, constants.DATETIME])
return self._all_risks(traj, targets, force_instances, show_progress)
def _match(self, single_traj, instance):
"""
Matching function for the attack.
For a location attack, only the coordinates are used in the matching.
If a trajectory presents the same locations as the ones in the instance, a match is found.
Multiple visits to the same location are also handled.
:param single_traj: TrajectoryDataFrame
the dataframe of the trajectory of a single individual
:param instance: tuple
an instance of background knowledge
:return: int
1 if the instance matches the trajectory, 0 otherwise.
"""
locs = single_traj.groupby([constants.LATITUDE, constants.LONGITUDE]).size().reset_index(name=constants.COUNT)
inst = pd.DataFrame(data=instance, columns=single_traj.columns)
inst = inst.astype(dtype=dict(single_traj.dtypes))
inst = inst.groupby([constants.LATITUDE, constants.LONGITUDE]).size().reset_index(name=constants.COUNT + "inst")
locs_inst = pd.merge(locs, inst, left_on=[constants.LATITUDE, constants.LONGITUDE],
right_on=[constants.LATITUDE, constants.LONGITUDE])
if len(locs_inst.index) != len(inst.index):
return 0
else:
condition = locs_inst[constants.COUNT] >= locs_inst[constants.COUNT + "inst"]
if len(locs_inst[condition].index) != len(inst.index):
return 0
else:
return 1
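# Illustrative usage (a sketch; `tdf` is assumed to be a skmob TrajDataFrame
# with uid, lat, lng and datetime columns):
#
#     attack = LocationAttack(knowledge_length=2)
#     risk_df = attack.assess_risk(tdf, show_progress=True)
#     # risk_df holds one (uid, risk) row per evaluated user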
class LocationSequenceAttack(Attack):
"""
In a location sequence attack the adversary knows the coordinates of locations visited by an individual and
the order in which they were visited and matches them against trajectories.
:param knowledge_length: int
the length of the background knowledge that we want to simulate. For this attack, it is the number of
locations known to the adversary.
"""
def __init__(self, knowledge_length):
super(LocationSequenceAttack, self).__init__(knowledge_length)
def assess_risk(self, traj, targets=None, force_instances=False, show_progress=False):
"""
Assess privacy risk for a whole dataframe of trajectories.
:param traj: TrajectoryDataFrame
the dataframe on which to assess privacy risk
:param targets: TrajectoryDataFrame or list, default None
            the users_id target of the attack. They must be compatible with the trajectory data. Default value is None,
in which case risk is computed on all users in traj
:param force_instances: boolean, default False
if True, returns all possible instances of background knowledge
with their respective probability of reidentification
:param show_progress: boolean, default False
if True, shows the progress of the computation
:return: Pandas DataFrame
a DataFrame in the form (user_id, risk)
"""
traj = traj.sort_values(by=[constants.UID, constants.DATETIME])
return self._all_risks(traj, targets, force_instances, show_progress)
def _match(self, single_traj, instance):
"""
Matching function for the attack.
For a location sequence attack, both the coordinates and the order of visit are used in the matching.
If a trajectory presents the same locations in the same order as the ones in the instance, a match is found.
:param single_traj: TrajectoryDataFrame
the dataframe of the trajectory of a single individual
:param instance: tuple
an instance of background knowledge
:return: int
1 if the instance matches the trajectory, 0 otherwise.
"""
inst = pd.DataFrame(data=instance, columns=single_traj.columns)
inst_iterator = inst.iterrows()
inst_line = next(inst_iterator)[1]
count = 0
for index, row in single_traj.iterrows():
if inst_line[constants.LATITUDE] == row[constants.LATITUDE] and inst_line[constants.LONGITUDE] == row[
constants.LONGITUDE]:
count += 1
try:
inst_line = next(inst_iterator)[1]
except StopIteration:
break
if len(inst.index) == count:
return 1
else:
return 0
class LocationTimeAttack(Attack):
"""
In a location time attack the adversary knows the coordinates of locations visited by an individual and the time
in which they were visited and matches them against trajectories. The precision at which to consider the temporal
information can also be specified.
:param knowledge_length: int
the length of the background knowledge that we want to simulate. For this attack, it is the number of
locations with timestamps known to the adversary.
:param time_precision: string, default 'Hour'
the precision at which to consider the timestamps for the visits.
The possible precisions are: Year, Month, Day, Hour, Minute, Second.
"""
def __init__(self, knowledge_length, time_precision="Hour"):
self.time_precision = time_precision
super(LocationTimeAttack, self).__init__(knowledge_length)
@property
def time_precision(self):
return self._time_precision
@time_precision.setter
def time_precision(self, val):
if val not in constants.PRECISION_LEVELS:
raise ValueError("Possible time precisions are: Year, Month, Day, Hour, Minute, Second")
self._time_precision = val
def assess_risk(self, traj, targets=None, force_instances=False, show_progress=False):
"""
Assess privacy risk for a whole dataframe of trajectories.
:param traj: TrajectoryDataFrame
the dataframe on which to assess privacy risk
:param targets: TrajectoryDataFrame or list, default None
            the users_id target of the attack. They must be compatible with the trajectory data. Default value is None,
in which case risk is computed on all users in traj
:param force_instances: boolean, default False
if True, returns all possible instances of background knowledge
with their respective probability of reidentification
:param show_progress: boolean, default False
if True, shows the progress of the computation
:return: Pandas DataFrame
a DataFrame in the form (user_id, risk)
"""
traj = traj.sort_values(by=[constants.UID, constants.DATETIME])
traj[constants.TEMP] = traj[constants.DATETIME].apply(lambda x: date_time_precision(x, self.time_precision))
return self._all_risks(traj, targets, force_instances, show_progress)
def _match(self, single_traj, instance):
"""
Matching function for the attack.
For a location time attack, both the coordinates and the order of visit are used in the matching.
If a trajectory presents the same locations with the same temporal information as in the instance,
a match is found.
:param single_traj: TrajectoryDataFrame
the dataframe of the trajectory of a single individual
:param instance: tuple
an instance of background knowledge
:return: int
1 if the instance matches the trajectory, 0 otherwise.
"""
inst = pd.DataFrame(data=instance, columns=single_traj.columns)
locs_inst = pd.merge(single_traj, inst, left_on=[constants.LATITUDE, constants.LONGITUDE, constants.TEMP],
right_on=[constants.LATITUDE, constants.LONGITUDE, constants.TEMP])
if len(locs_inst.index) == len(inst.index):
return 1
else:
return 0
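# For example, LocationTimeAttack(knowledge_length=2, time_precision='Day')
# would match on coordinates plus the day of each visit rather than the
# default hourly precision.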
class UniqueLocationAttack(Attack):
"""
In a unique location attack the adversary knows the coordinates of unique locations visited by an individual,
and matches them against frequency vectors. A frequency vector, is an aggregation on trajectory
data showing the unique locations visited by an individual and the frequency with which he visited those locations.
:param knowledge_length: int
the length of the background knowledge that we want to simulate. For this attack, it is the number of unique
locations known to the adversary.
"""
def __init__(self, knowledge_length):
super(UniqueLocationAttack, self).__init__(knowledge_length)
def assess_risk(self, traj, targets=None, force_instances=False, show_progress=False):
"""
Assess privacy risk for a whole dataframe of trajectories.
Internally performs the conversion to frequency vectors.
:param traj: TrajectoryDataFrame
the dataframe on which to assess privacy risk
:param targets: TrajectoryDataFrame or list, default None
            the users_id target of the attack. They must be compatible with the trajectory data. Default value is None,
in which case risk is computed on all users in traj
:param force_instances: boolean, default False
if True, returns all possible instances of background knowledge
with their respective probability of reidentification
:param show_progress: boolean, default False
if True, shows the progress of the computation
:return: Pandas DataFrame
a DataFrame in the form (user_id, risk)
"""
freq = frequency_vector(traj)
return self._all_risks(freq, targets, force_instances, show_progress)
def _match(self, single_traj, instance):
"""
Matching function for the attack.
For a unique location attack, the coordinates of unique locations are used in the matching.
If a frequency vector presents the same locations as in the instance, a match is found.
:param single_traj: TrajectoryDataFrame
the dataframe of the frequency vector of a single individual
:param instance: tuple
an instance of background knowledge
:return: int
1 if the instance matches the trajectory, 0 otherwise.
"""
inst = pd.DataFrame(data=instance, columns=single_traj.columns)
locs_inst = pd.merge(single_traj, inst, left_on=[constants.LATITUDE, constants.LONGITUDE],
right_on=[constants.LATITUDE, constants.LONGITUDE])
if len(locs_inst.index) == len(inst.index):
return 1
else:
return 0
class LocationFrequencyAttack(Attack):
"""
In a location frequency attack the adversary knows the coordinates of the unique locations visited by an individual
and the frequency with which he visited them, and matches them against frequency vectors. A frequency vector,
is an aggregation on trajectory data showing the unique locations visited by an individual and the frequency
with which he visited those locations.
It is possible to specify a tolerance level for the matching of the frequency.
:param knowledge_length: int
the length of the background knowledge that we want to simulate. For this attack, it is the number of unique
locations and their frequency known to the adversary.
:param tolerance: float, default 0
        the tolerance with which to match the frequency. It can assume values between 0 and 1.
"""
def __init__(self, knowledge_length, tolerance=0.0):
self.tolerance = tolerance
super(LocationFrequencyAttack, self).__init__(knowledge_length)
@property
def tolerance(self):
return self._tolerance
@tolerance.setter
def tolerance(self, val):
if val > 1.0 or val < 0.0:
raise ValueError("Tolerance should be in the interval [0.0,1.0]")
self._tolerance = val
def assess_risk(self, traj, targets=None, force_instances=False, show_progress=False):
"""
Assess privacy risk for a whole dataframe of trajectories.
Internally performs the conversion to frequency vectors.
:param traj: TrajectoryDataFrame
the dataframe on which to assess privacy risk
:param targets: TrajectoryDataFrame or list, default None
            the users_id target of the attack. They must be compatible with the trajectory data. Default value is None,
in which case risk is computed on all users in traj
:param force_instances: boolean, default False
if True, returns all possible instances of background knowledge
with their respective probability of reidentification
:param show_progress: boolean, default False
if True, shows the progress of the computation
:return: Pandas DataFrame
a DataFrame in the form (user_id, risk)
"""
freq = frequency_vector(traj)
return self._all_risks(freq, targets, force_instances, show_progress)
def _match(self, single_traj, instance):
"""
Matching function for the attack.
For a frequency location attack, the coordinates of unique locations and their frequency of visit are used
in the matching. If a frequency vector presents the same locations with the same frequency as in the instance,
a match is found. The tolerance level specified at construction is used to construct and interval of frequency
and allow for less precise matching.
:param single_traj: TrajectoryDataFrame
the dataframe of the trajectory of a single individual
:param instance: tuple
an instance of background knowledge
:return: int
1 if the instance matches the trajectory, 0 otherwise.
"""
inst = pd.DataFrame(data=instance, columns=single_traj.columns)
inst.rename(columns={constants.FREQUENCY: constants.FREQUENCY + "inst"}, inplace=True)
locs_inst = pd.merge(single_traj, inst, left_on=[constants.LATITUDE, constants.LONGITUDE],
right_on=[constants.LATITUDE, constants.LONGITUDE])
if len(locs_inst.index) != len(inst.index):
return 0
else:
condition1 = locs_inst[constants.FREQUENCY + "inst"] >= locs_inst[constants.FREQUENCY] - (
locs_inst[constants.FREQUENCY] * self.tolerance)
condition2 = locs_inst[constants.FREQUENCY + "inst"] <= locs_inst[constants.FREQUENCY] + (
locs_inst[constants.FREQUENCY] * self.tolerance)
if len(locs_inst[condition1 & condition2].index) != len(inst.index):
return 0
else:
return 1
class LocationProbabilityAttack(Attack):
"""
In a location probability attack the adversary knows the coordinates of
the unique locations visited by an individual and the probability with which he visited them,
and matches them against probability vectors.
    A probability vector is an aggregation on trajectory data showing the unique locations visited by an individual
and the probability with which he visited those locations.
It is possible to specify a tolerance level for the matching of the probability.
:param knowledge_length: int
the length of the background knowledge that we want to simulate. For this attack, it is the number of unique
locations and their probability known to the adversary.
:param tolerance: float, default 0
        the tolerance with which to match the probability. It can assume values between 0 and 1.
"""
def __init__(self, knowledge_length, tolerance=0.0):
self.tolerance = tolerance
super(LocationProbabilityAttack, self).__init__(knowledge_length)
@property
def tolerance(self):
return self._tolerance
@tolerance.setter
def tolerance(self, val):
if val > 1.0 or val < 0.0:
raise ValueError("Tolerance should be in the interval [0.0,1.0]")
self._tolerance = val
def assess_risk(self, traj, targets=None, force_instances=False, show_progress=False):
"""
Assess privacy risk for a whole dataframe of trajectories.
Internally performs the conversion to probability vectors.
:param traj: TrajectoryDataFrame
the dataframe on which to assess privacy risk
:param targets: TrajectoryDataFrame or list, default None
            the users_id target of the attack. They must be compatible with the trajectory data. Default value is None,
in which case risk is computed on all users in traj
:param force_instances: boolean, default False
if True, returns all possible instances of background knowledge
with their respective probability of reidentification
:param show_progress: boolean, default False
if True, shows the progress of the computation
:return: Pandas DataFrame
a DataFrame in the form (user_id, risk)
"""
prob = probability_vector(traj)
return self._all_risks(prob, targets, force_instances, show_progress)
def _match(self, single_traj, instance):
"""
Matching function for the attack.
For a probability location attack, the coordinates of unique locations and their probability of visit are used
in the matching.
If a probability vector presents the same locations with the same probability as in the instance,
a match is found.
The tolerance level specified at construction is used to build and interval of probability and allow
for less precise matching.
:param single_traj: TrajectoryDataFrame
the dataframe of the trajectory of a single individual
:param instance: tuple
an instance of background knowledge
:return: int
1 if the instance matches the trajectory, 0 otherwise.
"""
        inst = pd.DataFrame(data=instance, columns=single_traj.columns)
#!/usr/bin/env python
###
# File Created: Wednesday, February 6th 2019, 9:05:13 pm
# Author: <NAME> <EMAIL>
# Modified By: <NAME>
# Last Modified: Friday, February 8th 2019, 1:06:21 pm
###
import os
from os.path import isfile, join, split
import glob
import pandas as pd
# Timeseries data
import datetime
import numpy as np
from pandas.tseries.frequencies import to_offset
def get_avg_losses():
"""Returns pandas df of average losses over all runs of all files
Returns:
        pd.df -- df of averages over all runs of all files
"""
# Returns pd df of fullpaths, paths and filenames
files = files_to_df('../output/final/*/*.csv')
list_num_files('../output/final')
# Iterate over all runs for every unique filename,
# resample df to 2S interval,
# create losses df,
# concat mean to avg loss df
print('List of filenames:')
print('---------------------------')
avg_losses = pd.DataFrame()
for fn in files['filename'].unique():
name = fn.split('.')[0]
print(fn)
paths = files[files['filename'] == fn]['fullpath']
        losses = pd.DataFrame()
import pygmalion as ml
import matplotlib.pyplot as plt
import pandas as pd
import pathlib
import IPython
path = pathlib.Path(__file__).parent
data_path = path / "data"
# Download the data
ml.datasets.airline_tweets(data_path)
df = pd.read_csv(data_path / "airline_tweets.csv")
import pandas as pd
import numpy as np
import math
import os
import time
from DataCleanService.src.main.utils.utils import remove_gz_suffix, remove_gz_suffix_for_condo
from DataCleanService.src.main.config import constants, DataCleanServiceConfig
import glob
# TODO: data format exception (str, float...)
def select_related_rows(df, prefix):
df = df[df['Taxes'] != 0]
if prefix == 'Sold':
df.dropna(subset=['Cd'], inplace=True)
df = df[df['Sp_dol'] > 50000]
# TODO: Remove this constraint
# df['lp/sp'] = abs(df['Lp_dol'] - df['Sp_dol']) / df['Sp_dol']
# df = df[df['lp/sp'] <= 0.3]
# df.drop(columns=['lp/sp'], inplace=True)
if prefix == 'Listing':
df = df[df['Lp_dol'] > 50000]
df.drop(columns=['Sp_dol', 'Cd'], inplace=True)
year, month, day, hour, minute = time.strftime("%Y,%m,%d,%H,%M").split(',')
cur_date = str(year) + '-' + str(month) + '-' + str(day)
df['Cd'] = cur_date
df.index = range(len(df))
return df
def complement_null(df, depth_median, front_median):
df[constants.CMPLMT_NONE_COL] = df[constants.CMPLMT_NONE_COL].fillna(value='None')
df[constants.CMPLMT_ZERO_COL] = df[constants.CMPLMT_ZERO_COL].fillna(value=0)
df['Den_fr'] = df['Den_fr'].fillna(value='N')
# Depth / Front_ft: Condo related cols -> 0 House-related cols -> median
df_cdhs = df[df['Type_own1_out'].isin(constants.CDHS_LABEL)][['Depth', 'Front_ft']]
df_part_hs = df[~df['Type_own1_out'].isin(constants.CDHS_LABEL)][['Depth', 'Front_ft']]
df_cdhs['Depth'] = df_cdhs['Depth'].fillna(value=0)
df_cdhs['Front_ft'] = df_cdhs['Front_ft'].fillna(value=0)
if (depth_median == 0) & (front_median == 0):
depth_median = df_part_hs['Depth'].median()
front_median = df_part_hs['Front_ft'].median()
median = [[depth_median, front_median]]
df_median = pd.DataFrame(median, columns=['depth_median', 'front_median'])
df_median.to_csv(DataCleanServiceConfig.CLEAN_DATA_MEDIAN_FILE, index=None)
df_part_hs['Depth'] = df_part_hs['Depth'].fillna(value=depth_median)
df_part_hs['Front_ft'] = df_part_hs['Front_ft'].fillna(value=front_median)
depth_front = pd.concat([df_cdhs, df_part_hs], ignore_index=False)
df = df.join(depth_front, lsuffix='_x', rsuffix='')
df.drop(columns=['Depth_x', 'Front_ft_x'], inplace=True)
return df
def process_cols(df, comm_list):
# Process Area code
df.Area_code = df.Area_code.astype(str)
    df['Area_code'] = df.Area_code.str.extract(r'(\d+)', expand=True).astype(float)
# Process Garage
df['Garage'] = df['Gar'] + df['Gar_spaces']
df.drop(columns=['Gar', 'Gar_spaces'], inplace=True)
# Process lat & lng
df['lng'] = df['lng'].apply(lambda x: x * (-1))
# Process Community
if comm_list is None:
cm_count = df.Community.value_counts()
cm_h = {cm_count.index[i]: cm_count.values[i] for i in range(len(cm_count.values)) if
cm_count.values[i] > constants.COMM_TH}
selected_cm = [*(cm_h.keys())]
df_comm = pd.DataFrame(selected_cm, columns=['Comm'])
df_comm.to_csv(DataCleanServiceConfig.COMM_FILE, index=None)
else:
selected_cm = comm_list
df.Community.where(df['Community'].isin(selected_cm), 'Other', inplace=True)
return df
def process_date(df):
df['Cd'] = pd.to_datetime(df['Cd'])
df['month'] = df.Cd.dt.month
df.index = range(len(df))
month_dic = {1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun', 7: 'Jul', 8: 'Aug', 9: 'Sep', 10: 'Oct',
11: 'Nov', 12: 'Dec'}
df_month = pd.DataFrame(0, index=np.arange(len(df)), columns=constants.MONTH)
df = pd.concat([df, df_month], axis=1)
for i, month in enumerate(df['month']):
df.loc[i, month_dic[month]] = 1
df.drop(columns='month', inplace=True)
return df
def rooms_total_area(df):
df['area'] = 0
for i in range(1, 13):
df['area'] += df['Rm' + str(i) + '_len'] * df['Rm' + str(i) + '_wth']
double_rm = (df[constants.RM_LEN_WTH] != 0).sum(axis=1)
df['rm_num'] = double_rm / 2.0
df = df[df['rm_num'] != 0]
df['ave_area'] = df['area'] / df['rm_num']
# Reset index
df.index = range(len(df))
for i, area in enumerate(df['ave_area']):
if (area > 1) & (area < 100):
df.loc[i, 'Rooms_total_area'] = df.loc[i, 'area']
elif (area >= 100) & (area < 700):
df.loc[i, 'Rooms_total_area'] = df.loc[i, 'area'] / 10.7584 # 3.28 * 3.28
elif (area >= 700) & (area < 8000):
df.loc[i, 'Rooms_total_area'] = df.loc[i, 'area'] / 100.0
elif (area >= 8000) & (area < 22500):
df.loc[i, 'Rooms_total_area'] = df.loc[i, 'area'] / 1075.84 # 32.8 * 32.8
else:
df.loc[i, 'Rooms_total_area'] = df.loc[i, 'rm_num'] * 25.0
df.drop(columns=['area', 'rm_num', 'ave_area'], inplace=True)
df.index = range(len(df))
df_area = pd.DataFrame(0, index=np.arange(len(df)), columns=constants.DISCRETE_ROOM_AREA)
df = pd.concat([df, df_area], axis=1)
for i, area in enumerate(df['Rooms_total_area']):
if area < 50:
df.loc[i, 'less-than50'] = 1
elif (area >= 50) & (area < 100):
df.loc[i, '50-to-100'] = 1
elif (area >= 100) & (area < 150):
df.loc[i, '100-to-150'] = 1
elif (area >= 150) & (area < 200):
df.loc[i, '150-to-200'] = 1
elif (area >= 200) & (area < 250):
df.loc[i, '200-to-250'] = 1
elif (area >= 250) & (area < 350):
df.loc[i, '250-to-350'] = 1
else:
df.loc[i, 'larger-than350'] = 1
df.drop(columns='Rooms_total_area', inplace=True)
df.drop(columns=constants.RM_LEN_WTH, inplace=True)
return df
def drop_cols(df):
df.drop(columns=['Lsc', 'S_r'], inplace=True)
df.dropna(inplace=True)
df.drop_duplicates(keep='last', inplace=True)
return df
def clean_whole_data(df_raw_data=None, raw_data_file=None, lsc='Sld', s_r='Sale'):
if df_raw_data is not None:
df_data = df_raw_data
elif raw_data_file is not None:
df_data = pd.read_csv(raw_data_file, sep=',')
    else:
        print("No data / data file to clean!")
        return None
# Select house records: type is house & house-related columns
df_hs = \
df_data.loc[
(df_data['Type_own1_out'].isin(constants.HS_LABEL)) & (df_data['Lsc'] == lsc) & (df_data['S_r'] == s_r)][
constants.COLUMNS_HS]
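    # A full clean regenerates the cached community list and Depth/Front_ft
    # medians from scratch, so leftovers from a previous run are removed first;
    # clean_house_increment_data() later reloads exactly these cached files.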
if os.path.isfile(DataCleanServiceConfig.COMM_FILE):
os.remove(DataCleanServiceConfig.COMM_FILE)
if os.path.isfile(DataCleanServiceConfig.CLEAN_DATA_MEDIAN_FILE):
os.remove(DataCleanServiceConfig.CLEAN_DATA_MEDIAN_FILE)
print("Start select_related_rows...")
df_hs = select_related_rows(df_hs, prefix='Sold')
print("Start complement null...")
df_hs = complement_null(df_hs, 0, 0)
print("Start process_cols...")
df_hs = process_cols(df_hs, None)
print("Start process_date...")
df_hs = process_date(df_hs)
print("Start calculate rooms_total_area...")
df_hs = rooms_total_area(df_hs)
drop_cols(df_hs)
print("Sorting date...")
df_hs['Cd'] = pd.to_datetime(df_hs.Cd)
df_hs.sort_values(by=['Cd'], ascending=True, inplace=True)
print(len(df_hs))
# TODO:
# Change file name
df_hs.to_csv(DataCleanServiceConfig.CLEAN_HOUSE_DATA, index=False)
# df_hs.to_csv(DataCleanServiceConfig.DATA_PATH + 'clean_data_515.csv', index=False)
return df_hs
def clean_house_increment_data(df_hs, prefix):
if len(df_hs) == 0:
return None
print("Start select_related_rows...")
df_hs = select_related_rows(df_hs, prefix)
if len(df_hs) == 0:
return None
if os.path.isfile(DataCleanServiceConfig.CLEAN_DATA_MEDIAN_FILE):
print("Start complement null...")
df_median = pd.read_csv(DataCleanServiceConfig.CLEAN_DATA_MEDIAN_FILE)
depth_median = df_median['depth_median'].values[0]
front_median = df_median['front_median'].values[0]
print(depth_median, front_median)
df_hs = complement_null(df_hs, depth_median, front_median)
else:
print("Error! No median file found!")
return
if os.path.isfile(DataCleanServiceConfig.COMM_FILE):
comm_list = pd.read_csv(DataCleanServiceConfig.COMM_FILE)['Comm'].values.tolist()
df_hs = process_cols(df_hs, comm_list)
else:
print('Error! No community file found!')
df_hs = process_date(df_hs)
df_hs = rooms_total_area(df_hs)
drop_cols(df_hs)
if len(df_hs) == 0:
return None
print("Sorting date...")
df_hs['Cd'] = | pd.to_datetime(df_hs.Cd) | pandas.to_datetime |
"""
Monte Carlo-type tests for the BM model
Note that that the actual tests that run are just regression tests against
previously estimated values with small sample sizes that can be run quickly
for continuous integration. However, this file can be used to re-run (slow)
large-sample Monte Carlo tests.
"""
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_allclose
from scipy.signal import lfilter
from statsmodels.tsa.statespace import (
dynamic_factor_mq, sarimax, varmax, dynamic_factor)
def simulate_k_factor1(nobs=1000):
mod_sim = dynamic_factor.DynamicFactor(np.zeros((1, 4)), k_factors=1,
factor_order=1, error_order=1)
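    # Parameter vector layout for DynamicFactor: factor loadings, idiosyncratic
    # variances, the factor AR(1) coefficient, and the error AR(1) coefficients
    # (the loadings list is truncated to k_endog; error AR terms are set to 0).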
loadings = [1.0, -0.75, 0.25, -0.3, 0.5]
p = np.r_[loadings[:mod_sim.k_endog],
[10] * mod_sim.k_endog,
0.5,
[0.] * mod_sim.k_endog]
ix = pd.period_range(start='1935-01', periods=nobs, freq='M')
endog = pd.DataFrame(mod_sim.simulate(p, nobs), index=ix)
true = pd.Series(p, index=mod_sim.param_names)
# Compute levels series (M and Q)
ix = pd.period_range(start=endog.index[0] - 1, end=endog.index[-1],
freq=endog.index.freq)
levels_M = 1 + endog.reindex(ix) / 100
levels_M.iloc[0] = 100
levels_M = levels_M.cumprod()
log_levels_M = np.log(levels_M) * 100
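    # Quarterly log-levels: sum the monthly log-levels within each quarter
    # (quarter-end resampling) and drop the last, possibly incomplete, quarter.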
log_levels_Q = (np.log(levels_M).resample('Q', convention='e')
.sum().iloc[:-1] * 100)
# This is an alternative way to compute the quarterly levels
# endog_M = endog.iloc[:, :3]
# x = endog.iloc[:, 3:]
# endog_Q = (x + 2 * x.shift(1) + 3 * x.shift(2) + 2 * x.shift(3) +
# x.shift(4)).resample('Q', convention='e').last().iloc[:-1] / 3
# levels_Q = 1 + endog.iloc[:, 3:] / 100
# levels_Q.iloc[0] = 100
# Here is another alternative way to compute the quarterly levels
# weights = np.array([1, 2, 3, 2, 1])
# def func(x, weights):
# return np.sum(weights * x)
# r = endog_M.rolling(5)
# (r.apply(func, args=(weights,), raw=False).resample('Q', convention='e')
# .last().iloc[:-1].tail())
# Compute the growth rate series that we'll actually run the model on
endog_M = log_levels_M.iloc[:, :3].diff()
endog_Q = log_levels_Q.iloc[:, 3:].diff()
return endog_M, endog_Q, log_levels_M, log_levels_Q, true
def simulate_k_factors3_blocks2(nobs=1000, idiosyncratic_ar1=False):
# Simulate the first two factors
ix = pd.period_range(start='2000-01', periods=1, freq='M')
endog = pd.DataFrame(np.zeros((1, 2)), columns=['f1', 'f2'], index=ix)
mod_f_12 = varmax.VARMAX(endog, order=(1, 0), trend='n')
params = [0.5, 0.1, -0.2, 0.9, 1.0, 0, 1.0]
f_12 = mod_f_12.simulate(params, nobs)
# Simulate the third factor
endog = | pd.Series([0], name='f3', index=ix) | pandas.Series |
import sys
import time
from multiprocessing import Pool
from unittest import TestCase, main
import numpy as np
import pandas as pd
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_series_equal, assert_frame_equal
sys.path.append("../")
from valhalla.extract import DataExtractor
"""
test.h5
The data inside consists of four groups (bcateid, price, model, pid),
laid out in the same format as the data provided by Kakao.
This fixture exists only to test that the DataLoader behaves correctly; the
values themselves have no meaning as data.
bcateid price model pid
0 24 -1 Q4081781803
1 17 -1 W4203425504
2 24 84750 인터파크/오피스메인/프린터/라벨/도트/바코드/기타/프린터 기타 G4453903364
3 35 -1 중성펜/젤러펜 필기구 볼펜류 볼펜심제브라 Refill SK U4418629259
4 40 -1 무형광아기세탁망원형[45cm] I4066071748
5 54 87210 근조화환 J4586931195
6 35 -1 기타 F4662379886
7 34 -1 O3764058858
8 14 966000 인터파크/에트로/여성가방/숄더백(천연가죽) J3959473240
9 3 16620 인터파크/얀케이스/스마트폰/태블릿케이스/태블릿케이스/파우치/갤럭시용케이스/파우치 K4487826783
"""
class DataLoaderSimpleTest(TestCase):
def setUp(self):
self.dl = DataExtractor("test.h5", 'train')
def tearDown(self):
del self.dl
def test_init_dataloader(self):
pass
def test_length_of_dataloader(self):
self.assertEqual(len(self.dl), 10)
def test_columns_of_dataloader(self):
answer = ['bcateid', 'price', 'model', 'pid']
        self.assertEqual(len(answer), len(self.dl.columns))  # check the lengths match
        self.assertListEqual(sorted(answer), sorted(
            self.dl.columns))  # check the elements match, independent of order
def test_get_item_by_column_name_bcateid(self):
pred = self.dl['bcateid']
answer = pd.Series([24, 17, 24, 35, 40, 54, 35, 34, 14, 3],
dtype='int32', name='bcateid')
assert_series_equal(pred, answer)
def test_get_item_by_coumn_name_model(self):
pred = self.dl['model']
answer = pd.Series(["",
"",
"인터파크/오피스메인/프린터/라벨/도트/바코드/기타/프린터 기타",
'중성펜/젤러펜 필기구 볼펜류 볼펜심제브라 Refill SK',
'무형광아기세탁망원형[45cm]',
'근조화환',
'기타',
'',
'인터파크/에트로/여성가방/숄더백(천연가죽)',
'인터파크/얀케이스/스마트폰/태블릿케이스/태블릿케이스/파우치/갤럭시용케이스/파우치'],
name='model')
assert_series_equal(pred, answer)
def test_get_item_by_multiple_column(self):
pred = self.dl[['bcateid', 'price']]
answer = pd.DataFrame([[24, -1],
[17, -1],
[24, 84750],
[35, -1],
[40, -1],
[54, 87210],
[35, -1],
[34, -1],
[14, 966000],
[3, 16620]],
columns=['bcateid', 'price'], dtype='int32')
assert_frame_equal(pred, answer)
def test_get_item_by_column_and_index(self):
pred = self.dl['bcateid', 0]
answer = pd.Series([24], name='bcateid')
assert_series_equal(pred, answer)
def test_get_item_by_column_and_slice(self):
pred = self.dl['bcateid', 0:3]
answer = pd.Series([24, 17, 24], name='bcateid', dtype='int32')
assert_series_equal(pred, answer)
def test_get_item_by_multiple_column_and_slice(self):
pred = self.dl[['bcateid', 'price'], 0:3]
answer = pd.DataFrame([[24, -1],
[17, -1],
[24, 84750]],
columns=['bcateid', 'price'], dtype='int32')
assert_frame_equal(pred, answer)
def test_get_item_by_multiple_column_and_list(self):
pred = self.dl[['bcateid', 'price'], [0, 3]]
answer = pd.DataFrame([[24, -1],
[35, -1]],
columns=['bcateid', 'price'], dtype='int32')
| assert_frame_equal(pred, answer) | pandas.util.testing.assert_frame_equal |
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import itertools
import pandas as pd
from pipeline import experiment, ephys, psth
from pipeline.plot import (_plot_with_sem, _extract_one_stim_dur, _get_units_hemisphere,
_plot_stacked_psth_diff, _plot_avg_psth,
_get_photostim_time_and_duration, _get_trial_event_times,
jointplot_w_hue)
m_scale = 1200  # scaling factor for scatter-plot marker sizes
_plt_xmin = -3  # x-axis lower limit (s, relative to go-cue)
_plt_xmax = 2  # x-axis upper limit (s, relative to go-cue)
def plot_clustering_quality(probe_insertion):
probe_insertion = probe_insertion.proj()
amp, snr, spk_rate, isi_violation = (ephys.Unit * ephys.UnitStat
* ephys.ProbeInsertion.InsertionLocation & probe_insertion).fetch(
'unit_amp', 'unit_snr', 'avg_firing_rate', 'isi_violation')
metrics = {'amp': amp,
'snr': snr,
'isi': np.array(isi_violation) * 100, # to percentage
'rate': np.array(spk_rate)}
label_mapper = {'amp': 'Amplitude',
'snr': 'Signal to noise ratio (SNR)',
'isi': 'ISI violation (%)',
'rate': 'Firing rate (spike/s)'}
fig, axs = plt.subplots(2, 3, figsize=(12, 8))
fig.subplots_adjust(wspace=0.4)
for (m1, m2), ax in zip(itertools.combinations(list(metrics.keys()), 2), axs.flatten()):
ax.plot(metrics[m1], metrics[m2], '.k')
ax.set_xlabel(label_mapper[m1])
ax.set_ylabel(label_mapper[m2])
# cosmetic
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
def plot_unit_characteristic(probe_insertion, axs=None):
probe_insertion = probe_insertion.proj()
amp, snr, spk_rate, x, y, insertion_depth = (
ephys.Unit * ephys.ProbeInsertion.InsertionLocation * ephys.UnitStat
& probe_insertion & 'unit_quality != "all"').fetch(
'unit_amp', 'unit_snr', 'avg_firing_rate', 'unit_posx', 'unit_posy', 'dv_location')
insertion_depth = np.where(np.isnan(insertion_depth), 0, insertion_depth)
metrics = pd.DataFrame(list(zip(*(amp/amp.max(), snr/snr.max(), spk_rate/spk_rate.max(), x, y + insertion_depth))))
metrics.columns = ['amp', 'snr', 'rate', 'x', 'y']
if axs is None:
fig, axs = plt.subplots(1, 3, figsize=(10, 8))
fig.subplots_adjust(wspace=0.6)
assert axs.size == 3
cosmetic = {'legend': None,
'linewidth': 1.75,
'alpha': 0.9,
'facecolor': 'none', 'edgecolor': 'k'}
sns.scatterplot(data=metrics, x='x', y='y', s=metrics.amp*m_scale, ax=axs[0], **cosmetic)
sns.scatterplot(data=metrics, x='x', y='y', s=metrics.snr*m_scale, ax=axs[1], **cosmetic)
sns.scatterplot(data=metrics, x='x', y='y', s=metrics.rate*m_scale, ax=axs[2], **cosmetic)
# cosmetic
for title, ax in zip(('Amplitude', 'SNR', 'Firing rate'), axs.flatten()):
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_title(title)
ax.set_xlim((-10, 60))
def plot_unit_selectivity(probe_insertion, axs=None):
probe_insertion = probe_insertion.proj()
attr_names = ['unit', 'period', 'period_selectivity', 'contra_firing_rate',
'ipsi_firing_rate', 'unit_posx', 'unit_posy', 'dv_location']
selective_units = (psth.PeriodSelectivity * ephys.Unit * ephys.ProbeInsertion.InsertionLocation
* experiment.Period & probe_insertion & 'period_selectivity != "non-selective"').fetch(*attr_names)
selective_units = pd.DataFrame(selective_units).T
selective_units.columns = attr_names
selective_units.period_selectivity.astype('category')
# --- account for insertion depth (manipulator depth)
selective_units.unit_posy = (selective_units.unit_posy
+ np.where(np.isnan(selective_units.dv_location.values.astype(float)),
0, selective_units.dv_location.values.astype(float)))
# --- get ipsi vs. contra firing rate difference
f_rate_diff = np.abs(selective_units.ipsi_firing_rate - selective_units.contra_firing_rate)
selective_units['f_rate_diff'] = f_rate_diff / f_rate_diff.max()
# --- prepare for plotting
cosmetic = {'legend': None,
'linewidth': 0.0001}
ymax = selective_units.unit_posy.max() + 100
# a bit of hack to get 'open circle'
pts = np.linspace(0, np.pi * 2, 24)
circ = np.c_[np.sin(pts) / 2, -np.cos(pts) / 2]
vert = np.r_[circ, circ[::-1] * .7]
open_circle = mpl.path.Path(vert)
# --- plot
if axs is None:
fig, axs = plt.subplots(1, 3, figsize=(10, 8))
fig.subplots_adjust(wspace=0.6)
assert axs.size == 3
for (title, df), ax in zip(((p, selective_units[selective_units.period == p])
for p in ('sample', 'delay', 'response')), axs):
sns.scatterplot(data=df, x='unit_posx', y='unit_posy',
s=df.f_rate_diff.values.astype(float)*m_scale,
hue='period_selectivity', marker=open_circle,
palette={'contra-selective': 'b', 'ipsi-selective': 'r'},
ax=ax, **cosmetic)
contra_p = (df.period_selectivity == 'contra-selective').sum() / len(df) * 100
# cosmetic
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_title(f'{title}\n% contra: {contra_p:.2f}\n% ipsi: {100-contra_p:.2f}')
ax.set_xlim((-10, 60))
# ax.set_ylim((0, ymax))
def plot_unit_bilateral_photostim_effect(probe_insertion, axs=None):
probe_insertion = probe_insertion.proj()
cue_onset = (experiment.Period & 'period = "delay"').fetch1('period_start')
no_stim_cond = (psth.TrialCondition
& {'trial_condition_name':
'all_noearlylick_both_alm_nostim'}).fetch1('KEY')
bi_stim_cond = (psth.TrialCondition
& {'trial_condition_name':
'all_noearlylick_both_alm_stim'}).fetch1('KEY')
# get photostim duration
stim_durs = np.unique((experiment.Photostim & experiment.PhotostimEvent
* psth.TrialCondition().get_trials('all_noearlylick_both_alm_stim')
& probe_insertion).fetch('duration'))
stim_dur = _extract_one_stim_dur(stim_durs)
units = ephys.Unit & probe_insertion & 'unit_quality != "all"'
metrics = pd.DataFrame(columns=['unit', 'x', 'y', 'frate_change'])
# XXX: could be done with 1x fetch+join
for u_idx, unit in enumerate(units.fetch('KEY')):
x, y = (ephys.Unit & unit).fetch1('unit_posx', 'unit_posy')
nostim_psth, nostim_edge = (
psth.UnitPsth & {**unit, **no_stim_cond}).fetch1('unit_psth')
bistim_psth, bistim_edge = (
psth.UnitPsth & {**unit, **bi_stim_cond}).fetch1('unit_psth')
# compute the firing rate difference between contra vs. ipsi within the stimulation duration
ctrl_frate = nostim_psth[np.logical_and(nostim_edge[1:] >= cue_onset, nostim_edge[1:] <= cue_onset + stim_dur)]
stim_frate = bistim_psth[np.logical_and(bistim_edge[1:] >= cue_onset, bistim_edge[1:] <= cue_onset + stim_dur)]
frate_change = np.abs(stim_frate.mean() - ctrl_frate.mean()) / ctrl_frate.mean()
metrics.loc[u_idx] = (int(unit['unit']), x, y, frate_change)
metrics.frate_change = metrics.frate_change / metrics.frate_change.max()
if axs is None:
fig, axs = plt.subplots(1, 1, figsize=(4, 8))
cosmetic = {'legend': None,
'linewidth': 1.75,
'alpha': 0.9,
'facecolor': 'none', 'edgecolor': 'k'}
sns.scatterplot(data=metrics, x='x', y='y', s=metrics.frate_change*m_scale,
ax=axs, **cosmetic)
axs.spines['right'].set_visible(False)
axs.spines['top'].set_visible(False)
axs.set_title('% change')
axs.set_xlim((-10, 60))
def plot_stacked_contra_ipsi_psth(units, axs=None):
units = units.proj()
if axs is None:
fig, axs = plt.subplots(1, 2, figsize=(20, 20))
assert axs.size == 2
trial_cond_name = psth.TrialCondition.get_cond_name_from_keywords(['good_noearlylick_', '_hit'])[0]
period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, trial_cond_name)
hemi = _get_units_hemisphere(units)
conds_i = (psth.TrialCondition
& {'trial_condition_name':
'good_noearlylick_left_hit' if hemi == 'left' else 'good_noearlylick_right_hit'}).fetch1('KEY')
conds_c = (psth.TrialCondition
& {'trial_condition_name':
'good_noearlylick_right_hit' if hemi == 'left' else 'good_noearlylick_left_hit'}).fetch1('KEY')
sel_i = (ephys.Unit * psth.UnitSelectivity
& 'unit_selectivity = "ipsi-selective"' & units)
sel_c = (ephys.Unit * psth.UnitSelectivity
& 'unit_selectivity = "contra-selective"' & units)
# ipsi selective ipsi trials
psth_is_it = (psth.UnitPsth * sel_i.proj('unit_posy') & conds_i).fetch(order_by='unit_posy desc')
# ipsi selective contra trials
psth_is_ct = (psth.UnitPsth * sel_i.proj('unit_posy') & conds_c).fetch(order_by='unit_posy desc')
# contra selective contra trials
psth_cs_ct = (psth.UnitPsth * sel_c.proj('unit_posy') & conds_c).fetch(order_by='unit_posy desc')
# contra selective ipsi trials
psth_cs_it = (psth.UnitPsth * sel_c.proj('unit_posy') & conds_i).fetch(order_by='unit_posy desc')
_plot_stacked_psth_diff(psth_cs_ct, psth_cs_it, ax=axs[0],
vlines=period_starts, flip=True)
_plot_stacked_psth_diff(psth_is_it, psth_is_ct, ax=axs[1],
vlines=period_starts)
# cosmetic
for ax, title in zip(axs, ('Contra-selective Units', 'Ipsi-selective Units')):
ax.set_title(title)
ax.set_ylabel('Unit')
ax.set_xlabel('Time to go-cue (s)')
ax.set_xlim([_plt_xmin, _plt_xmax])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
def plot_selectivity_sorted_stacked_contra_ipsi_psth(units, axs=None):
units = units.proj()
if axs is None:
fig, axs = plt.subplots(1, 2, figsize=(20, 20))
assert axs.size == 2
trial_cond_name = psth.TrialCondition.get_cond_name_from_keywords(['good_noearlylick_', '_hit'])[0]
period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, trial_cond_name)
hemi = _get_units_hemisphere(units)
conds_i = (psth.TrialCondition
& {'trial_condition_name':
'good_noearlylick_left_hit' if hemi == 'left' else 'good_noearlylick_right_hit'}).fetch1('KEY')
conds_c = (psth.TrialCondition
& {'trial_condition_name':
'good_noearlylick_right_hit' if hemi == 'left' else 'good_noearlylick_left_hit'}).fetch1('KEY')
# ---- separate units to:
# i) sample or delay not response:
sample_delay_units = units & (psth.PeriodSelectivity
& 'period in ("sample", "delay")'
& 'period_selectivity != "non-selective"')
sample_delay_units = sample_delay_units & (psth.PeriodSelectivity & units
& 'period = "response"'
& 'period_selectivity = "non-selective"')
# ii) sample or delay and response:
sample_delay_response_units = units & (psth.PeriodSelectivity
& 'period in ("sample", "delay")'
& 'period_selectivity != "non-selective"')
sample_delay_response_units = sample_delay_response_units & (psth.PeriodSelectivity & units
& 'period = "response"'
& 'period_selectivity != "non-selective"')
# iii) not sample nor delay and response:
response_units = (units & (psth.PeriodSelectivity & 'period in ("sample")'
& 'period_selectivity = "non-selective"')
& (psth.PeriodSelectivity & 'period in ("delay")'
& 'period_selectivity = "non-selective"'))
response_units = response_units & (psth.PeriodSelectivity & units
& 'period = "response"'
& 'period_selectivity != "non-selective"')
ipsi_selective_psth, contra_selective_psth = [], []
for units in (sample_delay_units, sample_delay_response_units, response_units):
sel_i = (ephys.Unit * psth.UnitSelectivity
& 'unit_selectivity = "ipsi-selective"' & units)
sel_c = (ephys.Unit * psth.UnitSelectivity
& 'unit_selectivity = "contra-selective"' & units)
# ipsi selective ipsi trials
psth_is_it = (psth.UnitPsth * sel_i & conds_i).fetch()
# ipsi selective contra trials
psth_is_ct = (psth.UnitPsth * sel_i & conds_c).fetch()
# contra selective contra trials
psth_cs_ct = (psth.UnitPsth * sel_c & conds_c).fetch()
# contra selective ipsi trials
psth_cs_it = (psth.UnitPsth * sel_c & conds_i).fetch()
contra_selective_psth.append(_plot_stacked_psth_diff(psth_cs_ct, psth_cs_it, ax=axs[0], flip=True, plot=False))
ipsi_selective_psth.append(_plot_stacked_psth_diff(psth_is_it, psth_is_ct, ax=axs[1], plot=False))
contra_boundaries = np.cumsum([len(k) for k in contra_selective_psth[::-1]])
ipsi_boundaries = np.cumsum([len(k) for k in ipsi_selective_psth[::-1]])
contra_selective_psth = np.vstack(contra_selective_psth)
ipsi_selective_psth = np.vstack(ipsi_selective_psth)
xlim = -3, 2
im = axs[0].imshow(contra_selective_psth, cmap=plt.cm.bwr,
aspect=4.5/contra_selective_psth.shape[0],
extent=[-3, 3, 0, contra_selective_psth.shape[0]])
im.set_clim((-1, 1))
im = axs[1].imshow(ipsi_selective_psth, cmap=plt.cm.bwr,
aspect=4.5/ipsi_selective_psth.shape[0],
extent=[-3, 3, 0, ipsi_selective_psth.shape[0]])
im.set_clim((-1, 1))
# cosmetic
for ax, title, hspans in zip(axs, ('Contra-selective Units', 'Ipsi-selective Units'),
(contra_boundaries, ipsi_boundaries)):
for x in period_starts:
ax.axvline(x=x, linestyle='--', color='k')
ax.set_title(title)
ax.set_ylabel('Unit')
ax.set_xlabel('Time to go-cue (s)')
ax.set_xlim(xlim)
for ystart, ystop, color in zip([0]+list(hspans[:-1]), hspans, ('k', 'grey', 'w')):
ax.axhspan(ystart, ystop, 0.98, 1, alpha=1, color=color)
def plot_avg_contra_ipsi_psth(units, axs=None):
units = units.proj()
if axs is None:
fig, axs = plt.subplots(1, 2, figsize=(16, 6))
assert axs.size == 2
period_starts = (experiment.Period
& 'period in ("sample", "delay", "response")').fetch(
'period_start')
hemi = _get_units_hemisphere(units)
good_unit = ephys.Unit & 'unit_quality != "all"'
conds_i = (psth.TrialCondition
& {'trial_condition_name':
'good_noearlylick_left_hit' if hemi == 'left' else 'good_noearlylick_right_hit'}).fetch('KEY')
conds_c = (psth.TrialCondition
& {'trial_condition_name':
'good_noearlylick_right_hit' if hemi == 'left' else 'good_noearlylick_left_hit'}).fetch('KEY')
sel_i = (ephys.Unit * psth.UnitSelectivity
& 'unit_selectivity = "ipsi-selective"' & units)
sel_c = (ephys.Unit * psth.UnitSelectivity
& 'unit_selectivity = "contra-selective"' & units)
psth_is_it = (((psth.UnitPsth & conds_i)
* ephys.Unit.proj('unit_posy'))
& good_unit.proj() & sel_i.proj()).fetch(
'unit_psth', order_by='unit_posy desc')
psth_is_ct = (((psth.UnitPsth & conds_c)
* ephys.Unit.proj('unit_posy'))
& good_unit.proj() & sel_i.proj()).fetch(
'unit_psth', order_by='unit_posy desc')
psth_cs_ct = (((psth.UnitPsth & conds_c)
* ephys.Unit.proj('unit_posy'))
& good_unit.proj() & sel_c.proj()).fetch(
'unit_psth', order_by='unit_posy desc')
psth_cs_it = (((psth.UnitPsth & conds_i)
* ephys.Unit.proj('unit_posy'))
& good_unit.proj() & sel_c.proj()).fetch(
'unit_psth', order_by='unit_posy desc')
_plot_avg_psth(psth_cs_it, psth_cs_ct, period_starts, axs[0],
'Contra-selective')
_plot_avg_psth(psth_is_it, psth_is_ct, period_starts, axs[1],
'Ipsi-selective')
ymax = max([ax.get_ylim()[1] for ax in axs])
for ax in axs:
ax.set_ylim((0, ymax))
ax.set_xlim([_plt_xmin, _plt_xmax])
def plot_psth_photostim_effect(units, condition_name_kw=['both_alm'], axs=None):
"""
For the specified `units`, plot PSTH comparison between stim vs. no-stim with left/right trial instruction
The stim location (or other appropriate search keywords) can be specified in `condition_name_kw` (default: bilateral ALM)
"""
units = units.proj()
if axs is None:
fig, axs = plt.subplots(1, 2, figsize=(16, 6))
assert axs.size == 2
hemi = _get_units_hemisphere(units)
period_starts = (experiment.Period
& 'period in ("sample", "delay", "response")').fetch(
'period_start')
# no photostim:
psth_n_l = psth.TrialCondition.get_cond_name_from_keywords(['_nostim', '_left'])[0]
psth_n_r = psth.TrialCondition.get_cond_name_from_keywords(['_nostim', '_right'])[0]
psth_n_l = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name': psth_n_l} & 'unit_psth is not NULL').fetch('unit_psth')
psth_n_r = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name': psth_n_r} & 'unit_psth is not NULL').fetch('unit_psth')
psth_s_l = psth.TrialCondition.get_cond_name_from_keywords(condition_name_kw + ['_stim_left'])[0]
psth_s_r = psth.TrialCondition.get_cond_name_from_keywords(condition_name_kw + ['_stim_right'])[0]
psth_s_l = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name': psth_s_l} & 'unit_psth is not NULL').fetch('unit_psth')
psth_s_r = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name': psth_s_r} & 'unit_psth is not NULL').fetch('unit_psth')
# get photostim duration and stim time (relative to go-cue)
stim_trial_cond_name = psth.TrialCondition.get_cond_name_from_keywords(condition_name_kw + ['_stim'])[0]
stim_time, stim_dur = _get_photostim_time_and_duration(units,
psth.TrialCondition().get_trials(stim_trial_cond_name))
if hemi == 'left':
psth_s_i = psth_s_l
psth_n_i = psth_n_l
psth_s_c = psth_s_r
psth_n_c = psth_n_r
else:
psth_s_i = psth_s_r
psth_n_i = psth_n_r
psth_s_c = psth_s_l
psth_n_c = psth_n_l
_plot_avg_psth(psth_n_i, psth_n_c, period_starts, axs[0],
'Control')
_plot_avg_psth(psth_s_i, psth_s_c, period_starts, axs[1],
'Photostim')
# cosmetic
ymax = max([ax.get_ylim()[1] for ax in axs])
for ax in axs:
ax.set_ylim((0, ymax))
ax.set_xlim([_plt_xmin, _plt_xmax])
# add shaded bar for photostim
axs[1].axvspan(stim_time, stim_time + stim_dur, alpha=0.3, color='royalblue')
def plot_selectivity_change_photostim_effect(units, condition_name_kw, recover_time_window=None, ax=None):
"""
For each unit in the specified units, extract:
+ control, left-instruct PSTH (ctrl_left)
+ control, right-instruct PSTH (ctrl_right)
+ stim, left-instruct PSTH (stim_left)
+ stim, right-instruct PSTH (stim_right)
Then, control_PSTH and stim_PSTH is defined as
(ctrl_left - ctrl_right) for ipsi-selective unit that locates on the left-hemisphere, and vice versa
(stim_left - stim_right) for ipsi-selective unit that locates on the left-hemisphere, and vice versa
Selectivity change is then defined as: control_PSTH - stim_PSTH
"""
trial_cond_name = psth.TrialCondition.get_cond_name_from_keywords(['good_noearlylick_', '_hit'])[0]
period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, trial_cond_name)
stim_trial_cond_name = psth.TrialCondition.get_cond_name_from_keywords(condition_name_kw + ['_stim'])[0]
stim_time, stim_dur = _get_photostim_time_and_duration(units,
psth.TrialCondition().get_trials(stim_trial_cond_name))
ctrl_left_cond_name = 'all_noearlylick_nostim_left'
ctrl_right_cond_name = 'all_noearlylick_nostim_right'
stim_left_cond_name = psth.TrialCondition().get_cond_name_from_keywords(condition_name_kw
+ ['noearlylick', 'stim', 'left'])[0]
stim_right_cond_name = psth.TrialCondition().get_cond_name_from_keywords(condition_name_kw
+ ['noearlylick', 'stim', 'right'])[0]
delta_sels, ctrl_psths = [], []
for unit in (units * psth.UnitSelectivity & 'unit_selectivity != "non-selective"').proj('unit_selectivity').fetch(as_dict=True):
# ---- trial count criteria ----
# no less than 5 trials for control
if (len(psth.TrialCondition.get_trials(ctrl_left_cond_name) & unit) < 5
or len(psth.TrialCondition.get_trials(ctrl_right_cond_name) & unit) < 5):
continue
# no less than 2 trials for stimulation
if (len(psth.TrialCondition.get_trials(stim_left_cond_name) & unit) < 2
or len(psth.TrialCondition.get_trials(stim_right_cond_name) & unit) < 2):
continue
hemi = _get_units_hemisphere(unit)
ctrl_left_psth, t_vec = psth.UnitPsth.get_plotting_data(unit, {'trial_condition_name': ctrl_left_cond_name})['psth']
ctrl_right_psth, _ = psth.UnitPsth.get_plotting_data(unit, {'trial_condition_name': ctrl_right_cond_name})['psth']
try:
stim_left_psth, _ = psth.UnitPsth.get_plotting_data(unit, {'trial_condition_name': stim_left_cond_name})['psth']
stim_right_psth, _ = psth.UnitPsth.get_plotting_data(unit, {'trial_condition_name': stim_right_cond_name})['psth']
except:
continue
if unit['unit_selectivity'] == 'ipsi-selective':
ctrl_psth_diff = ctrl_left_psth - ctrl_right_psth if hemi == 'left' else ctrl_right_psth - ctrl_left_psth
stim_psth_diff = stim_left_psth - stim_right_psth if hemi == 'left' else stim_right_psth - stim_left_psth
elif unit['unit_selectivity'] == 'contra-selective':
ctrl_psth_diff = ctrl_left_psth - ctrl_right_psth if hemi == 'right' else ctrl_right_psth - ctrl_left_psth
stim_psth_diff = stim_left_psth - stim_right_psth if hemi == 'right' else stim_right_psth - stim_left_psth
ctrl_psths.append(ctrl_psth_diff)
delta_sels.append(ctrl_psth_diff - stim_psth_diff)
ctrl_psths = np.vstack(ctrl_psths)
delta_sels = np.vstack(delta_sels)
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=(4, 6))
_plot_with_sem(delta_sels, t_vec, ax)
if recover_time_window:
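        # Bootstrap (1000 resamples over units) the first time point within the
        # given window at which the mean selectivity change drops below 20% of
        # the control selectivity, i.e. an estimate of when selectivity recovers.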
recovery_times = []
for i in range(1000):
i_sample = np.random.choice(delta_sels.shape[0], delta_sels.shape[0], replace = True)
btstrp_diff = np.nanmean(delta_sels[i_sample, :], axis = 0) / np.nanmean(ctrl_psths[i_sample, :], axis = 0)
t_recovered = t_vec[
(btstrp_diff < 0.2) & (t_vec > recover_time_window[0]) & (t_vec < recover_time_window[1])]
if len(t_recovered) > 0:
recovery_times.append(t_recovered[0])
ax.axvline(x = np.mean(recovery_times), linestyle = '--', color = 'g')
ax.axvspan(np.mean(recovery_times) - np.std(recovery_times), np.mean(recovery_times) + np.std(recovery_times),
alpha = 0.2, color = 'g')
ax.axhline(y=0, color = 'k')
for x in period_starts:
ax.axvline(x=x, linestyle = '--', color = 'k')
# add shaded bar for photostim
ax.axvspan(stim_time, stim_time + stim_dur, 0.95, 1, alpha = 0.3, color = 'royalblue')
ax.set_ylabel('Selectivity change (spike/s)')
ax.set_xlabel('Time (s)')
def plot_coding_direction(units, time_period=None, axs=None):
_, proj_contra_trial, proj_ipsi_trial, time_stamps = psth.compute_CD_projected_psth(
units.fetch('KEY'), time_period=time_period)
period_starts = (experiment.Period & 'period in ("sample", "delay", "response")').fetch('period_start')
if axs is None:
fig, axs = plt.subplots(1, 1, figsize=(8, 6))
# plot
_plot_with_sem(proj_contra_trial, time_stamps, ax=axs, c='b')
_plot_with_sem(proj_ipsi_trial, time_stamps, ax=axs, c='r')
for x in period_starts:
axs.axvline(x=x, linestyle = '--', color = 'k')
# cosmetic
axs.spines['right'].set_visible(False)
axs.spines['top'].set_visible(False)
axs.set_ylabel('CD projection (a.u.)')
axs.set_xlabel('Time (s)')
def plot_paired_coding_direction(unit_g1, unit_g2, labels=None, time_period=None):
"""
Plot trial-to-trial CD-endpoint correlation between CD-projected trial-psth from two unit-groups (e.g. two brain regions)
Note: coding direction is calculated on selective units, contra vs. ipsi, within the specified time_period
"""
_, proj_contra_trial_g1, proj_ipsi_trial_g1, time_stamps = psth.compute_CD_projected_psth(
unit_g1.fetch('KEY'), time_period=time_period)
_, proj_contra_trial_g2, proj_ipsi_trial_g2, time_stamps = psth.compute_CD_projected_psth(
unit_g2.fetch('KEY'), time_period=time_period)
period_starts = (experiment.Period & 'period in ("sample", "delay", "response")').fetch('period_start')
if labels:
assert len(labels) == 2
else:
labels = ('unit group 1', 'unit group 2')
# plot projected trial-psth
fig, axs = plt.subplots(1, 2, figsize=(16, 6))
_plot_with_sem(proj_contra_trial_g1, time_stamps, ax=axs[0], c='b')
_plot_with_sem(proj_ipsi_trial_g1, time_stamps, ax=axs[0], c='r')
_plot_with_sem(proj_contra_trial_g2, time_stamps, ax=axs[1], c='b')
_plot_with_sem(proj_ipsi_trial_g2, time_stamps, ax=axs[1], c='r')
# cosmetic
for ax, label in zip(axs, labels):
for x in period_starts:
ax.axvline(x=x, linestyle = '--', color = 'k')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylabel('CD projection (a.u.)')
ax.set_xlabel('Time (s)')
ax.set_title(label)
# plot trial CD-endpoint correlation
p_start, p_end = time_period
contra_cdend_1 = proj_contra_trial_g1[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
contra_cdend_2 = proj_contra_trial_g2[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
ipsi_cdend_1 = proj_ipsi_trial_g1[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
ipsi_cdend_2 = proj_ipsi_trial_g2[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
c_df = | pd.DataFrame([contra_cdend_1, contra_cdend_2]) | pandas.DataFrame |
import collections
import logging
import os
import pprint
from typing import Any, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
import pytest
import core.artificial_signal_generators as cartif
import core.signal_processing as csigna
import helpers.git as git
import helpers.printing as hprint
import helpers.unit_test as hut
_LOG = logging.getLogger(__name__)
class Test__compute_lagged_cumsum(hut.TestCase):
def test1(self) -> None:
input_df = self._get_df()
output_df = csigna._compute_lagged_cumsum(input_df, 3)
self.check_string(
f"{hprint.frame('input')}\n"
f"{hut.convert_df_to_string(input_df, index=True)}\n"
f"{hprint.frame('output')}\n"
f"{hut.convert_df_to_string(output_df, index=True)}"
)
def test2(self) -> None:
input_df = self._get_df()
input_df.columns = ["x", "y1", "y2"]
output_df = csigna._compute_lagged_cumsum(input_df, 3, ["y1", "y2"])
self.check_string(
f"{hprint.frame('input')}\n"
f"{hut.convert_df_to_string(input_df, index=True)}\n"
f"{hprint.frame('output')}\n"
f"{hut.convert_df_to_string(output_df, index=True)}"
)
def test_lag_1(self) -> None:
input_df = self._get_df()
input_df.columns = ["x", "y1", "y2"]
output_df = csigna._compute_lagged_cumsum(input_df, 1, ["y1", "y2"])
self.check_string(
f"{hprint.frame('input')}\n"
f"{hut.convert_df_to_string(input_df, index=True)}\n"
f"{hprint.frame('output')}\n"
f"{hut.convert_df_to_string(output_df, index=True)}"
)
@staticmethod
def _get_df() -> pd.DataFrame:
df = pd.DataFrame([list(range(10))] * 3).T
df[1] = df[0] + 1
df[2] = df[0] + 2
df.index = pd.date_range(start="2010-01-01", periods=10)
df.rename(columns=lambda x: f"col_{x}", inplace=True)
return df
class Test_correlate_with_lagged_cumsum(hut.TestCase):
def test1(self) -> None:
input_df = self._get_arma_df()
output_df = csigna.correlate_with_lagged_cumsum(
input_df, 3, y_vars=["y1", "y2"]
)
self.check_string(
f"{hprint.frame('input')}\n"
f"{hut.convert_df_to_string(input_df, index=True)}\n"
f"{hprint.frame('output')}\n"
f"{hut.convert_df_to_string(output_df, index=True)}"
)
def test2(self) -> None:
input_df = self._get_arma_df()
output_df = csigna.correlate_with_lagged_cumsum(
input_df, 3, y_vars=["y1"], x_vars=["x"]
)
self.check_string(
f"{hprint.frame('input')}\n"
f"{hut.convert_df_to_string(input_df, index=True)}\n"
f"{hprint.frame('output')}\n"
f"{hut.convert_df_to_string(output_df, index=True)}"
)
@staticmethod
def _get_arma_df(seed: int = 0) -> pd.DataFrame:
arma_process = cartif.ArmaProcess([], [])
date_range = {"start": "2010-01-01", "periods": 40, "freq": "M"}
srs1 = arma_process.generate_sample(
date_range_kwargs=date_range, scale=0.1, seed=seed
).rename("x")
srs2 = arma_process.generate_sample(
date_range_kwargs=date_range, scale=0.1, seed=seed + 1
).rename("y1")
srs3 = arma_process.generate_sample(
date_range_kwargs=date_range, scale=0.1, seed=seed + 2
).rename("y2")
return pd.concat([srs1, srs2, srs3], axis=1)
class Test_accumulate(hut.TestCase):
def test1(self) -> None:
srs = pd.Series(
range(0, 20), index=pd.date_range("2010-01-01", periods=20)
)
actual = csigna.accumulate(srs, num_steps=1)
expected = srs.astype(float)
pd.testing.assert_series_equal(actual, expected)
def test2(self) -> None:
idx = pd.date_range("2010-01-01", periods=10)
srs = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], index=idx)
actual = csigna.accumulate(srs, num_steps=2)
expected = pd.Series([np.nan, 1, 3, 5, 7, 9, 11, 13, 15, 17], index=idx)
pd.testing.assert_series_equal(actual, expected)
def test3(self) -> None:
idx = pd.date_range("2010-01-01", periods=10)
srs = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], index=idx)
actual = csigna.accumulate(srs, num_steps=3)
expected = pd.Series(
[np.nan, np.nan, 3, 6, 9, 12, 15, 18, 21, 24], index=idx
)
pd.testing.assert_series_equal(actual, expected)
def test4(self) -> None:
srs = pd.Series(
np.random.randn(100), index=pd.date_range("2010-01-01", periods=100)
)
output = pd.concat([srs, csigna.accumulate(srs, num_steps=5)], axis=1)
output.columns = ["series", "series_accumulated"]
self.check_string(hut.convert_df_to_string(output, index=True))
def test_long_step1(self) -> None:
idx = pd.date_range("2010-01-01", periods=3)
srs = pd.Series([1, 2, 3], index=idx)
actual = csigna.accumulate(srs, num_steps=5)
expected = pd.Series([np.nan, np.nan, np.nan], index=idx)
| pd.testing.assert_series_equal(actual, expected) | pandas.testing.assert_series_equal |
import os
import tqdm
import argparse
import numpy as np
import pandas as pd
from copy import deepcopy
import random
import logging
import math
from functools import partial
from tensorboardX import SummaryWriter
from common.trainer import ClassifierTrainer, load_checkpoint, save_checkpoint
from common.dataset import TIMIT_speaker_norm, LibriSpeech_speaker
from common.utils import read_conf, get_dict_from_args
from common.model import SincClassifier
import torch
import torchvision
from torch import nn, optim
from torch.utils.data import DataLoader
from generator import Generator1D
@torch.no_grad()
def evaluate(model, test_dataset, cost, target=-1, noise_scale=1):
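    """
    Evaluate the generator on the whole test set: for every batch a latent
    vector is sampled, the generated perturbation (scaled by `noise_scale`) is
    added to the de-normalised waveforms (clamped to [-1, 1], then re-normalised),
    and the averaged loss terms, speaker error rate (predictions compared with
    the true label when target < 0, otherwise with `target`) and perturbation
    statistics are reported.
    """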
test_dataloader = DataLoader(test_dataset, 128, shuffle=False, num_workers=8, pin_memory=True)
model.eval()
bar = tqdm.tqdm(test_dataloader)
loss_all = {}
err_fake_all = {}
noise_all = {}
for idx, data in enumerate(bar):
wav_data, speaker_id, norm_factor = data
batch_size = wav_data.shape[0]
noise_dim = model.noise_dim
noise = torch.randn(size=(batch_size, noise_dim))
noise, wav_data, speaker_id = noise.float().cuda(), wav_data.float().cuda(), speaker_id.long().cuda()
norm_factor = norm_factor.unsqueeze(1).repeat(1, wav_data.shape[1]).float().cuda()
loss_func = cost
with torch.no_grad():
pout = model.forward(noise)
print(pout.shape)
pout = (pout*noise_scale + wav_data*norm_factor).clamp_(-1,1)/norm_factor
labels = {"speaker":speaker_id, "norm":wav_data}
loss_total, loss_dict, loss_dict_grad, pred_dict, label_dict = loss_func(pout, labels)
pred = torch.max(pred_dict['speaker'], dim=1)[1]
if target<0:
label = label_dict['speaker']
err_speaker = torch.mean((pred != label).float()).detach().cpu().item()
else:
err_speaker = torch.mean((pred != target).float()).detach().cpu().item()
err_dict = {"err_spk":err_speaker}
err_str = get_dict_str(err_dict)
loss_total = loss_total.detach().cpu().item()
loss_str = get_dict_str(loss_dict)
loss_dict.update(err_dict)
noise = (pout.detach()-wav_data.detach())
noise_mean, noise_std, noise_abs = torch.mean(noise).item(), torch.std(noise).item(), torch.mean(torch.abs(noise)).item()
noise_dict = {"mean":noise_mean*1e3, "std":noise_std*1e3, "m_abs":noise_abs*1e3}
noise_str = get_dict_str(noise_dict)
loss_dict.update(noise_dict)
def accumulate_dict(total_dict, item_dict, factor):
for k,v in item_dict.items():
total_dict[k] = total_dict.get(k,0)+v*factor
return total_dict
loss_all = accumulate_dict(loss_all, loss_dict, len(speaker_id))
err_fake_all = accumulate_dict(err_fake_all, err_dict, len(speaker_id))
noise_all = accumulate_dict(noise_all, noise_dict, len(speaker_id))
bar.set_description("err:({}), noise(e-3):({}), batch size:{}".format(err_str, noise_str, len(speaker_id)))
bar.close()
def multiply_dict(data_dict, factor):
for k,v in data_dict.items():
data_dict[k] = v*factor
return data_dict
loss_all = multiply_dict(loss_all, 1.0/len(test_dataset))
err_fake_all = multiply_dict(err_fake_all, 1.0/len(test_dataset))
noise_all = multiply_dict(noise_all, 1.0/len(test_dataset))
print(get_dict_str(loss_all), get_dict_str(err_fake_all), get_dict_str(noise_all))
@torch.no_grad()
def sentence_test(speaker_model, wav_data, wlen=3200, wshift=10, batch_size=128):
"""
wav_data: B, L
"""
wav_data = wav_data.squeeze()
L = wav_data.shape[0]
pred_all = []
begin_idx = 0
batch_data = []
while begin_idx<L-wlen:
batch_data.append(wav_data[begin_idx:begin_idx+wlen])
if len(batch_data)>=batch_size:
pred_batch = speaker_model(torch.stack(batch_data))
pred_all.append(pred_batch)
batch_data = []
begin_idx += wshift
if len(batch_data)>0:
pred_batch = speaker_model(torch.stack(batch_data))
pred_all.append(pred_batch)
[val,best_class]=torch.max(torch.sum(torch.cat(pred_all, dim=0),dim=0),0)
return best_class.detach().cpu().item()
import soundfile as sf
from common.utils import SNR, PESQ
from common.trainer import RunningAverage
@torch.no_grad()
def test_wav(model:Generator1D, filename_list, data_folder, out_folder, speaker_model=None, label_dict=None, target=-1, noise_scale=1):
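    """
    Generate an adversarial version of every file in `filename_list`: a latent
    vector is sampled per file, the generated perturbation is tiled over the
    whole utterance and scaled by `noise_scale`, and the perturbed audio is
    written to `out_folder`. SNR/PESQ and the raw perturbations are tracked,
    and, if `speaker_model` is given, sentence-level speaker predictions are
    recorded to compute attack error rates.
    """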
model.eval()
if speaker_model: speaker_model.eval()
noise_dim = model.noise_dim
batch_size = 1
bar = tqdm.tqdm(filename_list)
averager = RunningAverage()
pertutations = []
pred_results = []
save_every = 2000
save_idx = 0
for idx, filename in enumerate(bar):
noise = torch.randn(size=(batch_size, noise_dim))
real_data, fs = sf.read(os.path.join(data_folder, filename))
real_data_norm, real_norm_factor = TIMIT_speaker_norm.preprocess(real_data)
pout = model.forward(noise.float().cuda()).squeeze().detach().cpu().numpy()
# print(np.abs(pout).mean())
# cycle
noise_all = np.concatenate([pout]*int(math.ceil(len(real_data)/float(len(pout)))))[:len(real_data)]
fake_data = (noise_all*noise_scale + real_data).clip(-1,1)
fake_data_norm = fake_data/np.abs(fake_data).max()
# save data
output_filename = os.path.join(out_folder, filename)
if not os.path.exists(os.path.dirname(output_filename)):
os.makedirs(os.path.dirname(output_filename))
# print(fake_data.shape)
sf.write(output_filename, fake_data, fs)
snr = SNR(fake_data, real_data)
pesq = PESQ(real_data, fake_data, fs)
averager.update({"SNR":snr, "PESQ":pesq}, {"SNR":snr, "PESQ":pesq})
output_str = "SNR:{:5.2f}, PESQ:{:5.2f}".format(snr, pesq)
pertutations.append((real_data-fake_data).astype(np.float16))
if speaker_model:
label = label_dict[filename]
pred_fake = sentence_test(speaker_model, torch.from_numpy(fake_data_norm).float().cuda().unsqueeze(0))
if target != -1:
err_rate = (pred_fake == target)
averager.update({"err_rate":err_rate}, {"err_rate":1})
pred_real = sentence_test(speaker_model, torch.from_numpy(real_data_norm).float().cuda().unsqueeze(0))
averager.update({"err_rate_raw":pred_real!=label, "target_rate_raw":pred_real==target}, {"err_rate_raw":1, "target_rate_raw":1})
pred_results.append({'file':filename, 'pred_real':pred_real, 'pred_fake':pred_fake, 'label':label})
else:
err_rate = (pred_fake != label)
averager.update({"err_rate":err_rate}, {"err_rate":1})
pred_results.append({'file':filename, 'pred_fake':pred_fake, 'label':label})
output_str += ", real/fake:{}/{}, data len:{}".format(label, pred_fake, fake_data.shape)
bar.set_description(output_str+filename)
if len(pertutations)>=save_every:
np.save(os.path.join(out_folder, "pertutation.{}.npy".format(save_idx)), (pertutations))
pertutations = []
if len(pred_results)>0:
pd.DataFrame(pred_results).to_csv(os.path.join(out_folder, "pred_results.{}.csv".format(save_idx)))
pred_results = []
save_idx += 1
np.save(os.path.join(out_folder, "pertutation.{}.npy".format(save_idx)), (pertutations))
if len(pred_results)>0:
pd.DataFrame(pred_results).to_csv(os.path.join(out_folder, "pred_results.{}.csv".format(save_idx)))
bar.close()
avg = averager.average()
print(get_dict_str(avg))
def test_interpolation(model:Generator1D, filename_list, data_folder, out_folder, speaker_model=None, label_dict=None, target=-1, noise_scale=1, beta=0):
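    """
    Same evaluation as test_wav, but one perturbation is shared by all files:
    it is generated from an interpolation of two latent vectors,
    noise1 * (1 - beta) + noise2 * beta. A negative beta is treated as a
    baseline and uses plain Gaussian noise with standard deviation -beta
    (re-drawn per file) instead of the generator output.
    """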
model.eval()
if speaker_model: speaker_model.eval()
noise_dim = model.noise_dim
batch_size = 1
bar = tqdm.tqdm(filename_list)
averager = RunningAverage()
pertutations = []
pred_results = []
save_every = 2000
save_idx = 0
noise1 = torch.randn(size=(batch_size, noise_dim))
noise2 = torch.randn(size=(batch_size, noise_dim))
noise = noise1 * (1-beta) + noise2 * beta
pout = model.forward(noise.float().cuda()).squeeze().detach().cpu().numpy()
if beta < 0:
pout = torch.randn(size=pout.shape).mul_(-beta).numpy()
for idx, filename in enumerate(bar):
real_data, fs = sf.read(os.path.join(data_folder, filename))
real_data_norm, real_norm_factor = TIMIT_speaker_norm.preprocess(real_data)
# print(np.abs(pout).mean())
        # build a full-length perturbation for this utterance (tiled generator
        # output, or fresh Gaussian noise when beta < 0)
if beta < 0:
noise_all = torch.randn(real_data.shape).mul_(-beta).numpy()
else:
noise_all = np.concatenate([pout]*int(math.ceil(len(real_data)/float(len(pout)))))[:len(real_data)]
fake_data = (noise_all*noise_scale + real_data).clip(-1,1)
fake_data_norm = fake_data/np.abs(fake_data).max()
# save data
output_filename = os.path.join(out_folder, filename)
if not os.path.exists(os.path.dirname(output_filename)):
os.makedirs(os.path.dirname(output_filename))
# print(fake_data.shape)
sf.write(output_filename, fake_data, fs)
snr = SNR(fake_data, real_data)
pesq = PESQ(real_data, fake_data, fs)
averager.update({"SNR":snr, "PESQ":pesq}, {"SNR":snr, "PESQ":pesq})
output_str = "SNR:{:5.2f}, PESQ:{:5.2f}".format(snr, pesq)
pertutations.append((real_data-fake_data).astype(np.float16))
if speaker_model:
label = label_dict[filename]
pred_fake = sentence_test(speaker_model, torch.from_numpy(fake_data_norm).float().cuda().unsqueeze(0))
if target != -1:
err_rate = (pred_fake == target)
averager.update({"err_rate":err_rate}, {"err_rate":1})
pred_real = sentence_test(speaker_model, torch.from_numpy(real_data_norm).float().cuda().unsqueeze(0))
averager.update({"err_rate_raw":pred_real!=label, "target_rate_raw":pred_real==target}, {"err_rate_raw":1, "target_rate_raw":1})
pred_results.append({'file':filename, 'pred_real':pred_real, 'pred_fake':pred_fake, 'label':label})
else:
err_rate = (pred_fake != label)
averager.update({"err_rate":err_rate}, {"err_rate":1})
pred_results.append({'file':filename, 'pred_fake':pred_fake, 'label':label})
output_str += ", real/fake:{}/{}, data len:{}".format(label, pred_fake, fake_data.shape)
bar.set_description(output_str+filename)
if len(pertutations)>=save_every:
np.save(os.path.join(out_folder, "pertutation.{}.npy".format(save_idx)), (pertutations))
pertutations = []
if len(pred_results)>0:
pd.DataFrame(pred_results).to_csv(os.path.join(out_folder, "pred_results.{}.csv".format(save_idx)))
pred_results = []
save_idx += 1
np.save(os.path.join(out_folder, "pertutation.{}.npy".format(save_idx)), (pertutations))
if len(pred_results)>0:
| pd.DataFrame(pred_results) | pandas.DataFrame |
"""Script to prepare weather data
Load weather simulation data
1. Download yearly data from here: http://catalogue.ceda.ac.uk/uuid/0cea8d7aca57427fae92241348ae9b03
2. Extract data
3. Run this script to get only the relevant data
Links
------
http://data.ceda.ac.uk//badc//weather_at_home/data/marius_time_series/CEDA_MaRIUS_Climate_Data_Description.pdf
http://data.ceda.ac.uk/badc/weather_at_home/data/marius_time_series/near_future/data/
https://medium.com/@rtjeannier/pandas-101-cont-9d061cb73bfc
"""
import os
import pytemperature
import xarray as xr
import numpy as np
import pandas as pd
from energy_demand.basic import basic_functions
from energy_demand.read_write import write_data
def create_realisation(
base_yr_remapped_weather_path,
realisation_list,
realisation_path,
realisation_out_path,
path_stiching_table
):
"""
Before running, generate 2015 remapped data
"""
sim_yr_start = 2015
sim_yr_end = 2050 + 1
print("... writing data", flush=True)
write_to_csv = True
write_to_np = False
write_to_parquet = False
# Create result path
basic_functions.create_folder(realisation_out_path)
# Read in stiching table
df_path_stiching_table = pd.read_table(path_stiching_table, sep=" ")
# Set year as index
df_path_stiching_table = df_path_stiching_table.set_index('year')
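    # Each column of the stitching table corresponds to one realisation; the
    # cell for a given (year, realisation) holds the folder name of the
    # weather-at-home run whose data are stitched in for that simulation year.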
# Realisations
realisations = list(df_path_stiching_table.columns)
columns = ['timestep', 'station_id', 'yearday', 't_min', 't_max']
for i in realisation_list:
realisation = realisations[i]
print("... creating weather data for realisation " + str(realisation), flush=True)
realisation_out = []
stations_out = pd.DataFrame()
for sim_yr in range(sim_yr_start, sim_yr_end):
print(" ... year: " + str(sim_yr), flush=True)
# If year 2015 - 2019, take base year weather
if sim_yr in range(2015, 2020):
print("... for year '{}' data from the year 2015 are used".format(sim_yr))
path_weather_data = base_yr_remapped_weather_path
path_t_min = os.path.join(path_weather_data, "t_min_remapped.npy")
path_t_max = os.path.join(path_weather_data, "t_max_remapped.npy")
path_stations = os.path.join(path_weather_data, "stations_2015_remapped.csv")
elif sim_yr == 2050:
print("... for year '{}' data from the year 2049 are used".format(sim_yr))
year = 2049
stiching_name = df_path_stiching_table[realisation][year]
path_weather_data = os.path.join(realisation_path, str(year), stiching_name)
path_t_min = os.path.join(path_weather_data, "t_min.npy")
path_t_max = os.path.join(path_weather_data, "t_max.npy")
path_stations = os.path.join(path_weather_data, "stations.csv")
else:
year = sim_yr
stiching_name = df_path_stiching_table[realisation][year]
path_weather_data = os.path.join(realisation_path, str(year), stiching_name)
path_t_min = os.path.join(path_weather_data, "t_min.npy")
path_t_max = os.path.join(path_weather_data, "t_max.npy")
path_stations = os.path.join(path_weather_data, "stations.csv")
# Read t_min, t_max, stations)
t_min = np.load(path_t_min)
t_max = np.load(path_t_max)
stations = pd.read_csv(path_stations)
stations['timestep'] = sim_yr
stations_out = stations_out.append(stations)
nr_stations_arry = len(list(stations.values))
for station_cnt in range(nr_stations_arry):
t_min_station = t_min[station_cnt]
t_max_station = t_max[station_cnt]
station_id = 'station_id_{}'.format(station_cnt)
for yearday in range(365):
realisation_out.append(
[sim_yr, station_id, yearday, t_min_station[yearday], t_max_station[yearday]])
# Write data to csv
if write_to_csv:
df = pd.DataFrame(realisation_out, columns=columns)
path_out_csv = os.path.join(realisation_out_path, "weather_data_{}.csv".format(realisation))
df.to_csv(path_out_csv, index=False)
if write_to_parquet:
path_out_parquet = os.path.join(realisation_out_path, "weather_data_{}.parquet".format(realisation))
df = | pd.DataFrame(realisation_out, columns=columns) | pandas.DataFrame |
import pickle
import os
import pandas as pd
import settings
class HousePriceModel():
def __init__(self, model_dir, models_path=settings.MODELS_FOLDER):
"""Create the HousePriceModel object
Args:
model_dir (str): directory where the model is stored
models_path (str): path to models directory
"""
self.path = os.path.join(models_path, model_dir)
print('------', self.path)
self.model = self.load_model()
self.preds = None
def load_model(self):
pkl_filename = os.path.join(self.path, 'model.pkl')
try:
with open(pkl_filename, 'rb') as file:
pickle_model = pickle.load(file)
except:
print(f'Error loading the model at {pkl_filename}')
return None
return pickle_model
def predict(self, data):
if not isinstance(data, pd.DataFrame):
data = pd.DataFrame(data, index=[0])
self.preds = self.model.predict(data)
return self.preds
def get_input_example(self):
"""Get an input example for the model
Returns:
Dataframe: a dataframe with a set of examples
"""
input_template = os.path.join(self.path, "test_input.csv")
input_example = | pd.read_csv(input_template) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""Utility function for estimator testing.
copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""
__author__ = ["mloning", "fkiraly"]
from inspect import signature
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from sklearn.base import BaseEstimator
from sklearn.utils.validation import check_random_state
from sktime.alignment.base import BaseAligner
from sktime.annotation.base import BaseSeriesAnnotator
from sktime.classification.base import BaseClassifier
from sktime.clustering.base.base import BaseClusterer
from sktime.datatypes._panel._check import is_nested_dataframe
from sktime.dists_kernels import BasePairwiseTransformer, BasePairwiseTransformerPanel
from sktime.forecasting.base import BaseForecaster
from sktime.regression.base import BaseRegressor
from sktime.tests._config import VALID_ESTIMATOR_TYPES
from sktime.transformations.base import (
BaseTransformer,
_PanelToPanelTransformer,
_PanelToTabularTransformer,
_SeriesToPrimitivesTransformer,
_SeriesToSeriesTransformer,
)
from sktime.utils._testing.annotation import make_annotation_problem
from sktime.utils._testing.forecasting import (
_get_n_columns,
_make_series,
make_forecasting_problem,
)
from sktime.utils._testing.panel import (
_make_panel_X,
make_classification_problem,
make_clustering_problem,
make_regression_problem,
)
def _get_err_msg(estimator):
return (
f"Invalid estimator type: {type(estimator)}. Valid estimator types are: "
f"{VALID_ESTIMATOR_TYPES}"
)
def _construct_instance(Estimator):
"""Construct Estimator instance if possible."""
# return the instance of the class with default parameters
return Estimator.create_test_instance()
def _list_required_methods(estimator):
"""Return list of required method names (beyond BaseEstimator ones)."""
# all BaseEstimator children must implement these
MUST_HAVE_FOR_ESTIMATORS = [
"fit",
"check_is_fitted",
"is_fitted", # read-only property
"set_params",
"get_params",
]
# prediction/forecasting base classes that must have predict
BASE_CLASSES_THAT_MUST_HAVE_PREDICT = (
BaseClusterer,
BaseRegressor,
BaseForecaster,
)
# transformation base classes that must have transform
BASE_CLASSES_THAT_MUST_HAVE_TRANSFORM = (
BaseTransformer,
BasePairwiseTransformer,
BasePairwiseTransformerPanel,
)
required_methods = []
if isinstance(estimator, BaseEstimator):
required_methods += MUST_HAVE_FOR_ESTIMATORS
if isinstance(estimator, BASE_CLASSES_THAT_MUST_HAVE_PREDICT):
required_methods += ["predict"]
if isinstance(estimator, BASE_CLASSES_THAT_MUST_HAVE_TRANSFORM):
required_methods += ["transform"]
if isinstance(estimator, BaseAligner):
required_methods += [
"get_alignment",
"get_alignment_loc",
"get_aligned",
"get_distance",
"get_distance_matrix",
]
return required_methods
def _make_args(estimator, method, **kwargs):
"""Generate testing arguments for estimator methods."""
if method == "fit":
return _make_fit_args(estimator, **kwargs)
if method == "update":
raise NotImplementedError()
elif method in ("predict", "predict_proba", "decision_function"):
return _make_predict_args(estimator, **kwargs)
elif method == "transform":
return _make_transform_args(estimator, **kwargs)
elif method == "inverse_transform":
return _make_inverse_transform_args(estimator, **kwargs)
else:
raise ValueError(f"Method: {method} not supported")
def _make_fit_args(estimator, **kwargs):
if isinstance(estimator, BaseForecaster):
# we need to handle the TransformedTargetForecaster separately
if isinstance(estimator, _SeriesToSeriesTransformer):
y = _make_series(**kwargs)
else:
# create matching n_columns input, if n_columns not passed
# e.g., to give bivariate y to strictly multivariate forecaster
if "n_columns" not in kwargs.keys():
n_columns = _get_n_columns(
estimator.get_tag(tag_name="scitype:y", raise_error=False)
)[0]
y = make_forecasting_problem(n_columns=n_columns, **kwargs)
else:
y = make_forecasting_problem(**kwargs)
fh = 1
X = None
return y, X, fh
elif isinstance(estimator, BaseSeriesAnnotator):
X = make_annotation_problem(**kwargs)
return (X,)
elif isinstance(estimator, BaseClassifier):
return make_classification_problem(**kwargs)
elif isinstance(estimator, BaseRegressor):
return make_regression_problem(**kwargs)
elif isinstance(
estimator, (_SeriesToPrimitivesTransformer, _SeriesToSeriesTransformer)
):
X = _make_series(**kwargs)
return (X,)
elif isinstance(estimator, (_PanelToTabularTransformer, _PanelToPanelTransformer)):
return make_classification_problem(**kwargs)
elif isinstance(estimator, BaseTransformer):
X = _make_series(**kwargs)
return (X,)
elif isinstance(estimator, BaseClusterer):
return (make_clustering_problem(**kwargs),)
elif isinstance(estimator, BasePairwiseTransformer):
return None, None
elif isinstance(estimator, BasePairwiseTransformerPanel):
return None, None
elif isinstance(estimator, BaseAligner):
X = [_make_series(n_columns=2, **kwargs), _make_series(n_columns=2, **kwargs)]
return (X,)
else:
raise ValueError(_get_err_msg(estimator))
def _make_predict_args(estimator, **kwargs):
if isinstance(estimator, BaseForecaster):
fh = 1
return (fh,)
elif isinstance(estimator, (BaseClassifier, BaseRegressor)):
X = _make_panel_X(**kwargs)
return (X,)
elif isinstance(estimator, BaseSeriesAnnotator):
X = make_annotation_problem(n_timepoints=10, **kwargs)
return (X,)
elif isinstance(estimator, BaseClusterer):
X = _make_panel_X(**kwargs)
return (X,)
else:
raise ValueError(_get_err_msg(estimator))
def _make_transform_args(estimator, **kwargs):
if isinstance(
estimator, (_SeriesToPrimitivesTransformer, _SeriesToSeriesTransformer)
):
X = _make_series(**kwargs)
return (X,)
elif isinstance(
estimator,
(
_PanelToTabularTransformer,
_PanelToPanelTransformer,
),
):
X = _make_panel_X(**kwargs)
return (X,)
elif isinstance(estimator, BaseTransformer):
X = _make_series(**kwargs)
return (X,)
elif isinstance(estimator, BasePairwiseTransformer):
d = {"col1": [1, 2], "col2": [3, 4]}
return | pd.DataFrame(d) | pandas.DataFrame |
'''
This script cleans the OB datasets and combines all the cleaned data into one.
Dataset name: O-15-Cristina Piselli-Zeng
1. the weather data is stored in multiple sheets and needs to be combined
2. read the data and append it to the templates
'''
import os
import glob
import datetime
import pandas as pd
# specify the path
data_path = "D:/yapan_office_D/Data/Annex-79-OB-Database/2021-05-28-1130-raw-data/Annex 79 Data Collection/O-15-Cristina Piselli-Zeng/_yapan_processing/"
template_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/OB Database Consolidation/Templates/'
begin_time = datetime.datetime.now()
'''
1. read the two excel files into pandas and clean the data
'''
# read the data from excel and combine all the worksheets
combined_indoor = pd.concat(pd.read_excel(data_path + 'LivingEAPLAB_indoor-dataset.xlsx', sheet_name=None),
ignore_index=True)
combined_outdoor = pd.concat(pd.read_excel(data_path + 'LivingEAPLAB_weather-dataset.xlsx', sheet_name=None),
ignore_index=True)
# check missing values, and sum missing value count by column
print('Check missing values in : LivingEAPLAB_indoor-dataset.xlsx')
print(combined_indoor.isnull().sum())
print('Check missing values in : LivingEAPLAB_weather-dataset.xlsx')
print(combined_outdoor.isnull().sum())
# print out rows which contain nan values
is_NaN = combined_indoor.isnull()
row_has_NaN = is_NaN.any(axis=1)
rows_with_NaN = combined_indoor[row_has_NaN]
print('rows with missing values')
print(rows_with_NaN)
# data cleaning
combined_indoor.drop(combined_indoor.columns[[2, 3, 4, 9]], axis=1, inplace=True) # drop unused columns
# drop the ID and Datetime that have nan values
combined_indoor = combined_indoor[combined_indoor['ID_office'].notna() &
combined_indoor['Date [dd/MM/yyyy hh:mm:ss]'].notna()].copy()
combined_indoor.fillna(value=-999, inplace=True) # fill missing values with -999
combined_indoor.reset_index(drop=True, inplace=True)
# check if there are any duplicated headers
duplicates = combined_outdoor['Date'] != 'Date'
duplicates.unique() # check if False exist
# combined_outdoor = combined_outdoor[combined_outdoor['Date'] != 'Date'] # remove duplicated headers
# combined_outdoor.columns
'''
2. save the data into templates
'''
# create roomInfo dataframe to assign room Ids
roomIds = list(combined_indoor['ID_office'].unique())
roomNums = list(range(1,6))
roomInfo = pd.DataFrame({'roomId': roomIds, 'roomNumber': roomNums})
# replace roomIds with roomNums
combined_indoor.replace(roomIds, roomNums, inplace=True)
# read templates into pandas
template_appliance = pd.read_csv(template_path+'Appliance_Usage.csv')
template_appliance['Room_ID'] = ''
template_door = pd.read_csv(template_path+'Door_Status.csv')
template_window = pd.read_csv(template_path+'Window_Status.csv')
template_indoor = pd.read_csv(template_path+'Indoor_Measurement.csv')
template_outdoor = pd.read_csv(template_path+'Outdoor_Measurement.csv')
''' 2.1 Appliance_Usage.csv'''
# adding data into templates
rowNum = combined_indoor.shape[0]
id1 = pd.Series([1] * rowNum) # create a series of ids
id2 = pd.Series([2] * rowNum) # create a series of ids
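# each datalogger channel (El-1, El-2) is written out as its own appliance record
# (Appliance_ID 1 and 2) for every room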
# append appliance usage data of datalogger 1
applicance_df = template_appliance.copy()
applicance_df['Date_Time'] = combined_indoor[combined_indoor.columns[1]] # datetime
applicance_df['Electric_Power'] = combined_indoor['El-1 [A]']*230 # approximate power [W] = measured current [A] x 230 V supply
applicance_df['Appliance_ID'] = id1 # appliance ID
applicance_df['Room_ID'] = combined_indoor['ID_office'] # room ID
# concat the two dataframe
template_appliance = pd.concat([template_appliance, applicance_df], ignore_index=True, sort=False)
# append appliance usage data of datalogger 2
applicance_df = template_appliance.copy()
applicance_df['Date_Time'] = combined_indoor[combined_indoor.columns[1]]
applicance_df['Electric_Power'] = combined_indoor['El-2 [A]']*230
applicance_df['Appliance_ID'] = id2
applicance_df['Room_ID'] = combined_indoor['ID_office']
# concat the two dataframe
template_appliance = pd.concat([template_appliance, applicance_df], ignore_index=True, sort=False)
template_appliance.replace([-999*230, -999.0*230], [-999, -999], inplace=True) # replace the scaled missing value
# double check: -999.0*230 in template_appliance.values or -999*230 in template_appliance.values
# final check before saving the data
print(template_appliance.isnull().sum()) # check null values in the dataframe
# template_appliance.dtypes
template_appliance['Date_Time'] = pd.to_datetime(template_appliance['Date_Time'], format="%Y-%m-%d %H:%M:%S")
template_appliance['Electric_Power'] = template_appliance['Electric_Power'].astype(str).astype(float)
template_appliance['Appliance_ID'] = template_appliance['Appliance_ID'].astype(str).astype(int)
template_appliance['Room_ID'] = template_appliance['Room_ID'].astype(str).astype(int)
template_appliance['Appliance_Usage_ID'] = ''
# save Appliance_Usage.csv
template_appliance.to_csv(data_path+'Appliance_Usage.csv', index=False)
''' 2.2 Door_Status.csv '''
# add data to the dataframe
template_door['Date_Time'] = combined_indoor[combined_indoor.columns[1]]
template_door['Door_Status'] = combined_indoor['Door open [-]']
template_door['Room_ID'] = combined_indoor['ID_office']
# check null values
print(template_door.isnull().sum())
template_door.fillna('', inplace=True) # fill nan values with empty
# change type of the column
# template_door.dtypes
template_door['Date_Time'] = pd.to_datetime(template_door['Date_Time'], format="%Y-%m-%d %H:%M:%S")
template_door['Door_Status'] = template_door['Door_Status'].astype(int)
template_door['Room_ID'] = template_door['Room_ID'].astype(int)
print(template_door.isnull().sum())
# save Door_Status.csv
template_door.to_csv(data_path+'Door_Status.csv', index=False)
''' 2.3 Window_Status.csv '''
# add data to the dataframe
template_window['Date_Time'] = combined_indoor[combined_indoor.columns[1]]
template_window['Window_Status'] = combined_indoor['Win open [-]']
template_window['Room_ID'] = combined_indoor['ID_office']
# check null values
print(template_window.isnull().sum())
template_window.fillna('', inplace=True) # fill nan values with empty
# change type of the column
# template_door.dtypes
template_window['Date_Time'] = pd.to_datetime(template_window['Date_Time'], format="%Y-%m-%d %H:%M:%S")
template_window['Window_Status'] = template_window['Window_Status'].astype(int)
template_window['Room_ID'] = template_window['Room_ID'].astype(int)
print(template_window.isnull().sum())
# save Door_Status.csv
template_window.to_csv(data_path+'Window_Status.csv', index=False)
print(f'Total running time: {datetime.datetime.now() - begin_time}')
''' 2.4 Indoor_Measurement.csv '''
# add data to the dataframe
template_indoor['Date_Time'] = combined_indoor[combined_indoor.columns[1]]
template_indoor['Indoor_Temp'] = combined_indoor['Air T [°C]']
template_indoor['Indoor_Illuminance'] = combined_indoor['Illum [lux]']
template_indoor['Room_ID'] = combined_indoor['ID_office']
# check null values
print(template_indoor.isnull().sum()) # check missing values in the populated columns
template_indoor.fillna('', inplace=True) # fill nan values with empty
# change type of the column
# template_door.dtypes
template_indoor['Date_Time'] = | pd.to_datetime(template_indoor['Date_Time'], format="%Y-%m-%d %H:%M:%S") | pandas.to_datetime |
from datetime import (
datetime,
timedelta,
)
import re
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.errors import InvalidIndexError
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
isna,
notna,
)
import pandas._testing as tm
import pandas.core.common as com
# We pass through a TypeError raised by numpy
_slice_msg = "slice indices must be integers or None or have an __index__ method"
class TestDataFrameIndexing:
def test_getitem(self, float_frame):
# Slicing
sl = float_frame[:20]
assert len(sl.index) == 20
# Column access
for _, series in sl.items():
assert len(series.index) == 20
assert tm.equalContents(series.index, sl.index)
for key, _ in float_frame._series.items():
assert float_frame[key] is not None
assert "random" not in float_frame
with pytest.raises(KeyError, match="random"):
float_frame["random"]
def test_getitem2(self, float_frame):
df = float_frame.copy()
df["$10"] = np.random.randn(len(df))
ad = np.random.randn(len(df))
df["@awesome_domain"] = ad
with pytest.raises(KeyError, match=re.escape("'df[\"$10\"]'")):
df.__getitem__('df["$10"]')
res = df["@awesome_domain"]
tm.assert_numpy_array_equal(ad, res.values)
def test_setitem_list(self, float_frame):
float_frame["E"] = "foo"
data = float_frame[["A", "B"]]
float_frame[["B", "A"]] = data
tm.assert_series_equal(float_frame["B"], data["A"], check_names=False)
tm.assert_series_equal(float_frame["A"], data["B"], check_names=False)
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
data[["A"]] = float_frame[["A", "B"]]
newcolumndata = range(len(data.index) - 1)
msg = (
rf"Length of values \({len(newcolumndata)}\) "
rf"does not match length of index \({len(data)}\)"
)
with pytest.raises(ValueError, match=msg):
data["A"] = newcolumndata
def test_setitem_list2(self):
df = DataFrame(0, index=range(3), columns=["tt1", "tt2"], dtype=np.int_)
df.loc[1, ["tt1", "tt2"]] = [1, 2]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series([1, 2], df.columns, dtype=np.int_, name=1)
tm.assert_series_equal(result, expected)
df["tt1"] = df["tt2"] = "0"
df.loc[df.index[1], ["tt1", "tt2"]] = ["1", "2"]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series(["1", "2"], df.columns, name=1)
tm.assert_series_equal(result, expected)
def test_getitem_boolean(self, mixed_float_frame, mixed_int_frame, datetime_frame):
# boolean indexing
d = datetime_frame.index[10]
indexer = datetime_frame.index > d
indexer_obj = indexer.astype(object)
subindex = datetime_frame.index[indexer]
subframe = datetime_frame[indexer]
tm.assert_index_equal(subindex, subframe.index)
with pytest.raises(ValueError, match="Item wrong length"):
datetime_frame[indexer[:-1]]
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
with pytest.raises(ValueError, match="Boolean array expected"):
datetime_frame[datetime_frame]
# test that Series work
indexer_obj = Series(indexer_obj, datetime_frame.index)
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test that Series indexers reindex
# we are producing a warning that since the passed boolean
# key is not the same as the given index, we will reindex
# not sure this is really necessary
with tm.assert_produces_warning(UserWarning):
indexer_obj = indexer_obj.reindex(datetime_frame.index[::-1])
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test df[df > 0]
for df in [
datetime_frame,
mixed_float_frame,
mixed_int_frame,
]:
data = df._get_numeric_data()
bif = df[df > 0]
bifw = DataFrame(
{c: np.where(data[c] > 0, data[c], np.nan) for c in data.columns},
index=data.index,
columns=data.columns,
)
# add back other columns to compare
for c in df.columns:
if c not in bifw:
bifw[c] = df[c]
bifw = bifw.reindex(columns=df.columns)
tm.assert_frame_equal(bif, bifw, check_dtype=False)
for c in df.columns:
if bif[c].dtype != bifw[c].dtype:
assert bif[c].dtype == df[c].dtype
def test_getitem_boolean_casting(self, datetime_frame):
# don't upcast if we don't need to
df = datetime_frame.copy()
df["E"] = 1
df["E"] = df["E"].astype("int32")
df["E1"] = df["E"].copy()
df["F"] = 1
df["F"] = df["F"].astype("int64")
df["F1"] = df["F"].copy()
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")] * 2
+ [np.dtype("int64")] * 2,
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
# int block splitting
df.loc[df.index[1:3], ["E1", "F1"]] = 0
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")]
+ [np.dtype("float64")]
+ [np.dtype("int64")]
+ [np.dtype("float64")],
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
def test_getitem_boolean_list(self):
df = DataFrame(np.arange(12).reshape(3, 4))
def _checkit(lst):
result = df[lst]
expected = df.loc[df.index[lst]]
tm.assert_frame_equal(result, expected)
_checkit([True, False, True])
_checkit([True, True, True])
_checkit([False, False, False])
def test_getitem_boolean_iadd(self):
arr = np.random.randn(5, 5)
df = DataFrame(arr.copy(), columns=["A", "B", "C", "D", "E"])
df[df < 0] += 1
arr[arr < 0] += 1
tm.assert_almost_equal(df.values, arr)
def test_boolean_index_empty_corner(self):
# #2096
blah = DataFrame(np.empty([0, 1]), columns=["A"], index=DatetimeIndex([]))
# both of these should succeed trivially
k = np.array([], bool)
blah[k]
blah[k] = 0
def test_getitem_ix_mixed_integer(self):
df = DataFrame(
np.random.randn(4, 3), index=[1, 10, "C", "E"], columns=[1, 2, 3]
)
result = df.iloc[:-1]
expected = df.loc[df.index[:-1]]
tm.assert_frame_equal(result, expected)
result = df.loc[[1, 10]]
expected = df.loc[Index([1, 10])]
tm.assert_frame_equal(result, expected)
def test_getitem_ix_mixed_integer2(self):
# 11320
df = DataFrame(
{
"rna": (1.5, 2.2, 3.2, 4.5),
-1000: [11, 21, 36, 40],
0: [10, 22, 43, 34],
1000: [0, 10, 20, 30],
},
columns=["rna", -1000, 0, 1000],
)
result = df[[1000]]
expected = df.iloc[:, [3]]
tm.assert_frame_equal(result, expected)
result = df[[-1000]]
expected = df.iloc[:, [1]]
tm.assert_frame_equal(result, expected)
def test_getattr(self, float_frame):
tm.assert_series_equal(float_frame.A, float_frame["A"])
msg = "'DataFrame' object has no attribute 'NONEXISTENT_NAME'"
with pytest.raises(AttributeError, match=msg):
float_frame.NONEXISTENT_NAME
def test_setattr_column(self):
df = DataFrame({"foobar": 1}, index=range(10))
df.foobar = 5
assert (df.foobar == 5).all()
def test_setitem(self, float_frame):
# not sure what else to do here
series = float_frame["A"][::2]
float_frame["col5"] = series
assert "col5" in float_frame
assert len(series) == 15
assert len(float_frame) == 30
exp = np.ravel(np.column_stack((series.values, [np.nan] * 15)))
exp = Series(exp, index=float_frame.index, name="col5")
tm.assert_series_equal(float_frame["col5"], exp)
series = float_frame["A"]
float_frame["col6"] = series
tm.assert_series_equal(series, float_frame["col6"], check_names=False)
# set ndarray
arr = np.random.randn(len(float_frame))
float_frame["col9"] = arr
assert (float_frame["col9"] == arr).all()
float_frame["col7"] = 5
assert (float_frame["col7"] == 5).all()
float_frame["col0"] = 3.14
assert (float_frame["col0"] == 3.14).all()
float_frame["col8"] = "foo"
assert (float_frame["col8"] == "foo").all()
# this is partially a view (e.g. some blocks are view)
# so raise/warn
smaller = float_frame[:2]
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
smaller["col10"] = ["1", "2"]
assert smaller["col10"].dtype == np.object_
assert (smaller["col10"] == ["1", "2"]).all()
def test_setitem2(self):
# dtype changing GH4204
df = DataFrame([[0, 0]])
df.iloc[0] = np.nan
expected = DataFrame([[np.nan, np.nan]])
tm.assert_frame_equal(df, expected)
df = DataFrame([[0, 0]])
df.loc[0] = np.nan
tm.assert_frame_equal(df, expected)
def test_setitem_boolean(self, float_frame):
df = float_frame.copy()
values = float_frame.values
df[df["A"] > 0] = 4
values[values[:, 0] > 0] = 4
tm.assert_almost_equal(df.values, values)
# test that column reindexing works
series = df["A"] == 4
series = series.reindex(df.index[::-1])
df[series] = 1
values[values[:, 0] == 4] = 1
tm.assert_almost_equal(df.values, values)
df[df > 0] = 5
values[values > 0] = 5
tm.assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
tm.assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
tm.assert_almost_equal(df.values, values)
# indexed with same shape but rows-reversed df
df[df[::-1] == 2] = 3
values[values == 2] = 3
tm.assert_almost_equal(df.values, values)
msg = "Must pass DataFrame or 2-d ndarray with boolean values only"
with pytest.raises(TypeError, match=msg):
df[df * 0] = 2
# index with DataFrame
mask = df > np.abs(df)
expected = df.copy()
df[df > np.abs(df)] = np.nan
expected.values[mask.values] = np.nan
tm.assert_frame_equal(df, expected)
# set from DataFrame
expected = df.copy()
df[df > np.abs(df)] = df * 2
np.putmask(expected.values, mask.values, df.values * 2)
tm.assert_frame_equal(df, expected)
def test_setitem_cast(self, float_frame):
float_frame["D"] = float_frame["D"].astype("i8")
assert float_frame["D"].dtype == np.int64
# #669, should not cast?
# this is now set to int64, which means a replacement of the column to
# the value dtype (and nothing to do with the existing dtype)
float_frame["B"] = 0
assert float_frame["B"].dtype == np.int64
# cast if pass array of course
float_frame["B"] = np.arange(len(float_frame))
assert issubclass(float_frame["B"].dtype.type, np.integer)
float_frame["foo"] = "bar"
float_frame["foo"] = 0
assert float_frame["foo"].dtype == np.int64
float_frame["foo"] = "bar"
float_frame["foo"] = 2.5
assert float_frame["foo"].dtype == np.float64
float_frame["something"] = 0
assert float_frame["something"].dtype == np.int64
float_frame["something"] = 2
assert float_frame["something"].dtype == np.int64
float_frame["something"] = 2.5
assert float_frame["something"].dtype == np.float64
def test_setitem_corner(self, float_frame):
# corner case
df = DataFrame({"B": [1.0, 2.0, 3.0], "C": ["a", "b", "c"]}, index=np.arange(3))
del df["B"]
df["B"] = [1.0, 2.0, 3.0]
assert "B" in df
assert len(df.columns) == 2
df["A"] = "beginning"
df["E"] = "foo"
df["D"] = "bar"
df[datetime.now()] = "date"
df[datetime.now()] = 5.0
# what to do when empty frame with index
dm = DataFrame(index=float_frame.index)
dm["A"] = "foo"
dm["B"] = "bar"
assert len(dm.columns) == 2
assert dm.values.dtype == np.object_
# upcast
dm["C"] = 1
assert dm["C"].dtype == np.int64
dm["E"] = 1.0
assert dm["E"].dtype == np.float64
# set existing column
dm["A"] = "bar"
assert "bar" == dm["A"][0]
dm = DataFrame(index=np.arange(3))
dm["A"] = 1
dm["foo"] = "bar"
del dm["foo"]
dm["foo"] = "bar"
assert dm["foo"].dtype == np.object_
dm["coercible"] = ["1", "2", "3"]
assert dm["coercible"].dtype == np.object_
def test_setitem_corner2(self):
data = {
"title": ["foobar", "bar", "foobar"] + ["foobar"] * 17,
"cruft": np.random.random(20),
}
df = DataFrame(data)
ix = df[df["title"] == "bar"].index
df.loc[ix, ["title"]] = "foobar"
df.loc[ix, ["cruft"]] = 0
assert df.loc[1, "title"] == "foobar"
assert df.loc[1, "cruft"] == 0
def test_setitem_ambig(self):
# Difficulties with mixed-type data
from decimal import Decimal
# Created as float type
dm = DataFrame(index=range(3), columns=range(3))
coercable_series = Series([Decimal(1) for _ in range(3)], index=range(3))
uncoercable_series = Series(["foo", "bzr", "baz"], index=range(3))
dm[0] = np.ones(3)
assert len(dm.columns) == 3
dm[1] = coercable_series
assert len(dm.columns) == 3
dm[2] = uncoercable_series
assert len(dm.columns) == 3
assert dm[2].dtype == np.object_
def test_setitem_None(self, float_frame):
# GH #766
float_frame[None] = float_frame["A"]
tm.assert_series_equal(
float_frame.iloc[:, -1], float_frame["A"], check_names=False
)
tm.assert_series_equal(
float_frame.loc[:, None], float_frame["A"], check_names=False
)
tm.assert_series_equal(float_frame[None], float_frame["A"], check_names=False)
repr(float_frame)
def test_loc_setitem_boolean_mask_allfalse(self):
# GH 9596
df = DataFrame(
{"a": ["1", "2", "3"], "b": ["11", "22", "33"], "c": ["111", "222", "333"]}
)
result = df.copy()
result.loc[result.b.isna(), "a"] = result.a
tm.assert_frame_equal(result, df)
def test_getitem_fancy_slice_integers_step(self):
df = DataFrame(np.random.randn(10, 5))
# this is OK
result = df.iloc[:8:2] # noqa
df.iloc[:8:2] = np.nan
assert isna(df.iloc[:8:2]).values.all()
def test_getitem_setitem_integer_slice_keyerrors(self):
df = DataFrame(np.random.randn(10, 5), index=range(0, 20, 2))
# this is OK
cp = df.copy()
cp.iloc[4:10] = 0
assert (cp.iloc[4:10] == 0).values.all()
# so is this
cp = df.copy()
cp.iloc[3:11] = 0
assert (cp.iloc[3:11] == 0).values.all()
result = df.iloc[2:6]
result2 = df.loc[3:11]
expected = df.reindex([4, 6, 8, 10])
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# non-monotonic, raise KeyError
df2 = df.iloc[list(range(5)) + list(range(5, 10))[::-1]]
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11]
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11] = 0
@td.skip_array_manager_invalid_test # already covered in test_iloc_col_slice_view
def test_fancy_getitem_slice_mixed(self, float_frame, float_string_frame):
sliced = float_string_frame.iloc[:, -3:]
assert sliced["D"].dtype == np.float64
# get view with single block
# setting it triggers setting with copy
sliced = float_frame.iloc[:, -3:]
assert np.shares_memory(sliced["C"]._values, float_frame["C"]._values)
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
sliced.loc[:, "C"] = 4.0
assert (float_frame["C"] == 4).all()
def test_getitem_setitem_non_ix_labels(self):
df = tm.makeTimeDataFrame()
start, end = df.index[[5, 10]]
result = df.loc[start:end]
result2 = df[start:end]
expected = df[5:11]
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
result = df.copy()
result.loc[start:end] = 0
result2 = df.copy()
result2[start:end] = 0
expected = df.copy()
expected[5:11] = 0
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
def test_ix_multi_take(self):
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, :]
xp = df.reindex([0])
tm.assert_frame_equal(rs, xp)
# GH#1321
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, df.columns == 1]
xp = df.reindex(index=[0], columns=[1])
tm.assert_frame_equal(rs, xp)
def test_getitem_fancy_scalar(self, float_frame):
f = float_frame
ix = f.loc
# individual value
for col in f.columns:
ts = f[col]
for idx in f.index[::5]:
assert ix[idx, col] == ts[idx]
@td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values
def test_setitem_fancy_scalar(self, float_frame):
f = float_frame
expected = float_frame.copy()
ix = f.loc
# individual value
for j, col in enumerate(f.columns):
ts = f[col] # noqa
for idx in f.index[::5]:
i = f.index.get_loc(idx)
val = np.random.randn()
expected.values[i, j] = val
ix[idx, col] = val
tm.assert_frame_equal(f, expected)
def test_getitem_fancy_boolean(self, float_frame):
f = float_frame
ix = f.loc
expected = f.reindex(columns=["B", "D"])
result = ix[:, [False, True, False, True]]
tm.assert_frame_equal(result, expected)
expected = f.reindex(index=f.index[5:10], columns=["B", "D"])
result = ix[f.index[5:10], [False, True, False, True]]
tm.assert_frame_equal(result, expected)
boolvec = f.index > f.index[7]
expected = f.reindex(index=f.index[boolvec])
result = ix[boolvec]
tm.assert_frame_equal(result, expected)
result = ix[boolvec, :]
tm.assert_frame_equal(result, expected)
result = ix[boolvec, f.columns[2:]]
expected = f.reindex(index=f.index[boolvec], columns=["C", "D"])
tm.assert_frame_equal(result, expected)
@td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values
def test_setitem_fancy_boolean(self, float_frame):
# from 2d, set with booleans
frame = float_frame.copy()
expected = float_frame.copy()
mask = frame["A"] > 0
frame.loc[mask] = 0.0
expected.values[mask.values] = 0.0
tm.assert_frame_equal(frame, expected)
frame = float_frame.copy()
expected = float_frame.copy()
frame.loc[mask, ["A", "B"]] = 0.0
expected.values[mask.values, :2] = 0.0
tm.assert_frame_equal(frame, expected)
def test_getitem_fancy_ints(self, float_frame):
result = float_frame.iloc[[1, 4, 7]]
expected = float_frame.loc[float_frame.index[[1, 4, 7]]]
tm.assert_frame_equal(result, expected)
result = float_frame.iloc[:, [2, 0, 1]]
expected = float_frame.loc[:, float_frame.columns[[2, 0, 1]]]
tm.assert_frame_equal(result, expected)
def test_getitem_setitem_boolean_misaligned(self, float_frame):
# boolean index misaligned labels
mask = float_frame["A"][::-1] > 1
result = float_frame.loc[mask]
expected = float_frame.loc[mask[::-1]]
tm.assert_frame_equal(result, expected)
cp = float_frame.copy()
expected = float_frame.copy()
cp.loc[mask] = 0
expected.loc[mask] = 0
tm.assert_frame_equal(cp, expected)
def test_getitem_setitem_boolean_multi(self):
df = DataFrame(np.random.randn(3, 2))
# get
k1 = np.array([True, False, True])
k2 = np.array([False, True])
result = df.loc[k1, k2]
expected = df.loc[[0, 2], [1]]
tm.assert_frame_equal(result, expected)
expected = df.copy()
df.loc[np.array([True, False, True]), np.array([False, True])] = 5
expected.loc[[0, 2], [1]] = 5
tm.assert_frame_equal(df, expected)
def test_getitem_setitem_float_labels(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.random.randn(5, 5), index=index)
result = df.loc[1.5:4]
expected = df.reindex([1.5, 2, 3, 4])
tm.assert_frame_equal(result, expected)
assert len(result) == 4
result = df.loc[4:5]
expected = df.reindex([4, 5]) # reindex with int
tm.assert_frame_equal(result, expected, check_index_type=False)
assert len(result) == 2
result = df.loc[4:5]
expected = df.reindex([4.0, 5.0]) # reindex with float
tm.assert_frame_equal(result, expected)
assert len(result) == 2
# loc_float changes this to work properly
result = df.loc[1:2]
expected = df.iloc[0:2]
tm.assert_frame_equal(result, expected)
df.loc[1:2] = 0
result = df[1:2]
assert (result == 0).all().all()
# #2727
index = Index([1.0, 2.5, 3.5, 4.5, 5.0])
df = DataFrame(np.random.randn(5, 5), index=index)
# positional slicing only via iloc!
msg = (
"cannot do positional indexing on Float64Index with "
r"these indexers \[1.0\] of type float"
)
with pytest.raises(TypeError, match=msg):
df.iloc[1.0:5]
result = df.iloc[4:5]
expected = df.reindex([5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 1
cp = df.copy()
with pytest.raises(TypeError, match=_slice_msg):
cp.iloc[1.0:5] = 0
with pytest.raises(TypeError, match=msg):
result = cp.iloc[1.0:5] == 0
assert result.values.all()
assert (cp.iloc[0:1] == df.iloc[0:1]).values.all()
cp = df.copy()
cp.iloc[4:5] = 0
assert (cp.iloc[4:5] == 0).values.all()
assert (cp.iloc[0:4] == df.iloc[0:4]).values.all()
# float slicing
result = df.loc[1.0:5]
expected = df
tm.assert_frame_equal(result, expected)
assert len(result) == 5
result = df.loc[1.1:5]
expected = df.reindex([2.5, 3.5, 4.5, 5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 4
result = df.loc[4.51:5]
expected = df.reindex([5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 1
result = df.loc[1.0:5.0]
expected = df.reindex([1.0, 2.5, 3.5, 4.5, 5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 5
cp = df.copy()
cp.loc[1.0:5.0] = 0
result = cp.loc[1.0:5.0]
assert (result == 0).values.all()
def test_setitem_single_column_mixed_datetime(self):
df = DataFrame(
np.random.randn(5, 3),
index=["a", "b", "c", "d", "e"],
columns=["foo", "bar", "baz"],
)
df["timestamp"] = Timestamp("20010102")
# check our dtypes
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 3 + [np.dtype("datetime64[ns]")],
index=["foo", "bar", "baz", "timestamp"],
)
tm.assert_series_equal(result, expected)
# GH#16674 iNaT is treated as an integer when given by the user
df.loc["b", "timestamp"] = iNaT
assert not isna(df.loc["b", "timestamp"])
assert df["timestamp"].dtype == np.object_
assert df.loc["b", "timestamp"] == iNaT
# allow this syntax (as of GH#3216)
df.loc["c", "timestamp"] = np.nan
assert isna(df.loc["c", "timestamp"])
# allow this syntax
df.loc["d", :] = np.nan
assert not isna(df.loc["c", :]).all()
def test_setitem_mixed_datetime(self):
# GH 9336
expected = DataFrame(
{
"a": [0, 0, 0, 0, 13, 14],
"b": [
datetime(2012, 1, 1),
1,
"x",
"y",
datetime(2013, 1, 1),
datetime(2014, 1, 1),
],
}
)
df = DataFrame(0, columns=list("ab"), index=range(6))
df["b"] = pd.NaT
df.loc[0, "b"] = datetime(2012, 1, 1)
df.loc[1, "b"] = 1
df.loc[[2, 3], "b"] = "x", "y"
A = np.array(
[
[13, np.datetime64("2013-01-01T00:00:00")],
[14, np.datetime64("2014-01-01T00:00:00")],
]
)
df.loc[[4, 5], ["a", "b"]] = A
tm.assert_frame_equal(df, expected)
def test_setitem_frame_float(self, float_frame):
piece = float_frame.loc[float_frame.index[:2], ["A", "B"]]
float_frame.loc[float_frame.index[-2] :, ["A", "B"]] = piece.values
result = float_frame.loc[float_frame.index[-2:], ["A", "B"]].values
expected = piece.values
tm.assert_almost_equal(result, expected)
def test_setitem_frame_mixed(self, float_string_frame):
# GH 3216
# already aligned
f = float_string_frame.copy()
piece = DataFrame(
[[1.0, 2.0], [3.0, 4.0]], index=f.index[0:2], columns=["A", "B"]
)
key = (f.index[slice(None, 2)], ["A", "B"])
f.loc[key] = piece
tm.assert_almost_equal(f.loc[f.index[0:2], ["A", "B"]].values, piece.values)
def test_setitem_frame_mixed_rows_unaligned(self, float_string_frame):
# GH#3216 rows unaligned
f = float_string_frame.copy()
piece = DataFrame(
[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]],
index=list(f.index[0:2]) + ["foo", "bar"],
columns=["A", "B"],
)
key = (f.index[slice(None, 2)], ["A", "B"])
f.loc[key] = piece
tm.assert_almost_equal(
f.loc[f.index[0:2:], ["A", "B"]].values, piece.values[0:2]
)
def test_setitem_frame_mixed_key_unaligned(self, float_string_frame):
# GH#3216 key is unaligned with values
f = float_string_frame.copy()
piece = f.loc[f.index[:2], ["A"]]
piece.index = f.index[-2:]
key = (f.index[slice(-2, None)], ["A", "B"])
f.loc[key] = piece
piece["B"] = np.nan
tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values)
def test_setitem_frame_mixed_ndarray(self, float_string_frame):
# GH#3216 ndarray
f = float_string_frame.copy()
piece = float_string_frame.loc[f.index[:2], ["A", "B"]]
key = (f.index[slice(-2, None)], ["A", "B"])
f.loc[key] = piece.values
tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values)
def test_setitem_frame_upcast(self):
# needs upcasting
df = DataFrame([[1, 2, "foo"], [3, 4, "bar"]], columns=["A", "B", "C"])
df2 = df.copy()
df2.loc[:, ["A", "B"]] = df.loc[:, ["A", "B"]] + 0.5
expected = df.reindex(columns=["A", "B"])
expected += 0.5
expected["C"] = df["C"]
tm.assert_frame_equal(df2, expected)
def test_setitem_frame_align(self, float_frame):
piece = float_frame.loc[float_frame.index[:2], ["A", "B"]]
piece.index = float_frame.index[-2:]
piece.columns = ["A", "B"]
float_frame.loc[float_frame.index[-2:], ["A", "B"]] = piece
result = float_frame.loc[float_frame.index[-2:], ["A", "B"]].values
expected = piece.values
tm.assert_almost_equal(result, expected)
def test_getitem_setitem_ix_duplicates(self):
# #1201
df = DataFrame(np.random.randn(5, 3), index=["foo", "foo", "bar", "baz", "bar"])
result = df.loc["foo"]
expected = df[:2]
tm.assert_frame_equal(result, expected)
result = df.loc["bar"]
expected = df.iloc[[2, 4]]
tm.assert_frame_equal(result, expected)
result = df.loc["baz"]
expected = df.iloc[3]
tm.assert_series_equal(result, expected)
def test_getitem_ix_boolean_duplicates_multiple(self):
# #1201
df = DataFrame(np.random.randn(5, 3), index=["foo", "foo", "bar", "baz", "bar"])
result = df.loc[["bar"]]
exp = df.iloc[[2, 4]]
tm.assert_frame_equal(result, exp)
result = df.loc[df[1] > 0]
exp = df[df[1] > 0]
tm.assert_frame_equal(result, exp)
result = df.loc[df[0] > 0]
exp = df[df[0] > 0]
tm.assert_frame_equal(result, exp)
@pytest.mark.parametrize("bool_value", [True, False])
def test_getitem_setitem_ix_bool_keyerror(self, bool_value):
# #2199
df = DataFrame({"a": [1, 2, 3]})
message = f"{bool_value}: boolean label can not be used without a boolean index"
with pytest.raises(KeyError, match=message):
df.loc[bool_value]
msg = "cannot use a single bool to index into setitem"
with pytest.raises(KeyError, match=msg):
df.loc[bool_value] = 0
# TODO: rename? remove?
def test_single_element_ix_dont_upcast(self, float_frame):
float_frame["E"] = 1
assert issubclass(float_frame["E"].dtype.type, (int, np.integer))
result = float_frame.loc[float_frame.index[5], "E"]
assert is_integer(result)
# GH 11617
df = DataFrame({"a": [1.23]})
df["b"] = 666
result = df.loc[0, "b"]
assert is_integer(result)
expected = Series([666], [0], name="b")
result = df.loc[[0], "b"]
tm.assert_series_equal(result, expected)
def test_iloc_row(self):
df = DataFrame(np.random.randn(10, 4), index=range(0, 20, 2))
result = df.iloc[1]
exp = df.loc[2]
tm.assert_series_equal(result, exp)
result = df.iloc[2]
exp = df.loc[4]
tm.assert_series_equal(result, exp)
# slice
result = df.iloc[slice(4, 8)]
expected = df.loc[8:14]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[1, 2, 4, 6]]
expected = df.reindex(df.index[[1, 2, 4, 6]])
tm.assert_frame_equal(result, expected)
def test_iloc_row_slice_view(self, using_array_manager):
df = DataFrame(np.random.randn(10, 4), index=range(0, 20, 2))
original = df.copy()
# verify slice is view
# setting it makes it raise/warn
subset = df.iloc[slice(4, 8)]
assert np.shares_memory(df[2], subset[2])
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
subset.loc[:, 2] = 0.0
exp_col = original[2].copy()
# TODO(ArrayManager) verify it is expected that the original didn't change
if not using_array_manager:
exp_col[4:8] = 0.0
tm.assert_series_equal(df[2], exp_col)
def test_iloc_col(self):
df = DataFrame(np.random.randn(4, 10), columns=range(0, 20, 2))
result = df.iloc[:, 1]
exp = df.loc[:, 2]
tm.assert_series_equal(result, exp)
result = df.iloc[:, 2]
exp = df.loc[:, 4]
tm.assert_series_equal(result, exp)
# slice
result = df.iloc[:, slice(4, 8)]
expected = df.loc[:, 8:14]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[:, [1, 2, 4, 6]]
expected = df.reindex(columns=df.columns[[1, 2, 4, 6]])
tm.assert_frame_equal(result, expected)
def test_iloc_col_slice_view(self, using_array_manager):
df = DataFrame(np.random.randn(4, 10), columns=range(0, 20, 2))
original = df.copy()
subset = df.iloc[:, slice(4, 8)]
if not using_array_manager:
# verify slice is view
assert np.shares_memory(df[8]._values, subset[8]._values)
# and that we are setting a copy
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
subset.loc[:, 8] = 0.0
assert (df[8] == 0).all()
else:
# TODO(ArrayManager) verify this is the desired behaviour
subset[8] = 0.0
# subset changed
assert (subset[8] == 0).all()
# but df itself did not change (setitem replaces full column)
tm.assert_frame_equal(df, original)
def test_loc_duplicates(self):
# gh-17105
# insert a duplicate element to the index
trange = date_range(
start= | Timestamp(year=2017, month=1, day=1) | pandas.Timestamp |
import pandas as pd
import ast
import sys
import os.path
from pandas.core.algorithms import isin
sys.path.insert(1,
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import dateutil.parser as parser
from utils.mysql_utils import separator
from utils.io import read_json
from utils.scraping_utils import remove_html_tags
from utils.user_utils import infer_role
from graph.arango_utils import *
import pgeocode
def cast_to_float(v):
try:
return float(v)
except ValueError:
return v
def convert_to_iso8601(text):
date = parser.parse(text)
return date.isoformat()
def load_member_summaries(
source_dir="data_for_graph/members",
filename="company_check",
# concat_uk_sector=False
):
'''
LOAD FLAT FILES OF MEMBER DATA
'''
dfs = []
for membership_level in ("Patron", "Platinum", "Gold", "Silver", "Bronze", "Digital", "Freemium"):
summary_filename = os.path.join(source_dir, membership_level, f"{membership_level}_{filename}.csv")
print ("reading summary from", summary_filename)
dfs.append(pd.read_csv(summary_filename, index_col=0).rename(columns={"database_id": "id"}))
summaries = pd.concat(dfs)
# if concat_uk_sector:
# member_uk_sectors = pd.read_csv(f"{source_dir}/members_to_sector.csv", index_col=0)
# # for col in ("sectors", "divisions", "groups", "classes"):
# # member_uk_sectors[f"UK_{col}"] = member_uk_sectors[f"UK_{col}"].map(ast.literal_eval)
# summaries = summaries.join(member_uk_sectors, on="member_name", how="left")
return summaries
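# Illustrative usage (hypothetical call site): load_member_summaries() returns the
# concatenated per-level member summaries, which the populate_* functions below
# consume, e.g.
#   summaries = load_member_summaries()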
def populate_sectors(
source_dir="data_for_graph",
db=None):
'''
CREATE AND ADD SECTOR(AS DEFINED IN MIM DB) NODES TO GRAPH
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Sectors", db)
sectors = pd.read_csv(f"{source_dir}/all_sectors.csv", index_col=0)
i = 0
for _, row in sectors.iterrows():
sector_name = row["sector_name"]
print ("creating document for sector", sector_name)
document = {
"_key": str(i),
"name": sector_name,
"sector_name": sector_name,
"id": row["id"]
}
insert_document(db, collection, document)
i += 1
def populate_commerces(
data_dir="data_for_graph",
db=None):
'''
CREATE AND ADD COMMERCE(AS DEFINED IN MIM DB) NODES TO GRAPH
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Commerces", db)
commerces = pd.read_csv(f"{data_dir}/all_commerces_with_categories.csv", index_col=0)
commerces = commerces.drop_duplicates("commerce_name")
i = 0
for _, row in commerces.iterrows():
commerce = row["commerce_name"]
category = row["commerce_category"]
print ("creating document for commerce", commerce)
document = {
"_key": str(i),
"name": commerce,
"commerce": commerce,
"category": category,
"id": row["id"]
}
insert_document(db, collection, document)
i += 1
def populate_members(
cols_of_interest=[
"id",
"member_name",
"website",
"about_company",
"membership_level",
"tenancies",
"badges",
"accreditations",
"sectors", # add to member as list
"buys",
"sells",
"sic_codes",
"directors",
"Cash_figure",
"NetWorth_figure",
"TotalCurrentAssets_figure",
"TotalCurrentLiabilities_figure",
],
db=None):
'''
CREATE AND POPULATE MEMBER NODES
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Members", db, )
    members = load_member_summaries()
members = members[cols_of_interest]
members = members.drop_duplicates("member_name") # ensure no accidental duplicates
members = members.loc[~pd.isnull(members["tenancies"])]
members["about_company"] = members["about_company"].map(remove_html_tags, na_action="ignore")
members = members.sort_values("member_name")
i = 0
for _, row in members.iterrows():
member_name = row["member_name"]
if pd.isnull(member_name):
continue
document = {
"_key" : str(i),
"name": member_name,
**{
k: (row[k].split(separator) if not | pd.isnull(row[k]) | pandas.isnull |
import pytest
from mapping import mappings
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
from pandas.tseries.offsets import BDay
@pytest.fixture
def dates():
return pd.Series(
[TS('2016-10-20'), TS('2016-11-21'), TS('2016-12-20')],
index=['CLX16', 'CLZ16', 'CLF17']
)
def test_not_in_roll_one_generic_static_roller(dates):
dt = dates.iloc[0]
contract_dates = dates.iloc[0:2]
sd, ed = (dt + BDay(-8), dt + BDay(-7))
timestamps = pd.date_range(sd, ed, freq='b')
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
trans = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
midx = pd.MultiIndex.from_product([timestamps, ['CLX16']])
midx.names = ['date', 'contract']
cols = pd.Index([0], name='generic')
wts_exp = pd.DataFrame([1.0, 1.0], index=midx, columns=cols)
# with DatetimeIndex
wts = mappings.roller(timestamps, contract_dates,
mappings.static_transition, transition=trans)
assert_frame_equal(wts, wts_exp)
# with tuple
wts = mappings.roller(tuple(timestamps), contract_dates,
mappings.static_transition, transition=trans)
assert_frame_equal(wts, wts_exp)
def test_not_in_roll_one_generic_static_transition(dates):
contract_dates = dates.iloc[0:2]
ts = dates.iloc[0] + BDay(-8)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 1.0, ts)]
assert wts == wts_exp
def test_non_numeric_column_static_transition(dates):
contract_dates = dates.iloc[0:2]
ts = dates.iloc[0] + BDay(-8)
cols = pd.MultiIndex.from_product([["CL1"], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [("CL1", 'CLX16', 1.0, ts)]
assert wts == wts_exp
def test_finished_roll_pre_expiry_static_transition(dates):
contract_dates = dates.iloc[0:2]
ts = dates.iloc[0] + BDay(-2)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-9, -8]
transition = pd.DataFrame([[1.0, 0.0], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLZ16', 1.0, ts)]
assert wts == wts_exp
def test_not_in_roll_one_generic_filtering_front_contracts_static_transition(dates): # NOQA
contract_dates = dates.iloc[0:2]
ts = dates.iloc[1] + BDay(-8)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLZ16', 1.0, ts)]
assert wts == wts_exp
def test_roll_with_holiday(dates):
contract_dates = dates.iloc[-2:]
ts = pd.Timestamp("2016-11-17")
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
holidays = [np.datetime64("2016-11-18")]
# the holiday moves the roll schedule up one day, since Friday is
# excluded as a day
wts = mappings.static_transition(ts, contract_dates, transition,
holidays)
wts_exp = [(0, 'CLZ16', 0.5, ts), (0, 'CLF17', 0.5, ts)]
assert wts == wts_exp
def test_not_in_roll_one_generic_zero_weight_back_contract_no_contract_static_transition(dates): # NOQA
contract_dates = dates.iloc[0:1]
ts = dates.iloc[0] + BDay(-8)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 1.0, ts)]
assert wts == wts_exp
def test_aggregate_weights():
ts = pd.Timestamp("2015-01-01")
wts_list = [(0, 'CLX16', 1.0, ts), (1, 'CLZ16', 1.0, ts)]
wts = mappings.aggregate_weights(wts_list)
idx = pd.MultiIndex.from_product([[ts], ["CLX16", "CLZ16"]],
names=["date", "contract"])
cols = pd.Index([0, 1], name="generic")
wts_exp = pd.DataFrame([[1.0, 0], [0, 1.0]], index=idx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_aggregate_weights_drop_date():
ts = pd.Timestamp("2015-01-01")
wts_list = [(0, 'CLX16', 1.0, ts), (1, 'CLZ16', 1.0, ts)]
wts = mappings.aggregate_weights(wts_list, drop_date=True)
idx = pd.Index(["CLX16", "CLZ16"], name="contract")
cols = pd.Index([0, 1], name="generic")
wts_exp = pd.DataFrame([[1.0, 0], [0, 1.0]], index=idx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_static_bad_transitions(dates):
contract_dates = dates.iloc[[0]]
ts = dates.iloc[0] + BDay(-8)
# transition does not contain 'front' column
cols = pd.MultiIndex.from_product([[0], ['not_front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
with pytest.raises(ValueError):
mappings.static_transition(ts, contract_dates, transition)
# transition does not sum to one across rows
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.0], [0.0, 1.0]],
index=idx, columns=cols)
with pytest.raises(ValueError):
mappings.static_transition(ts, contract_dates, transition)
# transition is not monotonic increasing in back
transition = pd.DataFrame([[0.7, 0.3], [0.8, 0.2], [0.0, 1.0]],
index=idx, columns=cols)
with pytest.raises(ValueError):
mappings.static_transition(ts, contract_dates, transition)
def test_no_roll_date_two_generics_static_transition(dates):
dt = dates.iloc[0]
contract_dates = dates
ts = dt + BDay(-8)
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
[0.0, 1.0, 0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 1.0, ts), (1, 'CLZ16', 1.0, ts)]
assert wts == wts_exp
def test_not_in_roll_two_generics_static_roller(dates):
dt = dates.iloc[0]
contract_dates = dates.iloc[0:3]
sd, ed = (dt + BDay(-8), dt + BDay(-7))
timestamps = pd.date_range(sd, ed, freq='b')
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
[0.0, 1.0, 0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.roller(timestamps, contract_dates,
mappings.static_transition,
transition=transition)
midx = pd.MultiIndex.from_product([timestamps, ['CLX16', 'CLZ16']])
midx.names = ['date', 'contract']
cols = pd.Index([0, 1], name='generic')
wts_exp = pd.DataFrame([[1.0, 0.0], [0.0, 1.0],
[1.0, 0.0], [0.0, 1.0]], index=midx,
columns=cols)
assert_frame_equal(wts, wts_exp)
def test_during_roll_two_generics_one_day_static_transition(dates):
contract_dates = dates
ts = dates.iloc[0] + BDay(-1)
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
[0.0, 1.0, 0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 0.5, ts), (0, 'CLZ16', 0.5, ts),
(1, 'CLZ16', 0.5, ts), (1, 'CLF17', 0.5, ts)]
assert wts == wts_exp
def test_invalid_contract_dates():
ts = [pd.Timestamp("2016-10-19")]
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-1, 0]
trans = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0]],
index=idx, columns=cols)
non_unique_index = pd.Series([pd.Timestamp('2016-10-20'),
pd.Timestamp('2016-11-21')],
index=['instr1', 'instr1'])
with pytest.raises(ValueError):
mappings.roller(ts, non_unique_index, mappings.static_transition,
transition=trans)
non_unique_vals = pd.Series([pd.Timestamp('2016-10-20'),
pd.Timestamp('2016-10-20')],
index=['instr1', 'instr2'])
with pytest.raises(ValueError):
mappings.roller(ts, non_unique_vals, mappings.static_transition,
transition=trans)
non_monotonic_vals = pd.Series([pd.Timestamp('2016-10-20'),
pd.Timestamp('2016-10-19')],
index=['instr1', 'instr2'])
with pytest.raises(ValueError):
mappings.static_transition(ts[0], non_monotonic_vals, transition=trans)
not_enough_vals = pd.Series([pd.Timestamp('2016-10-19')],
index=['instr1'])
with pytest.raises(IndexError):
mappings.static_transition(ts[0], not_enough_vals, transition=trans)
def test_during_roll_two_generics_one_day_static_roller(dates):
dt = dates.iloc[0]
contract_dates = dates
timestamps = pd.DatetimeIndex([dt + BDay(-1)])
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-2, -1, 0]
trans = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
[0.0, 1.0, 0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.roller(timestamps, contract_dates,
mappings.static_transition, transition=trans)
midx = pd.MultiIndex.from_product([timestamps,
['CLF17', 'CLX16', 'CLZ16']])
midx.names = ['date', 'contract']
cols = pd.Index([0, 1], name='generic')
wts_exp = pd.DataFrame([[0, 0.5], [0.5, 0], [0.5, 0.5]],
index=midx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_whole_roll_roll_two_generics_static_roller(dates):
dt = dates.iloc[0]
contract_dates = dates
timestamps = pd.DatetimeIndex([dt + BDay(-2), dt + BDay(-1), dt])
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-2, -1, 0]
trans = pd.DataFrame([[1, 0, 1, 0], [0.5, 0.5, 0.5, 0.5],
[0, 1, 0, 1]],
index=idx, columns=cols)
wts = mappings.roller(timestamps, contract_dates,
mappings.static_transition, transition=trans)
midx = pd.MultiIndex.from_tuples([(timestamps[0], 'CLX16'),
(timestamps[0], 'CLZ16'),
(timestamps[1], 'CLF17'),
(timestamps[1], 'CLX16'),
(timestamps[1], 'CLZ16'),
(timestamps[2], 'CLF17'),
(timestamps[2], 'CLZ16')])
midx.names = ['date', 'contract']
cols = pd.Index([0, 1], name='generic')
wts_exp = pd.DataFrame([[1, 0], [0, 1], [0, 0.5], [0.5, 0], [0.5, 0.5],
[0, 1], [1, 0]],
index=midx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_roll_to_roll_two_generics():
contract_dates = pd.Series(
[TS('2016-10-10'), TS('2016-10-13'), TS('2016-10-17'), TS('2016-10-20')],
index=['A', 'B', 'C', 'D']
)
timestamps = pd.date_range(contract_dates.iloc[0] + BDay(-2),
contract_dates.iloc[1], freq='b')
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-2, -1, 0]
trans = pd.DataFrame([[1, 0, 1, 0], [0.5, 0.5, 0.5, 0.5],
[0, 1, 0, 1]], index=idx, columns=cols)
wts = mappings.roller(timestamps, contract_dates,
mappings.static_transition, transition=trans)
midx = pd.MultiIndex.from_tuples([(timestamps[0], 'A'),
(timestamps[0], 'B'),
(timestamps[1], 'A'),
(timestamps[1], 'B'),
(timestamps[1], 'C'),
(timestamps[2], 'B'),
(timestamps[2], 'C'),
(timestamps[3], 'B'),
(timestamps[3], 'C'),
(timestamps[4], 'B'),
(timestamps[4], 'C'),
(timestamps[4], 'D'),
(timestamps[5], 'C'),
(timestamps[5], 'D')])
midx.names = ['date', 'contract']
cols = pd.Index([0, 1], name='generic')
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1],
[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
wts_exp = pd.DataFrame(vals, index=midx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_to_generics_two_generics_exact_soln():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=['CLX16', 'CLZ16', 'CLF17'],
columns=[0, 1])
instrs = pd.Series([10, 20, 10], index=["CLX16", "CLZ16", "CLF17"])
generics = mappings.to_generics(instrs, wts)
exp_generics = pd.Series([20.0, 20.0], index=[0, 1])
assert_series_equal(generics, exp_generics)
def test_to_generics_two_generics_exact_soln_negative():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=['CLX16', 'CLZ16', 'CLF17'],
columns=[0, 1])
instrs = pd.Series([10, 0, -10], index=["CLX16", "CLZ16", "CLF17"])
generics = mappings.to_generics(instrs, wts)
exp_generics = pd.Series([20.0, -20.0], index=[0, 1])
assert_series_equal(generics, exp_generics)
def test_to_generics_two_generics_zero_generics_weight():
# scenario where one generic has 0 weight, tests for bug where result
# has epsilon weight on CL1
wts = pd.DataFrame([[0, 1]], index=["CLZ16"], columns=["CL1", "CL2"])
notional = pd.Series([-13900.0], index=["CLZ16"])
generics = mappings.to_generics(notional, wts)
exp_generics = pd.Series([-13900.0], index=["CL2"])
assert_series_equal(generics, exp_generics)
def test_to_generics_two_generics_minimize_error_non_integer_soln():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=['CLX16', 'CLZ16', 'CLF17'],
columns=[0, 1])
instrs = pd.Series([10, 20, 11], index=["CLX16", "CLZ16", "CLF17"])
generics = mappings.to_generics(instrs, wts)
exp_generics = pd.Series([19.5, 21.5], index=[0, 1])
assert_series_equal(generics, exp_generics)
def test_to_generics_two_generics_minimize_error_integer_soln():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=['CLX16', 'CLZ16', 'CLF17'],
columns=[0, 1])
instrs = pd.Series([10, 25, 11], index=["CLX16", "CLZ16", "CLF17"])
generics = mappings.to_generics(instrs, wts)
exp_generics = pd.Series([22.0, 24.0], index=[0, 1])
assert_series_equal(generics, exp_generics)
def test_to_generics_three_generics_exact_soln():
wts = pd.DataFrame([[0.5, 0, 0], [0.5, 0.5, 0], [0, 0.5, 0.5],
[0, 0, 0.5]],
index=['CLX16', 'CLZ16', 'CLF17', 'CLG17'],
columns=[0, 1, 2])
instrs = pd.Series([10, 20, 20, 10],
index=["CLX16", "CLZ16", "CLF17", "CLG17"])
generics = mappings.to_generics(instrs, wts)
exp_generics = pd.Series([20.0, 20.0, 20.0], index=[0, 1, 2])
assert_series_equal(generics, exp_generics)
def test_to_generics_three_generics_non_exact_soln():
wts = pd.DataFrame([[0.5, 0, 0], [0.5, 0.5, 0], [0, 0.5, 0.5],
[0, 0, 0.5]],
index=['CLX16', 'CLZ16', 'CLF17', 'CLG17'],
columns=[0, 1, 2])
instrs = pd.Series([10, 21, 20, 13],
index=["CLX16", "CLZ16", "CLF17", "CLG17"])
generics = mappings.to_generics(instrs, wts)
exp_generics = pd.Series([22.0, 18.0, 24.0], index=[0, 1, 2])
assert_series_equal(generics, exp_generics)
def test_to_generics_two_generics_multi_asset():
wts1 = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=['CLX16', 'CLZ16', 'CLF17'],
columns=["CL0", "CL1"])
wts2 = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=['COX16', 'COZ16', 'COF17'],
columns=["CO0", "CO1"])
wts = {"CL": wts1, "CO": wts2}
instrs = pd.Series([10, 20, 10, 10, 20, 10],
index=["CLX16", "CLZ16", "CLF17",
"COX16", "COZ16", "COF17"])
generics = mappings.to_generics(instrs, wts)
exp_generics = pd.Series([20.0, 20.0, 20.0, 20.0],
index=["CL0", "CL1", "CO0", "CO1"])
assert_series_equal(generics, exp_generics)
def test_to_generics_two_generics_key_error():
wts1 = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=['CLX16', 'CLZ16', 'CLF17'],
columns=[0, 1])
# COZ16 is mistyped as CLO16 resulting in no weights for the instrument
# COZ16
wts2 = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=['COX16', 'CLO16', 'COF17'],
columns=[0, 1])
wts = {"CL": wts1, "CO": wts2}
instrs = pd.Series([10, 20, 10, 10, 20, 10],
index=["CLX16", "CLZ16", "CLF17",
"COX16", "COZ16", "COF17"])
with pytest.raises(KeyError):
mappings.to_generics(instrs, wts)
def test_bdom():
exp_cols = ["date", "year", "month", "bdom", "month_code"]
months = {1: "G", 3: "J", 8: "U"}
date_info = mappings.bdom_roll_date("20160115", "20171231", 2, months)
date_info_exp = pd.DataFrame({
"date": [TS("20160302"), TS("20160802"), | TS("20170103") | pandas.Timestamp |
#! /usr/bin/env python3
import argparse
import re,sys,os,math,gc
import numpy as np
import pandas as pd
import matplotlib as mpl
import copy
import math
from math import pi
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from scipy import sparse
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import copy
import math
import seaborn as sns
#from scipy.interpolate import BSpline, make_interp_spline
plt.rcParams.update({'figure.max_open_warning': 100000})
plt.style.use('seaborn-colorblind')
mpl.rcParams['ytick.direction'] = 'out'
mpl.rcParams['savefig.dpi'] = 300 # saved figure resolution (dpi)
mpl.rcParams['figure.dpi'] = 300
mpl.rcParams['pdf.fonttype']=42
mpl.rcParams['ps.fonttype']=42
__author__ ='赵玥'
__mail__ ='<EMAIL>'
__date__ ='20191101'
def draw_boundaries(ax,Boundary_dict,start,end,samplelist,str_x,sam_x):
ax.tick_params(top='off',bottom='off',left='on',right='off')
for loc in ['top','left','right','bottom']:
ax.spines[loc].set_visible(False)
#ax.spines['left'].set_color('k')
#ax.spines['left'].set_linewidth(2)
#ax.spines['left'].set_smart_bounds(True)
#ax.spines['left'].set_linewidth(1)
#ax.spines['right'].set_visible(False)
#ax.spines['bottom'].set_visible(False)
    ax.set_facecolor('w')
ax.set(xticks=[])
ax.set(yticks=[])
sample1 = samplelist[0]
sample2 = samplelist[1]
boundary_mid1 = Boundary_dict[sample1]['mid'].tolist()
boundary_mid2 = Boundary_dict[sample2]['mid'].tolist()
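    # draw each sample's boundary midpoints as short vertical ticks on its own
    # half of the track: sample1 on the upper half, sample2 on the lower half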
bound_y1min = [1.25 for i in boundary_mid1]
bound_y1max = [1.75 for i in boundary_mid1]
bound_y2min = [0.25 for i in boundary_mid2]
bound_y2max = [0.75 for i in boundary_mid2]
ax.set_ylim(0,2)
ax.vlines(boundary_mid1,bound_y1min,bound_y1max,lw=2,color='red')
ax.vlines(boundary_mid2,bound_y2min,bound_y2max,lw=2,color='green')
ax.set_xlim(start,end)
ax.text(str_x,0.5,'bound',horizontalalignment='right',verticalalignment='center',rotation='vertical',transform=ax.transAxes,fontsize=8)
ax.text(sam_x,0.75,sample1,horizontalalignment='right',verticalalignment='center',rotation='horizontal',transform=ax.transAxes,color="red",fontsize=8)
ax.text(sam_x,0.25,sample2,horizontalalignment='right',verticalalignment='center',rotation='horizontal',transform=ax.transAxes,color="green",fontsize=8)
def cut_boundaries(Boundary_dict,sample,boundaryPath,chrom,start,end):
Boundary_df = pd.read_table(boundaryPath,header=0,index_col=None,encoding='utf-8')
Boundary_df = Boundary_df.fillna(0)
Boundary_df = Boundary_df[['start','end']]
Boundary_df['mid'] = (Boundary_df['start'] + Boundary_df['end'])/2
Boundary_df = Boundary_df[Boundary_df['mid']>=start]
Boundary_df = Boundary_df[Boundary_df['mid']<=end]
Boundary_df.reset_index(drop=True)
Boundary_dict[sample] = Boundary_df
return Boundary_dict
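# A sketch (not in the original script) of how cut_boundaries and draw_boundaries
# are meant to be chained: the file paths, sample names and the str_x/sam_x label
# offsets below are placeholders.
def example_plot_boundaries(ax, chrom, start, end):
    samplelist = ['sample1', 'sample2']
    boundary_files = {'sample1': 'sample1_boundary.xls',
                      'sample2': 'sample2_boundary.xls'}
    Boundary_dict = {}
    for sample in samplelist:
        Boundary_dict = cut_boundaries(Boundary_dict, sample,
                                       boundary_files[sample], chrom, start, end)
    draw_boundaries(ax, Boundary_dict, start, end, samplelist,
                    str_x=-0.05, sam_x=1.02)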
def draw_insulation(ax,insu,chrs,start,end,color):
#df_insu=cut_insulation(insu,chrs,start,end)
df_insu=pd.read_table(insu,sep='\t',names=['chrs','start','end','insu'])
ax.tick_params(top='off',bottom='off',left='on',right='off')
line=ax.plot(df_insu['start'],df_insu['insu'], color=color, linewidth=0.8, label="insulation")
ax.set_xlim(start,end)
ax.set_xticks([])
ax.set_ylim(df_insu['insu'].min(),df_insu['insu'].max())
#ax.set_yticks([df_insu['insu'].min(),df_insu['insu'].max()])
    for loc in ['left', 'top', 'bottom', 'right']:
        ax.spines[loc].set_linewidth(0)
        ax.spines[loc].set_color('black')
def draw_SV(files,ax,chrom,start,end,sample,color,types):
markdf=pd.read_table(files,sep='\t')
markdf=markdf[markdf['types']==types]
markdf=markdf[markdf['chrs']==chrom]
markdf=markdf[markdf['start']>start]
markdf=markdf[markdf['end']<end]
ax.tick_params(left='on',right='off',top='off',bottom='on')
markdf['width'] = markdf['end'] - markdf['start']
markdf['sign']=[1]*len(markdf)
#vectorf = np.vectorize(np.float)
#vectori = np.vectorize(np.int)
#starts=list(markdf['start'])
#hight=list(markdf['sign'])
#width=(markdf['width'])
ax.bar(x=list(markdf['start']),height=list(markdf['sign']),bottom=0, width = list(markdf['width']),color=color,linewidth=0,align='edge')
ax.set_xlim([start,end])
ax.set_ylim([0,1])
xts = np.linspace(start,end,2)
yts = np.linspace(0,1,2)
xtkls = ['{:,}'.format(int(i)) for i in xts]
ytkls = ['{:,}'.format(int(j)) for j in yts]
ax.tick_params(direction='out',pad=1)
ax.set_yticks([])
#ax.set_yticklabels(ytkls,fontsize=5)
ax.text(-0.11,0.0,sample,fontsize=12,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='vertical',transform=ax.transAxes)
#ax.set_title("{}_{}_{}_{}".format(sample,chrom,start,end),fontsize=10)
ax.spines['bottom'].set_linewidth(0)
ax.spines['left'].set_linewidth(0)
ax.spines['right'].set_linewidth(0)
ax.spines['top'].set_linewidth(0)
    # NOTE: 'type' here is the Python builtin (draw_SV has no axis-position
    # parameter like the other draw_* helpers), so this branch never runs.
    if type =='bottom':
ax.set_xticks(xts)
ax.set_xticklabels(xtkls,fontsize=12)
ax.spines['bottom'].set_linewidth(0.5)
ax.spines['bottom'].set_color('k')
ax.text(-0.11,-0.7,chrom,fontsize=12,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
else:
ax.set_xticks([])
ax.set_xticklabels('')
markdf = pd.DataFrame()
gc.collect()
def cut_insulation(insu,chrs,start,end):
file=open(insu)
file_list=[]
for i in file:
i=i.strip()
file_list.append(i)
insu_list=[]
for i in range(len(file_list)):
x=file_list[i].split('/')
insu_list.append([x[-2],file_list[i]])
list_df=pd.DataFrame(insu_list,columns=['chrs','insu'])
list_df=list_df[list_df['chrs']==chrs]
list_df=list_df.reset_index(drop=True)
df_insu=pd.read_table(list_df['insu'][0],sep='\t',names=['chrs','start','end','insu'],comment='t')
df_insu['mid']=(df_insu['start']+df_insu['end'])/2
df_insu=df_insu.fillna(0)
df_insu=df_insu[(df_insu['start']>start)&(df_insu['end']<end)]
return df_insu
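# A sketch of wiring cut_insulation into a plot. Note the asymmetry above:
# draw_insulation reads a single bedGraph-like insulation file directly, while
# cut_insulation expects a text file listing one insulation file per chromosome
# (the second-to-last path component is taken as the chromosome name). The list
# path below is a placeholder.
def example_plot_insulation(ax, chrs, start, end):
    df_insu = cut_insulation('insulation_file_list.txt', chrs, start, end)
    ax.plot(df_insu['mid'], df_insu['insu'], color='#3B679E', linewidth=0.8)
    ax.set_xlim(start, end)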
def draw_AB(files,res,chrom,start,end,sample,ax):
compartdf = pd.read_table(files,sep='\t',names=['chrom','start','end','eigen1'])
compartdf = compartdf[compartdf['chrom']==chrom]
compartdf = compartdf.reset_index(drop=True)
df = compartdf
df=df[df['end']>=start]
df=df[df['start']<=end]
df=df.reset_index(drop=True)
ax.tick_params(top='off',bottom='on',left='off',right='off')
for loc in ['left','right','top','bottom']:
ax.spines[loc].set_visible(False)
df['width']=df['end']-df['start']
#ax.axis([start, end, min,max])
for i in range(len(df)):
if df['eigen1'][i]>0:
ax.bar(x=df['start'][i],height=df['eigen1'][i],bottom=0, width = df['width'][i],color='#E7605B',linewidth=0,align='edge')
else:
ax.bar(x=df['start'][i],height=df['eigen1'][i],bottom=0, width = df['width'][i],color='#3B679E',linewidth=0,align='edge')
ax.set_ylim(-0.1,0.1)
ax.set_ylabel(sample)
ax.set_yticks([])
ax.set_xticks([])
def Express_Swith(Epipath,chrom,start,end):
Expressdf = pd.read_table(Epipath,header=None,index_col=False,sep='\t')
Expressdf.columns = ['chrom','start','end','sign']
Expressdf = Expressdf[Expressdf['chrom']==chrom]
Expressdf = Expressdf[Expressdf['start']>=int(start)]
Expressdf = Expressdf[Expressdf['end']<=int(end)]
Expressdf = Expressdf.reset_index(drop=True)
return Expressdf
def draw_epigenetic(file,ax,chrom,start,end,sample,color,MaxYlim,type,mins):
markdf=pd.read_table(file,sep='\t',names=['chrs','start','end','sign'])
markdf=markdf[markdf['chrs']==chrom]
markdf=markdf[markdf['start']>start]
markdf=markdf[markdf['end']<end]
ax.tick_params(left='on',right='off',top='off',bottom='on')
markdf['width'] = markdf['end'] - markdf['start']
recs = ax.bar(x=list(markdf['start']),height=list(markdf['sign']),bottom=0, width = list(markdf['width']),color=color,linewidth=0,align='edge')
if MaxYlim == 'None':
ymaxlim = markdf['sign'].max()
yminlim = markdf['sign'].min()
else:
ymaxlim = float(MaxYlim)
yminlim = float(mins)
ax.set_xlim([start,end])
ax.set_ylim([yminlim,ymaxlim])
xts = np.linspace(start,end,5)
yts = np.linspace(yminlim,ymaxlim,2)
xtkls = ['{:,}'.format(int(i)) for i in xts]
ytkls = ['{:,}'.format(float(j)) for j in yts]
ax.tick_params(direction='out',pad=1)
ax.set_yticks(yts)
ax.set_yticklabels(ytkls,fontsize=5)
ax.text(-0.11,0.4,sample,fontsize=6,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
ax.spines['bottom'].set_linewidth(1)
ax.spines['left'].set_linewidth(1)
ax.spines['right'].set_linewidth(0)
ax.spines['top'].set_linewidth(0)
#ax.set_title("{}_{}_{}_{}".format(sample,chrom,start,end),fontsize=10)
if type =='bottom':
ax.set_xticks(xts)
ax.set_xticklabels(xtkls,fontsize=8)
ax.spines['bottom'].set_linewidth(0.5)
ax.spines['bottom'].set_color('k')
ax.text(-0.11,-0.7,chrom,fontsize=8,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
else:
ax.set_xticks([])
ax.set_xticklabels('')
markdf = pd.DataFrame()
gc.collect()
def draw_epigenetic2(file,ax,chrom,start,end,sample,color,MaxYlim,type,mins):
markdf=pd.read_table(file,sep='\t',names=['chrs','start','end','sign'])
#print (markdf.head())
markdf=markdf[markdf['chrs']==chrom]
markdf=markdf[markdf['start']>start]
markdf=markdf[markdf['end']<end]
ax.tick_params(left='on',right='off',top='off',bottom='on')
    markdf['width'] = markdf['end'] - markdf['start']
x = np.linspace(start,end,int(len(markdf)/8))
a_BSpline=make_interp_spline(markdf['start'],markdf['sign'],k=3)
y_new=a_BSpline(x)
ax.plot(x, y_new, color=color,linewidth=2)
ax.fill_between(x,y_new ,0,facecolor=color,linewidth=0,label=sample)
if MaxYlim == 'None':
ymaxlim = markdf['sign'].max()
yminlim = markdf['sign'].min()
else:
ymaxlim = float(MaxYlim)
yminlim = float(mins)
ax.set_xlim([start,end])
ax.set_ylim([yminlim,ymaxlim])
xts = np.linspace(start,end,4)
yts = np.linspace(yminlim,ymaxlim,2)
xtkls = ['{:,}'.format(int(i)) for i in xts]
ytkls = ['{:,}'.format(int(j)) for j in yts]
ax.spines['bottom'].set_linewidth(1)
ax.spines['left'].set_linewidth(1)
ax.spines['right'].set_linewidth(0)
ax.spines['top'].set_linewidth(0)
ax.tick_params(top=False,right=False,width=1,colors='black',direction='out')
ax.set_yticks(yts)
ax.set_yticklabels(ytkls,fontsize=12)
ax.text(-0.11,0.0,sample,fontsize=12,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='vertical',transform=ax.transAxes)
#ax.set_title("{}_{}_{}_{}".format(sample,chrom,start,end),fontsize=10)
if type =='bottom':
ax.set_xticks(xts)
ax.set_xticklabels(xtkls,fontsize=12)
ax.spines['bottom'].set_linewidth(0.5)
ax.spines['bottom'].set_color('k')
ax.text(-0.11,-0.7,chrom,fontsize=8,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
else:
ax.set_xticks([])
ax.set_xticklabels('')
markdf = pd.DataFrame()
gc.collect()
def draw_RNA(file,ax,chrom,start,end,sample,color,MaxYlim,type,mins):
markdf=pd.read_table(file,sep='\t',names=['chrs','start','end','sign'])
#print (markdf.head())
markdf=markdf[markdf['chrs']==chrom]
markdf=markdf[markdf['start']>start]
markdf=markdf[markdf['end']<end]
ax.tick_params(left='on',right='off',top='off',bottom='on')
markdf['width'] = markdf['end'] - markdf['start']
    # np.float / np.int were removed from NumPy; the builtins behave the same here
    vectorf = np.vectorize(float)
    vectori = np.vectorize(int)
starts=vectori(markdf['start'])
hight=vectorf(markdf['sign'])
width=vectori(markdf['width'])
ax.bar(x=starts,height=hight,bottom=0,width=width,color=color,linewidth=0,align='edge')
if MaxYlim == 'None':
ymaxlim = markdf['sign'].max()
yminlim = markdf['sign'].min()
else:
ymaxlim = float(MaxYlim)
yminlim = float(mins)
ax.set_xlim([start,end])
ax.set_ylim([yminlim,ymaxlim])
xts = np.linspace(start,end,5)
yts = np.linspace(yminlim,ymaxlim,2)
xtkls = ['{:,}'.format(int(i)) for i in xts]
ytkls = ['{:,}'.format(int(j)) for j in yts]
ax.tick_params(direction='out',pad=1)
ax.spines['bottom'].set_linewidth(1)
ax.spines['left'].set_linewidth(1)
ax.spines['right'].set_linewidth(0)
ax.spines['top'].set_linewidth(0)
ax.set_yticks(yts)
ax.set_yticklabels(ytkls,fontsize=12)
ax.text(-0.11,0.4,sample,fontsize=12,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='vertical',transform=ax.transAxes)
#ax.set_title("{}_{}_{}_{}".format(sample,chrom,start,end),fontsize=10)
if type =='bottom':
ax.set_xticks(xts)
ax.set_xticklabels(xtkls,fontsize=12)
ax.spines['bottom'].set_linewidth(0.5)
ax.spines['bottom'].set_color('k')
ax.text(-0.11,-0.7,chrom,fontsize=12,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
else:
ax.set_xticks([])
ax.set_xticklabels('')
    markdf = pd.DataFrame()
    gc.collect()
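# The script's main/driver code is not part of this excerpt. A minimal sketch of
# how the draw_* helpers above could be stacked into one figure; all file paths,
# sample names and colours below are placeholders.
def example_region_figure(chrom, start, end):
    fig, (ax_ab, ax_mark, ax_rna) = plt.subplots(
        3, 1, figsize=(8, 4), gridspec_kw={'height_ratios': [1, 2, 2]})
    draw_AB('compartment_eigen1.bedgraph', 40000, chrom, start, end, 'sample1', ax_ab)
    draw_epigenetic('H3K27ac.bedgraph', ax_mark, chrom, start, end,
                    'H3K27ac', '#E7605B', 'None', 'top', 0)
    draw_RNA('RNA.bedgraph', ax_rna, chrom, start, end,
             'RNA', '#3B679E', 'None', 'bottom', 0)
    fig.savefig('region_plot.pdf')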
import asyncio
from contextlib import asynccontextmanager
from dataclasses import dataclass
from typing import Any, Awaitable, Dict, List, Optional, Set, Tuple, Union
import discord
import pandas as pd
from de.config import Config
from de.emojis import Emoji, EmojiMapping, image_base64, load_emojis
from de.logger import logger
DiscordID = str
JSValue = Union[str, int, float, bool, None]
JSArray = List[
Union[
JSValue,
Dict[str, Union[JSValue, Dict[str, Any], List[Any]]],
List[Union[JSValue, Dict[str, Any], List[Any]]],
]
]
JSObject = Dict[
str, Union[JSValue, JSArray, Dict[str, Union[JSValue, Dict[str, Any], List[Any]]]]
]
JSON = Union[JSValue, JSArray, JSObject]
@dataclass
class EmojiResource:
id: DiscordID
name: str
roles: List[DiscordID]
user: Optional[JSON]
require_colons: bool
managed: bool
animated: bool
@classmethod
def from_payload(cls, payload: JSON):
if not isinstance(payload, dict):
raise ValueError(f"Expected payload {payload} to be an object!")
if "id" in payload:
id_ = str(payload["id"])
else:
raise ValueError(f"Expected payload {payload} to include an ID!")
if "name" in payload:
name = str(payload["name"])
else:
raise ValueError(f"Expected payload {payload} to include a name!")
if "roles" in payload:
if isinstance(payload["roles"], list):
roles: List[DiscordID] = [str(id_) for id_ in payload["roles"]]
else:
raise ValueError(
f"Expected payload {payload} to have an array of role IDs!"
)
else:
roles = []
user = payload.get("user", dict())
if "require_colons" in payload:
require_colons = bool(payload["require_colons"])
else:
raise ValueError(
f"Expected payload {payload} to include 'require_colons' property!"
)
if "managed" in payload:
managed = bool(payload["managed"])
else:
raise ValueError(
f"Expected payload {payload} to include 'managed' property!"
)
if "animated" in payload:
animated = bool(payload["animated"])
else:
raise ValueError(
f"Expected payload {payload} to include 'animated' property!"
)
return cls(
id=id_,
name=name,
roles=roles,
user=user,
require_colons=require_colons,
managed=managed,
animated=animated,
)
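# A minimal sketch of parsing one emoji payload, e.g. an element of the JSON array
# returned by Discord's "List Guild Emojis" endpoint. The field values below are
# invented for illustration; only the field names follow the parser above.
def example_parse_emoji() -> EmojiResource:
    payload: JSON = {
        "id": "41771983429993937",
        "name": "party_parrot",
        "roles": ["41771983429993000"],
        "user": {"id": "53908232506183680"},
        "require_colons": True,
        "managed": False,
        "animated": True,
    }
    return EmojiResource.from_payload(payload)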
REPORT_COLS = [
"name",
"action",
"discord_id",
"path",
"roles",
"require_colons",
"managed",
"animated",
]
ReportRow = Dict[str, Any]
def report_row(
name: str,
action: str,
*,
resource: Optional[EmojiResource] = None,
emoji: Optional[Emoji] = None,
):
row: ReportRow = dict(name=name, action=action)
if resource is not None:
row["discord_id"] = resource.id
row["roles"] = ", ".join(resource.roles)
row["require_colons"] = resource.require_colons
row["managed"] = resource.managed
row["animated"] = resource.animated
if emoji is not None:
row["path"] = emoji.path
return row
class UpdateAction:
def __init__(self, slug: str):
self._slug = slug
def __str__(self):
return self._slug
def __repr__(self):
return f"<{self._slug}>"
EDIT = UpdateAction("edit")
REPLACE = UpdateAction("replace")
@dataclass
class Changeset:
update: List[Tuple[str, EmojiResource, Emoji]]
remove: List[Tuple[str, EmojiResource]]
create: List[Tuple[str, Emoji]]
@classmethod
def diff(cls, upstream: List[EmojiResource], local: EmojiMapping) -> "Changeset":
upstream_lookup: Dict[str, EmojiResource] = dict()
upstream_keys: Set[str] = set()
managed_keys: Set[str] = set()
local_keys: Set[str] = set(local.keys())
for up in upstream:
upstream_lookup[up.name] = up
if up.managed:
managed_keys.add(up.name)
logger.info(f"Emoji {up.name} is managed, so leaving it alone...")
else:
upstream_keys.add(up.name)
return cls(
update=[
(key, upstream_lookup[key], local[key])
for key in upstream_keys & local_keys
],
remove=[(key, upstream_lookup[key]) for key in upstream_keys - local_keys],
create=[
(key, local[key]) for key in local_keys - upstream_keys - managed_keys
],
)
def report(self, update_action: UpdateAction = EDIT) -> pd.DataFrame:
table: List[ReportRow] = []
for name, r, e in self.update:
table.append(report_row(name, str(update_action), resource=r, emoji=e))
for name, r in self.remove:
table.append(report_row(name, "remove", resource=r))
for name, e in self.create:
table.append(report_row(name, "create", emoji=e))
        df = pd.DataFrame(data=table, columns=REPORT_COLS)
        return df
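# A sketch of how the pieces above might fit together: parse the upstream emoji
# payload, diff it against a local EmojiMapping, and render the planned changes
# as a report. Where the payload and local mapping come from (e.g. the Discord
# API client and load_emojis) is outside this excerpt, so they are parameters here.
def example_plan(upstream_payload: JSArray, local: EmojiMapping) -> pd.DataFrame:
    upstream = [EmojiResource.from_payload(p) for p in upstream_payload]
    changeset = Changeset.diff(upstream, local)
    return changeset.report(update_action=REPLACE)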