| prompt | completion | api |
|---|---|---|
| string (19 to 1.03M chars) | string (4 to 2.12k chars) | string (8 to 90 chars) |
'''
Data Pipeline
tocsv.py: input: video or photo, output: CSV containing the bottleneck features
train.py: input: geo directory, output: softmax model
predict.py: input: photo, output: label
directory structure:
geo
|
petid_1, petid_2, petid_n, model, found
The "petid_n" directory contains uploaded photos and videos for petid_n.
For each uploaded photo and video, a csv file is created by tocsv.py
The "model" is the trained model.
The "found" directory contains MMS images of pets found in this zipcode.
'''
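# Hedged sketch (not part of the original scripts): one way the "geo" layout
# described above could be walked to gather the per-pet bottleneck CSVs before
# training. Any subdirectory other than "model" and "found" is assumed to be a pet id.
import os, glob
def collect_pet_csvs(geo_dir):
    """Return {petid: [csv paths]} for every pet directory under geo_dir."""
    csvs = {}
    for entry in os.listdir(geo_dir):
        path = os.path.join(geo_dir, entry)
        if entry in ("model", "found") or not os.path.isdir(path):
            continue  # skip the trained model and the MMS "found" images
        csvs[entry] = glob.glob(os.path.join(path, "*.csv"))
    return csvs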
# python notes
#numpy shape is always a tuple. So for 1-dimension, it is still (n,) and not (n)
# imports
from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input
from tensorflow.keras.preprocessing import image
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
#from tensorflow.keras.models import Model
import numpy as np
import pandas as pd
import cv2
import os, glob, re
basedir = "/Volumes/Seagate Expansion Drive/Dog_Dataset/Outdoor/"
datagen = image.ImageDataGenerator(rotation_range=20, width_shift_range=0.2, height_shift_range=0.2, zoom_range=0.2, horizontal_flip=True)
#Define the base model (full ImageNet InceptionV3; its classification head is bypassed below)
base_model = InceptionV3(weights='imagenet',
input_shape=(299,299,3))
#Define a pipeline model that selects the 2048-d "avg_pool" output (global average pooling over the last convolution block)
bottleneck_creator = Model(inputs=base_model.input, outputs=base_model.get_layer("avg_pool").output)
# image is read from file using cv2
#keras load_img returns PIL image. Format: RGB, channels last format (#rows, #columns, #channels)
#img = image.load_img(img_path, target_size=(299,299), interpolation="bilinear") # bilinear is default)
#convert from PIL to numpy array of type float32
#x = image.img_to_array(img)
def extractBottlenecks(cv2img, augcount):
rgb_img = cv2.cvtColor(cv2img, cv2.COLOR_BGR2RGB)
x = rgb_img.astype(np.float32)
# add a batch dimension
x = np.expand_dims(x, axis=0)
# call model-specific preprocessing function
x = preprocess_input(x)
# WARNING: predict_generator did not work in this Keras. replaced it
#features = bottleneck_creator.predict_generator(datagen.flow(x, batch_size=1), augcount)
#new_df = pd.DataFrame(features, columns=np.arange(2048))
#return new_df
# features = bottleneck_creator.predict(x)
i = 0
df = pd.DataFrame(columns=np.arange(2048))
for xi in datagen.flow(x, batch_size=augcount):
features = bottleneck_creator.predict(xi)
new_df = pd.DataFrame(features, columns=np.arange(2048))
df = pd.concat([df, new_df], axis=0)
#print(df.shape)
i = i+1
if (i == augcount):
break
return df
def photo2csv(photopath, label):
cv2img = cv2.imread(photopath)
AUGCOUNT = 25
resized_img = cv2.resize(cv2img, (299,299), interpolation = cv2.INTER_LINEAR)
done_df = extractBottlenecks(resized_img, AUGCOUNT) #AUGCOUNT is high for photos because more augmented samples are needed from a single image
done_df["Label"] = label
return done_df
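# Example usage (illustrative; the file and output paths are assumptions based on
# the pipeline notes at the top of this file):
#   done = photo2csv("geo/petid_1/photo_001.jpg", label="petid_1")
#   done.to_csv("geo/petid_1/photo_001_bottlenecks.csv", index=False)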
def video2csv(videopath, label):
bottleneck_df = pd.DataFrame(columns=np.arange(2048))
#cv2 image is a numpy array of dtype uint8. Format: BGR
video_object = cv2.VideoCapture(videopath)
total_frames = video_object.get(cv2.CAP_PROP_FRAME_COUNT)
augcount = (2000 // total_frames) + 1
print (total_frames)
num_frames = 0
keep_going = True
while keep_going:
print ("Now on frame: " + str(num_frames))
keep_going, frame = video_object.read()
if keep_going:
resized_img = cv2.resize(frame, (299,299), interpolation = cv2.INTER_LINEAR)
new_df = extractBottlenecks(resized_img, augcount)
bottleneck_df = pd.concat([bottleneck_df, new_df], axis=0)
# %%
import warnings
warnings.filterwarnings("ignore")
from folktables import (
ACSDataSource,
ACSIncome,
ACSEmployment,
ACSMobility,
ACSPublicCoverage,
ACSTravelTime,
)
import pandas as pd
from collections import defaultdict
from scipy.stats import kstest, wasserstein_distance
import seaborn as sns
sns.set_style("whitegrid")
import numpy as np
import random
import sys
import matplotlib.pyplot as plt
# Scikit-Learn
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.linear_model import LogisticRegression, Lasso, LinearRegression
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import (
accuracy_score,
roc_auc_score,
mean_squared_error,
mean_absolute_error,
mean_absolute_percentage_error,
)
from sklearn.dummy import DummyRegressor
from sklearn.svm import SVR
from sklearn.model_selection import train_test_split
# Specific packages
from xgboost import XGBRegressor, XGBClassifier
import shap
from tqdm import tqdm
# Home made code
import sys
sys.path.append("../")
from fairtools.utils import loop_estimators_fairness, psi, loop_estimators
from ATC_opt import ATC
# Seeding
np.random.seed(0)
random.seed(0)
# %%
# Load data
data_source = ACSDataSource(survey_year="2014", horizon="1-Year", survey="person")
ca_data = data_source.get_data(states=["CA"], download=True)
data_source = ACSDataSource(survey_year="2016", horizon="1-Year", survey="person")
mi_data = data_source.get_data(states=["HI"], download=True)
# %%
states = [
"MI",
"TN",
"CT",
"OH",
"NE",
"IL",
"FL",
]
nooo = [
"OK",
"PA",
"KS",
"IA",
"KY",
"NY",
"LA",
"TX",
"UT",
"OR",
"ME",
"NJ",
"ID",
"DE",
"MN",
"WI",
"CA",
"MO",
"MD",
"NV",
"HI",
"IN",
"WV",
"MT",
"WY",
"ND",
"SD",
"GA",
"NM",
"AZ",
"VA",
"MA",
"AA",
"NC",
"SC",
"DC",
"VT",
"AR",
"WA",
"CO",
"NH",
"MS",
"AK",
"RI",
"AL",
"PR",
]
data_source = ACSDataSource(survey_year="2018", horizon="1-Year", survey="person")
# %%
ca_features, ca_labels, ca_group = ACSEmployment.df_to_numpy(ca_data)
mi_features, mi_labels, mi_group = ACSEmployment.df_to_numpy(mi_data)
## Convert to DF
ca_features = pd.DataFrame(ca_features, columns=ACSEmployment.features)
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import GridSearchCV
import lightgbm as lgb
data = {
'reserve': pd.read_pickle('../features/reserve.pkl'),
'store_info': pd.read_pickle('../features/store_info.pkl'),
'visit_data': pd.read_pickle('../features/visit_data.pkl'),
'weather': pd.read_pickle('../features/weather.pkl'),
'weekend': pd.read_pickle('../features/weekend.pkl'),
'sample_submission': pd.read_csv('../data/sample_submission.csv')
import os
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix, hstack
CACHE_LOCATION = 'dataset/data_cache'
OUTPUT_LOCATION = 'dataset/data'
def get_synthetic_data( dim, n, flag='sign'):
w = pd.Series(np.random.randint(2, size=dim+1))
if flag == 'sign':
w = 2*w-1
# generate random features
# X = pd.DataFrame([2*np.random.randint(2, size=dim)-1 for _ in range(n)])## x_i \in {-1,1}
X = pd.DataFrame([np.random.randint(2, size=dim) for _ in range(n)]) ## x_i \in {0,1}
X = X.assign(intercept=pd.DataFrame([1] * n))
X.columns = range(X.shape[1])
y = np.sign(np.dot(X, w))
y = pd.Series([b if b != 0 else -1 for b in y])
import abc
from typing import List, Tuple
import numpy as np
import pandas as pd
from scipy.optimize import minimize
from fipie.common import ReprMixin
from fipie.date import infer_ann_factor
class Weighting(ReprMixin, metaclass=abc.ABCMeta):
@abc.abstractmethod
def optimise(self, ret: pd.DataFrame, *args, **kwargs) -> pd.Series:
""" Calculate weights for instruments
:param ret: return time-series
:type ret: pd.DataFrame
:return: weights for each instrument
:rtype: pd.Series
"""
class MeanVariance(Weighting):
""" Weights are determined by the mean-variance approach and maximising the Sharpe ratio.
Expected returns and risk are estimated by historical means and covariance matrix """
def __init__(self, fully_invested: bool = True, bounds: Tuple[float, None] = (0, None)):
"""
:param fully_invested: If True, weights are rescaled so that they add up to 100%. By default the optimal weights
are rescaled to add up to 100% as the Sharpe ratio is scale-invariant with respect to the weight.
:type fully_invested: bool, default True
:param bounds: Lower and upper bounds of weights. If None, weights are unbounded, i.e., ``(0, None)`` means
it only allows long positions.
:type bounds: tuple, list-like
.. note::
With default parameters, this class produces a long-only fully-invested portfolio.
"""
self.fully_invested = fully_invested
self.bounds = bounds
def optimise(self, ret: pd.DataFrame, *args, **kwargs) -> pd.Series:
initial_weights = np.ones(len(ret.columns)) / len(ret.columns)
mu = ret.mean()
sigma = ret.cov()
bounds = [self.bounds] * len(ret.columns)
const = create_const(self.fully_invested)
result = minimize(
negative_sharpe_ratio,
initial_weights,
(mu, sigma),
method='SLSQP',
bounds=bounds,
constraints=const,
)
weights = result['x']
weights = pd.Series(weights, index=ret.columns)
return weights
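# Hedged sketch (assumption): the objective helpers referenced in this module are not
# shown in this excerpt. Minimal implementations consistent with the class docstrings
# could look like the following; the actual fipie code may differ.
def negative_sharpe_ratio(weights: np.ndarray, mu: pd.Series, sigma: pd.DataFrame) -> float:
    """Negative (unannualised) Sharpe ratio: -(w'mu) / sqrt(w'Sigma w)."""
    port_ret = float(weights @ mu.values)
    port_vol = float(np.sqrt(weights @ sigma.values @ weights))
    return -port_ret / port_vol


def portfolio_variance(weights: np.ndarray, sigma: pd.DataFrame) -> float:
    """Portfolio variance w'Sigma w, minimised by MinimumVariance below."""
    return float(weights @ sigma.values @ weights)


def create_const(fully_invested: bool):
    """Equality constraint forcing the weights to sum to 100% when requested."""
    if fully_invested:
        return ({'type': 'eq', 'fun': lambda w: np.sum(w) - 1.0},)
    return ()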
class MinimumVariance(Weighting):
""" Create a portfolio by minimising its variance. """
def __init__(self, fully_invested: bool = True, bounds: Tuple[float, None] = (0, None)):
"""
:param fully_invested: If True, weights are rescaled so that they add up to 100%. By default the optimal weights
are rescaled to add up to 100% as the Sharpe ratio is scale-invariant with respect to the weight.
:type fully_invested: bool, default True
:param bounds: Lower and upper bounds of weights. If None, weights are unbounded, i.e., ``(0, None)`` means
it only allows long positions.
:type bounds: tuple, list-like
.. note::
With default parameters, this class produces a long-only fully-invested portfolio.
"""
self.fully_invested = fully_invested
self.bounds = bounds
def optimise(self, ret: pd.DataFrame, *args, **kwargs) -> pd.Series:
initial_weights = np.ones(len(ret.columns)) / len(ret.columns)
sigma = ret.cov()
bounds = [self.bounds] * len(ret.columns)
const = create_const(self.fully_invested)
result = minimize(
portfolio_variance,
initial_weights,
(sigma,),
method='SLSQP',
bounds=bounds,
constraints=const,
tol=1e-9,
)
weights = result['x']
weights = pd.Series(weights, index=ret.columns)
return weights
class MaximumDiversification(Weighting):
r""" Create a portfolio which maximises the diversification factor
.. math::
\frac{ w^T \cdot \sigma }{ \sqrt{w^T \cdot \Sigma \cdot w} }
where :math:`w` is the weight vector of each instrument, :math:`\sigma` is the volatility vector of each instrument,
:math:`\Sigma` is the covariance matrix.
The numerator of the diversification factor is a weighted average of instrument volatility whereas
the denominator is the portfolio volatility after diversification.
"""
def __init__(self, fully_invested: bool = True, bounds: Tuple[float, None] = (0, None)):
"""
:param fully_invested: If True, weights are rescaled so that they add up to 100%. By default the optimal weights
are rescaled to add up to 100% as the Sharpe ratio is scale-invariant with respect to the weight.
:type fully_invested: bool, default True
:param bounds: Lower and upper bounds of weights. If None, weights are unbounded, i.e., ``(0, None)`` means
it only allows long positions.
:type bounds: tuple, list-like
.. note::
With default parameters, this class produces a long-only fully-invested portfolio.
"""
self.fully_invested = fully_invested
self.bounds = bounds
def optimise(self, ret: pd.DataFrame, *args, **kwargs) -> pd.Series:
initial_weights = np.ones(len(ret.columns)) / len(ret.columns)
sigma = ret.cov()
bounds = [self.bounds] * len(ret.columns)
const = create_const(self.fully_invested)
result = minimize(
negative_diversification_factor,
initial_weights,
(sigma,),
method='SLSQP',
bounds=bounds,
constraints=const,
)
weights = result['x']
weights = pd.Series(weights, index=ret.columns)
return weights
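# Hedged sketch (assumption): the diversification-factor objective referenced above,
# written directly from the formula in the MaximumDiversification docstring; the
# actual fipie implementation may differ.
def negative_diversification_factor(weights: np.ndarray, sigma: pd.DataFrame) -> float:
    vols = np.sqrt(np.diag(sigma.values))  # instrument volatilities
    weighted_vol = float(weights @ vols)  # numerator of the diversification factor
    port_vol = float(np.sqrt(weights @ sigma.values @ weights))  # denominator
    return -weighted_vol / port_vol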
class EqualRiskContribution(Weighting):
r""" Create a portfolio with equal risk contribution (ERC, aka risk parity) such that each instrument contributes
the same amount of risk to the portfolio.
More formally, let :math:`\sigma \left( w \right)` be the volatility of portfolio and :math:`w` be the weight
for each instrument. The volatility of portfolio can be decomposed to the following:
.. math::
\sigma \left( w \right) =
\sum_i \sigma_i \left( x \right) =
\sum_i w_i \frac{ {\partial} \sigma \left( x \right) }{ {\partial} w_i }
where :math:`\sigma_i \left( x \right)` is the total risk contribution of instrument :math:`i`,
:math:`\frac{ {\partial} \sigma \left( x \right) }{ {\partial} w_i }` is the marginal risk contribution.
The ERC portfolio is derived such that all instruments have the same amount of total risk contribution.
**Reference**
- <NAME>., <NAME>. and <NAME>., 2010. The properties of equally weighted risk contribution portfolios. The Journal of Portfolio Management, 36(4), pp.60-70.
"""
def __init__(self, fully_invested: bool = True, bounds: Tuple[float, None] = (0, None)):
"""
:param fully_invested: If True, weights are rescaled so that they add up to 100%. By default the optimal weights
are rescaled to add up to 100% as the Sharpe ratio is scale-invariant with respect to the weight.
:type fully_invested: bool, default True
:param bounds: Lower and upper bounds of weights. If None, weights are unbounded, i.e., ``(0, None)`` means
it only allows long positions.
:type bounds: tuple, list-like
.. note::
With default parameters, this class produces a long-only fully-invested portfolio.
"""
self.fully_invested = fully_invested
self.bounds = bounds
def optimise(self, ret: pd.DataFrame, *args, **kwargs) -> pd.Series:
initial_weights = np.ones(len(ret.columns)) / len(ret.columns)
sigma = ret.cov()
bounds = [self.bounds] * len(ret.columns)
const = create_const(self.fully_invested)
result = minimize(
total_risk_contribution_error,
initial_weights,
(sigma,),
method='SLSQP',
bounds=bounds,
constraints=const,
tol=1e-9,
)
weights = result['x']
weights = pd.Series(weights, index=ret.columns)
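# Hedged sketch (assumption): the equal-risk-contribution objective referenced above.
# It penalises pairwise differences between quantities proportional to each
# instrument's total risk contribution, w_i * (Sigma w)_i; the actual fipie
# implementation may differ.
def total_risk_contribution_error(weights: np.ndarray, sigma: pd.DataFrame) -> float:
    trc = weights * (sigma.values @ weights)  # proportional to total risk contributions
    return float(np.sum((trc[:, None] - trc[None, :]) ** 2))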
"""Create PV simulation input for renewables.ninja."""
import click
import pandas as pd
import geopandas as gpd
from src.utils import Config
from src.capacityfactors import point_raster_on_shapes
@click.command()
@click.argument("path_to_shapes_of_land_surface")
@click.argument("path_to_roof_categories")
@click.argument("path_to_output")
@click.argument("config", type=Config())
def pv_simulation_parameters(path_to_shapes_of_land_surface, path_to_roof_categories, path_to_output,
config):
"""Create PV simulation input for renewables.ninja."""
points = point_raster_on_shapes(
bounds_wgs84=config["scope"]["bounds"],
shapes=gpd.read_file(path_to_shapes_of_land_surface),
resolution_km2=config["parameters"]["ninja"]["resolution-grid"]
)
roof_categories = pd.read_csv(path_to_roof_categories, index_col=[0, 1])
roof_categories = area_to_capacity(
roof_categories,
power_density_flat=config["parameters"]["maximum-installable-power-density"]["pv-on-flat-areas"],
power_density_tilted=config["parameters"]["maximum-installable-power-density"]["pv-on-tilted-roofs"]
).reset_index()
lat_long = pd.DataFrame(
data={
"lat": [point.y for point in points.geometry],
"long": [point.x for point in points.geometry]
}
)
index = pd.MultiIndex.from_product((points.index, roof_categories.index), names=["id", "roof_cat_id"])
data = pd.DataFrame(index=index)
import os
import fnmatch
import shutil
import csv
import pandas as pd
import numpy as np
import glob
import datetime
print(os.path.realpath(__file__))
def FindResults(TaskList, VisitFolder, PartID):
for j in TaskList:
TempFile = glob.glob(os.path.join(VisitFolder,(PartID+'_'+j+'*.csv')))
# Ideally the file names should be checked to pick the latest one
if len(TempFile) > 0:
TaskList[j]['DataFile'] = TempFile[-1]
TaskList[j]['Completed'] = True
return TaskList
def ListOfExpectedResults():
# This list could be a structure
# This list is the list of names in the structure
# Then each would have a flag as to whether it was found
# It can each have the results
TaskList = {}
TaskList['Stroop_Color'] = {}
TaskList['Stroop_Color']['Completed'] = False
TaskList['Stroop_Word'] = {}
TaskList['Stroop_Word']['Completed'] = False
TaskList['Stroop_ColorWord'] = {}
TaskList['Stroop_ColorWord']['Completed'] = False
TaskList['WCST'] = {}
TaskList['WCST']['Completed'] = False
TaskList['DigitSpan_Forward'] = {}
TaskList['DigitSpan_Forward']['Completed'] = False
TaskList['DigitSpan_Backward'] = {}
TaskList['DigitSpan_Backward']['Completed'] = False
TaskList['Matrices_Main'] = {}
TaskList['Matrices_Main']['Completed'] = False
TaskList['DMS_Stair'] = {}
TaskList['DMS_Stair']['Completed'] = False
TaskList['DMS_Block'] = {}
TaskList['DMS_Block']['Completed'] = False
TaskList['VSTM_Stair'] = {}
TaskList['VSTM_Stair']['Completed'] = False
TaskList['VSTM_Block'] = {}
TaskList['VSTM_Block']['Completed'] = False
TaskList['Speed_PatternComp'] = {}
TaskList['Speed_PatternComp']['Completed'] = False
TaskList['Vocab_Antonyms'] = {}
TaskList['Vocab_Antonyms']['Completed'] = False
return TaskList
def ReadFile(VisitFolder, subid, TaskTag):
# Find the file that matches the TaskTag
# If multiple CSV files are found then the user is prompted to pick one.
# Unselected files are renamed with XXX at their beginning.
# The next time this program is run on this folder there will now only be one file
# available and the user will not be prompted again
Data = []
# List all files in the visit folder
ll = os.listdir(VisitFolder)
# create the string you are looking for which is a combo of the subid and the task name
SearchString = subid + '_' + TaskTag
matching = fnmatch.filter(ll,SearchString+'*.csv')
# It is possible that there are multiple files with similar names.
# The following asks the user for the correct one and then renames the others
count = 1
if len(matching) > 1:
# There is more than one file!
print('There are multiple files found for %s in folder: %s'%(SearchString, VisitFolder))
for i in matching:
# print the name and size of files
SizeOfFile = np.round(os.stat(os.path.join(VisitFolder, i)).st_size/1024)
print('\t%d) %s, size = %0.0f kB'%(count, i,SizeOfFile))
count += 1
sel = input('Which one should be kept? (Press return to skip)')
if len(sel) > 0:
SelectedFile = matching[int(sel)-1]
# Rename the unselected files so they will hopefully not be selected the next time!
count = 1
for i in matching:
if not count == int(sel):
OutName = 'XXX_' + i
print(OutName)
shutil.move(os.path.join(VisitFolder,i), os.path.join(VisitFolder, OutName))
count += 1
else:
SelectedFile = False
elif len(matching) == 1:
SelectedFile= matching[0]
else:
SelectedFile = False
print('Did not find any files!!!')
if SelectedFile != False:
# Now open the file
InputFile = os.path.join(VisitFolder, SelectedFile)
# Read whole file into a dataframe
# Note, in order for the data to be read as a dataframe all columns need to have headings.
# If not an error is thrown
Data = pd.read_csv(InputFile)
# -*- coding: utf-8 -*-
# -*- python 3 -*-
# -*- <NAME> -*-
# Import packages
import re
import numpy as np
import pandas as pd
import os ##for directory
import sys
import pprint
'''general function for easy use of python'''
def splitAndCombine(gene, rxn, sep0, moveDuplicate=False):
## one rxn has several genes, this function is used to split the genes
## used for the dataframe data
gene = gene.fillna('NA') # fill the NaN with 'NA'
gene0 = gene.tolist()
rxn0 = rxn.tolist()
s1 = list()
s2 = list()
for i in range(len(gene0)):
s1 = s1 + [rxn0[i]] * len(gene0[i].split(sep0))
s2 = s2 + gene0[i].split(sep0)
df0 = pd.DataFrame({'V1': s1,
'V2': s2}
)
if moveDuplicate == True:
df00 = df0.drop_duplicates()
else:
df00 = df0
return df00
def getSimilarTarget(rxn_yeast0,rxn_newGPR0,ss):
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
rxn_yeast1 = np.array(rxn_yeast0) # np.ndarray()
rxn_yeast2 = rxn_yeast1.tolist()
rxn_yeast3 = pd.Series((v[0] for v in rxn_yeast2))
rxn_newGPR1 = np.array(rxn_newGPR0) # np.ndarray()
rxn_newGPR2 = rxn_newGPR1.tolist()
rxn_newGPR3 = pd.Series((v[0] for v in rxn_newGPR2))
similarTarget = [None] * ss
for i in range(ss):
similarTarget[i] = process.extract(rxn_newGPR3[i], rxn_yeast3, limit=2)
return similarTarget
'''
#example
newMet = pd.read_excel('new metabolite for check.xlsx')
newMet0 = newMet[['name_unify']]
gemMet = pd.read_excel('unique metabolite in yeastGEM.xlsx')
gemMet0 = gemMet[['Description_simple']]
ss0 = len(newMet0)
similarTarget0 = getSimilarTarget(gemMet0,newMet0,ss=ss0)
'''
def singleMapping (description, item1, item2, dataframe=True):
"""get the single description of from item1 for item2 based on mapping"""
#description = w
#item1 = v
#item2 = testData
# used for the list data
if dataframe:
description = description.tolist()
item1 = item1.tolist()
item2 = item2.tolist()
else:
pass
index = [None]*len(item2)
result = [None]*len(item2)
tt = [None]*len(item2)
for i in range(len(item2)):
if item2[i] in item1:
index[i] = item1.index(item2[i])
result[i] = description[index[i]]
else:
index[i] = None
result[i] = None
return result
'''
w=['a','b','c']
v=[1,2,3]
s=[3,1,2,4]
singleMapping(w,v,s,dataframe=False)
'''
def multiMapping (description, item1, item2, dataframe=True, sep=";", removeDuplicates=True):
"""get multiple description of from item1 for item2 based on mapping"""
#description = w
#item1 = v
#item2 = testData
#used for the list data
if dataframe:
description = description.tolist()
item1 = item1.tolist()
item2 = item2.tolist()
else:
pass
result = [None]*len(item2)
for i in range(len(item2)):
if item2[i] in item1:
index0 = [description[index] for index in range(len(item1)) if item1[index] == item2[i]]
if removeDuplicates:
index1 = pd.unique(index0).tolist()
else:
index1 = index0
result[i] = sep.join(str(e) for e in index1) #string cat
else:
result[i] = None
return result
'''
# example data to test all the above function
df1 = pd.DataFrame({'A' : ['one', 'one', 'two', 'three'] * 3,
'B' : ['A', 'B', 'C'] * 4,
'C' : ['foo', 'foo', 'foo', 'bar', 'bar', 'bar'] * 2}
)
df2 = pd.DataFrame({'A' : ['one', 'one', 'two', 'three'] * 3,
'B' : ['A', 'B', 'C'] * 4,
'D' : np.random.randn(12)})
df2['C'] = singleMapping(df1['C'], df1['A'], df2['A'])
df2['C'] = multiMapping(df1['C'], df1['A'], df2['A'])
'''
def updateOneColumn(df1, df2, key0, value0):
"""
using dataframe df2 to update the df1
:param df1:
:param df2:
:param key0: the common column name, a string, used for the mapping
:param value0: the column in df2 used to update the df1
:return:
example
df10 = pd.DataFrame({'A': ['a', 'b', 'c'],
'B': ['x', 'y', 'z']})
df20 = pd.DataFrame({'A':['c','b'],
'B': ['e', 'd']})
updateOneColumn(df10,df20,key0='A',value0='B')
"""
df10 = df1.copy()
df11 = df1.copy()
df10[value0] = multiMapping(df2[value0], df2[key0], df10[key0])
for i, x in df10.iterrows():
print(x[value0])
if x[value0] is None:
df11[value0][i] = df11[value0][i]
else:
df11[value0][i] = df10[value0][i]
return df11[value0]
def RemoveDuplicated(s1):
"""
example:
s1=['a // a', 'b // a', None, 'non']
"""
s2=list()
for x in s1:
print(x)
if x =='non':
s2.append('')
elif x is None:
s2.append('')
else:
if "//" in x:
s0= x.split(' // ')
s0 = [x.strip() for x in s0]
s01= list(set(s0))
if len(s01)==1:
s2.append(s01[0])
else:
s2.append(' // '.join(s01))
else:
s2.append(x)
return s2
def nz(value):
'''
Convert None to string else return value.
'''
if value == None:
return 'none'
return value
def AutoUpdate(description1, para1, description2, para2):
# using the description1 in para1 to update the description2 in para2
description1 = description1.tolist()
para1 = para1.tolist()
description2 = description2.tolist()
para2 = para2.tolist()
ss = [None]*len(para2)
for i in range(len(para2)):
if para2[i] in para1:
ss[i] = para1.index(para2[i])
else:
ss[i] = None
for i in range(len(para2)):
if ss[i] != None:
description2[i] = description1[ss[i]]
else:
description2[i] = description2[i]
return description2
'''
# example data to test the followed function
df1 = pd.DataFrame({'A' : ['one', 'one', 'two', 'three'] * 3,
'B' : ['A', 'B', 'C'] * 4,
'C' : ['foo', 'foo', 'foo', 'bar', 'bar', 'bar'] * 2}
)
df2 = df1.iloc[[1,2]]
df2['C'] = ['good','good']
df1['C'] = AutoUpdate(df2['C'],df2['A'],df1['C'],df1['A'])
'''
def calculateFrequency(list0, item0):
'''
This function is used to calculate the frequency of each item occurring in a list and turn the result into a dataframe
:param list0: ['a','b','a']
:param item0:
:return: a dataframe with two columns
'''
summary = pd.Series(list0).value_counts()
summary = summary.to_frame(name='number')
summary.index.name = item0
summary.reset_index(inplace=True)
return summary
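# Example (illustrative):
#   calculateFrequency(['a', 'b', 'a'], 'letter')
# returns a two-column dataframe:
#   letter  number
#   a       2
#   b       1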
"""function for model part"""
from cobra.manipulation import remove_genes
def getStrainGEMrxn(s0, geneMatrix0, templateGEM, templateGene):
'''
This function is used to produce the strain specific model based on panYeast and gene existence matrix
from 1011 yeast strain genome sequence project
:param s0: strain name 'BFC'
:param geneMatrix0: dataframe contains the gene existence matrix for each strain. geneMatrix = pd.read_csv('../data/geneMatrix0 of 1011 yeast strains.txt', sep="\t")
:templateGEM:
:templateGene:
:return: the rxn list for each new reaction3
'''
s1 = ['geneID', s0]
geneList = geneMatrix0.loc[:, s1]
gene_exist = singleMapping(geneList.loc[:, s0].tolist(), geneList.loc[:, 'geneID'].tolist(), templateGene,
dataframe=False)
gene_exist = [0 if v is None else v for v in gene_exist]
gene_remove = [x for x, y in zip(templateGene, gene_exist) if y < 1]
newModel = templateGEM.copy()
# for i in range(len(gene_remove)):
# print(i)
# remove_genes(newModel, [gene_remove[i]], remove_reactions=True)
remove_genes(newModel, gene_remove, remove_reactions=True)
rxn = []
for x in newModel.reactions:
rxn.append(x.id)
return rxn
def getStrainGEM(s0, geneMatrix0, templateGEM, templateGene):
'''
This function is used to produce the strain specific model based on panYeast and gene existence matrix
from 1011 yeast strain genome sequence project
:param s0: strain name 'BFC'
:param geneMatrix0: dataframe contains the gene existence matrix for each strain. geneMatrix = pd.read_csv('../data/geneMatrix0 of 1011 yeast strains.txt', sep="\t")
:templateGEM:
:templateGene:
:return: the rxn list for each new reaction3
'''
s1 = ['geneID', s0]
geneList = geneMatrix0.loc[:, s1]
gene_exist = singleMapping(geneList.loc[:, s0].tolist(), geneList.loc[:, 'geneID'].tolist(), templateGene,
dataframe=False)
gene_exist = [0 if v is None else v for v in gene_exist]
gene_remove = [x for x, y in zip(templateGene, gene_exist) if y < 1]
newModel = templateGEM.copy()
# for i in range(len(gene_remove)):
# print(i)
# remove_genes(newModel, [gene_remove[i]], remove_reactions=True)
remove_genes(newModel, gene_remove, remove_reactions=True)
return newModel
def getRemoveGeneList(s0, geneMatrix0, templateGEM, templateGene):
'''
This function is used to produce the strain specific model based on panYeast and gene existence matrix
from 1011 yeast strain genome sequence project
:param s0: strain name 'BFC'
:param geneMatrix0: dataframe contains the gene existence matrix for each strain. geneMatrix = pd.read_csv('../data/geneMatrix0 of 1011 yeast strains.txt', sep="\t")
:templateGEM:
:templateGene:
:return: the gene list removed from each strain specific model
'''
s1 = ['geneID', s0]
geneList = geneMatrix0.loc[:, s1]
gene_exist = singleMapping(geneList.loc[:, s0].tolist(), geneList.loc[:, 'geneID'].tolist(), templateGene,
dataframe=False)
gene_exist = [0 if v is None else v for v in gene_exist]
gene_remove = [x for x, y in zip(templateGene, gene_exist) if y < 1]
newModel = templateGEM.copy()
# for i in range(len(gene_remove)):
# print(i)
# remove_genes(newModel, [gene_remove[i]], remove_reactions=True)
remove_genes(newModel, gene_remove, remove_reactions=True)
gene = []
for x in newModel.genes:
gene.append(x.id)
gene_remove_from_model = list(set(templateGene)-set(gene))
return gene_remove_from_model
def updateGPR(gpr0, nameMapping):
'''
This function is used to update the gpr reaction only with 'or' relation. It is used to replace the old gene name using
the new gene name. Also it did not remove the duplicated value.
:param: gpr0
:nameMapping: a dataframe contains the mapping relation between the old and new gene name, has two columns-'geneID', 'panID'
:return: gpr with the replaced new gene name
'''
#this function is mainly used to update the gene relation with 'or'
s1 = gpr0
s2 = s1.split(' ')
s3 = singleMapping(nameMapping['panID'].tolist(),nameMapping['geneID'].tolist(),s2, dataframe=False)
for i, x in enumerate(s3):
if x is None:
s3[i]=s2[i]
else:
s3[i] = s3[i]
s4 = ' '.join(s3)
return s4
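# Example (illustrative; the panID value is hypothetical):
#   nameMapping = pd.DataFrame({'geneID': ['YAL001C'], 'panID': ['PAN001']})
#   updateGPR('YAL001C or YBR002W', nameMapping)   # -> 'PAN001 or YBR002W'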
def getCompartment(rxn):
"""
This function is used to obtain the compartment information from reaction of yeastGEM
:param rxn: example acetyl-CoA[m] + L-glutamate[m] -> coenzyme A[m] + H+[m] + N-acetyl-L-glutamate[m]'
:return:
"""
cp1 = ['[c]','[ce]','[e]','[er]','[erm]','[g]','[gm]','[lp]','[m]','[mm]','[n]','[p]','[v]','[vm]']
cp2 = ['cytoplasm','cell envelope','extracellular','endoplasmic reticulum','endoplasmic reticulum membrane','Golgi','Golgi membrane','lipid particle',
'mitochondrion','mitochondrial membrane','nucleus','peroxisome','vacuole','vacuolar membrane']
cp = [None]*len(cp1)
for i in range(len(cp1)):
if cp1[i] in rxn:
cp[i] = cp2[i]
else:
cp[i] = None
cp1 = [x for i,x in enumerate(cp) if x is not None]
cp0 = ';'.join(str(e) for e in cp1)
return cp0
def getCommonCompartment(c1,c2, sep0=";"):
'''this function could get the common part between string c1 and c2
for example, c1="a;b", c2="a;c" '''
if c1 is None:
c10 = 'NONE'
else:
c10 = c1.split(sep0)
c10 = [x.strip() for x in c10]
if c2 is None:
c20 = 'NONE'
else:
c20 = c2.split(sep0)
c20 = [x.strip() for x in c20]
c3 = list(set(c10).intersection(c20))
c4 = sep0.join(str(e) for e in c3)
return c4
def getRXNgeneMapping(rxn0, gpr0):
'''this function is used to split the GPR;
input, for example rxn0=['r1','r2']
gpr0=['a or c','a and b']
output, each rxn related with each gene'''
s1 = rxn0
s2 = gpr0
s2 = s2.str.replace('and','@')
s2 = s2.str.replace('or','@')
s2 = s2.str.replace('\\( ','')
s2 = s2.str.replace('\\(\\( ','')
s2 = s2.str.replace('\\(', '')
s2 = s2.str.replace('\\(\\(', '')
s2 = s2.str.replace(' \\)','')
s2 = s2.str.replace(' \\)\\) ','')
s2 = s2.str.replace('\\)', '')
s2 = s2.str.replace('\\)\\) ', '')
s3 = splitAndCombine(s2,s1,sep0="@")
s3['V2'] = s3['V2'].str.strip()
s3.columns = ['rxnID', 'gene']
return s3
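# Example (illustrative):
#   getRXNgeneMapping(pd.Series(['r1', 'r2']), pd.Series(['a or c', 'a and b']))
# returns a dataframe with columns ['rxnID', 'gene'] and rows
#   (r1, a), (r1, c), (r2, a), (r2, b)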
def getRXNmetaboliteMapping(rxn0, met0):
'''this function is used to split the equation of metabolites; used to produce the dataframe format of GEM using
cobrapy
input, for example rxn0=['r1','r2']
met0=['a => c','a => b']
output, each rxn related with each metabolite'''
met_annotation = pd.read_excel('/Users/luho/PycharmProjects/model/cobrapy/result/met_yeastGEM.xlsx')
"""
Provides processing functions for CRSP data.
"""
from pilates import wrds_module
import pandas as pd
import numpy as np
import numba
from sklearn.linear_model import LinearRegression
class crsp(wrds_module):
def __init__(self, d):
wrds_module.__init__(self, d)
# Initialize values
self.col_id = 'permno'
self.col_date = 'date'
self.key = [self.col_id, self.col_date]
# For link with COMPUSTAT
self.linktype = ['LU', 'LC', 'LS']
# self.linkprim = ['P', 'C']
# Default data frequency
self.freq = 'M'
def set_frequency(self, frequency):
if frequency in ['Monthly', 'monthly', 'M', 'm']:
self.freq = 'M'
self.sf = self.msf
self.si = self.msi
elif frequency in ['Daily', 'daily', 'D', 'd']:
self.freq = 'D'
self.sf = self.dsf
self.si = self.dsi
else:
raise Exception('CRSP data frequency should by either',
'Monthly or Daily')
def permno_from_gvkey(self, data):
""" Returns CRSP permno from COMPUSTAT gvkey.
This code is insired from WRDS sample program 'merge_funda_crsp_byccm.sas'
available on the WRDS website.
Arguments:
data -- User provided data.
Required columns: [gvkey, datadate]
link_table -- WRDS provided linktable (ccmxpf_lnkhist)
linktype -- Default: [LC, LU]
linkprim -- Default: [P, C]
"""
# Columns required from data
key = ['gvkey', 'datadate']
# Columns required from the link table
cols_link = ['gvkey', 'lpermno', 'linktype', 'linkprim',
'linkdt', 'linkenddt']
# Open the user data
df = self.open_data(data, key).drop_duplicates().dropna()
##Create begin and end of fiscal year variables
#df['endfyr'] = df.datadate
#df['beginfyr'] = (df.datadate - np.timedelta64(11, 'M')).astype('datetime64[M]')
# Open the link data
link = self.open_data(self.linktable, cols_link)
link = link.dropna(subset=['gvkey', 'lpermno', 'linktype', 'linkprim'])
# Retrieve the specified links
link = link[(link.linktype.isin(self.linktype))]
#link = link[['gvkey', 'lpermno', 'linkdt', 'linkenddt']]
# Merge the data
dm = df.merge(link, how='left', on='gvkey')
# Filter the dates (keep correct matches)
## Note: Use conditions from WRDS code.
cond1 = (dm.linkdt <= dm.datadate) | (pd.isna(dm.linkdt))
cond2 = (dm.datadate <= dm.linkenddt) | (pd.isna(dm.linkenddt))
dm = dm[cond1 & cond2]
# Deal with duplicates
dups = dm[key].duplicated(keep=False)
dmf = dm[~dups] # Final links list
dmd = dm[dups].set_index(['gvkey', 'datadate'])
## Favor linkprim, in order: 'P', 'C', 'J' and 'N'
for lp in ['P', 'C', 'J']:
dups_lp = dmd[dmd.linkprim==lp]
dmd = dmd[~dmd.index.isin(dups_lp.index)]
dups_lp = dups_lp.reset_index()
dmf = pd.concat([dmf, dups_lp])
# Rename lpermno to permno and remove unnecessary columns
dmf = dmf.rename(columns={'lpermno': 'permno'})
dmf = dmf[['gvkey', 'datadate', 'permno']]
# Check for duplicates on the key
n_dup = dmf.shape[0] - dmf[key].drop_duplicates().shape[0]
if n_dup > 0:
print("Warning: The merged permno",
"contains {:} duplicates".format(n_dup))
# Add the permno to the user's data
dfu = self.open_data(data, key).dropna()
dfin = dfu.merge(dmf, how='left', on=key)
dfin.index = dfu.index
return(dfin.permno)
def permno_from_cusip(self, data):
""" Returns CRSP permno from CUSIP.
Note: this function does not ensure a 1-to-1 mapping and there might
be more than one cusip for a given permno (several cusips may have the
same permno).
Args:
data -- User provided data.
Required columns: ['cusip']
The cusip needs to be the CRSP ncusip.
"""
dfu = self.open_data(data, ['cusip'])
cs = dfu.drop_duplicates()
pc = self.open_data(self.msenames, ['ncusip', 'permno'])
pc = pc.drop_duplicates()
# Merge the permno
csf = cs.merge(pc, how='left', left_on=['cusip'], right_on=['ncusip'])
csf = csf[['cusip', 'permno']].dropna().drop_duplicates()
dfin = dfu.merge(csf, how='left', on='cusip')
dfin.index = dfu.index
return(dfin.permno)
def _adjust_shares(self, data, col_shares):
""" Adjust the number of shares using CRSP cfacshr field.
Arguments:
data -- User provided data.
Required fields: [permno, 'col_shares', 'col_date']
col_shares -- The field with the number of shares from data.
col_date -- The date field from data to use to compute the
adjustment.
"""
# Open and prepare the user data
cols = ['permno', col_shares, self.d.col_date]
dfu = self.open_data(data, cols)
index = dfu.index
dt = pd.to_datetime(dfu[self.d.col_date]).dt
dfu['year'] = dt.year
dfu['month'] = dt.month
# Open and prepare the CRSP data
cols = ['permno', 'date', 'cfacshr']
df = self.open_data(self.msf, cols)
dt = pd.to_datetime(df.date).dt
df['year'] = dt.year
df['month'] = dt.month
# Merge the data
key = ['permno', 'year', 'month']
dfu = dfu[key+[col_shares]].merge(df[key+['cfacshr']],
how='left', on=key)
dfu.loc[dfu.cfacshr.isna(), 'cfacshr'] = 1
# Compute the adjusted shares
dfu['adj_shares'] = dfu[col_shares] * dfu.cfacshr
dfu.index = index
return(dfu.adj_shares.astype('float32'))
def _get_fields(self, fields, data=None, file=None):
""" Returns the fields from CRSP.
This function is only used internally for the CRSP module.
Arguments:
fields -- Fields from file_fund
data -- User provided data
Required columns: [permno, date]
If none given, returns the entire compustat with key.
Otherwise, return only the fields with the data index.
file -- File to use. Default to stock files
"""
# Get the fields
# Note: CRSP data is clean without duplicates
if not file:
file = self.sf
key = [self.col_id, self.col_date]
if file == self.si:
key = [self.col_date]
df = self.open_data(file, key+fields)
# Construct the object to return
if data is not None:
# Merge and return the fields
data_key = self.open_data(data, key)
index = data_key.index
dfin = data_key.merge(df, how='left', on=key)
dfin.index = index
return(dfin[fields])
else:
# Return the entire dataset with keys
return(df)
def get_fields_daily(self, fields, data):
""" Returns the fields from CRSP daily.
Arguments:
fields -- Fields from file_fund
data -- User provided data
Required columns: [permno, date]
If none given, returns the entire compustat with key.
Otherwise, return only the fields with the data index.
Requires:
self.d.col_date -- Date field to use for the user data
"""
keyu = ['permno', self.d.col_date]
dfu = self.open_data(data, keyu)
dfu.loc[:, self.col_date] = dfu[self.d.col_date]
dfu[fields] = self._get_fields(fields, dfu, self.dsf)
return(dfu[fields])
# def _get_window_sort(self, nperiods, caldays, min_periods):
# # Define the window and how the data should be sorted
# if caldays is None:
# window = abs(nperiods)
# ascending = (nperiods < 0)
# else:
# window = str(abs(caldays)) + "D"
# ascending = (caldays < 0)
# if min_periods is None:
# print("Warning: It is advised to provide a minimum number of observations "
# "to compute aggregate values when using the 'caldays' arguments. "
# "No doing so will result in small rolling windows.")
# return window, ascending
def _value_for_data(self, var, data, ascending, useall):
"""" Add values to the users data and return the values.
Arguments:
df -- Internal data containing the values
Columns: [permno, date]
The data is indexed by date and grouped by permno.
data -- User data
Columns: [permno, wrds.col_date]
nperiods -- Number of periods to compute the variable
useall -- If True, use the compounded return of the last
available trading date (if nperiods<0) or the
compounded return of the next available trading day
(if nperiods>0).
"""
key = self.key
var.name = 'var'
values = var.reset_index()
#if nperiods == 0:
# values = var.reset_index()
#else:
# # Check that the shift onl occurs within permnos
# import ipdb; ipd.set_trace();
# values = var.shift(-nperiods).reset_index()
# Make sure the types are correct
values = self._correct_columns_types(values)
# Open user data
cols_data = [self.col_id, self.d.col_date]
dfu = self.open_data(data, cols_data)
# Prepare the dataframes for merging
dfu = dfu.sort_values(self.d.col_date)
dfu = dfu.dropna()
values = values.sort_values(self.col_date)
if useall:
# Merge on permno and on closest date
# Use the last or next trading day if requested
# Shift a maximum of 6 days
if ascending:
direction = 'backward'
else:
direction = 'forward'
dfin = pd.merge_asof(dfu, values,
left_on=self.d.col_date,
right_on=self.col_date,
by=self.col_id,
tolerance=pd.Timedelta('6 day'),
direction=direction)
else:
dfin = dfu.merge(values, how='left', left_on=cols_data, right_on=self.key)
dfin.index = dfu.index
return(dfin['var'].astype('float32'))
def _value_for_data_index(self, var, data, ascending, useall):
"""" Add indexes values to the users data and return the values.
Arguments:
df -- Internal data containing the values
Columns: [permno, date]
The data is indexed by date and grouped by permno.
data -- User data
Columns: [permno, wrds.col_date]
nperiods -- Number of periods to compute the variable
useall -- If True, use the compounded return of the last
available trading date (if nperiods<0) or the
compounded return of the next available trading day
(if nperiods>0).
"""
var.name = 'var'
values = var.reset_index()
values = self._correct_columns_types(values)
# Open user data
cols_data = [self.d.col_date]
dfu = self.open_data(data, cols_data)
# Prepare the dataframes for merging
dfu = dfu.sort_values(self.d.col_date)
dfu = dfu.dropna()
values = values.sort_values(self.col_date)
if useall:
# Merge on permno and on closest date
# Use the last or next trading day if requested
# Shift a maximum of 6 days
if ascending:
direction = 'backward'
else:
direction = 'forward'
dfin = pd.merge_asof(dfu, values,
left_on=self.d.col_date,
right_on=self.col_date,
tolerance=pd.Timedelta('6 day'),
direction=direction)
else:
dfin = dfu.merge(values, how='left', left_on=cols_data, right_on=self.col_date)
dfin.index = dfu.index
return(dfin['var'].astype('float32'))
##########################
# Variables Computations #
##########################
def _tso(self, data):
""" Compute total share outstanding. """
cols = ['permno', 'date', 'shrout', 'cfacshr']
df = self.open_data(self.sf, cols)
if self.freq == 'M':
dt = pd.to_datetime(df.date).dt
df['year'] = dt.year
df['month'] = dt.month
key = ['permno', 'year', 'month']
else:
key = ['permno', 'date']
df['tso'] = df.shrout * df.cfacshr * 1000
dfu = self.open_data(data, ['permno', self.d.col_date])
if self.freq == 'M':
dt = pd.to_datetime(dfu[self.d.col_date]).dt
dfu['year'] = dt.year
dfu['month'] = dt.month
else:
dfu['date'] = dfu[self.d.col_date]
dfin = dfu.merge(df[key+['tso']], how='left', on=key)
dfin.index = dfu.index
return(dfin.tso)
def compounded_return(self, data, nperiods=1, caldays=None, min_periods=None,
logreturn=False, useall=True):
r"""
Return the compounded daily returns over 'nperiods' periods.
If using daily frequency, one period refers to one day.
If using monthly frequency, one period refers to on month.
Arguments:
data -- User data.
Required columns: [permno, 'col_date']
nperiods -- Number of periods to use to compute the compounded
returns. If positive, compute the return over
'nperiods' in the future. If negative, compute the
return over abs(nperiods) in the past.
useall -- If True, use the compounded return of the last
available trading date (if nperiods<0) or the
compounded return of the next available trading day
(if nperiods>0).
"""
# Get the window and sort
window, ascending = self._get_window_sort(nperiods, caldays, min_periods)
# Check arguments
if nperiods==0:
raise Exception("nperiods must be different from 0.")
# Open the necessary data
key = self.key
fields = ['ret']
sf = self._get_fields(fields)
# Create the time series index
sf = sf.set_index(self.col_date).sort_index(ascending=ascending)
# Compute the compounded returns
sf['ln1ret'] = np.log(1 + sf.ret)
sumln = sf.groupby(self.col_id).rolling(window, min_periods=min_periods).ln1ret.sum()
if logreturn:
cret = sumln
else:
cret = np.exp(sumln) - 1
# Return the variable for the user data
return(self._value_for_data(cret, data, ascending, useall))
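# Illustrative check (not part of the original module) of the compounding identity
# used above: summing log gross returns and exponentiating gives the same answer as
# the cumulative product form (up to floating-point rounding).
#   rets = pd.Series([0.01, -0.02, 0.03])
#   np.exp(np.log(1 + rets).sum()) - 1   # equals (1 + rets).prod() - 1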
def volatility_return(self, data, nperiods=1, caldays=None, min_periods=None, useall=True):
r"""
Return the daily volatility of returns over 'nperiods' periods.
If using daily frequency, one period refers to one day.
If using monthly frequency, one period refers to on month.
Args:
data -- User data.
Required columns: [permno, 'col_date']
nperiods -- Number of periods to use to compute the volatility.
If positive, compute the volatility over
'nperiods' in the future. If negative, compute the
volatility over abs(nperiods) in the past.
useall -- If True, use the volatility of the last
available trading date (if nperiods<0) or the
volatility of the next available trading day
(if nperiods>0).
"""
# Get the window and sort
window, ascending = self._get_window_sort(nperiods, caldays, min_periods)
# Open the necessary data
key = [self.col_id, self.col_date]
fields = ['ret']
sf = self._get_fields(fields)
# Create the time series index
sf = sf.set_index(self.col_date).sort_index(ascending=ascending)
# Compute the volatility
vol = sf.groupby(self.col_id).rolling(window, min_periods=min_periods).ret.std()
return(self._value_for_data(vol, data, ascending, useall))
def average_bas(self, data, nperiods=1, caldays=None, min_periods=None, useall=True, bas=None):
r"""
Return the daily average bid-ask spread over 'nperiods' periods.
If using daily frequency, one period refers to one day.
If using monthly frequency, one period refers to on month.
Args:
data -- User data.
Required columns: [permno, 'col_date']
nperiods -- Number of periods to use to compute the volatility.
If positive, compute the average bid-ask spread over
'nperiods' in the future. If negative, compute the
average bid-ask spread over abs(nperiods) in the past.
useall -- If True, use the bid-ask spread of the last
available trading date (if ndays<0) or the
bid-ask spread of the next available trading day
(if ndays>0).
bas -- Type of bid and ask to use. If None, use the fields
'bid' and 'ask' from CRSP. If 'lohi', use the fields
'bidlo' and 'askhi' from CRSP.
"""
# Get the window and sort
window, ascending = self._get_window_sort(nperiods, caldays, min_periods)
# Open the necessary data
key = self.key
fields = ['bid', 'ask']
if bas is None:
fs = ['bid', 'ask']
elif bas == 'lohi':
fs = ['bidlo', 'askhi']
else:
raise Exception("'bas' argument only accepts None or 'lohi'.")
dsf = self._get_fields(fs)
dsf.columns = key + fields
# Create the time series index
dsf = dsf.set_index(self.col_date).sort_index(ascending=ascending)
# Compute the average bid-ask spread
# (Ask - Bid) / midpoint
dsf['spread'] = (dsf.ask - dsf.bid) / ((dsf.ask + dsf.bid)/2.)
# Bid-ask spread cannot be negative
dsf['spread'] = dsf['spread'].clip(0, None)
bas = dsf.groupby('permno').rolling(window, min_periods=min_periods).spread.mean()
return(self._value_for_data(bas, data, ascending, useall))
def turnover(self, data, nperiods=1, caldays=None, min_periods=None, useall=True):
r""" Return the daily turnover over 'nperiods' (usually days).
Args:
data (DataFrame): User data.
Required columns: [permno, 'col_date']
nperiods (int): Number of periods (usually days)
to use to compute the turnover.
If positive, compute the turnover over
'nperiods' in the future. If negative, compute the
turnover over abs(nperiods) in the past.
useall (bool): If True, use the turnover of the last
available trading date (if nperiods<0) or the
turnover of the next available trading day
(if nperiods>0).
"""
# Get the window and sort
window, ascending = self._get_window_sort(nperiods, caldays, min_periods)
# Open the necessary data
key = self.key
fields = ['shrout', 'vol']
# Note: The number of shares outstanding (shrout) is in thousands.
# Note: The volume in the daily data is expressed in units of shares.
dsf = self._get_fields(fields)
# Some type conversion to make sure the rolling window will work.
dsf['shrout'] = dsf.shrout.astype('float32')
# Create the time series index
dsf = dsf.set_index(self.col_date).sort_index(ascending=ascending)
# Compute the average turnover
dsf['vol_sh'] = dsf.vol / (dsf.shrout * 1000)
turnover = dsf.groupby(self.col_id).rolling(window, min_periods=min_periods).vol_sh.mean()
return(self._value_for_data(turnover, data, ascending, useall))
def turnover_shu2000(self, data, nperiods=1, caldays=None, min_periods=None, useall=True):
r"""
Return the daily turnover over 'ndays' days.
Arguments:
data -- User data.
Required columns: [permno, 'col_date']
col_date -- Column of the dates at which to compute the turnover.
ndays -- Number of days to use to compute the turnover.
If positive, compute the turnover over
'ndays' in the future. If negative, compute the
turnover over abs(ndays) in the past.
useall -- If True, use the turnover of the last
available trading date (if ndays<0) or the
turnover of the next available trading day
(if ndays>0).
"""
# Get the window and sort
window, ascending = self._get_window_sort(nperiods, caldays, min_periods)
# Open the necessary data
key = self.key
fields = ['shrout', 'vol']
# Note: The number of shares outstanding (shrout) is in thousands.
# Note: The volume in the daily data is expressed in units of shares.
dsf = self._get_fields(fields)
# Create the time series index
dsf = dsf.set_index(self.col_date).sort_index(ascending=ascending)
# Compute the average turnover
dsf['vol_sh'] = dsf.vol / (dsf.shrout * 1000)
dsf['onemvs'] = 1. - dsf.vol_sh
turnover = 1 - dsf.groupby(self.col_id).rolling(window, min_periods=min_periods).onemvs.apply(np.prod, raw=True)
return(self._value_for_data(turnover, data, ascending, useall))
def age(self, data):
""" Age of the firm - Number of years with return history.
When no information is available from CRSP, computes the age with
the user data (assumes the user data is 'complete'). Otherwise, age
is the max of the user data's age and CRSP age.
"""
# Use the stocknames table to obtain the date range of permno identifiers
sn = self.open_data(self.stocknames, [self.col_id, 'namedt', 'nameenddt'])
# Open the user data
cols_data = [self.d.col_id, self.d.col_date, self.col_id]
dfu = self.open_data(data, cols_data)
index = dfu.index
# Compte age using user data only first
dfin = dfu.copy()
dfin['mindate'] = dfu.groupby(self.d.col_id)[self.d.col_date].transform('min')
dfin['age_data'] = dfin[self.d.col_date] - dfin.mindate
# Compute the age with CRSP data
## Min date per permno
sn = sn.groupby(self.col_id)['namedt'].min().reset_index()
sn = self._correct_columns_types(sn)
## Merge start with user data
dfin = dfin.merge(sn, how='left', on='permno')
## Compute age with earliest crsp date by user data id (gvkey if so)
dfin['age_crsp'] = dfin[self.d.col_date] - dfin.namedt
# Get the final age (max of data and crsp)
dfin['age'] = dfin.age_data
dfin.loc[dfin.age_crsp > dfin.age, 'age'] = dfin.age_crsp
dfin['age'] = dfin.age.dt.days / 365.
dfin.index = dfu.index
return(dfin.age.astype('float32'))
def beta(self, data, nperiods=1, caldays=None, min_periods=None, useall=True):
r"""
Return the beta coefficient computed over 'ndays' days.
The beta is the slope of the regression of daily stock returns
on equal-weighted market returns.
Arguments:
data -- User data.
Required columns: [permno, 'col_date']
col_date -- Column of the dates at which to compute the value.
ndays -- Number of days to use to compute the value.
If positive, compute over 'ndays' in the future.
If negative, compute over abs(ndays) in the past.
useall -- If True, use the value of the last
available trading date (if ndays<0) or the
value of the next available trading day
(if ndays>0).
"""
# Get the window and sort
window, ascending = self._get_window_sort(nperiods, caldays, min_periods)
# Open the necessary data
key = self.key
ret = 'ret'
mret = 'vwretd'
dsf = self._get_fields([ret])
# Get the value-weighted market returns
dsf[mret] = self._get_fields([mret], data=dsf, file=self.dsi)
# Create the time series index
dsf = dsf.set_index(self.col_date).sort_index(ascending=ascending)
# Compute the rolling beta
def compute_beta(dfg):
cov = dfg[[ret, mret]].rolling(window=window, min_periods=min_periods).cov()
cov = cov.unstack()[mret] # Gives cov(ret, mret) and cov(mret, mret)
beta = cov[ret] / cov[mret]
return beta
beta = dsf.groupby(self.col_id).apply(compute_beta)
return(self._value_for_data(beta, data, ascending, useall))
def delist(self, data, caldays=1):
r"""
Return the delist dummy.
Delist is at one if the firm is delisted within the next 'caldays' days
for financial difficulties.
Arguments:
data -- User data.
Required columns: [permno, 'col_date']
col_date -- Column of the dates at which to compute the value.
caldays -- Number of calendar days to consider.
The delist dummy equals one if there is a delisting in
the following caldays calendar days.
"""
# Open the necessary data
key = self.key
dse = self._get_fields(['dlstcd'], file=self.dse)
# Keep the correct delisting codes (for financial difficulties)
codes = [400, 401, 403, 450, 460, 470, 480, 490,
552, 560, 561, 572, 574, 580, 582]
# Remove missing values
dse = dse.dropna()
# Convert field to Integer
# dse.dlstcd = dse.dlstcd.astype(int)
# Create the delist dummy
dse.loc[dse.dlstcd.isin(codes), 'delist'] = 1
dse = dse[dse.delist==1]
dse['startdate'] = dse.date - pd.to_timedelta(caldays, unit='d')
dse['enddate'] = dse.date
dse = dse[['permno','startdate', 'enddate']]
# Open user data
cols_data = [self.col_id, self.d.col_date]
dfu = self.open_data(data, cols_data)
index = dfu.index
# Merge the dates and create the delist dummy
dfin = dfu.merge(dse, how='left', on=self.col_id)
dfin['delist'] = 0
dfin.loc[(dfin[self.d.col_date]>=dfin.startdate) &
(dfin[self.d.col_date]<=dfin.enddate), 'delist'] = 1
dfin.index = index
return(dfin.delist)
def crashrisk_hmt(self, data):
""" Crash Risk as defined in Hutton, Marcus and Tehranian (JFE 2009).
Note: Computing the crash risk requires both CRSP and Compustat data
as one needs to identify fiscal years. Therefore the availability of
this variable depends on the match CRSP-Compustat.
It is therefore recommended to add the result of this function to
Compustat data.
Arguments:
data -- User data.
Required columns: [permno, datadate]
"""
# Open the necessary data
## Get the returns
df = self._get_fields(['ret'])
## Get the value-weighted market index
df['vwretd'] = self._get_fields(['vwretd'], data=df, file=self.dsi)
# Identify the weeks
df['year'] = df.date.dt.year
df['week'] = df.date.dt.isocalendar().week
df['yw'] = df.year*100 + df.week
df.drop(columns=['year', 'week'], inplace=True)
# Compute weekly returns for stock and index
df['ln1ret'] = np.log(1+df.ret)
df['ln1vwret'] = np.log(1+df.vwretd)
ln1ret_week = df.groupby([self.col_id, 'yw']).ln1ret.sum()
ln1vwret_week = df.groupby([self.col_id, 'yw']).ln1vwret.sum()
ret_week = np.exp(ln1ret_week) - 1
vwret_week = np.exp(ln1vwret_week) - 1
dfret = pd.concat([ret_week, vwret_week], axis=1)
dfret.columns = ['ret', 'vwret']
# Regress stock returns on index (with lead and lag terms)
dfret['vwret_lag1'] = dfret.groupby(level='permno')['vwret'].shift(1)
dfret['vwret_lead1'] = dfret.groupby(level='permno')['vwret'].shift(-1)
dfret = dfret.reset_index()
# Drop missing values
dfret = dfret.dropna()
# Compute firm-specific weekly returns
## Note: one regression by permno
def spec_week_ret(dfg):
y = dfg.ret
X = dfg[['vwret_lag1', 'vwret', 'vwret_lead1']]
m = LinearRegression().fit(X, y)
return np.log(1 + (y-m.predict(X)))
dfsret = dfret.groupby('permno').apply(spec_week_ret)
dfsret = dfsret.reset_index()
dfsret.index = dfsret.level_1
# Add the specific weekly return to the return data
dfret['specret'] = dfsret['ret']
## Get compustat fiscal years
comp_freq = self.d.comp.freq
self.d.comp.set_frequency('Annual')
df_comp = self.d.comp.get_fields([])
self.d.comp.set_frequency(comp_freq)
## Add permno
df_comp['permno'] = self.permno_from_gvkey(df_comp)
#df_comp['datadate_m'] = df_comp.datadate
df_comp = df_comp[['permno', 'datadate']]
## Merge compustat datadate to CRSP data
### Create a date column corresponding to the end of the year-week.
dfret['year'] = (dfret.yw / 100).astype(int)
dfret['week'] = dfret.yw - (dfret.year*100)
dfret['ydt'] = pd.to_datetime(dfret.year, format='%Y')
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 21 11:31:36 2016
@author: zbarge
"""
import os
import sqlite3
import requests
import pandas as pd
from time import sleep
from .SimpleSQLite3 import SimpleSQLite3
#======================================================#
"""These lists provide information to the ListWise.parse_email method. """
# Enter domains to exclude from being considered valid
ILLEGAL_EMAIL_DOMAINS = []
ILLEGAL_EMAIL_CHARS = ["'", "/", "#", "\"",
"\\", "$", "%", "&",
")", "(", "*"]
EMAIL_CHARS_TO_SPLIT = [';', ' ', ',', ':', '|']
ILLEGAL_SCRUB_ITEMS = ILLEGAL_EMAIL_DOMAINS + ILLEGAL_EMAIL_CHARS
#=======================================================#
EMAILS_SQL_TABLE = """CREATE TABLE emails (
email VARCHAR (30) UNIQUE ON CONFLICT REPLACE,
email_status VARCHAR (30),
error_code VARCHAR (30),
error_msg VARCHAR (30),
free_mail VARCHAR (30),
insertdate DATETIME DEFAULT (DATETIME('now', 'localtime') ),
typo_fixed VARCHAR (30),
dealno INT (30) DEFAULT (0),
clean_type INT (30) DEFAULT (0),
email_id INTEGER PRIMARY KEY ON CONFLICT REPLACE AUTOINCREMENT,
updatedate DATETIME DEFAULT (DATETIME('now', 'localtime') )
);"""
DOMAINS_SQL_TABLE = """CREATE TABLE domains (
domain_id INTEGER PRIMARY KEY ON CONFLICT REPLACE AUTOINCREMENT,
domain VARCHAR(30) UNIQUE ON CONFLICT IGNORE,
valid INT(1))"""
TABLE_STRUCTURES = {'emails':EMAILS_SQL_TABLE, 'domains': DOMAINS_SQL_TABLE}
# Table names are emails, field names are email
email, emails, email2, emails2 = 'email', 'emails', 'email2', 'emails2'
EMAIL = 'email'
EMAILS = 'emails'
EMAIL2 = 'email2'
EMAILS2 = 'emails2'
DOMAIN = 'domain'
DOMAINS = 'domains'
#The email_status field from ListWise can contain any of the following statuses
#The only statuses considered valid are "clean", "catch-all"
#processing status data need to be rerun.
EMAIL_STATUS = 'email_status'
CLEAN = 'clean'
CATCHALL = 'catch-all'
PROCESSING = 'processing'
BADMX = 'bad-mx'
BOUNCED = 'bounced'
INVALID = 'invalid'
NOREPLY = 'no-reply'
SPAMTRAP = 'spam-trap'
SUSPICIOUS = 'suspicious'
UNKNOWN = 'unknown'
FREE_MAIL = 'free_mail'
TYPO_FIXED = 'typo_fixed'
ERROR_CODE = 'error_code'
class InvalidCredentialsError(Exception): pass
class ListWise:
"""
A python class wrapping the API to ListWise e-mail address cleaner.
https://www.listwisehq.com/email-address-cleaner/index.php
    This class allows the user to validate an individual
    e-mail address or a whole pandas DataFrame of addresses.
The responses from ListWise are stored in a
database and valid e-mails are returned to the DataFrame.
    Invalid e-mails are replaced with an empty string.
This process seems to take about 0.75 seconds per e-mail address.
"""
def __init__(self, database_path, username=None, api_key=None, test_credentials=True):
self._api_key = api_key
self._username = username
self._db_path = database_path
self._queued_emails = []
self._bad_emails = []
self._errored_responses = {}
self._db = SimpleSQLite3(self._db_path)
self._db.set_row_factory(sqlite3.Row)
self._create_tables()
if test_credentials:
self.test_credentials()
@property
def db(self):
"""The connection to PandaLite/SQLite database. """
return self._db
def test_credentials(self):
"""Checks a ListWise API response and
raises an InvalidCredentialsError if the credentials
are invalid. Returns True otherwise."""
data = self._deep_clean('<EMAIL>')
error = data.get(ERROR_CODE, None)
if error in (1,2):
raise InvalidCredentialsError("Credentials are invalid for user '{}'".format(self._username))
return True
def _create_tables(self):
for table,contents in TABLE_STRUCTURES.items():
if not self.db.sql_exists(table):
with self.db.con:
self.db.cur.execute(contents)
def _insert_response(self, r, dealno=0, clean_type=0, table=EMAILS):
"""
r: response dictionary from self._quick_clean or self._deep_clean
dealno: default (0), the deal number of the deal for the data being processed.
clean_type: 0 = quick_clean, 1 = deep_clean
"""
sql = """INSERT INTO {} (email,email_status,free_mail,typo_fixed,dealno,clean_type)
VALUES ('{}','{}','{}','{}',{},{});""".format(
table, r[EMAIL], r[EMAIL_STATUS], r[FREE_MAIL], r[TYPO_FIXED], dealno, clean_type)
self.db.cur.execute(sql)
def _parse_valid_response(self, email, resp):
try:
if resp[EMAIL_STATUS] in [CLEAN, CATCHALL]:
return resp[EMAIL]
elif resp[EMAIL_STATUS] == PROCESSING:
return resp[EMAIL]
else:
return ''
except (KeyError,TypeError):
return ''
def get_domain(self, email):
"""
Either returns all characters after '@'
or returns None if no '@' exists.
"""
try:
            return str(email).split('@')[1]
except:
return None
def parse_email(self, email):
"""
Validates the basic components of an
email address with a few tests.
Returns the lowercased email address if tests pass.
Returns an empty string if a test fails.
"""
if not email:
return ''
email = str(email).lower().replace('.comhome','.com')
for item in EMAIL_CHARS_TO_SPLIT:
if item in email:
email = email.split(item)[0]
for item in ILLEGAL_SCRUB_ITEMS:
if item in email:
return ''
if not "@" in email:
return ''
elif not "." in email:
return ''
elif not len(email) > 5:
return ''
return email
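    # Hedged example (illustrative, not from the original source): parse_email is a
    # pure string check, so it can be exercised without ListWise credentials. The
    # database path, user name and key below are placeholders.
    #   lw = ListWise('emails.db', username='me', api_key='KEY', test_credentials=False)
    #   lw.parse_email('John.Doe@Example.com ; other@x.com')  # -> 'john.doe@example.com'
    #   lw.parse_email('no-at-sign')                          # -> ''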
def pre_process_frame(self, df, col=None):
"""Runs class method parse_email,
drops duplicates,
and then drops records with no email address. """
col = (EMAIL if not col else col)
df.loc[:,col] = df.loc[:,col].apply(self.parse_email)
df.drop_duplicates([col],inplace=True)
return self.drop_missing_emails(df,col=col)
def _quick_clean(self, email):
url = "https://api.listwisehq.com/clean/quick.php?email={}&api_key={}".format(email, self._api_key)
return requests.get(url).json()
def _deep_clean(self, email):
url = "https://api.listwisehq.com/clean/deep.php?email={}&api_key={}".format(email, self._api_key)
return requests.get(url).json()
def delete_email(self, email):
"""Deletes an email address from the emails table.
You must commit/rollback the transaction on your own."""
sql = "DELETE FROM emails WHERE email = '{}'".format(
email)
self.db.cur.execute(sql)
def check_db(self, email, clean_type=1):
"""
Checks the database for a matching clean/catchall status email address.
If one is found, it is returned as {'email': '<EMAIL>'}
Returns None if no match was found.
"""
try:
sql = """
SELECT * FROM emails WHERE email = '{}'
AND clean_type = {}
AND email_status IN('clean','catch-all')
LIMIT 1
""".format(email, clean_type)
self.db.cur.execute(sql)
resp = self.db.cur.fetchone()
if resp:
return {EMAIL:resp[EMAIL]}
except:
print("sql error: {}".format(sql))
return None
def db_clean_one(self, email, clean_type=1):
"""Cleans an email address by checking the local database (and thats it)
returning None if no match exists. """
res = self.check_db(email,clean_type=clean_type)
if res:
return res[EMAIL]
def quick_clean_one(self, email, dealno=0):
if not pd.notnull(email) or not email:
return email
resp = self._quick_clean(email)
#print("{}: {}".format(resp['email'],resp['email_status']))
error = resp.get(ERROR_CODE, None)
if error:
self._errored_responses.update({email:resp})
return email
self._insert_response(resp,dealno=dealno,clean_type=0)
return self._parse_valid_response(email,resp)
def quick_clean_one2(self, email, dealno=0):
resp = self.check_db(email, clean_type=0)
try:
return resp[EMAIL]
except:
return self.quick_clean_one(email,dealno=dealno)
def quick_clean_frame(self, df, email_col=None, clean_col='EMAIL_CLEANED', dealno=0):
email_col = (EMAIL if not email_col else email_col)
clean_col = (email_col if not clean_col else clean_col)
df.loc[:,clean_col] = df.loc[:,email_col].apply(self.parse_email).apply(self.quick_clean_one2,args=([dealno]))
self.db.con.commit()
return df
def deep_clean_one(self, email, dealno=0):
"""
Checks the email against the deep clean API and inserts the response into the database.
Note: The database must be committed after running this to make changes stick.
"""
if not pd.notnull(email) or not email:
return email
resp = self._deep_clean(email)
#print("{}: {}".format(resp['email'],resp['email_status']))
error = resp.get(ERROR_CODE,None)
if error:
self._errored_responses.update({email:resp})
return email
self._insert_response(resp, dealno=dealno, clean_type=1)
return self._parse_valid_response(email, resp)
def deep_clean_one2(self, email, dealno=0):
"""
Checks the email address against the database and tries to return a result.
If no result, reruns the email against the API.
Note: The database must be committed after running this to make changes stick.
"""
resp = self.check_db(email, clean_type=1)
try:
return resp[EMAIL]
except:
return self.deep_clean_one(email, dealno=dealno)
def deep_clean_frame(self, df, email_col=None, clean_col='EMAIL_CLEANED', dealno=0):
"""
Cleans a pandas.DataFrame using the deep_clean_one2 class method.
Cleaned emails are stored in the database for future use.
PARAMETERS:
============
df - pandas.DataFrame object with a column of email addresses
email_col - (string) of the name of the
column containing email addresses
clean_col - (string) of the name of the column to store
clean email addresses, or None to replace the email_col
dealno - (int) Defaults to 0, optionally store a deal number
with each email address cleaned.
"""
email_col = (EMAIL if not email_col else email_col)
if not clean_col:
clean_col = email_col
df.loc[:,clean_col] = df.loc[:,email_col].apply(self.parse_email).apply(self.deep_clean_one2,args=([dealno]))
self.db.con.commit()
return df
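    # Hedged usage sketch (file names, credentials and deal number are hypothetical):
    #   lw = ListWise('emails.db', username='me', api_key='KEY')
    #   df = pd.read_csv('leads.csv')
    #   df = lw.deep_clean_frame(df, email_col='EMAIL', clean_col='EMAIL_CLEANED', dealno=42)
    #   lw.deep_processing_rerun(dealno=42)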
def deep_processing_rerun_all(self):
"""
Pulls records that are still in the processing status and reruns them against
the deep clean API. New responses are passed back into the database.
"""
sql = """SELECT * FROM emails
WHERE email_status = 'processing'
AND clean_type = 1"""
df = self.db.read_sql(sql)
for i in range(df.index.size):
rec = df.loc[i, :]
self.deep_clean_one(rec[EMAIL], dealno=rec['dealno'])
self.db.con.commit()
print('Reprocessed {} records that were stuck in the processing status'.format(df.index.size))
def deep_processing_rerun(self, dealno=0, thresh=0.05, max_tries=5):
"""Reprocesses records that are in 'processing' up to max_tries times
or until the count of unprocessed records is less than thresh. """
assert thresh < 1 and thresh > 0, "The threshold parameter should be a decimal less than 1 and greater than 0."
self.db.cur.execute("SELECT count(*) as count FROM emails WHERE dealno = {}".format(dealno))
count = self.db.cur.fetchone()['count']
thresh = thresh * count
sql = """SELECT * FROM emails
WHERE dealno = {}
AND email_status = 'processing'
""".format(dealno)
df_processing = self.db.read_sql(sql)
tries = 0
while df_processing.index.size > 0:
tries += 1
ct = df_processing.index.size
if tries > max_tries:
raise Exception("Tried to reprocess {}x with no luck...giving up.".format(max_tries))
if tries > 1:
print("Waiting 2 minutes before attempt #{} of reprocessing.".format(tries))
sleep(60 * 2)
if tries > 2 and ct < thresh:
break
if ct > 0:
print("Reprocessing {} records for deal {}".format(ct,dealno))
else:
return print("Found no records to reprocess")
df_processing = self.db.read_sql(sql)
self.deep_clean_frame(df_processing, email_col=EMAIL,clean_col=None,dealno=dealno)
print("Deep processing rerun completed successfully on deal {}".format(dealno))
def merge_email_frame(self, df, col=None, sql=None):
"""Merges a pandas.DataFrame with clean emails matching from the database.
As this database grows, this operation will take longer to select & join...
we need to just suppress the bad ones."""
sql = ("SELECT email,email_status from emails WHERE email_status IN('clean','catch-all')"
if not sql else sql)
col = (EMAIL if not col else col)
clean_df = self.db.read_sql(sql)
clean_df.rename(columns={EMAIL:col}, inplace=True)
df.loc[:,col] = df.loc[:,col].apply(self.parse_email)
df = df.dropna(subset=[col])
df = pd.merge(df, clean_df, how='inner', left_on=col, right_on=col)
return df[df[col] != '']
def suppress_email_frame(self, df, col='EMAIL', clean_type=1):
"""The opposite of deep_email_merge class method.
Bad emails are pulled from the database and suppressed from the original data."""
        assert clean_type in (0, 1), "Invalid clean_type {}, must be 0 or 1.".format(clean_type)
sql = """
SELECT email,email_status from emails
WHERE email_id NOT IN
(SELECT email_id
FROM emails
WHERE email_status IN('clean','catch-all'))
AND clean_type = {}
""".format(clean_type)
bad_df = self.db.read_sql(sql)
bad_df.rename(columns={EMAIL:col},inplace=True)
df.loc[:,col] = df.loc[:,col].apply(self.parse_email)
df = df.dropna(subset=[col])
df2 = pd.merge(df, bad_df, how='left', left_on=col, right_on=col)
        statuses = [x for x in df2.loc[:, 'email_status'].unique() if pd.notnull(x)]
import numpy as np
import pandas as pd
from anubis.models import TheiaSession, Assignment
def get_theia_sessions(course_id: str = None) -> pd.DataFrame:
"""
Get all theia session objects, and throw them into a dataframe
:return:
"""
# Get all the theia session sqlalchemy objects
if course_id is not None:
raw_theia_sessions = TheiaSession.query.join(Assignment).filter(Assignment.course_id == course_id).all()
else:
raw_theia_sessions = TheiaSession.query.filter(TheiaSession.playground == True).all()
# Specify which columns we want
columns = ["id", "owner_id", "assignment_id", "image_id", "created", "ended"]
    # Build a dataframe from the columns we pull out of each theia session object
theia_sessions = pd.DataFrame(
data=list(
map(
lambda x: ({column: getattr(x, column) for column in columns}),
raw_theia_sessions,
)
),
columns=columns,
)
# Round the timestamps to the nearest hour
theia_sessions["created"] = theia_sessions["created"].apply(lambda date: | pd.to_datetime(date) | pandas.to_datetime |
#!/usr/bin/env python
import matplotlib.pyplot as plt
import json
import pandas as pd
import sys
import signal
import time
fname = sys.argv[1]
plt.ion()
fig = plt.figure()
def readStats():
f = open(fname, 'r')
m = json.load(f)
f.close()
plt.clf()
    data = pd.DataFrame(m['heap'])
from .neural_model import NeuralModel
from warnings import warn, catch_warnings
import numpy as np
import pandas as pd
from sklearn.linear_model import PoissonRegressor
from tqdm import tqdm
class PoissonGLM(NeuralModel):
def __init__(self, design_matrix, spk_times, spk_clu,
binwidth=0.02, metric='dsq', model='default', alpha=0,
train=0.8, blocktrain=False, mintrials=100, subset=False):
super().__init__(design_matrix, spk_times, spk_clu,
binwidth, train, blocktrain, mintrials, subset)
self.metric = metric
if model == 'default':
self.fit_intercept = True
elif model == 'no_intercept':
self.fit_intercept = False
else:
raise ValueError('model must be \'default\' or \'no_intercept\'')
self.alpha = alpha
def _fit(self, dm, binned, cells=None, noncovwarn=False):
"""
        Fit a GLM using the scikit-learn implementation of PoissonRegressor. Uses a regularization
        strength parameter alpha, which sets the strength of the ridge regularization term.
Parameters
----------
dm : numpy.ndarray
Design matrix, in which rows are observations and columns are regressor values. Should
NOT contain a bias column for the intercept. Scikit-learn handles that.
binned : numpy.ndarray
Vector of observed spike counts which we seek to predict. Must be of the same length
as dm.shape[0]
alpha : float
Regularization strength, applied as multiplicative constant on ridge regularization.
cells : list
List of cells which should be fit. If None is passed, will default to fitting all cells
in clu_ids
"""
if cells is None:
cells = self.clu_ids.flatten()
coefs = pd.Series(index=cells, name='coefficients', dtype=object)
        intercepts = pd.Series(index=cells, name='intercepts')
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 16 18:03:13 2017
@author: lfiorito
"""
import pdb
import os
import logging
from collections import Counter
from functools import reduce
import numpy as np
import pandas as pd
from sandy.formats.records import read_cont
from sandy.formats import (mf1,
mf3,
mf4,
mf5,
mf8,
mf33,
mf34,
mf35,
)
from sandy.formats.utils import (
Xs,
Lpc,
Fy,
XsCov,
EdistrCov,
LpcCov,
triu_matrix,
corr2cov,
)
from sandy.settings import SandyError
from sandy.functions import find_nearest
__author__ = "<NAME>"
__all__ = ["Endf6", "Errorr", "Gendf"]
#def split_endf(text):
# """
# Read ENDF-6 formatted file and split it into columns based on field widths:
# C1 C2 L1 L2 N1 N2 MAT MF MT
# 11 11 11 11 11 11 4 2 3.
# Store list in dataframe.
# """
# from io import StringIO
# def read_float(x):
# try:
# return float(x[0] + x[1:].replace('+', 'E+').replace('-', 'E-'))
# except:
# return x
# widths = [11,11,11,11,11,11,4,2,3]
# columns = ["C1", "C2", "L1", "L2", "N1", "N2","MAT", "MF", "MT"]
# converters = dict(zip(columns[:6],[read_float]*6))
# frame = pd.read_fwf(StringIO(text), widths=widths, names=columns, converters=converters)
# return frame.query("MAT>0 & MF>0 & MT>0")
#
#
class _BaseFile(pd.DataFrame):
"""This class is to be inherited by all classes that parse and analyze
nuclear data evaluated files in ENDF-6 or derived (ERRORR) formats.
**Index**:
- MAT : (`int`) MAT number to identify the isotope
- MF : (`int`) MF number to identify the data type
- MT : (`int`) MT number to identify the reaction
**Columns**:
- TEXT : (`string`) MAT/MF/MT section reported as a single string
Attributes
----------
labels : `list` of `str`
index labels MAT, MT and MT
Methods
-------
add_sections
Collapse two tapes into a single one
delete_sections
Delete sections from the dataframe
filter_by
Filter dataframe based on MAT, MF, MT lists
from_file
Create dataframe by reading a endf6 file
from_text
Create dataframe from endf6 text in string
Raises
------
`SandyError`
if the tape is empty
`SandyError`
if the same combination MAT/MF/MT is found more than once
"""
labels = ['MAT', 'MF', 'MT']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.empty:
raise SandyError("tape is empty")
self.index.names = self.labels
self.columns = ["TEXT"]
self.sort_index(level=self.labels, inplace=True)
if self.index.duplicated().any():
raise SandyError("found duplicate MAT/MF/MT")
@classmethod
def from_file(cls, file):
"""Create dataframe by reading a file.
Parameters
----------
file : `str`
file name
Returns
-------
`sandy.formats.endf6.BaseFile` or derived instance
Dataframe containing ENDF6 data grouped by MAT/MF/MT
"""
with open(file) as f:
text = f.read()
return cls.from_text(text)
@classmethod
def from_text(cls, text):
"""Create dataframe from endf6 text in string.
Parameters
----------
text : `str`
string containing the evaluated data
Returns
-------
`sandy.formats.endf6.BaseFile` or derived instance
Dataframe containing ENDF6 data grouped by MAT/MF/MT
"""
from io import StringIO
tape = pd.read_fwf(
StringIO(text),
widths = [66, 4, 2, 3],
names = ["TEXT", "MAT", "MF", "MT"],
converters = {"MAT" : np.int, "MF" : np.int, "MT" : np.int},
usecols = cls.labels
)
tape["TEXT"] = text.splitlines(True)
tape = tape.loc[(tape.MAT>0) & (tape.MF>0) & (tape.MT>0)]. \
groupby(cls.labels). \
apply(lambda x: "".join(x.TEXT.values)). \
to_frame()
return cls(tape)
def add_sections(self, tape):
"""Collapse two tapes into a single one.
If MAT/MF/MT index is present in both tapes, take it from the second.
Parameters
----------
tape : `sandy.formats.endf6.BaseFile` or derived instance
dataframe for ENDF-6 formatted file
Returns
-------
`sandy.formats.endf6.BaseFile` or derived instance
dataframe with merged content
"""
outdf = pd.concat([pd.DataFrame(self), tape]). \
reset_index(). \
drop_duplicates(self.labels, keep='last'). \
set_index(self.labels)
return self.__class__(outdf)
def delete_sections(self, *tuples):
"""Given a sequence of tuples (MAT,MF,MT), delete the corresponding sections
from the dataframe.
Parameters
----------
tuples : sequence of `tuple`
each tuple should have the format (MAT, MF, MT)
To delete, say, a given MF independentently from the MAT and MT, assign `None`
to the MAT and MT position in the tuple.
Returns
-------
`sandy.formats.endf6.BaseFile` or derived instance
dataframe without given sections
"""
queries = []
for mat,mf,mt in tuples:
conditions = []
if mat is not None:
conditions.append("MAT == {}".format(mat))
if mf is not None:
conditions.append("MF == {}".format(mf))
if mt is not None:
conditions.append("MT == {}".format(mt))
if not conditions:
continue
queries.append("not (" + " & ".join(conditions) + ")")
if not queries:
logging.warn("given MAT/MF/MT sections were not found")
return self
else:
query = " & ".join(queries)
newdf = self.query(query)
return self.__class__(newdf)
def filter_by(self, listmat=None, listmf=None, listmt=None):
"""Filter dataframe based on MAT, MF, MT lists.
Parameters
----------
listmat : `list` or `None`
list of requested MAT values (default is `None`: use all MAT)
listmf : `list` or `None`
list of requested MF values (default is `None`: use all MF)
listmt : `list` or `None`
list of requested MT values (default is `None`: use all MT)
Returns
-------
`sandy.formats.endf6.BaseFile` or derived instance
Copy of the original instance with filtered MAT, MF and MT sections
"""
_listmat = range(1,10000) if listmat is None else listmat
_listmf = range(1,10000) if listmf is None else listmf
_listmt = range(1,10000) if listmt is None else listmt
cond_mat = self.index.get_level_values("MAT").isin(_listmat)
cond_mf = self.index.get_level_values("MF").isin(_listmf)
cond_mt = self.index.get_level_values("MT").isin(_listmt)
df = self.loc[cond_mat & cond_mf & cond_mt]
return self.__class__(df)
@property
def mat(self):
return sorted(self.index.get_level_values("MAT").unique())
@property
def mf(self):
return sorted(self.index.get_level_values("MF").unique())
@property
def mt(self):
return sorted(self.index.get_level_values("MT").unique())
def get_file_format(self):
"""Determine ENDF-6 format type by reading flags "NLIB" and "LRP" of first MAT in file:
* `NLIB = -11 | NLIB = -12` : errorr
* `NLIB = -1` : gendf
* `LRP = 2` : pendf
* `LRP != 2` : endf6
Returns
-------
`str`
type of ENDF-6 format
"""
lines = self.TEXT.loc[self.mat[0], 1, 451].splitlines()
C, i = read_cont(lines, 0)
if C.N1 == -11 or C.N1 == -12:
ftype = "errorr"
elif C.N1 == -1:
ftype = "gendf"
else:
if C.L1 == 2:
ftype = "pendf"
else:
ftype = "endf6"
return ftype
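# Hedged usage sketch (the file name is a placeholder for any ENDF-6 evaluation):
#   tape = Endf6.from_file("n-092_U_235.endf")
#   tape.mat, tape.mf, tape.mt           # available MAT/MF/MT sections
#   xs = tape.get_xs(listmt=[1, 2, 18])  # total, elastic and fission cross sections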
class Endf6(_BaseFile):
"""Class to contain the content of ENDF-6 files, grouped by MAT/MF/MT.
**Index**:
- MAT : (`int`) MAT number to identify the isotope
- MF : (`int`) MF number to identify the data type
- MT : (`int`) MT number to identify the reaction
**Columns**:
- TEXT : (`string`) MAT/MF/MT section reported as a single string
Methods
-------
"""
def get_nsub(self):
"""Determine ENDF-6 sub-library type by reading flag "NSUB" of first MAT in file:
* `NSUB = 10` : Incident-Neutron Data
* `NSUB = 11` : Neutron-Induced Fission Product Yields
Returns
-------
`int`
NSUB value
"""
return self.read_section(self.mat[0], 1, 451)["NSUB"]
def read_section(self, mat, mf, mt):
"""Parse MAT/MF/MT section.
"""
if mf == 1:
foo = mf1.read
elif mf == 3:
foo = mf3.read
elif mf == 4:
foo = mf4.read
elif mf == 5:
foo = mf5.read
elif mf == 8:
foo = mf8.read
elif mf == 33 or mf == 31:
foo = mf33.read
elif mf == 34:
foo = mf34.read
elif mf == 35:
foo = mf35.read
else:
raise SandyError("SANDY cannot parse section MAT{}/MF{}/MT{}".format(mat,mf,mt))
if (mat,mf,mt) not in self.index:
raise SandyError("section MAT{}/MF{}/MT{} is not in tape".format(mat,mf,mt))
return foo(self.loc[mat,mf,mt].TEXT)
def write_string(self, title=" "*66, skip_title=False, skip_fend=False):
"""Collect all rows in `Endf6` and write them into string.
Parameters
----------
title : `str`
title of the file
skip_title : `bool`
do not write the title
skip_fend : `bool`
do not write the last FEND line
Returns
-------
`str`
"""
from .records import write_cont
tape = self.copy()
string = ""
if not skip_title:
string += "{:<66}{:4}{:2}{:3}{:5}\n".format(title, 1, 0, 0, 0)
for mat,dfmat in tape.groupby('MAT', sort=True):
for mf,dfmf in dfmat.groupby('MF', sort=True):
for mt,dfmt in dfmf.groupby('MT', sort=True):
for text in dfmt.TEXT:
string += text.encode('ascii', 'replace').decode('ascii')
string += "{:<66}{:4}{:2}{:3}{:5}\n".format(*write_cont(*[0]*6), int(mat), int(mf), 0, 99999)
string += "{:<66}{:4}{:2}{:3}{:5}\n".format(*write_cont(*[0]*6), int(mat), 0, 0, 0)
string += "{:<66}{:4}{:2}{:3}{:5}\n".format(*write_cont(*[0]*6), 0, 0, 0, 0)
if not skip_fend:
string += "{:<66}{:4}{:2}{:3}{:5}".format(*write_cont(*[0]*6), -1, 0, 0, 0)
return string
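    # Hedged sketch: round-trip a tape back to disk (the output path is a placeholder).
    #   with open("copy.endf", "w") as f:
    #       f.write(tape.write_string(title="copy produced by sandy"))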
def get_xs(self, listmat=None, listmt=None):
""" Extract selected cross sections (xs).
xs are linearized on unique grid.
Missing points are linearly interpolated (use zero when out of domain).
Conditions:
- Interpolation law must be lin-lin
- No duplicate points on energy grid
"""
condition = self.index.get_level_values("MF") == 3
tape = self[condition]
if listmat is not None:
conditions = [tape.index.get_level_values("MAT") == x for x in listmat]
condition = reduce(lambda x,y: np.logical_or(x, y), conditions)
tape = tape[condition]
if listmt is not None:
conditions = [tape.index.get_level_values("MT") == x for x in listmt]
condition = reduce(lambda x,y: np.logical_or(x, y), conditions)
tape = tape[condition]
ListXs = []
for ix,text in tape.TEXT.iteritems():
X = self.read_section(*ix)
xs = pd.Series(X["XS"], index=X["E"], name=(X["MAT"],X["MT"])).rename_axis("E").to_frame()
duplicates = [x for x, count in Counter(xs.index).items() if count > 1]
if duplicates:
raise SandyError('duplicate energy points found for MAT{}/MF{}/MT{}\n'.format(*ix) +
'\n'.join(map(str,duplicates)))
if X['INT'] != [2]:
raise SandyError('MAT{}/MF{}/MT{} interpolation scheme is not lin-lin'.format(*ix))
ListXs.append(xs)
if not ListXs:
logging.warn("requested cross sections were not found")
return pd.DataFrame()
frame = reduce(lambda left,right : pd.merge(left, right, left_index=True, right_index=True, how='outer'), ListXs).sort_index().interpolate(method='slinear', axis=0).fillna(0)
return Xs(frame)
def update_xs(self, xsFrame):
from .mf3 import write
tape = self.copy()
mf = 3
for (mat,mt),xsSeries in xsFrame.iteritems():
if (mat,mf,mt) not in self.index: continue
sec = self.read_section(mat,mf,mt)
# Cut threshold xs
ethresh = sec["E"][0]
xsSeries = xsSeries.where(xsSeries.index >= ethresh).dropna()
# iNotZero = next((i for i,x in enumerate(xsSeries) if x), None)
# if iNotZero > 0: xsSeries = xsSeries.iloc[iNotZero-1:]
sec["E"] = xsSeries.index.values
sec["XS"] = xsSeries.values
# Assume all xs have only 1 interpolation region and it is linear
sec["NBT"] = [xsSeries.size]
sec["INT"] = [2]
text = write(sec)
tape.loc[mat,mf,mt].TEXT = text
return Endf6(tape)
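    # Hedged sketch of the perturb-and-update workflow (the 5% factor is illustrative):
    #   xs = tape.get_xs()
    #   new_tape = tape.update_xs(xs * 1.05)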
def get_nubar(self, listmat=None, listmt=None):
"""
Extract selected nubar.
nubar are linearized on unique grid.
Missing points are linearly interpolated (use zero when out of domain).
Conditions:
- Interpolation law must be lin-lin
- No duplicate points on energy grid
"""
condition = self.index.get_level_values("MF") == 1
tape = self[condition]
conditions = [tape.index.get_level_values("MT") == x for x in [452, 455, 456]]
condition = reduce(lambda x,y: np.logical_or(x, y), conditions)
tape = tape[condition]
if listmat is not None:
conditions = [tape.index.get_level_values("MAT") == x for x in listmat]
condition = reduce(lambda x,y: np.logical_or(x, y), conditions)
tape = tape[condition]
if listmt is not None:
conditions = [tape.index.get_level_values("MT") == x for x in listmt]
condition = reduce(lambda x,y: np.logical_or(x, y), conditions)
tape = tape[condition]
# query = "MF==1 & (MT==452 | MT==455 | MT==456)"
# if listmat is not None:
# query_mats = " | ".join(["MAT=={}".format(x) for x in listmat])
# query += " & ({})".format(query_mats)
# if listmt is not None:
# query_mts = " | ".join(["MT=={}".format(x) for x in listmt])
# query += " & ({})".format(query_mts)
# tape = self.query(query)
ListXs = []
for ix,text in tape.TEXT.iteritems():
X = self.read_section(*ix)
xs = pd.Series(X["NUBAR"], index=X["E"], name=(X["MAT"],X["MT"])).rename_axis("E").to_frame()
duplicates = [x for x, count in Counter(xs.index).items() if count > 1]
if duplicates:
raise SandyError('duplicate energy points found for MAT{}/MF{}/MT{}\n'.format(*ix) +
'\n'.join(map(str,duplicates)))
if X['INT'] != [2]:
raise SandyError('MAT{}/MF{}/MT{} interpolation scheme is not lin-lin'.format(*ix))
ListXs.append(xs)
if not ListXs:
logging.warn("no fission neutron multiplicity was found")
return pd.DataFrame()
frame = reduce(lambda left,right : pd.merge(left, right, left_index=True, right_index=True, how='outer'), ListXs).sort_index().interpolate(method='slinear', axis=0).fillna(0)
return Xs(frame)
def update_nubar(self, xsFrame):
from .mf1 import write
tape = self.copy()
mf = 1
for (mat,mt),S in xsFrame.iteritems():
if (mat,mf,mt) not in self.index: continue
sec = self.read_section(mat,mf,mt)
# Cut threshold xs
            iNotZero = next((i for i, x in enumerate(S) if x), None)
            if iNotZero is not None and iNotZero > 0: S = S.iloc[iNotZero-1:]
sec["E"] = S.index.values
sec["NUBAR"] = S.values
# Assume all xs have only 1 interpolation region and it is linear
sec["NBT"] = [S.size]
sec["INT"] = [2]
text = write(sec)
tape.loc[mat,mf,mt].TEXT = text
return Endf6(tape)
def update_edistr(self, edistrFrame):
from .mf5 import write
mf = 5
tape = self.copy()
for (mat,mt),S in edistrFrame.groupby(["MAT","MT"]):
if (mat,mf,mt) not in self.index: continue
sec = self.read_section(mat,mf,mt)
for k,S in S.groupby(["K"]):
if sec["PDISTR"][k]["LF"] != 1: continue
ein_orig = sorted(sec["PDISTR"][k]["EIN"].keys())
for ein in S.index.get_level_values("EIN"):
edistr = S.loc[mat,mt,k,ein].values
eout = S.loc[mat,mt,k,ein].index.values
ein_found = find_nearest(ein_orig, ein)[1]
mask = np.in1d(eout, sec["PDISTR"][k]["EIN"][ein_found]["EOUT"])
edistr = edistr[mask]
eout = eout[mask]
dict_distr = {"EDISTR" : edistr,
"EOUT" : eout,
"NBT" : [len(eout)],
"INT" : [2]}
sec["PDISTR"][k]["EIN"].update({ein : dict_distr})
sec["PDISTR"][k]["NBT_EIN"] = [len(sec["PDISTR"][k]["EIN"])]
sec["PDISTR"][k]["INT_EIN"] = [2]
text = write(sec)
tape.loc[mat,mf,mt].TEXT = text
return Endf6(tape)
def get_edistr_cov(self, listmat=None, listmt=None):
condition = self.index.get_level_values("MF") == 35
tape = self[condition]
if listmat is not None:
conditions = [tape.index.get_level_values("MAT") == x for x in listmat]
condition = reduce(lambda x,y: np.logical_or(x, y), conditions)
tape = tape[condition]
if listmt is not None:
conditions = [tape.index.get_level_values("MT") == x for x in listmt]
condition = reduce(lambda x,y: np.logical_or(x, y), conditions)
tape = tape[condition]
# query = "MF==35"
# if listmat is not None:
# query_mats = " | ".join(["MAT=={}".format(x) for x in listmat])
# query += " & ({})".format(query_mats)
# if listmt is not None:
# query_mts = " | ".join(["MT=={}".format(x) for x in listmt])
# query += " & ({})".format(query_mts)
# tape = self.query(query)
List = []; eg = set()
for ix,text in tape.TEXT.iteritems():
X = self.read_section(*ix)
mat = X['MAT']; mt = X['MT']
for sub in X["SUB"].values():
# Ek grid is one unit longer than covariance.
Ek = np.array(sub["EK"])
Fkk = np.array(sub["FKK"])
NE = sub["NE"]
cov = triu_matrix(Fkk, NE-1)
# Normalize covariance matrix dividing by the energy bin.
dE = 1./(Ek[1:]-Ek[:-1])
cov = corr2cov(cov, dE)
# Add zero row and column at the end of the matrix
cov = np.insert(cov, cov.shape[0], [0]*cov.shape[1], axis=0)
cov = np.insert(cov, cov.shape[1], [0]*cov.shape[0], axis=1)
cov = pd.DataFrame(cov, index=Ek, columns=Ek)
eg |= set(cov.index.values)
List.append([mat, mt, sub["ELO"], sub["EHI"], cov])
if not List:
logging.warn("no energy distribution covariance found")
return pd.DataFrame()
frame = pd.DataFrame(List, columns=('MAT', 'MT', 'ELO', 'EHI', 'COV'))
eg = sorted(eg)
frame.COV = frame.COV.apply(lambda x:cov_interp(x, eg))
# From here, the method is identical to Errorr.get_cov()
# Except that the size of eg is equal to the size of each matrix (we include the value for 2e7)
# and that the indexes are different
MI = [(mat,mt,elo,ehi,e) for mat,mt,elo,ehi in sorted(set(zip(frame.MAT, frame.MT, frame.ELO, frame.EHI))) for e in eg]
index = pd.MultiIndex.from_tuples(MI, names=("MAT", "MT", 'ELO', 'EHI', "EOUT"))
# initialize union matrix
matrix = np.zeros((len(index),len(index)))
for i,row in frame.iterrows():
ix = index.get_loc((row.MAT,row.MT,row.ELO,row.EHI))
ix1 = index.get_loc((row.MAT,row.MT,row.ELO,row.EHI))
matrix[ix.start:ix.stop,ix1.start:ix1.stop] = row.COV
i_lower = np.tril_indices(len(index), -1)
matrix[i_lower] = matrix.T[i_lower] # make the matrix symmetric
return EdistrCov(matrix, index=index, columns=index)
def get_lpc(self, listmat=None, listmt=None, verbose=True):
condition = self.index.get_level_values("MF") == 4
tape = self[condition]
if listmat is not None:
conditions = [tape.index.get_level_values("MAT") == x for x in listmat]
condition = reduce(lambda x,y: np.logical_or(x, y), conditions)
tape = tape[condition]
if listmt is not None:
conditions = [tape.index.get_level_values("MT") == x for x in listmt]
condition = reduce(lambda x,y: np.logical_or(x, y), conditions)
tape = tape[condition]
DictLpc = {}
for ix,text in tape.TEXT.iteritems():
X = self.read_section(*ix)
if "LPC" not in X: continue
if X["LPC"]["INT"] != [2]:
if verbose:
logging.warn("found non-linlin interpolation, skip angular distr. for MAT{}/MF{}/MT{}".format(*ix))
continue
for e,v in X["LPC"]["E"].items():
DictLpc.update({(X["MAT"], X["MT"],e) : pd.Series([1]+v["COEFF"])})
if not DictLpc:
logging.warn("no angular distribution in Legendre expansion was found")
return pd.DataFrame()
frame = pd.DataFrame.from_dict(DictLpc, orient="index")
return Lpc(frame)
def update_lpc(self, lpcFrame):
from .mf4 import write
mf = 4
tape = self.copy()
for (mat,mt),S in lpcFrame.groupby(["MAT","MT"]):
if (mat,mf,mt) not in self.index: continue
sec = self.read_section(mat,mf,mt)
if "LPC" not in sec: continue
for e in S.loc[mat,mt].index:
if e in sec["LPC"]["E"]:
T = sec["LPC"]["E"][e]["T"]
LT = sec["LPC"]["E"][e]["LT"]
else:
T = LT = 0
coeff = S.loc[mat,mt,e].dropna().values[1:]
if len(coeff) == 0: continue
dict_distr = {"COEFF" : coeff, "LT" : LT, "T" : T}
sec["LPC"]["E"].update({e : dict_distr})
sec["LPC"]["NBT"] = [len(sec["LPC"]["E"])]
sec["LPC"]["INT"] = [2]
text = write(sec)
tape.loc[mat,mf,mt].TEXT = text
return Endf6(tape)
def get_lpc_cov(self, listmat=None, listmt=None):
condition = self.index.get_level_values("MF") == 34
tape = self[condition]
if listmat is not None:
conditions = [tape.index.get_level_values("MAT") == x for x in listmat]
condition = reduce(lambda x,y: np.logical_or(x, y), conditions)
tape = tape[condition]
if listmt is not None:
conditions = [tape.index.get_level_values("MT") == x for x in listmt]
condition = reduce(lambda x,y: np.logical_or(x, y), conditions)
tape = tape[condition]
List = []; eg = set()
for ix,text in tape.TEXT.iteritems():
X = self.read_section(*ix)
mat = X['MAT']; mt = X['MT']
for (mat1,mt1),rsec in X["REAC"].items():
if mat1 == 0: mat1 = mat;
for (l,l1),psec in rsec["P"].items():
covs = []
for nisec in psec["NI"].values():
if nisec["LB"] == 5:
Fkk = np.array(nisec["FKK"])
if nisec["LS"] == 0: # to be tested
cov = Fkk.reshape(nisec["NE"]-1, nisec["NE"]-1)
else:
cov = triu_matrix(Fkk, nisec["NE"]-1)
# add zero row and column at the end of the matrix
cov = np.insert(cov, cov.shape[0], [0]*cov.shape[1], axis=0)
cov = np.insert(cov, cov.shape[1], [0]*cov.shape[0], axis=1)
e1 = e2 = nisec["EK"]
elif nisec["LB"] == 1:
cov = np.diag(nisec["FK"])
e1 = e2 = nisec["EK"]
elif nisec["LB"] == 2:
f = np.array(nisec["FK"])
cov = f*f.reshape(-1,1)
e1 = e2 = nisec["EK"]
elif nisec["LB"] == 6:
cov = np.array(nisec["FKL"]).reshape(nisec["NER"]-1, nisec["NEC"]-1)
# add zero row and column at the end of the matrix
cov = np.insert(cov, cov.shape[0], [0]*cov.shape[1], axis=0)
cov = np.insert(cov, cov.shape[1], [0]*cov.shape[0], axis=1)
e1 = nisec["EK"]
e2 = nisec["EL"]
else:
logging.warn("skipped NI-type covariance with flag LB={} for MAT{}/MF{}/MT{}".format(nisec["LB"], *ix))
continue
cov = pd.DataFrame(cov, index=e1, columns=e2)
covs.append(cov)
if len(covs) == 0:
continue
cov = reduce(lambda x, y: x.add(y, fill_value=0).fillna(0), covs).fillna(0)
eg |= set(cov.index.values)
List.append([mat, mt, l, mat1, mt1, l1, cov])
if not List:
logging.warn("no MF34 covariance found")
return pd.DataFrame()
frame = pd.DataFrame(List, columns=('MAT', 'MT', 'L', 'MAT1', 'MT1', 'L1', 'COV'))
eg = sorted(eg)
frame.COV = frame.COV.apply(lambda x:cov_interp(x, eg))
# From here, the method is identical to Errorr.get_cov()
# Except that the size of eg is equal to the size of each matrix (we include the value for 2e7)
# and that the indexes are different
MI = [(mat,mt,l,e) for mat,mt,l in sorted(set(zip(frame.MAT, frame.MT, frame.L))) for e in eg]
index = pd.MultiIndex.from_tuples(MI, names=("MAT", "MT", "L", "E"))
# initialize union matrix
matrix = np.zeros((len(index),len(index)))
for i,row in frame.iterrows():
ix = index.get_loc((row.MAT,row.MT,row.L))
ix1 = index.get_loc((row.MAT1,row.MT1,row.L1))
matrix[ix.start:ix.stop,ix1.start:ix1.stop] = row.COV
i_lower = np.tril_indices(len(index), -1)
matrix[i_lower] = matrix.T[i_lower] # make the matrix symmetric
return LpcCov(matrix, index=index, columns=index)
def update_tpd(self, tpdFrame):
from .mf4 import write
mf = 4
tape = self.copy()
for (mat,mt),S in tpdFrame.groupby(["MAT","MT"]):
if (mat,mf,mt) not in self.index: continue
sec = self.read_section(mat,mf,mt)
pdb.set_trace()
if "TAB" in sec: del sec["TAB"]
if "LPC" in sec: del sec["LPC"]
sec["TAB"] = {}
sub = {}
for e in S.loc[mat,mt].index:
tab = S.loc[mat,mt,e]
T = LT = 0 # do not deal with this
pdb.set_trace()
distr = {"T" : T, "LT" : LT, "NBT" : [len(tab)], "INT" : [2], "MU" : tab.index, "ADISTR" : tab.values}
sub.update({e : distr})
sec["TAB"].update({"E" : sub})
sec["TAB"].update({"NBT" : [len(sec["TAB"]["E"])]})
sec["TAB"].update({"INT" : [2]})
text = write(sec)
tape.loc[mat,mf,mt].TEXT = text
return Endf6(tape)
def get_fy(self, listenergy=None, listmat=None, listmt=None):
"""Extract selected fission yields.
xs are linearized on unique grid.
"""
condition = self.index.get_level_values("MF") == 8
tape = self[condition]
if listmat is not None:
conditions = [tape.index.get_level_values("MAT") == x for x in listmat]
condition = reduce(lambda x,y: np.logical_or(x, y), conditions)
tape = tape[condition]
if listmt is not None:
conditions = [tape.index.get_level_values("MT") == x for x in listmt]
condition = reduce(lambda x,y: np.logical_or(x, y), conditions)
tape = tape[condition]
listfy = []
for ix,text in tape.TEXT.iteritems():
X = self.read_section(*ix)
for e,esec in X["E"].items():
if listenergy is not None:
if e not in listenergy:
continue
for fy in esec["FY"].values():
zam = fy["ZAFP"]*10 + fy["FPS"]
fydict = {"MAT" : ix[0], "MT" : ix[2], "E" : e, "ZAM" : zam, "YI" : fy["YI"], "DYI" : fy["DYI"]}
listfy.append(fydict)
if not listfy:
logging.warn("requested fission yields were not found")
return pd.DataFrame()
frame = pd.DataFrame.from_dict(listfy).set_index(["MAT","MT","E","ZAM"])
return Fy(frame)
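    # Hedged sketch (the energy and MT number follow ENDF conventions and are illustrative):
    #   fy = tape.get_fy(listenergy=[0.0253], listmt=[454])  # thermal independent yields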
def update_fy(self, fyFrame):
"""Update fy sections of `Endf6` instance with new data coming from
a `Fy` instance.
Parameters
----------
fyFrame : `sandy.Fy`
tabulated fission yields
Returns
-------
`sandy.Endf6`
"""
from .mf8 import write
tape = self.copy()
mf = 8
for (mat,mt),df in fyFrame.groupby(["MAT","MT"]):
if (mat,mf,mt) not in self.index:
continue
sec = self.read_section(mat,mf,mt)
for e,esec in sec["E"].items():
if (mat, mt, e) not in fyFrame.index:
continue
newfy = fyFrame.loc[mat,mt,e]
for zam, row in newfy.iterrows():
if zam in esec["FY"]:
sec["E"][e]["FY"][zam]["YI"] = row.YI
text = write(sec)
tape.loc[mat,mf,mt].TEXT = text
return Endf6(tape)
def update_info(self, descr=None):
"""Update RECORDS item (in DATA column) for MF1/MT451 of each MAT based on the content of the TEXT column.
"""
from .mf1 import write
tape = self.copy()
for mat in sorted(tape.index.get_level_values('MAT').unique()):
sec = self.read_section(mat,1,451)
            records = pd.DataFrame(sec["RECORDS"], columns=["MF","MT","NC","MOD"])
# !/usr/bin/env python
# coding: utf-8
"""
Some utility functions aiming to analyse OSM data
"""
import datetime as dt
from datetime import timedelta
import re
import math
import numpy as np
import pandas as pd
import statsmodels.api as sm
from osmdq.extract_user_editor import editor_name
### OSM data exploration ######################
def updatedelem(data):
"""Return an updated version of OSM elements
Parameters
----------
data: df
OSM element timeline
"""
updata = data.groupby(['elem','id'])['version'].max().reset_index()
return pd.merge(updata, data, on=['id','version'])
def datedelems(history, date):
"""Return an updated version of history data at date
Parameters
----------
history: df
OSM history dataframe
date: datetime
date in datetime format
"""
datedelems = (history.query("ts <= @date")
.groupby(['elem','id'])['version']
.max()
.reset_index())
return pd.merge(datedelems, history, on=['elem','id','version'])
def osm_stats(osm_history, timestamp):
"""Compute some simple statistics about OSM elements (number of nodes,
ways, relations, number of active contributors, number of change sets
Parameters
----------
osm_history: df
OSM element up-to-date at timestamp
timestamp: datetime
date at which OSM elements are evaluated
"""
osmdata = datedelems(osm_history, timestamp)
nb_nodes = len(osmdata.query('elem=="node"'))
nb_ways = len(osmdata.query('elem=="way"'))
nb_relations = len(osmdata.query('elem=="relation"'))
nb_users = osmdata.uid.nunique()
nb_chgsets = osmdata.chgset.nunique()
return [nb_nodes, nb_ways, nb_relations, nb_users, nb_chgsets]
def osm_chronology(history, start_date, end_date=dt.datetime.now()):
"""Evaluate the chronological evolution of OSM element numbers
Parameters
----------
history: df
OSM element timeline
"""
timerange = pd.date_range(start_date, end_date, freq="1M").values
osmstats = [osm_stats(history, str(date)) for date in timerange]
osmstats = pd.DataFrame(osmstats, index=timerange,
columns=['n_nodes', 'n_ways', 'n_relations',
'n_users', 'n_chgsets'])
return osmstats
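# Hedged usage sketch (the 'history' dataframe and the date range are assumptions;
# the column names follow the docstrings above):
#   stats = osm_chronology(history, "2014-01-01", "2015-01-01")
#   stats[['n_nodes', 'n_users']].plot()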
### OSM metadata extraction ####################
def group_count(metadata, data, grp_feat, res_feat, namesuffix):
"""Group-by 'data' by 'grp_feat' and element type features, count element
corresponding to each grp_feat-elemtype tuples and merge them into metadata
table
Parameters
----------
metadata: df
Dataframe that will integrate the new features
data: df
Dataframe from where information is grouped
grp_feat: object
string that indicates which feature from 'data' must be used to group items
res_feat: object
string that indicates the measured feature (how many items correspond
to the criterion)
namesuffix: object
string that ends the new feature name
"""
md_ext = (data.groupby([grp_feat, 'elem'])[res_feat]
.count()
.unstack()
.reset_index()
.fillna(0))
md_ext['elem'] = md_ext[['node','relation','way']].apply(sum, axis=1)
md_ext = md_ext[[grp_feat, 'elem', 'node', 'way', 'relation']]
colnames = "n_" + md_ext.columns.values[-4:] + namesuffix
md_ext.columns = [grp_feat, *colnames]
return pd.merge(metadata, md_ext, on=grp_feat, how='outer').fillna(0)
def group_nunique(metadata, data, grp_feat, res_feat, namesuffix):
"""Group-by 'data' by 'grp_feat' and element type features, count unique
element corresponding to each grp_feat-elemtype tuples and merge them into
metadata table
Parameters
----------
metadata: df
Dataframe that will integrate the new features
data: df
Dataframe from where information is grouped
grp_feat: object
string that indicates which feature from 'data' must be used to group items
res_feat: object
string that indicates the measured feature (how many items correspond
to the criterion)
namesuffix: object
string that ends the new feature name
"""
md_ext = (data.groupby([grp_feat, 'elem'])[res_feat]
.nunique()
.unstack()
.reset_index()
.fillna(0))
md_ext['elem'] = md_ext[['node','relation','way']].apply(sum, axis=1)
md_ext = md_ext[[grp_feat, 'elem', 'node', 'way', 'relation']]
colnames = "n_" + md_ext.columns.values[-4:] + namesuffix
md_ext.columns = [grp_feat, *colnames]
    return pd.merge(metadata, md_ext, on=grp_feat, how='outer')
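# Hedged usage sketch (assumes an 'elements' dataframe with 'uid', 'elem', 'id' and
# 'chgset' columns and a 'user_md' metadata frame keyed by 'uid'):
#   user_md = group_count(user_md, elements, 'uid', 'id', '_created')
#   user_md = group_nunique(user_md, elements, 'uid', 'chgset', '_chgset')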
import text_process
import os
import sys
import gzip
import json
import argparse
import itertools
import _thread
import threading
sys.path.append(os.getcwd())
import pandas as pd
import numpy as np
def get_df(path):
""" Apply raw data to pandas DataFrame. """
idx = 0
df = {}
g = gzip.open(path, 'rb')
for line in g:
df[idx] = json.loads(line)
idx += 1
return pd.DataFrame.from_dict(df, orient='index')
def extraction(meta_path, review_df, stop_words, count):
""" Extracting useful infromation. """
with gzip.open(meta_path, 'rb') as g:
categories, also_viewed = {}, {}
for line in g:
line = eval(line)
asin = line['asin']
if 'category' in line:
categories[asin] = [line['category']]
elif 'categories' in line:
categories[asin] = line['categories']
else:
raise Exception('category or categories tag not in metadata')
related = line['related'] if 'related' in line else None
# fill the also_related dictionary
also_viewed[asin] = []
relations = ['also_viewed', 'buy_after_viewing']
if related:
also_viewed[asin] = [related[r] for r in relations if r in related]
also_viewed[asin] = itertools.chain.from_iterable(also_viewed[asin])
queries, reviews = [], []
for i in range(len(review_df)):
asin = review_df['asin'][i]
review = review_df['reviewText'][i]
category = categories[asin]
# process queries
qs = map(text_process._remove_dup,
map(text_process._remove_char, category))
qs = [[w for w in q if w not in stop_words] for q in qs]
# process reviews
review = text_process._remove_char(review)
review = [w for w in review if w not in stop_words]
queries.append(qs)
reviews.append(review)
review_df['query_'] = queries # write query result to dataframe
# filtering words counts less than count
reviews = text_process._filter_words(reviews, count)
review_df['reviewText'] = reviews
return review_df, also_viewed
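# Hedged usage sketch (file names are placeholders; stop_words is assumed to be a
# set of stopwords and count the minimum word frequency):
#   review_df = get_df('reviews_Clothing_Shoes_and_Jewelry_5.json.gz')
#   review_df, also_viewed = extraction('meta_Clothing_Shoes_and_Jewelry.json.gz',
#                                       review_df, stop_words, count=5)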
def reindex(df):
""" Reindex the reviewID from 0 to total length. """
reviewer = df['reviewerID'].unique()
reviewer_map = {r: i for i, r in enumerate(reviewer)}
userIDs = [reviewer_map[df['reviewerID'][i]] for i in range(len(df))]
df['userID'] = userIDs
return df
def split_data(df, max_users_per_product, max_products_per_user):
""" Enlarge the dataset with the corresponding user-query-item pairs."""
df_enlarge = {}
i = 0
for row in range(len(df)):
for q in df['query_'][row]:
df_enlarge[i] = {'reviewerID': df['reviewerID'][row],
'userID': df['userID'][row], 'query_': q,
'asin': df['asin'][row], 'reviewText': df['reviewText'][row],
'gender': df['gender'][row] if FLAGS.is_clothing else None}
i += 1
    df_enlarge = pd.DataFrame.from_dict(df_enlarge, orient='index')
import kabuki
import hddm
import numpy as np
import pandas as pd
from numpy.random import rand
from scipy.stats import uniform, norm
from copy import copy
def gen_single_params_set(include=()):
"""Returns a dict of DDM parameters with random values for a singel conditin
the function is used by gen_rand_params.
:Optional:
include : tuple
Which optional parameters include. Can be
any combination of:
* 'z' (bias, default=0.5)
* 'sv' (inter-trial drift variability)
* 'sz' (inter-trial bias variability)
* 'st' (inter-trial non-decision time variability)
Special arguments are:
* 'all': include all of the above
* 'all_inter': include all of the above except 'z'
"""
params = {}
if include == 'all':
include = ['z','sv','sz','st']
elif include == 'all_inter':
include = ['sv','sz','st']
params['sv'] = 2.5*rand() if 'sv' in include else 0
params['sz'] = rand()* 0.4 if 'sz' in include else 0
params['st'] = rand()* 0.35 if 'st' in include else 0
params['z'] = .4+rand()*0.2 if 'z' in include else 0.5
# Simple parameters
params['v'] = (rand()-.5)*8
params['t'] = 0.2+rand()*0.3
params['a'] = 0.5+rand()*1.5
if 'pi' in include or 'gamma' in include:
params['pi'] = max(rand()*0.1,0.01)
# params['gamma'] = rand()
assert hddm.utils.check_params_valid(**params)
return params
def gen_rand_params(include=(), cond_dict=None, seed=None):
"""Returns a dict of DDM parameters with random values.
:Optional:
include : tuple
Which optional parameters include. Can be
any combination of:
* 'z' (bias, default=0.5)
* 'sv' (inter-trial drift variability)
* 'sz' (inter-trial bias variability)
* 'st' (inter-trial non-decision time variability)
Special arguments are:
* 'all': include all of the above
* 'all_inter': include all of the above except 'z'
cond_dict : dictionary
cond_dict is used when multiple conditions are desired.
the dictionary has the form of {param1: [value_1, ... , value_n], param2: [value_1, ... , value_n]}
and the function will output n sets of parameters. each set with values from the
appropriate place in the dictionary
for instance if cond_dict={'v': [0, 0.5, 1]} then 3 parameters set will be created.
the first with v=0 the second with v=0.5 and the third with v=1.
seed: float
random seed
Output:
        if cond_dict is None:
params: dictionary
a dictionary holding the parameters values
else:
cond_params: a dictionary holding the parameters for each one of the conditions,
that has the form {'c1': params1, 'c2': params2, ...}
it can be used directly as an argument in gen_rand_data.
merged_params:
a dictionary of parameters that can be used to validate the optimization
and learning algorithms.
"""
#set seed
if seed is not None:
np.random.seed(seed)
#if there is only a single condition then we can use gen_single_params_set
if cond_dict is None:
return gen_single_params_set(include=include)
#generate original parameter set
org_params = gen_single_params_set(include)
#create a merged set
merged_params = org_params.copy()
for name in cond_dict.keys():
del merged_params[name]
cond_params = {};
n_conds = len(list(cond_dict.values())[0])
for i in range(n_conds):
#create a set of parameters for condition i
#put them in i_params, and in cond_params[c#i]
i_params = org_params.copy()
for name in cond_dict.keys():
i_params[name] = cond_dict[name][i]
cond_params['c%d' %i] = i_params
#update merged_params
merged_params['%s(c%d)' % (name, i)] = cond_dict[name][i]
return cond_params, merged_params
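# Hedged examples (parameter values are drawn at random on every call):
#   single_params = gen_rand_params(include='all')
#   cond_params, merged_params = gen_rand_params(include=('sv',),
#                                                cond_dict={'v': [0, 0.5, 1]})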
####################################################################
# Functions to generate RT distributions with specified parameters #
####################################################################
def gen_rts(size=1000, range_=(-6, 6), dt=1e-3,
intra_sv=1., structured=True, subj_idx=None,
method='cdf', **params):
"""
A private function used by gen_rand_data
Returns a DataFrame of randomly simulated RTs from the DDM.
:Arguments:
params : dict
Parameter names and values to use for simulation.
:Optional:
size : int
Number of RTs to simulate.
range_ : tuple
            Minimum (negative) and maximum (positive) RTs.
dt : float
Number of steps/sec.
intra_sv : float
Intra-trial variability.
structured : bool
Return a structured array with fields 'RT'
and 'response'.
subj_idx : int
If set, append column 'subj_idx' with value subj_idx.
method : str
Which method to use to simulate the RTs:
* 'cdf': fast, uses the inverse of cumulative density function to sample, dt can be 1e-2.
* 'drift': slow, simulates each complete drift process, dt should be 1e-4.
"""
if 'v_switch' in params and method != 'drift':
print("Warning: Only drift method supports changes in drift-rate. v_switch will be ignored.")
# Set optional default values if they are not provided
for var_param in ('sv', 'sz', 'st'):
if var_param not in params:
params[var_param] = 0
if 'z' not in params:
params['z'] = .5
if 'sv' not in params:
params['sv'] = 0
if 'sz' not in params:
params['sz'] = 0
#check sample
    if isinstance(size, tuple):  # pymc stochastics use a tuple for the sample size
if size == ():
size = 1
else:
size = size[0]
if method=='cdf_py':
rts = _gen_rts_from_cdf(params, size, range_, dt)
elif method=='drift':
rts = _gen_rts_from_simulated_drift(params, size, dt, intra_sv)[0]
elif method=='cdf':
rts = hddm.wfpt.gen_rts_from_cdf(params['v'],params['sv'],params['a'],params['z'],
params['sz'],params['t'],params['st'],
size, range_[0], range_[1], dt)
else:
raise TypeError("Sampling method %s not found." % method)
if not structured:
return rts
else:
        data = pd.DataFrame(rts, columns=['rt'])
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from numba import njit
import vectorbt as vbt
from tests.utils import record_arrays_close
from vectorbt.generic.enums import range_dt, drawdown_dt
from vectorbt.portfolio.enums import order_dt, trade_dt, log_dt
day_dt = np.timedelta64(86400000000000)
example_dt = np.dtype([
('id', np.int64),
('col', np.int64),
('idx', np.int64),
('some_field1', np.float64),
('some_field2', np.float64)
], align=True)
records_arr = np.asarray([
(0, 0, 0, 10, 21),
(1, 0, 1, 11, 20),
(2, 0, 2, 12, 19),
(3, 1, 0, 13, 18),
(4, 1, 1, 14, 17),
(5, 1, 2, 13, 18),
(6, 2, 0, 12, 19),
(7, 2, 1, 11, 20),
(8, 2, 2, 10, 21)
], dtype=example_dt)
records_nosort_arr = np.concatenate((
records_arr[0::3],
records_arr[1::3],
records_arr[2::3]
))
group_by = pd.Index(['g1', 'g1', 'g2', 'g2'])
wrapper = vbt.ArrayWrapper(
index=['x', 'y', 'z'],
columns=['a', 'b', 'c', 'd'],
ndim=2,
freq='1 days'
)
wrapper_grouped = wrapper.replace(group_by=group_by)
records = vbt.records.Records(wrapper, records_arr)
records_grouped = vbt.records.Records(wrapper_grouped, records_arr)
records_nosort = records.replace(records_arr=records_nosort_arr)
records_nosort_grouped = vbt.records.Records(wrapper_grouped, records_nosort_arr)
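# Hedged sanity-check sketch (interactive only, not part of the test suite):
#   records.map_field('some_field1').to_pd()   # 3x4 frame with NaN in column 'd'
#   records_grouped.col_mapper.get_col_map()   # per-group index and count arrays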
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# col_mapper.py ############# #
class TestColumnMapper:
def test_col_arr(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
records.col_mapper.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_get_col_arr(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_arr(),
records.col_mapper.col_arr
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
)
def test_col_range(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_range,
np.array([
[0, 3]
])
)
np.testing.assert_array_equal(
records.col_mapper.col_range,
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
def test_get_col_range(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_range(),
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_range(),
np.array([[0, 6]])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_range(),
np.array([[0, 6], [6, 9]])
)
def test_col_map(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[0],
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[1],
np.array([3])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[1],
np.array([3, 3, 3, 0])
)
def test_get_col_map(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[0],
records.col_mapper.col_map[0]
)
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[1],
records.col_mapper.col_map[1]
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[1],
np.array([6])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[1],
np.array([6, 3])
)
def test_is_sorted(self):
assert records.col_mapper.is_sorted()
assert not records_nosort.col_mapper.is_sorted()
# ############# mapped_array.py ############# #
mapped_array = records.map_field('some_field1')
mapped_array_grouped = records_grouped.map_field('some_field1')
mapped_array_nosort = records_nosort.map_field('some_field1')
mapped_array_nosort_grouped = records_nosort_grouped.map_field('some_field1')
mapping = {x: 'test_' + str(x) for x in pd.unique(mapped_array.values)}
mp_mapped_array = mapped_array.replace(mapping=mapping)
mp_mapped_array_grouped = mapped_array_grouped.replace(mapping=mapping)
class TestMappedArray:
def test_config(self, tmp_path):
assert vbt.MappedArray.loads(mapped_array.dumps()) == mapped_array
mapped_array.save(tmp_path / 'mapped_array')
assert vbt.MappedArray.load(tmp_path / 'mapped_array') == mapped_array
def test_mapped_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
mapped_array.values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
def test_id_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.id_arr,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
def test_col_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
mapped_array.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_idx_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].idx_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.idx_arr,
np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
)
def test_is_sorted(self):
assert mapped_array.is_sorted()
assert mapped_array.is_sorted(incl_id=True)
assert not mapped_array_nosort.is_sorted()
assert not mapped_array_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert mapped_array.sort().is_sorted()
assert mapped_array.sort().is_sorted(incl_id=True)
assert mapped_array.sort(incl_id=True).is_sorted(incl_id=True)
assert mapped_array_nosort.sort().is_sorted()
assert mapped_array_nosort.sort().is_sorted(incl_id=True)
assert mapped_array_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = mapped_array['a'].values >= mapped_array['a'].values.mean()
np.testing.assert_array_equal(
mapped_array['a'].apply_mask(mask_a).id_arr,
np.array([1, 2])
)
mask = mapped_array.values >= mapped_array.values.mean()
filtered = mapped_array.apply_mask(mask)
np.testing.assert_array_equal(
filtered.id_arr,
np.array([2, 3, 4, 5, 6])
)
np.testing.assert_array_equal(filtered.col_arr, mapped_array.col_arr[mask])
np.testing.assert_array_equal(filtered.idx_arr, mapped_array.idx_arr[mask])
assert mapped_array_grouped.apply_mask(mask).wrapper == mapped_array_grouped.wrapper
assert mapped_array_grouped.apply_mask(mask, group_by=False).wrapper.grouper.group_by is None
def test_map_to_mask(self):
@njit
def every_2_nb(inout, idxs, col, mapped_arr):
inout[idxs[::2]] = True
np.testing.assert_array_equal(
mapped_array.map_to_mask(every_2_nb),
np.array([True, False, True, True, False, True, True, False, True])
)
def test_top_n_mask(self):
np.testing.assert_array_equal(
mapped_array.top_n_mask(1),
np.array([False, False, True, False, True, False, True, False, False])
)
def test_bottom_n_mask(self):
np.testing.assert_array_equal(
mapped_array.bottom_n_mask(1),
np.array([True, False, False, True, False, False, False, False, True])
)
def test_top_n(self):
np.testing.assert_array_equal(
mapped_array.top_n(1).id_arr,
np.array([2, 4, 6])
)
def test_bottom_n(self):
np.testing.assert_array_equal(
mapped_array.bottom_n(1).id_arr,
np.array([0, 3, 8])
)
def test_to_pd(self):
target = pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
index=wrapper.index,
columns=wrapper.columns
)
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(),
target['a']
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(),
target
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0.),
target.fillna(0.)
)
mapped_array2 = vbt.MappedArray(
wrapper,
records_arr['some_field1'].tolist() + [1],
records_arr['col'].tolist() + [2],
idx_arr=records_arr['idx'].tolist() + [2]
)
with pytest.raises(Exception):
_ = mapped_array2.to_pd()
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(ignore_index=True),
pd.Series(np.array([10., 11., 12.]), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0, ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., 0.],
[11., 14., 11., 0.],
[12., 13., 10., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 12.],
[11., 11.],
[12., 10.],
[13., np.nan],
[14., np.nan],
[13., np.nan],
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
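    # apply() transforms the mapped values column by column; with apply_per_group=True
    # the grouped variant passes each whole group (e.g. columns 'a' and 'b' for 'g1')
    # to the callback at once, which is why the cumulative sums below keep running
    # across the group boundary (10, 21, 33, 46, 60, 73) instead of restarting at 13.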
def test_apply(self):
@njit
def cumsum_apply_nb(idxs, col, a):
return np.cumsum(a)
np.testing.assert_array_equal(
mapped_array['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
mapped_array.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert mapped_array_grouped.apply(cumsum_apply_nb).wrapper == \
mapped_array.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert mapped_array.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
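    # reduce() collapses each column (or group) to a scalar via the Numba reducer;
    # column 'd' has no records, so its result is NaN unless fill_value is supplied,
    # and wrap_kwargs is forwarded to the wrapper (the to_timedelta case multiplies
    # the result by day_dt).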
def test_reduce(self):
@njit
def mean_reduce_nb(col, a):
return np.mean(a)
assert mapped_array['a'].reduce(mean_reduce_nb) == 11.
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0.),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0., wrap_kwargs=dict(dtype=np.int_)),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, wrap_kwargs=dict(to_timedelta=True)),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(mean_reduce_nb),
pd.Series([12.166666666666666, 11.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
assert mapped_array_grouped['g1'].reduce(mean_reduce_nb) == 12.166666666666666
pd.testing.assert_series_equal(
mapped_array_grouped[['g1']].reduce(mean_reduce_nb),
pd.Series([12.166666666666666], index=pd.Index(['g1'], dtype='object')).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
mapped_array_grouped.reduce(mean_reduce_nb, group_by=False)
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, group_by=group_by),
mapped_array_grouped.reduce(mean_reduce_nb)
)
def test_reduce_to_idx(self):
@njit
def argmin_reduce_nb(col, a):
return np.argmin(a)
assert mapped_array['a'].reduce(argmin_reduce_nb, returns_idx=True) == 'x'
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True),
pd.Series(np.array(['x', 'x', 'z', np.nan], dtype=object), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 0, 2, -1], dtype=int), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 2], dtype=int), index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
def test_reduce_to_array(self):
@njit
def min_max_reduce_nb(col, a):
return np.array([np.min(a), np.max(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(min_max_reduce_nb, returns_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([10., 12.], index=pd.Index(['min', 'max'], dtype='object'), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
index=pd.Index(['min', 'max'], dtype='object'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, fill_value=0.),
pd.DataFrame(
np.array([
[10., 13., 10., 0.],
[12., 14., 12., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(to_timedelta=True)),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
columns=wrapper.columns
) * day_dt
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame(
np.array([
[10., 10.],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True, group_by=False)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, group_by=group_by),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g1'].reduce(min_max_reduce_nb, returns_array=True),
pd.Series([10., 14.], name='g1')
)
pd.testing.assert_frame_equal(
mapped_array_grouped[['g1']].reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame([[10.], [14.]], columns=pd.Index(['g1'], dtype='object'))
)
def test_reduce_to_idx_array(self):
@njit
def idxmin_idxmax_reduce_nb(col, a):
return np.array([np.argmin(a), np.argmax(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['min', 'max'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.DataFrame(
{
'a': ['x', 'z'],
'b': ['x', 'y'],
'c': ['z', 'x'],
'd': [np.nan, np.nan]
},
index=pd.Index(['min', 'max'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 0, 2, -1],
[2, 1, 0, -1]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 2],
[1, 0]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_nth(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth(0),
pd.Series(np.array([10., 13., 12., np.nan]), index=wrapper.columns).rename('nth')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth(-1),
pd.Series(np.array([12., 13., 10., np.nan]), index=wrapper.columns).rename('nth')
)
with pytest.raises(Exception):
_ = mapped_array.nth(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth(0),
pd.Series(np.array([10., 12.]), index=pd.Index(['g1', 'g2'], dtype='object')).rename('nth')
)
def test_nth_index(self):
        assert mapped_array['a'].nth_index(0) == 'x'
pd.testing.assert_series_equal(
mapped_array.nth_index(0),
pd.Series(
np.array(['x', 'x', 'x', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
        assert mapped_array['a'].nth_index(-1) == 'z'
pd.testing.assert_series_equal(
mapped_array.nth_index(-1),
pd.Series(
np.array(['z', 'z', 'z', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
with pytest.raises(Exception):
_ = mapped_array.nth_index(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth_index(0),
pd.Series(
np.array(['x', 'x'], dtype='object'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('nth_index')
)
def test_min(self):
assert mapped_array['a'].min() == mapped_array['a'].to_pd().min()
pd.testing.assert_series_equal(
mapped_array.min(),
mapped_array.to_pd().min().rename('min')
)
pd.testing.assert_series_equal(
mapped_array_grouped.min(),
pd.Series([10., 10.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('min')
)
def test_max(self):
assert mapped_array['a'].max() == mapped_array['a'].to_pd().max()
pd.testing.assert_series_equal(
mapped_array.max(),
mapped_array.to_pd().max().rename('max')
)
pd.testing.assert_series_equal(
mapped_array_grouped.max(),
pd.Series([14., 12.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('max')
)
def test_mean(self):
assert mapped_array['a'].mean() == mapped_array['a'].to_pd().mean()
pd.testing.assert_series_equal(
mapped_array.mean(),
mapped_array.to_pd().mean().rename('mean')
)
pd.testing.assert_series_equal(
mapped_array_grouped.mean(),
pd.Series([12.166667, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('mean')
)
def test_median(self):
assert mapped_array['a'].median() == mapped_array['a'].to_pd().median()
pd.testing.assert_series_equal(
mapped_array.median(),
mapped_array.to_pd().median().rename('median')
)
pd.testing.assert_series_equal(
mapped_array_grouped.median(),
pd.Series([12.5, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('median')
)
def test_std(self):
assert mapped_array['a'].std() == mapped_array['a'].to_pd().std()
pd.testing.assert_series_equal(
mapped_array.std(),
mapped_array.to_pd().std().rename('std')
)
pd.testing.assert_series_equal(
mapped_array.std(ddof=0),
mapped_array.to_pd().std(ddof=0).rename('std')
)
pd.testing.assert_series_equal(
mapped_array_grouped.std(),
pd.Series([1.4719601443879746, 1.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('std')
)
def test_sum(self):
assert mapped_array['a'].sum() == mapped_array['a'].to_pd().sum()
pd.testing.assert_series_equal(
mapped_array.sum(),
mapped_array.to_pd().sum().rename('sum')
)
pd.testing.assert_series_equal(
mapped_array_grouped.sum(),
pd.Series([73.0, 33.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('sum')
)
def test_count(self):
assert mapped_array['a'].count() == mapped_array['a'].to_pd().count()
pd.testing.assert_series_equal(
mapped_array.count(),
mapped_array.to_pd().count().rename('count')
)
pd.testing.assert_series_equal(
mapped_array_grouped.count(),
pd.Series([6, 3], index=pd.Index(['g1', 'g2'], dtype='object')).rename('count')
)
def test_idxmin(self):
assert mapped_array['a'].idxmin() == mapped_array['a'].to_pd().idxmin()
pd.testing.assert_series_equal(
mapped_array.idxmin(),
mapped_array.to_pd().idxmin().rename('idxmin')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmin(),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmin')
)
def test_idxmax(self):
assert mapped_array['a'].idxmax() == mapped_array['a'].to_pd().idxmax()
pd.testing.assert_series_equal(
mapped_array.idxmax(),
mapped_array.to_pd().idxmax().rename('idxmax')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmax(),
pd.Series(
np.array(['y', 'x'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmax')
)
def test_describe(self):
pd.testing.assert_series_equal(
mapped_array['a'].describe(),
mapped_array['a'].to_pd().describe()
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=None),
mapped_array.to_pd().describe(percentiles=None)
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=[]),
mapped_array.to_pd().describe(percentiles=[])
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=np.arange(0, 1, 0.1)),
mapped_array.to_pd().describe(percentiles=np.arange(0, 1, 0.1))
)
pd.testing.assert_frame_equal(
mapped_array_grouped.describe(),
pd.DataFrame(
np.array([
[6., 3.],
[12.16666667, 11.],
[1.47196014, 1.],
[10., 10.],
[11.25, 10.5],
[12.5, 11.],
[13., 11.5],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object'),
index=mapped_array.describe().index
)
)
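    # value_counts() builds a per-column frequency table of the mapped values;
    # `mapping` relabels the index, `sort_uniques` orders the unique values, while
    # `sort`, `ascending`, `normalize` and `dropna` follow the familiar pandas
    # value_counts semantics, as exercised below.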
def test_value_counts(self):
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(),
pd.Series(
np.array([1, 1, 1]),
index=pd.Float64Index([10.0, 11.0, 12.0], dtype='float64'),
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(mapping=mapping),
pd.Series(
np.array([1, 1, 1]),
index=pd.Index(['test_10.0', 'test_11.0', 'test_12.0'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.value_counts(),
pd.DataFrame(
np.array([
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 0, 1, 0],
[0, 2, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.value_counts(),
pd.DataFrame(
np.array([
[1, 1],
[1, 1],
[1, 1],
[2, 0],
[1, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
mapped_array2 = mapped_array.replace(mapped_arr=[4, 4, 3, 2, np.nan, 4, 3, 2, 1])
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=False),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[1, 0, 1, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 3.0, 2.0, 1.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([1.0, 2.0, 3.0, 4.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, ascending=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0]
]),
index=pd.Float64Index([1.0, np.nan, 2.0, 3.0, 4.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True),
pd.DataFrame(
np.array([
[0.2222222222222222, 0.1111111111111111, 0.0, 0.0],
[0.0, 0.1111111111111111, 0.1111111111111111, 0.0],
[0.1111111111111111, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.1111111111111111, 0.0, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True, dropna=True),
pd.DataFrame(
np.array([
[0.25, 0.125, 0.0, 0.0],
[0.0, 0.125, 0.125, 0.0],
[0.125, 0.0, 0.125, 0.0],
[0.0, 0.0, 0.125, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0], dtype='float64'),
columns=wrapper.columns
)
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
ma = mapped_array_nosort
ma_grouped = mapped_array_nosort_grouped
else:
ma = mapped_array
ma_grouped = mapped_array_grouped
np.testing.assert_array_equal(
ma['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
ma['a'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
np.testing.assert_array_equal(
ma['b'].id_arr,
np.array([3, 4, 5])
)
np.testing.assert_array_equal(
ma['b'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'a']].id_arr,
np.array([0, 1, 2, 0, 1, 2])
)
np.testing.assert_array_equal(
ma[['a', 'a']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'b']].id_arr,
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
ma[['a', 'b']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = ma.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped['g1'].wrapper.ndim == 2
assert ma_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert ma_grouped['g2'].wrapper.ndim == 2
assert ma_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped[['g1']].wrapper.ndim == 2
assert ma_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert ma_grouped[['g1', 'g2']].wrapper.ndim == 2
assert ma_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_magic(self):
a = vbt.MappedArray(
wrapper,
records_arr['some_field1'],
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
a_inv = vbt.MappedArray(
wrapper,
records_arr['some_field1'][::-1],
records_arr['col'][::-1],
id_arr=records_arr['id'][::-1],
idx_arr=records_arr['idx'][::-1]
)
b = records_arr['some_field2']
a_bool = vbt.MappedArray(
wrapper,
records_arr['some_field1'] > np.mean(records_arr['some_field1']),
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
b_bool = records_arr['some_field2'] > np.mean(records_arr['some_field2'])
assert a ** a == a ** 2
with pytest.raises(Exception):
_ = a * a_inv
# binary ops
# comparison ops
np.testing.assert_array_equal((a == b).values, a.values == b)
np.testing.assert_array_equal((a != b).values, a.values != b)
np.testing.assert_array_equal((a < b).values, a.values < b)
np.testing.assert_array_equal((a > b).values, a.values > b)
np.testing.assert_array_equal((a <= b).values, a.values <= b)
np.testing.assert_array_equal((a >= b).values, a.values >= b)
# arithmetic ops
np.testing.assert_array_equal((a + b).values, a.values + b)
np.testing.assert_array_equal((a - b).values, a.values - b)
np.testing.assert_array_equal((a * b).values, a.values * b)
np.testing.assert_array_equal((a ** b).values, a.values ** b)
np.testing.assert_array_equal((a % b).values, a.values % b)
np.testing.assert_array_equal((a // b).values, a.values // b)
np.testing.assert_array_equal((a / b).values, a.values / b)
# __r*__ is only called if the left object does not have an __*__ method
np.testing.assert_array_equal((10 + a).values, 10 + a.values)
np.testing.assert_array_equal((10 - a).values, 10 - a.values)
np.testing.assert_array_equal((10 * a).values, 10 * a.values)
np.testing.assert_array_equal((10 ** a).values, 10 ** a.values)
np.testing.assert_array_equal((10 % a).values, 10 % a.values)
np.testing.assert_array_equal((10 // a).values, 10 // a.values)
np.testing.assert_array_equal((10 / a).values, 10 / a.values)
# mask ops
np.testing.assert_array_equal((a_bool & b_bool).values, a_bool.values & b_bool)
np.testing.assert_array_equal((a_bool | b_bool).values, a_bool.values | b_bool)
np.testing.assert_array_equal((a_bool ^ b_bool).values, a_bool.values ^ b_bool)
np.testing.assert_array_equal((True & a_bool).values, True & a_bool.values)
np.testing.assert_array_equal((True | a_bool).values, True | a_bool.values)
np.testing.assert_array_equal((True ^ a_bool).values, True ^ a_bool.values)
# unary ops
np.testing.assert_array_equal((-a).values, -a.values)
np.testing.assert_array_equal((+a).values, +a.values)
np.testing.assert_array_equal((abs(-a)).values, abs((-a.values)))
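    # stats() without a column aggregates the per-column metrics with the mean (hence
    # the 'agg_func_mean' name); e.g. the Count of 2.25 below is the mean of the
    # per-column counts [3, 3, 3, 0].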
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Mean', 'Std', 'Min', 'Median', 'Max', 'Min Index', 'Max Index'
], dtype='object')
pd.testing.assert_series_equal(
mapped_array.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
2.25, 11.777777777777779, 0.859116756396542, 11.0, 11.666666666666666, 12.666666666666666
],
index=stats_index[:-2],
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
3, 11.0, 1.0, 10.0, 11.0, 12.0, 'x', 'z'
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
6, 12.166666666666666, 1.4719601443879746, 10.0, 12.5, 14.0, 'x', 'y'
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
def test_stats_mapping(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Value Counts: test_10.0',
'Value Counts: test_11.0', 'Value Counts: test_12.0',
'Value Counts: test_13.0', 'Value Counts: test_14.0'
], dtype='object')
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
2.25, 0.5, 0.5, 0.5, 0.5, 0.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='a'),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
3, 1, 1, 1, 0, 0
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
6, 1, 1, 1, 2, 1
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
mapped_array.stats(settings=dict(mapping=mapping))
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mp_mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 9)
pd.testing.assert_index_equal(stats_df.index, mp_mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# base.py ############# #
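# TestRecords exercises the raw structured-array API; it reuses the `records`,
# `records_grouped` and `records_nosort` fixtures constructed earlier from `records_arr`.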
class TestRecords:
def test_config(self, tmp_path):
assert vbt.Records.loads(records['a'].dumps()) == records['a']
assert vbt.Records.loads(records.dumps()) == records
records.save(tmp_path / 'records')
assert vbt.Records.load(tmp_path / 'records') == records
def test_records(self):
pd.testing.assert_frame_equal(
records.records,
pd.DataFrame.from_records(records_arr)
)
def test_recarray(self):
np.testing.assert_array_equal(records['a'].recarray.some_field1, records['a'].values['some_field1'])
np.testing.assert_array_equal(records.recarray.some_field1, records.values['some_field1'])
def test_records_readable(self):
pd.testing.assert_frame_equal(
records.records_readable,
pd.DataFrame([
[0, 'a', 'x', 10.0, 21.0], [1, 'a', 'y', 11.0, 20.0], [2, 'a', 'z', 12.0, 19.0],
[3, 'b', 'x', 13.0, 18.0], [4, 'b', 'y', 14.0, 17.0], [5, 'b', 'z', 13.0, 18.0],
[6, 'c', 'x', 12.0, 19.0], [7, 'c', 'y', 11.0, 20.0], [8, 'c', 'z', 10.0, 21.0]
], columns=pd.Index(['Id', 'Column', 'Timestamp', 'some_field1', 'some_field2'], dtype='object'))
)
def test_is_sorted(self):
assert records.is_sorted()
assert records.is_sorted(incl_id=True)
assert not records_nosort.is_sorted()
assert not records_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert records.sort().is_sorted()
assert records.sort().is_sorted(incl_id=True)
assert records.sort(incl_id=True).is_sorted(incl_id=True)
assert records_nosort.sort().is_sorted()
assert records_nosort.sort().is_sorted(incl_id=True)
assert records_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = records['a'].values['some_field1'] >= records['a'].values['some_field1'].mean()
record_arrays_close(
records['a'].apply_mask(mask_a).values,
np.array([
(1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
mask = records.values['some_field1'] >= records.values['some_field1'].mean()
filtered = records.apply_mask(mask)
record_arrays_close(
filtered.values,
np.array([
(2, 0, 2, 12., 19.), (3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.),
(5, 1, 2, 13., 18.), (6, 2, 0, 12., 19.)
], dtype=example_dt)
)
assert records_grouped.apply_mask(mask).wrapper == records_grouped.wrapper
def test_map_field(self):
np.testing.assert_array_equal(
records['a'].map_field('some_field1').values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
records.map_field('some_field1').values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
assert records_grouped.map_field('some_field1').wrapper == \
records.map_field('some_field1', group_by=group_by).wrapper
assert records_grouped.map_field('some_field1', group_by=False).wrapper.grouper.group_by is None
def test_map(self):
@njit
def map_func_nb(record):
return record['some_field1'] + record['some_field2']
np.testing.assert_array_equal(
records['a'].map(map_func_nb).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map(map_func_nb).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map(map_func_nb).wrapper == \
records.map(map_func_nb, group_by=group_by).wrapper
assert records_grouped.map(map_func_nb, group_by=False).wrapper.grouper.group_by is None
def test_map_array(self):
arr = records_arr['some_field1'] + records_arr['some_field2']
np.testing.assert_array_equal(
records['a'].map_array(arr[:3]).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map_array(arr).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map_array(arr).wrapper == \
records.map_array(arr, group_by=group_by).wrapper
assert records_grouped.map_array(arr, group_by=False).wrapper.grouper.group_by is None
def test_apply(self):
@njit
def cumsum_apply_nb(records):
return np.cumsum(records['some_field1'])
np.testing.assert_array_equal(
records['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
records.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert records_grouped.apply(cumsum_apply_nb).wrapper == \
records.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert records_grouped.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_count(self):
assert records['a'].count() == 3
pd.testing.assert_series_equal(
records.count(),
pd.Series(
np.array([3, 3, 3, 0]),
index=wrapper.columns
).rename('count')
)
assert records_grouped['g1'].count() == 6
pd.testing.assert_series_equal(
records_grouped.count(),
pd.Series(
np.array([6, 3]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('count')
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
r = records_nosort
r_grouped = records_nosort_grouped
else:
r = records
r_grouped = records_grouped
record_arrays_close(
r['a'].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
pd.testing.assert_index_equal(
r['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
record_arrays_close(
r[['a', 'a']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(0, 1, 0, 10., 21.), (1, 1, 1, 11., 20.), (2, 1, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
record_arrays_close(
r[['a', 'b']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.), (5, 1, 2, 13., 18.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = r.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped['g1'].wrapper.ndim == 2
assert r_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert r_grouped['g2'].wrapper.ndim == 2
assert r_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped[['g1']].wrapper.ndim == 2
assert r_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert r_grouped[['g1', 'g2']].wrapper.ndim == 2
assert r_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_filtering(self):
filtered_records = vbt.Records(wrapper, records_arr[[0, -1]])
record_arrays_close(
filtered_records.values,
np.array([(0, 0, 0, 10., 21.), (8, 2, 2, 10., 21.)], dtype=example_dt)
)
# a
record_arrays_close(
filtered_records['a'].values,
np.array([(0, 0, 0, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['a'].map_field('some_field1').id_arr,
np.array([0])
)
assert filtered_records['a'].map_field('some_field1').min() == 10.
assert filtered_records['a'].count() == 1.
# b
record_arrays_close(
filtered_records['b'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['b'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['b'].map_field('some_field1').min())
assert filtered_records['b'].count() == 0.
# c
record_arrays_close(
filtered_records['c'].values,
np.array([(8, 0, 2, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['c'].map_field('some_field1').id_arr,
np.array([8])
)
assert filtered_records['c'].map_field('some_field1').min() == 10.
assert filtered_records['c'].count() == 1.
# d
record_arrays_close(
filtered_records['d'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['d'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['d'].map_field('some_field1').min())
assert filtered_records['d'].count() == 0.
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count'
], dtype='object')
pd.testing.assert_series_equal(
records.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 2.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
records.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 3
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
records.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 6
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c')
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records.stats(column='g2', group_by=group_by)
)
stats_df = records.stats(agg_func=None)
assert stats_df.shape == (4, 4)
pd.testing.assert_index_equal(stats_df.index, records.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# ranges.py ############# #
ts = pd.DataFrame({
'a': [1, -1, 3, -1, 5, -1],
'b': [-1, -1, -1, 4, 5, 6],
'c': [1, 2, 3, -1, -1, -1],
'd': [-1, -1, -1, -1, -1, -1]
}, index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6)
])
ranges = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days'))
ranges_grouped = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days', group_by=group_by))
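# Ranges.from_ts extracts one record per contiguous run of non-gap values; in `ts`
# above -1 plays the role of the gap marker (see the `!= -1` masks in test_to_mask),
# so column 'a' yields three one-day closed ranges, 'b' one open range, 'c' one closed
# range and 'd' none.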
class TestRanges:
def test_mapped_fields(self):
for name in range_dt.names:
np.testing.assert_array_equal(
getattr(ranges, name).values,
ranges.values[name]
)
def test_from_ts(self):
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 1, 1), (1, 0, 2, 3, 1), (2, 0, 4, 5, 1), (3, 1, 3, 5, 0), (4, 2, 0, 3, 1)
], dtype=range_dt)
)
assert ranges.wrapper.freq == day_dt
pd.testing.assert_index_equal(
ranges_grouped.wrapper.grouper.group_by,
group_by
)
def test_records_readable(self):
records_readable = ranges.records_readable
np.testing.assert_array_equal(
records_readable['Range Id'].values,
np.array([
0, 1, 2, 3, 4
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'b', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Start Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-01T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['End Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-06T00:00:00.000000000',
'2020-01-04T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Closed', 'Closed', 'Closed', 'Open', 'Closed'
])
)
def test_to_mask(self):
pd.testing.assert_series_equal(
ranges['a'].to_mask(),
ts['a'] != -1
)
pd.testing.assert_frame_equal(
ranges.to_mask(),
ts != -1
)
pd.testing.assert_frame_equal(
ranges_grouped.to_mask(),
pd.DataFrame(
[
[True, True],
[False, True],
[True, True],
[True, False],
[True, False],
[True, False]
],
index=ts.index,
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_duration(self):
np.testing.assert_array_equal(
ranges['a'].duration.values,
np.array([1, 1, 1])
)
np.testing.assert_array_equal(
ranges.duration.values,
np.array([1, 1, 1, 3, 3])
)
def test_avg_duration(self):
assert ranges['a'].avg_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
ranges.avg_duration(),
pd.Series(
np.array([86400000000000, 259200000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('avg_duration')
)
pd.testing.assert_series_equal(
ranges_grouped.avg_duration(),
pd.Series(
np.array([129600000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_duration')
)
def test_max_duration(self):
assert ranges['a'].max_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
ranges.max_duration(),
pd.Series(
np.array([86400000000000, 259200000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('max_duration')
)
pd.testing.assert_series_equal(
ranges_grouped.max_duration(),
pd.Series(
np.array([259200000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_duration')
)
def test_coverage(self):
assert ranges['a'].coverage() == 0.5
pd.testing.assert_series_equal(
ranges.coverage(),
pd.Series(
np.array([0.5, 0.5, 0.5, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.coverage(),
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage()
)
pd.testing.assert_series_equal(
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage(overlapping=True),
pd.Series(
np.array([1.0, 1.0, 1.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.coverage(normalize=False),
pd.Series(
np.array([3.0, 3.0, 3.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage(overlapping=True, normalize=False),
pd.Series(
np.array([3.0, 3.0, 3.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges_grouped.coverage(),
pd.Series(
np.array([0.4166666666666667, 0.25]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges_grouped.coverage(),
ranges_grouped.replace(records_arr=np.repeat(ranges_grouped.values, 2)).coverage()
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Coverage', 'Overlap Coverage',
'Total Records', 'Duration: Min', 'Duration: Median', 'Duration: Max',
'Duration: Mean', 'Duration: Std'
], dtype='object')
pd.testing.assert_series_equal(
ranges.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), | pd.Timedelta('3 days 00:00:00') | pandas.Timedelta |
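# ############# test_groupby.py (pandas) ############# #
# The remaining tests appear to come from pandas' own groupby test suite; fixtures such
# as `df`, `ts`, `tsframe`, `mframe`, `df_mixed_floats` and `reduction_func` are
# presumably provided by that suite's conftest rather than defined in this file.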
from datetime import datetime
from decimal import Decimal
from io import StringIO
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, read_csv
import pandas._testing as tm
from pandas.core.base import SpecificationError
import pandas.core.common as com
def test_repr():
# GH18203
result = repr(pd.Grouper(key="A", level="B"))
expected = "Grouper(key='A', level='B', axis=0, sort=False)"
assert result == expected
@pytest.mark.parametrize("dtype", ["int64", "int32", "float64", "float32"])
def test_basic(dtype):
data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
for k, v in grouped:
assert len(v) == 3
agged = grouped.aggregate(np.mean)
assert agged[1] == 1
tm.assert_series_equal(agged, grouped.agg(np.mean)) # shorthand
tm.assert_series_equal(agged, grouped.mean())
tm.assert_series_equal(grouped.agg(np.sum), grouped.sum())
expected = grouped.apply(lambda x: x * x.sum())
transformed = grouped.transform(lambda x: x * x.sum())
assert transformed[7] == 12
tm.assert_series_equal(transformed, expected)
value_grouped = data.groupby(data)
tm.assert_series_equal(
value_grouped.aggregate(np.mean), agged, check_index_type=False
)
# complex agg
agged = grouped.aggregate([np.mean, np.std])
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped.aggregate({"one": np.mean, "two": np.std})
group_constants = {0: 10, 1: 20, 2: 30}
agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())
assert agged[1] == 21
# corner cases
msg = "Must produce aggregated value"
# exception raised is type Exception
with pytest.raises(Exception, match=msg):
grouped.aggregate(lambda x: x * 2)
def test_groupby_nonobject_dtype(mframe, df_mixed_floats):
key = mframe.index.codes[0]
grouped = mframe.groupby(key)
result = grouped.sum()
expected = mframe.groupby(key.astype("O")).sum()
tm.assert_frame_equal(result, expected)
# GH 3911, mixed frame non-conversion
df = df_mixed_floats.copy()
df["value"] = range(len(df))
def max_value(group):
return group.loc[group["value"].idxmax()]
applied = df.groupby("A").apply(max_value)
result = applied.dtypes
expected = Series(
[np.dtype("object")] * 2 + [np.dtype("float64")] * 2 + [np.dtype("int64")],
index=["A", "B", "C", "D", "value"],
)
tm.assert_series_equal(result, expected)
def test_groupby_return_type():
# GH2893, return a reduced type
df1 = DataFrame(
[
{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 2, "val2": 27},
{"val1": 2, "val2": 12},
]
)
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
with tm.assert_produces_warning(FutureWarning):
result = df1.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
df2 = DataFrame(
[
{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 1, "val2": 27},
{"val1": 1, "val2": 12},
]
)
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
with tm.assert_produces_warning(FutureWarning):
result = df2.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
# GH3596, return a consistent type (regression in 0.11 from 0.10.1)
df = DataFrame([[1, 1], [1, 1]], columns=["X", "Y"])
with tm.assert_produces_warning(FutureWarning):
result = df.groupby("X", squeeze=False).count()
assert isinstance(result, DataFrame)
def test_inconsistent_return_type():
# GH5592
# inconsistent return type
df = DataFrame(
dict(
A=["Tiger", "Tiger", "Tiger", "Lamb", "Lamb", "Pony", "Pony"],
B=Series(np.arange(7), dtype="int64"),
C=date_range("20130101", periods=7),
)
)
def f(grp):
return grp.iloc[0]
expected = df.groupby("A").first()[["B"]]
result = df.groupby("A").apply(f)[["B"]]
tm.assert_frame_equal(result, expected)
def f(grp):
if grp.name == "Tiger":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["B"]]
e = expected.copy()
e.loc["Tiger"] = np.nan
tm.assert_frame_equal(result, e)
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["B"]]
e = expected.copy()
e.loc["Pony"] = np.nan
tm.assert_frame_equal(result, e)
# 5592 revisited, with datetimes
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["C"]]
e = df.groupby("A").first()[["C"]]
e.loc["Pony"] = pd.NaT
tm.assert_frame_equal(result, e)
# scalar outputs
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0].loc["C"]
result = df.groupby("A").apply(f)
e = df.groupby("A").first()["C"].copy()
e.loc["Pony"] = np.nan
e.name = None
tm.assert_series_equal(result, e)
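# Extra positional and keyword arguments passed to agg/apply/transform should reach the
# aggregation function unchanged, so the parametrized percentile calls below are
# expected to match quantile(0.8).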
def test_pass_args_kwargs(ts, tsframe):
def f(x, q=None, axis=0):
return np.percentile(x, q, axis=axis)
g = lambda x: np.percentile(x, 80, axis=0)
# Series
ts_grouped = ts.groupby(lambda x: x.month)
agg_result = ts_grouped.agg(np.percentile, 80, axis=0)
apply_result = ts_grouped.apply(np.percentile, 80, axis=0)
trans_result = ts_grouped.transform(np.percentile, 80, axis=0)
agg_expected = ts_grouped.quantile(0.8)
trans_expected = ts_grouped.transform(g)
tm.assert_series_equal(apply_result, agg_expected)
tm.assert_series_equal(agg_result, agg_expected)
tm.assert_series_equal(trans_result, trans_expected)
agg_result = ts_grouped.agg(f, q=80)
apply_result = ts_grouped.apply(f, q=80)
trans_result = ts_grouped.transform(f, q=80)
tm.assert_series_equal(agg_result, agg_expected)
tm.assert_series_equal(apply_result, agg_expected)
tm.assert_series_equal(trans_result, trans_expected)
# DataFrame
df_grouped = tsframe.groupby(lambda x: x.month)
agg_result = df_grouped.agg(np.percentile, 80, axis=0)
apply_result = df_grouped.apply(DataFrame.quantile, 0.8)
expected = df_grouped.quantile(0.8)
tm.assert_frame_equal(apply_result, expected, check_names=False)
tm.assert_frame_equal(agg_result, expected)
agg_result = df_grouped.agg(f, q=80)
apply_result = df_grouped.apply(DataFrame.quantile, q=0.8)
tm.assert_frame_equal(agg_result, expected)
tm.assert_frame_equal(apply_result, expected, check_names=False)
def test_len():
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
assert len(grouped) == len(df)
grouped = df.groupby([lambda x: x.year, lambda x: x.month])
expected = len({(x.year, x.month) for x in df.index})
assert len(grouped) == expected
# issue 11016
df = pd.DataFrame(dict(a=[np.nan] * 3, b=[1, 2, 3]))
assert len(df.groupby(("a"))) == 0
assert len(df.groupby(("b"))) == 3
assert len(df.groupby(["a", "b"])) == 3
def test_basic_regression():
# regression
result = Series([1.0 * x for x in list(range(1, 10)) * 10])
data = np.random.random(1100) * 10.0
groupings = Series(data)
grouped = result.groupby(groupings)
grouped.mean()
@pytest.mark.parametrize(
"dtype", ["float64", "float32", "int64", "int32", "int16", "int8"]
)
def test_with_na_groups(dtype):
index = Index(np.arange(10))
values = Series(np.ones(10), index, dtype=dtype)
labels = Series(
[np.nan, "foo", "bar", "bar", np.nan, np.nan, "bar", "bar", np.nan, "foo"],
index=index,
)
# this SHOULD be an int
grouped = values.groupby(labels)
agged = grouped.agg(len)
expected = Series([4, 2], index=["bar", "foo"])
tm.assert_series_equal(agged, expected, check_dtype=False)
# assert issubclass(agged.dtype.type, np.integer)
# explicitly return a float from my function
def f(x):
return float(len(x))
agged = grouped.agg(f)
expected = Series([4, 2], index=["bar", "foo"])
tm.assert_series_equal(agged, expected, check_dtype=False)
assert issubclass(agged.dtype.type, np.dtype(dtype).type)
def test_indices_concatenation_order():
# GH 2808
def f1(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(levels=[[]] * 2, codes=[[]] * 2, names=["b", "c"])
res = DataFrame(columns=["a"], index=multiindex)
return res
else:
y = y.set_index(["b", "c"])
return y
def f2(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
return DataFrame()
else:
y = y.set_index(["b", "c"])
return y
def f3(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(
levels=[[]] * 2, codes=[[]] * 2, names=["foo", "bar"]
)
res = DataFrame(columns=["a", "b"], index=multiindex)
return res
else:
return y
df = DataFrame({"a": [1, 2, 2, 2], "b": range(4), "c": range(5, 9)})
df2 = DataFrame({"a": [3, 2, 2, 2], "b": range(4), "c": range(5, 9)})
# correct result
result1 = df.groupby("a").apply(f1)
result2 = df2.groupby("a").apply(f1)
tm.assert_frame_equal(result1, result2)
# should fail (not the same number of levels)
msg = "Cannot concat indices that do not have the same number of levels"
with pytest.raises(AssertionError, match=msg):
df.groupby("a").apply(f2)
with pytest.raises(AssertionError, match=msg):
df2.groupby("a").apply(f2)
# should fail (incorrect shape)
with pytest.raises(AssertionError, match=msg):
df.groupby("a").apply(f3)
with pytest.raises(AssertionError, match=msg):
df2.groupby("a").apply(f3)
def test_attr_wrapper(ts):
grouped = ts.groupby(lambda x: x.weekday())
result = grouped.std()
expected = grouped.agg(lambda x: np.std(x, ddof=1))
tm.assert_series_equal(result, expected)
# this is pretty cool
result = grouped.describe()
expected = {name: gp.describe() for name, gp in grouped}
expected = DataFrame(expected).T
tm.assert_frame_equal(result, expected)
    # get attribute
    result = grouped.dtype
    expected = grouped.agg(lambda x: x.dtype)
    tm.assert_series_equal(result, expected)
    # make sure raises error
msg = "'SeriesGroupBy' object has no attribute 'foo'"
with pytest.raises(AttributeError, match=msg):
getattr(grouped, "foo")
def test_frame_groupby(tsframe):
grouped = tsframe.groupby(lambda x: x.weekday())
# aggregate
aggregated = grouped.aggregate(np.mean)
assert len(aggregated) == 5
assert len(aggregated.columns) == 4
# by string
tscopy = tsframe.copy()
tscopy["weekday"] = [x.weekday() for x in tscopy.index]
stragged = tscopy.groupby("weekday").aggregate(np.mean)
tm.assert_frame_equal(stragged, aggregated, check_names=False)
# transform
grouped = tsframe.head(30).groupby(lambda x: x.weekday())
transformed = grouped.transform(lambda x: x - x.mean())
assert len(transformed) == 30
assert len(transformed.columns) == 4
# transform propagate
transformed = grouped.transform(lambda x: x.mean())
for name, group in grouped:
mean = group.mean()
for idx in group.index:
tm.assert_series_equal(transformed.xs(idx), mean, check_names=False)
# iterate
for weekday, group in grouped:
assert group.index[0].weekday() == weekday
# groups / group_indices
groups = grouped.groups
indices = grouped.indices
for k, v in groups.items():
samething = tsframe.index.take(indices[k])
assert (samething == v).all()
def test_frame_groupby_columns(tsframe):
mapping = {"A": 0, "B": 0, "C": 1, "D": 1}
grouped = tsframe.groupby(mapping, axis=1)
# aggregate
aggregated = grouped.aggregate(np.mean)
assert len(aggregated) == len(tsframe)
assert len(aggregated.columns) == 2
# transform
tf = lambda x: x - x.mean()
groupedT = tsframe.T.groupby(mapping, axis=0)
tm.assert_frame_equal(groupedT.transform(tf).T, grouped.transform(tf))
# iterate
for k, v in grouped:
assert len(v.columns) == 2
def test_frame_set_name_single(df):
grouped = df.groupby("A")
result = grouped.mean()
assert result.index.name == "A"
result = df.groupby("A", as_index=False).mean()
assert result.index.name != "A"
result = grouped.agg(np.mean)
assert result.index.name == "A"
result = grouped.agg({"C": np.mean, "D": np.std})
assert result.index.name == "A"
result = grouped["C"].mean()
assert result.index.name == "A"
result = grouped["C"].agg(np.mean)
assert result.index.name == "A"
result = grouped["C"].agg([np.mean, np.std])
assert result.index.name == "A"
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped["C"].agg({"foo": np.mean, "bar": np.std})
def test_multi_func(df):
col1 = df["A"]
col2 = df["B"]
grouped = df.groupby([col1.get, col2.get])
agged = grouped.mean()
expected = df.groupby(["A", "B"]).mean()
# TODO groupby get drops names
tm.assert_frame_equal(
agged.loc[:, ["C", "D"]], expected.loc[:, ["C", "D"]], check_names=False
)
# some "groups" with no data
df = DataFrame(
{
"v1": np.random.randn(6),
"v2": np.random.randn(6),
"k1": np.array(["b", "b", "b", "a", "a", "a"]),
"k2": np.array(["1", "1", "1", "2", "2", "2"]),
},
index=["one", "two", "three", "four", "five", "six"],
)
# only verify that it works for now
grouped = df.groupby(["k1", "k2"])
grouped.agg(np.sum)
def test_multi_key_multiple_functions(df):
grouped = df.groupby(["A", "B"])["C"]
agged = grouped.agg([np.mean, np.std])
expected = DataFrame({"mean": grouped.agg(np.mean), "std": grouped.agg(np.std)})
tm.assert_frame_equal(agged, expected)
def test_frame_multi_key_function_list():
data = DataFrame(
{
"A": [
"foo",
"foo",
"foo",
"foo",
"bar",
"bar",
"bar",
"bar",
"foo",
"foo",
"foo",
],
"B": [
"one",
"one",
"one",
"two",
"one",
"one",
"one",
"two",
"two",
"two",
"one",
],
"C": [
"dull",
"dull",
"shiny",
"dull",
"dull",
"shiny",
"shiny",
"dull",
"shiny",
"shiny",
"shiny",
],
"D": np.random.randn(11),
"E": np.random.randn(11),
"F": np.random.randn(11),
}
)
grouped = data.groupby(["A", "B"])
funcs = [np.mean, np.std]
agged = grouped.agg(funcs)
expected = pd.concat(
[grouped["D"].agg(funcs), grouped["E"].agg(funcs), grouped["F"].agg(funcs)],
keys=["D", "E", "F"],
axis=1,
)
assert isinstance(agged.index, MultiIndex)
assert isinstance(expected.index, MultiIndex)
tm.assert_frame_equal(agged, expected)
@pytest.mark.parametrize("op", [lambda x: x.sum(), lambda x: x.mean()])
def test_groupby_multiple_columns(df, op):
data = df
grouped = data.groupby(["A", "B"])
result1 = op(grouped)
keys = []
values = []
for n1, gp1 in data.groupby("A"):
for n2, gp2 in gp1.groupby("B"):
keys.append((n1, n2))
values.append(op(gp2.loc[:, ["C", "D"]]))
mi = MultiIndex.from_tuples(keys, names=["A", "B"])
expected = pd.concat(values, axis=1).T
expected.index = mi
# a little bit crude
for col in ["C", "D"]:
result_col = op(grouped[col])
pivoted = result1[col]
exp = expected[col]
tm.assert_series_equal(result_col, exp)
tm.assert_series_equal(pivoted, exp)
# test single series works the same
result = data["C"].groupby([data["A"], data["B"]]).mean()
expected = data.groupby(["A", "B"]).mean()["C"]
tm.assert_series_equal(result, expected)
def test_as_index_select_column():
# GH 5764
df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
result = df.groupby("A", as_index=False)["B"].get_group(1)
expected = pd.Series([2, 4], name="B")
tm.assert_series_equal(result, expected)
result = df.groupby("A", as_index=False)["B"].apply(lambda x: x.cumsum())
expected = pd.Series(
[2, 6, 6], name="B", index=pd.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 2)])
)
tm.assert_series_equal(result, expected)
def test_groupby_as_index_select_column_sum_empty_df():
# GH 35246
df = DataFrame(columns=["A", "B", "C"])
left = df.groupby(by="A", as_index=False)["B"].sum()
assert type(left) is DataFrame
assert left.to_dict() == {"A": {}, "B": {}}
def test_groupby_as_index_agg(df):
grouped = df.groupby("A", as_index=False)
# single-key
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
result2 = grouped.agg({"C": np.mean, "D": np.sum})
expected2 = grouped.mean()
expected2["D"] = grouped.sum()["D"]
tm.assert_frame_equal(result2, expected2)
grouped = df.groupby("A", as_index=True)
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped["C"].agg({"Q": np.sum})
# multi-key
grouped = df.groupby(["A", "B"], as_index=False)
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
result2 = grouped.agg({"C": np.mean, "D": np.sum})
expected2 = grouped.mean()
expected2["D"] = grouped.sum()["D"]
tm.assert_frame_equal(result2, expected2)
expected3 = grouped["C"].sum()
expected3 = DataFrame(expected3).rename(columns={"C": "Q"})
result3 = grouped["C"].agg({"Q": np.sum})
tm.assert_frame_equal(result3, expected3)
# GH7115 & GH8112 & GH8582
df = DataFrame(np.random.randint(0, 100, (50, 3)), columns=["jim", "joe", "jolie"])
ts = Series(np.random.randint(5, 10, 50), name="jim")
gr = df.groupby(ts)
gr.nth(0) # invokes set_selection_from_grouper internally
tm.assert_frame_equal(gr.apply(sum), df.groupby(ts).apply(sum))
for attr in ["mean", "max", "count", "idxmax", "cumsum", "all"]:
gr = df.groupby(ts, as_index=False)
left = getattr(gr, attr)()
gr = df.groupby(ts.values, as_index=True)
right = getattr(gr, attr)().reset_index(drop=True)
tm.assert_frame_equal(left, right)
def test_ops_not_as_index(reduction_func):
# GH 10355, 21090
# Using as_index=False should not modify grouped column
if reduction_func in ("corrwith",):
pytest.skip("Test not applicable")
if reduction_func in ("nth", "ngroup",):
pytest.skip("Skip until behavior is determined (GH #5755)")
df = DataFrame(np.random.randint(0, 5, size=(100, 2)), columns=["a", "b"])
expected = getattr(df.groupby("a"), reduction_func)()
if reduction_func == "size":
expected = expected.rename("size")
expected = expected.reset_index()
g = df.groupby("a", as_index=False)
result = getattr(g, reduction_func)()
tm.assert_frame_equal(result, expected)
result = g.agg(reduction_func)
tm.assert_frame_equal(result, expected)
result = getattr(g["b"], reduction_func)()
tm.assert_frame_equal(result, expected)
result = g["b"].agg(reduction_func)
tm.assert_frame_equal(result, expected)
def test_as_index_series_return_frame(df):
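    # selecting a single column from an as_index=False groupby and aggregating
    # should still return a DataFrame that carries the grouping column(s)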
grouped = df.groupby("A", as_index=False)
grouped2 = df.groupby(["A", "B"], as_index=False)
result = grouped["C"].agg(np.sum)
expected = grouped.agg(np.sum).loc[:, ["A", "C"]]
assert isinstance(result, DataFrame)
tm.assert_frame_equal(result, expected)
result2 = grouped2["C"].agg(np.sum)
expected2 = grouped2.agg(np.sum).loc[:, ["A", "B", "C"]]
assert isinstance(result2, DataFrame)
tm.assert_frame_equal(result2, expected2)
result = grouped["C"].sum()
expected = grouped.sum().loc[:, ["A", "C"]]
assert isinstance(result, DataFrame)
tm.assert_frame_equal(result, expected)
result2 = grouped2["C"].sum()
expected2 = grouped2.sum().loc[:, ["A", "B", "C"]]
assert isinstance(result2, DataFrame)
tm.assert_frame_equal(result2, expected2)
def test_as_index_series_column_slice_raises(df):
# GH15072
grouped = df.groupby("A", as_index=False)
msg = r"Column\(s\) C already selected"
with pytest.raises(IndexError, match=msg):
grouped["C"].__getitem__("D")
def test_groupby_as_index_cython(df):
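    # as_index=False should equal the indexed result with the group keys
    # re-inserted as leading columns and a default integer index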
data = df
# single-key
grouped = data.groupby("A", as_index=False)
result = grouped.mean()
expected = data.groupby(["A"]).mean()
expected.insert(0, "A", expected.index)
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
# multi-key
grouped = data.groupby(["A", "B"], as_index=False)
result = grouped.mean()
expected = data.groupby(["A", "B"]).mean()
arrays = list(zip(*expected.index.values))
expected.insert(0, "A", arrays[0])
expected.insert(1, "B", arrays[1])
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
def test_groupby_as_index_series_scalar(df):
grouped = df.groupby(["A", "B"], as_index=False)
# GH #421
result = grouped["C"].agg(len)
expected = grouped.agg(len).loc[:, ["A", "B", "C"]]
tm.assert_frame_equal(result, expected)
def test_groupby_as_index_corner(df, ts):
msg = "as_index=False only valid with DataFrame"
with pytest.raises(TypeError, match=msg):
ts.groupby(lambda x: x.weekday(), as_index=False)
msg = "as_index=False only valid for axis=0"
with pytest.raises(ValueError, match=msg):
df.groupby(lambda x: x.lower(), as_index=False, axis=1)
def test_groupby_multiple_key(df):
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
agged = grouped.sum()
tm.assert_almost_equal(df.values, agged.values)
grouped = df.T.groupby(
[lambda x: x.year, lambda x: x.month, lambda x: x.day], axis=1
)
agged = grouped.agg(lambda x: x.sum())
tm.assert_index_equal(agged.index, df.columns)
tm.assert_almost_equal(df.T.values, agged.values)
agged = grouped.agg(lambda x: x.sum())
tm.assert_almost_equal(df.T.values, agged.values)
def test_groupby_multi_corner(df):
# test that having an all-NA column doesn't mess you up
df = df.copy()
df["bad"] = np.nan
agged = df.groupby(["A", "B"]).mean()
expected = df.groupby(["A", "B"]).mean()
expected["bad"] = np.nan
tm.assert_frame_equal(agged, expected)
def test_omit_nuisance(df):
grouped = df.groupby("A")
result = grouped.mean()
expected = df.loc[:, ["A", "C", "D"]].groupby("A").mean()
tm.assert_frame_equal(result, expected)
agged = grouped.agg(np.mean)
exp = grouped.mean()
tm.assert_frame_equal(agged, exp)
df = df.loc[:, ["A", "C", "D"]]
df["E"] = datetime.now()
grouped = df.groupby("A")
result = grouped.agg(np.sum)
expected = grouped.sum()
tm.assert_frame_equal(result, expected)
# won't work with axis = 1
grouped = df.groupby({"A": 0, "C": 0, "D": 1, "E": 1}, axis=1)
msg = "reduction operation 'sum' not allowed for this dtype"
with pytest.raises(TypeError, match=msg):
grouped.agg(lambda x: x.sum(0, numeric_only=False))
def test_omit_nuisance_python_multiple(three_group):
grouped = three_group.groupby(["A", "B"])
agged = grouped.agg(np.mean)
exp = grouped.mean()
tm.assert_frame_equal(agged, exp)
def test_empty_groups_corner(mframe):
# handle empty groups
df = DataFrame(
{
"k1": np.array(["b", "b", "b", "a", "a", "a"]),
"k2": np.array(["1", "1", "1", "2", "2", "2"]),
"k3": ["foo", "bar"] * 3,
"v1": np.random.randn(6),
"v2": np.random.randn(6),
}
)
grouped = df.groupby(["k1", "k2"])
result = grouped.agg(np.mean)
expected = grouped.mean()
    tm.assert_frame_equal(result, expected)
from __future__ import division
from datetime import datetime
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
import pandas as pd
import numpy as np
from nose.tools import assert_almost_equal as aae
import bt
import bt.algos as algos
def test_algo_name():
class TestAlgo(algos.Algo):
pass
actual = TestAlgo()
assert actual.name == 'TestAlgo'
class DummyAlgo(algos.Algo):
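    # Minimal Algo stub: records whether it was called and returns a fixed
    # value, so tests can observe AlgoStack short-circuiting behaviour.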
def __init__(self, return_value=True):
self.return_value = return_value
self.called = False
def __call__(self, target):
self.called = True
return self.return_value
def test_algo_stack():
algo1 = DummyAlgo(return_value=True)
algo2 = DummyAlgo(return_value=False)
algo3 = DummyAlgo(return_value=True)
target = mock.MagicMock()
stack = bt.AlgoStack(algo1, algo2, algo3)
actual = stack(target)
assert not actual
assert algo1.called
assert algo2.called
assert not algo3.called
def test_run_once():
algo = algos.RunOnce()
assert algo(None)
assert not algo(None)
assert not algo(None)
def test_run_period():
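    # By default RunPeriod fires on the first supplied date (the backtest
    # prepends an extra initial day); the run_on_first_date /
    # run_on_end_of_period / run_on_last_date switches flip that behaviour.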
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunPeriod()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
target.now = None
assert not algo(target)
# run on first date
target.now = dts[0]
assert not algo(target)
# run on first supplied date
target.now = dts[1]
assert algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert not algo(target)
algo = algos.RunPeriod(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
# run on first date
target.now = dts[0]
assert not algo(target)
# first supplied date
target.now = dts[1]
assert not algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert algo(target)
# date not in index
target.now = datetime(2009, 2, 15)
assert not algo(target)
def test_run_daily():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunDaily()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('',[algo]),
data
)
target.data = backtest.data
target.now = dts[1]
assert algo(target)
def test_run_weekly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunWeekly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert not algo(target)
# new week
target.now = dts[3]
assert algo(target)
algo = algos.RunWeekly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert algo(target)
# new week
target.now = dts[3]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8),datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_monthly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunMonthly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert not algo(target)
# new month
target.now = dts[31]
assert algo(target)
algo = algos.RunMonthly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert algo(target)
# new month
target.now = dts[31]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_quarterly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunQuarterly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert not algo(target)
# new quarter
target.now = dts[90]
assert algo(target)
algo = algos.RunQuarterly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert algo(target)
# new quarter
target.now = dts[90]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_yearly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunYearly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert not algo(target)
# new year
target.now = dts[365]
assert algo(target)
algo = algos.RunYearly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert algo(target)
# new year
target.now = dts[365]
assert not algo(target)
def test_run_on_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunOnDate('2010-01-01', '2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
def test_rebalance():
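    # Rebalance should allocate the full strategy value according to
    # s.temp['weights'], closing out positions that are no longer weighted.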
algo = algos.Rebalance()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c1 = s['c1']
assert c1.value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000
assert c2.position == 10
assert c2.weight == 1.
def test_rebalance_with_commissions():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 999
assert s.capital == 99
c1 = s['c1']
assert c1.value == 900
assert c1.position == 9
assert c1.weight == 900 / 999.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 997
assert s.capital == 97
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 900
assert c2.position == 9
assert c2.weight == 900. / 997
def test_rebalance_with_cash():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
# set cash amount
s.temp['cash'] = 0.5
assert algo(s)
assert s.value == 999
assert s.capital == 599
c1 = s['c1']
assert c1.value == 400
assert c1.position == 4
assert c1.weight == 400.0 / 999
s.temp['weights'] = {'c2': 1}
# change cash amount
s.temp['cash'] = 0.25
assert algo(s)
assert s.value == 997
assert s.capital == 297
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 700
assert c2.position == 7
assert c2.weight == 700.0 / 997
def test_select_all():
algo = algos.SelectAll()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
    # make sure we don't keep columns that are NaN on the current date
s.update(dts[1])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
    # with include_no_data=True, both columns are selected again (2)
algo = algos.SelectAll(include_no_data=True)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_weight_equally():
algo = algos.WeighEqually()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.5
assert 'c2' in weights
assert weights['c2'] == 0.5
def test_weight_specified():
algo = algos.WeighSpecified(c1=0.6, c2=0.4)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.6
assert 'c2' in weights
assert weights['c2'] == 0.4
def test_select_has_data():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=10)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].ix[dts[0]] = np.nan
data['c1'].ix[dts[1]] = np.nan
s.setup(data)
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
def test_select_has_data_preselected():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].ix[dts[0]] = np.nan
data['c1'].ix[dts[1]] = np.nan
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 0
@mock.patch('bt.ffn.calc_erc_weights')
def test_weigh_erc(mock_erc):
algo = algos.WeighERC(lookback=pd.DateOffset(days=5))
mock_erc.return_value = pd.Series({'c1': 0.3, 'c2': 0.7})
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
assert mock_erc.called
rets = mock_erc.call_args[0][0]
assert len(rets) == 4
assert 'c1' in rets
assert 'c2' in rets
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.3
assert weights['c2'] == 0.7
def test_weigh_inv_vol():
algo = algos.WeighInvVol(lookback=pd.DateOffset(days=5))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
# high vol c1
data['c1'].ix[dts[1]] = 105
data['c1'].ix[dts[2]] = 95
data['c1'].ix[dts[3]] = 105
data['c1'].ix[dts[4]] = 95
# low vol c2
data['c2'].ix[dts[1]] = 100.1
data['c2'].ix[dts[2]] = 99.9
data['c2'].ix[dts[3]] = 100.1
data['c2'].ix[dts[4]] = 99.9
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c2'] > weights['c1']
aae(weights['c1'], 0.020, 3)
aae(weights['c2'], 0.980, 3)
@mock.patch('bt.ffn.calc_mean_var_weights')
def test_weigh_mean_var(mock_mv):
algo = algos.WeighMeanVar(lookback=pd.DateOffset(days=5))
mock_mv.return_value = pd.Series({'c1': 0.3, 'c2': 0.7})
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
assert mock_mv.called
rets = mock_mv.call_args[0][0]
assert len(rets) == 4
assert 'c1' in rets
assert 'c2' in rets
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.3
assert weights['c2'] == 0.7
def test_stat_total_return():
algo = algos.StatTotalReturn(lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].ix[dts[2]] = 105
data['c2'].ix[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
stat = s.temp['stat']
assert len(stat) == 2
assert stat['c1'] == 105.0 / 100 - 1
assert stat['c2'] == 95.0 / 100 - 1
def test_select_n():
algo = algos.SelectN(n=1, sort_descending=True)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].ix[dts[2]] = 105
data['c2'].ix[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['stat'] = data.calc_total_return()
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
algo = algos.SelectN(n=1, sort_descending=False)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
    # returns the 2 securities we have when all_or_none is False
algo = algos.SelectN(n=3, sort_descending=False)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
    # returns 0 selections when all_or_none is True and n cannot be met
algo = algos.SelectN(n=3, sort_descending=False, all_or_none=True)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 0
def test_select_n_perc():
algo = algos.SelectN(n=0.5, sort_descending=True)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].ix[dts[2]] = 105
data['c2'].ix[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['stat'] = data.calc_total_return()
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
def test_select_momentum():
algo = algos.SelectMomentum(n=1, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].ix[dts[2]] = 105
data['c2'].ix[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
actual = s.temp['selected']
assert len(actual) == 1
assert 'c1' in actual
def test_limit_deltas():
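    # LimitDeltas caps how far each target weight may move away from the
    # current weight; the limit can be a single number or a per-security dict.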
algo = algos.LimitDeltas(0.1)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.temp['weights'] = {'c1': 1}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.1
s.temp['weights'] = {'c1': 0.05}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.05
s.temp['weights'] = {'c1': 0.5, 'c2': 0.5}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == 0.1
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == -0.1
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas({'c1': 0.1})
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == -0.5
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas({'c1': 0.1, 'c2': 0.3})
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == -0.3
    # set existing weights
s.children['c1'] = bt.core.SecurityBase('c1')
s.children['c1']._weight = 0.3
s.children['c2'] = bt.core.SecurityBase('c2')
s.children['c2']._weight = -0.7
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.4
assert w['c2'] == -0.6
def test_rebalance_over_time():
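    # RebalanceOverTime(n=2) should spread the move to the target weights over
    # two calls: half of the gap on the first call, the remainder on the
    # second, and no further rebalances once the target is reached.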
target = mock.MagicMock()
rb = mock.MagicMock()
algo = algos.RebalanceOverTime(n=2)
# patch in rb function
algo._rb = rb
target.temp = {}
target.temp['weights'] = {'a': 1, 'b': 0}
a = mock.MagicMock()
a.weight = 0.
b = mock.MagicMock()
b.weight = 1.
target.children = {'a': a, 'b': b}
assert algo(target)
w = target.temp['weights']
assert len(w) == 2
assert w['a'] == 0.5
assert w['b'] == 0.5
assert rb.called
called_tgt = rb.call_args[0][0]
called_tgt_w = called_tgt.temp['weights']
assert len(called_tgt_w) == 2
assert called_tgt_w['a'] == 0.5
assert called_tgt_w['b'] == 0.5
# update weights for next call
a.weight = 0.5
b.weight = 0.5
    # clear out temp - same as the Strategy would between calls
target.temp = {}
assert algo(target)
w = target.temp['weights']
assert len(w) == 2
assert w['a'] == 1.
assert w['b'] == 0.
assert rb.call_count == 2
# update weights for next call
# should do nothing now
a.weight = 1
b.weight = 0
    # clear out temp - same as the Strategy would between calls
target.temp = {}
assert algo(target)
# no diff in call_count since last time
assert rb.call_count == 2
def test_require():
target = mock.MagicMock()
target.temp = {}
algo = algos.Require(lambda x: len(x) > 0, 'selected')
assert not algo(target)
target.temp['selected'] = []
assert not algo(target)
target.temp['selected'] = ['a', 'b']
assert algo(target)
def test_run_every_n_periods():
target = mock.MagicMock()
target.temp = {}
algo = algos.RunEveryNPeriods(n=3, offset=0)
target.now = pd.to_datetime('2010-01-01')
assert algo(target)
# run again w/ no date change should not trigger
assert not algo(target)
target.now = pd.to_datetime('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
target.now = pd.to_datetime('2010-01-04')
assert algo(target)
target.now = pd.to_datetime('2010-01-05')
assert not algo(target)
def test_run_every_n_periods_offset():
target = mock.MagicMock()
target.temp = {}
algo = algos.RunEveryNPeriods(n=3, offset=1)
target.now = pd.to_datetime('2010-01-01')
assert not algo(target)
# run again w/ no date change should not trigger
assert not algo(target)
target.now = pd.to_datetime('2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
target.now = pd.to_datetime('2010-01-04')
assert not algo(target)
target.now = pd.to_datetime('2010-01-05')
assert algo(target)
def test_or():
target = mock.MagicMock()
target.temp = {}
    # algos that run on 2018-01-02, 2018-01-03 and 2018-01-04
runOnDateAlgo = algos.RunOnDate(pd.to_datetime('2018-01-02'))
runOnDateAlgo2 = algos.RunOnDate(pd.to_datetime('2018-01-03'))
runOnDateAlgo3 = algos.RunOnDate(pd.to_datetime('2018-01-04'))
runOnDateAlgo4 = algos.RunOnDate(pd.to_datetime('2018-01-04'))
orAlgo = algos.Or([runOnDateAlgo, runOnDateAlgo2, runOnDateAlgo3, runOnDateAlgo4])
    # verify it returns False when none of the algos fires
target.now = pd.to_datetime('2018-01-01')
assert not orAlgo(target)
# verify it returns true when the first is true
target.now = pd.to_datetime('2018-01-02')
assert orAlgo(target)
# verify it returns true when the second is true
target.now = pd.to_datetime('2018-01-03')
assert orAlgo(target)
# verify it returns true when both algos return true
target.now = pd.to_datetime('2018-01-04')
assert orAlgo(target)
def test_TargetVol():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=7)
    data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
import os
import pickle
import pathlib
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
import joblib
PATH = pathlib.Path(os.path.abspath(os.path.dirname(__file__)))
BIN_PATH = PATH / "bin"
DATA_PATH = PATH / "_data"
NO_FEATURES = ['id', 'tile', 'cnt', 'ra_k', 'dec_k']
def create_dir(path):
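    # Refuse to overwrite an existing output directory, then create it fresh.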
path = pathlib.Path(path)
if path.is_dir():
raise IOError("Please remove the directory {}".format(path))
os.makedirs(str(path))
def scale(df):
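    # Standardize every column not listed in NO_FEATURES and return the fitted
    # scaler together with the scaled copy of the DataFrame.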
df = df.copy()
features = [c for c in df.columns.values if c not in NO_FEATURES]
scaler = StandardScaler()
df[features] = scaler.fit_transform(df[features].values)
return scaler, df
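# Hedged sketch (assumption: the tail of build() is truncated below, so the
# exact persistence step is not visible here). Per tile, the merged frame
# would plausibly be scaled and persisted together with its fitted scaler:
#   scaler, scaled = scale(merged)
#   scaled.to_pickle(scaled_outpath)
#   joblib.dump(scaler, scaler_outpath)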
def build():
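    # Group the per-part CSVs in BIN_PATH by tile name and derive the output
    # file names for each tile's merged, scaled and scaler pickles before
    # reading and concatenating the parts.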
create_dir(DATA_PATH)
by_tile = {}
tile_outs, scaled_outs, scalers_outs = {}, {}, {}
for path in BIN_PATH.glob("*.csv.bz2"):
tilename = path.name.split("_", 2)[1]
if tilename not in by_tile:
by_tile[tilename] = []
tile_outs[tilename] = path.name.split("_-", 1)[0] + ".pkl.bz2"
scaled_outs[tilename] = (
path.name.split("_-", 1)[0] + "_scaled.pkl.bz2")
scalers_outs[tilename] = (
"scaler_" + path.name.split("_-", 1)[0] + ".pkl.bz2")
by_tile[tilename].append(path)
merged = {}
for tile, parts in by_tile.items():
print(f">>> TILE {tile}")
tile_outpath = DATA_PATH / tile_outs[tile]
scaled_outpath = DATA_PATH / scaled_outs[tile]
scaler_outpath = DATA_PATH / scalers_outs[tile]
merged = None
for p in parts:
print(f" !!! Reading {p}")
            df = pd.read_csv(p)
import discord
import requests
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import random
TOKEN = 'YOUR TOKEN HERE'
KEY = 'YOUR API KEY HERE'
client = discord.Client()
command = '!COVID:'
@client.event
async def on_ready():
await client.change_presence(activity = discord.Activity(type = discord.ActivityType.listening, name = "scanning COVID data sources..."))
@client.event
async def on_message(message):
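    # Messages of the form '!COVID:<state>' trigger a lookup of that state's
    # timeseries from the Covid Act Now API.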
if message.content.startswith(command):
state = message.content.replace(command, '')
try:
url = 'https://api.covidactnow.org/v2/state/{}.timeseries.json?apiKey={}'.format(state, KEY)
response = requests.get(url).json()
            data = pd.DataFrame(response['metricsTimeseries'])
import sys
sys.path.append('../')
from matplotlib import figure
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.collections import PatchCollection
from matplotlib.colors import ListedColormap
import os
from tqdm import tqdm
### Config folders
config_data = pd.read_csv('config.csv', sep=',', header=None, index_col=0)
figures_path = config_data.loc['figures_dir_art'][1]
results_path = config_data.loc['results_dir'][1]
results_test_path = config_data.loc['results_test_dir'][1]
ages_data_path = config_data.loc['bogota_age_data_dir'][1]
houses_data_path = config_data.loc['bogota_houses_data_dir'][1]
### Arguments
import argparse
parser = argparse.ArgumentParser(description='Dynamics visualization.')
parser.add_argument('--population', default=10000, type=int,
                    help='Specify the number of individuals')
parser.add_argument('--type_sim', default='intervention', type=str,
                    help='Specify the type of simulation to plot')
args = parser.parse_args()
number_nodes = args.population
pop = number_nodes
### Read functions
def load_results_ints(type_res,n,int_effec,schl_occup,type_mask,frac_people_mask,ventilation,path=results_path):
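    # Load the results CSV of the requested type (e.g. 'soln_cum') for one
    # parameter combination: intervention efficacy, school occupancy, mask
    # type, fraction of people masked and ventilation level.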
read_path = os.path.join(path,'{}_inter_{}_schoolcap_{}_mask_{}_peopleMasked_{}_ventilation_{}_ID_ND_{}.csv'.format(str(n),str(int_effec),
str(schl_occup),type_mask,str(frac_people_mask),str(ventilation),type_res))
read_file = pd.read_csv(read_path)
return read_file
def load_results_ints_test(type_res,n,int_effec,schl_occup,layer,path=results_path):
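    # Same idea, but for the layer-intervention test runs (work / community /
    # all), which are stored under a per-population subfolder.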
read_path = os.path.join(path,str(n),'{}_layerInt_{}_inter_{}_schoolcap_{}_{}.csv'.format(str(n),str(layer),str(int_effec),
str(schl_occup),type_res))
read_file = pd.read_csv(read_path)
return read_file
### Read file
results_path = os.path.join(results_path,'intervention',str(pop))
###------------------------------------------------------------------------------------------------------------------------------------------------------
### Plot proportional areas (each mask type) for each level of ventilation
def nested_circles(data, labels=None, c=None, ax=None,
cmap=None, norm=None, textkw={}):
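    # Draw concentric circles whose areas are proportional to the values in
    # `data`, labelling each ring; intended for the proportional-area mask
    # comparisons sketched in the commented-out block below.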
ax = ax or plt.gca()
data = np.array(data)
R = np.sqrt(data/data.max())
p = [plt.Circle((0,r), radius=r) for r in R[::-1]]
arr = data[::-1] if c is None else np.array(c[::-1])
col = PatchCollection(p, cmap=cmap, norm=norm, array=arr)
ax.add_collection(col)
ax.axis("off")
ax.set_aspect("equal")
ax.autoscale()
if labels is not None:
kw = dict(color="k", va="center", ha="center")
kw.update(textkw)
ax.text(0, R[0], labels[0], **kw)
for i in range(1, len(R)):
ax.text(0, R[i]+R[i-1], labels[i], **kw)
return col
# from pylab import *
# cmap = cm.get_cmap('gist_heat_r', 5) # PiYG
# colors_plt = []
# for i in range(cmap.N):
# rgba = cmap(i)
# # rgb2hex accepts rgb or rgba
# colors_plt.append(matplotlib.colors.rgb2hex(rgba))
# colors_plt = colors_plt[1:4]
# def plot_examples(cms):
# """
# helper function to plot two colormaps
# """
# np.random.seed(19680801)
# data = np.random.randn(30, 30)
# fig, axs = plt.subplots(1, 2, figsize=(6, 3), constrained_layout=True)
# for [ax, cmap] in zip(axs, cms):
# psm = ax.pcolormesh(data, cmap=cmap, rasterized=True, vmin=-4, vmax=4)
# fig.colorbar(psm, ax=ax)
# plt.show()
# viridisBig = cm.get_cmap('Reds_r', 512)
# newcmp = ListedColormap(viridisBig(np.linspace(0.95, 0.5, 256)))
# school_cap = 0.35
# fraction_people_masked = 1.0
# ventilation_vals = 0.0
# inter_ = 0.4
# masks = ['cloth','surgical','N95']
# masks_labels = ['Cloth','Surgical','N95']
# masks_labels = dict(zip(masks,masks_labels))
# df_list = []
# for m, mask_ in enumerate(masks):
# res_read = load_results_ints('soln_cum',args.population,inter_,school_cap,mask_,fraction_people_masked,ventilation_vals,path=results_path)
# for itr_ in range(10):
# res_read_i = res_read['iter'] == itr_
# res_read_i = pd.DataFrame(res_read[res_read_i])
# end_cases = res_read_i['E'].iloc[-1]
# df_res_i = pd.DataFrame(columns=['iter','mask','frac_mask','interven_eff','ventilation','end_cases'])
# df_res_i['iter'] = [int(itr_)]
# df_res_i['mask'] = masks_labels[mask_]
# df_res_i['frac_mask'] = r'{}%'.format(int(fraction_people_masked*100))
# df_res_i['interven_eff'] = r'{}%'.format(int(inter_*100))
# df_res_i['ventilation'] = str(ventilation_vals)
# df_res_i['end_cases'] = end_cases*100
# df_list.append(df_res_i)
# df_final_E_lowVent = pd.concat(df_list)
# df_final_E_lowVent_meds = df_final_E_lowVent.groupby('mask').median().reset_index()
# percentagesData_E_lowVent_mends = list(df_final_E_lowVent_meds['end_cases'])
# percentagesLabels_E_lowVent_mends = [r'{:.2f}%'.format(end_cases) for end_cases in df_final_E_lowVent_meds['end_cases']]
# nested_circles(percentagesData_E_lowVent_mends,labels=percentagesLabels_E_lowVent_mends,cmap='copper',textkw=dict(fontsize=14))
# plt.show()
# test_vals = [8.420,100-8.420]
# test_labels = list("AB")
# nested_circles(test_vals, labels=test_labels, cmap="copper", textkw=dict(fontsize=14))
# plt.show()
# fig,ax = plt.subplots(1,1,figsize=(7, 6))
# sns.pointplot(ax=ax, data=df_final_E_v, x='end_cases', y='frac_mask', hue='mask', linestyles='',dodge=0.3,palette='plasma',alpha=0.8)
# ax.legend(bbox_to_anchor=(1.02,1)).set_title('')
# plt.setp(ax.get_legend().get_texts(), fontsize='17') # for legend text
# ax.set_xlabel(r'Infections per 10,000',fontsize=17)
# ax.set_ylabel(r'Individuals wearing masks ($\%$)',fontsize=17)
# ax.set_title(r'Total infections | schools at {}$\%$, low ventilation'.format(str(school_cap*100)),fontsize=17)
# plt.xticks(size=16)
# plt.yticks(size=16)
# #plt.xlim([4850,6000])
# save_path = os.path.join(figures_path,'point_plots','totalInfections_n_{}_schoolcap_{}_ventilation_{}_inter_{}.png'.format(str(pop),str(0.35),str(ventilation_vals[0]),str(inter_)))
# plt.savefig(save_path,dpi=400, transparent=False, bbox_inches='tight', pad_inches=0.1 )
# school_cap = 0.35
# fraction_people_masked = 1.0
# ventilation_vals = 15.0
# inter_ = 0.4
# masks = ['cloth','surgical','N95']
# masks_labels = ['Cloth','Surgical','N95']
# masks_labels = dict(zip(masks,masks_labels))
# df_list = []
# for m, mask_ in enumerate(masks):
# res_read = load_results_ints('soln_cum',args.population,inter_,school_cap,mask_,fraction_people_masked,ventilation_vals,path=results_path)
# for itr_ in range(10):
# res_read_i = res_read['iter'] == itr_
# res_read_i = pd.DataFrame(res_read[res_read_i])
# end_cases = res_read_i['E'].iloc[-1]
# df_res_i = pd.DataFrame(columns=['iter','mask','frac_mask','interven_eff','ventilation','end_cases'])
# df_res_i['iter'] = [int(itr_)]
# df_res_i['mask'] = masks_labels[mask_]
# df_res_i['frac_mask'] = r'{}%'.format(int(fraction_people_masked*100))
# df_res_i['interven_eff'] = r'{}%'.format(int(inter_*100))
# df_res_i['ventilation'] = str(ventilation_vals)
# df_res_i['end_cases'] = end_cases*100
# df_list.append(df_res_i)
# df_final_E_highVent = pd.concat(df_list)
# df_final_E_highVent_meds = df_final_E_highVent.groupby('mask').median().reset_index()
# percentagesData_E_lowVent_mends = list(df_final_E_highVent_meds['end_cases'])
# percentagesLabels_E_lowVent_mends = [r'{:.2f}%'.format(end_cases) for end_cases in df_final_E_highVent_meds['end_cases']]
# nested_circles(percentagesData_E_lowVent_mends,labels=percentagesLabels_E_lowVent_mends,cmap='copper',textkw=dict(fontsize=14))
# plt.show()
# test_vals = [30.035,100-30.035]
# test_labels = list("AB")
# nested_circles(test_vals, labels=test_labels, cmap="copper", textkw=dict(fontsize=14))
# plt.show()
###------------------------------------------------------------------------------------------------------------------------------------------------------
### Bar plot tests (intervention by layer)
intervention_effcs = [0.0,0.2,0.4]
school_cap = [0.35] #,0.35]
layers_test = ['work','community','all']
layers_labels = ['Intervención sobre sitios de trabajo','Intervención sobre comunidad','Intervención completa']
layers_labels = dict(zip(layers_test,layers_labels))
df_list = []
for l, layer_ in enumerate(layers_test):
for i, inter_ in enumerate(intervention_effcs):
for j, schl_cap_ in enumerate(school_cap):
res_read = load_results_ints_test('soln_cum',args.population,inter_,schl_cap_,layer_,results_test_path)
for itr_ in range(10):
res_read_i = res_read['iter'] == itr_
res_read_i = pd.DataFrame(res_read[res_read_i])
end_cases = res_read_i['E'].iloc[-1]
df_res_i = pd.DataFrame(columns=['iter','Inter.Layer','interven_eff','end_cases'])
df_res_i['iter'] = [int(itr_)]
df_res_i['Inter.Layer'] = layers_labels[layer_]
df_res_i['interven_eff'] = r'{}%'.format(int(inter_*100))
df_res_i['end_cases'] = end_cases*100
df_list.append(df_res_i)
df_final_E = pd.concat(df_list)
fig,ax = plt.subplots(1,1,figsize=(9, 6))
sns.catplot(ax=ax, data=df_final_E, y='interven_eff', x='end_cases', hue='Inter.Layer',kind='bar',palette='Blues',alpha=0.7,legend=False)
#ax.legend(bbox_to_anchor=(1.02,1)).set_title('')
plt.legend(bbox_to_anchor=(1.02,0.6),title='',frameon=False, fontsize=16)
#plt.setp(ax.get_legend().get_texts(), fontsize='17') # for legend text
plt.ylabel(r'Eficiencia de intervención, ($\%$)',fontsize=17)
plt.xlabel(r'% Infectados',fontsize=17)
plt.title(r'Infecciones totales | Colegios al {}%'.format(str(int(school_cap[0]*100))),fontsize=17)
plt.xticks(size=16)
plt.yticks(size=16)
save_path = os.path.join(figures_path,'bar_plots','layersInter_totalInfections_n_{}_schoolcap_{}_.png'.format(str(pop),str(school_cap[0])))
plt.savefig(save_path,dpi=400, transparent=False, bbox_inches='tight', pad_inches=0.1 )
###------------------------------------------------------------------------------------------------------------------------------------------------------
### Bar plots
# Final cumulative infections (E at the last time step), by ventilation level and mask type
intervention_effcs = [0.0,0.2,0.4] #,0.6]
interv_legend_label = [r'$0\%$ intervention efficiency',r'$20\%$ intervention efficiency',r'$40\%$ intervention efficiency'] #,r'$40\%$ intervention efficiency',r'$60\%$ intervention efficiency'] #,r'No intervention, schools $100\%$ occupation']
school_cap = 0.35
fraction_people_masked = 1.0
ventilation_vals = [0.0,5.0,8.0,15.0]
ventilation_labels = ['Cero','Baja','Media','Alta']
ventilation_labels = dict(zip(ventilation_vals,ventilation_labels))
masks = ['cloth','surgical','N95']
masks_labels = {'cloth':'Tela','surgical':'Quirúrgicos','N95':'N95'}
states_ = ['S', 'E', 'I1', 'I2', 'I3', 'D', 'R']
df_list = []
inter_ = intervention_effcs[0]
for m, mask_ in enumerate(masks):
for j, vent_ in enumerate(ventilation_vals):
res_read = load_results_ints('soln_cum',args.population,inter_,school_cap,mask_,fraction_people_masked,vent_,path=results_path)
for itr_ in range(10):
res_read_i = res_read['iter'] == itr_
res_read_i = pd.DataFrame(res_read[res_read_i])
end_cases = res_read_i['E'].iloc[-1]
            df_res_i = pd.DataFrame(columns=['iter','Tapabocas','interven_eff','ventilation','end_cases'])
df_res_i['iter'] = [int(itr_)]
            df_res_i['Tapabocas'] = str(masks_labels[mask_])
df_res_i['interven_eff'] = r'{}%'.format(int(inter_*100))
df_res_i['ventilation'] = ventilation_labels[vent_]
df_res_i['end_cases'] = end_cases*100
df_list.append(df_res_i)
df_final_E = pd.concat(df_list)
plt.figure(figsize=(7,6))
sns.catplot(data=df_final_E, x='ventilation', y='end_cases', hue='Tapabocas',kind='bar',palette='Reds_r',alpha=0.8)
#ax.legend(bbox_to_anchor=(1.02,1)).set_title('')
#plt.setp(ax.get_legend().get_texts(), fontsize='17') # for legend text
plt.xlabel('Ventilación',fontsize=17)
plt.ylabel(r'% Infectados',fontsize=17)
plt.title(r'Infecciones totales | colegios {}$\%$, intervención {}$\%$'.format(str(int(school_cap*100)),str(int(inter_*100))),fontsize=17)
plt.xticks(size=16)
plt.yticks(size=16)
plt.ylim([0,101])
#plt.show()
save_path = os.path.join(figures_path,'bar_plots','totalInfections_n_{}_inter_{}_schoolcap_{}_.png'.format(str(pop),str(inter_),str(0.35)))
plt.savefig(save_path,dpi=400, transparent=False, bbox_inches='tight', pad_inches=0.1 )
# Final cumulative deaths (D at the last time step), by ventilation level and mask type
inter_ = intervention_effcs[2]
for m, mask_ in enumerate(masks):
for j, vent_ in enumerate(ventilation_vals):
res_read = load_results_ints('soln_cum',args.population,inter_,school_cap,mask_,fraction_people_masked,vent_,path=results_path)
for itr_ in range(10):
res_read_i = res_read['iter'] == itr_
res_read_i = pd.DataFrame(res_read[res_read_i])
end_dead = res_read_i['D'].iloc[-1]
            df_res_i = pd.DataFrame(columns=['iter','Mask','interven_eff','ventilation','end_dead'])
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
        # Labels don't matter which way copied
        tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
        # Names don't matter which way copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
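        # integer names take precedence over positional lookup: with
        # names=[1, 0], the name 1 resolves to level 0 and vice versa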
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
        # n_lev > n_lab: the slice keeps the full levels but only two labels
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
        # n_lev > n_lab: the slice keeps the full levels but only two labels
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
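        # empty arrays should round-trip to an empty object-dtype Index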
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
        # asking for more levels than exist should raise IndexError
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format manage
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
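        # the engine only indexes complete tuples; a bare level value is
        # found via partial indexing on the MultiIndex, not via the engine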
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
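        # the -1 label encodes a missing value (NaT) in the datetime level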
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086 : int is casted to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
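        # the stacked frame has a (Timestamp, column) MultiIndex; slice_locs
        # should honor Timestamp bounds applied to the first level only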
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than "
"MultiIndex lexsort depth",
index.slice_locs, (1, 0, 1), (2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
        # smoke test: after sorting, the same slice_locs call should not raise
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ('qux', 'one'))
assert result == (0, 5)
result = sorted_idx.slice_locs(('foo', 'two'), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs('bar', 'baz')
assert result == (2, 4)
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
        # the fact that it works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
assert 'foo' not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
# after < before
pytest.raises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
msg = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_hash_collisions(self):
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
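        # a 1000 x 1000 product gives a million distinct keys, exercising the
        # hashed lookup path without collisions breaking get_indexer/get_loc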
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
assert result[3] == '1 0 0 0'
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
assert result[1] == 'foo two'
tm.reset_display_options()
warnings.filters = warn_filters
def test_to_frame(self):
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{0: np.repeat(np.arange(5, dtype='int64'), 3),
1: np.tile(pd.date_range('20130101', periods=3), 5)})
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
def test_to_hierarchical(self):
index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),
(1, 'b'),
(2, 'a'), (2, 'a'),
(2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
assert result.names == index.names
def test_bounds(self):
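        # smoke test: accessing _bounds should simply not raise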
self.index._bounds
def test_equals_multi(self):
assert self.index.equals(self.index)
assert not self.index.equals(self.index.values)
assert self.index.equals(Index(self.index.values))
assert self.index.equal_levels(self.index)
assert not self.index.equals(self.index[:-1])
assert not self.index.equals(self.index[-1])
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
assert not self.index.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
def test_equals_missing_values(self):
# make sure take is not using -1
i = pd.MultiIndex.from_tuples([(0, pd.NaT),
(0, pd.Timestamp('20130101'))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
def test_identical(self):
mi = self.index.copy()
mi2 = self.index.copy()
assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_is_(self):
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
assert mi.is_(mi)
assert mi.is_(mi.view())
assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
assert mi2.is_(mi)
assert mi.is_(mi2)
assert mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
assert not mi3.is_(mi2)
# shouldn't change
assert mi2.is_(mi)
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
mi4.set_levels([lrange(10), lrange(10)], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
def test_union(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_union = piece1 | piece2
tups = sorted(self.index.values)
expected = MultiIndex.from_tuples(tups)
assert the_union.equals(expected)
# corner case, pass self or empty thing:
the_union = self.index.union(self.index)
assert the_union is self.index
the_union = self.index.union(self.index[:0])
assert the_union is self.index
# won't work in python 3
# tuples = self.index.values
# result = self.index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(self.index)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = self.index.union(other)
# assert result.equals(result2)
def test_intersection(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_int = piece1 & piece2
tups = sorted(self.index[3:5].values)
expected = MultiIndex.from_tuples(tups)
assert the_int.equals(expected)
# corner case, pass self
the_int = self.index.intersection(self.index)
assert the_int is self.index
# empty intersection: disjoint
empty = self.index[:2] & self.index[2:]
expected = self.index[:0]
assert empty.equals(expected)
# can't do in python 3
# tuples = self.index.values
# result = self.index & tuples
# assert result.equals(tuples)
def test_sub(self):
first = self.index
# - now raises (previously was set op difference)
with pytest.raises(TypeError):
first - self.index[-3:]
with pytest.raises(TypeError):
self.index[-3:] - first
with pytest.raises(TypeError):
self.index[-3:] - first.tolist()
with pytest.raises(TypeError):
first.tolist() - self.index[-3:]
def test_difference(self):
first = self.index
result = first.difference(self.index[-3:])
expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),
sortorder=0,
names=self.index.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: reflexive
result = self.index.difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: superset
result = self.index[-3:].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: degenerate
result = self.index[:0].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# names not the same
chunklet = self.index[-3:]
chunklet.names = ['foo', 'baz']
result = first.difference(chunklet)
assert result.names == (None, None)
# empty, but non-equal
result = self.index.difference(self.index.sortlevel(1)[0])
assert len(result) == 0
# raise Exception called with non-MultiIndex
result = first.difference(first.values)
assert result.equals(first[:0])
# name from empty array
result = first.difference([])
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([('foo', 'one')])
expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), (
'foo', 'two'), ('qux', 'one'), ('qux', 'two')])
expected.names = first.names
        assert result.equals(expected)
        assert first.names == result.names
tm.assert_raises_regex(TypeError, "other must be a MultiIndex "
"or a list of tuples",
first.difference, [1, 2, 3, 4, 5])
def test_from_tuples(self):
tm.assert_raises_regex(TypeError, 'Cannot infer number of levels '
'from empty list',
MultiIndex.from_tuples, [])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator(self):
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
with tm.assert_raises_regex(
TypeError, 'Input must be a list / sequence of tuple-likes.'):
MultiIndex.from_tuples(0)
def test_from_tuples_empty(self):
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_argsort(self):
result = self.index.argsort()
expected = self.index.values.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_sortlevel(self):
import random
tuples = list(self.index)
random.shuffle(tuples)
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_sortlevel_not_sort_remaining(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
sorted_idx, _ = mi.sortlevel('A', sort_remaining=False)
assert sorted_idx.equals(mi)
def test_sortlevel_deterministic(self):
tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'),
('foo', 'one'), ('baz', 'two'), ('qux', 'one')]
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_dims(self):
pass
def test_drop(self):
dropped = self.index.drop([('foo', 'two'), ('qux', 'one')])
index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')])
dropped2 = self.index.drop(index)
expected = self.index[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = self.index.drop(['bar'])
expected = self.index[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop('foo')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([('bar', 'two')])
pytest.raises(KeyError, self.index.drop, [('bar', 'two')])
pytest.raises(KeyError, self.index.drop, index)
pytest.raises(KeyError, self.index.drop, ['foo', 'two'])
# partially correct argument
mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')])
pytest.raises(KeyError, self.index.drop, mixed_index)
# error='ignore'
dropped = self.index.drop(index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(['foo', 'two'], errors='ignore')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop
dropped = self.index.drop(['foo', ('qux', 'one')])
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop / error='ignore'
mixed_index = ['foo', ('qux', 'one'), 'two']
pytest.raises(KeyError, self.index.drop, mixed_index)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
def test_droplevel_with_names(self):
index = self.index[self.index.get_loc('foo')]
dropped = index.droplevel(0)
assert dropped.name == 'second'
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index.droplevel(0)
assert dropped.names == ('two', 'three')
dropped = index.droplevel('two')
expected = index.droplevel(1)
assert dropped.equals(expected)
def test_droplevel_list(self):
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index[:2].droplevel(['three', 'one'])
expected = index[:2].droplevel(2).droplevel(0)
assert dropped.equals(expected)
dropped = index[:2].droplevel([])
expected = index[:2]
assert dropped.equals(expected)
with pytest.raises(ValueError):
index[:2].droplevel(['one', 'two', 'three'])
with pytest.raises(KeyError):
index[:2].droplevel(['one', 'four'])
def test_drop_not_lexsorted(self):
# GH 12078
# define the lexsorted version of the multi-index
tuples = [('a', ''), ('b1', 'c1'), ('b2', 'c2')]
lexsorted_mi = MultiIndex.from_tuples(tuples, names=['b', 'c'])
assert lexsorted_mi.is_lexsorted()
# and the not-lexsorted version
df = pd.DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3], [1, 'b2', 'c2', 4]])
df = df.pivot_table(index='a', columns=['b', 'c'], values='d')
df = df.reset_index()
not_lexsorted_mi = df.columns
assert not not_lexsorted_mi.is_lexsorted()
# compare the results
tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi)
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_index_equal(lexsorted_mi.drop('a'),
not_lexsorted_mi.drop('a'))
def test_insert(self):
# key contained in all levels
new_index = self.index.insert(0, ('bar', 'two'))
assert new_index.equal_levels(self.index)
assert new_index[0] == ('bar', 'two')
# key not contained in all levels
new_index = self.index.insert(0, ('abc', 'three'))
exp0 = Index(list(self.index.levels[0]) + ['abc'], name='first')
tm.assert_index_equal(new_index.levels[0], exp0)
exp1 = Index(list(self.index.levels[1]) + ['three'], name='second')
tm.assert_index_equal(new_index.levels[1], exp1)
assert new_index[0] == ('abc', 'three')
# key wrong length
msg = "Item must have length equal to number of levels"
with tm.assert_raises_regex(ValueError, msg):
self.index.insert(0, ('foo2',))
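        # setting values at keys that are not yet in the index should
        # enlarge the MultiIndex row by row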
left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]],
columns=['1st', '2nd', '3rd'])
left.set_index(['1st', '2nd'], inplace=True)
ts = left['3rd'].copy(deep=True)
left.loc[('b', 'x'), '3rd'] = 2
left.loc[('b', 'a'), '3rd'] = -1
left.loc[('b', 'b'), '3rd'] = 3
left.loc[('a', 'x'), '3rd'] = 4
left.loc[('a', 'w'), '3rd'] = 5
left.loc[('a', 'a'), '3rd'] = 6
ts.loc[('b', 'x')] = 2
ts.loc['b', 'a'] = -1
ts.loc[('b', 'b')] = 3
ts.loc['a', 'x'] = 4
ts.loc[('a', 'w')] = 5
ts.loc['a', 'a'] = 6
right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2],
['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4],
['a', 'w', 5], ['a', 'a', 6]],
columns=['1st', '2nd', '3rd'])
right.set_index(['1st', '2nd'], inplace=True)
        # FIXME: data types change to float because of
        # intermediate nan insertion
tm.assert_frame_equal(left, right, check_dtype=False)
tm.assert_series_equal(ts, right['3rd'])
# GH9250
idx = [('test1', i) for i in range(5)] + \
[('test2', i) for i in range(6)] + \
[('test', 17), ('test', 18)]
left = pd.Series(np.linspace(0, 10, 11),
pd.MultiIndex.from_tuples(idx[:-2]))
left.loc[('test', 17)] = 11
left.loc[('test', 18)] = 12
right = pd.Series(np.linspace(0, 12, 13),
pd.MultiIndex.from_tuples(idx))
tm.assert_series_equal(left, right)
def test_take_preserve_name(self):
taken = self.index.take([3, 0, 1])
assert taken.names == self.index.names
def test_take_fill_value(self):
# GH 12631
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
result = idx.take(np.array([1, 0, -1]))
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
(np.nan, pd.NaT)]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
    def test_take_invalid_kwargs(self):
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
@pytest.mark.parametrize('other',
[Index(['three', 'one', 'two']),
Index(['one']),
Index(['one', 'three'])])
def test_join_level(self, other, join_type):
join_index, lidx, ridx = other.join(self.index, how=join_type,
level='second',
return_indexers=True)
exp_level = other.join(self.index.levels[1], how=join_type)
assert join_index.levels[0].equals(self.index.levels[0])
assert join_index.levels[1].equals(exp_level)
# pare down levels
mask = np.array(
[x[1] in exp_level for x in self.index], dtype=bool)
exp_values = self.index.values[mask]
tm.assert_numpy_array_equal(join_index.values, exp_values)
if join_type in ('outer', 'inner'):
join_index2, ridx2, lidx2 = \
self.index.join(other, how=join_type, level='second',
return_indexers=True)
assert join_index.equals(join_index2)
tm.assert_numpy_array_equal(lidx, lidx2)
tm.assert_numpy_array_equal(ridx, ridx2)
tm.assert_numpy_array_equal(join_index2.values, exp_values)
def test_join_level_corner_case(self):
# some corner cases
idx = Index(['three', 'one', 'two'])
result = idx.join(self.index, level='second')
assert isinstance(result, MultiIndex)
tm.assert_raises_regex(TypeError, "Join.*MultiIndex.*ambiguous",
self.index.join, self.index, level=1)
def test_join_self(self, join_type):
res = self.index
joined = res.join(res, how=join_type)
assert res is joined
def test_join_multi(self):
# GH 10665
midx = pd.MultiIndex.from_product(
[np.arange(4), np.arange(4)], names=['a', 'b'])
idx = pd.Index([1, 2, 5], name='b')
# inner
jidx, lidx, ridx = midx.join(idx, how='inner', return_indexers=True)
exp_idx = pd.MultiIndex.from_product(
[np.arange(4), [1, 2]], names=['a', 'b'])
exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14], dtype=np.intp)
exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=np.intp)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='inner', return_indexers=True)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# keep MultiIndex
jidx, lidx, ridx = midx.join(idx, how='left', return_indexers=True)
exp_ridx = np.array([-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0,
1, -1], dtype=np.intp)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='right', return_indexers=True)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_reindex(self):
result, indexer = self.index.reindex(list(self.index[:4]))
assert isinstance(result, MultiIndex)
self.check_level_names(result, self.index[:4].names)
result, indexer = self.index.reindex(list(self.index))
assert isinstance(result, MultiIndex)
assert indexer is None
self.check_level_names(result, self.index.names)
def test_reindex_level(self):
idx = Index(['one'])
target, indexer = self.index.reindex(idx, level='second')
target2, indexer2 = idx.reindex(self.index, level='second')
exp_index = self.index.join(idx, level='second', how='right')
exp_index2 = self.index.join(idx, level='second', how='left')
assert target.equals(exp_index)
exp_indexer = np.array([0, 2, 4])
tm.assert_numpy_array_equal(indexer, exp_indexer, check_dtype=False)
assert target2.equals(exp_index2)
exp_indexer2 = np.array([0, -1, 0, -1, 0, -1])
tm.assert_numpy_array_equal(indexer2, exp_indexer2, check_dtype=False)
tm.assert_raises_regex(TypeError, "Fill method not supported",
self.index.reindex, self.index,
method='pad', level='second')
tm.assert_raises_regex(TypeError, "Fill method not supported",
idx.reindex, idx, method='bfill',
level='first')
def test_duplicates(self):
assert not self.index.has_duplicates
assert self.index.append(self.index).has_duplicates
index = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[
[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]])
assert index.has_duplicates
# GH 9075
t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169),
(u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119),
(u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135),
(u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145),
(u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158),
(u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122),
(u('x'), u('out'), u('z'), 17, u('y'), u('in'), u('z'), 160),
(u('x'), u('out'), u('z'), 18, u('y'), u('in'), u('z'), 180),
(u('x'), u('out'), u('z'), 20, u('y'), u('in'), u('z'), 143),
(u('x'), u('out'), u('z'), 21, u('y'), u('in'), u('z'), 128),
(u('x'), u('out'), u('z'), 22, u('y'), u('in'), u('z'), 129),
(u('x'), u('out'), u('z'), 25, u('y'), u('in'), u('z'), 111),
(u('x'), u('out'), u('z'), 28, u('y'), u('in'), u('z'), 114),
(u('x'), u('out'), u('z'), 29, u('y'), u('in'), u('z'), 121),
(u('x'), u('out'), u('z'), 31, u('y'), u('in'), u('z'), 126),
(u('x'), u('out'), u('z'), 32, u('y'), u('in'), u('z'), 155),
(u('x'), u('out'), u('z'), 33, u('y'), u('in'), u('z'), 123),
(u('x'), u('out'), u('z'), 12, u('y'), u('in'), u('z'), 144)]
index = pd.MultiIndex.from_tuples(t)
assert not index.has_duplicates
# handle int64 overflow if possible
def check(nlevels, with_nulls):
labels = np.tile(np.arange(500), 2)
level = np.arange(500)
if with_nulls: # inject some null values
labels[500] = -1 # common nan value
labels = [labels.copy() for i in range(nlevels)]
for i in range(nlevels):
labels[i][500 + i - nlevels // 2] = -1
labels += [np.array([-1, 1]).repeat(500)]
else:
labels = [labels] * nlevels + [np.arange(2).repeat(500)]
levels = [level] * nlevels + [[0, 1]]
# no dups
index = MultiIndex(levels=levels, labels=labels)
assert not index.has_duplicates
# with a dup
if with_nulls:
def f(a):
return np.insert(a, 1000, a[0])
labels = list(map(f, labels))
index = MultiIndex(levels=levels, labels=labels)
else:
values = index.values.tolist()
index = MultiIndex.from_tuples(values + [values[0]])
assert index.has_duplicates
# no overflow
check(4, False)
check(4, True)
# overflow possible
check(8, False)
check(8, True)
# GH 9125
n, k = 200, 5000
levels = [np.arange(n), tm.makeStringIndex(n), 1000 + np.arange(n)]
labels = [np.random.choice(n, k * n) for lev in levels]
mi = MultiIndex(levels=levels, labels=labels)
for keep in ['first', 'last', False]:
left = mi.duplicated(keep=keep)
right = pd._libs.hashtable.duplicated_object(mi.values, keep=keep)
tm.assert_numpy_array_equal(left, right)
# GH5873
for a in [101, 102]:
mi = MultiIndex.from_arrays([[101, a], [3.5, np.nan]])
assert not mi.has_duplicates
with warnings.catch_warnings(record=True):
# Deprecated - see GH20239
assert mi.get_duplicates().equals(MultiIndex.from_arrays(
[[], []]))
tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(
2, dtype='bool'))
for n in range(1, 6): # 1st level shape
for m in range(1, 5): # 2nd level shape
# all possible unique combinations, including nan
lab = product(range(-1, n), range(-1, m))
mi = MultiIndex(levels=[list('abcde')[:n], list('WXYZ')[:m]],
labels=np.random.permutation(list(lab)).T)
assert len(mi) == (n + 1) * (m + 1)
assert not mi.has_duplicates
with warnings.catch_warnings(record=True):
# Deprecated - see GH20239
assert mi.get_duplicates().equals(MultiIndex.from_arrays(
[[], []]))
tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(
len(mi), dtype='bool'))
def test_duplicate_meta_data(self):
# GH 10115
index = MultiIndex(
levels=[[0, 1], [0, 1, 2]],
labels=[[0, 0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 0, 1, 2]])
for idx in [index,
index.set_names([None, None]),
index.set_names([None, 'Num']),
index.set_names(['Upper', 'Num']), ]:
assert idx.has_duplicates
assert idx.drop_duplicates().names == idx.names
def test_get_unique_index(self):
idx = self.index[[0, 1, 0, 1, 1, 0, 0]]
expected = self.index._shallow_copy(idx[[0, 1]])
for dropna in [False, True]:
result = idx._get_unique_index(dropna=dropna)
            assert result.is_unique
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('names', [None, ['first', 'second']])
def test_unique(self, names):
mi = pd.MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names)
tm.assert_index_equal(res, exp)
mi = pd.MultiIndex.from_arrays([list('aaaa'), list('abab')],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([list('aa'), list('ab')],
names=mi.names)
tm.assert_index_equal(res, exp)
mi = pd.MultiIndex.from_arrays([list('aaaa'), list('aaaa')],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([['a'], ['a']], names=mi.names)
tm.assert_index_equal(res, exp)
# GH #20568 - empty MI
mi = pd.MultiIndex.from_arrays([[], []], names=names)
res = mi.unique()
tm.assert_index_equal(mi, res)
@pytest.mark.parametrize('level', [0, 'first', 1, 'second'])
def test_unique_level(self, level):
# GH #17896 - with level= argument
result = self.index.unique(level=level)
expected = self.index.get_level_values(level).unique()
tm.assert_index_equal(result, expected)
# With already unique level
mi = pd.MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]],
names=['first', 'second'])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
tm.assert_index_equal(result, expected)
# With empty MI
mi = pd.MultiIndex.from_arrays([[], []], names=['first', 'second'])
result = mi.unique(level=level)
        expected = mi.get_level_values(level)
        tm.assert_index_equal(result, expected)
def test_unique_datetimelike(self):
idx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01',
'2015-01-01', 'NaT', 'NaT'])
idx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02',
'2015-01-02', 'NaT', '2015-01-01'],
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2]).unique()
eidx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT'])
eidx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-02',
'NaT', '2015-01-01'],
tz='Asia/Tokyo')
exp = pd.MultiIndex.from_arrays([eidx1, eidx2])
tm.assert_index_equal(result, exp)
def test_tolist(self):
result = self.index.tolist()
exp = list(self.index.values)
assert result == exp
def test_repr_with_unicode_data(self):
with pd.core.config.option_context("display.encoding", 'UTF-8'):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
index = pd.DataFrame(d).set_index(["a", "b"]).index
assert "\\u" not in repr(index) # we don't want unicode-escaped
def test_repr_roundtrip(self):
mi = MultiIndex.from_product([list('ab'), range(3)],
names=['first', 'second'])
str(mi)
if PY3:
tm.assert_index_equal(eval(repr(mi)), mi, exact=True)
else:
result = eval(repr(mi))
# string coerces to unicode
tm.assert_index_equal(result, mi, exact=False)
assert mi.get_level_values('first').inferred_type == 'string'
assert result.get_level_values('first').inferred_type == 'unicode'
mi_u = MultiIndex.from_product(
[list(u'ab'), range(3)], names=['first', 'second'])
result = eval(repr(mi_u))
tm.assert_index_equal(result, mi_u, exact=True)
# formatting
if PY3:
str(mi)
else:
compat.text_type(mi)
# long format
mi = MultiIndex.from_product([list('abcdefg'), range(10)],
names=['first', 'second'])
if PY3:
tm.assert_index_equal(eval(repr(mi)), mi, exact=True)
else:
result = eval(repr(mi))
# string coerces to unicode
tm.assert_index_equal(result, mi, exact=False)
assert mi.get_level_values('first').inferred_type == 'string'
assert result.get_level_values('first').inferred_type == 'unicode'
            mi_u = MultiIndex.from_product(
                [list(u'abcdefg'), range(10)], names=['first', 'second'])
            result = eval(repr(mi_u))
tm.assert_index_equal(result, mi_u, exact=True)
def test_str(self):
# tested elsewhere
pass
def test_unicode_string_with_unicode(self):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
idx = pd.DataFrame(d).set_index(["a", "b"]).index
if PY3:
str(idx)
else:
compat.text_type(idx)
def test_bytestring_with_unicode(self):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
idx = pd.DataFrame(d).set_index(["a", "b"]).index
if PY3:
bytes(idx)
else:
str(idx)
def test_slice_keep_name(self):
x = MultiIndex.from_tuples([('a', 'b'), (1, 2), ('c', 'd')],
names=['x', 'y'])
assert x[1:].names == x.names
def test_isna_behavior(self):
# should not segfault GH5123
# NOTE: if MI representation changes, may make sense to allow
# isna(MI)
with pytest.raises(NotImplementedError):
pd.isna(self.index)
def test_level_setting_resets_attributes(self):
ind = pd.MultiIndex.from_arrays([
['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]
])
assert ind.is_monotonic
ind.set_levels([['A', 'B'], [1, 3, 2]], inplace=True)
# if this fails, probably didn't reset the cache correctly.
assert not ind.is_monotonic
def test_is_monotonic_increasing(self):
i = MultiIndex.from_product([np.arange(10),
np.arange(10)], names=['one', 'two'])
assert i.is_monotonic
assert i._is_strictly_monotonic_increasing
assert Index(i.values).is_monotonic
assert i._is_strictly_monotonic_increasing
i = MultiIndex.from_product([np.arange(10, 0, -1),
np.arange(10)], names=['one', 'two'])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values).is_monotonic
assert not Index(i.values)._is_strictly_monotonic_increasing
i = MultiIndex.from_product([np.arange(10),
np.arange(10, 0, -1)],
names=['one', 'two'])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values).is_monotonic
assert not Index(i.values)._is_strictly_monotonic_increasing
i = MultiIndex.from_product([[1.0, np.nan, 2.0], ['a', 'b', 'c']])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values).is_monotonic
assert not Index(i.values)._is_strictly_monotonic_increasing
# string ordering
i = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert not i.is_monotonic
assert not Index(i.values).is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values)._is_strictly_monotonic_increasing
i = MultiIndex(levels=[['bar', 'baz', 'foo', 'qux'],
['mom', 'next', 'zenith']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert i.is_monotonic
        assert Index(i.values).is_monotonic
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_core.ipynb (unless otherwise specified).
__all__ = ['universal_key', 'find_date', 'find_float_time', 'week_from_start', 'load_public_data',
'filtering_usable_data', 'prepare_baseline_and_intervention_usable_data', 'in_good_logging_day',
'most_active_user', 'convert_loggings', 'get_types', 'eating_intervals_percentile', 'summarize_data',
'breakfast_analysis_summary', 'breakfast_analysis_variability', 'breakfast_avg_histplot',
'breakfast_sample_distplot', 'dinner_analysis_summary', 'dinner_analysis_variability', 'dinner_avg_histplot',
'dinner_sample_distplot', 'swarmplot', 'FoodParser']
# Cell
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
from scipy import stats
import seaborn as sns
import os
import matplotlib.pyplot as plt
import pickle
from datetime import date
from datetime import datetime
from collections import defaultdict
import nltk
nltk.download('words', quiet=True)
nltk.download('stopwords', quiet=True)
nltk.download('wordnet', quiet=True)
nltk.download('punkt', quiet=True)
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords, words
import re
import string
import pkg_resources
# Cell
def universal_key(in_path):
"""
Description:\n
This is a helper function that converts pickle and csv file into a pd dataframe.\n
Input:\n
- in_path(str, pandas df): input path, csv or pickle file\n
Output:\n
- df : dataframe format of the in_path file.\n
"""
if isinstance(in_path, str):
if in_path.split('.')[-1] == 'pickle':
# load data
pickle_file = open(in_path, 'rb')
df = pickle.load(pickle_file)
print('read the pickle file successfully.')
if in_path.split('.')[-1] == 'csv':
df = pd.read_csv(in_path)
print('read the csv file successfully.')
else:
df = in_path
return df
# Cell
def find_date(in_path, col, h=0):
"""
Description:\n
Extract date information from a column and shift each date in the column by h hours. (Day starts h hours early if h is negative and h hours late if h is positive)\n
Input:\n
- in_path(str, pandas df): input path, file in pickle, csv or pandas dataframe format\n
- col(str) : column that contains the information of date and time, which is 24 hours system.
    - h(int) : hours to shift the date. For example, when h = 4, every day starts and ends 4 hours later than normal.
Return:\n
- a pandas series represents the date extracted from col.\n
Requirements:\n
Elements in col should be pd.datetime objects
"""
df = universal_key(in_path)
if df[col].dtype == 'O':
raise TypeError("'{}' column must be converted to datetime object".format(col))
def find_date(d, h):
if h > 0:
if d.hour < h:
return d.date() - pd.Timedelta('1 day')
if h < 0:
if d.hour+1 > (24+h):
return d.date() + pd.Timedelta('1 day')
return d.date()
return df[col].apply(find_date, args=([h]))
# Cell
def find_float_time(in_path, col, h=0):
"""
Description:\n
Extract time information from a column and shift each time by h hours. (Day starts h hours early if h is negative and h hours late if h is positive)\n
Input:\n
- in_path (str, pandas df): input path, file in pickle, csv or pandas dataframe format\n
- col(str) : column that contains the information of date and time that's 24 hours system.
    - h(int) : hours to shift the date. For example, when h = 4, every day starts at 4 and ends at 28. When h = -4, every day starts at -4 and ends at 20.
Return:\n
- a pandas series represents the date extracted from col.\n
Requirements:\n
Elements in col should be pd.datetime objects
"""
df = universal_key(in_path)
if df[col].dtype == 'O':
raise TypeError("'{}' column must be converted to datetime object firsly".format(col))
local_time = df[col].apply(lambda x: pd.Timedelta(x.time().isoformat()).total_seconds() /3600.)
if h > 0:
local_time = np.where(local_time < h, 24+ local_time, local_time)
return pd.Series(local_time)
if h < 0:
local_time = np.where(local_time > (24+h), local_time-24., local_time)
return pd.Series(local_time)
return local_time
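# Illustrative sketch only (not part of the exported notebook): with h=4 the
# day boundary moves to 04:00, so a 02:30 log is assigned to the previous
# calendar date and its float time becomes 26.5 (2.5 + 24). Data below is made up.
def _example_hour_shift():
    demo = pd.DataFrame({
        "logtime": pd.to_datetime(["2021-05-02 02:30:00", "2021-05-02 10:00:00"])
    })
    print(find_date(demo, "logtime", h=4).tolist())        # [2021-05-01, 2021-05-02]
    print(find_float_time(demo, "logtime", h=4).tolist())  # [26.5, 10.0]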
# Cell
def week_from_start(in_path, col, identifier):
"""
Description:\n
    Calculate, for each logging, the number of weeks elapsed since that participant's (identifier's) first logging day. Loggings from the first week get the value 1.
Input:\n
- in_path (str, pandas df): input path, file in pickle, csv or pandas dataframe format\n
- col (str): column name that contains date information from the in_path dataframe.
- identifier (str): unique_id or ID, or name that identifies people.
Return:\n
- a numpy array represents the date extracted from col.\n
"""
df = universal_key(in_path)
if 'date' not in df.columns:
raise NameError("There must exist a 'date' column.")
# Handle week from start
df_dic = dict(df.groupby(identifier).agg(np.min)[col])
def count_week_public(s):
return (s.date - df_dic[s[identifier]]).days // 7 + 1
return df.apply(count_week_public, axis = 1)
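# Illustrative sketch only (not part of the exported notebook): two participants,
# where the second log of "u1" falls 8 days after that person's first log and
# therefore lands in week 2. Data below is made up.
def _example_week_from_start():
    demo = pd.DataFrame({
        "unique_code": ["u1", "u1", "u2"],
        "date": [date(2021, 5, 1), date(2021, 5, 9), date(2021, 6, 1)],
    })
    print(week_from_start(demo, "date", "unique_code").tolist())  # [1, 2, 1]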
# Cell
def load_public_data(in_path, h):
"""
Description:\n
Load original public data and output processed data in pickle format.\n
Process includes:\n
1. Dropping 'foodimage_file_name' column.\n
2. Handling the format of time by deleting am/pm by generating a new column, 'original_logtime_notz'\n
3. Generating the date column with possible hour shifts, 'date'\n
4. Converting time into float number into a new column with possible hour shifts, 'local_time'\n
5. Converting time to a format of HH:MM:SS, 'time'\n
6. Generating the column 'week_from_start' that contains the week number that the participants input the food item.\n
7. Generating 'year' column based on the input data.\n
Input:\n
- in_path (str, pandas df): input path, csv file\n
    - h(int) : hours to shift the date. For example, when h = 4, every day starts and ends 4 hours later than normal.
Output:\n
- public_all: the processed dataframe\n
Requirements:\n
in_path file must have the following columns:\n
- foodimage_file_name\n
- original_logtime\n
- date\n
- unique_code\n
"""
public_all = universal_key(in_path).drop(columns = ['foodimage_file_name'])
def handle_public_time(s):
"""
        helper function to get rid of am/pm at the end of each time string
"""
tmp_s = s.replace('p.m.', '').replace('a.m.', '')
try:
return pd.to_datetime(' '.join(tmp_s.split()[:2]) )
except:
try:
if int(tmp_s.split()[1][:2]) > 12:
tmp_s = s.replace('p.m.', '').replace('a.m.', '').replace('PM', '').replace('pm', '')
return pd.to_datetime(' '.join(tmp_s.split()[:2]) )
except:
return np.nan
public_all['original_logtime_notz'] = public_all['original_logtime'].apply(handle_public_time)
public_all = public_all.dropna().reset_index(drop = True)
public_all['date'] = find_date(public_all, 'original_logtime_notz', h)
# Handle the time - Time in floating point format
public_all['local_time'] = find_float_time(public_all, 'original_logtime_notz', h)
# Handle the time - Time in Datetime object format
    public_all['time'] = pd.DatetimeIndex(public_all.original_logtime_notz).time
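    # Sketch of the remaining steps described in the docstring (items 6-7 and
    # the return value); the exact expressions below are assumptions based on
    # that description and on the required 'unique_code' column.
    public_all['week_from_start'] = week_from_start(public_all, 'date', 'unique_code')
    public_all['year'] = pd.DatetimeIndex(public_all['original_logtime_notz']).year
    return public_all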
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import seaborn as sns
import tqdm
def load_data(index=0):
""" 0: C7
1: C8
2: C9
3: C11
4: C13
5: C14
6: C15
7: C16
    Note that C7 and C13 included a short break
    (about 100 timestamps long)
    between the two procedures.
"""
fp = os.path.dirname(__file__)
if index == 0:
df = pd.read_csv(fp + '/C7-1.csv.gz')
df = pd.concat([df, pd.read_csv(fp + '/C7-2.csv.gz')])
df = df.reset_index(drop=True)
df.Timestamp = df.index.values
return df
elif index == 1:
return pd.read_csv(fp + '/C8.csv.gz')
elif index == 2:
return pd.read_csv(fp + '/C9.csv.gz')
elif index == 3:
return pd.read_csv(fp + '/C11.csv.gz')
elif index == 4:
df = pd.read_csv(fp + '/C13-1.csv.gz')
df = pd.concat([df, pd.read_csv(fp + '/C13-2.csv.gz')])
df = df.reset_index(drop=True)
df.Timestamp = df.index.values
return df
elif index == 5:
return pd.read_csv(fp + '/C14.csv.gz')
elif index == 6:
return pd.read_csv(fp + '/C15.csv.gz')
elif index == 7:
return pd.read_csv(fp + '/C16.csv.gz')
else:
raise ValueError
def rename_components(df):
""" current and speed
"""
# Rename L
L_curr = ['L_1', 'L_3', 'L_4', 'L_7', 'L_9']
L_speed = ['L_2', 'L_6', 'L_5', 'L_8', 'L_10']
df = df.rename(columns={k: f'c{i}_curr' for i, k in enumerate(L_curr)})
df = df.rename(columns={k: f'c{i}_speed' for i, k in enumerate(L_speed)})
# Rename A, B, and C
df = df.rename(columns={f'A_{i}': f'c5_val{i}' for i in range(1, 6)})
df = df.rename(columns={f'B_{i}': f'c6_val{i}' for i in range(1, 6)})
df = df.rename(columns={f'C_{i}': f'c7_val{i}' for i in range(1, 6)})
return df[df.columns.sort_values()]
def load_clean_data(index=0):
return rename_components(load_data(index=index))
def set_broken_labels(df, size):
labels = np.zeros(df.shape[0])
labels[-size:] = 1
df['broken'] = labels
return df
def run_to_failure_aux(df, n_sample, desc=''):
seq_len = df.shape[0]
samples = []
pbar = tqdm.tqdm(total=n_sample, desc=desc)
while len(samples) < n_sample:
# random censoring
t = np.random.randint(2, seq_len)
sample = {'lifetime': t, 'broken': df.loc[t, 'broken']}
sample = pd.DataFrame(sample, index=[0])
features = df.iloc[:t].mean(axis=0)[:-1]
sample[features.keys()] = features.values
samples.append(sample)
# break
pbar.update(1)
return pd.concat(samples, axis=0).reset_index(drop=True)
def generate_run_to_failure(n_sample=1000, broken_holdout_steps=2000):
samples = []
print('Generating run-to-failure data:')
for index in range(8):
raw_df = load_clean_data(index=index).set_index('Timestamp')
        raw_df = set_broken_labels(raw_df, size=broken_holdout_steps)
sample = run_to_failure_aux(
raw_df, n_sample, desc=f'component {index+1}/8')
sample['trial_id'] = index
samples.append(sample)
    return pd.concat(samples, axis=0)
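# Illustrative sketch only: build a small censored dataset and inspect how the
# lifetimes split by the censoring label. Requires the C*.csv.gz data files
# next to this module; the sample size chosen here is an arbitrary assumption.
def _example_run_to_failure():
    dataset = generate_run_to_failure(n_sample=20, broken_holdout_steps=2000)
    print(dataset.groupby('broken').lifetime.describe())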
from bs4 import BeautifulSoup
import requests
import pandas as pd
import matplotlib.pyplot as plt
url = "https://www.mohfw.gov.in/"
def get_url() -> str:
return url
def get_response(url) -> requests.Response:
    return requests.get(url, timeout=10)
def return_content() -> BeautifulSoup:
    return BeautifulSoup(get_response(get_url()).content, "html.parser")
content = return_content()
def get_content() -> "html":
return content
def get_helpline_numbers() -> "list":
helpline_numbers = []
content = get_content()
for helpline in content.findAll('div', attrs={"class": "progress-block-area"}):
number_text = helpline.find('p', attrs={"class": "mblock"}).get_text(
strip=True, separator='|').split('|')
for numbers in number_text:
if numbers.find(":") != -1:
helpline_numbers.append(numbers[numbers.find(":") + 1:])
else:
helpline_numbers.append(numbers)
return helpline_numbers
def get_summary() -> "list":
label_data = []
summary_data = []
summary_content = get_content()
summary = summary_content.findAll(
'div', attrs={"class": "information_block"})
for data in summary:
label_data = data.findAll('div', attrs={"class": "iblock_text"})
for text in label_data:
a = text.find('div')
b = text.find('span')
summary_data.append(a.get_text() + ': ' + b.get_text() + ', ')
return summary_data
def get_state_wise_data() -> "list":
p_content = get_content()
state_list = []
state_wise_data = p_content.findAll('div', attrs={"id": "cases"})
for l_p in state_wise_data:
para = l_p.find('p')
state_list.append(para.get_text())
for l_data in state_wise_data:
table = l_data.find('table')
title = []
for l_title in table.find_all('th'):
for l_title_ind in l_title:
title.append(l_title_ind.get_text())
state_list.append(title)
state_data = []
for l_row in table.find_all('tr')[:-1]:
temp = []
l_row_data = l_row.find_all('td')
for l_ind_data in l_row_data:
temp_data = ""
if l_ind_data.get_text().endswith("#") or l_ind_data.get_text().endswith("*"):
temp_data = l_ind_data.get_text()[:-1]
temp.append(temp_data)
else:
temp.append(l_ind_data.get_text())
if len(temp) != 0:
state_data.append(temp)
state_list.append(state_data)
state_data_frame = pd.DataFrame(state_list[2])
state_data_frame.columns = state_list[1]
return_data = []
return_data.append(state_list[0])
return_data.append(state_data_frame)
return return_data
if __name__ == '__main__':
print("Welcome to covid-19 command line utility")
print("Press 1: For getting helpline numbers")
print("Press 2: For getting summary")
print("Press 3: For getting state wise report")
print("Press 4: For getting state wise report(graphical format)")
print("Press 5: For a list of state wise Helpline numbers")
print("Press 6: For Helpline number of a state")
a = 0
    while a != -1:
a = int(input("Enter Your Choice: "))
if a == 1:
helpline_numbers = get_helpline_numbers()
print("First Number: " + helpline_numbers[0])
print("Second Number: " + helpline_numbers[1])
print("All the numbers are Toll Free")
if a == 2:
m_summary_data = get_summary()
m_summary = ' '.join(m_summary_data)
print(m_summary)
if a == 3 or a == 4:
m_state_wise_data = get_state_wise_data()
print(m_state_wise_data[0])
m_state_data_frame = m_state_wise_data[1][:-1]
states = list(m_state_data_frame['Name of State / UT'])
plot_data = {}
plot_data['Total Confirmed cases *'] = list(
pd.to_numeric(m_state_data_frame['Total Confirmed cases *'], downcast="float"))
plot_data['Cured/Discharged/Migrated'] = list(
pd.to_numeric(m_state_data_frame['Cured/Discharged/Migrated'], downcast="float"))
plot_data['Death'] = list(pd.to_numeric(
m_state_data_frame['Death'], downcast="float"))
            plot_data_frame = pd.DataFrame(plot_data, index=states)
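            # Sketch of the missing output step. It is an assumption that the
            # original script printed the table for option 3 and rendered a
            # chart for option 4; the chart styling below is illustrative only.
            if a == 3:
                print(plot_data_frame)
            if a == 4:
                plot_data_frame.plot(kind="bar", figsize=(14, 7))
                plt.ylabel("Number of cases")
                plt.title("COVID-19 cases by state / UT")
                plt.tight_layout()
                plt.show()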
################################################################################
# Module: dataportal.py
# Description: Various functions to acquire building archetype data using
# available APIs
# License: MIT, see full license in LICENSE.txt
# Web: https://github.com/samuelduchesne/archetypal
################################################################################
import hashlib
import io
import json
import logging as lg
import os
import re
import time
import zipfile
import pandas as pd
import pycountry as pycountry
import requests
from archetypal import settings
from archetypal.utils import log
def tabula_available_buildings(country_name="France"):
"""Returns all available building types for a specific country.
Args:
country_name (str): The name of the country. pycountry is used to
resolve country names. Therefore, a country code (e.g. "FRA") can be
passed as well.
"""
# Check code country
code_country = _resolve_codecountry(country_name)
data = {"code_country": code_country}
json_response = tabula_api_request(data, table="all-country")
# load data
df = pd.DataFrame(json_response)
df = df.data.apply(pd.Series)
return df
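# Illustrative sketch only, not part of the archetypal API: list the TABULA
# residential archetypes for France. Requires network access to the TABULA
# webtool, or a previously cached response.
def _example_available_buildings():
    df = tabula_available_buildings("France")
    print(df.head())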
def tabula_api_request(data, table="detail"):
"""Send a request to the TABULA API via HTTP GET and return the JSON
response.
Args:
data (dict): dictionnary of query attributes. with table='all-country',
data expects 'code_country'. with table='detail', data expects
'buildingtype', 'suffix', and 'variant'.
table (str): the server-table to query. 'detail' or 'all-country'
"""
# Prepare URL
if table == "all-country":
codehex = str(
int(hashlib.md5(data["code_country"].encode("utf-8")).hexdigest(), 16)
)[0:13]
url_base = (
"http://webtool.building-typology.eu/data/matrix/building"
"/{0}/p/0/o/0/l/10/dc/{1}"
)
prepared_url = url_base.format(data["code_country"], codehex)
elif table == "detail":
buildingtype = ".".join(s for s in data["buildingtype"])
suffix = ".".join(s for s in data["suffix"])
bldname = buildingtype + "." + suffix
hexint = hashlib.md5(bldname.encode("utf-8")).hexdigest()[0:13]
url_base = (
"http://webtool.building-typology.eu/data/adv/building"
"/detail/{0}/bv/{1}/dc/{2}"
)
prepared_url = url_base.format(bldname, data["variant"], hexint)
else:
raise ValueError('server-table name "{}" invalid'.format(table))
    # First, try to get the cached response from file
cached_response_json = get_from_cache(prepared_url)
if cached_response_json is not None:
# found this request in the cache, just return it instead of making a
# new HTTP call
return cached_response_json
else:
# if this URL is not already in the cache, request it
response = requests.get(prepared_url)
if response.status_code == 200:
response_json = response.json()
if "remark" in response_json:
log(
'Server remark: "{}"'.format(
response_json["remark"], level=lg.WARNING
)
)
elif not response_json["success"]:
raise ValueError(
'The query "{}" returned no results'.format(prepared_url),
lg.WARNING,
)
save_to_cache(prepared_url, response_json)
return response_json
else:
# Handle some server errors
pass
def tabula_building_details_sheet(
code_building=None,
code_country="FR",
code_typologyregion="N",
code_buildingsizeclass="SFH",
code_construcionyearclass=1,
code_additional_parameter="Gen",
code_type="ReEx",
code_num=1,
code_variantnumber=1,
):
"""How to format ``code_building``. Format the :attr:`code_building` string
as such:
Args:
code_building (str): The building code string.
::
Whole building code e.g.:
AT.MT.AB.02.Gen.ReEx.001.001"
| | | | | | | |___code_variantnumber
| | | | | | |_______code_num
| | | | | |___________code_type
| | | | |_______________code_additional_parameter
| | | |___________________code_construcionyearclass
| | |______________________code_buildingsizeclass
| |_________________________code_typologyregion
|____________________________code_country
code_country (str): Country name or International Country Code (ISO
3166-1-alpha-2 code). Input as 'France' will work equally as 'FR'.
code_typologyregion (str): N for national; otherwise specific codes
representing regions in a given country
        code_buildingsizeclass (str): 4 standardized classes: 'SFH'
            (single-family house), 'TH' (terraced house), 'MFH' (multi-family
            house) or 'AB' (apartment block).
code_construcionyearclass (int or str): allocation of time bands to
classes. Defined nationally (according to significant changes in
construction technologies, building codes or available statistical
            data)
code_additional_parameter (str): 1 unique category. Defines the generic
(or basic) typology matrix so that each residential building of a
given country can be assigned to one generic type. A further
segmentation in subtypes is possible and can be indicated by a
specific code. Whereas the generic types must comprise the whole
building stock the total of subtypes must be comprehensive. e.g.
'HR' (highrises), 'TFrame' (timber frame), 'Semi' (semi-detached)
code_type: “ReEx” is a code for “real example” and “SyAv” for
“Synthetical Average”
code_num: TODO: What is this parameter?
code_variantnumber: the energy performance level 1, 2 and 3. 1: minimum
requirements, 2: improved and 3: ambitious or NZEB standard (assumed
or announced level of Nearly Zero-Energy Buildings)
Returns:
pandas.DataFrame: The DataFrame from the
"""
    # Parse building_code
if code_building is not None:
try:
(
code_country,
code_typologyregion,
code_buildingsizeclass,
code_construcionyearclass,
code_additional_parameter,
code_type,
code_num,
code_variantnumber,
) = code_building.split(".")
except ValueError:
msg = (
'the query "{}" is missing a parameter. Make sure the '
'"code_building" has the form: '
"AT.MT.AB.02.Gen.ReEx.001.001"
).format(code_building)
log(msg, lg.ERROR)
raise ValueError(msg)
# Check code country
code_country = _resolve_codecountry(code_country)
# Check code_buildingsizeclass
if code_buildingsizeclass.upper() not in ["SFH", "TH", "MFH", "AB"]:
raise ValueError(
'specified code_buildingsizeclass "{}" not supported. Available '
'values are "SFH", "TH", '
'"MFH" or "AB"'
)
# Check numericals
if not isinstance(code_construcionyearclass, str):
code_construcionyearclass = str(code_construcionyearclass).zfill(2)
if not isinstance(code_num, str):
code_num = str(code_num).zfill(3)
if not isinstance(code_variantnumber, str):
code_variantnumber = str(code_variantnumber).zfill(3)
# prepare data
data = {
"buildingtype": [
code_country,
code_typologyregion,
code_buildingsizeclass,
code_construcionyearclass,
code_additional_parameter,
],
"suffix": [code_type, code_num],
"variant": code_variantnumber,
}
json_response = tabula_api_request(data, table="detail")
if json_response is not None:
log("")
# load data
df = pd.DataFrame(json_response)
df = df.data.apply(pd.Series)
# remove html tags from labels
df.label = df.label.str.replace("<[^<]+?>", " ")
return df
else:
raise ValueError(
'No data found in TABULA matrix with query:"{}"\nRun '
"archetypal.dataportal.tabula_available_buildings() "
'with country code "{}" to get list of possible '
"building types"
"".format(".".join(s for s in data["buildingtype"]), code_country)
)
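# Illustrative sketch only, not part of the archetypal API: fetch one archetype
# sheet using the full building code documented above; the code string is the
# example shown in the docstring, and network access (or a warm cache) is assumed.
def _example_building_details_sheet():
    sheet = tabula_building_details_sheet(code_building="AT.MT.AB.02.Gen.ReEx.001.001")
    print(sheet.head())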
def tabula_system(code_country, code_boundarycond="SUH", code_variantnumber=1):
"""Return system level information from TABULA archetypes.
Args:
code_country (str): the alpha-2 code of the country. eg. "FR"
code_boundarycond (str): choices are "SUH" and "MUH".
code_variantnumber (int):
"""
# Check code country
code_country = _resolve_codecountry(code_country)
# Check code_buildingsizeclass
if code_boundarycond.upper() not in ["SUH", "MUH"]:
raise ValueError(
'specified code_boundarycond "{}" not valid. Available values are '
'"SUH" (Single Unit Houses) '
'and "MUH" (Multi-unit Houses)'
)
# Check code variant number
if not isinstance(code_variantnumber, str):
code_variantnumber = str(code_variantnumber).zfill(2)
# prepare data
data = {"systype": [code_country, code_boundarycond, code_variantnumber]}
json_response = tabula_system_request(data)
if json_response is not None:
log("")
# load data
        df = pd.DataFrame(json_response)
import operator
from shutil import get_terminal_size
from typing import Dict, Hashable, List, Type, Union, cast
from warnings import warn
import numpy as np
from pandas._config import get_option
from pandas._libs import algos as libalgos, hashtable as htable
from pandas._typing import ArrayLike, Dtype, Ordered, Scalar
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
Substitution,
cache_readonly,
deprecate_kwarg,
doc,
)
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.dtypes.cast import (
coerce_indexer_dtype,
maybe_cast_to_extension_array,
maybe_infer_to_datetimelike,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
is_categorical_dtype,
is_datetime64_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_integer_dtype,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
from pandas.core import ops
from pandas.core.accessor import PandasDelegate, delegate_names
import pandas.core.algorithms as algorithms
from pandas.core.algorithms import _get_data_algo, factorize, take, take_1d, unique1d
from pandas.core.array_algos.transforms import shift
from pandas.core.arrays.base import ExtensionArray, _extension_array_shared_docs
from pandas.core.base import NoNewAttributesMixin, PandasObject, _shared_docs
import pandas.core.common as com
from pandas.core.construction import array, extract_array, sanitize_array
from pandas.core.indexers import check_array_indexer, deprecate_ndim_indexing
from pandas.core.missing import interpolate_2d
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.sorting import nargsort
from pandas.io.formats import console
def _cat_compare_op(op):
opname = f"__{op.__name__}__"
@unpack_zerodim_and_defer(opname)
def func(self, other):
if is_list_like(other) and len(other) != len(self):
# TODO: Could this fail if the categories are listlike objects?
raise ValueError("Lengths must match.")
if not self.ordered:
if opname in ["__lt__", "__gt__", "__le__", "__ge__"]:
raise TypeError(
"Unordered Categoricals can only compare equality or not"
)
if isinstance(other, Categorical):
            # Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = "Categoricals can only be compared if 'categories' are the same."
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif self.ordered and not (self.categories == other.categories).all():
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError(
"Categoricals can only be compared if 'ordered' is the same"
)
if not self.ordered and not self.categories.equals(other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
f = getattr(self._codes, opname)
ret = f(other_codes)
mask = (self._codes == -1) | (other_codes == -1)
if mask.any():
                # In other Series, this leads to False, so do that here too
if opname == "__ne__":
ret[(self._codes == -1) & (other_codes == -1)] = True
else:
ret[mask] = False
return ret
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
ret = getattr(self._codes, opname)(i)
if opname not in {"__eq__", "__ge__", "__gt__"}:
# check for NaN needed if we are not equal or larger
mask = self._codes == -1
ret[mask] = False
return ret
else:
if opname == "__eq__":
return np.zeros(len(self), dtype=bool)
elif opname == "__ne__":
return np.ones(len(self), dtype=bool)
else:
raise TypeError(
f"Cannot compare a Categorical for op {opname} with a "
"scalar, which is not a category."
)
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if opname in ["__eq__", "__ne__"]:
return getattr(np.array(self), opname)(np.array(other))
raise TypeError(
f"Cannot compare a Categorical for op {opname} with "
f"type {type(other)}.\nIf you want to compare values, "
"use 'np.asarray(cat) <op> other'."
)
func.__name__ = opname
return func
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :method:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical`or :class:`categoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except (KeyError, TypeError):
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
class Categorical(ExtensionArray, PandasObject):
"""
Represent a categorical variable in classic R / S-plus fashion.
    `Categoricals` can only take on a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : bool, default False
        Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical.
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : bool
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
See Also
--------
CategoricalDtype : Type for categorical data.
CategoricalIndex : An Index with an underlying ``Categorical``.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html>`_
for more.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
"""
# For comparisons, so that numpy uses our implementation if the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
# tolist is not actually deprecated, just suppressed in the __dir__
_deprecations = PandasObject._deprecations | frozenset(["tolist"])
_typ = "categorical"
def __init__(
self, values, categories=None, ordered=None, dtype=None, fastpath=False
):
dtype = CategoricalDtype._from_values_or_dtype(
values, categories, ordered, dtype
)
# At this point, dtype is always a CategoricalDtype, but
# we may have dtype.categories be None, and we need to
# infer categories in a factorization step further below
if fastpath:
self._codes = coerce_indexer_dtype(values, dtype.categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
# By convention, empty lists result in object dtype:
sanitize_dtype = np.dtype("O") if len(values) == 0 else None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError as err:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError(
"'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument."
) from err
except ValueError as err:
# FIXME
raise NotImplementedError(
"> 1 ndim Categorical are not supported at this time"
) from err
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values.dtype):
old_codes = (
values._values.codes if isinstance(values, ABCSeries) else values.codes
)
codes = recode_for_categories(
old_codes, values.dtype.categories, dtype.categories
)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = -np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""
The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
        Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal the number of old categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if self.dtype.categories is not None and len(self.dtype.categories) != len(
new_dtype.categories
):
raise ValueError(
"new categories need to have the same number of "
"items as the old categories!"
)
self._dtype = new_dtype
@property
def ordered(self) -> Ordered:
"""
Whether the categories have an ordered relationship.
"""
return self.dtype.ordered
@property
def dtype(self) -> CategoricalDtype:
"""
The :class:`~pandas.api.types.CategoricalDtype` for this instance.
"""
return self._dtype
@property
def _constructor(self) -> Type["Categorical"]:
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def _formatter(self, boxed=False):
# Defer to CategoricalFormatter's formatter.
return None
def copy(self) -> "Categorical":
"""
Copy constructor.
"""
return self._constructor(
values=self._codes.copy(), dtype=self.dtype, fastpath=True
)
def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike:
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
"""
if is_categorical_dtype(dtype):
dtype = cast(Union[str, CategoricalDtype], dtype)
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
if is_extension_array_dtype(dtype):
return array(self, dtype=dtype, copy=copy) # type: ignore # GH 28770
if is_integer_dtype(dtype) and self.isna().any():
raise ValueError("Cannot convert float NaN to integer")
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def size(self) -> int:
"""
Return the len of myself.
"""
return self._codes.size
@cache_readonly
def itemsize(self) -> int:
"""
return the size of a single category
"""
return self.categories.itemsize
def tolist(self) -> List[Scalar]:
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
to_list = tolist
@classmethod
def _from_inferred_categories(
cls, inferred_categories, inferred_codes, dtype, true_values=None
):
"""
Construct a Categorical from inferred values.
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
true_values : list, optional
If none are provided, the default ones are
"True", "TRUE", and "true."
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (
isinstance(dtype, CategoricalDtype) and dtype.categories is not None
)
if known_categories:
# Convert to a specialized type with `dtype` if specified.
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors="coerce")
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors="coerce")
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors="coerce")
elif dtype.categories.is_boolean():
if true_values is None:
true_values = ["True", "TRUE", "true"]
cats = cats.isin(true_values)
if known_categories:
# Recode from observation order to dtype.categories order.
categories = dtype.categories
codes = recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# Sort categories and recode for unknown categories.
unsorted = cats.copy()
categories = cats.sort_values()
codes = recode_for_categories(inferred_codes, unsorted, categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories=None, ordered=None, dtype=None):
"""
Make a Categorical type from codes and categories or dtype.
This constructor is useful if you already have codes and
categories/dtype and so do not need the (computation intensive)
factorization step, which is usually done on the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like of int
An integer array, where each integer points to a category in
categories or dtype.categories, or else is -1 for NaN.
categories : index-like, optional
The categories for the categorical. Items need to be unique.
If the categories are not given here, then they must be provided
in `dtype`.
ordered : bool, optional
Whether or not this categorical is treated as an ordered
categorical. If not given here or in `dtype`, the resulting
categorical will be unordered.
dtype : CategoricalDtype or "category", optional
If :class:`CategoricalDtype`, cannot be used together with
`categories` or `ordered`.
.. versionadded:: 0.24.0
When `dtype` is provided, neither `categories` nor `ordered`
should be provided.
Returns
-------
Categorical
Examples
--------
>>> dtype = pd.CategoricalDtype(['a', 'b'], ordered=True)
>>> pd.Categorical.from_codes(codes=[0, 1, 0, 1], dtype=dtype)
[a, b, a, b]
Categories (2, object): [a < b]
"""
dtype = CategoricalDtype._from_values_or_dtype(
categories=categories, ordered=ordered, dtype=dtype
)
if dtype.categories is None:
msg = (
"The categories must be provided in 'categories' or "
"'dtype'. Both were None."
)
raise ValueError(msg)
if is_extension_array_dtype(codes) and is_integer_dtype(codes):
# Avoid the implicit conversion of Int to object
if isna(codes).any():
raise ValueError("codes cannot contain NA values")
codes = codes.to_numpy(dtype=np.int64)
else:
codes = np.asarray(codes)
if len(codes) and not is_integer_dtype(codes):
raise ValueError("codes need to be array-like integers")
if len(codes) and (codes.max() >= len(dtype.categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and len(categories)-1")
return cls(codes, dtype=dtype, fastpath=True)
@property
def codes(self) -> np.ndarray:
"""
The category codes of this categorical.
Codes are an array of integers which are the positions of the actual
values in the categories array.
There is no setter, use the other categorical methods and the normal item
setter to change values in the categorical.
Returns
-------
ndarray[int]
A non-writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_categories(self, categories, fastpath=False):
"""
Sets new categories inplace
Parameters
----------
fastpath : bool, default False
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories, self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (
not fastpath
and self.dtype.categories is not None
and len(new_dtype.categories) != len(self.dtype.categories)
):
raise ValueError(
"new categories need to have the same number of "
"items than the old categories!"
)
self._dtype = new_dtype
def _set_dtype(self, dtype: CategoricalDtype) -> "Categorical":
"""
Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = recode_for_categories(self.codes, self.categories, dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Set the ordered attribute to the boolean value.
Parameters
----------
value : bool
Set whether this categorical is ordered (True) or not (False).
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a copy of this categorical with ordered set to the value.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Set the Categorical to be ordered.
Parameters
----------
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a copy of this categorical with ordered set to True.
Returns
-------
Categorical
Ordered Categorical.
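        Examples
        --------
        A minimal illustration; the existing categories gain an ordering:

        >>> cat = pd.Categorical(['a', 'b', 'a'])
        >>> cat.as_ordered()
        [a, b, a]
        Categories (2, object): [a < b]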
"""
inplace = validate_bool_kwarg(inplace, "inplace")
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Set the Categorical to be unordered.
Parameters
----------
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a copy of this categorical with ordered set to False.
Returns
-------
Categorical
Unordered Categorical.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False, inplace=False):
"""
Set the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
        set to NaN). If `rename==True`, the categories will simply be renamed
(less or more items than in old categories will result in values set to
NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
        On the other hand this method does not do checks (e.g., whether the
        old categories are included in the new categories on a reorder), which
        can result in surprising changes, for example when using special string
        dtypes, which do not consider an S1 string equal to a single char
        Python string.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, default False
            Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : bool, default False
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : bool, default False
Whether or not to reorder the categories in-place or return a copy
of this categorical with reordered categories.
Returns
-------
Categorical with reordered categories or None if inplace.
Raises
------
ValueError
If new_categories does not validate as categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if cat.dtype.categories is not None and len(new_dtype.categories) < len(
cat.dtype.categories
):
# remove all _codes which are larger and set to -1/NaN
cat._codes[cat._codes >= len(new_dtype.categories)] = -1
else:
codes = recode_for_categories(
cat.codes, cat.categories, new_dtype.categories
)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
"""
Rename categories.
Parameters
----------
new_categories : list-like, dict-like or callable
New categories which will replace old categories.
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0.
inplace : bool, default False
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
Raises
------
ValueError
            If new categories are list-like and do not have the same number of
            items as the current categories or do not validate as categories
See Also
--------
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, "inplace")
cat = self if inplace else self.copy()
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item) for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
"""
Reorder categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : bool, default False
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
Raises
------
ValueError
If the new categories do not contain all old category items or if
they include any new ones
See Also
--------
rename_categories : Rename categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if set(self.dtype.categories) != set(new_categories):
raise ValueError(
"items in new_categories are not the same as in old categories"
)
return self.set_categories(new_categories, ordered=ordered, inplace=inplace)
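# Illustrative usage sketch (indicative output, same repr style as above):
#
# >>> c = pd.Categorical(['a', 'b', 'a'])
# >>> c.reorder_categories(['b', 'a'], ordered=True)
# [a, b, a]
# Categories (2, object): [b < a]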
def add_categories(self, new_categories, inplace=False):
"""
Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : bool, default False
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
raise ValueError(
f"new categories must not include old categories: {already_included}"
)
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
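# Illustrative usage sketch (indicative output):
#
# >>> c = pd.Categorical(['a', 'b'])
# >>> c.add_categories(['c'])          # 'c' is appended but unused
# [a, b]
# Categories (3, object): [a, b, c]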
def remove_categories(self, removals, inplace=False):
"""
Remove the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : bool, default False
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
Raises
------
ValueError
If the removals are not contained in the categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_list_like(removals):
removals = [removals]
removal_set = set(removals)
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = {x for x in not_included if notna(x)}
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
raise ValueError(f"removals must all be in old categories: {not_included}")
return self.set_categories(
new_categories, ordered=self.ordered, rename=False, inplace=inplace
)
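# Illustrative usage sketch (indicative output):
#
# >>> c = pd.Categorical(['a', 'b', 'c'])
# >>> c.remove_categories(['c'])       # values in removed categories become NaN
# [a, b, NaN]
# Categories (2, object): [a, b]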
def remove_unused_categories(self, inplace=False):
"""
Remove categories which are not used.
Parameters
----------
inplace : bool, default False
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
set_categories : Set the categories to the specified ones.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(
new_categories, ordered=self.ordered
)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
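# Illustrative usage sketch (indicative output):
#
# >>> c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
# >>> c.remove_unused_categories()     # unused 'c' is dropped
# [a, b]
# Categories (2, object): [a, b]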
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned. NaN values are unaffected.
If a `dict` or :class:`~pandas.Series` is used, any unmapped category is
mapped to `NaN`. Note that if this happens, an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(
self._codes.copy(), categories=new_categories, ordered=self.ordered
)
except ValueError:
# NA values are represented in self._codes with -1
# np.take causes NA values to take final element in new_categories
if np.any(self._codes == -1):
new_categories = new_categories.insert(len(new_categories), np.nan)
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op(operator.eq)
__ne__ = _cat_compare_op(operator.ne)
__lt__ = _cat_compare_op(operator.lt)
__gt__ = _cat_compare_op(operator.gt)
__le__ = _cat_compare_op(operator.le)
__ge__ = _cat_compare_op(operator.ge)
# for Series/ndarray like compat
@property
def shape(self):
"""
Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods, fill_value=None):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
fill_value : object, optional
The scalar value to use for newly introduced missing values.
.. versionadded:: 0.24.0
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
fill_value = self._validate_fill_value(fill_value)
codes = shift(codes.copy(), periods, axis=0, fill_value=fill_value)
return self._constructor(codes, dtype=self.dtype, fastpath=True)
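# Illustrative usage sketch (indicative output):
#
# >>> c = pd.Categorical(['a', 'b', 'c'])
# >>> c.shift(1)                       # introduces NaN at the start
# [NaN, a, b]
# Categories (3, object): [a, b, c]
# >>> c.shift(-1, fill_value='a')      # fill_value must be an existing category
# [b, c, a]
# Categories (3, object): [a, b, c]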
def _validate_fill_value(self, fill_value):
"""
Convert a user-facing fill_value to a representation to use with our
underlying ndarray, raising ValueError if this is not possible.
Parameters
----------
fill_value : object
Returns
-------
fill_value : int
Raises
------
ValueError
"""
if isna(fill_value):
fill_value = -1
elif fill_value in self.categories:
fill_value = self.categories.get_loc(fill_value)
else:
raise ValueError(
f"'fill_value={fill_value}' is not present "
"in this Categorical's categories"
)
return fill_value
def __array__(self, dtype=None) -> np.ndarray:
"""
The numpy array interface.
Returns
-------
numpy.array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype.
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
# for binary ops, use our custom dunder methods
result = ops.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
# for all other cases, raise for now (similarly as what happens in
# Series.__array_prepare__)
raise TypeError(
f"Object with dtype {self.dtype} cannot perform "
f"the numpy op {ufunc.__name__}"
)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception("invalid pickle state")
if "_dtype" not in state:
state["_dtype"] = CategoricalDtype(state["_categories"], state["_ordered"])
for k, v in state.items():
setattr(self, k, v)
@property
def T(self) -> "Categorical":
"""
Return the transposed array, which for a 1-dimensional Categorical is the Categorical itself.
"""
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(deep=deep)
@doc(_shared_docs["searchsorted"], klass="Categorical")
def searchsorted(self, value, side="left", sorter=None):
# searchsorted is very performance sensitive. By converting codes
# to same dtype as self.codes, we get much faster performance.
if is_scalar(value):
codes = self.categories.get_loc(value)
codes = self.codes.dtype.type(codes)
else:
locs = [self.categories.get_loc(x) for x in value]
codes = np.array(locs, dtype=self.codes.dtype)
return self.codes.searchsorted(codes, side=side, sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See Also
--------
isna : Top-level isna.
isnull : Alias of isna.
Categorical.notna : Boolean inverse of Categorical.isna.
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See Also
--------
notna : Top-level notna.
notnull : Alias of notna.
Categorical.isna : Boolean inverse of Categorical.notna.
"""
return ~self.isna()
notnull = notna
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Return a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : bool, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = np.bincount(obs, minlength=ncat or 0)
else:
count = np.bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype, fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype="int64")
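# Illustrative usage sketch (indicative output): every category appears in the
# result, even those with a count of zero.
#
# >>> c = pd.Categorical(['a', 'a', 'b'], categories=['a', 'b', 'c'])
# >>> c.value_counts()
# a    2
# b    1
# c    0
# dtype: int64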
def _internal_get_values(self):
"""
Return the values.
For internal compatibility with pandas formatting.
Returns
-------
np.ndarray or Index
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods.
"""
# if we are a datetime and period index, return Index to keep metadata
if needs_i8_conversion(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
elif is_integer_dtype(self.categories) and -1 in self._codes:
return self.categories.astype("object").take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError(
f"Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n"
)
def _values_for_argsort(self):
return self._codes
def argsort(self, ascending=True, kind="quicksort", **kwargs):
"""
Return the indices that would sort the Categorical.
.. versionchanged:: 0.25.0
Changed to sort missing values at the end.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
**kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
numpy.array
See Also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
Missing values are placed at the end
>>> cat = pd.Categorical([2, None, 1])
>>> cat.argsort()
array([2, 0, 1])
"""
return super().argsort(ascending=ascending, kind=kind, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position="last"):
"""
Sort the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : bool, default False
Do operation in place.
ascending : bool, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2, 2, NaN, 5]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2, 2, 5, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2, 2, 5]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5, 2, 2]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if na_position not in ["last", "first"]:
raise ValueError(f"invalid na_position: {repr(na_position)}")
sorted_idx = nargsort(self, ascending=ascending, na_position=na_position)
if inplace:
self._codes = self._codes[sorted_idx]
else:
return self._constructor(
values=self._codes[sorted_idx], dtype=self.dtype, fastpath=True
)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy.array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype("float64")
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def view(self, dtype=None):
if dtype is not None:
raise NotImplementedError(dtype)
return self._constructor(values=self._codes, dtype=self.dtype, fastpath=True)
def to_dense(self):
"""
Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
warn(
"Categorical.to_dense is deprecated and will be removed in "
"a future version. Use np.asarray(cat) instead.",
FutureWarning,
stacklevel=2,
)
return np.asarray(self)
def fillna(self, value=None, method=None, limit=None):
"""
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError(
"specifying a limit for fillna has not been implemented yet"
)
codes = self._codes
# pad / bfill
if method is not None:
# TODO: dispatch when self.categories is EA-dtype
values = np.asarray(self).reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None, value).astype(
self.categories.dtype
)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, (np.ndarray, Categorical, ABCSeries)):
# We get ndarray or Categorical if called via Series.fillna,
# where it will unwrap another aligned Series before getting here
mask = ~algorithms.isin(value, self.categories)
if not isna(value[mask]).all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(codes == -1)
codes = codes.copy()
codes[indexer] = values_codes[indexer]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError(
f"'value' parameter must be a scalar, dict "
f"or Series, but you passed a {type(value).__name__}"
)
return self._constructor(codes, dtype=self.dtype, fastpath=True)
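# Illustrative usage sketch (indicative output): the fill value has to be an
# existing category (or NaN).
#
# >>> c = pd.Categorical(['a', None, 'b'])
# >>> c.fillna('a')
# [a, a, b]
# Categories (2, object): [a, b]
# >>> c.fillna(method='bfill')         # propagate the next valid value backward
# [a, b, b]
# Categories (2, object): [a, b]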
def take(self, indexer, allow_fill: bool = False, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of int
The indices in `self` to take. The meaning of negative values in
`indexer` depends on the value of `allow_fill`.
allow_fill : bool, default False
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(``-1``). These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
.. versionchanged:: 1.0.0
Default value changed from ``True`` to ``False``.
fill_value : object
The value to use for `indices` that are missing (-1), when
``allow_fill=True``. This should be the category, i.e. a value
in ``self.categories``, not a code.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
See Also
--------
Series.take : Similar method for Series.
numpy.ndarray.take : Similar method for NumPy arrays.
Examples
--------
>>> cat = pd.Categorical(['a', 'a', 'b'])
>>> cat
[a, a, b]
Categories (2, object): [a, b]
Specify ``allow_fill==False`` to have negative indices mean indexing
from the right.
>>> cat.take([0, -1, -2], allow_fill=False)
[a, b, a]
Categories (2, object): [a, b]
With ``allow_fill=True``, indices equal to ``-1`` mean "missing"
values that should be filled with the `fill_value`, which is
``np.nan`` by default.
>>> cat.take([0, -1, -1], allow_fill=True)
[a, NaN, NaN]
Categories (2, object): [a, b]
The fill value can be specified.
>>> cat.take([0, -1, -1], allow_fill=True, fill_value='a')
[a, a, a]
Categories (2, object): [a, b]
Specifying a fill value that's not in ``self.categories``
will raise a ``ValueError``.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill:
# convert user-provided `fill_value` to codes
fill_value = self._validate_fill_value(fill_value)
codes = take(self._codes, indexer, allow_fill=allow_fill, fill_value=fill_value)
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill: bool = False, fill_value=None):
# GH#27745 deprecate alias that other EAs don't have
warn(
"Categorical.take_nd is deprecated, use Categorical.take instead",
FutureWarning,
stacklevel=2,
)
return self.take(indexer, allow_fill=allow_fill, fill_value=fill_value)
def __len__(self) -> int:
"""
The length of this Categorical.
"""
return len(self._codes)
def __iter__(self):
"""
Returns an Iterator over the values of this Categorical.
"""
return iter(self._internal_get_values().tolist())
def __contains__(self, key) -> bool:
"""
Returns True if `key` is in this Categorical.
"""
# if key is a NaN, check if any NaN is in self.
if is_scalar(key) and isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True) -> str:
"""
a short repr displaying only max_vals and an optional (but default)
footer
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num) :]._get_repr(length=False, footer=False)
result = f"{head[:-1]}, ..., {tail[1:]}"
if footer:
result = f"{result}\n{self._repr_footer()}"
return str(result)
def _repr_categories(self):
"""
return the base repr for the categories
"""
max_categories = (
10
if get_option("display.max_categories") == 0
else get_option("display.max_categories")
)
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self) -> str:
"""
Returns a string representation of the footer.
"""
category_strs = self._repr_categories()
dtype = str(self.categories.dtype)
levheader = f"Categories ({len(self.categories)}, {dtype}): "
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# replace the " < ... < " gap marker with " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self) -> str:
info = self._repr_categories_info()
return f"Length: {len(self)}\n{info}"
def _get_repr(self, length=True, na_rep="NaN", footer=True) -> str:
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(
self, length=length, na_rep=na_rep, footer=footer
)
result = formatter.to_string()
return str(result)
def __repr__(self) -> str:
"""
String representation.
"""
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = f"[], {msg}"
return result
def _maybe_coerce_indexer(self, indexer):
"""
return an indexer coerced to the codes dtype
"""
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == "i":
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
"""
Return an item.
"""
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
key = check_array_indexer(self, key)
result = self._codes[key]
if result.ndim > 1:
deprecate_ndim_indexing(result)
return result
return self._constructor(result, dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
"""
Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
value = extract_array(value, extract_numpy=True)
# require identical categories set
if isinstance(value, Categorical):
if not is_dtype_equal(self, value):
raise ValueError(
"Cannot set a Categorical with another, "
"without identical categories"
)
if not self.categories.equals(value.categories):
new_codes = recode_for_categories(
value.codes, value.categories, self.categories
)
value = Categorical.from_codes(new_codes, dtype=self.dtype)
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError(
"Cannot setitem on a Categorical with a new "
"category, set the categories first"
)
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None),....)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# else: array of True/False in Series or Categorical
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
key = check_array_indexer(self, key)
self._codes[key] = lindexer
def _reverse_indexer(self) -> Dict[Hashable, np.ndarray]:
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Examples
--------
>>> c = pd.Categorical(list('aabca'))
>>> c
[a, a, b, c, a]
Categories (3, object): [a, b, c]
>>> c.categories
Index(['a', 'b', 'c'], dtype='object')
>>> c.codes
array([0, 0, 1, 2, 0], dtype=int8)
>>> c._reverse_indexer()
{'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(
self.codes.astype("int64"), categories.size
)
counts = counts.cumsum()
_result = (r[start:end] for start, end in zip(counts, counts[1:]))
result = dict(zip(categories, _result))
return result
# reduction ops #
def _reduce(self, name, axis=0, **kwargs):
func = getattr(self, name, None)
if func is None:
raise TypeError(f"Categorical cannot perform the operation {name}")
return func(**kwargs)
@deprecate_kwarg(old_arg_name="numeric_only", new_arg_name="skipna")
def min(self, skipna=True):
"""
The minimum value of the object.
Only ordered `Categoricals` have a minimum!
.. versionchanged:: 1.0.0
Returns an NA value on empty arrays
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered("min")
if not len(self._codes):
return self.dtype.na_value
good = self._codes != -1
if not good.all():
if skipna and good.any():
pointer = self._codes[good].min()
else:
return np.nan
else:
pointer = self._codes.min()
return self.categories[pointer]
@deprecate_kwarg(old_arg_name="numeric_only", new_arg_name="skipna")
def max(self, skipna=True):
"""
The maximum value of the object.
Only ordered `Categoricals` have a maximum!
.. versionchanged:: 1.0.0
Returns an NA value on empty arrays
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered("max")
if not len(self._codes):
return self.dtype.na_value
good = self._codes != -1
if not good.all():
if skipna and good.any():
pointer = self._codes[good].max()
else:
return np.nan
else:
pointer = self._codes.max()
return self.categories[pointer]
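# Illustrative usage sketch (indicative output): min/max require an ordered
# categorical; an unordered one raises TypeError via check_for_ordered.
#
# >>> c = pd.Categorical(['b', 'a', 'c'], ordered=True)
# >>> c.min(), c.max()
# ('a', 'c')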
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : bool, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
"""
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
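# Illustrative usage sketch (indicative output): the result keeps the full
# set of categories even though only the modal value(s) appear.
#
# >>> c = pd.Categorical(['a', 'a', 'b', 'c'])
# >>> c.mode()
# [a]
# Categories (3, object): [a, b, c]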
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keep their existing order.
Returns
-------
unique values : ``Categorical``
See Also
--------
pandas.unique
CategoricalIndex.unique
Series.unique
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list("baabc")).unique()
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list("baabc"), categories=list("abc")).unique()
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(
... list("baabc"), categories=list("abc"), ordered=True
... ).unique()
[b, a, c]
Categories (3, object): [a < b < c]
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype("int64")
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(
original.categories.take(uniques), dtype=original.dtype
)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
bool
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = recode_for_categories(
other.codes, other.categories, self.categories
)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype:
same categories and same ordered attribute.
Parameters
----------
other : Categorical
Returns
-------
bool
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
"""
Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ["counts", "freqs"]
result.index.name = "categories"
return result
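# Illustrative usage sketch (approximate output layout):
#
# >>> c = pd.Categorical(['a', 'a', 'b'])
# >>> c.describe()
#             counts     freqs
# categories
# a                2  0.666667
# b                1  0.333333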
@Substitution(klass="Categorical")
@Appender(_extension_array_shared_docs["repeat"])
def repeat(self, repeats, axis=None):
nv.validate_repeat(tuple(), dict(axis=axis))
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import concat_categorical
return concat_categorical(to_concat)
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : Equivalent method on Series.
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
if not is_list_like(values):
values_type = type(values).__name__
raise TypeError(
"only list-like objects are allowed to be passed "
f"to isin(), you passed a [{values_type}]"
)
values = sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
def replace(self, to_replace, value, inplace: bool = False):
"""
Replaces all instances of one value with another
Parameters
----------
to_replace: object
The value to be replaced
value: object
The value to replace it with
inplace: bool
Whether the operation is done in-place
Returns
-------
None if inplace is True, otherwise the new Categorical after replacement
Examples
--------
>>> s = pd.Categorical([1, 2, 1, 3])
>>> s.replace(1, 3)
[3, 2, 3, 3]
Categories (2, int64): [2, 3]
"""
inplace = validate_bool_kwarg(inplace, "inplace")
cat = self if inplace else self.copy()
# build a dict of (to replace -> value) pairs
if is_list_like(to_replace):
# if to_replace is list-like and value is scalar
replace_dict = {replace_value: value for replace_value in to_replace}
else:
# if both to_replace and value are scalar
replace_dict = {to_replace: value}
# other cases, like if both to_replace and value are list-like or if
# to_replace is a dict, are handled separately in NDFrame
for replace_value, new_value in replace_dict.items():
if new_value == replace_value:
continue
if replace_value in cat.categories:
if isna(new_value):
cat.remove_categories(replace_value, inplace=True)
continue
categories = cat.categories.tolist()
index = categories.index(replace_value)
if new_value in cat.categories:
value_index = categories.index(new_value)
cat._codes[cat._codes == index] = value_index
cat.remove_categories(replace_value, inplace=True)
else:
categories[index] = new_value
cat.rename_categories(categories, inplace=True)
if not inplace:
return cat
# The Series.cat accessor
@delegate_names(
delegate=Categorical, accessors=["categories", "ordered"], typ="property"
)
@delegate_names(
delegate=Categorical,
accessors=[
"rename_categories",
"reorder_categories",
"add_categories",
"remove_categories",
"remove_unused_categories",
"set_categories",
"as_ordered",
"as_unordered",
],
typ="method",
)
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation, while all
methods return new categorical data per default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s = pd.Series(list("abbccc")).astype("category")
>>> s
0 a
1 b
2 b
3 c
4 c
5 c
dtype: category
Categories (3, object): [a, b, c]
>>> s.cat.categories
Index(['a', 'b', 'c'], dtype='object')
>>> s.cat.rename_categories(list("cba"))
0 c
1 b
2 b
3 a
4 a
5 a
dtype: category
Categories (3, object): [c, b, a]
>>> s.cat.reorder_categories(list("cba"))
0 a
1 b
2 b
3 c
4 c
5 c
dtype: category
Categories (3, object): [c, b, a]
>>> s.cat.add_categories(["d", "e"])
0 a
1 b
2 b
3 c
4 c
5 c
dtype: category
Categories (5, object): [a, b, c, d, e]
>>> s.cat.remove_categories(["a", "c"])
0 NaN
1 b
2 b
3 NaN
4 NaN
5 NaN
dtype: category
Categories (1, object): [b]
>>> s1 = s.cat.add_categories(["d", "e"])
>>> s1.cat.remove_unused_categories()
0 a
1 b
2 b
3 c
4 c
5 c
dtype: category
Categories (3, object): [a, b, c]
>>> s.cat.set_categories(list("abcde"))
0 a
1 b
2 b
3 c
4 c
5 c
dtype: category
Categories (5, object): [a, b, c, d, e]
>>> s.cat.as_ordered()
0 a
1 b
2 b
3 c
4 c
5 c
dtype: category
Categories (3, object): [a < b < c]
>>> s.cat.as_unordered()
0 a
1 b
2 b
3 c
4 c
5 c
dtype: category
Categories (3, object): [a, b, c]
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self._index = data.index
self._name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a 'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
"""
Return Series of codes as well as the index.
"""
from pandas import Series
return Series(self._parent.codes, index=self._index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self._index, name=self._name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
dtype_equal = is_dtype_equal(values.dtype, categories.dtype)
if is_extension_array_dtype(categories.dtype) and is_object_dtype(values):
# Support inferring the correct extension dtype from an array of
# scalar objects. e.g.
# Categorical(array[Period, Period], categories=PeriodIndex(...))
cls = categories.dtype.construct_array_type()
values = maybe_cast_to_extension_array(cls, values)
if not isinstance(values, cls):
# exception raised in _from_sequence
values = ensure_object(values)
categories = ensure_object(categories)
elif not dtype_equal:
values = ensure_object(values)
categories = ensure_object(categories)
hash_klass, vals = _get_data_algo(values)
_, cats = _get_data_algo(categories)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def recode_for_categories(codes: np.ndarray, old_categories, new_categories):
"""
Convert a set of codes to a new set of categories
Parameters
----------
codes : np.ndarray
old_categories, new_categories : Index
Returns
-------
new_codes : np.ndarray[np.int64]
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1], dtype=int8)
"""
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
elif new_categories.equals(old_categories):
# Same categories, so no need to actually recode
return codes.copy()
indexer = coerce_indexer_dtype(
new_categories.get_indexer(old_categories), new_categories
)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if is_sequence(list_like) or isinstance(list_like, tuple) or is_iterator(list_like):
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
from pandas.plotting import scatter_matrix
from sklearn import model_selection, preprocessing, svm
from sklearn.linear_model import LinearRegression
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
import pickle
sp500 = pd.read_csv(r'tests\SP500.csv', parse_dates=True, index_col=0)
sp500['log_ret'] = np.log(sp500['Adj Close']/sp500['Adj Close'].shift(1))
sunspot = pd.read_csv(r'tests\mean_sunspots.csv', parse_dates=True, index_col=0)
# -*- coding: utf-8 -*-
"""
Authors: <NAME>, <NAME>, <NAME>, and
<NAME>
IHE Delft 2017
Contact: <EMAIL>
Repository: https://github.com/gespinoza/hants
Module: hants
"""
from __future__ import division
import netCDF4
import pandas as pd
import math
from .davgis.functions import (Spatial_Reference, List_Datasets, Clip,
Resample, Raster_to_Array, NetCDF_to_Raster)
import os
import tempfile
from copy import deepcopy
import matplotlib.pyplot as plt
import warnings
def run_HANTS(rasters_path_inp, name_format,
start_date, end_date, latlim, lonlim, cellsize, nc_path,
nb, nf, HiLo, low, high, fet, dod, delta,
epsg=4326, fill_val=-9999.0,
rasters_path_out=None, export_hants_only=False):
'''
This function runs the python implementation of the HANTS algorithm. It
takes a folder with geotiff raster data as input, creates a netcdf
file, and optionally exports the data back to geotiffs.
'''
create_netcdf(rasters_path_inp, name_format, start_date, end_date,
latlim, lonlim, cellsize, nc_path,
epsg, fill_val)
HANTS_netcdf(nc_path, nb, nf, HiLo, low, high, fet, dod, delta,
fill_val)
#if rasters_path_out:
#export_tiffs(rasters_path_out, nc_path, name_format, export_hants_only)
return nc_path
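# Hedged usage sketch: the folder, file-name pattern, and HANTS parameters below
# are hypothetical placeholders, shown only to illustrate the call signature of
# run_HANTS as defined above.
#
# if __name__ == '__main__':
#     run_HANTS(rasters_path_inp=r'D:\data\ndvi_tifs',   # hypothetical input folder
#               name_format='ndvi_{0}.tif',              # {0} is filled with YYYYMMDD
#               start_date='2016-01-01', end_date='2016-12-31',
#               latlim=[-10.0, 10.0], lonlim=[30.0, 50.0], cellsize=0.05,
#               nc_path=r'D:\data\ndvi_hants.nc',        # hypothetical output netcdf
#               nb=365, nf=3, HiLo='Lo', low=-0.3, high=1.0,
#               fet=0.05, dod=1, delta=0.25)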
def create_netcdf(rasters_path, name_format, start_date, end_date,
latlim, lonlim, cellsize, nc_path,
epsg=4326, fill_val=-9999.0):
'''
This function creates a netcdf file from a folder with geotiff rasters to
be used to run HANTS.
'''
# Latitude and longitude
lat_ls = pd.np.arange(latlim[0] + 0.5*cellsize, latlim[1] + 0.5*cellsize,
cellsize)
lat_ls = lat_ls[::-1] # ArcGIS numpy
lon_ls = pd.np.arange(lonlim[0] + 0.5*cellsize, lonlim[1] + 0.5*cellsize,
cellsize)
lat_n = len(lat_ls)
lon_n = len(lon_ls)
spa_ref = Spatial_Reference(epsg)
ll_corner = [lonlim[0], latlim[0]]
# Rasters
dates_dt = pd.date_range(start_date, end_date, freq='D')
dates_ls = [d.strftime('%Y%m%d') for d in dates_dt]
ras_ls = List_Datasets(rasters_path, 'tif')
# Cell code
temp_ll_ls = [pd.np.arange(x, x + lon_n)
for x in range(1, lat_n*lon_n, lon_n)]
code_ls = pd.np.array(temp_ll_ls)
empty_vec = pd.np.empty((lat_n, lon_n))
empty_vec[:] = fill_val
# Create netcdf file
print('Creating netCDF file...')
nc_file = netCDF4.Dataset(nc_path, 'w', format="NETCDF4")
# Create Dimensions
lat_dim = nc_file.createDimension('latitude', lat_n)
lon_dim = nc_file.createDimension('longitude', lon_n)
time_dim = nc_file.createDimension('time', len(dates_ls))
# Create Variables
crs_var = nc_file.createVariable('crs', 'i4')
crs_var.grid_mapping_name = 'latitude_longitude'
crs_var.crs_wkt = spa_ref
lat_var = nc_file.createVariable('latitude', 'f8', ('latitude'),
fill_value=fill_val)
lat_var.units = 'degrees_north'
lat_var.standard_name = 'latitude'
lon_var = nc_file.createVariable('longitude', 'f8', ('longitude'),
fill_value=fill_val)
lon_var.units = 'degrees_east'
lon_var.standard_name = 'longitude'
time_var = nc_file.createVariable('time', 'l', ('time'),
fill_value=fill_val)
time_var.standard_name = 'time'
time_var.calendar = 'gregorian'
code_var = nc_file.createVariable('code', 'i4', ('latitude', 'longitude'),
fill_value=fill_val)
outliers_var = nc_file.createVariable('outliers', 'i4',
('latitude', 'longitude', 'time'),
fill_value=fill_val)
outliers_var.long_name = 'outliers'
original_var = nc_file.createVariable('original_values', 'f8',
('latitude', 'longitude', 'time'),
fill_value=fill_val)
original_var.long_name = 'original values'
hants_var = nc_file.createVariable('hants_values', 'f8',
('latitude', 'longitude', 'time'),
fill_value=fill_val)
hants_var.long_name = 'hants values'
combined_var = nc_file.createVariable('combined_values', 'f8',
('latitude', 'longitude', 'time'),
fill_value=fill_val)
combined_var.long_name = 'combined values'
print('\tVariables created')
# Load data
lat_var[:] = lat_ls
lon_var[:] = lon_ls
time_var[:] = dates_ls
code_var[:] = code_ls
# temp folder
temp_dir = tempfile.mkdtemp()
bbox = [lonlim[0], latlim[0], lonlim[1], latlim[1]]
# Raster loop
print('\tExtracting data from rasters...')
for tt in range(len(dates_ls)):
# Raster
ras = name_format.format(dates_ls[tt])
if ras in ras_ls:
# Resample
ras_resampled = os.path.join(temp_dir, 'r_' + ras)
Resample(os.path.join(rasters_path, ras), ras_resampled, cellsize)
# Clip
ras_clipped = os.path.join(temp_dir, 'c_' + ras)
Clip(ras_resampled, ras_clipped, bbox)
# Raster to Array
array = Raster_to_Array(ras_resampled,
ll_corner, lon_n, lat_n,
values_type='float32')
# Store values
original_var[:, :, tt] = array
else:
# Store values
original_var[:, :, tt] = empty_vec
# Close file
nc_file.close()
print('NetCDF file created')
# Return
return nc_path
def HANTS_netcdf(nc_path, nb, nf, HiLo, low, high, fet, dod, delta,
fill_val=-9999.0):
'''
This function runs the python implementation of the HANTS algorithm. It
takes the input netcdf file and fills the 'hants_values',
'combined_values', and 'outliers' variables.
'''
# Read netcdfs
nc_file = netCDF4.Dataset(nc_path, 'r+')
time_var = nc_file.variables['time'][:]
original_values = nc_file.variables['original_values'][:]
[rows, cols, ztime] = original_values.shape
size_st = cols*rows
values_hants = pd.np.empty((rows, cols, ztime))
import numpy as np
import pandas as pd
from numpy import nan
from pvlib import modelchain, pvsystem
from pvlib.modelchain import ModelChain
from pvlib.pvsystem import PVSystem
from pvlib.tracking import SingleAxisTracker
from pvlib.location import Location
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pytest
from test_pvsystem import sam_data
from conftest import requires_scipy
@pytest.fixture
def system(sam_data):
modules = sam_data['sandiamod']
module_parameters = modules['Canadian_Solar_CS5P_220M___2009_'].copy()
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def cec_dc_snl_ac_system(sam_data):
modules = sam_data['cecmod']
module_parameters = modules['Canadian_Solar_CS5P_220M'].copy()
module_parameters['b'] = 0.05
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = -0.0002677
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def cec_dc_adr_ac_system(sam_data):
modules = sam_data['cecmod']
module_parameters = modules['Canadian_Solar_CS5P_220M'].copy()
module_parameters['b'] = 0.05
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = -0.0002677
inverters = sam_data['adrinverter']
inverter = inverters['Zigor__Sunzet_3_TL_US_240V__CEC_2011_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def pvwatts_dc_snl_ac_system(sam_data):
module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003}
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def pvwatts_dc_pvwatts_ac_system(sam_data):
module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003}
inverter_parameters = {'eta_inv_nom': 0.95}
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=inverter_parameters)
return system
@pytest.fixture()
def location():
return Location(32.2, -111, altitude=700)
def test_ModelChain_creation(system, location):
mc = ModelChain(system, location)
@pytest.mark.parametrize('strategy, expected', [
(None, (32.2, 180)), ('None', (32.2, 180)), ('flat', (0, 180)),
('south_at_latitude_tilt', (32.2, 180))
])
def test_orientation_strategy(strategy, expected, system, location):
mc = ModelChain(system, location, orientation_strategy=strategy)
# the `or` accounts for the coercion of 'None' to None
assert (mc.orientation_strategy == strategy or
mc.orientation_strategy is None)
assert system.surface_tilt == expected[0]
assert system.surface_azimuth == expected[1]
@requires_scipy
def test_run_model(system, location):
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
ac = mc.run_model(times).ac
expected = pd.Series(np.array([ 183.522449305, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected, check_less_precise=2)
def test_run_model_with_irradiance(system, location):
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni':900, 'ghi':600, 'dhi':150},
index=times)
ac = mc.run_model(times, weather=irradiance).ac
expected = pd.Series(np.array([ 1.90054749e+02, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_perez(system, location):
mc = ModelChain(system, location, transposition_model='perez')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni':900, 'ghi':600, 'dhi':150},
index=times)
ac = mc.run_model(times, weather=irradiance).ac
expected = pd.Series(np.array([ 190.194545796, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_gueymard_perez(system, location):
mc = ModelChain(system, location, airmass_model='gueymard1993',
transposition_model='perez')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni':900, 'ghi':600, 'dhi':150},
index=times)
ac = mc.run_model(times, weather=irradiance).ac
expected = pd.Series(np.array([ 190.194760203, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
import matplotlib.cm as cm
import pandas as pd
import seaborn as sns
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter
import matplotlib.pyplot as plt
import numpy as np
###############################################################################################################
# IMPORTANT: USE ONLY WITH LIST OF TWEETS CONTAINING A SIGNIFICANT AMOUNT FROM EACH USER PRESENT IN THE LIST #
# FOR EXAMPLE TWEETS OBTAINED WITH data-mining/getTimelines.py #
###############################################################################################################
FILENAME_TWEET = "../data-mining/results/timeline.csv" # List of tweets to consider
OUTPUT_FILENAME = "ReactionsVsFollowers.pdf" # Filename to store the plot
BUBBLE_SCALE = (300, 1600) # Scale of the bubbles
X_LOG = True # Whether or not to use log scale on X axis
Y_LOG = True # Whether or not to use log scale on Y axis
# Load all tweets
tweets = pd.read_csv(FILENAME_TWEET, dtype='str')
tweets.date = pd.to_datetime(tweets.date)
tweets.likes = pd.to_numeric(tweets.likes)
tweets.retweets = pd.to_numeric(tweets.retweets)
tweets.followers = pd.to_numeric(tweets.followers)
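# Hedged sketch of how the configuration above could feed the bubble plot; the
# 'username' column name and the aggregation choices are assumptions, not taken
# from the original script.
#
# tweets['reactions'] = tweets.likes + tweets.retweets
# per_user = tweets.groupby('username').agg(
#     followers=('followers', 'max'),
#     reactions=('reactions', 'mean'),
#     n_tweets=('date', 'count'))
# ax = sns.scatterplot(data=per_user, x='followers', y='reactions',
#                      size='n_tweets', sizes=BUBBLE_SCALE, alpha=0.6)
# if X_LOG:
#     ax.set_xscale('log')
# if Y_LOG:
#     ax.set_yscale('log')
# plt.savefig(OUTPUT_FILENAME)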
import os.path as osp
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import yaml
from matplotlib import cm
from src.furnishing.room import RoomDrawer
# from collections import OrderedDict
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
class Callback:
def __init__(self):
self.swarm_algorithm = None
def initialize_callback(self, swarm_algorithm):
self.swarm_algorithm = swarm_algorithm
def on_optimization_start(self):
pass
def on_optimization_end(self):
pass
def on_epoch_start(self):
pass
def on_epoch_end(self):
pass
class CallbackContainer(Callback):
def __init__(self, callbacks):
super().__init__()
self.callbacks = callbacks if callbacks else []
def __iter__(self):
for x in self.callbacks:
yield x
def __len__(self):
return len(self.callbacks)
def initialize_callback(self, swarm_algorithm):
for callback in self.callbacks:
callback.initialize_callback(swarm_algorithm)
def on_optimization_start(self):
for callback in self.callbacks:
callback.on_optimization_start()
def on_optimization_end(self):
for callback in self.callbacks:
callback.on_optimization_end()
def on_epoch_start(self):
for callback in self.callbacks:
callback.on_epoch_start()
def on_epoch_end(self):
for callback in self.callbacks:
callback.on_epoch_end()
class Drawer2d(Callback):
def __init__(self, space_boundaries, space_sampling_size=1000,
isolines_spacing=4, arrows=True):
super().__init__()
self.optimized_function = None
self.space_sampling_size = space_sampling_size
(self.x1, self.x2), (self.y1, self.y2) = space_boundaries
self.last_population = None
self.fig = None
self.ax = None
self.space_visualization_coordinates = None
self.contour_values = None
self.isolines_spacing = isolines_spacing
self.arrows = arrows
def initialize_callback(self, swarm_algorithm):
super().initialize_callback(swarm_algorithm)
self.optimized_function = swarm_algorithm.fit_function
x = np.linspace(self.x1, self.x2, self.space_sampling_size)
y = np.linspace(self.y1, self.y2, self.space_sampling_size)
self.space_visualization_coordinates = np.stack(np.meshgrid(x, y))
self.contour_values = self.optimized_function(
self.space_visualization_coordinates.reshape(2, -1).T
).reshape(self.space_sampling_size, self.space_sampling_size)
def on_optimization_start(self):
plt.ion()
def on_epoch_end(self):
super().on_epoch_end()
population = self.swarm_algorithm.population
plt.contour(
self.space_visualization_coordinates[0],
self.space_visualization_coordinates[1],
self.contour_values,
cmap=cm.coolwarm,
levels=np.arange(
np.min(self.contour_values).astype(np.float16),
np.max(self.contour_values).astype(np.float16),
self.isolines_spacing
),
zorder=1
)
plt.ylim(ymin=self.y1, ymax=self.y2)
plt.xlim(xmin=self.x1, xmax=self.x2)
if self.last_population is not None:
old_xs = self.last_population[:, 0]
old_ys = self.last_population[:, 1]
plt.scatter(
old_xs,
old_ys,
marker='x',
linewidths=2,
color='red',
s=100,
zorder=2
)
arrow_size = max(np.max(self.x2) - np.min(self.x1), np.max(self.y2) - np.min(self.y1))
for i in range(len(population)):
pos = self.last_population[i]
new_pos = population[i]
dx, dy = new_pos - pos
x, y = pos
if self.arrows:
plt.arrow(x, y, dx, dy, head_width=0.5,
head_length=1, fc='k', ec='k')
self.last_population = population
plt.pause(0.1)
plt.clf()
plt.cla()
def on_optimization_end(self):
plt.ioff()
class PrintLogCallback(Callback):
def on_epoch_end(self):
print('Epoch:', self.swarm_algorithm._step_number,
'Global Best:', self.swarm_algorithm.current_global_fitness)
class PandasLogCallback(Callback):
NON_HYPERPARAMS = ['population', 'population_size',
'_compiled', '_seed',
'_rng', '_step_number',
'fit_function',
'global_best_solution',
'local_best_solutions',
'nb_features',
'constraints',
'current_global_fitness',
'current_local_fitness']
def __init__(self):
super().__init__()
self.log_df = pd.DataFrame(columns=['Epoch', 'Best Global Fitness', 'Worst Local Fitness'])
def on_optimization_start(self):
epoch = int(self.swarm_algorithm._step_number)
bgfit = self.swarm_algorithm.current_global_fitness
wlfit = np.max(self.swarm_algorithm.current_local_fitness)
self.log_df = self.log_df.append({'Epoch': epoch,
'Best Global Fitness': bgfit,
'Worst Local Fitness': wlfit},
ignore_index=True)
def on_epoch_end(self):
epoch = int(self.swarm_algorithm._step_number)
bgfit = self.swarm_algorithm.current_global_fitness
wlfit = np.max(self.swarm_algorithm.current_local_fitness)
self.log_df = self.log_df.append({'Epoch': epoch,
'Best Global Fitness': bgfit,
'Worst Local Fitness': wlfit},
ignore_index=True)
def on_optimization_end(self):
self.log_df['Epoch'] = pd.to_numeric(self.log_df['Epoch'], downcast='integer')
def get_log(self):
return self.log_df
class FileLogCallback(PandasLogCallback):
def __init__(self, result_filename):
super().__init__()
self.result_filename = result_filename
def on_optimization_end(self):
meta = {'FitFunction': self.swarm_algorithm.fit_function.__self__.__class__.__name__,
'Algorithm': self.swarm_algorithm.__class__.__name__,
'PopulationSize': self.swarm_algorithm.population_size,
'NbFeatures': self.swarm_algorithm.nb_features}
hyperparams = self.swarm_algorithm.__dict__.copy()
for k in self.NON_HYPERPARAMS:
hyperparams.pop(k)
for k in hyperparams:
hyperparams[k] = str(hyperparams[k])
meta['AlgorithmHyperparams'] = hyperparams
with open(self.result_filename + '-meta.yaml', 'w') as f:
yaml.dump(meta, f, default_flow_style=False)
self.log_df['Epoch'] = | pd.to_numeric(self.log_df['Epoch'], downcast='integer') | pandas.to_numeric |
import os
import pandas as pd
import re
def load_diffs(keep_diff = False):
nick_map = {
'talk_diff_no_admin_sample.tsv': 'sample',
'talk_diff_no_admin_2015.tsv': '2015',
'all_blocked_user.tsv': 'blocked',
'd_annotated.tsv': 'annotated',
}
base = '../../data/samples/'
nss = ['user', 'article']
samples = [
'talk_diff_no_admin_sample.tsv',
'talk_diff_no_admin_2015.tsv',
'all_blocked_user.tsv',
'd_annotated.tsv'
]
d ={}
for s in samples:
dfs = []
for ns in nss:
inf = os.path.join(base, ns, 'scored', s)
df = pd.read_csv(inf, sep = '\t')
if not keep_diff:
del df['clean_diff']
df['ns'] = ns
dfs.append(df)
d[nick_map[s]] = augment(pd.concat(dfs))
d['blocked']['blocked'] = 1
return d
def is_ip(x):
pattern = r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}"
return re.match(pattern,str(x)) is not None
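# Added illustration (not part of the original module): the pattern only requires the
# string to *start* with four dot-separated digit groups, which is enough to flag
# anonymous editors whose "user" is an IP address, e.g.
#   is_ip("192.168.0.1")  -> True
#   is_ip("SomeUser123")  -> False
#   is_ip(None)           -> False   (str(None) == "None" never matches)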
def augment(df):
df['author_anon'] = df['user_id'].isnull()
df['recipient_anon'] = df['page_title'].apply(is_ip)
df['rev_timestamp'] = | pd.to_datetime(df['rev_timestamp']) | pandas.to_datetime |
# Question 07, Lab 07
# AB Satyaprakash, 180123062
# imports
import pandas as pd
import numpy as np
# functions
def f(t, y):
return y - t**2 + 1
def F(t):
return (t+1)**2 - 0.5*np.exp(t)
def RungeKutta4(t, y, h):
k1 = f(t, y)
k2 = f(t+h/2, y+h*k1/2)
k3 = f(t+h/2, y+h*k2/2)
k4 = f(t+h, y+h*k3)
return y + h*(k1 + 2*k2 + 2*k3 + k4)/6
def AdamsBashforth(t, y, h):
return y[-1] + h*(55*f(t[-1], y[-1]) - 59*f(t[-2], y[-2]) + 37*f(t[-3], y[-3]) - 9*f(t[-4], y[-4]))/24
def AdamsMoulton(t, y, h):
t1 = t[-1]+h
y1 = AdamsBashforth(t, y, h)
return y[-1] + h*(9*f(t1, y1) + 19*f(t[-1], y[-1]) - 5*f(t[-2], y[-2]) + f(t[-3], y[-3]))/24
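# Added sanity notes (illustrative, not part of the original submission):
# F(t) = (t+1)**2 - 0.5*exp(t) is the exact solution of y' = y - t**2 + 1, y(0) = 0.5,
# which is why it is tabulated below as the "Actual Value" column; e.g.
# F(0.2) = 1.44 - 0.5*exp(0.2) ~ 0.8293.
# The scheme is the usual PECE pairing: RK4 generates the three startup values, the
# 4-step Adams-Bashforth formula predicts y_{n+1}, and the 3-step Adams-Moulton
# formula (both 4th order) corrects it.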
# program body
t = [0]
y = [0.5]
h = 0.2
# advance three RK4 startup steps from the current (t[-1], y[-1]) before the multistep loop
y.append(RungeKutta4(t[-1], y[-1], h))
t.append(round(t[-1]+h, 1))
y.append(RungeKutta4(t[-1], y[-1], h))
t.append(round(t[-1]+h, 1))
y.append(RungeKutta4(t[-1], y[-1], h))
t.append(round(t[-1]+h, 1))
yact = []
while t[-1] < 2:
    y.append(AdamsMoulton(t, y, h))
t.append(round(t[-1]+h, 1))
for T in t:
yact.append(F(T))
df = pd.DataFrame()
df["Adam's Predictor-Corrector Method"] = pd.Series(y)
df['Actual Value'] = | pd.Series(yact) | pandas.Series |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
from ....config import options, option_context
from ....dataframe import DataFrame
from ....tensor import arange, tensor
from ....tensor.random import rand
from ....tests.core import require_cudf
from ....utils import lazy_import
from ... import eval as mars_eval, cut, qcut
from ...datasource.dataframe import from_pandas as from_pandas_df
from ...datasource.series import from_pandas as from_pandas_series
from ...datasource.index import from_pandas as from_pandas_index
from .. import to_gpu, to_cpu
from ..to_numeric import to_numeric
from ..rebalance import DataFrameRebalance
cudf = lazy_import('cudf', globals=globals())
@require_cudf
def test_to_gpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
res = cdf.execute().fetch()
assert isinstance(res, cudf.DataFrame)
pd.testing.assert_frame_equal(res.to_pandas(), pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries)
cseries = series.to_gpu()
res = cseries.execute().fetch()
assert isinstance(res, cudf.Series)
pd.testing.assert_series_equal(res.to_pandas(), pseries)
@require_cudf
def test_to_cpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
df2 = to_cpu(cdf)
res = df2.execute().fetch()
assert isinstance(res, pd.DataFrame)
pd.testing.assert_frame_equal(res, pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries, chunk_size=(13, 21))
cseries = to_gpu(series)
series2 = to_cpu(cseries)
res = series2.execute().fetch()
assert isinstance(res, pd.Series)
pd.testing.assert_series_equal(res, pseries)
def test_rechunk_execution(setup):
data = pd.DataFrame(np.random.rand(8, 10))
df = from_pandas_df(pd.DataFrame(data), chunk_size=3)
df2 = df.rechunk((3, 4))
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),
columns=[np.random.bytes(10) for _ in range(10)])
df = from_pandas_df(data)
df2 = df.rechunk(5)
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
# test Series rechunk execution.
data = pd.Series(np.random.rand(10,))
series = from_pandas_series(data)
series2 = series.rechunk(3)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
series2 = series.rechunk(1)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
# test index rechunk execution
data = pd.Index(np.random.rand(10,))
index = from_pandas_index(data)
index2 = index.rechunk(3)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
index2 = index.rechunk(1)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
# test rechunk on mixed typed columns
data = pd.DataFrame({0: [1, 2], 1: [3, 4], 'a': [5, 6]})
df = from_pandas_df(data)
df = df.rechunk((2, 2)).rechunk({1: 3})
res = df.execute().fetch()
pd.testing.assert_frame_equal(data, res)
def test_series_map_execution(setup):
raw = pd.Series(np.arange(10))
s = from_pandas_series(raw, chunk_size=7)
with pytest.raises(ValueError):
# cannot infer dtype, the inferred is int,
# but actually it is float
# just due to nan
s.map({5: 10})
r = s.map({5: 10}, dtype=float)
result = r.execute().fetch()
expected = raw.map({5: 10})
pd.testing.assert_series_equal(result, expected)
r = s.map({i: 10 + i for i in range(7)}, dtype=float)
result = r.execute().fetch()
expected = raw.map({i: 10 + i for i in range(7)})
pd.testing.assert_series_equal(result, expected)
r = s.map({5: 10}, dtype=float, na_action='ignore')
result = r.execute().fetch()
expected = raw.map({5: 10}, na_action='ignore')
pd.testing.assert_series_equal(result, expected)
# dtype can be inferred
r = s.map({5: 10.})
result = r.execute().fetch()
expected = raw.map({5: 10.})
pd.testing.assert_series_equal(result, expected)
r = s.map(lambda x: x + 1, dtype=int)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
def f(x: int) -> float:
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
def f(x: int):
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series
raw2 = pd.Series([10], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2, dtype=float)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series, and dtype can be inferred
raw2 = pd.Series([10.], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test str
raw = pd.Series(['a', 'b', 'c', 'd'])
s = from_pandas_series(raw, chunk_size=2)
r = s.map({'c': 'e'})
result = r.execute().fetch()
expected = raw.map({'c': 'e'})
pd.testing.assert_series_equal(result, expected)
# test map index
raw = pd.Index(np.random.rand(7))
idx = from_pandas_index(pd.Index(raw), chunk_size=2)
r = idx.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_index_equal(result, expected)
def test_describe_execution(setup):
s_raw = pd.Series(np.random.rand(10))
# test one chunk
series = from_pandas_series(s_raw, chunk_size=10)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
# test multi chunks
series = from_pandas_series(s_raw, chunk_size=3)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
rs = np.random.RandomState(5)
df_raw = pd.DataFrame(rs.rand(10, 4), columns=list('abcd'))
df_raw['e'] = rs.randint(100, size=10)
# test one chunk
df = from_pandas_df(df_raw, chunk_size=10)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = series.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_series_equal(result, expected)
# test multi chunks
df = from_pandas_df(df_raw, chunk_size=3)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = df.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_frame_equal(result, expected)
# test skip percentiles
r = df.describe(percentiles=False, include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
expected.drop(['50%'], axis=0, inplace=True)
pd.testing.assert_frame_equal(result, expected)
with pytest.raises(ValueError):
df.describe(percentiles=[1.1])
with pytest.raises(ValueError):
# duplicated values
df.describe(percentiles=[0.3, 0.5, 0.3])
# test input dataframe which has unknown shape
df = from_pandas_df(df_raw, chunk_size=3)
df2 = df[df['a'] < 0.5]
r = df2.describe()
result = r.execute().fetch()
expected = df_raw[df_raw['a'] < 0.5].describe()
pd.testing.assert_frame_equal(result, expected)
def test_data_frame_apply_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
df = from_pandas_df(df_raw, chunk_size=5)
r = df.apply('ffill')
result = r.execute().fetch()
expected = df_raw.apply('ffill')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(['sum', 'max'])
result = r.execute().fetch()
expected = df_raw.apply(['sum', 'max'])
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sqrt)
result = r.execute().fetch()
expected = df_raw.apply(np.sqrt)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2]))
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2]))
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sum, axis='index')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='index')
pd.testing.assert_series_equal(result, expected)
r = df.apply(np.sum, axis='columns')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='columns')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1)
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1, result_type='expand')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1, result_type='expand')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
pd.testing.assert_frame_equal(result, expected)
finally:
options.chunk_store_limit = old_chunk_store_limit
def test_series_apply_execute(setup):
idxes = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idxes)
series = from_pandas_series(s_raw, chunk_size=5)
r = series.apply('add', args=(1,))
result = r.execute().fetch()
expected = s_raw.apply('add', args=(1,))
pd.testing.assert_series_equal(result, expected)
r = series.apply(['sum', 'max'])
result = r.execute().fetch()
expected = s_raw.apply(['sum', 'max'])
pd.testing.assert_series_equal(result, expected)
r = series.apply(np.sqrt)
result = r.execute().fetch()
expected = s_raw.apply(np.sqrt)
pd.testing.assert_series_equal(result, expected)
r = series.apply('sqrt')
result = r.execute().fetch()
expected = s_raw.apply('sqrt')
pd.testing.assert_series_equal(result, expected)
r = series.apply(lambda x: [x, x + 1], convert_dtype=False)
result = r.execute().fetch()
expected = s_raw.apply(lambda x: [x, x + 1], convert_dtype=False)
pd.testing.assert_series_equal(result, expected)
s_raw2 = pd.Series([np.array([1, 2, 3]), np.array([4, 5, 6])])
series = from_pandas_series(s_raw2)
dtypes = pd.Series([np.dtype(float)] * 3)
r = series.apply(pd.Series, output_type='dataframe',
dtypes=dtypes)
result = r.execute().fetch()
expected = s_raw2.apply(pd.Series)
pd.testing.assert_frame_equal(result, expected)
@pytest.mark.skipif(pa is None, reason='pyarrow not installed')
def test_apply_with_arrow_dtype_execution(setup):
df1 = pd.DataFrame({'a': [1, 2, 1],
'b': ['a', 'b', 'a']})
df = from_pandas_df(df1)
df['b'] = df['b'].astype('Arrow[string]')
r = df.apply(lambda row: str(row[0]) + row[1], axis=1)
result = r.execute().fetch()
expected = df1.apply(lambda row: str(row[0]) + row[1], axis=1)
pd.testing.assert_series_equal(result, expected)
s1 = df1['b']
s = from_pandas_series(s1)
s = s.astype('arrow_string')
r = s.apply(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = s1.apply(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
def test_transform_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
idx_vals = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idx_vals)
def rename_fn(f, new_name):
f.__name__ = new_name
return f
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
# DATAFRAME CASES
df = from_pandas_df(df_raw, chunk_size=5)
# test transform scenarios on data frames
r = df.transform(lambda x: list(range(len(x))))
result = r.execute().fetch()
expected = df_raw.transform(lambda x: list(range(len(x))))
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: list(range(len(x))), axis=1)
result = r.execute().fetch()
expected = df_raw.transform(lambda x: list(range(len(x))), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(['cumsum', 'cummax', lambda x: x + 1])
result = r.execute().fetch()
expected = df_raw.transform(['cumsum', 'cummax', lambda x: x + 1])
pd.testing.assert_frame_equal(result, expected)
fn_dict = OrderedDict([
('A', 'cumsum'),
('D', ['cumsum', 'cummax']),
('F', lambda x: x + 1),
])
r = df.transform(fn_dict)
result = r.execute().fetch()
expected = df_raw.transform(fn_dict)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.iloc[:-1], _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.iloc[:-1])
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.iloc[:-1], axis=1, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.iloc[:-1], axis=1)
pd.testing.assert_frame_equal(result, expected)
fn_list = [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]
r = df.transform(fn_list, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(fn_list)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.sum(), _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.sum())
pd.testing.assert_series_equal(result, expected)
fn_dict = OrderedDict([
('A', rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1')),
('D', [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]),
('F', lambda x: x.iloc[:-1].reset_index(drop=True)),
])
r = df.transform(fn_dict, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(fn_dict)
pd.testing.assert_frame_equal(result, expected)
# SERIES CASES
series = from_pandas_series(s_raw, chunk_size=5)
# test transform scenarios on series
r = series.transform(lambda x: x + 1)
result = r.execute().fetch()
expected = s_raw.transform(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
r = series.transform(['cumsum', lambda x: x + 1])
result = r.execute().fetch()
expected = s_raw.transform(['cumsum', lambda x: x + 1])
pd.testing.assert_frame_equal(result, expected)
# test transform on string dtype
df_raw = pd.DataFrame({'col1': ['str'] * 10, 'col2': ['string'] * 10})
df = from_pandas_df(df_raw, chunk_size=3)
r = df['col1'].transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = df_raw['col1'].transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
r = df.transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = df_raw.transform(lambda x: x + '_suffix')
pd.testing.assert_frame_equal(result, expected)
r = df['col2'].transform(lambda x: x + '_suffix', dtype=np.dtype('str'))
result = r.execute().fetch()
expected = df_raw['col2'].transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
finally:
options.chunk_store_limit = old_chunk_store_limit
@pytest.mark.skipif(pa is None, reason='pyarrow not installed')
def test_transform_with_arrow_dtype_execution(setup):
df1 = pd.DataFrame({'a': [1, 2, 1],
'b': ['a', 'b', 'a']})
df = from_pandas_df(df1)
df['b'] = df['b'].astype('Arrow[string]')
r = df.transform({'b': lambda x: x + '_suffix'})
result = r.execute().fetch()
expected = df1.transform({'b': lambda x: x + '_suffix'})
pd.testing.assert_frame_equal(result, expected)
s1 = df1['b']
s = from_pandas_series(s1)
s = s.astype('arrow_string')
r = s.transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = s1.transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
def test_string_method_execution(setup):
s = pd.Series(['s1,s2', 'ef,', 'dd', np.nan])
s2 = pd.concat([s, s, s])
series = from_pandas_series(s, chunk_size=2)
series2 = from_pandas_series(s2, chunk_size=2)
# test getitem
r = series.str[:3]
result = r.execute().fetch()
expected = s.str[:3]
pd.testing.assert_series_equal(result, expected)
# test split, expand=False
r = series.str.split(',', n=2)
result = r.execute().fetch()
expected = s.str.split(',', n=2)
pd.testing.assert_series_equal(result, expected)
# test split, expand=True
r = series.str.split(',', expand=True, n=1)
result = r.execute().fetch()
expected = s.str.split(',', expand=True, n=1)
pd.testing.assert_frame_equal(result, expected)
# test rsplit
r = series.str.rsplit(',', expand=True, n=1)
result = r.execute().fetch()
expected = s.str.rsplit(',', expand=True, n=1)
pd.testing.assert_frame_equal(result, expected)
# test cat all data
r = series2.str.cat(sep='/', na_rep='e')
result = r.execute().fetch()
expected = s2.str.cat(sep='/', na_rep='e')
assert result == expected
# test cat list
r = series.str.cat(['a', 'b', np.nan, 'c'])
result = r.execute().fetch()
expected = s.str.cat(['a', 'b', np.nan, 'c'])
pd.testing.assert_series_equal(result, expected)
# test cat series
r = series.str.cat(series.str.capitalize(), join='outer')
result = r.execute().fetch()
expected = s.str.cat(s.str.capitalize(), join='outer')
pd.testing.assert_series_equal(result, expected)
# test extractall
r = series.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
result = r.execute().fetch()
expected = s.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
pd.testing.assert_frame_equal(result, expected)
# test extract, expand=False
r = series.str.extract(r'[ab](\d)', expand=False)
result = r.execute().fetch()
expected = s.str.extract(r'[ab](\d)', expand=False)
pd.testing.assert_series_equal(result, expected)
# test extract, expand=True
r = series.str.extract(r'[ab](\d)', expand=True)
result = r.execute().fetch()
expected = s.str.extract(r'[ab](\d)', expand=True)
| pd.testing.assert_frame_equal(result, expected) | pandas.testing.assert_frame_equal |
import pathlib
import pytest
import pandas as pd
import numpy as np
import gradelib
EXAMPLES_DIRECTORY = pathlib.Path(__file__).parent / "examples"
GRADESCOPE_EXAMPLE = gradelib.Gradebook.from_gradescope(
EXAMPLES_DIRECTORY / "gradescope.csv"
)
CANVAS_EXAMPLE = gradelib.Gradebook.from_canvas(EXAMPLES_DIRECTORY / "canvas.csv")
# the canvas example has Lab 01, which is also in Gradescope. Let's remove it
CANVAS_WITHOUT_LAB_EXAMPLE = gradelib.Gradebook(
points=CANVAS_EXAMPLE.points.drop(columns="lab 01"),
maximums=CANVAS_EXAMPLE.maximums.drop(index="lab 01"),
late=CANVAS_EXAMPLE.late.drop(columns="lab 01"),
dropped=CANVAS_EXAMPLE.dropped.drop(columns="lab 01"),
)
# given
ROSTER = gradelib.read_egrades_roster(EXAMPLES_DIRECTORY / "egrades.csv")
def assert_gradebook_is_sound(gradebook):
assert gradebook.points.shape == gradebook.dropped.shape == gradebook.late.shape
assert (gradebook.points.columns == gradebook.dropped.columns).all()
assert (gradebook.points.columns == gradebook.late.columns).all()
assert (gradebook.points.index == gradebook.dropped.index).all()
assert (gradebook.points.index == gradebook.late.index).all()
assert (gradebook.points.columns == gradebook.maximums.index).all()
# assignments property
# -----------------------------------------------------------------------------
def test_assignments_are_produced_in_order():
assert list(GRADESCOPE_EXAMPLE.assignments) == list(
GRADESCOPE_EXAMPLE.points.columns
)
# keep_pids()
# -----------------------------------------------------------------------------
def test_keep_pids():
# when
actual = GRADESCOPE_EXAMPLE.keep_pids(ROSTER.index)
# then
assert len(actual.pids) == 3
assert_gradebook_is_sound(actual)
def test_keep_pids_raises_if_pid_does_not_exist():
# given
pids = ["A12345678", "ADNEDNE00"]
# when
with pytest.raises(KeyError):
actual = GRADESCOPE_EXAMPLE.keep_pids(pids)
# keep_assignments() and remove_assignments()
# -----------------------------------------------------------------------------
def test_keep_assignments():
# when
actual = GRADESCOPE_EXAMPLE.keep_assignments(["homework 01", "homework 02"])
# then
assert set(actual.assignments) == {"homework 01", "homework 02"}
assert_gradebook_is_sound(actual)
def test_keep_assignments_raises_if_assignment_does_not_exist():
# given
assignments = ["homework 01", "this aint an assignment"]
# then
with pytest.raises(KeyError):
GRADESCOPE_EXAMPLE.keep_assignments(assignments)
def test_remove_assignments():
# when
actual = GRADESCOPE_EXAMPLE.remove_assignments(
GRADESCOPE_EXAMPLE.assignments.starting_with("lab")
)
# then
assert set(actual.assignments) == {
"homework 01",
"homework 02",
"homework 03",
"homework 04",
"homework 05",
"homework 06",
"homework 07",
"project 01",
"project 02",
}
assert_gradebook_is_sound(actual)
def test_remove_assignments_raises_if_assignment_does_not_exist():
# given
assignments = ["homework 01", "this aint an assignment"]
# then
with pytest.raises(KeyError):
GRADESCOPE_EXAMPLE.remove_assignments(assignments)
# combine()
# -----------------------------------------------------------------------------
def test_combine_with_keep_pids():
# when
combined = gradelib.Gradebook.combine(
[GRADESCOPE_EXAMPLE, CANVAS_WITHOUT_LAB_EXAMPLE], keep_pids=ROSTER.index
)
# then
assert "homework 01" in combined.assignments
assert "midterm exam" in combined.assignments
assert_gradebook_is_sound(combined)
def test_combine_raises_if_duplicate_assignments():
# the canvas example and the gradescope example both have lab 01.
# when
with pytest.raises(ValueError):
combined = gradelib.Gradebook.combine([GRADESCOPE_EXAMPLE, CANVAS_EXAMPLE])
def test_combine_raises_if_indices_do_not_match():
# when
with pytest.raises(ValueError):
combined = gradelib.Gradebook.combine(
[CANVAS_WITHOUT_LAB_EXAMPLE, GRADESCOPE_EXAMPLE]
)
# number_of_lates()
# -----------------------------------------------------------------------------
def test_number_of_lates():
# when
labs = GRADESCOPE_EXAMPLE.assignments.starting_with("lab")
actual = GRADESCOPE_EXAMPLE.number_of_lates(within=labs)
# then
assert list(actual) == [1, 4, 2, 2]
def test_number_of_lates_with_empty_assignment_list_raises():
# when
with pytest.raises(ValueError):
actual = GRADESCOPE_EXAMPLE.number_of_lates(within=[])
def test_number_of_lates_with_no_assignment_list_uses_all_assignments():
# when
actual = GRADESCOPE_EXAMPLE.number_of_lates()
# then
assert list(actual) == [1, 5, 2, 2]
# forgive_lates()
# -----------------------------------------------------------------------------
def test_forgive_lates():
# when
labs = GRADESCOPE_EXAMPLE.assignments.starting_with("lab")
actual = GRADESCOPE_EXAMPLE.forgive_lates(n=3, within=labs)
# then
assert list(actual.number_of_lates(within=labs)) == [0, 1, 0, 0]
assert_gradebook_is_sound(actual)
def test_forgive_lates_with_empty_assignment_list_raises():
# when
with pytest.raises(ValueError):
actual = GRADESCOPE_EXAMPLE.forgive_lates(n=3, within=[])
def test_forgive_lates_forgives_the_first_n_lates():
# by "first", we mean in the order specified by the `within` argument
# student A10000000 had late lab 01, 02, 03, and 07
assignments = ["lab 02", "lab 07", "lab 01", "lab 03"]
# when
actual = GRADESCOPE_EXAMPLE.forgive_lates(n=2, within=assignments)
# then
assert not actual.late.loc["A10000000", "lab 02"]
assert not actual.late.loc["A10000000", "lab 07"]
assert actual.late.loc["A10000000", "lab 01"]
assert actual.late.loc["A10000000", "lab 03"]
def test_forgive_lates_does_not_forgive_dropped():
# given
labs = GRADESCOPE_EXAMPLE.assignments.starting_with("lab")
dropped = GRADESCOPE_EXAMPLE.dropped.copy()
dropped.iloc[:, :] = True
example = gradelib.Gradebook(
points=GRADESCOPE_EXAMPLE.points,
maximums=GRADESCOPE_EXAMPLE.maximums,
late=GRADESCOPE_EXAMPLE.late,
dropped=dropped,
)
# when
actual = example.forgive_lates(n=3, within=labs)
# then
assert list(actual.number_of_lates(within=labs)) == [1, 4, 2, 2]
assert_gradebook_is_sound(actual)
# drop_lowest()
# -----------------------------------------------------------------------------
def test_drop_lowest_on_simple_example_1():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
homeworks = gradebook.assignments.starting_with("hw")
# if we are dropping 1 HW, the right strategy is to drop the 50 point HW
# for A1 and to drop the 100 point homework for A2
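    # (worked check, added for clarity, not in the original test:
    #  A1: dropping hw02 keeps (1 + 90) / (2 + 100) = 91/102 ~ 0.89, the best single drop;
    #  A2: dropping hw03 keeps (2 + 7) / (2 + 50) = 9/52 ~ 0.17, likewise the best option)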
# when
actual = gradebook.drop_lowest(1, within=homeworks)
# then
assert actual.dropped.iloc[0, 1]
assert actual.dropped.iloc[1, 2]
assert list(actual.dropped.sum(axis=1)) == [1, 1]
assert_gradebook_is_sound(actual)
def test_drop_lowest_on_simple_example_2():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
homeworks = gradebook.assignments.starting_with("hw")
    # if we are dropping 2 HWs, the right strategy is to keep only the 100 point HW
    # for A1 and only the 2 point homework for A2
# when
actual = gradebook.drop_lowest(2, within=homeworks)
# then
assert not actual.dropped.iloc[0, 2]
assert not actual.dropped.iloc[1, 0]
assert list(actual.dropped.sum(axis=1)) == [2, 2]
assert_gradebook_is_sound(actual)
def test_drop_lowest_counts_lates_as_zeros():
# given
columns = ["hw01", "hw02"]
p1 = pd.Series(data=[10, 5], index=columns, name="A1")
p2 = pd.Series(data=[10, 10], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([10, 10], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
gradebook.late.iloc[0, 0] = True
# since A1's perfect homework is late, it should count as zero and be
# dropped
# when
actual = gradebook.drop_lowest(1)
# then
assert actual.dropped.iloc[0, 0]
assert list(actual.dropped.sum(axis=1)) == [1, 1]
assert_gradebook_is_sound(actual)
def test_drop_lowest_ignores_assignments_already_dropped():
# given
columns = ["hw01", "hw02", "hw03", "hw04"]
p1 = pd.Series(data=[9, 0, 7, 0], index=columns, name="A1")
p2 = pd.Series(data=[10, 10, 10, 10], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([10, 10, 10, 10], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
gradebook.dropped.loc["A1", "hw02"] = True
gradebook.dropped.loc["A1", "hw04"] = True
    # since A1's zero-score homeworks are already dropped, we should drop a third
    # homework, too: this will be HW03
# when
actual = gradebook.drop_lowest(1)
# then
assert actual.dropped.loc["A1", "hw04"]
assert actual.dropped.loc["A1", "hw02"]
assert actual.dropped.loc["A1", "hw03"]
assert list(actual.dropped.sum(axis=1)) == [3, 1]
assert_gradebook_is_sound(actual)
# give_equal_weights()
# -----------------------------------------------------------------------------
def test_give_equal_weights_on_example():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
homeworks = gradebook.assignments.starting_with("hw")
# when
actual = gradebook.give_equal_weights(within=homeworks)
# then
assert actual.maximums.loc["hw01"] == 1
assert actual.maximums.loc["hw02"] == 1
assert actual.maximums.loc["hw03"] == 1
assert actual.maximums.loc["lab01"] == 20
assert actual.points.loc["A1", "hw01"] == 1 / 2
assert actual.points.loc["A1", "hw02"] == 30 / 50
# score()
# -----------------------------------------------------------------------------
def test_score_on_simple_example():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
homeworks = gradebook.assignments.starting_with("hw")
# when
actual = gradebook.score(homeworks)
# then
assert np.allclose(actual.values, [121 / 152, 24 / 152], atol=1e-6)
def test_score_counts_lates_as_zero():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
gradebook.late.loc["A1", "hw01"] = True
gradebook.late.loc["A1", "hw03"] = True
gradebook.late.loc["A2", "hw03"] = True
homeworks = gradebook.assignments.starting_with("hw")
# when
actual = gradebook.score(homeworks)
# then
assert np.allclose(actual.values, [30 / 152, 9 / 152], atol=1e-6)
def test_score_ignores_dropped_assignments():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
gradebook.dropped.loc["A1", "hw01"] = True
gradebook.dropped.loc["A1", "hw03"] = True
gradebook.dropped.loc["A2", "hw03"] = True
homeworks = gradebook.assignments.starting_with("hw")
# when
actual = gradebook.score(homeworks)
# then
assert np.allclose(actual.values, [30 / 50, 9 / 52], atol=1e-6)
# total()
# -----------------------------------------------------------------------------
def test_total_on_simple_example():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
homeworks = gradebook.assignments.starting_with("hw")
# when
earned, available = gradebook.total(homeworks)
# then
assert np.allclose(earned.values, [121, 24], atol=1e-6)
assert np.allclose(available.values, [152, 152], atol=1e-6)
def test_total_counts_lates_as_zero():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
gradebook.late.loc["A1", "hw01"] = True
gradebook.late.loc["A1", "hw03"] = True
gradebook.late.loc["A2", "hw03"] = True
homeworks = gradebook.assignments.starting_with("hw")
# when
earned, available = gradebook.total(homeworks)
# then
assert np.allclose(earned.values, [30, 9], atol=1e-6)
assert np.allclose(available.values, [152, 152], atol=1e-6)
def test_total_ignores_dropped_assignments():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
gradebook.dropped.loc["A1", "hw01"] = True
gradebook.dropped.loc["A1", "hw03"] = True
gradebook.dropped.loc["A2", "hw03"] = True
homeworks = gradebook.assignments.starting_with("hw")
# when
earned, available = gradebook.total(homeworks)
# then
assert np.allclose(earned.values, [30, 9], atol=1e-6)
assert np.allclose(available.values, [50, 52], atol=1e-6)
# unify_assignments()
# -----------------------------------------------------------------------------
def test_unify_assignments():
"""test that points / maximums are added across unified assignments"""
# given
columns = ["hw01", "hw01 - programming", "hw02", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
HOMEWORK_01_PARTS = gradebook.assignments.starting_with("hw01")
# when
result = gradebook.unify_assignments({"hw01": HOMEWORK_01_PARTS})
# then
assert len(result.assignments) == 3
assert result.maximums["hw01"] == 52
assert result.points.loc["A1", "hw01"] == 31
assert result.maximums.shape[0] == 3
assert result.late.shape[1] == 3
assert result.dropped.shape[1] == 3
assert result.points.shape[1] == 3
def test_unify_assignments_with_multiple_in_dictionary():
"""test that points / maximums are added across unified assignments"""
# given
columns = ["hw01", "hw01 - programming", "hw02", "hw02 - testing"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = | pd.DataFrame([p1, p2]) | pandas.DataFrame |
from PIL import Image
from io import BytesIO
import pickle
import json
import numpy as np
import pandas as pd
from pykafka import KafkaClient
from pykafka.common import OffsetType
import requests
import os
from tornado import gen, httpserver, ioloop, log, web
import random
import time
import sys
IMAGE_FREQUENCY = 30
class MainHandler(web.RequestHandler):
@gen.coroutine
def get(self):
self.write(prediction_json)
def gen_client(hosts="127.0.0.1:9092", topic_name='people-detection'):
client = KafkaClient(hosts=hosts)
topic = client.topics[topic_name]
return client, topic
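# Hedged usage sketch (not in the original script; the consumer calls follow the
# documented pykafka API and should be treated as an assumption):
#   client, topic = gen_client()
#   consumer = topic.get_simple_consumer(auto_offset_reset=OffsetType.LATEST,
#                                        reset_offset_on_start=True)
#   for message in consumer:
#       frame_data = decode(message.value)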
def decode(msg):
msg = pickle.loads(msg)
return msg
def Bbox(text_file):
df = | pd.DataFrame(text_file['predictions']) | pandas.DataFrame |
"""
@author: <NAME>, portions originally by JTay
"""
import numpy as np
import pandas as pd
import sklearn.model_selection as ms
from collections import defaultdict
from sklearn.metrics import make_scorer, accuracy_score
from sklearn.utils import compute_sample_weight
import matplotlib.pyplot as plt
cv_folds = 5
# Balanced Accuracy is preferred when there is a class imbalance.
def balanced_accuracy(truth, pred):
wts = compute_sample_weight('balanced', truth)
return accuracy_score(truth, pred, sample_weight=wts)
scorer = make_scorer(balanced_accuracy)
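# Added worked example (numbers chosen for illustration): with truth = [0, 0, 0, 0, 1]
# and pred = [0, 0, 0, 0, 0], plain accuracy is 4/5 = 0.8, but
# compute_sample_weight('balanced', truth) gives weights 0.625 (class 0) and 2.5 (class 1),
# so the weighted accuracy is (4 * 0.625) / (4 * 0.625 + 2.5) = 0.5, i.e. the mean of the
# per-class recalls, which is why it is used as the scorer here.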
def plot_learning_curve(ds_name, clf_type, train_size, train_scores, test_scores):
# http://scikit-learn.org/stable/auto_examples/model_selection/plot_learning_curve.html
plt.figure()
title = f"Learning Curve: {clf_type}, {ds_name}"
plt.title(title)
plt.xlabel("Training examples")
plt.ylabel("Score")
plt.grid()
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
train_low = train_scores_mean - train_scores_std
train_high = train_scores_mean + train_scores_std
test_low = test_scores_mean - test_scores_std
test_high = test_scores_mean + test_scores_std
plt.fill_between(train_size, train_low, train_high, alpha=0.1, color="r")
plt.fill_between(train_size, test_low, test_high, alpha=0.1, color="g")
plt.plot(train_size, train_scores_mean, 'o-', color="r", label="Training score")
plt.plot(train_size, test_scores_mean, 'o-', color="g", label="Cross-validation score")
plt.legend(loc="best")
return plt
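# Hedged usage sketch (dataset/classifier names are placeholders): the three score
# arrays are exactly what sklearn's learning_curve returns, e.g.
#   sizes, tr, te = ms.learning_curve(estimator, X, y, cv=cv_folds, scoring=scorer)
#   plot_learning_curve('madelon', 'DT', sizes, tr, te).savefig('./output/plots/DT_madelon_LC.png')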
def plot_validation_curve(ds_name, clf_type, df, param_col, param_name):
# loosely based on http://scikit-learn.org/stable/auto_examples/model_selection/plot_validation_curve.html#sphx-glr-auto-examples-model-selection-plot-validation-curve-py
# because of how we get XVALS, this won't work for other algorithms yet.
plt.figure()
title = f"Validation Curve: {clf_type}, {ds_name}"
plt.title(title)
plt.xlabel(param_name)
plt.ylabel("Score")
plt.grid()
xvals = df[param_col].values.astype(np.float64)
train_scores_mean = df['mean_train_score'].values
train_scores_std = df['std_train_score'].values
test_scores_mean = df['mean_test_score'].values
test_scores_std = df['std_test_score'].values
train_low = train_scores_mean - train_scores_std
train_high = train_scores_mean + train_scores_std
test_low = test_scores_mean - test_scores_std
test_high = test_scores_mean + test_scores_std
plt.fill_between(xvals, train_low, train_high, alpha=0.1, color="r")
plt.fill_between(xvals, test_low, test_high, alpha=0.1, color="g")
plt.semilogx(xvals, train_scores_mean, 'o-', color="r", label="Training score")
plt.plot(xvals, test_scores_mean, 'o-', color="g", label="Cross-validation score")
plt.legend(loc="best")
return plt
def plot_validation_curves(ds_name, clf_type, df, param_col, param_name, dataframes, df_names):
# loosely based on http://scikit-learn.org/stable/auto_examples/model_selection/plot_validation_curve.html#sphx-glr-auto-examples-model-selection-plot-validation-curve-py
# because of how we get XVALS, this won't work for other algorithms yet.
plt.figure()
title = f"Validation Curve: {clf_type}, {ds_name}"
plt.title(title)
plt.xlabel(param_name)
plt.ylabel("Score")
plt.grid()
colors = ['r', 'g', 'm', 'c', 'y']
i = 0
for df, dname in zip(dataframes, df_names):
xvals = df[param_col].values.astype(np.float64)
train_scores_mean = df['mean_train_score'].values
train_scores_std = df['std_train_score'].values
test_scores_mean = df['mean_test_score'].values
test_scores_std = df['std_test_score'].values
train_low = train_scores_mean - train_scores_std
train_high = train_scores_mean + train_scores_std
test_low = test_scores_mean - test_scores_std
test_high = test_scores_mean + test_scores_std
c1, c2 = colors[2 * i], colors[2 * i + 1]
plt.fill_between(xvals, train_low, train_high, alpha=0.1, color=c1)
plt.fill_between(xvals, test_low, test_high, alpha=0.1, color=c2)
plt.semilogx(xvals, train_scores_mean, 'o-', color=c1, label=f"Train Acc ({dname})")
plt.plot(xvals, test_scores_mean, 'o-', color=c2, label=f"CV Acc ({dname})")
i += 1
plt.legend(loc="best")
return plt
def basicResults(clfObj, trgX, trgY, tstX, tstY, params, clf_type=None, dataset=None):
np.random.seed(55)
if clf_type is None or dataset is None:
        raise ValueError("clf_type and dataset must be provided")
cv = ms.GridSearchCV(clfObj, n_jobs=8, param_grid=params, refit=True, verbose=10, cv=cv_folds, scoring=scorer, error_score=0)
cv.fit(trgX, trgY)
regTable = pd.DataFrame(cv.cv_results_)
regTable.to_csv('./output/{}_{}_reg.csv'.format(clf_type, dataset), index=False)
test_score = cv.score(tstX, tstY)
    # possibly delete file first? Somewhere, maybe not here, since it's cumulative I think.
with open('./output/test results.csv', 'a') as f:
f.write('{},{},{},{}\n'.format(clf_type, dataset, test_score, cv.best_params_))
# LEARNING CURVES
train_sizes = np.linspace(0.1, 1.0, 5) # defaults to: np.linspace(0.1, 1.0, 5)
train_size, train_scores, test_scores = ms.learning_curve(cv.best_estimator_, trgX, trgY, train_sizes=train_sizes, cv=cv_folds, verbose=10, scoring=scorer, n_jobs=8, shuffle=True)
curve_train_scores = pd.DataFrame(index=train_size, data=train_scores)
curve_test_scores = pd.DataFrame(index=train_size, data=test_scores)
curve_train_scores.to_csv('./output/{}_{}_LC_train.csv'.format(clf_type, dataset))
curve_test_scores.to_csv('./output/{}_{}_LC_test.csv'.format(clf_type, dataset))
try:
p = plot_learning_curve(dataset, clf_type, train_size, train_scores, test_scores)
print("Learning curve generated, saving...")
p.savefig('./output/plots/{}_{}_LC.png'.format(clf_type, dataset), bbox_inches='tight')
except Exception as e:
print(f"Error generating learning curve plots for {clf_type} {dataset}")
print(repr(e))
return cv
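# Hedged usage sketch (classifier and grid are placeholders, not the original experiments):
#   params = {'max_depth': [1, 3, 5, 10], 'min_samples_leaf': [1, 5, 10]}
#   cv = basicResults(DecisionTreeClassifier(), X_train, y_train, X_test, y_test,
#                     params, clf_type='DT', dataset='madelon')
# This writes ./output/DT_madelon_reg.csv, the learning-curve CSVs and plot, appends the
# held-out score to './output/test results.csv', and returns the fitted GridSearchCV.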
# Iteration learning curves
def iterationLC(clfObj, trgX, trgY, tstX, tstY, params, clf_type=None, dataset=None):
np.random.seed(55)
if clf_type is None or dataset is None:
        raise ValueError("clf_type and dataset must be provided")
cv = ms.GridSearchCV(clfObj, n_jobs=8, param_grid=params, refit=True, verbose=10, cv=cv_folds, scoring=scorer)
cv.fit(trgX, trgY)
regTable = pd.DataFrame(cv.cv_results_)
regTable.to_csv('./output/ITER_base_{}_{}.csv'.format(clf_type, dataset), index=False)
d = defaultdict(list)
name = list(params.keys())[0]
for value in list(params.values())[0]:
d['param_{}'.format(name)].append(value)
clfObj.set_params(**{name: value})
clfObj.fit(trgX, trgY)
pred = clfObj.predict(trgX)
d['train acc'].append(balanced_accuracy(trgY, pred))
clfObj.fit(trgX, trgY)
pred = clfObj.predict(tstX)
d['test acc'].append(balanced_accuracy(tstY, pred))
print(value)
d = pd.DataFrame(d)
d.to_csv('./output/ITERtestSET_{}_{}.csv'.format(clf_type, dataset), index=False)
return cv
def LC_plot(X_train, X_test, y_train, y_test, estimator, clf_type, ds_name, fn_code):
train_sizes = np.linspace(0.1, 1.0, 5) # defaults to: np.linspace(0.1, 1.0, 5)
train_size, train_scores, test_scores = ms.learning_curve(estimator, X_train, y_train, train_sizes=train_sizes, cv=cv_folds, verbose=10, scoring=scorer, n_jobs=8, shuffle=True)
curve_train_scores = pd.DataFrame(index=train_size, data=train_scores)
curve_test_scores = | pd.DataFrame(index=train_size, data=test_scores) | pandas.DataFrame |
import pandas as pd
import re
from programs.data_cleaning.data_cleaning import box_data_cleaning
trends = pd.read_csv('../data/HWSysTrends11_5to11_12.csv')
# creates a list of building names based on regex patterns from the "Name Path Reference" column in the csv file
reg_list = [re.findall(r"(?<=\.)(B.*?)(?=\.)", i)[0] for i in trends['Name Path Reference']]
# adds the reg_list to the initial DataFrame as "Building"
trends['Building'] = reg_list
# Creates list of each Building name
buildings = pd.unique(trends['Building'])
cleaned_df = | pd.DataFrame() | pandas.DataFrame |
import sys
sys.path.append('../src/meta_rule/')
sys.path.append('../dd_lnn/')
import random
import time
import copy
import argparse
from meta_interpretive import BaseMetaPredicate, MetaRule, Project, DisjunctionRule
from train_test import score, align_labels
from read import load_data, load_metadata, load_labels
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as func
import torch.optim as optim
from utils import evaluate, MyBatchSampler
from torch.utils.data import DataLoader, TensorDataset
def eval(df, filter_attr, filter_val, needle_attr, needle_val, true):
result = df.loc[(df[filter_attr] == filter_val)][[needle_attr, "prediction"]]
true_links = true.loc[(true[filter_attr] == filter_val)][[needle_attr]].to_numpy()[:,0].tolist()
filtered_result = []
score = None
for idx, r in result.iterrows():
needle_val1 = r[needle_attr]
prediction1 = r['prediction']
if needle_val1 == needle_val:
score = prediction1
if needle_val1 not in true_links:
filtered_result.append([needle_val1, prediction1])
haspath = None
if not score:
haspath = False
score = 0.0
else:
haspath = True
rank = 1 + len([pair for pair in filtered_result if pair[1] > score])
return score, rank, haspath
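# Added illustration (hypothetical numbers, not from the WN18 data): eval() performs
# "filtered" ranking for link prediction. If the held-out tail entity scores 0.62 and,
# after removing candidates that are already true links, exactly two candidates score
# higher, it returns (score=0.62, rank=3, haspath=True); if the tail never appears in
# the scored dataframe it returns score=0.0, haspath=False, and the rank counted against
# the remaining filtered candidates.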
#driver for experiments with wn18 link prediction data
if __name__ == "__main__":
begtime = time.time()
alpha = 0.95
parser = argparse.ArgumentParser()
parser.add_argument("-bk", "--background", required=True, help="file containing schema information")
parser.add_argument("-ftr", "--facts_train", required=True, help="file containing facts for training")
parser.add_argument("-fte", "--facts_test", required=True, help="file containing facts for testing")
parser.add_argument("-t", "--target", required=True, help="the target predicate")
cl_args = parser.parse_args()
background_fname = cl_args.background
facts_fname_train = cl_args.facts_train
facts_fname_test = cl_args.facts_test
target = cl_args.target
dfs_train = load_metadata(background_fname)
load_data(facts_fname_train, dfs_train)
print("done reading (" + str(time.time()-begtime) + ")")
attr_name = None
labels_df_train = None
relations_train = []
rel_names_train = []
for name, df in dfs_train.items():
colnames = df.columns.values.tolist()
attr_name = colnames[0]
df.columns = [attr_name + "0", attr_name + "1"]
if target == name:
labels_df_train = df
else:
rel_names_train.append(name)
relations_train.append(df)
#creating inv
name = "inv" + name
cols = df.columns.tolist()
inv_df = df[[cols[1], cols[0]]].copy()
inv_df.columns = [cols[0], cols[1]]
rel_names_train.append(name)
relations_train.append(inv_df)
labels_df_train.columns = [attr_name + "0", attr_name + "3"]
labels_df_train['Label'] = 1.0
body0 = BaseMetaPredicate(relations_train)
print("done body0 (" + str(time.time()-begtime) + "s)")
body1 = copy.deepcopy(body0)
body1.df.columns = [attr_name + "2", attr_name + "3"]
join = MetaRule([body0, body1], [[[attr_name + "1"], [attr_name + "2"]]], alpha, False)
print("done join (" + str(time.time()-begtime) + "s)")
proj = Project(join, [attr_name + "0", attr_name + "3"])
print("done project (" + str(time.time()-begtime) + "s)")
metap = copy.deepcopy(body0)
metap.df.columns = [attr_name + "0", attr_name + "3"]
disj = DisjunctionRule([metap, proj], alpha, 0)
print("done disjunction (" + str(time.time()-begtime) + "s)")
meta = disj
df = labels_df_train
label = 'Label'
step = 1e-2
epochs = 50
y = align_labels(meta, df, label)
print("done label alignment (" + str(time.time()-begtime) + "s)")
grouped = meta.df.groupby([attr_name + "0"])
numgroups = len(grouped)
src_groups = []
pos = 0
for name, indices in grouped.groups.items():
if pos % 100000 == 0:
print(str(pos) + "/" + str(numgroups))
idx = indices.to_numpy().tolist()
if torch.sum(y[idx,:]) > 0:
src_groups += [idx]
pos = pos + 1
print("Added: " + str(len(src_groups)) + " (" + str(time.time()-begtime)+ "s)")
if len(src_groups) > 0:
data = TensorDataset(torch.arange(len(src_groups)))
loader = DataLoader(data, batch_size=32, shuffle=True)
optimizer = optim.Adam(meta.parameters(), lr=step)
loss_fn = nn.MarginRankingLoss(margin=0.5, reduction="mean")
iter = 0
for epoch in range(epochs):
train_loss = 0.0
for batch in loader:
pos_list = []
pos_len = []
neg_list = []
neg_len = []
for idx in batch[0]:
pos_idx = [i for i in src_groups[idx] if y[i] == 1]
neg_idx = [i for i in src_groups[idx] if y[i] == 0]
neg_idx = random.sample(neg_idx, min(len(pos_idx), len(neg_idx)))
pos_list += pos_idx
pos_len += [len(pos_idx)]
neg_list += neg_idx
neg_len += [len(neg_idx)]
meta.train()
optimizer.zero_grad()
yhat, slacks = meta(pos_list + neg_list)
pos_mat = torch.zeros(0)
neg_mat = torch.zeros(0)
curr_pos = 0
curr_neg = sum(pos_len)
for pos_cnt, neg_cnt in zip(pos_len, neg_len):
pos_yhat = yhat[curr_pos:curr_pos+pos_cnt]
pos_yhat = pos_yhat.repeat(neg_cnt, 1)[:,0]
neg_yhat = yhat[curr_neg:curr_neg+neg_cnt]
neg_yhat = torch.repeat_interleave(neg_yhat, pos_cnt, dim=0)[:,0]
curr_pos += pos_cnt
curr_neg += neg_cnt
pos_mat = torch.cat((pos_mat, pos_yhat))
neg_mat = torch.cat((neg_mat, neg_yhat))
loss = loss_fn(pos_mat, neg_mat, torch.ones(pos_mat.size()[0]))
train_loss = train_loss + loss.item() * pos_mat.size()[0]
print("Epoch " + str(epoch) + " (iteration=" + str(iter) + "): " + str(loss.item()))
loss.backward()
optimizer.step()
iter = iter + 1
print("Epoch=" + str(epoch) + " +ve.loss=" + str(train_loss))
print("done training (" +str(time.time()-begtime) + "s)")
dfs_test = load_metadata(background_fname)
load_data(facts_fname_test, dfs_test)
labels_df_test = dfs_test[target]
labels_df_test.columns = [attr_name + "0", attr_name + "3"]
labels_df_test['Label'] = 1.0
true_links = | pd.concat([labels_df_train, labels_df_test]) | pandas.concat |
from sklearn.manifold import TSNE
from kaldi_io import read_vec_flt_scp
import sys
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
# example usage
# python local/visualize_trait_emb.py age/accent/gender exp/vctk_lde/resnet_mfcc_3-8_200_32_mean_lde_sqr_asoftmax_m2/lde.scp 43873 output.png
# reference: https://towardsdatascience.com/visualising-high-dimensional-datasets-using-pca-and-t-sne-in-python-8ef87e7915b
# speaker-info.txt
# ID AGE GENDER ACCENTS REGION
# 225 23 F English Southern England
# 226 22 M English Surrey
# 227 38 M English Cumbria
# 228 22 F English Southern England
# 229 23 F English Southern England
# 230 22 F English Stockton-on-tees
# 231 23 F English Southern England
# 232 23 M English Southern England
#speaker_info = '/export/c01/jlai/nii/spk_enc/Erica_VCTK_processed/vctk-speaker-info.txt'
speaker_info = '/data/sls/scratch/clai24/data/Erica_VCTK_processed/vctk-speaker-info.txt'
with open(speaker_info, 'r') as f:
context = f.readlines()
context = [x.strip() for x in context][1:]
spk2trait = {}
for i in context:
spk = i.split()[0]
if spk != 's5': # add prefix 'p'
spk = 'p' + spk
if sys.argv[1] == 'age':
trait = int(i.split()[1])
elif sys.argv[1] == 'gender':
trait = i.split()[2]
elif sys.argv[1] == 'accent':
trait = i.split()[3]
spk2trait[spk] = trait
print('speaker to trait is %s' % spk2trait)
tsne = TSNE(n_components=2, verbose=1)
X, y = [], []
index = 0
for key,vec in read_vec_flt_scp(sys.argv[2]):
X.append(vec)
spk = key.split('-')[0]
y.append(spk2trait[spk])
#print(vec.shape)
#y.append(index)
index += 1
X, y = np.array(X), np.array(y)
print(len(y))
print(np.unique(y))
X_emb = tsne.fit_transform(X) # tsne transformed
# For reproducibility of the results
np.random.seed(42)
N = int(sys.argv[3])
rndperm = np.random.permutation(X_emb.shape[0])
X_emb, y = X_emb[rndperm[:N]], y[rndperm[:N]]
feat_cols = [ 'pixel'+str(i) for i in range(X_emb.shape[1]) ]
df = pd.DataFrame(X_emb,columns=feat_cols)  # api: pandas.DataFrame
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
description: cleaning tools for tidals (tidepool data analytics tools)
created: 2018-07
author: <NAME>
license: BSD-2-Clause
"""
import pandas as pd
import numpy as np
#Cleaning Functions
#removeNegativeDurations (Duplicate with differences)
#tslimCalibrationFix (Duplicate with differences)
#removeInvalidCgmValues
#round_time
#removeDuplicates
#removeCgmDuplicates
#largeTimezoneOffsetCorrection
#removeNegativeDurations (Duplicate with differences)
#removeInvalidCgmValues
#removeDuplicates
#remove_duplicates (Major Difference between the 2 projects)
#removeCgmDuplicates
#largeTimezoneOffsetCorrection
#NEED DATA
#tslimCalibrationFix (Duplicate with differences)
#largeTimezoneOffsetCorrection
##??
#convertDeprecatedTimezoneToAlias
# CLEAN DATA FUNCTIONS
def removeDuplicates(df, criteriaDF):
nBefore = len(df)
df = df.loc[~(df[criteriaDF].duplicated())]
df = df.reset_index(drop=True)
nDuplicatesRemoved = nBefore - len(df)
return df, nDuplicatesRemoved
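# Example usage (illustrative): drop rows whose values in the "time" column repeat,
# keeping the first occurrence:
#   df, nRemoved = removeDuplicates(df, "time")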
def round_time(df, timeIntervalMinutes=5, timeField="time", roundedTimeFieldName="roundedTime", startWithFirstRecord=True,
verbose=False):
'''
A general purpose round time function that rounds the "time"
field to nearest <timeIntervalMinutes> minutes
INPUTS:
* a dataframe (df) that contains a time field that you want to round
* timeIntervalMinutes (defaults to 5 minutes given that most cgms output every 5 minutes)
* timeField to round (defaults to the UTC time "time" field)
* roundedTimeFieldName is a user specified column name (defaults to roundedTime)
* startWithFirstRecord starts the rounding with the first record if True, and the last record if False (defaults to True)
* verbose specifies whether the extra columns used to make calculations are returned
'''
df.sort_values(by=timeField, ascending=startWithFirstRecord, inplace=True)
df.reset_index(drop=True, inplace=True)
# make sure the time field is in the right form
t = pd.to_datetime(df[timeField])
# calculate the time between consecutive records
t_shift = pd.to_datetime(df[timeField].shift(1))
df["timeBetweenRecords"] = \
round((t - t_shift).dt.days*(86400/(60 * timeIntervalMinutes)) +
(t - t_shift).dt.seconds/(60 * timeIntervalMinutes)) * timeIntervalMinutes
# separate the data into chunks if timeBetweenRecords is greater than
# 2 times the <timeIntervalMinutes> minutes so the rounding process starts over
largeGaps = list(df.query("abs(timeBetweenRecords) > " + str(timeIntervalMinutes * 2)).index)
largeGaps.insert(0, 0)
largeGaps.append(len(df))
for gIndex in range(0, len(largeGaps) - 1):
chunk = t[largeGaps[gIndex]:largeGaps[gIndex+1]]
firstRecordChunk = t[largeGaps[gIndex]]
# calculate the time difference between each time record and the first record
df.loc[largeGaps[gIndex]:largeGaps[gIndex+1], "minutesFromFirstRecord"] = \
(chunk - firstRecordChunk).dt.days*(86400/(60)) + (chunk - firstRecordChunk).dt.seconds/(60)
# then round to the nearest X Minutes
        # NOTE: the ".000001" ensures that multiples of 2:30 always round up.
df.loc[largeGaps[gIndex]:largeGaps[gIndex+1], "roundedMinutesFromFirstRecord"] = \
round((df.loc[largeGaps[gIndex]:largeGaps[gIndex+1],
"minutesFromFirstRecord"] / timeIntervalMinutes) + 0.000001) * (timeIntervalMinutes)
        roundedFirstRecord = (firstRecordChunk + pd.Timedelta("1microseconds"))  # api: pandas.Timedelta
# The MIT License (MIT)
#
# Copyright (c) 2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""PortfolioOpt: Financial Portfolio Optimization
This module provides a set of functions for financial portfolio
optimization, such as construction of Markowitz portfolios, minimum
variance portfolios and tangency portfolios (i.e. maximum Sharpe ratio
portfolios) in Python. The construction of long-only, long/short and
market neutral portfolios is supported."""
import numpy as np
import pandas as pd
import cvxopt as opt
import cvxopt.solvers as optsolvers
import warnings
__all__ = ['markowitz_portfolio',
'min_var_portfolio',
'tangency_portfolio',
'max_ret_portfolio',
'truncate_weights']
def markowitz_portfolio(cov_mat, exp_rets, target_ret,
allow_short=False, market_neutral=False):
"""
Computes a Markowitz portfolio.
Parameters
----------
cov_mat: pandas.DataFrame
Covariance matrix of asset returns.
exp_rets: pandas.Series
Expected asset returns (often historical returns).
target_ret: float
Target return of portfolio.
allow_short: bool, optional
If 'False' construct a long-only portfolio.
If 'True' allow shorting, i.e. negative weights.
market_neutral: bool, optional
If 'False' sum of weights equals one.
If 'True' sum of weights equal zero, i.e. create a
market neutral portfolio (implies allow_short=True).
Returns
-------
weights: pandas.Series
Optimal asset weights.
"""
if not isinstance(cov_mat, pd.DataFrame):
raise ValueError("Covariance matrix is not a DataFrame")
if not isinstance(exp_rets, pd.Series):
raise ValueError("Expected returns is not a Series")
if not isinstance(target_ret, float):
raise ValueError("Target return is not a float")
if not cov_mat.index.equals(exp_rets.index):
raise ValueError("Indices do not match")
if market_neutral and not allow_short:
warnings.warn("A market neutral portfolio implies shorting")
allow_short=True
n = len(cov_mat)
P = opt.matrix(cov_mat.values)
q = opt.matrix(0.0, (n, 1))
# Constraints Gx <= h
if not allow_short:
# exp_rets*x >= target_ret and x >= 0
G = opt.matrix(np.vstack((-exp_rets.values,
-np.identity(n))))
h = opt.matrix(np.vstack((-target_ret,
+np.zeros((n, 1)))))
else:
# exp_rets*x >= target_ret
G = opt.matrix(-exp_rets.values).T
h = opt.matrix(-target_ret)
# Constraints Ax = b
# sum(x) = 1
A = opt.matrix(1.0, (1, n))
if not market_neutral:
b = opt.matrix(1.0)
else:
b = opt.matrix(0.0)
# Solve
optsolvers.options['show_progress'] = False
sol = optsolvers.qp(P, q, G, h, A, b)
if sol['status'] != 'optimal':
warnings.warn("Convergence problem")
# Put weights into a labeled series
weights = pd.Series(sol['x'], index=cov_mat.index)
return weights
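# Example usage (a minimal sketch with synthetic data; names are arbitrary):
#   rets = pd.DataFrame(np.random.randn(250, 3) / 100, columns=['A', 'B', 'C'])
#   cov_mat, exp_rets = rets.cov(), rets.mean()
#   w = markowitz_portfolio(cov_mat, exp_rets, target_ret=float(exp_rets.mean()))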
def min_var_portfolio(cov_mat, allow_short=False):
"""
Computes the minimum variance portfolio.
Note: As the variance is not invariant with respect
to leverage, it is not possible to construct non-trivial
market neutral minimum variance portfolios. This is because
the variance approaches zero with decreasing leverage,
i.e. the market neutral portfolio with minimum variance
is not invested at all.
Parameters
----------
cov_mat: pandas.DataFrame
Covariance matrix of asset returns.
allow_short: bool, optional
If 'False' construct a long-only portfolio.
If 'True' allow shorting, i.e. negative weights.
Returns
-------
weights: pandas.Series
Optimal asset weights.
"""
if not isinstance(cov_mat, pd.DataFrame):
raise ValueError("Covariance matrix is not a DataFrame")
n = len(cov_mat)
P = opt.matrix(cov_mat.values)
q = opt.matrix(0.0, (n, 1))
# Constraints Gx <= h
if not allow_short:
# x >= 0
G = opt.matrix(-np.identity(n))
h = opt.matrix(0.0, (n, 1))
else:
G = None
h = None
# Constraints Ax = b
# sum(x) = 1
A = opt.matrix(1.0, (1, n))
b = opt.matrix(1.0)
# Solve
optsolvers.options['show_progress'] = False
sol = optsolvers.qp(P, q, G, h, A, b)
if sol['status'] != 'optimal':
warnings.warn("Convergence problem")
# Put weights into a labeled series
    weights = pd.Series(sol['x'], index=cov_mat.index)  # api: pandas.Series
"""
Implement FlexMatcher.
This module is the main module of the FlexMatcher package and implements the
FlexMatcher class.
Todo:
* Extend the module to work with and without data or column names.
* Allow users to add/remove classifiers.
* Combine modules (i.e., create_training_data and training functions).
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import flexmatcher.classify as clf
import flexmatcher.utils as utils
from sklearn import linear_model
import numpy as np
import pandas as pd
import pickle
import time
class FlexMatcher:
"""Match a given schema to the mediated schema.
The FlexMatcher learns to match an input schema to a mediated schema.
    The class considers pandas dataframes as databases and their column names as
    the schema. FlexMatcher learns to do schema matching by training on
instances of dataframes and how their columns are matched against the
mediated schema.
Attributes:
train_data (dataframe): Dataframe with 3 columns. The name of
the column in the schema, the value under that column and the name
of the column in the mediated schema it was mapped to.
col_train_data (dataframe): Dataframe with 2 columns. The name
            of the column in the schema and the name of the column in the mediated
schema it was mapped to.
data_src_num (int): Store the number of available data sources.
classifier_list (list): List of classifiers used in the training.
classifier_type (string): List containing the type of each classifier.
Possible values are 'column' and 'value' classifiers.
prediction_list (list): List of predictions on the training data
produced by each classifier.
weights (ndarray): A matrix where cell (i,j) captures how good the j-th
classifier is at predicting if a column should match the i-th
column (where columns are sorted by name) in the mediated schema.
columns (list): The sorted list of column names in the mediated schema.
"""
def __init__(self, dataframes, mappings, sample_size=300):
"""Prepares the list of classifiers that are being used for matching
        the schemas and creates the training data from the input dataframes
and their mappings.
Args:
dataframes (list): List of dataframes to train on.
            mappings (list): List of dictionaries mapping columns of dataframes
to columns in the mediated schema.
sample_size (int): The number of rows sampled from each dataframe
for training.
"""
print('Create training data ...')
self.create_training_data(dataframes, mappings, sample_size)
print('Training data done ...')
unigram_count_clf = clf.NGramClassifier(ngram_range=(1, 1))
bigram_count_clf = clf.NGramClassifier(ngram_range=(2, 2))
unichar_count_clf = clf.NGramClassifier(analyzer='char_wb',
ngram_range=(1, 1))
bichar_count_clf = clf.NGramClassifier(analyzer='char_wb',
ngram_range=(2, 2))
trichar_count_clf = clf.NGramClassifier(analyzer='char_wb',
ngram_range=(3, 3))
quadchar_count_clf = clf.NGramClassifier(analyzer='char_wb',
ngram_range=(4, 4))
char_dist_clf = clf.CharDistClassifier()
self.classifier_list = [unigram_count_clf, bigram_count_clf,
unichar_count_clf, bichar_count_clf,
trichar_count_clf, quadchar_count_clf,
char_dist_clf]
self.classifier_type = ['value', 'value', 'value', 'value',
'value', 'value', 'value']
if self.data_src_num > 5:
col_char_dist_clf = clf.CharDistClassifier()
col_trichar_count_clf = clf.NGramClassifier(analyzer='char_wb',
ngram_range=(3, 3))
col_quadchar_count_clf = clf.NGramClassifier(analyzer='char_wb',
ngram_range=(4, 4))
col_quintchar_count_clf = clf.NGramClassifier(analyzer='char_wb',
ngram_range=(5, 5))
col_word_count_clf = \
clf.NGramClassifier(analyzer=utils.columnAnalyzer)
knn_clf = \
clf.KNNClassifier()
self.classifier_list = self.classifier_list + \
[col_char_dist_clf, col_trichar_count_clf,
col_quadchar_count_clf, col_quintchar_count_clf,
col_word_count_clf, knn_clf]
self.classifier_type = self.classifier_type + (['column'] * 6)
def create_training_data(self, dataframes, mappings, sample_size):
"""Transform dataframes and mappings into training data.
The method uses the names of columns as well as the data under each
column as its training data. It also replaces missing values with 'NA'.
Args:
dataframes (list): List of dataframes to train on.
            mappings (list): List of dictionaries mapping columns of dataframes
to columns in the mediated schema.
sample_size (int): The number of rows sampled from each dataframe
for training.
"""
train_data_list = []
col_train_data_list = []
for (datafr, mapping) in zip(dataframes, mappings):
sampled_rows = datafr.sample(min(sample_size, datafr.shape[0]))
sampled_data = pd.melt(sampled_rows)
sampled_data.columns = ['name', 'value']
sampled_data['class'] = \
sampled_data.apply(lambda row: mapping[row['name']], axis=1)
train_data_list.append(sampled_data)
            col_data = pd.DataFrame(datafr.columns)  # api: pandas.DataFrame
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
import numpy as np
import pytest
import pandas.compat as compat
from pandas.compat import range
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, NaT, Series, bdate_range, date_range, isna)
from pandas.core import ops
import pandas.core.nanops as nanops
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal)
from .common import TestData
class TestSeriesLogicalOps(object):
@pytest.mark.parametrize('bool_op', [operator.and_,
operator.or_, operator.xor])
def test_bool_operators_with_nas(self, bool_op):
# boolean &, |, ^ should work with object arrays and propagate NAs
ser = Series(bdate_range('1/1/2000', periods=10), dtype=object)
ser[::2] = np.nan
mask = ser.isna()
filled = ser.fillna(ser[0])
result = bool_op(ser < ser[9], ser > ser[3])
expected = bool_op(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_operators_bitwise(self):
# GH#9016: support bitwise op for integer types
index = list('bca')
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_tff = Series([True, False, False], index=index)
s_empty = Series([])
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype='int64')
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_tft & s_empty
expected = s_fff
assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & s_3333
expected = Series(range(4), dtype='int64')
assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype='int64')
assert_series_equal(res, expected)
s_a0b1c0 = Series([1], list('b'))
res = s_tft & s_a0b1c0
expected = s_tff.reindex(list('abc'))
assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft.reindex(list('abc'))
assert_series_equal(res, expected)
n0 = 0
res = s_tft & n0
expected = s_fff
assert_series_equal(res, expected)
res = s_0123 & n0
expected = Series([0] * 4)
assert_series_equal(res, expected)
n1 = 1
res = s_tft & n1
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & n1
expected = Series([0, 1, 0, 1])
assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype='int8')
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype='int64')
assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype='int32')
assert_series_equal(res, expected)
with pytest.raises(TypeError):
s_1111 & 'a'
with pytest.raises(TypeError):
s_1111 & ['a', 'b', 'c', 'd']
with pytest.raises(TypeError):
s_0123 & np.NaN
with pytest.raises(TypeError):
s_0123 & 3.14
with pytest.raises(TypeError):
s_0123 & [0.1, 4, 3.14, 2]
# s_0123 will be all false now because of reindexing like s_tft
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=['b', 'c', 'a', 0, 1, 2, 3])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_tft & s_0123, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_tft & s_0123, exp)
# s_tft will be all false now because of reindexing like s_0123
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=[0, 1, 2, 3, 'b', 'c', 'a'])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_0123 & s_tft, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_0123 & s_tft, exp)
assert_series_equal(s_0123 & False, Series([False] * 4))
assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
assert_series_equal(s_0123 & [False], Series([False] * 4))
assert_series_equal(s_0123 & (False), Series([False] * 4))
assert_series_equal(s_0123 & Series([False, np.NaN, False, False]),
Series([False] * 4))
s_ftft = Series([False, True, False, True])
assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)
s_abNd = Series(['a', 'b', np.NaN, 'd'])
res = s_0123 & s_abNd
expected = s_ftft
assert_series_equal(res, expected)
def test_scalar_na_logical_ops_corners(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
with pytest.raises(TypeError):
s & datetime(2005, 1, 1)
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
expected = Series(True, index=s.index)
expected[::2] = False
result = s & list(s)
assert_series_equal(result, expected)
d = DataFrame({'A': s})
# TODO: Fix this exception - needs to be fixed! (see GH5035)
# (previously this was a TypeError because series returned
# NotImplemented
# this is an alignment issue; these are equivalent
# https://github.com/pandas-dev/pandas/issues/5284
with pytest.raises(TypeError):
d.__and__(s, axis='columns')
with pytest.raises(TypeError):
s & d
# this is wrong as its not a boolean result
# result = d.__and__(s,axis='index')
@pytest.mark.parametrize('op', [
operator.and_,
operator.or_,
operator.xor,
])
def test_logical_ops_with_index(self, op):
# GH#22092, GH#19792
ser = Series([True, True, False, False])
idx1 = Index([True, False, True, False])
idx2 = Index([1, 0, 1, 0])
expected = Series([op(ser[n], idx1[n]) for n in range(len(ser))])
result = op(ser, idx1)
assert_series_equal(result, expected)
expected = Series([op(ser[n], idx2[n]) for n in range(len(ser))],
dtype=bool)
result = op(ser, idx2)
assert_series_equal(result, expected)
@pytest.mark.parametrize("op, expected", [
(ops.rand_, pd.Index([False, True])),
(ops.ror_, pd.Index([False, True])),
(ops.rxor, pd.Index([])),
])
def test_reverse_ops_with_index(self, op, expected):
# https://github.com/pandas-dev/pandas/pull/23628
# multi-set Index ops are buggy, so let's avoid duplicates...
ser = Series([True, False])
idx = Index([False, True])
result = op(ser, idx)
tm.assert_index_equal(result, expected)
def test_logical_ops_label_based(self):
# GH#4947
# logical ops should be label based
a = Series([True, False, True], list('bca'))
b = Series([False, True, False], list('abc'))
expected = Series([False, True, False], list('abc'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False], list('abc'))
result = a | b
assert_series_equal(result, expected)
expected = Series([True, False, False], list('abc'))
result = a ^ b
assert_series_equal(result, expected)
# rhs is bigger
a = Series([True, False, True], list('bca'))
b = Series([False, True, False, True], list('abcd'))
expected = Series([False, True, False, False], list('abcd'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False, False], list('abcd'))
result = a | b
assert_series_equal(result, expected)
# filling
# vs empty
result = a & Series([])
expected = Series([False, False, False], list('bca'))
assert_series_equal(result, expected)
result = a | Series([])
expected = Series([True, False, True], list('bca'))
assert_series_equal(result, expected)
# vs non-matching
result = a & Series([1], ['z'])
expected = Series([False, False, False, False], list('abcz'))
assert_series_equal(result, expected)
result = a | Series([1], ['z'])
expected = Series([True, True, False, False], list('abcz'))
assert_series_equal(result, expected)
# identity
# we would like s[s|e] == s to hold for any e, whether empty or not
for e in [Series([]), Series([1], ['z']),
Series(np.nan, b.index), Series(np.nan, a.index)]:
result = a[a | e]
assert_series_equal(result, a[a])
for e in [Series(['z'])]:
if compat.PY3:
with tm.assert_produces_warning(RuntimeWarning):
result = a[a | e]
else:
result = a[a | e]
assert_series_equal(result, a[a])
# vs scalars
index = list('bca')
t = Series([True, False, True])
for v in [True, 1, 2]:
result = Series([True, False, True], index=index) | v
expected = Series([True, True, True], index=index)
assert_series_equal(result, expected)
for v in [np.nan, 'foo']:
with pytest.raises(TypeError):
t | v
for v in [False, 0]:
result = Series([True, False, True], index=index) | v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [True, 1]:
result = Series([True, False, True], index=index) & v
            expected = Series([True, False, True], index=index)  # api: pandas.Series
import numpy as np
import pandas as pd
import os
def to_categorical(data, dtype=None):
val_to_cat = {}
cat = []
index = 0
for val in data:
if dtype == 'ic':
if val not in ['1', '2', '3', '4ER+', '4ER-', '5', '6', '7', '8', '9', '10']:
val = '1'
if val in ['4ER+','4ER-']:
val='4'
if val not in val_to_cat:
val_to_cat[val] = index
cat.append(index)
index += 1
else:
cat.append(val_to_cat[val])
return np.array(cat)
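# Example (illustrative): to_categorical(['F', 'M', 'F']) -> array([0, 1, 0]);
# with dtype='ic', '4ER+' and '4ER-' are first collapsed to '4' before encoding.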
def get_data(data):
d = {}
clin_fold = data[["METABRIC_ID"]]
rna = data[[col for col in data if col.startswith('GE')]]
rna = normalizeRNA(rna)
cna = data[[col for col in data if col.startswith('CNA')]]
d['ic'] = list(data['iC10'].values)
d['pam50'] = list(data['Pam50Subtype'].values)
d['er'] = list(data['ER_Expr'].values)
d['pr'] = list(data['PR_Expr'].values)
d['her2'] = list(data['Her2_Expr'].values)
d['drnp'] = list(data['DR'].values)
d['rnanp'] = rna.astype(np.float32).values
d['cnanp'] = ((cna.astype(np.float32).values + 2.0) / 4.0)
d['icnp'] = to_categorical(d['ic'], dtype='ic')
d['pam50np'] = to_categorical(d['pam50'])
d['ernp'] = to_categorical(d['er'])
d['prnp'] = to_categorical(d['pr'])
d['her2np'] = to_categorical(d['her2'])
d['drnp'] = to_categorical(d['drnp'])
"""
preprocessing for clinical data to match current pipeline
"""
## Clinical Data Quick Descriptions
# clin["Age_At_Diagnosis"] # Truly numeric
# clin["Breast_Tumour_Laterality"] # Categorical "L, R" (3 unique)
# clin["NPI"] # Truly numeric
# clin["Inferred_Menopausal_State"] # Categorical "Pre, Post" (3 unique)
# clin["Lymph_Nodes_Positive"] # Ordinal ints 0-24
# clin["Grade"] # Ordinal string (come on) 1-3 + "?"
# clin["Size"] # Truly Numeric
# clin["Histological_Type"] # Categorical strings (9 unique)
# clin["Cellularity"] # Categorical strings (4 unique)
# clin["Breast_Surgery"] # Categorical strings (3 Unique)
# clin["CT"] # Categorical strings (9 unique)
# clin["HT"] # Categorical strings (9 Unique)
# clin["RT"] # Categorical strings (9 Unique)
## Clinical Data Transformations
    # On the basis of the above we will keep some as numeric and turn others into one-hot encodings
    # (I am not comfortable binning the continuous numeric columns without some basis for their bins)
    # Or, since we don't have that much data anyway, just one-hot everything and use BCE loss to train
    # We have to get the entire dataset and transform it into one-hots and bins
complete_data = r"MB.csv"
# complete_data = pd.read_csv(complete_data).set_index("METABRIC_ID")
complete_data = pd.read_csv(complete_data, index_col=None, header=0)
# Either we keep numerics as
clin_numeric = complete_data[["METABRIC_ID","Age_At_Diagnosis", "NPI", "Size"]]
# Numerical binned to arbitrary ranges then one-hot dummies
metabric_id = complete_data[["METABRIC_ID"]]
    aad = pd.get_dummies(pd.cut(complete_data["NPI"], 10, labels=[1,2,3,4,5,6,7,8,9,10]))  # api: pandas.cut
"""
This step should only be performed after handling Categorical Variables
"""
import numpy as np
import pandas as pd
def Get_VIF(X):
"""[summary]
PARAMETERS :-
X = Pandas DataFrame
Return :-
    Pandas DataFrame of Features and their VIF values
"""
def A(X):
        vif_data = pd.DataFrame()  # api: pandas.DataFrame
import concurrent.futures
import math
import multiprocessing
import os
import numba as nb
import numpy as np
import pandas as pd
EXPERIMENT=False
class SpectrumMatcher:
"""
Handles the creation of a uniqueness matrix.
"""
def __init__(self, provider, density, local, cutoff, validate, output_directory, validation_output_name):
"""Construct a matcher that can be used to calculate the uniqueness matrix.
:param provider: Provides the input files and locations used in calculations.
:type provider: FileProvider
:param density: PPMC threshold used to determine matches in the initial matching.
:type density: int
:param local: PPMC local threshold expressed as a percentage used to refine matches based on their PPMC.
:type local: int
:param cutoff: S/N cutoff value.
:type cutoff: int
        :param validate: Determines whether or not the full uniqueness matrix is written to an output file validation_data.csv
:type validate: bool
"""
self.provider = provider
self.density = density
self.local = local
self.cutoff = cutoff
self.validate= validate
self.output_directory=output_directory
#output must be csv
if validation_output_name[-4:] != ".csv":
validation_output_name += ".csv"
self.validation_output=validation_output_name
def calculate_uniqueness_matrix(self):
"""Calculate the uniqueness matrix and optionally prints validation data.
When the flag -v is set, the validation data will be printed to an external file.
:return: The uniqueness matrix of all files contained in `provider`.
"""
maximum_pkl_length=self.provider.green_pkl_files.shape[1]
maximum_interval_length = np.amax(self.provider.intervals[:, 2] - self.provider.intervals[:, 1] + 1)
matcher_jit = SpectrumMatcherJIT(self.density, self.local, self.cutoff, self.provider.green_pkl_files,
self.provider.pkl_files_length, maximum_pkl_length, self.provider.green_fms_files,
self.provider.intervals, maximum_interval_length)
print("Sys: Start calculating using {0} processess...".format(multiprocessing.cpu_count()))
#parallellize the calculation of the uniqueness values
with concurrent.futures.ThreadPoolExecutor() as executor:
jobs_count = multiprocessing.cpu_count()
jobs = [executor.submit(calculate_uniqueness_matrix_step, matcher_jit, i, jobs_count) for i in range(jobs_count)]
#todo: optimize: via axis sum
uniqueness_matrix = jobs[0].result()[0]
shared_matrix=jobs[0].result()[1]
#todo: do something with shared_matrix
for job in jobs[1:]:
uniqueness_matrix += job.result()[0]
shared_matrix+=job.result()[1]
if self.validate:
self.print_validation(uniqueness_matrix, shared_matrix)
return uniqueness_matrix
def print_validation(self, uniqueness_matrix, shared_matrix=None):
"""Print the uniqueness matrix to an external file data_validation.csv
:param uniqueness_matrix: The matrix to be serialized.
:type uniqueness_matrix: array
:param shared_matrix: (Optional) Not currently used.
:type shared_matrix: array
:return: void
"""
# Sort the columns and rows of the matrix by descending row sum
column_sum = np.sum(uniqueness_matrix, axis=0)
sorted_indices = np.argsort(column_sum)[::-1]
validation_matrix = uniqueness_matrix[:, sorted_indices]
validation_matrix = validation_matrix[sorted_indices, :]
# Apply the new ordering to the filenames and fms_files
filenames = [self.provider.green_filenames[i] for i in sorted_indices.tolist()]
# Make sure the output file can be written to
outfilename=os.path.join(self.output_directory, self.validation_output)
try:
output= open(outfilename,"w+")
except IOError:
raise IOError
data=pd.DataFrame(validation_matrix,columns=filenames)
# Insert a column with the filenames
data.insert(0, '', pd.Series(filenames, index=data.index))
        pd.DataFrame.to_csv(data, output, index=False)  # api: pandas.DataFrame.to_csv
from pathlib import Path
import sklearn
import numpy as np
import pandas as pd
from scipy.stats import pearsonr, spearmanr
def calc_preds(model, x, y, mltype):
""" Calc predictions. """
if mltype == 'cls':
def get_pred_fn(model):
if hasattr(model, 'predict_proba'):
return model.predict_proba
if hasattr(model, 'predict'):
return model.predict
pred_fn = get_pred_fn(model)
if (y.ndim > 1) and (y.shape[1] > 1):
y_pred = pred_fn(x)
y_pred = np.argmax(y_pred, axis=1)
y_true = np.argmax(ydata, axis=1)
else:
y_pred = pred_fn(x)
y_true = y
elif mltype == 'reg':
y_pred = np.squeeze(model.predict(x))
y_true = np.squeeze(y)
return y_pred, y_true
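# Example usage (illustrative): for a fitted regressor `model` and held-out arrays x, y:
#   y_pred, y_true = calc_preds(model, x, y, mltype='reg')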
def dump_preds(y_true, y_pred, meta=None, outpath='./preds.csv'):
""" Dump prediction and true values, with optional with metadata. """
    y_true = pd.Series(y_true, name='y_true')  # api: pandas.Series
#!/usr/bin/env python3
'''This module includes the julia method and the JuliaPlane class. The complex plane generated by the parent class (ArrayComplexPlane) is transformed by a function returned by the julia() method to create a Julia plane. After the Julia plane is created, the toCSV method exports the plane to a plane.csv file along with the parameters used to build it. The fromCSV method imports the parameters saved in the .csv file and re-generates the Julia plane.
'''
import cplane_np
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def julia(c, max=100):
    '''The method julia() will take a complex number as an argument (c) and will return a function (f). The returned function will take an argument (z) and, depending on abs(z), will return an integer value.
arguments:
c: a complex number
max: an integer
returns:
    f: a function. The returned function will take each value (z) of an existing plane and will transform it to an integer based on the following conditions. If abs(z) is bigger than 2, the function will return 1. If abs(z) is at most 2, it will repeatedly apply z = z**2 + c until abs(z) becomes bigger than 2; when it does, the function will return the number of loops taken. However, if the number of loops reaches the maximum (default 100) before abs(z) gets bigger than 2, the function will stop and return 0.
'''
def f(z):
if abs(z) > 2:
return 1
else:
n = 0
while (abs(z) <= 2):
n+=1
if n > max:
return 0
z = z**2 + c
return n
return f
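# Example (illustrative): f = julia(-0.62772 - 0.42193j) returns a function that maps a
# point z to 1 if abs(z) > 2, otherwise to the iteration at which z = z**2 + c first
# escapes abs(z) <= 2, or to 0 if it has not escaped after `max` iterations.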
class JuliaPlane(cplane_np.ArrayComplexPlane):
    '''This class subclasses ArrayComplexPlane to generate its complex plane. After that, it will create a Julia plane using the class argument (c) and the julia method. The class includes toCSV and fromCSV to export and import the Julia plane and its implementation parameters.'''
def __init__(self, c):
'''The method will transform the plane created by the parent class to a Julia plane, when the class gets instantiated.
arguments:
        c: a complex number
'''
self.c = c
super().__init__(-2,2,1000,-2,2,1000)
# print("initial plane: \n", self.plane)
self.plane = self.plane.applymap(julia(c))
def show(self):
'''This method will draw matplotlib's imshow plot using the integer numbers in self.plane.'''
plt.imshow(self.plane, cmap = 'hot', interpolation='bicubic', extent=(self.xmin, self.xmax, self.ymin, self.ymax))
plt.title("C = " + str(self.c))
plt.colorbar(shrink=.94)
plt.show()
def toCSV(self, filename):
'''This method will take a filename to export the Julia plane to a .csv file. It also exports the parameters that are used to create the Julia plane.
        arguments:
filename: a string of a filename
'''
        params = pd.Series( [self.c, self.xmin, self.xmax, self.xlen, self.ymin, self.ymax, self.ylen], index=['c','xmin','xmax','xlen', 'ymin', 'ymax', 'ylen'] )  # api: pandas.Series
'''
Author: <NAME>
GitHub: https://github.com/josephlyu
The figures for the UK page, using data from
Public Health England's COVID-19 UK API and
Oxford University's GitHub repository.
Link1: https://coronavirus.data.gov.uk/developers-guide
Link2: https://github.com/OxCGRT/covid-policy-tracker
'''
import os
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from uk_covid19 import Cov19API
import datetime as dt
from pmdarima.arima import auto_arima
from tensorflow.keras import Model, Input, callbacks
from tensorflow.keras.layers import LSTM, Dense, Dropout
##### DEFINE GLOBAL VARIABLES #####
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
COLORS = {'text':'#114b5f', 'background': '#e6ecec', 'case':'#3f6678', 'death':'#ba3a0a', 'pred':'#d7d9db',
'case_scatter':'#7498a8', 'case_pred':'#005f86', 'death_scatter':'#d88f74', 'death_pred':'#b8272e',
'index_case':'#666666', 'index_death':'#c65d35', 'index_text':'#9e9e9e'}
FIXED_ANNOTATIONS = [dict(x=-0.03, y=1.15, text='<b><i>Model Selection:</i></b>', font={'size':13}, xref='paper', yref='paper', showarrow=False),
dict(x=0.875, y=1.17, text='<b><i>Major<br>Events</i></b>', xref='paper', yref='paper', showarrow=False)]
UK_CASES_EVENTS = [dict(x='2020-3-23', y=1198, text='National Lockdown', ax=-30, ay=-40, arrowhead=5, arrowcolor=COLORS['text'], arrowwidth=1),
dict(x='2020-7-4', y=575, text='(Eat out to Help out)<br>Lockdown Eased', ax=-20, ay=-35, arrowhead=5, arrowcolor=COLORS['text'], arrowwidth=1),
dict(x='2020-9-7', y=2532, text='University Students Return', ax=-65, ay=-65, arrowhead=5, arrowcolor=COLORS['text'], arrowwidth=1),
dict(x='2020-11-5', y=22826, text='Second Lockdown', ax=-90, ay=-25, arrowhead=5, arrowcolor=COLORS['text'], arrowwidth=1),
dict(x='2020-12-2', y=14400, text='(Christmas Period)<br>Lockdown Eased', ax=-45, ay=-90, arrowhead=5, arrowcolor=COLORS['text'], arrowwidth=1),
dict(x='2020-12-21', y=34396, text='Mass Vaccination<br>(1% Population)', ax=-45, ay=-65, arrowhead=5, arrowcolor=COLORS['text'], arrowwidth=1),
dict(x='2021-1-5', y=59344, text='Third Lockdown', ax=-70, ay=-20, arrowhead=5, arrowcolor=COLORS['text'], arrowwidth=1),
dict(x='2021-1-12', y=51221, text='Mass Vaccination<br>(5% Population)', ax=80, ay=-15, arrowhead=5, arrowcolor=COLORS['text'], arrowwidth=1),
dict(x='2021-2-5', y=17714, text='Mass Vaccination<br>(20% Population)', ax=55, ay=-55, arrowhead=5, arrowcolor=COLORS['text'], arrowwidth=1),
dict(x='2021-3-19', y=5485, text='Mass Vaccination<br>(50% Population)', ax=15, ay=-40, arrowhead=5, arrowcolor=COLORS['text'], arrowwidth=1)]
UK_DEATHS_EVENTS = [dict(x='2020-3-23', y=103, text='National Lockdown', ax=-55, ay=-35, arrowhead=5, arrowcolor=COLORS['text'], arrowwidth=1),
dict(x='2020-7-4', y=43, text='(Eat out to Help out)<br>Lockdown Eased', ax=15, ay=-55, arrowhead=5, arrowcolor=COLORS['text'], arrowwidth=1),
dict(x='2020-9-7', y=12, text='University Students Return', ax=-25, ay=-25, arrowhead=5, arrowcolor=COLORS['text'], arrowwidth=1),
dict(x='2020-11-5', y=332, text='Second Lockdown', ax=-85, ay=-10, arrowhead=5, arrowcolor=COLORS['text'], arrowwidth=1),
dict(x='2020-12-2', y=427, text='(Christmas Period)<br>Lockdown Eased', ax=-100, ay=-35, arrowhead=5, arrowcolor=COLORS['text'], arrowwidth=1),
dict(x='2020-12-21', y=512, text='Mass Vaccination<br>(1% Population)', ax=-80, ay=-65, arrowhead=5, arrowcolor=COLORS['text'], arrowwidth=1),
dict(x='2021-1-5', y=809, text='Third Lockdown', ax=-70, ay=-65, arrowhead=5, arrowcolor=COLORS['text'], arrowwidth=1),
dict(x='2021-1-12', y=1066, text='Mass Vaccination<br>(5% Population)', ax=-50, ay=-75, arrowhead=5, arrowcolor=COLORS['text'], arrowwidth=1),
dict(x='2021-2-5', y=891, text='Mass Vaccination<br>(20% Population)', ax=65, ay=-50, arrowhead=5, arrowcolor=COLORS['text'], arrowwidth=1),
dict(x='2021-3-19', y=85, text='Mass Vaccination<br>(50% Population)', ax=30, ay=-50, arrowhead=5, arrowcolor=COLORS['text'], arrowwidth=1)]
CASES_UPDATE_MENUS = [dict(type='dropdown', direction='down', x=0.214, y=1.17, buttons=list([
dict(label='Ensemble', method='update', args=[{'visible': [True, True, True, True, True, False, False, False, False, False, False]}]),
dict(label='ARIMA', method='update', args=[{'visible': [True, True, False, False, False, True, True, True, False, False, False]}]),
dict(label='LSTM', method='update', args=[{'visible': [True, True, False, False, False, False, False, False, True, True, True]}])])),
dict(type='buttons', direction='right', x=1, y=1.17, buttons=list([
dict(label='Show', method='update', args=[{}, {'annotations': UK_CASES_EVENTS + FIXED_ANNOTATIONS}]),
dict(label='Hide', method='update', args=[{}, {'annotations': FIXED_ANNOTATIONS}])]))]
DEATHS_UPDATE_MENUS = [dict(type='dropdown', direction='down', x=0.214, y=1.17, buttons=list([
dict(label='Ensemble', method='update', args=[{'visible': [True, True, True, True, True, False, False, False, False, False, False]}]),
dict(label='ARIMA', method='update', args=[{'visible': [True, True, False, False, False, True, True, True, False, False, False]}]),
dict(label='LSTM', method='update', args=[{'visible': [True, True, False, False, False, False, False, False, True, True, True]}])])),
dict(type='buttons', direction='right', x=1, y=1.17, buttons=list([
dict(label='Show', method='update', args=[{}, {'annotations': UK_DEATHS_EVENTS + FIXED_ANNOTATIONS}]),
dict(label='Hide', method='update', args=[{}, {'annotations': FIXED_ANNOTATIONS}])]))]
##### DEFINE FUNCTIONS TO CONSTRUCT MODELS #####
def to_feature(feature_series, cases=True):
target_index = INDEX_CASES if cases else INDEX_DEATHS
feature_index = feature_series.index
if target_index[0] in feature_index:
feature_index = feature_series[target_index[0]:].index
feature_series = feature_series[feature_index]
if target_index[-1] in feature_index:
return feature_series[:target_index[-1]].tolist()
else:
padding_right = [feature_series[-1] for n in range(len(target_index)-len(feature_index))]
return feature_series.tolist() + padding_right
else:
if target_index[-1] in feature_index:
feature_index = feature_series[:target_index[-1]].index
feature_series = feature_series[feature_index]
padding_left = [feature_series[0] for n in range(len(target_index)-len(feature_index))]
return padding_left + feature_series.tolist()
else:
padding_left = [feature_series[0] for n in range(target_index.tolist().index(feature_index[0]))]
padding_right = [feature_series[-1] for n in range(len(target_index)-len(feature_index)-len(padding_left))]
return padding_left + feature_series.tolist() + padding_right
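# Example (illustrative): if the target index runs 2020-03-01..2020-03-10 but the feature
# series only covers 2020-03-04..2020-03-08, to_feature pads three copies of the first
# value on the left and two copies of the last value on the right so the lengths match.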
def to_sequence(data, features=[], input_size=7, output_size=21):
x, y, arrs = [], [], [data] + features
for i in range(len(data)-input_size-output_size+1):
x.append([[arr[n] for arr in arrs] for n in range(i,i+input_size)])
y.append(data[i+input_size:i+input_size+output_size])
return np.array(x), np.array(y)
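# Example (illustrative): with the defaults input_size=7 and output_size=21, a series of
# length 100 plus 3 feature lists yields x of shape (73, 7, 4) and y of shape (73, 21).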
def scale(original):
return [(n-min(original)) / (max(original)-min(original)) for n in original]
def unscale(scaled, cases=True):
original = DATA_UK_CASES_DIFF_LIST if cases else DATA_UK_DEATHS_DIFF_LIST
return [n*(max(original)-min(original)) + min(original) for n in scaled]
def undifference(difference, start_index, cases=True):
start = DATA_UK_CASES_LIST[start_index] if cases else DATA_UK_DEATHS_LIST[start_index]
undifferenced = [difference[0] + start]
for i in range(1, len(difference)):
undifferenced.append(difference[i] + undifferenced[i-1])
return undifferenced
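# Example (illustrative): if the last observed level is 100, undifference([2, -1], -1)
# cumulatively adds the differences back and returns [102, 101].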
def get_original(difference_scaled, start_index, cases=True):
return undifference(unscale(difference_scaled, cases), start_index, cases)
def predict_LSTM(model, latest_data, cases=True):
return get_original(model(latest_data).numpy()[0], -1, cases)
def result_LSTM(model, latest_data, cases=True, output_size=21, n_iter=10):
outputs = np.zeros((n_iter, output_size))
for i in range(n_iter):
outputs[i] = predict_LSTM(model, latest_data, cases)
return outputs.mean(axis=0), np.percentile(outputs,2.5,axis=0), np.percentile(outputs,97.5,axis=0)
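# Note: the Dropout layers are called with training=True, so repeated forward passes differ
# (Monte Carlo dropout); averaging n_iter runs gives a mean forecast, and the 2.5th/97.5th
# percentiles form an approximate 95% prediction band.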
def construct_LSTM(cases=True):
num_features = 4 if cases else 3
inputs = Input(shape=(None, num_features))
x = LSTM(128, return_sequences=True)(inputs)
x = Dropout(0.2)(x, training=True)
x = LSTM(64)(x)
x = Dropout(0.2)(x, training=True)
x = Dense(128, 'relu')(x)
x = Dense(64, 'relu')(x)
outputs = Dense(21)(x)
model = Model(inputs, outputs)
if cases:
x_train, y_train = to_sequence(DATA_UK_CASES_DIFF_LIST_SCALED, [FEATURE_STRINGENCY_FOR_CASES, FEATURE_VACCINATION_FOR_CASES, FEATURE_TESTS_FOR_CASES])
latest_data, _ = to_sequence(DATA_UK_CASES_DIFF_LIST_SCALED[-7:], [FEATURE_STRINGENCY_FOR_CASES[-7:], FEATURE_VACCINATION_FOR_CASES[-7:], FEATURE_TESTS_FOR_CASES[-7:]], output_size=0)
else:
x_train, y_train = to_sequence(DATA_UK_DEATHS_DIFF_LIST_SCALED, [FEATURE_CASES_FOR_DEATHS, FEATURE_VACCINATION_FOR_DEATHS])
latest_data, _ = to_sequence(DATA_UK_DEATHS_DIFF_LIST_SCALED[-7:], [FEATURE_CASES_FOR_DEATHS[-7:], FEATURE_VACCINATION_FOR_DEATHS[-7:]], output_size=0)
earlyStopping = callbacks.EarlyStopping(monitor='loss', patience=10, restore_best_weights=True)
model.compile('adam', 'mse')
model.fit(x_train, y_train, callbacks=earlyStopping, epochs=300, verbose=0)
return result_LSTM(model, latest_data, cases)
def construct_ARIMA(cases=True):
data = DATA_UK_CASES_LIST if cases else DATA_UK_DEATHS_LIST
model = auto_arima(data, seasonal=False, test='adf', information_criterion='bic',
error_action='ignore', suppress_warnings=True, njob=-1)
return model.predict(21, return_conf_int=True)
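# Note: auto_arima picks (p, d, q) by BIC with an ADF test for differencing; predict(21,
# return_conf_int=True) returns the 21-step point forecast and its confidence intervals.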
##### LOAD DATA #####
filters_uk = ['areaType=overview']
structure_total = {
'date': 'date',
'newcases': 'newCasesByPublishDate',
'cumcases': 'cumCasesByPublishDate',
'newdeaths': 'newDeaths28DaysByPublishDate',
'cumdeaths': 'cumDeaths28DaysByPublishDate'
}
api_uk = Cov19API(filters_uk, structure_total)
df_uk = api_uk.get_dataframe()
df_uk['date'] = df_uk['date'].astype('datetime64[ns]')
df_uk_cases = df_uk.query('cumcases >= 1')
df_uk_deaths = df_uk.query('cumdeaths >= 1')
##### COMPONENTS FOR INDEX PAGE #####
api_uk_timestamp = api_uk.last_update
api_uk_last_update = api_uk_timestamp[:10] + ' ' + api_uk_timestamp[11:19] + ' UTC'
api_uk_date_str = dt.datetime.strptime(api_uk_timestamp[2:10], '%y-%m-%d')
api_uk_date = api_uk_date_str.strftime('%d %B, %Y')
today_uk_newcases, today_uk_newdeaths = df_uk['newcases'][0], df_uk['newdeaths'][0]
today_uk_cumcases, today_uk_cumdeaths = df_uk['cumcases'][0], df_uk['cumdeaths'][0]
fig_index_cases = go.Figure()
fig_index_cases.add_scatter(x=df_uk_cases['date'], y=df_uk_cases['cumcases'], line={'color':COLORS['index_case']}, fill='tozeroy')
fig_index_cases.update_layout(font={'color':COLORS['index_text']}, hovermode='closest', template='none', margin={'l':0, 'r':0, 't':10, 'b':25}, height=130)
fig_index_cases.update_xaxes(showgrid=False, showline=True, linecolor=COLORS['index_text'], tickformat='%d/%m')
fig_index_cases.update_yaxes(nticks=3)
fig_index_deaths = go.Figure()
fig_index_deaths.add_scatter(x=df_uk_cases['date'], y=df_uk_cases['cumdeaths'], line={'color':COLORS['index_death']}, fill='tozeroy')
fig_index_deaths.update_layout(font={'color':COLORS['index_text']}, hovermode='closest', template='none', margin={'l':0, 'r':0, 't':10, 'b':25}, height=130)
fig_index_deaths.update_xaxes(showgrid=False, showline=True, linecolor=COLORS['index_text'], tickformat='%d/%m')
fig_index_deaths.update_yaxes(nticks=3)
##### PREPARE DATA FOR FORECASTING #####
data_uk_cases = df_uk_cases[['date', 'newcases']].sort_index(ascending=False).set_index('date')
data_uk_cases_avg = data_uk_cases['newcases'].rolling(7).mean().round()[6:]
data_uk_cases_avg_diff = data_uk_cases_avg.diff()[1:]
LEN_CASES = len(data_uk_cases_avg_diff)
INDEX_CASES = data_uk_cases_avg_diff.index
DATA_UK_CASES_LIST = data_uk_cases_avg.tolist()
DATA_UK_CASES_DIFF_LIST = data_uk_cases_avg_diff.tolist()
DATA_UK_CASES_DIFF_LIST_SCALED = scale(DATA_UK_CASES_DIFF_LIST)
data_uk_deaths = df_uk_deaths[['date', 'newdeaths']].sort_index(ascending=False).set_index('date')
data_uk_deaths_avg = data_uk_deaths['newdeaths'].rolling(7).mean().round()[6:]
data_uk_deaths_avg_diff = data_uk_deaths_avg.diff()[1:]
LEN_DEATHS = len(data_uk_deaths_avg_diff)
INDEX_DEATHS = data_uk_deaths_avg_diff.index
DATA_UK_DEATHS_LIST = data_uk_deaths_avg.tolist()
DATA_UK_DEATHS_DIFF_LIST = data_uk_deaths_avg_diff.tolist()
DATA_UK_DEATHS_DIFF_LIST_SCALED = scale(DATA_UK_DEATHS_DIFF_LIST)
FEATURE_CASES_FOR_DEATHS = DATA_UK_CASES_DIFF_LIST_SCALED[-LEN_DEATHS:]
FEATURE_DEATHS_FOR_CASES = [0 for n in range(LEN_CASES-LEN_DEATHS)] + DATA_UK_DEATHS_DIFF_LIST_SCALED
url_stringency = 'https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/timeseries/stringency_index.csv'
df_stringency = pd.read_csv(url_stringency, index_col=0)
df_stringency_uk = df_stringency.query('country_code == "GBR"').T[2:]
df_stringency_uk.index = pd.to_datetime(df_stringency_uk.index)
df_stringency_uk.columns = ['stringency']
df_stringency_uk = df_stringency_uk.fillna(method='pad')
stringency_uk = df_stringency_uk['stringency']
stringency_uk_cases, stringency_uk_deaths = to_feature(stringency_uk), to_feature(stringency_uk, False)
FEATURE_STRINGENCY_FOR_CASES, FEATURE_STRINGENCY_FOR_DEATHS = scale(stringency_uk_cases), scale(stringency_uk_deaths)
url_vaccination_uk = 'https://api.coronavirus.data.gov.uk/v2/data?areaType=overview&metric=cumVaccinationFirstDoseUptakeByPublishDatePercentage&format=csv'
df_vaccination_uk = pd.read_csv(url_vaccination_uk, index_col=3)  # api: pandas.read_csv
import builtins
from io import StringIO
from itertools import product
from string import ascii_lowercase
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import (
DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna)
import pandas.core.nanops as nanops
from pandas.util import testing as tm
@pytest.mark.parametrize("agg_func", ['any', 'all'])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("vals", [
['foo', 'bar', 'baz'], ['foo', '', ''], ['', '', ''],
[1, 2, 3], [1, 0, 0], [0, 0, 0],
[1., 2., 3.], [1., 0., 0.], [0., 0., 0.],
[True, True, True], [True, False, False], [False, False, False],
[np.nan, np.nan, np.nan]
])
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = DataFrame({'key': ['a'] * 3 + ['b'] * 3, 'val': vals * 2})
# Figure out expectation using Python builtin
exp = getattr(builtins, agg_func)(vals)
# edge case for missing data with skipna and 'any'
if skipna and all(isna(vals)) and agg_func == 'any':
exp = False
exp_df = DataFrame([exp] * 2, columns=['val'], index=Index(
['a', 'b'], name='key'))
result = getattr(df.groupby('key'), agg_func)(skipna=skipna)
tm.assert_frame_equal(result, exp_df)
def test_max_min_non_numeric():
# #2700
aa = DataFrame({'nn': [11, 11, 22, 22],
'ii': [1, 2, 3, 4],
'ss': 4 * ['mama']})
result = aa.groupby('nn').max()
assert 'ss' in result
result = aa.groupby('nn').max(numeric_only=False)
assert 'ss' in result
result = aa.groupby('nn').min()
assert 'ss' in result
result = aa.groupby('nn').min(numeric_only=False)
assert 'ss' in result
def test_intercept_builtin_sum():
s = Series([1., 2., np.nan, 3.])
grouped = s.groupby([0, 1, 2, 2])
result = grouped.agg(builtins.sum)
result2 = grouped.apply(builtins.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
# @pytest.mark.parametrize("f", [max, min, sum])
# def test_builtins_apply(f):
@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize('keys', [
"jim", # Single key
["jim", "joe"] # Multi-key
])
def test_builtins_apply(keys, f):
# see gh-8155
df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)),
columns=["jim", "joe"])
df["jolie"] = np.random.randn(1000)
fname = f.__name__
result = df.groupby(keys).apply(f)
ngroups = len(df.drop_duplicates(subset=keys))
assert_msg = ("invalid frame shape: {} "
"(expected ({}, 3))".format(result.shape, ngroups))
assert result.shape == (ngroups, 3), assert_msg
tm.assert_frame_equal(result, # numpy's equivalent function
df.groupby(keys).apply(getattr(np, fname)))
if f != sum:
expected = df.groupby(keys).agg(fname).reset_index()
expected.set_index(keys, inplace=True, drop=False)
tm.assert_frame_equal(result, expected, check_dtype=False)
tm.assert_series_equal(getattr(result, fname)(),
getattr(df, fname)())
def test_arg_passthru():
# make sure that we are passing thru kwargs
# to our agg functions
# GH3668
# GH5724
df = pd.DataFrame(
{'group': [1, 1, 2],
'int': [1, 2, 3],
'float': [4., 5., 6.],
'string': list('abc'),
'category_string': pd.Series(list('abc')).astype('category'),
'category_int': [7, 8, 9],
'datetime': pd.date_range('20130101', periods=3),
'datetimetz': pd.date_range('20130101',
periods=3,
tz='US/Eastern'),
'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')},
columns=['group', 'int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
expected_columns_numeric = Index(['int', 'float', 'category_int'])
# mean / median
expected = pd.DataFrame(
{'category_int': [7.5, 9],
'float': [4.5, 6.],
'timedelta': [pd.Timedelta('1.5s'),
pd.Timedelta('3s')],
'int': [1.5, 3],
'datetime': [pd.Timestamp('2013-01-01 12:00:00'),
pd.Timestamp('2013-01-03 00:00:00')],
'datetimetz': [
pd.Timestamp('2013-01-01 12:00:00', tz='US/Eastern'),
pd.Timestamp('2013-01-03 00:00:00', tz='US/Eastern')]},
index=Index([1, 2], name='group'),
columns=['int', 'float', 'category_int',
'datetime', 'datetimetz', 'timedelta'])
for attr in ['mean', 'median']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_frame_equal(result.reindex_like(expected), expected)
# TODO: min, max *should* handle
# categorical (ordered) dtype
expected_columns = Index(['int', 'float', 'string',
'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['min', 'max']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['first', 'last']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'string',
'category_int', 'timedelta'])
for attr in ['sum']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'category_int'])
for attr in ['prod', 'cumprod']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
# like min, max, but don't include strings
expected_columns = Index(['int', 'float',
'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['cummin', 'cummax']:
f = getattr(df.groupby('group'), attr)
result = f()
# GH 15561: numeric_only=False set by default like min/max
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'category_int',
'timedelta'])
for attr in ['cumsum']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
def test_non_cython_api():
# GH5610
# non-cython calls should not include the grouper
df = DataFrame(
[[1, 2, 'foo'],
[1, np.nan, 'bar'],
[3, np.nan, 'baz']],
columns=['A', 'B', 'C'])
g = df.groupby('A')
gni = df.groupby('A', as_index=False)
# mad
expected = DataFrame([[0], [np.nan]], columns=['B'], index=[1, 3])
expected.index.name = 'A'
result = g.mad()
tm.assert_frame_equal(result, expected)
expected = DataFrame([[0., 0.], [0, np.nan]], columns=['A', 'B'],
index=[0, 1])
result = gni.mad()
tm.assert_frame_equal(result, expected)
# describe
expected_index = pd.Index([1, 3], name='A')
expected_col = pd.MultiIndex(levels=[['B'],
['count', 'mean', 'std', 'min',
'25%', '50%', '75%', 'max']],
codes=[[0] * 8, list(range(8))])
expected = pd.DataFrame([[1.0, 2.0, np.nan, 2.0, 2.0, 2.0, 2.0, 2.0],
[0.0, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]],
index=expected_index,
columns=expected_col)
result = g.describe()
tm.assert_frame_equal(result, expected)
expected = pd.concat([df[df.A == 1].describe().unstack().to_frame().T,
df[df.A == 3].describe().unstack().to_frame().T])
expected.index = pd.Index([0, 1])
result = gni.describe()
tm.assert_frame_equal(result, expected)
# any
expected = DataFrame([[True, True], [False, True]], columns=['B', 'C'],
index=[1, 3])
expected.index.name = 'A'
result = g.any()
tm.assert_frame_equal(result, expected)
# idxmax
expected = DataFrame([[0.0], [np.nan]], columns=['B'], index=[1, 3])
expected.index.name = 'A'
result = g.idxmax()
tm.assert_frame_equal(result, expected)
def test_cython_api2():
# this takes the fast apply path
# cumsum (GH5614)
df = DataFrame(
[[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]
], columns=['A', 'B', 'C'])
expected = DataFrame(
[[2, np.nan], [np.nan, 9], [4, 9]], columns=['B', 'C'])
result = df.groupby('A').cumsum()
tm.assert_frame_equal(result, expected)
# GH 5755 - cumsum is a transformer and should ignore as_index
result = df.groupby('A', as_index=False).cumsum()
tm.assert_frame_equal(result, expected)
# GH 13994
result = df.groupby('A').cumsum(axis=1)
expected = df.cumsum(axis=1)
tm.assert_frame_equal(result, expected)
result = df.groupby('A').cumprod(axis=1)
expected = df.cumprod(axis=1)
tm.assert_frame_equal(result, expected)
def test_cython_median():
df = DataFrame(np.random.randn(1000))
df.values[::2] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
labels[::17] = np.nan
result = df.groupby(labels).median()
exp = df.groupby(labels).agg(nanops.nanmedian)
tm.assert_frame_equal(result, exp)
df = DataFrame(np.random.randn(1000, 5))
rs = df.groupby(labels).agg(np.median)
xp = df.groupby(labels).median()
tm.assert_frame_equal(rs, xp)
def test_median_empty_bins(observed):
df = pd.DataFrame(np.random.randint(0, 44, 500))
grps = range(0, 55, 5)
bins = pd.cut(df[0], grps)
result = df.groupby(bins, observed=observed).median()
expected = df.groupby(bins, observed=observed).agg(lambda x: x.median())
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", [
'int8', 'int16', 'int32', 'int64', 'float32', 'float64'])
@pytest.mark.parametrize("method,data", [
('first', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}),
('last', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}),
('min', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}),
('max', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}),
('nth', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}],
'args': [1]}),
('count', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 2}],
'out_type': 'int64'})
])
def test_groupby_non_arithmetic_agg_types(dtype, method, data):
# GH9311, GH6620
df = pd.DataFrame(
[{'a': 1, 'b': 1},
{'a': 1, 'b': 2},
{'a': 2, 'b': 3},
{'a': 2, 'b': 4}])
df['b'] = df.b.astype(dtype)
if 'args' not in data:
data['args'] = []
if 'out_type' in data:
out_type = data['out_type']
else:
out_type = dtype
exp = data['df']
df_out = pd.DataFrame(exp)
df_out['b'] = df_out.b.astype(out_type)
df_out.set_index('a', inplace=True)
grpd = df.groupby('a')
t = getattr(grpd, method)(*data['args'])
tm.assert_frame_equal(t, df_out)
@pytest.mark.parametrize("i", [
(Timestamp("2011-01-15 12:50:28.502376"),
Timestamp("2011-01-20 12:50:28.593448")),
(24650000000000001, 24650000000000002)
])
def test_groupby_non_arithmetic_agg_int_like_precision(i):
# see gh-6620, gh-9311
df = pd.DataFrame([{"a": 1, "b": i[0]}, {"a": 1, "b": i[1]}])
grp_exp = {"first": {"expected": i[0]},
"last": {"expected": i[1]},
"min": {"expected": i[0]},
"max": {"expected": i[1]},
"nth": {"expected": i[1],
"args": [1]},
"count": {"expected": 2}}
for method, data in grp_exp.items():
if "args" not in data:
data["args"] = []
grouped = df.groupby("a")
res = getattr(grouped, method)(*data["args"])
assert res.iloc[0].b == data["expected"]
@pytest.mark.parametrize("func, values", [
("idxmin", {'c_int': [0, 2], 'c_float': [1, 3], 'c_date': [1, 2]}),
("idxmax", {'c_int': [1, 3], 'c_float': [0, 2], 'c_date': [0, 3]})
])
def test_idxmin_idxmax_returns_int_types(func, values):
# GH 25444
df = pd.DataFrame({'name': ['A', 'A', 'B', 'B'],
'c_int': [1, 2, 3, 4],
'c_float': [4.02, 3.03, 2.04, 1.05],
'c_date': ['2019', '2018', '2016', '2017']})
df['c_date'] = pd.to_datetime(df['c_date'])
result = getattr(df.groupby('name'), func)()
expected = pd.DataFrame(values, index=Index(['A', 'B'], name="name"))
tm.assert_frame_equal(result, expected)
def test_fill_consistency():
# GH9221
# pass thru keyword arguments to the generated wrapper
# are set if the passed kw is None (only)
df = DataFrame(index=pd.MultiIndex.from_product(
[['value1', 'value2'], date_range('2014-01-01', '2014-01-06')]),
columns=Index(
['1', '2'], name='id'))
df['1'] = [np.nan, 1, np.nan, np.nan, 11, np.nan, np.nan, 2, np.nan,
np.nan, 22, np.nan]
df['2'] = [np.nan, 3, np.nan, np.nan, 33, np.nan, np.nan, 4, np.nan,
np.nan, 44, np.nan]
expected = df.groupby(level=0, axis=0).fillna(method='ffill')
result = df.T.groupby(level=0, axis=1).fillna(method='ffill').T
tm.assert_frame_equal(result, expected)
def test_groupby_cumprod():
# GH 4095
df = pd.DataFrame({'key': ['b'] * 10, 'value': 2})
actual = df.groupby('key')['value'].cumprod()
expected = df.groupby('key')['value'].apply(lambda x: x.cumprod())
expected.name = 'value'
tm.assert_series_equal(actual, expected)
df = pd.DataFrame({'key': ['b'] * 100, 'value': 2})
actual = df.groupby('key')['value'].cumprod()
# if overflows, groupby product casts to float
# while numpy passes back invalid values
df['value'] = df['value'].astype(float)
expected = df.groupby('key')['value'].apply(lambda x: x.cumprod())
expected.name = 'value'
tm.assert_series_equal(actual, expected)
def test_ops_general():
ops = [('mean', np.mean),
('median', np.median),
('std', np.std),
('var', np.var),
('sum', np.sum),
('prod', np.prod),
('min', np.min),
('max', np.max),
('first', lambda x: x.iloc[0]),
('last', lambda x: x.iloc[-1]),
('count', np.size), ]
try:
from scipy.stats import sem
except ImportError:
pass
else:
ops.append(('sem', sem))
df = DataFrame(np.random.randn(1000))
labels = np.random.randint(0, 50, size=1000).astype(float)
for op, targop in ops:
result = getattr(df.groupby(labels), op)().astype(float)
expected = df.groupby(labels).agg(targop)
try:
tm.assert_frame_equal(result, expected)
except BaseException as exc:
exc.args += ('operation: %s' % op, )
raise
def test_max_nan_bug():
raw = """,Date,app,File
-04-23,2013-04-23 00:00:00,,log080001.log
-05-06,2013-05-06 00:00:00,,log.log
-05-07,2013-05-07 00:00:00,OE,xlsx"""
df = pd.read_csv(StringIO(raw), parse_dates=[0])
gb = df.groupby('Date')
r = gb[['File']].max()
e = gb['File'].max().to_frame()
tm.assert_frame_equal(r, e)
assert not r['File'].isna().any()
def test_nlargest():
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list('a' * 5 + 'b' * 5))
gb = a.groupby(b)
r = gb.nlargest(3)
e = Series([
7, 5, 3, 10, 9, 6
], index=MultiIndex.from_arrays([list('aaabbb'), [3, 2, 1, 9, 5, 8]]))
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series([
3, 2, 1, 3, 3, 2
], index=MultiIndex.from_arrays([list('aaabbb'), [2, 3, 1, 6, 5, 7]]))
tm.assert_series_equal(gb.nlargest(3, keep='last'), e)
def test_nsmallest():
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list('a' * 5 + 'b' * 5))
gb = a.groupby(b)
r = gb.nsmallest(3)
e = Series([
1, 2, 3, 0, 4, 6
], index=MultiIndex.from_arrays([list('aaabbb'), [0, 4, 1, 6, 7, 8]]))
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series([
0, 1, 1, 0, 1, 2
], index=MultiIndex.from_arrays([list('aaabbb'), [4, 1, 0, 9, 8, 7]]))
tm.assert_series_equal(gb.nsmallest(3, keep='last'), e)
@pytest.mark.parametrize("func", [
'mean', 'var', 'std', 'cumprod', 'cumsum'
])
def test_numpy_compat(func):
# see gh-12811
df = pd.DataFrame({'A': [1, 2, 1], 'B': [1, 2, 3]})
g = df.groupby('A')
msg = "numpy operations are not valid with groupby"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(g, func)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(g, func)(foo=1)
def test_cummin_cummax():
# GH 15048
num_types = [np.int32, np.int64, np.float32, np.float64]
num_mins = [np.iinfo(np.int32).min, np.iinfo(np.int64).min,
np.finfo(np.float32).min, np.finfo(np.float64).min]
num_max = [np.iinfo(np.int32).max, np.iinfo(np.int64).max,
np.finfo(np.float32).max, np.finfo(np.float64).max]
base_df = pd.DataFrame({'A': [1, 1, 1, 1, 2, 2, 2, 2],
'B': [3, 4, 3, 2, 2, 3, 2, 1]})
expected_mins = [3, 3, 3, 2, 2, 2, 2, 1]
expected_maxs = [3, 4, 4, 4, 2, 3, 3, 3]
for dtype, min_val, max_val in zip(num_types, num_mins, num_max):
df = base_df.astype(dtype)
# cummin
expected = pd.DataFrame({'B': expected_mins}).astype(dtype)
result = df.groupby('A').cummin()
tm.assert_frame_equal(result, expected)
result = df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# Test cummin w/ min value for dtype
df.loc[[2, 6], 'B'] = min_val
expected.loc[[2, 3, 6, 7], 'B'] = min_val
result = df.groupby('A').cummin()
tm.assert_frame_equal(result, expected)
expected = df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# cummax
expected = pd.DataFrame({'B': expected_maxs}).astype(dtype)
result = df.groupby('A').cummax()
tm.assert_frame_equal(result, expected)
result = df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(result, expected)
# Test cummax w/ max value for dtype
df.loc[[2, 6], 'B'] = max_val
expected.loc[[2, 3, 6, 7], 'B'] = max_val
result = df.groupby('A').cummax()
tm.assert_frame_equal(result, expected)
expected = df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(result, expected)
# Test nan in some values
base_df.loc[[0, 2, 4, 6], 'B'] = np.nan
expected = pd.DataFrame({'B': [np.nan, 4, np.nan, 2,
np.nan, 3, np.nan, 1]})
result = base_df.groupby('A').cummin()
tm.assert_frame_equal(result, expected)
expected = (base_df.groupby('A')
.B
.apply(lambda x: x.cummin())
.to_frame())
tm.assert_frame_equal(result, expected)
expected = pd.DataFrame({'B': [np.nan, 4, np.nan, 4,
np.nan, 3, np.nan, 3]})
result = base_df.groupby('A').cummax()
tm.assert_frame_equal(result, expected)
expected = (base_df.groupby('A')
.B
.apply(lambda x: x.cummax())
.to_frame())
tm.assert_frame_equal(result, expected)
# Test nan in entire column
base_df['B'] = np.nan
expected = pd.DataFrame({'B': [np.nan] * 8})
result = base_df.groupby('A').cummin()
tm.assert_frame_equal(expected, result)
result = base_df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(expected, result)
result = base_df.groupby('A').cummax()
tm.assert_frame_equal(expected, result)
result = base_df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(expected, result)
# GH 15561
df = pd.DataFrame(dict(a=[1], b=pd.to_datetime(['2001'])))
expected = pd.Series(pd.to_datetime('2001'), index=[0], name='b')
for method in ['cummax', 'cummin']:
result = getattr(df.groupby('a')['b'], method)()
tm.assert_series_equal(expected, result)
# GH 15635
df = pd.DataFrame(dict(a=[1, 2, 1], b=[2, 1, 1]))
result = df.groupby('a').b.cummax()
expected = pd.Series([2, 1, 2], name='b')
tm.assert_series_equal(result, expected)
df = pd.DataFrame(dict(a=[1, 2, 1], b=[1, 2, 2]))
result = df.groupby('a').b.cummin()
expected = pd.Series([1, 2, 1], name='b')
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('in_vals, out_vals', [
# Basics: strictly increasing (T), strictly decreasing (F),
# abs val increasing (F), non-strictly increasing (T)
([1, 2, 5, 3, 2, 0, 4, 5, -6, 1, 1],
[True, False, False, True]),
# Test with inf vals
([1, 2.1, np.inf, 3, 2, np.inf, -np.inf, 5, 11, 1, -np.inf],
[True, False, True, False]),
# Test with nan vals; should always be False
([1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
[False, False, False, False]),
])
def test_is_monotonic_increasing(in_vals, out_vals):
# GH 17015
source_dict = {
'A': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'],
'B': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c', 'd', 'd'],
'C': in_vals}
df = pd.DataFrame(source_dict)
result = df.groupby('B').C.is_monotonic_increasing
index = Index(list('abcd'), name='B')
expected = pd.Series(index=index, data=out_vals, name='C')
tm.assert_series_equal(result, expected)
# Also check result equal to manually taking x.is_monotonic_increasing.
expected = (
df.groupby(['B']).C.apply(lambda x: x.is_monotonic_increasing))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('in_vals, out_vals', [
# Basics: strictly decreasing (T), strictly increasing (F),
# abs val decreasing (F), non-strictly increasing (T)
([10, 9, 7, 3, 4, 5, -3, 2, 0, 1, 1],
[True, False, False, True]),
# Test with inf vals
([np.inf, 1, -np.inf, np.inf, 2, -3, -np.inf, 5, -3, -np.inf, -np.inf],
[True, True, False, True]),
# Test with nan vals; should always be False
([1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
[False, False, False, False]),
])
def test_is_monotonic_decreasing(in_vals, out_vals):
# GH 17015
source_dict = {
'A': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'],
'B': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c', 'd', 'd'],
'C': in_vals}
df = pd.DataFrame(source_dict)
result = df.groupby('B').C.is_monotonic_decreasing
index = Index(list('abcd'), name='B')
expected = pd.Series(index=index, data=out_vals, name='C')
tm.assert_series_equal(result, expected)
# describe
# --------------------------------
def test_apply_describe_bug(mframe):
grouped = mframe.groupby(level='first')
grouped.describe() # it works!
def test_series_describe_multikey():
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
tm.assert_series_equal(result['mean'], grouped.mean(),
check_names=False)
tm.assert_series_equal(result['std'], grouped.std(), check_names=False)
tm.assert_series_equal(result['min'], grouped.min(), check_names=False)
def test_series_describe_single():
ts = tm.makeTimeSeries()
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(lambda x: x.describe())
expected = grouped.describe().stack()
tm.assert_series_equal(result, expected)
def test_series_index_name(df):
grouped = df.loc[:, ['C']].groupby(df['A'])
result = grouped.agg(lambda x: x.mean())
assert result.index.name == 'A'
def test_frame_describe_multikey(tsframe):
grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
desc_groups = []
for col in tsframe:
group = grouped[col].describe()
# GH 17464 - Remove duplicate MultiIndex levels
group_col = pd.MultiIndex(
levels=[[col], group.columns],
codes=[[0] * len(group.columns), range(len(group.columns))])
group = pd.DataFrame(group.values,
columns=group_col,
index=group.index)
desc_groups.append(group)
expected = pd.concat(desc_groups, axis=1)
tm.assert_frame_equal(result, expected)
groupedT = tsframe.groupby({'A': 0, 'B': 0,
'C': 1, 'D': 1}, axis=1)
result = groupedT.describe()
expected = tsframe.describe().T
expected.index = pd.MultiIndex(
levels=[[0, 1], expected.index],
codes=[[0, 0, 1, 1], range(len(expected.index))])
tm.assert_frame_equal(result, expected)
def test_frame_describe_tupleindex():
# GH 14848 - regression from 0.19.0 to 0.19.1
df1 = DataFrame({'x': [1, 2, 3, 4, 5] * 3,
'y': [10, 20, 30, 40, 50] * 3,
'z': [100, 200, 300, 400, 500] * 3})
df1['k'] = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5
df2 = df1.rename(columns={'k': 'key'})
msg = "Names should be list-like for a MultiIndex"
with pytest.raises(ValueError, match=msg):
df1.groupby('k').describe()
with pytest.raises(ValueError, match=msg):
df2.groupby('key').describe()
def test_frame_describe_unstacked_format():
# GH 4792
prices = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 24990,
pd.Timestamp('2011-01-06 12:43:33', tz=None): 25499,
pd.Timestamp('2011-01-06 12:54:09', tz=None): 25499}
volumes = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
pd.Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
pd.Timestamp('2011-01-06 12:54:09', tz=None): 100000000}
df = pd.DataFrame({'PRICE': prices,
'VOLUME': volumes})
result = df.groupby('PRICE').VOLUME.describe()
data = [df[df.PRICE == 24990].VOLUME.describe().values.tolist(),
df[df.PRICE == 25499].VOLUME.describe().values.tolist()]
expected = pd.DataFrame(data,
index=pd.Index([24990, 25499], name='PRICE'),
columns=['count', 'mean', 'std', 'min',
'25%', '50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# nunique
# --------------------------------
@pytest.mark.parametrize('n', 10 ** np.arange(2, 6))
@pytest.mark.parametrize('m', [10, 100, 1000])
@pytest.mark.parametrize('sort', [False, True])
@pytest.mark.parametrize('dropna', [False, True])
def test_series_groupby_nunique(n, m, sort, dropna):
def check_nunique(df, keys, as_index=True):
gr = df.groupby(keys, as_index=as_index, sort=sort)
left = gr['julie'].nunique(dropna=dropna)
gr = df.groupby(keys, as_index=as_index, sort=sort)
right = gr['julie'].apply(Series.nunique, dropna=dropna)
if not as_index:
right = right.reset_index(drop=True)
tm.assert_series_equal(left, right, check_names=False)
days = date_range('2015-08-23', periods=10)
frame = DataFrame({'jim': np.random.choice(list(ascii_lowercase), n),
'joe': np.random.choice(days, n),
'julie': np.random.randint(0, m, n)})
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
frame.loc[1::17, 'jim'] = None
frame.loc[3::37, 'joe'] = None
frame.loc[7::19, 'julie'] = None
frame.loc[8::19, 'julie'] = None
frame.loc[9::19, 'julie'] = None
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
check_nunique(frame, ['jim'], as_index=False)
check_nunique(frame, ['jim', 'joe'], as_index=False)
def test_nunique():
df = DataFrame({
'A': list('abbacc'),
'B': list('abxacc'),
'C': list('abbacx'),
})
expected = DataFrame({'A': [1] * 3, 'B': [1, 2, 1], 'C': [1, 1, 2]})
result = df.groupby('A', as_index=False).nunique()
tm.assert_frame_equal(result, expected)
# as_index
expected.index = list('abc')
expected.index.name = 'A'
result = df.groupby('A').nunique()
tm.assert_frame_equal(result, expected)
# with na
result = df.replace({'x': None}).groupby('A').nunique(dropna=False)
tm.assert_frame_equal(result, expected)
# dropna
expected = DataFrame({'A': [1] * 3, 'B': [1] * 3, 'C': [1] * 3},
index=list('abc'))
expected.index.name = 'A'
result = df.replace({'x': None}).groupby('A').nunique()
tm.assert_frame_equal(result, expected)
def test_nunique_with_object():
# GH 11077
data = pd.DataFrame(
[[100, 1, 'Alice'],
[200, 2, 'Bob'],
[300, 3, 'Charlie'],
[-400, 4, 'Dan'],
[500, 5, 'Edith']],
columns=['amount', 'id', 'name']
)
result = data.groupby(['id', 'amount'])['name'].nunique()
index = MultiIndex.from_arrays([data.id, data.amount])
expected = pd.Series([1] * 5, name='name', index=index)
tm.assert_series_equal(result, expected)
def test_nunique_with_empty_series():
# GH 12553
data = pd.Series(name='name')
result = data.groupby(level=0).nunique()
expected = pd.Series(name='name', dtype='int64')
tm.assert_series_equal(result, expected)
def test_nunique_with_timegrouper():
# GH 13453
test = pd.DataFrame({
'time': [Timestamp('2016-06-28 09:35:35'),
Timestamp('2016-06-28 16:09:30'),
Timestamp('2016-06-28 16:46:28')],
'data': ['1', '2', '3']}).set_index('time')
result = test.groupby(pd.Grouper(freq='h'))['data'].nunique()
expected = test.groupby(
pd.Grouper(freq='h')
)['data'].apply(pd.Series.nunique)
tm.assert_series_equal(result, expected)
def test_nunique_preserves_column_level_names():
# GH 23222
test = pd.DataFrame([1, 2, 2],
columns=pd.Index(['A'], name="level_0"))
result = test.groupby([0, 0, 0]).nunique()
expected = pd.DataFrame([2], columns=test.columns)
tm.assert_frame_equal(result, expected)
# count
# --------------------------------
def test_groupby_timedelta_cython_count():
df = DataFrame({'g': list('ab' * 2),
'delt': np.arange(4).astype('timedelta64[ns]')})
expected = Series([
2, 2
], index=pd.Index(['a', 'b'], name='g'), name='delt')
result = df.groupby('g').delt.count()
tm.assert_series_equal(expected, result)
def test_count():
n = 1 << 15
dr = date_range('2015-08-30', periods=n // 10, freq='T')
df = DataFrame({
'1st': np.random.choice(
list(ascii_lowercase), n),
'2nd': np.random.randint(0, 5, n),
'3rd': np.random.randn(n).round(3),
'4th': np.random.randint(-10, 10, n),
'5th': np.random.choice(dr, n),
'6th': np.random.randn(n).round(3),
'7th': np.random.randn(n).round(3),
'8th': np.random.choice(dr, n) - np.random.choice(dr, 1),
'9th': np.random.choice(
list(ascii_lowercase), n)
})
for col in df.columns.drop(['1st', '2nd', '4th']):
df.loc[np.random.choice(n, n // 10), col] = np.nan
df['9th'] = df['9th'].astype('category')
for key in '1st', '2nd', ['1st', '2nd']:
left = df.groupby(key).count()
right = df.groupby(key).apply(DataFrame.count).drop(key, axis=1)
tm.assert_frame_equal(left, right)
# GH5610
# count counts non-nulls
df = pd.DataFrame([[1, 2, 'foo'],
[1, np.nan, 'bar'],
[3, np.nan, np.nan]],
columns=['A', 'B', 'C'])
count_as = df.groupby('A').count()
count_not_as = df.groupby('A', as_index=False).count()
expected = DataFrame([[1, 2], [0, 0]], columns=['B', 'C'],
index=[1, 3])
expected.index.name = 'A'
tm.assert_frame_equal(count_not_as, expected.reset_index())
tm.assert_frame_equal(count_as, expected)
count_B = df.groupby('A')['B'].count()
tm.assert_series_equal(count_B, expected['B'])
from txtai.embeddings import Embeddings
from txtai.pipeline import Similarity
from txtai.ann import ANN
import os
import json
import numpy as np
import pandas as pd
import logging
import pickle
from gamechangerml.src.text_handling.corpus import LocalCorpus
import torch
logger = logging.getLogger(__name__)
class SentenceEncoder(object):
"""
Handles text encoding and creating of ANNOY index
for the initial search
Args:
encoder_model (str): Model name supported by huggingface
and txtai to generate the document embeddings
use_gpu (bool): Boolean to check if a GPU would be used
"""
def __init__(self, encoder_model=None, use_gpu=False):
if encoder_model:
self.encoder_model = encoder_model
else:
self.encoder_model = "sentence-transformers/msmarco-distilbert-base-v2"
if use_gpu and torch.cuda.is_available():
self.use_gpu = use_gpu
else:
self.use_gpu = False
self.embedder = Embeddings(
{"method": "transformers", "path": self.encoder_model, "gpu": self.use_gpu}
)
def _index(self, corpus, index_path, overwrite=False):
"""
Builds an embeddings index.
Args:
corpus: list of (id, text|tokens, tags)
index_path: Path of where to store and reference
existing index
overwrite: Boolean flag indicating whether an
    existing index will be overwritten
"""
# Transform documents to embeddings vectors
ids, dimensions, stream = self.embedder.model.index(corpus)
# Load streamed embeddings back to memory
embeddings = np.empty((len(ids), dimensions), dtype=np.float32)
with open(stream, "rb") as queue:
for x in range(embeddings.shape[0]):
embeddings[x] = pickle.load(queue)
# Remove temporary file
os.remove(stream)
all_text = []
for para_id, text, _ in corpus:
all_text.append([text, para_id])
df = pd.DataFrame(all_text, columns=["text", "paragraph_id"])
embedding_path = os.path.join(index_path, "embeddings.npy")
dataframe_path = os.path.join(index_path, "data.csv")
ids_path = os.path.join(index_path, "doc_ids.txt")
# Load new data
if os.path.isfile(embedding_path) and (overwrite is False):
old_embed_path = os.path.join(index_path, "embeddings.npy")
old_dataframe_path = os.path.join(index_path, "data.csv")
old_ids_path = os.path.join(index_path, "doc_ids.txt")
# Load existing embeddings
old_embeddings = np.load(old_embed_path) # LOAD EMBEDDINGS
with open(old_ids_path, "r") as fp:
old_ids = fp.readlines()
old_ids = [doc_id[:-1] for doc_id in old_ids]
# Remove embeddings with document id overlaps
embeddings = np.vstack((old_embeddings, embeddings))
# Append new dataframe
old_df = pd.read_csv(old_dataframe_path)
df = pd.concat([old_df, df])
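# Usage sketch (not from the original source): building an index with the encoder above.
# "corpus_dir" and "index_dir" are hypothetical paths, and LocalCorpus is assumed to yield
# (id, text|tokens, tags) tuples in the shape _index expects; a live model download is needed.
#
#   encoder = SentenceEncoder(use_gpu=False)
#   corpus = LocalCorpus("corpus_dir")
#   encoder._index(corpus, "index_dir", overwrite=True)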
import hbase
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from io import StringIO
import matplotlib.pyplot as plt
zk = '192.168.1.19:2181,192.168.1.20:2181,192.168.1.21:2181'
def timespan(series):
    # use positional indexing so this works regardless of the index labels passed by agg
    return series.iloc[-1] - series.iloc[0]
def lastele(series):
    return series.iloc[-1] if not series.empty else None
if __name__ == '__main__':
with hbase.ConnectionPool(zk).connect() as conn:
dfs = []
# filter conditions
devids = []
dt_start = datetime(2019, 1, 10)
# dt_end = dt_start + timedelta(1)
dt_end = datetime(2019, 1, 15)
# Get data from hbase
table = conn['default']['iot-anlyz']
# # filter
# reversed() returns an iterator object, which would render as "<reversed object ...>" inside
# the f-string; slice the device id instead to get the reversed row-key prefix string
filterlist = [f"PrefixFilter(=, 'substring:{devid[::-1]}')" for devid in devids]
filter = None
if filterlist:
filter = ' AND '.join(filterlist)
# scan
for row in table.scan(filter_=filter if filter else None):
if not row.get('cf:devid'):
continue
devid = str(row.get('cf:devid'), encoding='utf8')
# print(f'devid={devid}')
ts = np.int64(row.get('cf:start')) / 1000
valid_ts = dt_start.timestamp() < ts and ts < dt_end.timestamp()
# print(f'dt_start={dt_start.timestamp()}, ts={ts/1000}, dt_end={dt_end.timestamp()}')
if not valid_ts:
continue
# startts = row['cf:startts']
if not row.get('cf:positions'):
continue
moves = str(row.get('cf:positions'), encoding='utf8')
df = pd.read_csv(StringIO(moves.replace(';', '\n')),
header=None,
names=['Timestamp', 'Lon', 'Lat', 'Distance'])
df = df.dropna(how='all')
df = df.fillna(0.0)
df['Devid'] = devid
# df['Vel'] = df['Distance'] / (df['Timestamp'] - df['Timestamp'].shift(1)) * 1000 * 60
df['Timestamp'] = pd.to_datetime(df['Timestamp'], unit='ms').dt.tz_localize('UTC').dt.tz_convert(
'Asia/Shanghai')
df['Date'] = df['Timestamp'].dt.date
df['Time'] = df['Timestamp'].dt.time
df = df.set_index('Timestamp')
# print(df.rolling('5min')['Distance'].agg(np.sum))
df2 = df.resample('5T').agg({'Devid': lastele, 'Lon': lastele, 'Lat': lastele,
'Date': lastele, 'Time': lastele, 'Distance': np.sum})
df2['Vel'] = df2['Distance'] / 5
df2 = df2.ffill()
print(df2)
dfs.append(df2)
# ######
if dfs:
final_df = pd.concat(dfs)
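# Self-contained sketch of the 5-minute resample pattern used above, with made-up values
# (same idea: sum the distance per bin, carry the last observed position forward):
#
#   demo = pd.DataFrame({'Distance': [1.0, 2.0, 3.0],
#                        'Lon': [116.10, 116.12, 116.15],
#                        'Lat': [39.90, 39.91, 39.93]},
#                       index=pd.to_datetime(['2019-01-10 00:01',
#                                             '2019-01-10 00:03',
#                                             '2019-01-10 00:07']))
#   demo_5min = demo.resample('5T').agg({'Distance': np.sum, 'Lon': lastele, 'Lat': lastele})
#   demo_5min['Vel'] = demo_5min['Distance'] / 5   # distance per minute within each bin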
#Python wrapper / library for Einstein Analytics API
#core libraries
import sys
import logging
import json
import time
from dateutil import tz
import re
from decimal import Decimal
import base64
import csv
import math
import pkg_resources
# installed libraries
import browser_cookie3
import requests
import unicodecsv
from unidecode import unidecode
import datetime
import pandas as pd
from pandas import json_normalize
import numpy as np
#init logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
logging.basicConfig(format="%(levelname)s: %(message)s")
class salesforceEinsteinAnalytics(object):
def __init__(self, env_url, browser, rawcookie=None, cookiefile=None, logLevel='WARN'):
self.setLogLvl(level=logLevel)
self.env_url = env_url
#Check if package is current version
response = requests.get('https://pypi.org/pypi/SalesforceEinsteinAnalytics/json')
latest_version = response.json()['info']['version']
curr_version = pkg_resources.get_distribution("SalesforceEinsteinAnalytics").version
if curr_version != latest_version:
logging.warning('New version available. Use "pip install SalesforceEinsteinAnalytics --upgrade" to upgrade.')
#get browser cookie to use in request header
if rawcookie != None:
self.header = {'Authorization': 'Bearer '+rawcookie, 'Content-Type': 'application/json'}
elif cookiefile != None:
print('using cookiefile')
try:
if browser == 'chrome':
cj = browser_cookie3.chrome(domain_name=env_url[8:], cookie_file=cookiefile)
my_cookies = requests.utils.dict_from_cookiejar(cj)
self.header = {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'}
elif browser == 'firefox':
cj = browser_cookie3.firefox(domain_name=env_url[8:], cookie_file=cookiefile)
my_cookies = requests.utils.dict_from_cookiejar(cj)
self.header = {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'}
else:
logging.error('Please select a valid browser (chrome or firefox)')
sys.exit(1)
except:
logging.error('ERROR: Could not get session ID. Make sure you are logged into a live Salesforce session (chrome/firefox).')
sys.exit(1)
else:
try:
if browser == 'chrome':
cj = browser_cookie3.chrome(domain_name=env_url[8:]) #remove first 8 characters since browser cookie does not expect "https://"
my_cookies = requests.utils.dict_from_cookiejar(cj)
self.header = {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'}
elif browser == 'firefox':
cj = browser_cookie3.firefox(domain_name=env_url[8:])
my_cookies = requests.utils.dict_from_cookiejar(cj)
self.header = {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'}
else:
logging.error('Please select a valid browser (chrome or firefox)')
sys.exit(1)
except:
logging.error('ERROR: Could not get session ID. Make sure you are logged into a live Salesforce session (chrome/firefox).')
sys.exit(1)
def setLogLvl(self, level='WARN'):
if level == 'DEBUG':
logging.getLogger().setLevel(logging.DEBUG)
elif level == 'INFO':
logging.getLogger().setLevel(logging.INFO)
elif level == 'WARN':
logging.getLogger().setLevel(logging.WARN)
else:
logging.getLogger().setLevel(logging.ERROR)
def get_local_time(self, add_sec=None, timeFORfile=False):
#set timezone for displayed operation start time
curr_time = datetime.datetime.utcnow().replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal())
if add_sec is not None:
return (curr_time + datetime.timedelta(seconds=add_sec)).strftime("%I:%M:%S %p")
elif timeFORfile == True:
return curr_time.strftime("%m_%d_%Y__%I%p")
else:
return curr_time.strftime("%I:%M:%S %p")
def get_dataset_id(self, dataset_name, search_type='API Name', verbose=False):
params = {'pageSize': 50, 'sort': 'Mru', 'hasCurrentOnly': 'true', 'q': dataset_name}
dataset_json = requests.get(self.env_url+'/services/data/v46.0/wave/datasets', headers=self.header, params=params)
dataset_df = json_normalize(json.loads(dataset_json.text)['datasets'])
#check if the user wants to seach by API name or label name
if search_type == 'UI Label':
dataset_df = dataset_df[dataset_df['label'] == dataset_name]
else:
dataset_df = dataset_df[dataset_df['name'] == dataset_name]
#show user how many matches that they got. Might want to use exact API name if getting multiple matches for label search.
if verbose == True:
print('Found '+str(dataset_df.shape[0])+' matching datasets.')
#if dataframe is empty then return not found message or return the dataset ID
if dataset_df.empty == True:
logging.warning('Dataset not found. Please check name or API name in Einstein Analytics.')
sys.exit(1)
else:
dsnm = dataset_df['name'].tolist()[0]
dsid = dataset_df['id'].tolist()[0]
#get dataset version ID
r = requests.get(self.env_url+'/services/data/v46.0/wave/datasets/'+dsid, headers=self.header)
dsvid = json.loads(r.text)['currentVersionId']
return dsnm, dsid, dsvid
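# Usage sketch (hypothetical org URL and dataset name; requires a live Salesforce browser session):
#
#   ea = salesforceEinsteinAnalytics(env_url='https://yourorg.my.salesforce.com', browser='chrome')
#   ds_name, ds_id, ds_version_id = ea.get_dataset_id('Opportunity_Dataset', search_type='API Name', verbose=True)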
def run_saql_query(self, saql, save_path=None, verbose=False):
'''
This function takes a saql query as an argument and returns a dataframe or saves to csv
The query can be in JSON form or can be in the UI SAQL form
load statements must have the appropriate spaces: =_load_\"datasetname\";
'''
if verbose == True:
start = time.time()
print('Checking SAQL and Finding Dataset IDs...')
print('Process started at: '+str(self.get_local_time()))
saql = saql.replace('\"','\\"') #convert UI saql query to JSON format
#create a dictionary with all datasets used in the query
load_stmt_old = re.findall(r"(= load )(.*?)(;)", saql)
load_stmt_new = load_stmt_old.copy()
for ls in range(0,len(load_stmt_new)):
load_stmt_old[ls] = ''.join(load_stmt_old[ls])
dsnm, dsid, dsvid = self.get_dataset_id(dataset_name=load_stmt_new[ls][1].replace('\\"',''), verbose=verbose)
load_stmt_new[ls] = ''.join(load_stmt_new[ls])
load_stmt_new[ls] = load_stmt_new[ls].replace(dsnm, dsid+'/'+dsvid)
#update saql with dataset ID and version ID
for i in range(0,len(load_stmt_new)):
saql = saql.replace(load_stmt_old[i], load_stmt_new[i])
saql = saql.replace('\\"','\"')
if verbose == True:
print('Running SAQL Query...')
#run query and return dataframe or save as csv
payload = {"query":saql}
r = requests.post(self.env_url+'/services/data/v46.0/wave/query', headers=self.header, data=json.dumps(payload) )
df = json_normalize(json.loads(r.text)['results']['records'])
if save_path is not None:
if verbose == True:
print('Saving result to CSV...')
df.to_csv(save_path, index=False)
if verbose == True:
end = time.time()
print('Dataframe saved to CSV...')
print('Completed in '+str(round(end-start,3))+'sec')
return df
else:
if verbose == True:
end = time.time()
print('Completed in '+str(round(end-start,3))+'sec')
return df
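# Usage sketch (hypothetical dataset and field names). The load statement keeps the spacing the
# docstring requires so the regex above can swap in the dataset/version ids:
#
#   saql = "q = load \"Opportunity_Dataset\"; q = group q by 'StageName'; q = foreach q generate 'StageName' as 'Stage', count() as 'cnt';"
#   result_df = ea.run_saql_query(saql, verbose=True)   # ea as constructed for get_dataset_id above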
def restore_previous_dashboard_version(self, dashboard_id, version_num=None, save_json_path=None):
'''
version numbers go backwards: 0 is the current version and 20 is the oldest version retained.
It is best practice to call the function with no version number first and review the returned history before supplying one.
'''
#get broken dashboard version history
r = requests.get(self.env_url+'/services/data/v46.0/wave/dashboards/'+dashboard_id+'/histories', headers=self.header)
history_df = json_normalize(json.loads(r.text)['histories'])
if save_json_path is not None and version_num is not None:
preview_link = history_df['previewUrl'].tolist()[version_num]
r_restore = requests.get(self.env_url+preview_link, headers=self.header)
with open(save_json_path, 'w', encoding='utf-8') as f:
json.dump(r_restore.json(), f, ensure_ascii=False, indent=4)
elif version_num is not None:
payload = { "historyId": history_df['id'].tolist()[version_num] }
fix = requests.put(self.env_url+history_df['revertUrl'].tolist()[version_num], headers=self.header, data=json.dumps(payload))
else:
return history_df
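# Usage sketch (hypothetical dashboard id): call without a version number first to inspect the
# history dataframe, then pass version_num to revert (0 = current, larger = older).
#
#   history = ea.restore_previous_dashboard_version('0FK3h000000XXXXXAA')
#   ea.restore_previous_dashboard_version('0FK3h000000XXXXXAA', version_num=1)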
def get_app_user_list(self, app_id=None, save_path=None, verbose=False, max_request_attempts=3):
if verbose == True:
start = time.time()
progress_counter = 0
print('Getting app user list and access details...')
print('Process started at: '+str(self.get_local_time()))
if app_id is None:
'''ALERT: CURRENTLY GETTING AN ERROR FOR ALL APP REQUEST
ERROR = OpenSSL.SSL.SysCallError: (-1, 'Unexpected EOF')
Proposed Solution is to add a try/except block to handle the error
'''
app_user_df = pd.DataFrame()
attempts = 0
while attempts < max_request_attempts:
try:
r = requests.get(self.env_url+'/services/data/v46.0/wave/folders', headers=self.header)
response = json.loads(r.text)
total_size = response['totalSize']
next_page = response['nextPageUrl']
break
except:
attempts += 1
logging.warning("Unexpected error:", sys.exc_info()[0])
logging.warning("Trying again...")
for app in response['folders']:
attempts = 0
while attempts < max_request_attempts:
try:
r = requests.get(self.env_url+'/services/data/v46.0/wave/folders/'+app["id"], headers=self.header)
users = json.loads(r.text)['shares']
for u in users:
app_user_df = app_user_df.append( { "AppId": app['id'],
"AppName": app['name'],
"UserId": u['sharedWithId'],
"UserName": u['sharedWithLabel'],
"AccessType": u['accessType'],
"UserType": u['shareType']
}, ignore_index=True)
break
except:
attempts += 1
logging.warning("Unexpected error:", sys.exc_info()[0])
logging.warning("Trying again...")
#continue to pull data from next page
attempts = 0 # reset attempts for additional pages
while next_page is not None:
progress_counter += 25
if verbose == True:
print('Progress: '+str(round(progress_counter/total_size*100,1))+'%', end='', flush=True)
while attempts < max_request_attempts:
try:
# use a dedicated name for the paging response so the numpy alias "np" is not shadowed
page_resp = requests.get(self.env_url+next_page, headers=self.header)
response = json.loads(page_resp.text)
next_page = response['nextPageUrl']
break
except KeyError:
next_page = None
logging.error(sys.exc_info()[0])
break
except:
attempts += 1
logging.warning("Unexpected error:", sys.exc_info()[0])
logging.warning("Trying again...")
while attempts < max_request_attempts:
try:
for app in response['folders']:
r = requests.get(self.env_url+'/services/data/v46.0/wave/folders/'+app["id"], headers=self.header)
users = json.loads(r.text)['shares']
for u in users:
app_user_df = app_user_df.append( { "AppId": app['id'],
"AppName": app['name'],
"UserId": u['sharedWithId'],
"UserName": u['sharedWithLabel'],
"AccessType": u['accessType'],
"UserType": u['shareType']
}, ignore_index=True)
break
except:
attempts += 1
logging.warning("Unexpected error:", sys.exc_info()[0])
logging.warning("Trying again...")
elif app_id is not None:
app_user_df = pd.DataFrame()
if type(app_id) is list or type(app_id) is tuple:
for app in app_id:
r = requests.get(self.env_url+'/services/data/v46.0/wave/folders/'+app, headers=self.header)
response = json.loads(r.text)
for u in response['shares']:
app_user_df = app_user_df.append( { "AppId": app,
"AppName": response['name'],
"UserId": u['sharedWithId'],
"UserName": u['sharedWithLabel'],
"AccessType": u['accessType'],
"UserType": u['shareType']
}, ignore_index=True)
else:
logging.error('Please input a list or tuple of app Ids')
sys.exit(1)
if save_path is not None:
if verbose == True:
print('Saving result to CSV...')
app_user_df.to_csv(save_path, index=False)
if verbose == True:
end = time.time()
print('Dataframe saved to CSV...')
print('Completed in '+str(round(end-start,3))+'sec')
return app_user_df
else:
if verbose == True:
end = time.time()
print('Completed in '+str(round(end-start,3))+'sec')
return app_user_df
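# Usage sketch (hypothetical app id). Passing an explicit list avoids the slower crawl of every
# app in the org noted in the docstring above:
#
#   access_df = ea.get_app_user_list(app_id=['00l3h000000XXXXAAA'], save_path='app_access.csv', verbose=True)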
def update_app_access(self, user_dict, app_id, update_type, verbose=False):
'''
update types include: addNewUsers, fullReplaceAccess, removeUsers, updateUsers
'''
if verbose == True:
start = time.time()
print('Updating App Access...')
print('Process started at: '+str(self.get_local_time()))
if update_type == 'fullReplaceAccess':
shares = user_dict
elif update_type == 'addNewUsers':
r = requests.get(self.env_url+'/services/data/v46.0/wave/folders/'+app_id, headers=self.header)
response = json.loads(r.text)
shares = response['shares']
#remove fields in the JSON that we don't want
for s in shares:
try:
del s['sharedWithLabel']
except:
pass
try:
del s['imageUrl']
except:
pass
shares = shares + user_dict
elif update_type == 'removeUsers':
r = requests.get(self.env_url+'/services/data/v46.0/wave/folders/'+app_id, headers=self.header)
response = json.loads(r.text)
shares = response['shares']
to_remove = []
for u in user_dict:
to_remove.append(u['sharedWithId'])
for s in shares:
if s['sharedWithId'] in to_remove:
shares.remove(s)
#remove fields in the JSON that we don't want
for s in shares:
try:
del s['sharedWithLabel']
except:
pass
try:
del s['imageUrl']
except:
pass
elif update_type == 'updateUsers':
r = requests.get(self.env_url+'/services/data/v46.0/wave/folders/'+app_id, headers=self.header)
response = json.loads(r.text)
shares = response['shares']
to_update = []
for u in user_dict:
to_update.append(u['sharedWithId'])
for s in range(0,len(shares)):
if shares[s]['sharedWithId'] in to_update:
shares[s] = next(item for item in user_dict if item["sharedWithId"] == shares[s]['sharedWithId'])
#remove fields in the JSON that we don't want
for s in shares:
try:
del s['sharedWithLabel']
except:
pass
try:
del s['imageUrl']
except:
pass
else:
shares = None
logging.error('Please choose a user update operation. Options are: addNewUsers, fullReplaceAccess, removeUsers, updateUsers')
sys.exit(1)
if shares is not None:
payload = {"shares": shares}
r = requests.patch(self.env_url+'/services/data/v46.0/wave/folders/'+app_id, headers=self.header, data=json.dumps(payload))
if verbose == True:
end = time.time()
print('User Access Updated')
print('Completed in '+str(round(end-start,3))+'sec')
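# Usage sketch (hypothetical ids). Each entry mirrors the "shares" records handled above,
# i.e. the keys sharedWithId, accessType and shareType; the values shown are placeholders.
#
#   new_users = [{'sharedWithId': '0053h000000XXXXAAA', 'accessType': 'view', 'shareType': 'user'}]
#   ea.update_app_access(new_users, app_id='00l3h000000XXXXAAA', update_type='addNewUsers', verbose=True)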
def update_dashboard_access(self, update_df, update_type, verbose=True):
'''
Function to make it easier to update access using dashboard names vs finding all apps needed.
update dataframe should have the following columns: Dashboard Id, Access Type, and User Id
'''
pass
def remove_non_ascii(self, df, columns=None):
if columns == None:
columns = df.columns
else:
columns = columns
for c in columns:
if df[c].dtype == "O":
df[c] = df[c].apply(lambda x: unidecode(x).replace("?",""))
def create_xmd(self, df, dataset_label, useNumericDefaults=True, default_measure_val="0.0", default_measure_fmt="0.0#", charset="UTF-8", deliminator=",", lineterminator="\r\n"):
dataset_label = dataset_label
dataset_api_name = dataset_label.replace(" ","_")
fields = []
for c in df.columns:
if df[c].dtype == "datetime64[ns]":
name = c.replace(" ","_")
name = name.replace("__","_")
date = {
"fullyQualifiedName": name,
"name": name,
"type": "Date",
"label": c,
"format": "yyyy-MM-dd HH:mm:ss"
}
fields.append(date)
elif np.issubdtype(df[c].dtype, np.number):
if useNumericDefaults == True:
precision = 18
scale = 2
elif useNumericDefaults == False:
precision = df[c].astype('str').apply(lambda x: len(x.replace('.', ''))).max()
scale = -df[c].astype('str').apply(lambda x: Decimal(x).as_tuple().exponent).min()
name = c.replace(" ","_")
name = name.replace("__","_")
measure = {
"fullyQualifiedName": name,
"name": name,
"type": "Numeric",
"label": c,
"precision": precision,
"defaultValue": default_measure_val,
"scale": scale,
"format": default_measure_fmt,
"decimalSeparator": "."
}
fields.append(measure)
else:
name = c.replace(" ","_")
name = name.replace("__","_")
dimension = {
"fullyQualifiedName": name,
"name": name,
"type": "Text",
"label": c
}
fields.append(dimension)
xmd = {
"fileFormat": {
"charsetName": charset,
"fieldsDelimitedBy": deliminator,
"linesTerminatedBy": lineterminator
},
"objects": [
{
"connector": "CSV",
"fullyQualifiedName": dataset_api_name,
"label": dataset_label,
"name": dataset_api_name,
"fields": fields
}
]
}
return str(xmd).replace("'",'"')
def load_df_to_EA(self, df, dataset_api_name, xmd=None, encoding='UTF-8', operation='Overwrite', useNumericDefaults=True, default_measure_val="0.0", max_request_attempts=3,
default_measure_fmt="0.0#", charset="UTF-8", deliminator=",", lineterminator="\r\n", removeNONascii=True, ascii_columns=None, fillna=True, dataset_label=None, verbose=False):
'''
field names will show up exactly as the column names in the supplied dataframe
'''
if verbose == True:
start = time.time()
print('Loading Data to Einstein Analytics...')
print('Process started at: '+str(self.get_local_time()))
dataset_api_name = dataset_api_name.replace(" ","_")
if fillna == True:
for c in df.columns:
if df[c].dtype == "O":
df[c].fillna('NONE', inplace=True)
elif np.issubdtype(df[c].dtype, np.number):
df[c].fillna(0, inplace=True)
elif df[c].dtype == "datetime64[ns]":
df[c].fillna(pd.to_datetime('1900-01-01 00:00:00'), inplace=True)
if ascii_columns is not None:
self.remove_non_ascii(df, columns=ascii_columns)
elif removeNONascii == True:
self.remove_non_ascii(df)
# Upload Config Steps
if xmd is not None:
xmd64 = base64.urlsafe_b64encode(json.dumps(xmd).encode(encoding)).decode()
else:
xmd64 = base64.urlsafe_b64encode(self.create_xmd(df, dataset_api_name, useNumericDefaults=useNumericDefaults, default_measure_val=default_measure_val,
default_measure_fmt=default_measure_fmt, charset=charset, deliminator=deliminator, lineterminator=lineterminator).encode(encoding)).decode()
upload_config = {
'Format' : 'CSV',
'EdgemartAlias' : dataset_api_name,
'Operation' : operation,
'Action' : 'None',
'MetadataJson': xmd64
}
r1 = requests.post(self.env_url+'/services/data/v46.0/sobjects/InsightsExternalData', headers=self.header, data=json.dumps(upload_config))
try:
json.loads(r1.text)['success'] == True
except:
logging.error(' Upload Config Failed', exc_info=True)
logging.error(r1.text)
sys.exit(1)
if verbose == True:
print('Upload Configuration Complete...')
print('Chunking and Uploading Data Parts...')
MAX_FILE_SIZE = 10 * 1000 * 1000 - 49
df_memory = sys.getsizeof(df)
rows_in_part = math.ceil(df.shape[0] / math.ceil(df_memory / MAX_FILE_SIZE))
partnum = 0
range_start = 0
max_data_part = rows_in_part
for chunk in range(0, math.ceil(df_memory / MAX_FILE_SIZE)):
df_part = df.iloc[range_start:max_data_part,:]
if chunk == 0:
data_part64 = base64.b64encode(df_part.to_csv(index=False, quotechar='"', quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode()
else:
data_part64 = base64.b64encode(df_part.to_csv(index=False, header=False, quotechar='"',quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode()
range_start += rows_in_part
max_data_part += rows_in_part
partnum += 1
if verbose == True:
print('\rChunk '+str(chunk+1)+' of '+str(math.ceil(df_memory / MAX_FILE_SIZE))+' completed', end='', flush=True)
payload = {
"InsightsExternalDataId" : json.loads(r1.text)['id'],
"PartNumber" : str(partnum),
"DataFile" : data_part64
}
attempts = 0
while attempts < max_request_attempts:
try:
r2 = requests.post(self.env_url+'/services/data/v46.0/sobjects/InsightsExternalDataPart', headers=self.header, data=json.dumps(payload))
json.loads(r2.text)['success'] == True
break
except:
attempts += 1
logging.error('\n Datapart Upload Failed', exc_info=True)
logging.debug(r2.text)
if verbose == True:
print('\nDatapart Upload Complete...')
payload = {
"Action" : "Process"
}
attempts = 0
while attempts < max_request_attempts:
try:
r3 = requests.patch(self.env_url+'/services/data/v46.0/sobjects/InsightsExternalData/'+json.loads(r1.text)['id'], headers=self.header, data=json.dumps(payload))
break
except TimeoutError as e:
attempts += 1
logging.debug(sys.exc_info()[0])
logging.warning("Connection Timeout Error. Trying again...")
if verbose == True:
end = time.time()
print('Data Upload Process Started. Check Progress in Data Monitor.')
print('Job ID: '+str(json.loads(r1.text)['id']))
print('Completed in '+str(round(end-start,3))+'sec')
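# Usage sketch (hypothetical dataset API name). The dataframe column names become the field
# labels exactly as supplied, per the docstring above:
#
#   sales_df = pd.DataFrame({'Region': ['East', 'West'], 'Amount': [100.0, 250.5]})
#   ea.load_df_to_EA(sales_df, dataset_api_name='Sales_Demo', operation='Overwrite', verbose=True)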
def addArchivePrefix(self, warnList, prefix='[ARCHIVE] ', removePrefix=False, verbose=False):
'''
Function to add a warning that an asset will soon be archived.
The name of the dashboard will have the chosen prefix added.
max label length is 80 chars and is right trimmed if longer possibly erasing the original title
Adds prefix to existing label so running twice could overwrite original title
'''
for a in range(0,len(warnList)):
try:
r = requests.get(self.env_url+'/services/data/v46.0/wave/dashboards/'+warnList[a], headers=self.header)
currentLabel = json.loads(r.text)['label']
if removePrefix == True:
    if currentLabel[:len(prefix)] == prefix: # only strip when the prefix is actually present so the original label isn't overwritten
        newLabel = currentLabel[len(prefix):]
    else:
        newLabel = currentLabel
else:
    newLabel = prefix+currentLabel
payload = {'label': newLabel[0:79]}
r = requests.patch(self.env_url+'/services/data/v46.0/wave/dashboards/'+warnList[a], headers=self.header, data=json.dumps(payload))
if json.loads(r.text)['label'] == prefix+currentLabel:
logging.debug('Successfully updated asset name for: '+warnList[a])
if verbose == True:
print('Progress: '+str(round(a/len(warnList)*100,1))+'%', end='', flush=True)
except:
try:
r = requests.get(self.env_url+'/services/data/v46.0/wave/lenses/'+warnList[a], headers=self.header)
currentLabel = json.loads(r.text)['label']
if removePrefix == True:
    if currentLabel[:len(prefix)] == prefix: # only strip when the prefix is actually present so the original label isn't overwritten
        newLabel = currentLabel[len(prefix):]
    else:
        newLabel = currentLabel
else:
    newLabel = prefix+currentLabel
payload = {'label': newLabel[0:79]} #max char len for label = 80
r = requests.patch(self.env_url+'/services/data/v46.0/wave/lenses/'+warnList[a], headers=self.header, data=json.dumps(payload))
#debugging code that should be removed
if json.loads(r.text)['label'] == prefix+currentLabel:
logging.debug('Successfully updated asset name for: '+warnList[a])
##########################################
if verbose == True:
print('Progress: '+str(round(a/len(warnList)*100,1))+'%', end='', flush=True)
except:
logging.warning(' could not update asset label: '+warnList[a])
def archiveAssets(self, archiveAppId, ToMoveList, verbose=False):
'''
ToMoveList can be the Ids for either a dashboard or a lens
'''
payload = {'folder': {'id':archiveAppId} }
for a in range(0,len(ToMoveList)):
try:
r = requests.patch(self.env_url+'/services/data/v46.0/wave/dashboards/'+ToMoveList[a], headers=self.header, data=json.dumps(payload) )
if json.loads(r.text)['folder']['id'] == archiveAppId: #check to ensure response has new folder id
if verbose == True:
print('Progress: '+str(round(a/len(ToMoveList)*100,1))+'%', end='', flush=True)
logging.debug('Successfully archived (type=dashboard): '+ToMoveList[a])
except:
# if response does not contain the new folder id then try same command for a lens
try:
r = requests.patch(self.env_url+'/services/data/v46.0/wave/lenses/'+ToMoveList[a], headers=self.header, data=json.dumps(payload) )
if json.loads(r.text)['folder']['id'] == archiveAppId: #check to ensure response has new folder id
if verbose == True:
print('Progress: '+str(round(a/len(ToMoveList)*100,1))+'%', end='', flush=True)
logging.debug('Successfully archived (type=lens): '+ToMoveList[a])
except:
logging.warning(' could not move asset: '+ToMoveList[a])
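# Usage sketch (hypothetical asset and app ids): warn users first by prefixing the labels,
# then move the assets into the archive app.
#
#   stale_assets = ['0FK3h000000XXXXXAA', '0FK3h000000YYYYYAA']
#   ea.addArchivePrefix(stale_assets, prefix='[ARCHIVE] ', verbose=True)
#   ea.archiveAssets(archiveAppId='00l3h000000ZZZZAAA', ToMoveList=stale_assets, verbose=True)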
def getMetaData(self, appIdList, objectList=['dashboards','lenses','datasets'], max_request_attempts=3, verbose=False):
progress_counter = 0
convertToDateList = ['createdDate','lastModifiedDate','refreshDate']
assets_df = pd.DataFrame()
for a in appIdList:
if verbose == True:
progress_counter += 1
print('Progress: '+str(round(progress_counter/len(appIdList)*100,1))+'%', end='', flush=True)
params = {'pageSize': 50, 'sort': 'Mru', 'hasCurrentOnly': 'true', 'folderId': a}
for obj in objectList:
attempts = 0
while attempts < max_request_attempts:
try:
r1 = requests.get(self.env_url+'/services/data/v46.0/wave/'+obj, headers=self.header, params=params)
response = json.loads(r1.text)
app_assets_df = json_normalize(response[obj])
total_size = response['totalSize']
try:
next_page = json.loads(r1.text)['nextPageUrl']
except KeyError as e:
logging.debug(e)
next_page = None
break
except:
attempts += 1
logging.warning("Unexpected error:", sys.exc_info()[0])
logging.warning("Trying again...")
assets_df = assets_df.append(app_assets_df, ignore_index=True)
#continue to pull data from next page if found
attempts = 0 # reset attempts for additional pages
while next_page is not None:
while attempts < max_request_attempts:
try:
r1 = requests.get(self.env_url+next_page, headers=self.header, params=params)
app_assets_df = json_normalize(json.loads(r1.text)[obj])
try:
next_page = json.loads(r1.text)['nextPageUrl']
except KeyError as e:
logging.debug(e)
next_page = None
break
except:
attempts += 1
logging.warning("Unexpected error:", sys.exc_info()[0])
logging.warning("Trying again...")
assets_df = assets_df.append(app_assets_df, ignore_index=True)
for i in convertToDateList:
assets_df[i].fillna('1900-01-01T00:00:00.000Z', inplace=True)
assets_df[i] = assets_df[i].apply(lambda x: pd.to_datetime(x))
import re
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
class TestSeriesReplace:
def test_replace_explicit_none(self):
# GH#36984 if the user explicitly passes value=None, give it to them
ser = pd.Series([0, 0, ""], dtype=object)
result = ser.replace("", None)
expected = pd.Series([0, 0, None], dtype=object)
tm.assert_series_equal(result, expected)
df = pd.DataFrame(np.zeros((3, 3)))
df.iloc[2, 2] = ""
result = df.replace("", None)
expected = pd.DataFrame(
{
0: np.zeros(3),
1: np.zeros(3),
2: np.array([0.0, 0.0, None], dtype=object),
}
)
assert expected.iloc[2, 2] is None
tm.assert_frame_equal(result, expected)
# GH#19998 same thing with object dtype
ser = pd.Series([10, 20, 30, "a", "a", "b", "a"])
result = ser.replace("a", None)
expected = pd.Series([10, 20, 30, None, None, "b", None])
assert expected.iloc[-1] is None
tm.assert_series_equal(result, expected)
def test_replace_noop_doesnt_downcast(self):
# GH#44498
ser = pd.Series([None, None, pd.Timestamp("2021-12-16 17:31")], dtype=object)
res = ser.replace({np.nan: None}) # should be a no-op
tm.assert_series_equal(res, ser)
assert res.dtype == object
# same thing but different calling convention
res = ser.replace(np.nan, None)
tm.assert_series_equal(res, ser)
assert res.dtype == object
def test_replace(self):
N = 100
ser = pd.Series(np.random.randn(N))
ser[0:4] = np.nan
ser[6:10] = 0
# replace list with a single value
return_value = ser.replace([np.nan], -1, inplace=True)
assert return_value is None
exp = ser.fillna(-1)
tm.assert_series_equal(ser, exp)
rs = ser.replace(0.0, np.nan)
ser[ser == 0.0] = np.nan
tm.assert_series_equal(rs, ser)
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)
ser[:5] = np.nan
ser[6:10] = "foo"
ser[20:30] = "bar"
# replace list with a single value
rs = ser.replace([np.nan, "foo", "bar"], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
assert return_value is None
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
def test_replace_nan_with_inf(self):
ser = pd.Series([np.nan, 0, np.inf])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
ser = pd.Series([np.nan, 0, "foo", "bar", np.inf, None, pd.NaT])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
filled = ser.copy()
filled[4] = 0
tm.assert_series_equal(ser.replace(np.inf, 0), filled)
def test_replace_listlike_value_listlike_target(self, datetime_series):
ser = pd.Series(datetime_series.index)
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
# malformed
msg = r"Replacement lists must match in length\. Expecting 3 got 2"
with pytest.raises(ValueError, match=msg):
ser.replace([1, 2, 3], [np.nan, 0])
# ser is dt64 so can't hold 1 or 2, so this replace is a no-op
result = ser.replace([1, 2], [np.nan, 0])
tm.assert_series_equal(result, ser)
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
tm.assert_series_equal(result, pd.Series([4, 3, 2, 1, 0]))
def test_replace_gh5319(self):
# API change from 0.12?
# GH 5319
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace([np.nan])
tm.assert_series_equal(result, expected)
ser = pd.Series([0, np.nan, 2, 3, 4])
import addfips
import os
import pandas as pd
import datetime
ageVariables = {
'DATE': 'date_stamp',
'AGE_RANGE': 'age_group',
'AR_TOTALCASES': 'cnt_confirmed',
'AR_TOTALPERCENT': 'pct_confirmed',
'AR_NEWCASES': 'cnt_confirmed_new',
'AR_NEWPERCENT': 'pct_confirmed_new',
'AR_TOTALDEATHS' : 'cnt_death',
'AR_NEWDEATHS': 'cnt_death_new'
}
countyVariables = {
'DATE': 'date_stamp',
'COUNTY': 'us_county_fips',
'TOTAL_CASES': 'cnt_total',
'NEW_CASES': 'cnt_total_new',
'TOTAL_CONFIRMED': 'cnt_confirmed',
'NEW_CONFIRMED': 'cnt_confirmed_new',
'TOTAL_PROBABLE': 'cnt_probable',
'NEW_PROBABLE': 'cnt_probable_new',
'POS_TESTS': 'cnt_tested_pos',
'NEG_TESTS': 'cnt_tested_neg',
'TOTAL_TESTS': 'cnt_tested',
'NEW_TESTS': 'cnt_tested_new',
'NEW_DEATHS': 'cnt_death_new',
'TOTAL_DEATHS': 'cnt_death',
'NEW_RECOVERED': 'cnt_recovered_new',
'TOTAL_RECOVERED': 'cnt_recovered',
'NEW_ACTIVE': 'cnt_active_new',
'TOTAL_ACTIVE': 'cnt_active',
'NEW_HOSPITALIZED': 'cnt_hospitalized_new',
'TOTAL_HOSPITALIZED': 'cnt_hospitalized',
}
dailyVariables = {
'DATE': 'date_stamp',
'TOTAL_CASES': 'cnt_total',
'NEW_CASES': 'cnt_total_new',
'TOTAL_CONFIRMED': 'cnt_confirmed',
'NEW_CONFIRMED': 'cnt_confirmed_new',
'TOTAL_PROBABLE': 'cnt_probable',
'NEW_PROBABLE': 'cnt_probable_new',
'POS_TESTS': 'cnt_tested_pos',
'NEG_TESTS': 'cnt_tested_neg',
'TOTAL_TESTS': 'cnt_tested',
'NEW_TESTS': 'cnt_tested_new',
'NEW_DEATHS': 'cnt_death_new',
'TOTAL_DEATHS': 'cnt_death',
'NEW_RECOVERED': 'cnt_recovered_new',
'TOTAL_RECOVERED': 'cnt_recovered',
'NEW_ACTIVE': 'cnt_active_new',
'TOTAL_ACTIVE': 'cnt_active',
'NEW_HOSP': 'cnt_hospitalized_new',
'TOTAL_HOSP': 'cnt_hospitalized',
}
raceEthSexVariables = {
'Date': 'date_stamp',
'Category': 'category_type',
'Cat_Detail': 'category_name',
'CAT_DETAIL': 'category_name',
'Cat_CaseCount': 'cnt_confirmed',
'Cat_Percent': 'pct_confirmed',
'CAT_DEATHCOUNT' : 'cnt_death',
'CAT_DEATHPERCENT': 'pct_death'
}
def cleanAgeData(data):
df = pd.DataFrame(data)
# Rename the file headers
df.rename(ageVariables, axis="columns", inplace=True)
# Reformat dates
df['date_stamp'] = pd.to_datetime(df['date_stamp'], format='%m-%d-%y')
# Code age ranges
df['age_group'] = df['age_group'].map({ '0-10 years':'00', '11-20 years': '11', '21-30 years': '21', '31-40 years': '31', '41-50 years': '41', '51-60 years': '51', '61-70 years': '61', '71-80 years': '71', '81+ years': '81', 'Pending': '99' })
# multiply the percentages by 100
df['pct_confirmed'] = df['pct_confirmed'].apply(lambda x: round(x*100,4))
df['pct_confirmed_new'] = df['pct_confirmed_new'].apply(lambda x: round(x*100, 4))
#cast count variables to integers
df['cnt_death'] = df['cnt_death'].astype(pd.Int32Dtype())
df['cnt_death_new'] = df['cnt_death_new'].astype(pd.Int32Dtype())
df['cnt_confirmed'] = df['cnt_confirmed'].astype(pd.Int32Dtype())
# reorder so that the cnt and new are always next to each other in the same order
df = df[['date_stamp', 'age_group', 'cnt_confirmed', 'cnt_confirmed_new', 'pct_confirmed', 'pct_confirmed_new', 'cnt_death', 'cnt_death_new']]
# order the records by date
df = df.sort_values(by=['date_stamp','age_group'], ascending=True)
return df
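# Minimal self-contained sketch of the age-cleaning step with one made-up record
# (the raw column names mirror the TN health department export handled above):
_age_sample = [{'DATE': '01-15-21', 'AGE_RANGE': '21-30 years', 'AR_TOTALCASES': 10,
                'AR_TOTALPERCENT': 0.1, 'AR_NEWCASES': 1, 'AR_NEWPERCENT': 0.01,
                'AR_TOTALDEATHS': 0, 'AR_NEWDEATHS': 0}]
_age_sample_clean = cleanAgeData(_age_sample)   # one row: age_group '21', pct_confirmed 10.0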
def cleanCountyData(data):
df = pd.DataFrame(data)
# Rename the file headers
df.rename(countyVariables, axis="columns", inplace=True)
# Reformat dates
df['date_stamp'] = pd.to_datetime(df['date_stamp'], format='%m-%d-%y')
# Copy original county value to keep the pending and out of state values
df['tn_covid_geo'] = df['us_county_fips']
# Change county name to fips code
af = addfips.AddFIPS()
fips = []
for key, value in df['us_county_fips'].items():
fips.append(af.get_county_fips(value, 'Tennessee'))
df['us_county_fips'] = fips
# Copy appropriate fips codes to covid geo
df.loc[(df['tn_covid_geo'] != 'Pending') & (df['tn_covid_geo'] != 'Out of State'), 'tn_covid_geo'] = df['us_county_fips']
df.loc[df['tn_covid_geo'] == 'Pending', 'tn_covid_geo'] = '47PEN'
df.loc[df['tn_covid_geo'] == 'Out of State', 'tn_covid_geo'] = '47OOS'
# format the count columns as nullable integers (pd.Int32Dtype allows missing values)
df['cnt_total'] = df['cnt_total'].astype(pd.Int32Dtype())
df['cnt_total_new'] = df['cnt_total_new'].astype(pd.Int32Dtype())
df['cnt_confirmed'] = df['cnt_confirmed'].astype(pd.Int32Dtype())
df['cnt_confirmed_new'] = df['cnt_confirmed_new'].astype(pd.Int32Dtype())
if 'cnt_probable' in df.columns:
df['cnt_probable'] = df['cnt_probable'].astype(pd.Int32Dtype())
df['cnt_probable_new'] = df['cnt_probable_new'].astype(pd.Int32Dtype())
df['cnt_tested_pos'] = df['cnt_tested_pos'].astype(pd.Int32Dtype())
df['cnt_tested_neg'] = df['cnt_tested_neg'].astype(pd.Int32Dtype())
df['cnt_tested'] = df['cnt_tested'].astype(pd.Int32Dtype())
df['cnt_tested_new'] = df['cnt_tested_new'].astype(pd.Int32Dtype())
df['cnt_death_new'] = df['cnt_death_new'].astype(pd.Int32Dtype())
df['cnt_death'] = df['cnt_death'].astype(pd.Int32Dtype())
df['cnt_recovered_new'] = df['cnt_recovered_new'].astype(pd.Int32Dtype())
df['cnt_recovered'] = df['cnt_recovered'].astype(pd.Int32Dtype())
df['cnt_active_new'] = df['cnt_active_new'].astype(pd.Int32Dtype())
df['cnt_active'] = df['cnt_active'].astype(pd.Int32Dtype())
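# Sketch of the county-name-to-FIPS lookup used above; AddFIPS.get_county_fips takes a county
# name plus a state and returns the 5-digit code as a string (value shown is assumed):
#
#   af = addfips.AddFIPS()
#   af.get_county_fips('Davidson', 'Tennessee')   # expected '47037'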
from statsmodels.compat.pandas import Appender, is_numeric_dtype
from typing import Sequence, Union
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_categorical_dtype
from scipy import stats
from statsmodels.iolib.table import SimpleTable
from statsmodels.stats.stattools import jarque_bera
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.docstring import Docstring, Parameter
from statsmodels.tools.validation import (
array_like,
bool_like,
float_like,
int_like,
)
PERCENTILES = (1, 5, 10, 25, 50, 75, 90, 95, 99)
QUANTILES = np.array(PERCENTILES) / 100.0
def pd_ptp(df):
return df.max() - df.min()
def nancount(x, axis=0):
return (1 - np.isnan(x)).sum(axis=axis)
def nanptp(arr, axis=0):
return np.nanmax(arr, axis=axis) - np.nanmin(arr, axis=axis)
def nanuss(arr, axis=0):
return np.nansum(arr ** 2, axis=axis)
def nanpercentile(arr, axis=0):
return np.nanpercentile(arr, PERCENTILES, axis=axis)
def nankurtosis(arr, axis=0):
return stats.kurtosis(arr, axis=axis, nan_policy="omit")
def nanskewness(arr, axis=0):
return stats.skew(arr, axis=axis, nan_policy="omit")
MISSING = {
"obs": nancount,
"mean": np.nanmean,
"std": np.nanstd,
"max": np.nanmax,
"min": np.nanmin,
"ptp": nanptp,
"var": np.nanvar,
"skew": nanskewness,
"uss": nanuss,
"kurtosis": nankurtosis,
"percentiles": nanpercentile,
}
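# Illustrative sketch (not part of the original module): each entry in MISSING
# maps a statistic name to a NaN-aware implementation, so a column containing
# missing values still produces finite summaries. Hypothetical helper for
# demonstration only.
def _missing_helpers_example():
    data = np.array([1.0, np.nan, 3.0, 5.0])
    # "percentiles" returns an array, so it is skipped here for brevity.
    return {name: func(data) for name, func in MISSING.items() if name != "percentiles"}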
def _kurtosis(a):
"""
wrapper for scipy.stats.kurtosis that returns nan instead of raising Error
missing options
"""
try:
res = stats.kurtosis(a)
except ValueError:
res = np.nan
return res
def _skew(a):
"""
wrapper for scipy.stats.skew that returns nan instead of raising Error
missing options
"""
try:
res = stats.skew(a)
except ValueError:
res = np.nan
return res
def sign_test(samp, mu0=0):
"""
Signs test
Parameters
----------
samp : array_like
1d array. The sample for which you want to perform the sign test.
mu0 : float
See Notes for the definition of the sign test. mu0 is 0 by
default, but it is common to set it to the median.
Returns
-------
M
p-value
Notes
-----
The signs test returns
M = (N(+) - N(-))/2
where N(+) is the number of values above `mu0`, N(-) is the number of
values below. Values equal to `mu0` are discarded.
The p-value for M is calculated using the binomial distribution
and can be interpreted the same as for a t-test. The test-statistic
is distributed Binom(min(N(+), N(-)), n_trials, .5) where n_trials
equals N(+) + N(-).
See Also
--------
scipy.stats.wilcoxon
"""
samp = np.asarray(samp)
pos = np.sum(samp > mu0)
neg = np.sum(samp < mu0)
M = (pos - neg) / 2.0
try:
p = stats.binomtest(min(pos, neg), pos + neg, 0.5).pvalue
except AttributeError:
# Remove after min SciPy >= 1.7
p = stats.binom_test(min(pos, neg), pos + neg, 0.5)
return M, p
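# Illustrative sketch (not part of the original module): a quick check of
# sign_test on a tiny, made-up sample. Four values lie above mu0=2.0 and three
# below, so M = (4 - 3) / 2 = 0.5 with a large (non-significant) p-value.
def _sign_test_example():
    sample = np.array([1.2, 2.5, 3.1, 0.7, 2.2, 1.9, 2.8])
    M, p = sign_test(sample, mu0=2.0)
    return M, p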
NUMERIC_STATISTICS = (
"nobs",
"missing",
"mean",
"std_err",
"ci",
"std",
"iqr",
"iqr_normal",
"mad",
"mad_normal",
"coef_var",
"range",
"max",
"min",
"skew",
"kurtosis",
"jarque_bera",
"mode",
"median",
"percentiles",
)
CATEGORICAL_STATISTICS = ("nobs", "missing", "distinct", "top", "freq")
_additional = [
stat for stat in CATEGORICAL_STATISTICS if stat not in NUMERIC_STATISTICS
]
DEFAULT_STATISTICS = NUMERIC_STATISTICS + tuple(_additional)
class Description:
"""
Extended descriptive statistics for data
Parameters
----------
data : array_like
Data to describe. Must be convertible to a pandas DataFrame.
stats : Sequence[str], optional
Statistics to include. If not provided the full set of statistics is
computed. This list may evolve across versions to reflect best
practices. Supported options are:
"nobs", "missing", "mean", "std_err", "ci", "ci", "std", "iqr",
"iqr_normal", "mad", "mad_normal", "coef_var", "range", "max",
"min", "skew", "kurtosis", "jarque_bera", "mode", "freq",
"median", "percentiles", "distinct", "top", and "freq". See Notes for
details.
numeric : bool, default True
Whether to include numeric columns in the descriptive statistics.
categorical : bool, default True
Whether to include categorical columns in the descriptive statistics.
alpha : float, default 0.05
A number between 0 and 1 representing the size used to compute the
confidence interval, which has coverage 1 - alpha.
use_t : bool, default False
Use the Student's t distribution to construct confidence intervals.
percentiles : sequence[float]
A distinct sequence of floating point values all between 0 and 100.
The default percentiles are 1, 5, 10, 25, 50, 75, 90, 95, 99.
ntop : int, default 5
The number of top categorical labels to report. Default is 5.
Attributes
----------
numeric_statistics
The list of supported statistics for numeric data
categorical_statistics
The list of supported statistics for categorical data
default_statistics
The default list of statistics
See Also
--------
pandas.DataFrame.describe
Basic descriptive statistics
describe
A simplified version that returns a DataFrame
Notes
-----
The selectable statistics include:
* "nobs" - Number of observations
* "missing" - Number of missing observations
* "mean" - Mean
* "std_err" - Standard Error of the mean assuming no correlation
* "ci" - Confidence interval with coverage (1 - alpha) using the normal or
t. This option creates two entries in any tables: lower_ci and upper_ci.
* "std" - Standard Deviation
* "iqr" - Interquartile range
* "iqr_normal" - Interquartile range relative to a Normal
* "mad" - Mean absolute deviation
* "mad_normal" - Mean absolute deviation relative to a Normal
* "coef_var" - Coefficient of variation
* "range" - Range between the maximum and the minimum
* "max" - The maximum
* "min" - The minimum
* "skew" - The skewness defined as the standardized 3rd central moment
* "kurtosis" - The kurtosis defined as the standardized 4th central moment
* "jarque_bera" - The Jarque-Bera test statistic for normality based on
the skewness and kurtosis. This option creates two entries, jarque_bera
and jarque_bera_pval.
* "mode" - The mode of the data. This option creates two entries in all tables,
mode and mode_freq which is the empirical frequency of the modal value.
* "median" - The median of the data.
* "percentiles" - The percentiles. Values included depend on the input value of
``percentiles``.
* "distinct" - The number of distinct categories in a categorical.
* "top" - The mode common categories. Labeled top_n for n in 1, 2, ..., ``ntop``.
* "freq" - The frequency of the common categories. Labeled freq_n for n in 1,
2, ..., ``ntop``.
"""
_int_fmt = ["nobs", "missing", "distinct"]
numeric_statistics = NUMERIC_STATISTICS
categorical_statistics = CATEGORICAL_STATISTICS
default_statistics = DEFAULT_STATISTICS
def __init__(
self,
data: Union[np.ndarray, pd.Series, pd.DataFrame],
stats: Sequence[str] = None,
*,
numeric: bool = True,
categorical: bool = True,
alpha: float = 0.05,
use_t: bool = False,
percentiles: Sequence[Union[int, float]] = PERCENTILES,
ntop: int = 5,
):
data_arr = data
if not isinstance(data, (pd.Series, pd.DataFrame)):
data_arr = array_like(data, "data", maxdim=2)
if data_arr.ndim == 1:
data = pd.Series(data)
numeric = bool_like(numeric, "numeric")
categorical = bool_like(categorical, "categorical")
include = []
col_types = ""
if numeric:
include.append(np.number)
col_types = "numeric"
if categorical:
include.append("category")
col_types += "and " if col_types != "" else ""
col_types += "categorical"
if not numeric and not categorical:
raise ValueError(
"At least one of numeric and categorical must be True"
)
self._data = pd.DataFrame(data).select_dtypes(include)
if self._data.shape[1] == 0:
raise ValueError(
"Selecting {col_types} results in an empty DataFrame"
)
self._is_numeric = [is_numeric_dtype(dt) for dt in self._data.dtypes]
self._is_cat_like = [
is_categorical_dtype(dt) for dt in self._data.dtypes
]
if stats is not None:
undef = [stat for stat in stats if stat not in DEFAULT_STATISTICS]
if undef:
raise ValueError(
f"{', '.join(undef)} are not known statistics"
)
self._stats = (
list(DEFAULT_STATISTICS) if stats is None else list(stats)
)
self._ntop = int_like(ntop, "ntop")
self._compute_top = "top" in self._stats
self._compute_freq = "freq" in self._stats
if self._compute_top and self._ntop <= 0 < sum(self._is_cat_like):
raise ValueError("top must be a non-negative integer")
# Expand special stats
replacements = {
"mode": ["mode", "mode_freq"],
"ci": ["upper_ci", "lower_ci"],
"jarque_bera": ["jarque_bera", "jarque_bera_pval"],
"top": [f"top_{i}" for i in range(1, self._ntop + 1)],
"freq": [f"freq_{i}" for i in range(1, self._ntop + 1)],
}
for key in replacements:
if key in self._stats:
idx = self._stats.index(key)
self._stats = (
self._stats[:idx]
+ replacements[key]
+ self._stats[idx + 1 :]
)
self._percentiles = array_like(
percentiles, "percentiles", maxdim=1, dtype="d"
)
self._percentiles = np.sort(self._percentiles)
if np.unique(self._percentiles).shape[0] != self._percentiles.shape[0]:
raise ValueError("percentiles must be distinct")
if np.any(self._percentiles >= 100) or np.any(self._percentiles <= 0):
raise ValueError("percentiles must be strictly between 0 and 100")
self._alpha = float_like(alpha, "alpha")
if not 0 < alpha < 1:
raise ValueError("alpha must be strictly between 0 and 1")
self._use_t = bool_like(use_t, "use_t")
def _reorder(self, df: pd.DataFrame) -> pd.DataFrame:
return df.loc[[s for s in self._stats if s in df.index]]
@cache_readonly
def frame(self) -> pd.DataFrame:
"""
Descriptive statistics for both numeric and categorical data
Returns
-------
DataFrame
The statistics
"""
numeric = self.numeric
categorical = self.categorical
if categorical.shape[1] == 0:
return numeric
elif numeric.shape[1] == 0:
return categorical
df = pd.concat([numeric, categorical], axis=1)
return self._reorder(df[self._data.columns])
@cache_readonly
def numeric(self) -> pd.DataFrame:
"""
Descriptive statistics for numeric data
Returns
-------
DataFrame
The statistics of the numeric columns
"""
df: pd.DataFrame = self._data.loc[:, self._is_numeric]
cols = df.columns
_, k = df.shape
std = df.std()
count = df.count()
mean = df.mean()
mad = (df - mean).abs().mean()
std_err = std.copy()
std_err.loc[count > 0] /= np.sqrt(count.loc[count > 0])
if self._use_t:
q = stats.t(count - 1).ppf(1.0 - self._alpha / 2)
else:
q = stats.norm.ppf(1.0 - self._alpha / 2)
def _mode(ser):
mode_res = stats.mode(ser.dropna())
if mode_res[0].shape[0] > 0:
return [float(val) for val in mode_res]
return np.nan, np.nan
mode_values = df.apply(_mode).T
if mode_values.size > 0:
if isinstance(mode_values, pd.DataFrame):
# pandas 1.0 or later
mode = np.asarray(mode_values[0], dtype=float)
mode_counts = np.asarray(mode_values[1], dtype=np.int64)
else:
# pandas before 1.0 returns a Series of 2-elem list
mode = []
mode_counts = []
for idx in mode_values.index:
val = mode_values.loc[idx]
mode.append(val[0])
mode_counts.append(val[1])
mode = np.atleast_1d(mode)
mode_counts = np.atleast_1d(mode_counts)
else:
mode = mode_counts = np.empty(0)
loc = count > 0
mode_freq = np.full(mode.shape[0], np.nan)
mode_freq[loc] = mode_counts[loc] / count.loc[loc]
# TODO: Workaround for pandas AbstractMethodError in extension
# types. Remove when quantile is supported for these
_df = df
try:
from pandas.api.types import is_extension_array_dtype
_df = df.copy()
for col in df:
if is_extension_array_dtype(df[col].dtype):
_df[col] = _df[col].astype(object).fillna(np.nan)
except ImportError:
pass
if df.shape[1] > 0:
iqr = _df.quantile(0.75) - _df.quantile(0.25)
else:
iqr = mean
def _safe_jarque_bera(c):
a = np.asarray(c)
if a.shape[0] < 2:
return (np.nan,) * 4
return jarque_bera(a)
jb = df.apply(
lambda x: list(_safe_jarque_bera(x.dropna())), result_type="expand"
).T
nan_mean = mean.copy()
nan_mean.loc[nan_mean == 0] = np.nan
coef_var = std / nan_mean
results = {
"nobs": pd.Series(
np.ones(k, dtype=np.int64) * df.shape[0], index=cols
),
"missing": df.shape[0] - count,
"mean": mean,
"std_err": std_err,
"upper_ci": mean + q * std_err,
"lower_ci": mean - q * std_err,
"std": std,
"iqr": iqr,
"mad": mad,
"coef_var": coef_var,
"range": pd_ptp(df),
"max": df.max(),
"min": df.min(),
"skew": jb[2],
"kurtosis": jb[3],
"iqr_normal": iqr / np.diff(stats.norm.ppf([0.25, 0.75])),
"mad_normal": mad / np.sqrt(2 / np.pi),
"jarque_bera": jb[0],
"jarque_bera_pval": jb[1],
"mode": pd.Series(mode, index=cols),
"mode_freq": pd.Series(mode_freq, index=cols),
"median": df.median(),
}
final = {k: v for k, v in results.items() if k in self._stats}
results_df = pd.DataFrame(
list(final.values()), columns=cols, index=list(final.keys())
)
if "percentiles" not in self._stats:
return results_df
# Pandas before 1.0 cannot handle empty DF
if df.shape[1] > 0:
# TODO: Remove when extension types support quantile
perc = _df.quantile(self._percentiles / 100).astype(float)
else:
perc = pd.DataFrame(index=self._percentiles / 100, dtype=float)
if np.all(np.floor(100 * perc.index) == (100 * perc.index)):
perc.index = [f"{int(100 * idx)}%" for idx in perc.index]
else:
dupe = True
scale = 100
index = perc.index
while dupe:
scale *= 10
idx = np.floor(scale * perc.index)
if np.all(np.diff(idx) > 0):
dupe = False
index = np.floor(scale * index) / (scale / 100)
fmt = f"0.{len(str(scale//100))-1}f"
output = f"{{0:{fmt}}}%"
perc.index = [output.format(val) for val in index]
# Add in the names of the percentiles to the output
self._stats = self._stats + perc.index.tolist()
return self._reorder(pd.concat([results_df, perc], axis=0))
@cache_readonly
def categorical(self) -> pd.DataFrame:
"""
Descriptive statistics for categorical data
Returns
-------
DataFrame
The statistics of the categorical columns
"""
df = self._data.loc[:, [col for col in self._is_cat_like]]
k = df.shape[1]
cols = df.columns
vc = {col: df[col].value_counts(normalize=True) for col in df}
distinct = pd.Series(
{col: vc[col].shape[0] for col in vc}, dtype=np.int64
)
top = {}
freq = {}
for col in vc:
single = vc[col]
if single.shape[0] >= self._ntop:
top[col] = single.index[: self._ntop]
freq[col] = np.asarray(single.iloc[: self._ntop])
else:
val = list(single.index)
val += [None] * (self._ntop - len(val))
top[col] = val
freq_val = list(single)
freq_val += [np.nan] * (self._ntop - len(freq_val))
freq[col] = np.asarray(freq_val)
index = [f"top_{i}" for i in range(1, self._ntop + 1)]
top_df = pd.DataFrame(top, dtype="object", index=index, columns=cols)
import sys
import pandas as pd
inputdat=sys.argv[1]
outputf=sys.argv[2]
dat=pd.read_csv(inputdat,sep='\t',index_col=0)
print([i for i in dat.index][0])
ex=[i for i in dat.index if 'ae' in i]
no=[i for i in dat.index if 'aw' in i]
dat_ex=dat.loc[dat.index.isin(ex)]
dat_no=dat.loc[dat.index.isin(no)]
dat_ex['label']=[1 for i in ex]
dat_no['label']=[0 for i in no]
result=pd.concat([dat_ex,dat_no])
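# Assumed final step (not present in the original snippet): the script takes an
# output path on the command line, so presumably the labelled table is written
# back out as tab-separated text.
result.to_csv(outputf,sep='\t')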
import pandas as pd
import pytest
from kartothek.io.dask.dataframe import collect_dataset_metadata
from kartothek.io.eager import (
store_dataframes_as_dataset,
update_dataset_from_dataframes,
)
from kartothek.io_components.metapartition import _METADATA_SCHEMA, MetaPartition
from kartothek.io_components.write import store_dataset_from_partitions
from kartothek.serialization import ParquetSerializer
def test_collect_dataset_metadata(store_session_factory, dataset):
df_stats = collect_dataset_metadata(
store=store_session_factory,
dataset_uuid="dataset_uuid",
table_name="table",
predicates=None,
frac=1,
).compute()
actual = df_stats.drop(
columns=[
"row_group_compressed_size",
"row_group_uncompressed_size",
"serialized_size",
],
axis=1,
)
actual.sort_values(by=["partition_label", "row_group_id"], inplace=True)
expected = pd.DataFrame(
data={
"partition_label": ["cluster_1", "cluster_2"],
"row_group_id": [0, 0],
"number_rows_total": [1, 1],
"number_row_groups": [1, 1],
"number_rows_per_row_group": [1, 1],
},
index=[0, 0],
)
pd.testing.assert_frame_equal(actual, expected)
def test_collect_dataset_metadata_predicates(store_session_factory, dataset):
predicates = [[("P", "==", 1)]]
df_stats = collect_dataset_metadata(
store=store_session_factory,
dataset_uuid="dataset_uuid",
table_name="table",
predicates=predicates,
frac=1,
).compute()
actual = df_stats.drop(
columns=[
"row_group_compressed_size",
"row_group_uncompressed_size",
"serialized_size",
],
axis=1,
)
actual.sort_values(by=["partition_label", "row_group_id"], inplace=True)
# Predicates are only evaluated on index level and have therefore no effect on this dataset
expected = pd.DataFrame(
data={
"partition_label": ["cluster_1", "cluster_2"],
"row_group_id": [0, 0],
"number_rows_total": [1, 1],
"number_row_groups": [1, 1],
"number_rows_per_row_group": [1, 1],
},
index=[0, 0],
)
pd.testing.assert_frame_equal(actual, expected)
def test_collect_dataset_metadata_predicates_on_index(store_factory):
df = pd.DataFrame(
data={"P": range(10), "L": ["a", "a", "a", "a", "a", "b", "b", "b", "b", "b"]}
)
store_dataframes_as_dataset(
store=store_factory, dataset_uuid="dataset_uuid", partition_on=["L"], dfs=[df],
)
predicates = [[("L", "==", "b")]]
df_stats = collect_dataset_metadata(
store=store_factory,
dataset_uuid="dataset_uuid",
table_name="table",
predicates=predicates,
frac=1,
).compute()
assert "L=b" in df_stats["partition_label"].values[0]
df_stats.sort_values(by=["partition_label", "row_group_id"], inplace=True)
actual = df_stats.drop(
columns=[
"partition_label",
"row_group_compressed_size",
"row_group_uncompressed_size",
"serialized_size",
],
axis=1,
)
expected = pd.DataFrame(
data={
"row_group_id": [0],
"number_rows_total": [5],
"number_row_groups": [1],
"number_rows_per_row_group": [5],
},
index=[0],
)
pd.testing.assert_frame_equal(actual, expected)
def test_collect_dataset_metadata_predicates_row_group_size(store_factory):
ps = ParquetSerializer(chunk_size=2)
df = pd.DataFrame(
data={"P": range(10), "L": ["a", "a", "a", "a", "a", "b", "b", "b", "b", "b"]}
)
store_dataframes_as_dataset(
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["L"],
dfs=[df],
df_serializer=ps,
)
predicates = [[("L", "==", "a")]]
df_stats = collect_dataset_metadata(
store=store_factory,
dataset_uuid="dataset_uuid",
table_name="table",
predicates=predicates,
frac=1,
).compute()
for part_label in df_stats["partition_label"]:
assert "L=a" in part_label
df_stats.sort_values(by=["partition_label", "row_group_id"], inplace=True)
actual = df_stats.drop(
columns=[
"partition_label",
"row_group_compressed_size",
"row_group_uncompressed_size",
"serialized_size",
],
axis=1,
)
expected = pd.DataFrame(
data={
"row_group_id": [0, 1, 2],
"number_rows_total": [5, 5, 5],
"number_row_groups": [3, 3, 3],
"number_rows_per_row_group": [2, 2, 1],
},
index=[0, 1, 2],
)
pd.testing.assert_frame_equal(actual, expected)
def test_collect_dataset_metadata_frac_smoke(store_session_factory, dataset):
df_stats = collect_dataset_metadata(
store=store_session_factory,
dataset_uuid="dataset_uuid",
table_name="table",
frac=0.8,
).compute()
columns = {
"partition_label",
"row_group_id",
"row_group_compressed_size",
"row_group_uncompressed_size",
"number_rows_total",
"number_row_groups",
"serialized_size",
"number_rows_per_row_group",
}
assert set(df_stats.columns) == columns
def test_collect_dataset_metadata_empty_dataset_mp(store_factory):
mp = MetaPartition(label="cluster_1")
store_dataset_from_partitions(
partition_list=[mp], store=store_factory, dataset_uuid="dataset_uuid"
)
df_stats = collect_dataset_metadata(
store=store_factory, dataset_uuid="dataset_uuid", table_name="table"
).compute()
expected = pd.DataFrame(columns=_METADATA_SCHEMA.keys())
expected = expected.astype(_METADATA_SCHEMA)
pd.testing.assert_frame_equal(expected, df_stats, check_index_type=False)
def test_collect_dataset_metadata_empty_dataset(store_factory):
df = pd.DataFrame(columns=["A", "b"], index=pd.RangeIndex(start=0, stop=0))
store_dataframes_as_dataset(
store=store_factory, dataset_uuid="dataset_uuid", dfs=[df], partition_on=["A"]
)
df_stats = collect_dataset_metadata(
store=store_factory, dataset_uuid="dataset_uuid", table_name="table",
).compute()
expected = pd.DataFrame(columns=_METADATA_SCHEMA.keys())
expected = expected.astype(_METADATA_SCHEMA)
pd.testing.assert_frame_equal(expected, df_stats)
def test_collect_dataset_metadata_concat(store_factory):
"""Smoke-test concatenation of empty and non-empty dataset metadata collections."""
df = pd.DataFrame(data={"A": [1, 1, 1, 1], "b": [1, 1, 2, 2]})
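# Assumed continuation (not present in the original source): store the frame,
# collect its metadata, and concatenate it with an empty metadata frame to
# exercise the concat path. Sketch only, reusing only calls already made above.
store_dataframes_as_dataset(store=store_factory, dataset_uuid="dataset_uuid", dfs=[df])
df_stats = collect_dataset_metadata(
    store=store_factory, dataset_uuid="dataset_uuid", table_name="table", frac=1,
).compute()
empty = pd.DataFrame(columns=_METADATA_SCHEMA.keys()).astype(_METADATA_SCHEMA)
pd.concat([empty, df_stats])  # should not raise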
import numpy as np
import pandas as pd
# from pyranges.methods.join import _both_dfs
np.random.seed(0)
def sort_one_by_one(d, col1, col2):
"""
Equivalent to pd.sort_values(by=[col1, col2]), but faster.
"""
d = d.sort_values(by=[col2])
return d.sort_values(by=[col1], kind='mergesort')
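# Illustrative sketch (not part of the original module): sort_one_by_one relies
# on mergesort being stable, so the secondary ordering from the first sort
# survives the second. Hypothetical toy frame for demonstration only.
def _sort_one_by_one_example():
    df = pd.DataFrame({"Chromosome": ["chr1", "chr1", "chr2"], "Start": [5, 1, 3]})
    fast = sort_one_by_one(df, "Chromosome", "Start")
    slow = df.sort_values(by=["Chromosome", "Start"])
    return fast.equals(slow)  # expected: True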
def _insert_distance(ocdf, dist, suffix):
if "Distance" not in ocdf:
distance_column_name = "Distance"
elif "Distance" + suffix not in ocdf:
distance_column_name = "Distance" + suffix
else:
i = 1
while "Distance" + str(i) in ocdf:
i += 1
distance_column_name = "Distance" + str(i)
ocdf.insert(ocdf.shape[1], distance_column_name,
            pd.Series(dist, index=ocdf.index))
import csv
import json
import os
import shutil
import uuid
from functools import partial
from io import StringIO
import pandas as pd
from pandas import DataFrame
import pyproj
import requests
import shapely.geometry as shapely_geom
import shapely.wkt as shapely_wkt
import app.helper
from app import celery
from app import model
from geojson import Feature, FeatureCollection
from shapely.ops import transform
import xml.etree.ElementTree as ET
from urllib.parse import urlparse, parse_qs
import re
from .. import constants, dbGIS as db
from ..decorators.exceptions import RequestException
from .. import helper
ALLOWED_EXTENSIONS = set(['tif', 'csv'])
GREATER_OR_EQUAL = 'greaterOrEqual'
GREATER = 'greater'
LESSER_OR_EQUAL = 'lesserOrEqual'
LESSER = 'lesser'
EQUAL = 'equal'
class Uploads(db.Model):
'''
This class will describe the model of a file uploaded by a user
'''
__tablename__ = 'uploads'
__table_args__ = (
{"schema": 'user'}
)
id = db.Column(db.Integer, primary_key=True)
uuid = db.Column(db.String(255))
name = db.Column(db.String(255))
layer = db.Column(db.String(255))
layer_type = db.Column(db.String(255))
size = db.Column(db.Numeric)
url = db.Column(db.String(255))
is_generated = db.Column(db.Integer)
user_id = db.Column(db.Integer, db.ForeignKey('user.users.id'))
@celery.task(name='generate_tiles_file_upload')
def generate_tiles(upload_folder, grey_tif, layer_type, upload_uuid, user_currently_used_space):
'''
This function is used to generate the various tiles of a layer in the db.
:param upload_folder: the folder of the upload
:param grey_tif: the url to the input file
:param layer_type: the type of the layer chosen for the input
:param upload_uuid: the uuid of the upload
:param user_currently_used_space: the space currently used by the user
'''
# we set up the directory for the tif
directory_for_tiles = upload_folder + '/tiles'
tile_path = directory_for_tiles
access_rights = 0o755
try:
os.mkdir(tile_path, access_rights)
except OSError:
print ("Creation of the directory %s failed" % tile_path)
else:
print ("Successfully created the directory %s" % tile_path)
rgb_tif = upload_folder + '/rgba.tif'
if layer_type != 'custom':
helper.colorize(layer_type, grey_tif, rgb_tif)
else:
args_gdal = app.helper.commands_in_array("gdal_translate -of GTiff -expand rgba {} {} -co COMPRESS=DEFLATE ".format(grey_tif, rgb_tif))
app.helper.run_command(args_gdal)
try:
# commands launch to obtain the level of zooms
args_tiles = app.helper.commands_in_array("python3 app/helper/gdal2tiles.py -p 'mercator' -s 'EPSG:3035' -w 'leaflet' -r 'average' -z '4-11' {} {} ".format(rgb_tif, tile_path))
app.helper.run_command(args_tiles)
except :
generate_state = 10
else:
generate_state = 0
# updating generate state of upload
upload = Uploads.query.filter_by(url=grey_tif).first()
upload.is_generated = generate_state
db.session.commit()
check_map_size(upload_folder, user_currently_used_space, upload_uuid)
return generate_state
@celery.task(name='generate_geojson_file_upload')
def generate_geojson(upload_folder, layer_type, upload_uuid, user_currently_used_space):
'''
This function is used to generate the geojson of a layer in the db.
:param upload_folder: the folder of the upload
:param layer_type: the name of the layer type choosen for the input
:param upload_uuid: the uuid of the upload
:param user_currently_used_space: the space currently used by the user
'''
upload_csv = upload_folder + '/data.csv'
try:
geojson_file_path = upload_folder + '/data.json'
with open(geojson_file_path, 'w') as geojson_file:
json.dump(csv_to_geojson(upload_csv, layer_type), geojson_file)
except:
generate_state = 10
else:
generate_state = 0
# updating generate state of upload
upload = Uploads.query.filter_by(uuid=upload_uuid).first()
upload.is_generated = generate_state
db.session.commit()
check_map_size(upload_folder, user_currently_used_space, upload_uuid)
return generate_state
def check_map_size(upload_folder, user_currently_used_space, upload_uuid):
'''
This method is used to check the size of the file
:param upload_folder: the folder where the upload is stored
:param user_currently_used_space: the space already used by the user
:param upload_uuid: the uuid of the upload
:return:
'''
size = 0
for dirpath, dirnames, filenames in os.walk(upload_folder):
for f in filenames:
fp = os.path.join(dirpath, f)
size += float(os.path.getsize(fp)) / 1000000
# we need to check if there is enough disk space for the dataset
total_used_space = user_currently_used_space + size
upload = Uploads.query.filter_by(uuid=upload_uuid).first()
if total_used_space > constants.USER_DISC_SPACE_AVAILABLE:
db.session.delete(upload)
shutil.rmtree(upload_folder)
else:
upload.size = size
db.session.commit()
def generate_csv_string(result):
'''
This method will generate the csv stringIO containing the result of a query without extra data
:param result: the sql result of a csv export
:return resultIO: the StringIO result formatted appropriately
'''
columns_name = result.keys()
# if the selection is empty, we return only the columns names
if result.rowcount == 0:
df = | DataFrame(columns=columns_name) | pandas.DataFrame |
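# Assumed continuation (not present in the original source): per the docstring,
# the rows are serialized into a StringIO object and returned. Sketch only; the
# exact handling of the non-empty branch is an assumption.
else:
    df = DataFrame(result.fetchall(), columns=columns_name)
resultIO = StringIO()
df.to_csv(resultIO, index=False)
resultIO.seek(0)
return resultIO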
### Official land price K-NN ###
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score, classification_report
import sklearn.neighbors as neg
import matplotlib.pyplot as plt
import folium
import json
import sklearn.preprocessing as pp
## Data preprocessing ## --> needs outlier removal and standardization ##
all_data = pd.read_csv("data-set/house_clean02.csv", dtype=np.str, encoding='euc-kr') # encoding: 'euc-kr'
# add official land price per unit area # --> values are strings, so convert the type with astype
all_data['y_price'] = all_data['공시지가'].astype(np.float32) / all_data['면적'].astype(np.float32)
# X: (x, y) / y: (official land price per unit area) #
X = all_data.iloc[:, 9:11].astype(np.float32) # shape (28046, 2)
y = all_data['y_price'] # shape (28046, )
## Robust scaling ## --> normalization that accounts for outliers (vs. plain min-max)
rs = pp.RobustScaler()
y_scale = rs.fit_transform(np.array(y).reshape(-1, 1))
## Visualization ## --> needs an alternative (file too large to open)
#city_hall = (37.56629, 126.979808)
#map_osm = folium.Map(location=city_hall, zoom_start=11)
#for i in range(len(X)):
# location = (X.iloc[i, 1], X.iloc[i, 0]) # latitude and longitude are swapped
# folium.Marker(location, popup=str(y[i])).add_to(map_osm)
#map_osm.save("price.html") # save
## Daycare center data preprocessing ##
all_center = json.load(open("d:/project_data/allmap.json", encoding='utf-8'))
c_header = all_center['DESCRIPTION'] # split the JSON into header and data
c_data = all_center['DATA']
c_alldf = pd.DataFrame(c_data)
# select only the needed columns #
c_alldf = c_alldf[['cot_conts_name', 'cot_coord_x', 'cot_coord_y', 'cot_value_01', 'cot_gu_name']]
c_alldf.columns = ['name', 'x', 'y', 'kinds', 'location']
x_test = c_alldf[c_alldf['kinds'] == "국공립"] # keep only national/public centers
## train_test split ## --> train (X: coordinates, y: land price) / test (X: daycare centers) ##
## KNN regressor ##
k_list = [3,5,10,15,25,50]
# minkowski --> p = 2 // regression to the mean --> regressor #
knn_fit = neg.KNeighborsRegressor(n_neighbors=k_list[0], p=2, metric='minkowski')
knn_fit.fit(X, y_scale)
## predict --> apply the average price ##
pred = knn_fit.predict(x_test.iloc[:, 1:3])
x_test['소득추정'] = pred
for i in range(len(x_test['location'])):
x_test['location'].values[i] = x_test['location'].values[i][:-1] # strip the trailing '구' (district suffix)
## estimate the average income per district with groupby ##
mean = x_test.groupby(['location'], as_index=False).mean()
# work around broken Korean font rendering #
from matplotlib import font_manager, rc
font_name = font_manager.FontProperties(fname="c:/Windows/Fonts/malgun.ttf").get_name()
rc('font', family=font_name)
# visualization plot # --> estimated average value per district (TEST)
price_pred = pd.DataFrame(mean['소득추정'])
price_pred.index = mean['location']
#plt.figure()
#plt.plot(price_pred, '-')
#plt.show()
## compare the mean estimates for each k ## main code --> estimate district-level means
plt.figure()
for i in range(len(k_list)):
knn_fit = neg.KNeighborsRegressor(n_neighbors=k_list[i], p=2, metric='minkowski')
knn_fit.fit(X, y_scale)
x_test["predK%i" %k_list[i]] = knn_fit.predict(x_test.iloc[:, 1:3])
mean = x_test.groupby(['location'], as_index=False).mean()
price_pred = pd.DataFrame(mean.iloc[:, -1])
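# Assumed continuation of the loop body (not present in the original snippet):
# mirror the single-k plot above by indexing the per-district means by district
# name and drawing one line per k so the estimates can be compared.
price_pred.index = mean['location']
plt.plot(price_pred, '-', label="k=%i" % k_list[i])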
# -*- coding: utf-8 -*-
try:
import json
except ImportError:
import simplejson as json
import math
import pytz
import locale
import pytest
import time
import datetime
import calendar
import re
import decimal
import dateutil
from functools import partial
from pandas.compat import range, StringIO, u
from pandas._libs.tslib import Timestamp
import pandas._libs.json as ujson
import pandas.compat as compat
import numpy as np
from pandas import DataFrame, Series, Index, NaT, DatetimeIndex, date_range
import pandas.util.testing as tm
json_unicode = (json.dumps if compat.PY3
else partial(json.dumps, encoding="utf-8"))
def _clean_dict(d):
"""
Sanitize dictionary for JSON by converting all keys to strings.
Parameters
----------
d : dict
The dictionary to convert.
Returns
-------
cleaned_dict : dict
"""
return {str(k): v for k, v in compat.iteritems(d)}
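# Illustrative sketch (not part of the original test module): _clean_dict turns
# every key into a string so a decoded "split"-oriented payload can be splatted
# into the DataFrame/Series/Index constructors used below.
def _clean_dict_example():
    return _clean_dict({1: "a", "b": 2})  # {"1": "a", "b": 2}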
@pytest.fixture(params=[
None, # Column indexed by default.
"split",
"records",
"values",
"index"])
def orient(request):
return request.param
@pytest.fixture(params=[None, True])
def numpy(request):
return request.param
class TestUltraJSONTests(object):
@pytest.mark.skipif(compat.is_platform_32bit(),
reason="not compliant on 32-bit, xref #15865")
def test_encode_decimal(self):
sut = decimal.Decimal("1337.1337")
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert decoded == 1337.1337
sut = decimal.Decimal("0.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.94")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "0.9"
decoded = ujson.decode(encoded)
assert decoded == 0.9
sut = decimal.Decimal("1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "2.0"
decoded = ujson.decode(encoded)
assert decoded == 2.0
sut = decimal.Decimal("-1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "-2.0"
decoded = ujson.decode(encoded)
assert decoded == -2.0
sut = decimal.Decimal("0.995")
encoded = ujson.encode(sut, double_precision=2)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.9995")
encoded = ujson.encode(sut, double_precision=3)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.99999999999999944")
encoded = ujson.encode(sut, double_precision=15)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
@pytest.mark.parametrize("ensure_ascii", [True, False])
def test_encode_string_conversion(self, ensure_ascii):
string_input = "A string \\ / \b \f \n \r \t </script> &"
not_html_encoded = ('"A string \\\\ \\/ \\b \\f \\n '
'\\r \\t <\\/script> &"')
html_encoded = ('"A string \\\\ \\/ \\b \\f \\n \\r \\t '
'\\u003c\\/script\\u003e \\u0026"')
def helper(expected_output, **encode_kwargs):
output = ujson.encode(string_input,
ensure_ascii=ensure_ascii,
**encode_kwargs)
assert output == expected_output
assert string_input == json.loads(output)
assert string_input == ujson.decode(output)
# Default behavior assumes encode_html_chars=False.
helper(not_html_encoded)
# Make sure explicit encode_html_chars=False works.
helper(not_html_encoded, encode_html_chars=False)
# Make sure explicit encode_html_chars=True does the encoding.
helper(html_encoded, encode_html_chars=True)
@pytest.mark.parametrize("long_number", [
-4342969734183514, -12345678901234.56789012, -528656961.4399388
])
def test_double_long_numbers(self, long_number):
sut = {u("a"): long_number}
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert sut == decoded
def test_encode_non_c_locale(self):
lc_category = locale.LC_NUMERIC
# We just need one of these locales to work.
for new_locale in ("it_IT.UTF-8", "Italian_Italy"):
if tm.can_set_locale(new_locale, lc_category):
with tm.set_locale(new_locale, lc_category):
assert ujson.loads(ujson.dumps(4.78e60)) == 4.78e60
assert ujson.loads("4.78", precise_float=True) == 4.78
break
def test_decimal_decode_test_precise(self):
sut = {u("a"): 4.56}
encoded = ujson.encode(sut)
decoded = ujson.decode(encoded, precise_float=True)
assert sut == decoded
@pytest.mark.skipif(compat.is_platform_windows() and not compat.PY3,
reason="buggy on win-64 for py2")
def test_encode_double_tiny_exponential(self):
num = 1e-40
assert num == ujson.decode(ujson.encode(num))
num = 1e-100
assert num == ujson.decode(ujson.encode(num))
num = -1e-45
assert num == ujson.decode(ujson.encode(num))
num = -1e-145
assert np.allclose(num, ujson.decode(ujson.encode(num)))
@pytest.mark.parametrize("unicode_key", [
u("key1"), u("بن")
])
def test_encode_dict_with_unicode_keys(self, unicode_key):
unicode_dict = {unicode_key: u("value1")}
assert unicode_dict == ujson.decode(ujson.encode(unicode_dict))
@pytest.mark.parametrize("double_input", [
math.pi,
-math.pi # Should work with negatives too.
])
def test_encode_double_conversion(self, double_input):
output = ujson.encode(double_input)
assert round(double_input, 5) == round(json.loads(output), 5)
assert round(double_input, 5) == round(ujson.decode(output), 5)
def test_encode_with_decimal(self):
decimal_input = 1.0
output = ujson.encode(decimal_input)
assert output == "1.0"
def test_encode_array_of_nested_arrays(self):
nested_input = [[[[]]]] * 20
output = ujson.encode(nested_input)
assert nested_input == json.loads(output)
assert nested_input == ujson.decode(output)
nested_input = np.array(nested_input)
tm.assert_numpy_array_equal(nested_input, ujson.decode(
output, numpy=True, dtype=nested_input.dtype))
def test_encode_array_of_doubles(self):
doubles_input = [31337.31337, 31337.31337,
31337.31337, 31337.31337] * 10
output = ujson.encode(doubles_input)
assert doubles_input == json.loads(output)
assert doubles_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(doubles_input),
ujson.decode(output, numpy=True))
def test_double_precision(self):
double_input = 30.012345678901234
output = ujson.encode(double_input, double_precision=15)
assert double_input == json.loads(output)
assert double_input == ujson.decode(output)
for double_precision in (3, 9):
output = ujson.encode(double_input,
double_precision=double_precision)
rounded_input = round(double_input, double_precision)
assert rounded_input == json.loads(output)
assert rounded_input == ujson.decode(output)
@pytest.mark.parametrize("invalid_val", [
20, -1, "9", None
])
def test_invalid_double_precision(self, invalid_val):
double_input = 30.12345678901234567890
expected_exception = (ValueError if isinstance(invalid_val, int)
else TypeError)
with pytest.raises(expected_exception):
ujson.encode(double_input, double_precision=invalid_val)
def test_encode_string_conversion2(self):
string_input = "A string \\ / \b \f \n \r \t"
output = ujson.encode(string_input)
assert string_input == json.loads(output)
assert string_input == ujson.decode(output)
assert output == '"A string \\\\ \\/ \\b \\f \\n \\r \\t"'
@pytest.mark.parametrize("unicode_input", [
"Räksmörgås اسامة بن محمد بن عوض بن لادن",
"\xe6\x97\xa5\xd1\x88"
])
def test_encode_unicode_conversion(self, unicode_input):
enc = ujson.encode(unicode_input)
dec = ujson.decode(enc)
assert enc == json_unicode(unicode_input)
assert dec == json.loads(enc)
def test_encode_control_escaping(self):
escaped_input = "\x19"
enc = ujson.encode(escaped_input)
dec = ujson.decode(enc)
assert escaped_input == dec
assert enc == json_unicode(escaped_input)
def test_encode_unicode_surrogate_pair(self):
surrogate_input = "\xf0\x90\x8d\x86"
enc = ujson.encode(surrogate_input)
dec = ujson.decode(enc)
assert enc == json_unicode(surrogate_input)
assert dec == json.loads(enc)
def test_encode_unicode_4bytes_utf8(self):
four_bytes_input = "\xf0\x91\x80\xb0TRAILINGNORMAL"
enc = ujson.encode(four_bytes_input)
dec = ujson.decode(enc)
assert enc == json_unicode(four_bytes_input)
assert dec == json.loads(enc)
def test_encode_unicode_4bytes_utf8highest(self):
four_bytes_input = "\xf3\xbf\xbf\xbfTRAILINGNORMAL"
enc = ujson.encode(four_bytes_input)
dec = ujson.decode(enc)
assert enc == json_unicode(four_bytes_input)
assert dec == json.loads(enc)
def test_encode_array_in_array(self):
arr_in_arr_input = [[[[]]]]
output = ujson.encode(arr_in_arr_input)
assert arr_in_arr_input == json.loads(output)
assert output == json.dumps(arr_in_arr_input)
assert arr_in_arr_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(arr_in_arr_input),
ujson.decode(output, numpy=True))
@pytest.mark.parametrize("num_input", [
31337,
-31337, # Negative number.
-9223372036854775808 # Large negative number.
])
def test_encode_num_conversion(self, num_input):
output = ujson.encode(num_input)
assert num_input == json.loads(output)
assert output == json.dumps(num_input)
assert num_input == ujson.decode(output)
def test_encode_list_conversion(self):
list_input = [1, 2, 3, 4]
output = ujson.encode(list_input)
assert list_input == json.loads(output)
assert list_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(list_input),
ujson.decode(output, numpy=True))
def test_encode_dict_conversion(self):
dict_input = {"k1": 1, "k2": 2, "k3": 3, "k4": 4}
output = ujson.encode(dict_input)
assert dict_input == json.loads(output)
assert dict_input == ujson.decode(output)
@pytest.mark.parametrize("builtin_value", [None, True, False])
def test_encode_builtin_values_conversion(self, builtin_value):
output = ujson.encode(builtin_value)
assert builtin_value == json.loads(output)
assert output == json.dumps(builtin_value)
assert builtin_value == ujson.decode(output)
def test_encode_datetime_conversion(self):
datetime_input = datetime.datetime.fromtimestamp(time.time())
output = ujson.encode(datetime_input, date_unit="s")
expected = calendar.timegm(datetime_input.utctimetuple())
assert int(expected) == json.loads(output)
assert int(expected) == ujson.decode(output)
def test_encode_date_conversion(self):
date_input = datetime.date.fromtimestamp(time.time())
output = ujson.encode(date_input, date_unit="s")
tup = (date_input.year, date_input.month, date_input.day, 0, 0, 0)
expected = calendar.timegm(tup)
assert int(expected) == json.loads(output)
assert int(expected) == ujson.decode(output)
@pytest.mark.parametrize("test", [
datetime.time(),
datetime.time(1, 2, 3),
datetime.time(10, 12, 15, 343243),
])
def test_encode_time_conversion_basic(self, test):
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
def test_encode_time_conversion_pytz(self):
# see gh-11473: to_json segfaults with timezone-aware datetimes
test = datetime.time(10, 12, 15, 343243, pytz.utc)
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
def test_encode_time_conversion_dateutil(self):
# see gh-11473: to_json segfaults with timezone-aware datetimes
test = datetime.time(10, 12, 15, 343243, dateutil.tz.tzutc())
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
@pytest.mark.parametrize("decoded_input", [
NaT,
np.datetime64("NaT"),
np.nan,
np.inf,
-np.inf
])
def test_encode_as_null(self, decoded_input):
assert ujson.encode(decoded_input) == "null", "Expected null"
def test_datetime_units(self):
val = datetime.datetime(2013, 8, 17, 21, 17, 12, 215504)
stamp = Timestamp(val)
roundtrip = ujson.decode(ujson.encode(val, date_unit='s'))
assert roundtrip == stamp.value // 10**9
roundtrip = ujson.decode(ujson.encode(val, date_unit='ms'))
assert roundtrip == stamp.value // 10**6
roundtrip = ujson.decode(ujson.encode(val, date_unit='us'))
assert roundtrip == stamp.value // 10**3
roundtrip = ujson.decode(ujson.encode(val, date_unit='ns'))
assert roundtrip == stamp.value
pytest.raises(ValueError, ujson.encode, val, date_unit='foo')
def test_encode_to_utf8(self):
unencoded = "\xe6\x97\xa5\xd1\x88"
enc = ujson.encode(unencoded, ensure_ascii=False)
dec = ujson.decode(enc)
assert enc == json_unicode(unencoded, ensure_ascii=False)
assert dec == json.loads(enc)
def test_decode_from_unicode(self):
unicode_input = u("{\"obj\": 31337}")
dec1 = ujson.decode(unicode_input)
dec2 = ujson.decode(str(unicode_input))
assert dec1 == dec2
def test_encode_recursion_max(self):
# 8 is the max recursion depth
class O2(object):
member = 0
pass
class O1(object):
member = 0
pass
decoded_input = O1()
decoded_input.member = O2()
decoded_input.member.member = decoded_input
with pytest.raises(OverflowError):
ujson.encode(decoded_input)
def test_decode_jibberish(self):
jibberish = "fdsa sda v9sa fdsa"
with pytest.raises(ValueError):
ujson.decode(jibberish)
@pytest.mark.parametrize("broken_json", [
"[", # Broken array start.
"{", # Broken object start.
"]", # Broken array end.
"}", # Broken object end.
])
def test_decode_broken_json(self, broken_json):
with pytest.raises(ValueError):
ujson.decode(broken_json)
@pytest.mark.parametrize("too_big_char", [
"[",
"{",
])
def test_decode_depth_too_big(self, too_big_char):
with pytest.raises(ValueError):
ujson.decode(too_big_char * (1024 * 1024))
@pytest.mark.parametrize("bad_string", [
"\"TESTING", # Unterminated.
"\"TESTING\\\"", # Unterminated escape.
"tru", # Broken True.
"fa", # Broken False.
"n", # Broken None.
])
def test_decode_bad_string(self, bad_string):
with pytest.raises(ValueError):
ujson.decode(bad_string)
@pytest.mark.parametrize("broken_json", [
'{{1337:""}}',
'{{"key":"}',
'[[[true',
])
def test_decode_broken_json_leak(self, broken_json):
for _ in range(1000):
with pytest.raises(ValueError):
ujson.decode(broken_json)
@pytest.mark.parametrize("invalid_dict", [
"{{{{31337}}}}", # No key.
"{{{{\"key\":}}}}", # No value.
"{{{{\"key\"}}}}", # No colon or value.
])
def test_decode_invalid_dict(self, invalid_dict):
with pytest.raises(ValueError):
ujson.decode(invalid_dict)
@pytest.mark.parametrize("numeric_int_as_str", [
"31337", "-31337" # Should work with negatives.
])
def test_decode_numeric_int(self, numeric_int_as_str):
assert int(numeric_int_as_str) == ujson.decode(numeric_int_as_str)
@pytest.mark.skipif(compat.PY3, reason="only PY2")
def test_encode_unicode_4bytes_utf8_fail(self):
with pytest.raises(OverflowError):
ujson.encode("\xfd\xbf\xbf\xbf\xbf\xbf")
def test_encode_null_character(self):
wrapped_input = "31337 \x00 1337"
output = ujson.encode(wrapped_input)
assert wrapped_input == json.loads(output)
assert output == json.dumps(wrapped_input)
assert wrapped_input == ujson.decode(output)
alone_input = "\x00"
output = ujson.encode(alone_input)
assert alone_input == json.loads(output)
assert output == json.dumps(alone_input)
assert alone_input == ujson.decode(output)
assert '" \\u0000\\r\\n "' == ujson.dumps(u(" \u0000\r\n "))
def test_decode_null_character(self):
wrapped_input = "\"31337 \\u0000 31337\""
assert ujson.decode(wrapped_input) == json.loads(wrapped_input)
def test_encode_list_long_conversion(self):
long_input = [9223372036854775807, 9223372036854775807,
9223372036854775807, 9223372036854775807,
9223372036854775807, 9223372036854775807]
output = ujson.encode(long_input)
assert long_input == json.loads(output)
assert long_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(long_input),
ujson.decode(output, numpy=True,
dtype=np.int64))
def test_encode_long_conversion(self):
long_input = 9223372036854775807
output = ujson.encode(long_input)
assert long_input == json.loads(output)
assert output == json.dumps(long_input)
assert long_input == ujson.decode(output)
@pytest.mark.parametrize("int_exp", [
"1337E40", "1.337E40", "1337E+9", "1.337e+40", "1.337E-4"
])
def test_decode_numeric_int_exp(self, int_exp):
assert ujson.decode(int_exp) == json.loads(int_exp)
def test_dump_to_file(self):
f = StringIO()
ujson.dump([1, 2, 3], f)
assert "[1,2,3]" == f.getvalue()
def test_dump_to_file_like(self):
class FileLike(object):
def __init__(self):
self.bytes = ''
def write(self, data_bytes):
self.bytes += data_bytes
f = FileLike()
ujson.dump([1, 2, 3], f)
assert "[1,2,3]" == f.bytes
def test_dump_file_args_error(self):
with pytest.raises(TypeError):
ujson.dump([], "")
def test_load_file(self):
data = "[1,2,3,4]"
exp_data = [1, 2, 3, 4]
f = StringIO(data)
assert exp_data == ujson.load(f)
f = StringIO(data)
tm.assert_numpy_array_equal(np.array(exp_data),
ujson.load(f, numpy=True))
def test_load_file_like(self):
class FileLike(object):
def read(self):
try:
self.end
except AttributeError:
self.end = True
return "[1,2,3,4]"
exp_data = [1, 2, 3, 4]
f = FileLike()
assert exp_data == ujson.load(f)
f = FileLike()
tm.assert_numpy_array_equal(np.array(exp_data),
ujson.load(f, numpy=True))
def test_load_file_args_error(self):
with pytest.raises(TypeError):
ujson.load("[]")
def test_version(self):
assert re.match(r'^\d+\.\d+(\.\d+)?$', ujson.__version__), \
"ujson.__version__ must be a string like '1.4.0'"
def test_encode_numeric_overflow(self):
with pytest.raises(OverflowError):
ujson.encode(12839128391289382193812939)
def test_encode_numeric_overflow_nested(self):
class Nested(object):
x = 12839128391289382193812939
for _ in range(0, 100):
with pytest.raises(OverflowError):
ujson.encode(Nested())
@pytest.mark.parametrize("val", [
3590016419, 2**31, 2**32, (2**32) - 1
])
def test_decode_number_with_32bit_sign_bit(self, val):
# Test that numbers that fit within 32 bits but would have the
# sign bit set (2**31 <= x < 2**32) are decoded properly.
doc = '{{"id": {val}}}'.format(val=val)
assert ujson.decode(doc)["id"] == val
def test_encode_big_escape(self):
# Make sure no Exception is raised.
for _ in range(10):
base = '\u00e5'.encode("utf-8") if compat.PY3 else "\xc3\xa5"
escape_input = base * 1024 * 1024 * 2
ujson.encode(escape_input)
def test_decode_big_escape(self):
# Make sure no Exception is raised.
for _ in range(10):
base = '\u00e5'.encode("utf-8") if compat.PY3 else "\xc3\xa5"
quote = compat.str_to_bytes("\"")
escape_input = quote + (base * 1024 * 1024 * 2) + quote
ujson.decode(escape_input)
def test_to_dict(self):
d = {u("key"): 31337}
class DictTest(object):
def toDict(self):
return d
o = DictTest()
output = ujson.encode(o)
dec = ujson.decode(output)
assert dec == d
def test_default_handler(self):
class _TestObject(object):
def __init__(self, val):
self.val = val
@property
def recursive_attr(self):
return _TestObject("recursive_attr")
def __str__(self):
return str(self.val)
pytest.raises(OverflowError, ujson.encode, _TestObject("foo"))
assert '"foo"' == ujson.encode(_TestObject("foo"),
default_handler=str)
def my_handler(_):
return "foobar"
assert '"foobar"' == ujson.encode(_TestObject("foo"),
default_handler=my_handler)
def my_handler_raises(_):
raise TypeError("I raise for anything")
with pytest.raises(TypeError, match="I raise for anything"):
ujson.encode(_TestObject("foo"), default_handler=my_handler_raises)
def my_int_handler(_):
return 42
assert ujson.decode(ujson.encode(_TestObject("foo"),
default_handler=my_int_handler)) == 42
def my_obj_handler(_):
return datetime.datetime(2013, 2, 3)
assert (ujson.decode(ujson.encode(datetime.datetime(2013, 2, 3))) ==
ujson.decode(ujson.encode(_TestObject("foo"),
default_handler=my_obj_handler)))
obj_list = [_TestObject("foo"), _TestObject("bar")]
assert (json.loads(json.dumps(obj_list, default=str)) ==
ujson.decode(ujson.encode(obj_list, default_handler=str)))
class TestNumpyJSONTests(object):
@pytest.mark.parametrize("bool_input", [True, False])
def test_bool(self, bool_input):
b = np.bool(bool_input)
assert ujson.decode(ujson.encode(b)) == b
def test_bool_array(self):
bool_array = np.array([
True, False, True, True,
False, True, False, False], dtype=np.bool)
output = np.array(ujson.decode(
ujson.encode(bool_array)), dtype=np.bool)
tm.assert_numpy_array_equal(bool_array, output)
def test_int(self, any_int_dtype):
klass = np.dtype(any_int_dtype).type
num = klass(1)
assert klass(ujson.decode(ujson.encode(num))) == num
def test_int_array(self, any_int_dtype):
arr = np.arange(100, dtype=np.int)
arr_input = arr.astype(any_int_dtype)
arr_output = np.array(ujson.decode(ujson.encode(arr_input)),
dtype=any_int_dtype)
tm.assert_numpy_array_equal(arr_input, arr_output)
def test_int_max(self, any_int_dtype):
if any_int_dtype in ("int64", "uint64") and compat.is_platform_32bit():
pytest.skip("Cannot test 64-bit integer on 32-bit platform")
klass = np.dtype(any_int_dtype).type
# uint64 max will always overflow,
# as it's encoded to signed.
if any_int_dtype == "uint64":
num = np.iinfo("int64").max
else:
num = np.iinfo(any_int_dtype).max
assert klass(ujson.decode(ujson.encode(num))) == num
def test_float(self, float_dtype):
klass = np.dtype(float_dtype).type
num = klass(256.2013)
assert klass(ujson.decode(ujson.encode(num))) == num
def test_float_array(self, float_dtype):
arr = np.arange(12.5, 185.72, 1.7322, dtype=np.float)
float_input = arr.astype(float_dtype)
float_output = np.array(ujson.decode(
ujson.encode(float_input, double_precision=15)),
dtype=float_dtype)
tm.assert_almost_equal(float_input, float_output)
def test_float_max(self, float_dtype):
klass = np.dtype(float_dtype).type
num = klass(np.finfo(float_dtype).max / 10)
tm.assert_almost_equal(klass(ujson.decode(
ujson.encode(num, double_precision=15))), num)
def test_array_basic(self):
arr = np.arange(96)
arr = arr.reshape((2, 2, 2, 2, 3, 2))
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
tm.assert_numpy_array_equal(ujson.decode(
ujson.encode(arr), numpy=True), arr)
@pytest.mark.parametrize("shape", [
(10, 10),
(5, 5, 4),
(100, 1),
])
def test_array_reshaped(self, shape):
arr = np.arange(100)
arr = arr.reshape(shape)
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
tm.assert_numpy_array_equal(ujson.decode(
ujson.encode(arr), numpy=True), arr)
def test_array_list(self):
arr_list = ["a", list(), dict(), dict(), list(),
42, 97.8, ["a", "b"], {"key": "val"}]
arr = np.array(arr_list)
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
def test_array_float(self):
dtype = np.float32
arr = np.arange(100.202, 200.202, 1, dtype=dtype)
arr = arr.reshape((5, 5, 4))
arr_out = np.array(ujson.decode(ujson.encode(arr)), dtype=dtype)
tm.assert_almost_equal(arr, arr_out)
arr_out = ujson.decode(ujson.encode(arr), numpy=True, dtype=dtype)
tm.assert_almost_equal(arr, arr_out)
def test_0d_array(self):
with pytest.raises(TypeError):
ujson.encode(np.array(1))
@pytest.mark.parametrize("bad_input,exc_type,kwargs", [
([{}, []], ValueError, {}),
([42, None], TypeError, {}),
([["a"], 42], ValueError, {}),
([42, {}, "a"], TypeError, {}),
([42, ["a"], 42], ValueError, {}),
(["a", "b", [], "c"], ValueError, {}),
([{"a": "b"}], ValueError, dict(labelled=True)),
({"a": {"b": {"c": 42}}}, ValueError, dict(labelled=True)),
([{"a": 42, "b": 23}, {"c": 17}], ValueError, dict(labelled=True))
])
def test_array_numpy_except(self, bad_input, exc_type, kwargs):
with pytest.raises(exc_type):
ujson.decode(ujson.dumps(bad_input), numpy=True, **kwargs)
def test_array_numpy_labelled(self):
labelled_input = {"a": []}
output = ujson.loads(ujson.dumps(labelled_input),
numpy=True, labelled=True)
assert (np.empty((1, 0)) == output[0]).all()
assert (np.array(["a"]) == output[1]).all()
assert output[2] is None
labelled_input = [{"a": 42}]
output = ujson.loads(ujson.dumps(labelled_input),
numpy=True, labelled=True)
assert (np.array([u("a")]) == output[2]).all()
assert (np.array([42]) == output[0]).all()
assert output[1] is None
# see gh-10837: write out the dump explicitly
# so there is no dependency on iteration order
input_dumps = ('[{"a": 42, "b":31}, {"a": 24, "c": 99}, '
'{"a": 2.4, "b": 78}]')
output = ujson.loads(input_dumps, numpy=True, labelled=True)
expected_vals = np.array(
[42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
assert (expected_vals == output[0]).all()
assert output[1] is None
assert (np.array([u("a"), "b"]) == output[2]).all()
input_dumps = ('{"1": {"a": 42, "b":31}, "2": {"a": 24, "c": 99}, '
'"3": {"a": 2.4, "b": 78}}')
output = ujson.loads(input_dumps, numpy=True, labelled=True)
expected_vals = np.array(
[42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
assert (expected_vals == output[0]).all()
assert (np.array(["1", "2", "3"]) == output[1]).all()
assert (np.array(["a", "b"]) == output[2]).all()
class TestPandasJSONTests(object):
def test_dataframe(self, orient, numpy):
if orient == "records" and numpy:
pytest.skip("Not idiomatic pandas")
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
"a", "b"], columns=["x", "y", "z"])
encode_kwargs = {} if orient is None else dict(orient=orient)
decode_kwargs = {} if numpy is None else dict(numpy=numpy)
output = ujson.decode(ujson.encode(df, **encode_kwargs),
**decode_kwargs)
# Ensure proper DataFrame initialization.
if orient == "split":
dec = _clean_dict(output)
output = DataFrame(**dec)
else:
output = DataFrame(output)
# Corrections to enable DataFrame comparison.
if orient == "values":
df.columns = [0, 1, 2]
df.index = [0, 1]
elif orient == "records":
df.index = [0, 1]
elif orient == "index":
df = df.transpose()
tm.assert_frame_equal(output, df, check_dtype=False)
def test_dataframe_nested(self, orient):
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
"a", "b"], columns=["x", "y", "z"])
nested = {"df1": df, "df2": df.copy()}
kwargs = {} if orient is None else dict(orient=orient)
exp = {"df1": ujson.decode(ujson.encode(df, **kwargs)),
"df2": ujson.decode(ujson.encode(df, **kwargs))}
assert ujson.decode(ujson.encode(nested, **kwargs)) == exp
def test_dataframe_numpy_labelled(self, orient):
if orient in ("split", "values"):
pytest.skip("Incompatible with labelled=True")
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
"a", "b"], columns=["x", "y", "z"], dtype=np.int)
kwargs = {} if orient is None else dict(orient=orient)
output = DataFrame(*ujson.decode(ujson.encode(df, **kwargs),
numpy=True, labelled=True))
if orient is None:
df = df.T
elif orient == "records":
df.index = [0, 1]
tm.assert_frame_equal(output, df)
def test_series(self, orient, numpy):
s = Series([10, 20, 30, 40, 50, 60], name="series",
index=[6, 7, 8, 9, 10, 15]).sort_values()
encode_kwargs = {} if orient is None else dict(orient=orient)
decode_kwargs = {} if numpy is None else dict(numpy=numpy)
output = ujson.decode(ujson.encode(s, **encode_kwargs),
**decode_kwargs)
if orient == "split":
dec = _clean_dict(output)
output = Series(**dec)
else:
output = Series(output)
if orient in (None, "index"):
s.name = None
output = output.sort_values()
s.index = ["6", "7", "8", "9", "10", "15"]
elif orient in ("records", "values"):
s.name = None
s.index = [0, 1, 2, 3, 4, 5]
tm.assert_series_equal(output, s, check_dtype=False)
def test_series_nested(self, orient):
s = Series([10, 20, 30, 40, 50, 60], name="series",
index=[6, 7, 8, 9, 10, 15]).sort_values()
nested = {"s1": s, "s2": s.copy()}
kwargs = {} if orient is None else dict(orient=orient)
exp = {"s1": ujson.decode(ujson.encode(s, **kwargs)),
"s2": ujson.decode(ujson.encode(s, **kwargs))}
assert ujson.decode(ujson.encode(nested, **kwargs)) == exp
def test_index(self):
i = Index([23, 45, 18, 98, 43, 11], name="index")
# Column indexed.
output = Index(ujson.decode(ujson.encode(i)), name="index")
tm.assert_index_equal(i, output)
output = Index(ujson.decode(ujson.encode(i), numpy=True), name="index")
tm.assert_index_equal(i, output)
dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split")))
output = Index(**dec)
tm.assert_index_equal(i, output)
assert i.name == output.name
dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split"),
numpy=True))
output = Index(**dec)
tm.assert_index_equal(i, output)
assert i.name == output.name
output = Index(ujson.decode(ujson.encode(i, orient="values")),
name="index")
tm.assert_index_equal(i, output)
output = Index(ujson.decode(ujson.encode(i, orient="values"),
numpy=True), name="index")
tm.assert_index_equal(i, output)
output = Index(ujson.decode(ujson.encode(i, orient="records")),
name="index")
tm.assert_index_equal(i, output)
output = Index(ujson.decode(ujson.encode(i, orient="records"), numpy=True), name="index")
tm.assert_index_equal(i, output)
import pandas as pd
import networkx as nx
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
#funtions
def degree(G,f):
"""
Adds a column to the dataframe f with the degree of each node.
G: a networkx graph.
f: a pandas dataframe.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
raise ValueError('The number of nodes and the length of the dataframe should be the same.')
degree_dic = nx.degree_centrality(G)
degree_df = pd.DataFrame(data = {'name': list(degree_dic.keys()), 'degree': list(degree_dic.values()) })
f = pd.merge(f, degree_df, on='name')
return f
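# Illustrative sketch (not part of the original module): the feature helpers all
# expect a graph plus a dataframe whose 'name' column matches the graph's nodes.
# Hypothetical toy data for demonstration only.
def _degree_example():
    G = nx.Graph([("a", "b"), ("b", "c")])
    f = pd.DataFrame({"name": ["a", "b", "c"]})
    return degree(G, f)  # adds a 'degree' column holding normalized degree centrality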
def centrality(G,f):
"""
Adds a column to the dataframe f with the centrality of each node.
G: a networkx graph.
f: a pandas dataframe.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
raise ValueError('The number of nodes and the length of the dataframe should be the same.')
centrality_dic = nx.degree_centrality(G)
centrality_df = pd.DataFrame(data = {'name': list(centrality_dic.keys()), 'centrality': list(centrality_dic.values()) })
f = pd.merge(f, centrality_df, on='name')
return f
def betweenness(G,f):
"""
Adds a column to the dataframe f with the betweenness of each node.
G: a networkx graph.
f: a pandas dataframe.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
betweenness_dic = nx.betweenness_centrality(G)
betweenness_df = pd.DataFrame(data = {'name': list(betweenness_dic.keys()), 'betweenness': list(betweenness_dic.values()) })
f = pd.merge(f, betweenness_df, on='name')
return f
def pagerank(G,f):
"""
Adds a column to the dataframe f with the pagerank of each node.
G: a networkx graph.
f: a pandas dataframe.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
pagerank_dic = nx.pagerank(G)
pagerank_df = pd.DataFrame(data = {'name': list(pagerank_dic.keys()), 'pagerank': list(pagerank_dic.values()) })
f = pd.merge(f, pagerank_df, on='name')
return f
def clustering(G,f):
"""
    Adds a column to the dataframe f with the clustering coefficient of each node.
G: a networkx graph.
f: a pandas dataframe.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
clustering_dic = nx.clustering(G)
clustering_df = pd.DataFrame(data = {'name': list(clustering_dic.keys()), 'clustering': list(clustering_dic.values()) })
f = pd.merge(f, clustering_df, on='name')
return f
def communities_greedy_modularity(G,f):
"""
Adds a column to the dataframe f with the community of each node.
    The communities are detected using greedy modularity.
G: a networkx graph.
f: a pandas dataframe.
    It works with networkx version: '2.4rc1.dev_20190610203526'
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
communities_dic = nx.algorithms.community.greedy_modularity_communities(G)
communities_df = pd.DataFrame(data = {'name': [i for j in range(len(communities_dic)) for i in list(communities_dic[j])], 'communities_greedy_modularity': [j for j in range(len(communities_dic)) for i in list(communities_dic[j])] })
f = pd.merge(f, communities_df, on='name')
return f
def communities_label_propagation(G,f):
"""
Adds a column to the dataframe f with the community of each node.
    The communities are detected using label propagation.
G: a networkx graph.
f: a pandas dataframe.
    It works with networkx version: '2.4rc1.dev_20190610203526'
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
communities_gen = nx.algorithms.community.label_propagation_communities(G)
communities_dic = [community for community in communities_gen]
communities_df = pd.DataFrame(data = {'name': [i for j in range(len(communities_dic)) for i in list(communities_dic[j])], 'communities_label_propagation': [j for j in range(len(communities_dic)) for i in list(communities_dic[j])] })
f = pd.merge(f, communities_df, on='name')
return f
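# --- Illustrative sketch (assumption: toy graph, not project data). Both detectors append one
# integer community label per node; behaviour depends on the networkx version noted in the
# docstrings above.
def _example_community_columns():
    G_demo = nx.karate_club_graph()
    f_demo = pd.DataFrame({'name': list(G_demo.nodes())})
    f_demo = communities_greedy_modularity(G_demo, f_demo)
    f_demo = communities_label_propagation(G_demo, f_demo)
    # Columns: name, communities_greedy_modularity, communities_label_propagation
    return f_demo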
def mean_neighbors(G,f,column,n=1):
"""
    Adds a column to the dataframe f with the mean value of its neighbors' feature.
G: a networkx graph.
f: a pandas dataframe.
column: the column to which the mean is applied.
n: neighbourhood order.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
mean_neighbors = np.zeros([f.shape[0]])
matrix = nx.to_numpy_matrix(G)
for e in range(1,n):
matrix += matrix ** e
for i in f.index:
neighbors = matrix[i]>0
mean_neighbors[i] = f[neighbors.tolist()[0]][column].mean()
f["mean_neighbors"] = mean_neighbors
return f
def std_neighbors(G,f,column,n=1):
"""
    Adds a column to the dataframe f with the standard deviation of its neighbors' feature.
G: a networkx graph.
f: a pandas dataframe.
column: the column to which the mean is applied.
n: neighbourhood order.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
std_neighbors = np.zeros([f.shape[0]])
matrix = nx.to_numpy_matrix(G)
for e in range(1,n):
matrix += matrix ** e
for i in f.index:
neighbors = matrix[i]>0
std_neighbors[i] = f[neighbors.tolist()[0]][column].std()
f["std_neighbors"] = std_neighbors
return f
def max_neighbors(G,f,column,n=1):
"""
    Adds a column to the dataframe f with the maximum value of its neighbors' feature.
G: a networkx graph.
f: a pandas dataframe.
column: the column to which the mean is applied.
n: neighbourhood order.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
max_neighbors = np.zeros([f.shape[0]])
matrix = nx.to_numpy_matrix(G)
for e in range(1,n):
matrix += matrix ** e
for i in f.index:
neighbors = matrix[i]>0
max_neighbors[i] = f[neighbors.tolist()[0]][column].max()
f["max_neighbors"] = max_neighbors
return f
def min_neighbors(G,f,column,n=1):
"""
    Adds a column to the dataframe f with the minimum value of its neighbors' feature.
G: a networkx graph.
f: a pandas dataframe.
column: the column to which the mean is applied.
n: neighbourhood order.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
min_neighbors = np.zeros([f.shape[0]])
matrix = nx.to_numpy_matrix(G)
for e in range(1,n):
matrix += matrix ** e
for i in f.index:
neighbors = matrix[i]>0
min_neighbors[i] = f[neighbors.tolist()[0]][column].min()
f["min_neighbors"] = min_neighbors
return f
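# --- Illustrative sketch (assumptions: toy graph and a random 'feature' column). The four
# neighbour aggregators expect the dataframe's positional index to line up with the node order
# used by nx.to_numpy_matrix(G), which holds when the frame is built directly from
# list(G.nodes()) as below.
def _example_neighbor_aggregates():
    G_demo = nx.karate_club_graph()
    f_demo = pd.DataFrame({'name': list(G_demo.nodes()),
                           'feature': np.random.rand(G_demo.number_of_nodes())})
    f_demo = mean_neighbors(G_demo, f_demo, 'feature', n=1)
    f_demo = std_neighbors(G_demo, f_demo, 'feature', n=1)
    f_demo = max_neighbors(G_demo, f_demo, 'feature', n=1)
    f_demo = min_neighbors(G_demo, f_demo, 'feature', n=1)
    return f_demo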
def within_module_degree(G,f, column_communities = None, community_method = "label_propagation"):
"""
the within_module_degree calculates: Zi = (ki-ks)/Ss
Ki = number of links between the node i and all the nodes of its cluster
Ks = mean degree of the nodes in cluster s
    Ss = the standard deviation of the degrees of the nodes in cluster s
The within-module degree z-score measures how well-connected node i is to other nodes in the module.
PAPER: <NAME>., & <NAME>. (2005). Functional cartography of complex metabolic networks. nature, 433(7028), 895.
G: a networkx graph.
f: a pandas dataframe.
    column_communities: a column of the dataframe with the communities for each node. If None, the communities will be estimated using the method given by community_method.
    community_method: method to calculate the communities in the graph G if they are not provided with column_communities.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
if column_communities == None:
if community_method == "label_propagation":
f = communities_label_propagation(G,f)
column_communities = "communities_label_propagation"
elif community_method == "greedy_modularity":
f = communities_greedy_modularity(G,f)
column_communities = "communities_greedy_modularity"
else:
raise ValueError('A clustering method should be provided.')
z_df = pd.DataFrame(data = {'name': [], 'within_module_degree': [] })
    for community in set(f[column_communities]):
        G2 = G.subgraph(f[f[column_communities] == community]["name"].values)
Ks = 2*len(G2.edges) / len(G2.nodes)
Ss = np.std([i[1] for i in G2.degree()])
z_df = pd.concat([z_df,pd.DataFrame(data = {'name': list(G2.nodes), 'within_module_degree': [(i[1]-Ks)/Ss for i in G2.degree()] }) ])
f = pd.merge(f, z_df, on='name')
return f
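# Worked example of the z-score above (illustrative numbers only): if node i has ki = 5 links
# inside its module, the module's mean degree is Ks = 3 and the standard deviation of degrees
# is Ss = 1.5, then Zi = (5 - 3) / 1.5 ≈ 1.33, i.e. node i is noticeably better connected
# within its module than a typical member.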
def participation_coefficient(G,f, column_communities = None, community_method = "label_propagation"):
"""
the participation_coefficient calculates: Pi = 1- sum_s( (Kis/Kit)^2 )
Kis = number of links between the node i and the nodes of the cluster s
Kit = degree of the node i
The participation coefficient of a node is therefore close to 1 if its links are uniformly distributed among all the modules and 0 if all its links are within its own module.
PAPER: <NAME>., & <NAME>. (2005). Functional cartography of complex metabolic networks. nature, 433(7028), 895.
G: a networkx graph.
f: a pandas dataframe.
    column_communities: a column of the dataframe with the communities for each node. If None, the communities will be estimated using the method given by community_method.
    community_method: method to calculate the communities in the graph G if they are not provided with column_communities.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
if column_communities == None:
if community_method == "label_propagation":
f = communities_label_propagation(G,f)
column_communities = "communities_label_propagation"
elif community_method == "greedy_modularity":
f = communities_greedy_modularity(G,f)
column_communities = "communities_greedy_modularity"
else:
raise ValueError('A clustering method should be provided.')
p_df = pd.DataFrame(data = {'name': f['name'], 'participation_coefficient': [1 for _ in f['name']] })
for node in f['name']:
Kit = len(G.edges(node))
        for community in set(f[column_communities]):
            Kis = len([edge for edge in G.edges(node) if edge[1] in f[ f[column_communities] == community ]["name"]])
p_df.loc[ p_df["name"] == node, 'participation_coefficient' ] -= ( Kis / Kit ) ** 2
f = pd.merge(f, p_df, on='name')
return f
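# Worked example of the participation coefficient above (illustrative numbers only): a node
# with degree Kit = 4 that keeps 2 links in its own module and sends 2 links to one other
# module gets Pi = 1 - ((2/4)**2 + (2/4)**2) = 0.5, while a node whose 4 links all stay inside
# its own module gets Pi = 1 - (4/4)**2 = 0.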
def node_embeddings(G,f,dim=20, walk_length=16, num_walks=100, workers=2):
"""
Adds the embeddings of the nodes to the dataframe f.
G: a networkx graph.
f: a pandas dataframe.
dim: the dimension of the embedding.
<NAME>., & <NAME>. (2016, August). node2vec: Scalable feature learning for networks. In Proceedings of the 22nd ACM SIGKDD international conference on Knowledge discovery and data mining (pp. 855-864). ACM.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
from node2vec import Node2Vec
node2vec = Node2Vec(G, dimensions=dim, walk_length=walk_length, num_walks=num_walks, workers=workers)
model = node2vec.fit(window=10, min_count=1)
embeddings_df = pd.DataFrame(columns = ['name']+['node_embeddings_'+str(i) for i in range(dim)])
embeddings_df['name'] = f['name']
for name in embeddings_df['name']:
embeddings_df[embeddings_df['name'] == name] = [name] + list(model[str(name)])
f = pd.merge(f, embeddings_df, on='name')
return f
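# --- Illustrative sketch (assumptions: toy graph; the optional `node2vec` package is installed
# with a gensim version where the `model[...]` lookup used above still works -- newer gensim
# releases expose vectors via `model.wv[...]` instead).
def _example_node_embeddings():
    G_demo = nx.karate_club_graph()
    f_demo = pd.DataFrame({'name': list(G_demo.nodes())})
    # Appends columns node_embeddings_0 ... node_embeddings_7 to the frame.
    return node_embeddings(G_demo, f_demo, dim=8, walk_length=10, num_walks=20, workers=1)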
#Transformers
class Dumb(BaseEstimator, TransformerMixin):
def __init__(self,m = 8):
self.m = m
print('a',self.m)
def fit(self, X, y=None):
return self
def transform(self, X):
print('b',self.m)
return X
class Replace(BaseEstimator, TransformerMixin):
def __init__(self, value1,value2):
self.value1 = value1
self.value2 = value2
def fit(self, X, y=None):
return self
def transform(self, X):
return X.replace(self.value1, self.value2, regex=True)
class DropName(BaseEstimator, TransformerMixin):
"""
Drops the "name" column.
"""
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X):
X_prima = X.drop(['name'],axis=1)
return X_prima
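# --- Illustrative sketch (assumption: a tiny hand-made frame). The simple transformers above
# follow the usual sklearn fit/transform contract, so they can be used standalone as here or
# chained in a Pipeline (see the pipeline sketch further down).
def _example_basic_transformers():
    X_demo = pd.DataFrame({'name': ['a', 'b'], 'answer': ['yes', 'no']})
    X_demo = Replace('yes', 1).fit_transform(X_demo)
    X_demo = Replace('no', 0).fit_transform(X_demo)
    return DropName().fit_transform(X_demo)  # leaves only the 'answer' column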
class Graph_fuction(BaseEstimator, TransformerMixin):
"""
Adds a column to the dataframe f with the result of the function for each node.
    G: a networkx graph. The names of the nodes should be included in the train dataframe.
    function: a python function that takes the graph G as input and outputs a column of the same length as the number of nodes in the graph.
column_name: a string with the name of the column
"""
def __init__(self, G, function, column_name = "Graph_fuction"):
self.G = G
self.function = function
self.column_name = column_name
def fit(self, X, y=None):
return self
def transform(self, X):
G_train = self.G.subgraph(X['name'].values)
column = self.function(G_train)
degree_df = pd.DataFrame(data = {'name': list(G_train.nodes()), self.column_name: column })
X_prima = pd.merge(X, degree_df, on='name')
print(X_prima.columns)
return X_prima
class Graph_features_fuction(BaseEstimator, TransformerMixin):
"""
Adds a column to the dataframe f with the result of the function for each node.
    G: a networkx graph. The names of the nodes should be included in the train dataframe.
    function: a python function that takes the graph G and the dataframe as input and outputs a column of the same length as the number of nodes in the graph.
column_name: a string with the name of the column
"""
def __init__(self, G, function, column_name = "Graph_features_fuction"):
self.G = G
self.function = function
self.column_name = column_name
def fit(self, X, y=None):
return self
def transform(self, X):
G_train = self.G.subgraph(X['name'].values)
column = self.function(G_train, X)
degree_df = pd.DataFrame(data = {'name': list(G_train.nodes()), self.column_name: column })
X_prima = pd.merge(X, degree_df, on='name')
print(X_prima.columns)
return X_prima
class Degree(BaseEstimator, TransformerMixin):
"""
Adds a column to the dataframe f with the degree of each node.
    G: a networkx graph. The names of the nodes should be included in the train dataframe.
"""
def __init__(self, G):
self.G = G
def fit(self, X, y=None):
return self
def transform(self, X):
G_train = self.G.subgraph(X['name'].values)
degree_dic = nx.degree_centrality(G_train)
degree_df = pd.DataFrame(data = {'name': list(degree_dic.keys()), 'degree': list(degree_dic.values()) })
X_prima = pd.merge(X, degree_df, on='name')
return X_prima
class Clustering(BaseEstimator, TransformerMixin):
"""
    Adds a column to the dataframe f with the clustering coefficient of each node.
    G: a networkx graph. The names of the nodes should be included in the train dataframe.
"""
def __init__(self, G):
self.G = G
def fit(self, X, y=None):
return self
def transform(self, X):
G_train = self.G.subgraph(X['name'].values)
clustering_dic = nx.clustering(G_train)
clustering_df = pd.DataFrame(data = {'name': list(clustering_dic.keys()), 'clustering': list(clustering_dic.values()) })
X_prima = pd.merge(X, clustering_df, on='name')
return X_prima
class Centrality(BaseEstimator, TransformerMixin):
"""
Adds a column to the dataframe f with the centrality of each node.
    G: a networkx graph. The names of the nodes should be included in the train dataframe.
"""
def __init__(self, G):
self.G = G
def fit(self, X, y=None):
return self
def transform(self, X):
G_train = self.G.subgraph(X['name'].values)
centrality_dic = nx.degree_centrality(G_train)
centrality_df = pd.DataFrame(data = {'name': list(centrality_dic.keys()), 'centrality': list(centrality_dic.values()) })
X_prima = pd.merge(X, centrality_df, on='name')
return X_prima
class Betweenness(BaseEstimator, TransformerMixin):
"""
Adds a column to the dataframe f with the betweenness of each node.
    G: a networkx graph. The names of the nodes should be included in the train dataframe.
"""
def __init__(self, G):
self.G = G
def fit(self, X, y=None):
return self
def transform(self, X):
G_train = self.G.subgraph(X['name'].values)
betweenness_dic = nx.betweenness_centrality(G_train)
betweenness_df = pd.DataFrame(data = {'name': list(betweenness_dic.keys()), 'betweenness': list(betweenness_dic.values()) })
X_prima = pd.merge(X, betweenness_df, on='name')
return X_prima
class Pagerank(BaseEstimator, TransformerMixin):
"""
Adds a column to the dataframe f with the pagerank of each node.
    G: a networkx graph. The names of the nodes should be included in the train dataframe.
"""
def __init__(self, G):
self.G = G
def fit(self, X, y=None):
return self
def transform(self, X):
G_train = self.G.subgraph(X['name'].values)
pagerank_dic = nx.pagerank(G_train)
pagerank_df = pd.DataFrame(data = {'name': list(pagerank_dic.keys()), 'pagerank': list(pagerank_dic.values()) })
X_prima = pd.merge(X, pagerank_df, on='name')
return X_prima
class Communities_greedy_modularity(BaseEstimator, TransformerMixin):
"""
    Adds a column to the dataframe f with the community of each node.
    The communities are detected using greedy modularity.
    G: a networkx graph. The names of the nodes should be included in the train dataframe.
    It works with networkx version: '2.4rc1.dev_20190610203526'
"""
def __init__(self, G):
self.G = G
def fit(self, X, y=None):
return self
def transform(self, X):
G_train = self.G.subgraph(X['name'].values)
communities_dic = nx.algorithms.community.greedy_modularity_communities(G_train)
communities_df = pd.DataFrame(data = {'name': [i for j in range(len(communities_dic)) for i in list(communities_dic[j])], 'communities_greedy_modularity': [j for j in range(len(communities_dic)) for i in list(communities_dic[j])] })
X_prima = pd.merge(X,communities_df, on='name')
return X_prima
class Communities_label_propagation(BaseEstimator, TransformerMixin):
"""
    Adds a column to the dataframe f with the community of each node.
    The communities are detected using label propagation.
    G: a networkx graph. The names of the nodes should be included in the train dataframe.
    It works with networkx version: '2.4rc1.dev_20190610203526'
"""
def __init__(self, G):
self.G = G
def fit(self, X, y=None):
return self
def transform(self, X):
G_train = self.G.subgraph(X['name'].values)
communities_gen = nx.algorithms.community.label_propagation_communities(G_train)
communities_dic = [community for community in communities_gen]
communities_df = pd.DataFrame(data = {'name': [i for j in range(len(communities_dic)) for i in list(communities_dic[j])], 'communities_label_propagation': [j for j in range(len(communities_dic)) for i in list(communities_dic[j])] })
X_prima = pd.merge(X,communities_df, on='name')
return X_prima
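# --- Illustrative sketch (assumptions: `G` is the full graph, `X` a dataframe with a 'name'
# column; sklearn's Pipeline is imported locally). The graph-based transformers compose like
# any other sklearn step; community-dependent steps such as Within_module_degree must come
# after a transformer that has created the community column they read.
def _example_graph_feature_pipeline(G, X):
    from sklearn.pipeline import Pipeline
    pipe = Pipeline([
        ('degree', Degree(G)),
        ('clustering', Clustering(G)),
        ('pagerank', Pagerank(G)),
        ('communities', Communities_label_propagation(G)),
    ])
    return pipe.fit_transform(X)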
class Mean_neighbors(BaseEstimator, TransformerMixin):
"""
    Adds a column to the dataframe f with the mean value of its neighbors' feature.
    G: a networkx graph. The names of the nodes should be included in the train dataframe.
column: the column to which the mean is applied.
n: neighbourhood order.
"""
def __init__(self, G, column, n=1):
self.G = G
self.column = column
self.n = n
def fit(self, X, y=None):
return self
def transform(self, X):
G_train = self.G.subgraph(X['name'].values)
mean_neighbors = np.zeros([X.shape[0]])
matrix = nx.to_numpy_matrix(G_train)
for e in range(1,self.n):
matrix += matrix ** e
for i in range(X.shape[0]):
neighbors = matrix[i]>0
mean_neighbors[i] = X[neighbors.tolist()[0]][self.column].mean()
X_prima = X
X_prima["mean_" + str(self.n) + "_neighbors_" + str(self.column)] = mean_neighbors
return X_prima
class Std_neighbors(BaseEstimator, TransformerMixin):
"""
    Adds a column to the dataframe f with the standard deviation of its neighbors' feature.
    G: a networkx graph. The names of the nodes should be included in the train dataframe.
column: the column to which the mean is applied.
n: neighbourhood order.
"""
def __init__(self, G, column, n=1):
self.G = G
self.column = column
self.n = n
def fit(self, X, y=None):
return self
def transform(self, X):
G_train = self.G.subgraph(X['name'].values)
std_neighbors = np.zeros([X.shape[0]])
matrix = nx.to_numpy_matrix(G_train)
for e in range(1,self.n):
matrix += matrix ** e
for i in range(X.shape[0]):
neighbors = matrix[i]>0
std_neighbors[i] = X[neighbors.tolist()[0]][self.column].std()
X_prima = X
X_prima["std_" + str(self.n) + "_neighbors_" + str(self.column)] = std_neighbors
return X_prima
class Max_neighbors(BaseEstimator, TransformerMixin):
"""
    Adds a column to the dataframe f with the maximum value of its neighbors' feature.
    G: a networkx graph. The names of the nodes should be included in the train dataframe.
column: the column to which the mean is applied.
n: neighbourhood order.
"""
def __init__(self, G, column, n=1):
self.G = G
self.column = column
self.n = n
def fit(self, X, y=None):
return self
def transform(self, X):
G_train = self.G.subgraph(X['name'].values)
max_neighbors = np.zeros([X.shape[0]])
matrix = nx.to_numpy_matrix(G_train)
for e in range(1,self.n):
matrix += matrix ** e
for i in range(X.shape[0]):
neighbors = matrix[i]>0
max_neighbors[i] = X[neighbors.tolist()[0]][self.column].max()
X_prima = X
X_prima["max_" + str(self.n) + "_neighbors_" + str(self.column)] = max_neighbors
return X_prima
class Min_neighbors(BaseEstimator, TransformerMixin):
"""
    Adds a column to the dataframe f with the minimum value of its neighbors' feature.
    G: a networkx graph. The names of the nodes should be included in the train dataframe.
column: the column to which the mean is applied.
n: neighbourhood order.
"""
def __init__(self, G, column, n=1):
self.G = G
self.column = column
self.n = n
def fit(self, X, y=None):
return self
def transform(self, X):
G_train = self.G.subgraph(X['name'].values)
min_neighbors = np.zeros([X.shape[0]])
matrix = nx.to_numpy_matrix(G_train)
for e in range(1,self.n):
matrix += matrix ** e
for i in range(X.shape[0]):
neighbors = matrix[i]>0
min_neighbors[i] = X[neighbors.tolist()[0]][self.column].min()
X_prima = X
X_prima["min_" + str(self.n) + "_neighbors_" + str(self.column)] = min_neighbors
return X_prima
class Within_module_degree(BaseEstimator, TransformerMixin):
"""
the within_module_degree calculates: Zi = (ki-ks)/Ss
Ki = number of links between the node i and all the nodes of its cluster
Ks = mean degree of the nodes in cluster s
    Ss = the standard deviation of the degrees of the nodes in cluster s
The within-module degree z-score measures how well-connected node i is to other nodes in the module.
PAPER: <NAME>., & <NAME>. (2005). Functional cartography of complex metabolic networks. nature, 433(7028), 895.
G: a networkx graph.
    column_communities: a column of the dataframe with the communities for each node (e.g. produced by one of the community transformers above).
"""
def __init__(self, G, column_communities):
self.G = G
self.column_communities = column_communities
def fit(self, X, y=None):
return self
def transform(self, X):
G_train = self.G.subgraph(X['name'].values)
z_df = pd.DataFrame(data = {'name': [], 'within_module_degree': [] })
for community in set(X[self.column_communities]):
G2 = G_train.subgraph(X[X[self.column_communities] == community]["name"].values)
Ks = 2*len(G2.edges) / len(G2.nodes)
Ss = np.std([i[1] for i in G2.degree()])
z_df = pd.concat([z_df,pd.DataFrame(data = {'name': list(G2.nodes), 'within_module_degree': [np.divide(i[1]-Ks, Ss) for i in G2.degree()] }) ])
X_prima = pd.merge(X, z_df, on='name')
return X_prima
class Participation_coefficient(BaseEstimator, TransformerMixin):
"""
The participation_coefficient calculates: Pi = 1- sum_s( (Kis/Kit)^2 )
Kis = number of links between the node i and the nodes of the cluster s
Kit = degree of the node i
The participation coefficient of a node is therefore close to 1 if its links are uniformly distributed among all the modules and 0 if all its links are within its own module.
PAPER: <NAME>., & <NAME>. (2005). Functional cartography of complex metabolic networks. nature, 433(7028), 895.
G: a networkx graph.
    column_communities: a column of the dataframe with the communities for each node (e.g. produced by one of the community transformers above).
"""
def __init__(self, G, column_communities):
self.G = G
self.column_communities = column_communities
def fit(self, X, y=None):
return self
def transform(self, X):
G_train = self.G.subgraph(X['name'].values)
        p_df = pd.DataFrame(data = {'name': X['name'], 'participation_coefficient': [1 for _ in X['name']] })
        for node in X['name']:
            Kit = len(G_train.edges(node))
            for community in set(X[self.column_communities]):
                Kis = len([edge for edge in G_train.edges(node) if edge[1] in X[ X[self.column_communities] == community ]["name"]])
                p_df.loc[ p_df["name"] == node, 'participation_coefficient' ] -= ( Kis / Kit ) ** 2
        X_prima = pd.merge(X, p_df, on='name')
        return X_prima
#!/usr/bin/env python3.6
"""This module describes functions for analysis of the SNSS Dataset"""
import os
import pandas as pd
from sas7bdat import SAS7BDAT
import numpy as np
import subprocess
from datetime import datetime, date
from csv import DictReader
from shutil import rmtree
from json import load as jsonLoad
import functools
import itertools
from colour import Color
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import matplotlib.patches as patches
import seaborn as sb
import textwrap as txtwrp
import ast
import imageio as imgio
import tqdm
import pickle
import scipy.stats as stats
import statsmodels.stats.api as sms
import scipy.interpolate as interpolate
from statsmodels.stats.weightstats import CompareMeans, DescrStatsW
from statsmodels.discrete.discrete_model import Logit
from statsmodels.tools.tools import add_constant
from sklearn import preprocessing, decomposition, manifold
from sklearn.metrics import confusion_matrix, \
accuracy_score, roc_auc_score, roc_curve, \
classification_report, precision_score, recall_score, explained_variance_score, r2_score, f1_score
from scipy.stats import logistic
from scipy.optimize import curve_fit
import pydot
from tensorflow.keras.metrics import top_k_categorical_accuracy
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense, Dropout, AlphaDropout, LeakyReLU
from tensorflow.keras.utils import plot_model
from tensorflow.keras.callbacks import CSVLogger, TensorBoard, Callback, EarlyStopping, ModelCheckpoint
from tensorflow.keras.backend import clear_session
import tensorflow.compat as tfCompat
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, <NAME>"
__credits__ = ["<NAME>", "<NAME>", "<NAME>", "<NAME>"]
__license__ = "Apache-2.0"
__version__ = "0.1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL> <EMAIL>"
""" Short title
Description
Args:
arg1: arg1 Description
Returns:
output1: output1 description.
Raises:
    exception1: exception circumstances.
"""
def loadJSON(fname):
# Load configuration
f = open(fname) # Open config file...
cfg = jsonLoad(f) # Load data...
f.close() # Close config file...
return cfg
def moduleInit():
pd.options.display.max_columns = None
pd.options.display.max_rows = 20
tfCompat.v1.disable_eager_execution()
def rmws(strList):
stripList = []
for s in strList:
stripList.append(s.replace(" ", ""))
return stripList
def timeAppend(varList, T):
timeVarList = []
for v in varList:
timeVarList.append(T + '_' + v)
return timeVarList
def autoscale(x):
return (x-np.min(x))/np.max(x)
def normalise(x):
return (x-np.mean(x))/np.std(x)
def import_SNSS(usr, pwd, local_file=0):
""" Mount UoE CMVM smb and import SNSS as dataframe.
Note you must have access permissions to specific share.
Keyword arguments:
usr = Edinburgh University matriculation number
pwd = <PASSWORD>
Location of data is specified in a JSON config file not included.
The SNSS dataset includes confidential patient information and must be
handled according to Caldicott principles.
"""
cfg = loadJSON("config.json")
if local_file:
print('Importing local data file...')
# Open and read SNSS data file
fp = '../../../../../Volumes/mount/SNSSFull.sas7bdat'
f = SAS7BDAT(fp)
rawDf = f.to_data_frame()
print('Dataframe loaded!')
else:
cmd = "mount_smbfs"
mountCmd = cmd+" //'"+cfg['dom']+";"+usr+":"+pwd+"'@"+cfg['shr']+" "+cfg['mnt']
uMountCmd = 'umount raw_data/mount/'
# Send smb mount command..
print('Mounting datashare...')
smbCall = subprocess.call(mountCmd, shell=True)
# Open and read SNSS data file
f = SAS7BDAT(cfg['fpath'])
print('Converting sas7bdat file to pd.dataframe...')
rawDf = f.to_data_frame()
print('Conversion completed! Closing file...')
f.close()
print('Attempting Unmount..')
try:
smbCall = subprocess.call(uMountCmd, shell=True)
print('dataShare Unmounted Successfully!')
except(OSError, EOFError):
print('Unmount failed...')
return rawDf
def SNSSNullity(raw):
""" Assess nullity of raw data import
Takes the raw imported dataset, ensures index integrity, assigns new binary
    variables for follow up at each study timepoint and computes attrition numbers
and ratios for each.
Args:
raw: Pandas DataFrame object from SAS7BDAT file.
Returns:
raw: The validated raw dataframe.
retentionTable: A pandas dataframe of counts for use in scripts if required.
Raises:
NONE
"""
# Assign nPatid as index variable.
raw = raw.set_index('nPatid', verify_integrity=True)
# Convert diagnostic nullity into binary variable in dataframe.
raw['All'] = raw.T0_PatID.notna()
raw['T1_HCData'] = raw.T1_HealthChange.notna()
raw['T2_HCData'] = raw.T2_HealthChange.notna()
raw['T1and2_HCData'] = (raw.T2_HealthChange.notna()) & (raw.T1_HealthChange.notna())
# Quantify diagnostic nullity and export
T = []
FULabels = ['T1_HCData', 'T2_HCData', 'T1and2_HCData']
for FU in FULabels:
T.append(raw.groupby(FU)['ExpGroups'].agg([('Total', 'count'),
('Label', lambda x:
tuple(np.unique(x[~np.isnan(x)],
return_counts=True)[0])),
('N(i)', lambda x:
tuple(np.unique(x[~np.isnan(x)],
return_counts=True)[1])),
('%', lambda x:
tuple((np.unique(x[~np.isnan(x)],
return_counts=True)[1]/sum(~np.isnan(x))*100).round(2)))]))
retentionTable = pd.concat(T, keys=FULabels, axis=0)
retentionTable.index = retentionTable.index.rename(['', 'FUDataAvailable'])
retentionTable.to_csv('output/0_SNSS_retention.tsv', sep='\t')
return raw, retentionTable
def SNSSCompoundVariables(df):
"""Produce variable compund measures e.g. SF12, HADS etc.
Adds the specified custom variables normally products or sums of other Variables
or binarisation etc to the provided dataframe. This function also undertakes
SIMD quintile mapping to patient postcodes.
Args:
df: Pandas dataframe.
Returns:
df: The dataframe with new variables added..
Raises:
KeyError, ValueError: If errors in postcode mapping.
"""
# Deactivate assignment warning which slows down SIMD processing.
pd.options.mode.chained_assignment = None
# Declare variable groups
varGroups = {'PHQ13': ['StomachPain', 'BackPain', 'Paininarmslegsjoints',
'Headaches', 'Chestpain', 'Dizziness',
'FaintingSpells', 'HeartPoundingorRacing', 'ShortnessofBreath',
'Constipation', 'NauseaorGas', 'Tired', 'Sleeping'],
'NeuroSymptoms': ['Lackofcoordination', 'MemorConcentration', 'LossofSensation',
'LossofVision', 'LossofHearing', 'Paralysisorweakness',
'DoubleorBlurredVision', 'DifficultySwallowing',
'DifficultySpeaking', 'SeizureorFit',
'AnxietyattackorPanicAttack', 'Littleinterestorpleasure',
'Feelingdownorhopeless', 'Nervesorfeelinganxious',
'Worryingalot'],
'IllnessWorry': ['Wworry', 'Wseriousworry', 'Wattention'],
'Satisfaction': ['Sat1', 'Sat2', 'Sat3', 'Sat4', 'Sat5', 'Sat6', 'Sat7', 'Sat8'],
'other': ['LossofHearing', 'Littleinterestorpleasure', 'Feelingdownorhopeless',
'Nervesorfeelinganxious', 'Worryingalot', 'AnxietyattackorPanicAttack']}
# Time specify certain groups into useful keysets.
T0IllnessWorryKeys = timeAppend(varGroups['IllnessWorry'], 'T0')
T0PHQ13Keys = timeAppend(varGroups['PHQ13'], 'T0')
T1PHQ13Keys = timeAppend(varGroups['PHQ13'], 'T1')
T2PHQ13Keys = timeAppend(varGroups['PHQ13'], 'T2')
T0PHQNeuro28Keys = timeAppend(varGroups['PHQ13'] + varGroups['NeuroSymptoms'], 'T0')
T1PHQNeuro28Keys = timeAppend(varGroups['PHQ13'] + varGroups['NeuroSymptoms'], 'T1')
T2PHQNeuro28Keys = timeAppend(varGroups['PHQ13'] + varGroups['NeuroSymptoms'], 'T2')
T0SatisfactionKeys = timeAppend(varGroups['Satisfaction'], 'T0')
    T1SatisfactionKeys = timeAppend(varGroups['Satisfaction'], 'T1')
    T2SatisfactionKeys = timeAppend(varGroups['Satisfaction'], 'T2')
    # Criterion used for defining successful follow-up at T1: any satisfaction data available.
# df['T1_Satisfaction_Bool'] = df['T1_Satisfaction_Total'].notna() # Strict
df['T1_Satisfaction_Bool'] = df[T1SatisfactionKeys].notna().any(axis=1) # Loose
# Add binarised ExpGroups.
df['ExpGroups_bin'] = (df['ExpGroups']-2)*-1
# Add binarised gender.
df['Gender_bin'] = df['Gender']-1
# Adding summative compound measures
df['T0_PHQNeuro28_Total'] = df[T0PHQNeuro28Keys].sum(axis=1, skipna=False)
df['T1_PHQNeuro28_Total'] = df[T1PHQNeuro28Keys].sum(axis=1, skipna=False)
df['T2_PHQNeuro28_Total'] = df[T2PHQNeuro28Keys].sum(axis=1, skipna=False)
df['T0_PHQ13_Total'] = df[T0PHQ13Keys].sum(axis=1, skipna=False)
df['T1_PHQ13_Total'] = df[T1PHQ13Keys].sum(axis=1, skipna=False)
df['T2_PHQ13_Total'] = df[T2PHQ13Keys].sum(axis=1, skipna=False)
df['T0_IllnessWorry'] = df[T0IllnessWorryKeys].sum(axis=1, skipna=False)
df['T0_Satisfaction_Total'] = df[T0SatisfactionKeys].sum(axis=1, skipna=False)
df['T1_Satisfaction_Total'] = df[T1SatisfactionKeys].sum(axis=1, skipna=False)
    df['T2_Satisfaction_Total'] = df[T2SatisfactionKeys].sum(axis=1, skipna=False)
# Adding boolean compound measures
df['T0_NegExpectation'] = (df['T0_IPQ1'] > 3).astype(int) # Define "Negative Expectation"
df['T0_NegExpectation'].loc[df['T0_IPQ1'].isna()] = np.nan # Boolean operator treats NaN as 0 so replace with NaNs
df['T0_PsychAttribution'] = ((df['T0_C7'] > 3) | (df['T0_C8'] > 3)).astype(int)
df['T0_PsychAttribution'].loc[(df['T0_C7'].isna()) | (df['T0_C8'].isna())] = np.nan
df['T0_LackofPsychAttribution'] = (df['T0_PsychAttribution']-1)*-1
for S in ['T0_Sat1', 'T0_Sat2', 'T0_Sat3',
'T0_Sat4', 'T0_Sat5', 'T0_Sat6', 'T0_Sat7', 'T0_Sat8']:
satNAIdx = df[S].isna()
        df[S + '_Poor_Bin'] = df[S] <= 2  # Binarise Satisfaction into Poor/Fair or not
df[S + '_Poor_Bin'].loc[satNAIdx] = np.nan
# Add binned measures
df['T0_PHQ13_Binned'] = pd.cut(df['T0_PHQ13_Total'], [0, 2.1, 5.1, 8.1, 13.1],
labels=['0-2', '3-5', '6-8', '9-13'],
right=True, include_lowest=True)
df['T0_PHQ13_BinInt'] = pd.cut(df['T0_PHQ13_Total'], [0, 2.1, 5.1, 8.1, 13.1],
labels=False,
right=True, include_lowest=True)
df['T0_PHQNeuro28_Binned'] = pd.cut(df['T0_PHQNeuro28_Total'], [0, 5.1, 8.1, 13.1, 27.1],
labels=['0-5', '6-8', '9-13', '14-27'],
right=True, include_lowest=True)
df['T0_PHQNeuro28_BinInt'] = pd.cut(df['T0_PHQNeuro28_Total'], [0, 5.1, 8.1, 13.1, 27.1],
labels=False,
right=True, include_lowest=True)
df['AgeBins'] = pd.cut(df['Age'], [0, 36, 46, 56, max(df['Age'])+0.1],
labels=['<=35', '36-45', '46-55', '>=56'],
right=True, include_lowest=True)
df['AgeBinInt'] = pd.cut(df['Age'], [0, 36, 46, 56, max(df['Age'])+0.1],
labels=False,
right=True, include_lowest=True)
df['T0_HADS_Binned'] = pd.cut(df['T0_HADS'], [0, 7.1, 14.1, 21.1, max(df['T0_HADS'])+0.1],
labels=['0-7', '8-14', '15-21', '>=22'],
right=True, include_lowest=True)
df['T0_HADS_BinInt'] = pd.cut(df['T0_HADS'], [0, 7.1, 14.1, 21.1, max(df['T0_HADS'])+0.1],
labels=False,
right=True, include_lowest=True)
df['T0_SF12_PF_Binned'] = pd.cut(df['T0_SF12_PF'], [-0.1, 24.9, 49.9, 74.9, 99.9, 100.1],
labels=['0', '25', '50', '75', '100'],
right=True, include_lowest=True)
df['T0_SF12_PF_BinInt'] = pd.cut(df['T0_SF12_PF'], [-0.1, 24.9, 49.9, 74.9, 99.9, 100.1],
labels=False,
right=True, include_lowest=True)
# Add binarised outcomes
poorOutcomeDict = {0: 1, 1: 1, 2: 1, 3: 0, 4: 0}
strictPoorOutcomeDict = {0: 1, 1: 1, 2: 0, 3: 0, 4: 0}
ternaryPoorOutcomeDict = {0: 2, 1: 2, 2: 1, 3: 0, 4: 0}
df['T1_poorCGI'] = df['T1_HealthChange'].replace(poorOutcomeDict)
df['T1_poorIPS'] = df['T1_SymptomsChange'].replace(poorOutcomeDict)
df['T2_poorCGI'] = df['T2_HealthChange'].replace(poorOutcomeDict)
df['T2_poorIPS'] = df['T2_SymptomsChange'].replace(poorOutcomeDict)
df['T2_strictPoorCGI'] = df['T2_HealthChange'].replace(strictPoorOutcomeDict)
df['T2_strictPoorIPS'] = df['T2_SymptomsChange'].replace(strictPoorOutcomeDict)
df['T2_ternaryCGI'] = df['T2_HealthChange'].replace(ternaryPoorOutcomeDict)
df['T2_ternaryIPS'] = df['T2_SymptomsChange'].replace(ternaryPoorOutcomeDict)
# Add relative secondary outcomes
df['T0T1_SF12_NormedMCS'] = df['T1_SF12_NormedMCS'] - df['T0_SF12_NormedMCS']
df['T1T2_SF12_NormedMCS'] = df['T2_SF12_NormedMCS'] - df['T1_SF12_NormedMCS']
df['T0T2_SF12_NormedMCS'] = df['T2_SF12_NormedMCS'] - df['T0_SF12_NormedMCS']
df['T0T2_SF12_binaryNormedMCS'] = (df['T0T2_SF12_NormedMCS'] < 0).astype(int)
df['T0T2_SF12_binaryNormedMCS'].loc[df['T0T2_SF12_NormedMCS'].isna()] = np.nan
df['T0T1_SF12_NormedPCS'] = df['T1_SF12_NormedPCS'] - df['T0_SF12_NormedPCS']
df['T1T2_SF12_NormedPCS'] = df['T2_SF12_NormedPCS'] - df['T1_SF12_NormedPCS']
df['T0T2_SF12_NormedPCS'] = df['T2_SF12_NormedPCS'] - df['T0_SF12_NormedPCS']
df['T0T2_SF12_binaryNormedPCS'] = (df['T0T2_SF12_NormedPCS'] < 0).astype(int)
df['T0T2_SF12_binaryNormedPCS'].loc[df['T0T2_SF12_NormedPCS'].isna()] = np.nan
df['T0T1_HADS'] = df['T1_HADS'] - df['T0_HADS']
df['T1T2_HADS'] = df['T2_HADS'] - df['T1_HADS']
df['T0T2_HADS'] = df['T2_HADS'] - df['T0_HADS']
df['T0T2_binaryHADS'] = (df['T0T2_HADS'] < 0).astype(int)
df['T0T2_binaryHADS'].loc[df['T0T2_HADS'].isna()] = np.nan
df['T0T1_PHQNeuro28_Total'] = df['T1_PHQNeuro28_Total'] - df['T0_PHQNeuro28_Total']
df['T1T2_PHQNeuro28_Total'] = df['T2_PHQNeuro28_Total'] - df['T1_PHQNeuro28_Total']
df['T0T2_PHQNeuro28_Total'] = df['T2_PHQNeuro28_Total'] - df['T0_PHQNeuro28_Total']
df['T0T2_binaryPHQNeuro28_Total'] = (df['T0T2_PHQNeuro28_Total'] < 0).astype(int)
df['T0T2_binaryPHQNeuro28_Total'].loc[df['T0T2_PHQNeuro28_Total'].isna()] = np.nan
print('SIMD 2004 to 2006 Postcode conversion...')
SIMD04 = pd.read_csv('raw_data/SIMDData/postcode_2006_2_simd2004.csv', index_col=0)
nullIdx = SIMD04['simd2004rank'].str.contains(' ')
domains = ['inc', 'emp', 'hlth', 'educ', 'access', 'house']
for d in domains:
SIMD04['simd2004_' + d + '_quintile'] = 5-pd.qcut(SIMD04['simd2004_' + d + '_rank']
[~nullIdx].astype(float), 5,
retbins=False, labels=False)
SIMDDict = dict(zip([str.replace(' ', '') for str in SIMD04.sort_index().index.values.tolist()],
SIMD04[['simd2004_sc_quintile',
'simd2004score',
'simd2004_inc_score',
'simd2004_emp_score',
'simd2004_hlth_score',
'simd2004_educ_score',
'simd2004_access_score',
'simd2004_house_score',
'simd2004_inc_quintile',
'simd2004_emp_quintile',
'simd2004_hlth_quintile',
'simd2004_educ_quintile',
'simd2004_access_quintile',
'simd2004_house_quintile']].values))
# Initialising variables as NaN arrays
df['T0_SIMD04'] = np.nan
df['T0_SIMD04_score'] = np.nan
for d in domains:
df['T0_SIMD04_' + d + '_score'] = np.nan
df['T0_SIMD04_' + d + '_quintile'] = np.nan
print('Constructed SIMD quintiles and Initialised Panda Variables')
print('Iterating through postcodes')
i = 0
for p in df['Postcode']:
if (p == '') | pd.isnull(p):
df['Postcode'].iloc[i] = np.nan
df['T0_SIMD04'].iloc[i] = np.nan
i = i + 1
# print('No Postcode Data')
else:
try:
p = p.replace(' ', '')
# print(p)
df['T0_SIMD04'].iloc[i] = int(SIMDDict[p][0])
df['T0_SIMD04_score'].iloc[i] = float(SIMDDict[p][1])
dd = 2
for d in domains:
df['T0_SIMD04_' + d + '_score'].iloc[i] = float(SIMDDict[p][dd])
df['T0_SIMD04_' + d + '_quintile'].iloc[i] = int(SIMDDict[p][dd+len(domains)])
dd += 1
except (KeyError, ValueError) as err:
# print('%s: Error!' % (p))
df['T0_SIMD04'].iloc[i] = np.nan
# print('No SIMD04 postcode map')
i = i + 1
# Add most deprived binarisation
df['T0_SIMD04_bin'] = df['T0_SIMD04'] >= 4
# Add interaction variables
df['Diagnosis*T0_IncapacityBenefitorDLA'] = df['Diagnosis']*df['T0_IncapacityBenefitorDLA']
df['ExpGroups*T0_IncapacityBenefitorDLA'] = df['ExpGroups']*df['T0_IncapacityBenefitorDLA']
df['ExpGroups_bin*T0_IncapacityBenefitorDLA'] = df['ExpGroups_bin']*df['T0_IncapacityBenefitorDLA']
df['ExpGroups_bin*T0_LackofPsychAttribution'] = df['ExpGroups_bin']*df['T0_LackofPsychAttribution']
df['ExpGroups_bin*T0_SIMD04_bin'] = df['ExpGroups_bin']*df['T0_SIMD04_bin']
df['ExpGroups_bin*T0_SF12_PF_BinInt'] = df['ExpGroups_bin']*df['T0_SF12_PF_BinInt']
df['ExpGroups_bin*T0_NegExpectation'] = df['ExpGroups_bin']*df['T0_NegExpectation']
df['ExpGroups_bin*Gender_bin'] = df['ExpGroups_bin']*df['Gender_bin']
print('Complete!')
return df
def cohen_d(x, y):
stats = {}
nx = len(x); meanx = np.mean(x); stdx = np.std(x, ddof=1); semx = stdx/np.sqrt(nx);
ny = len(y); meany = np.mean(y); stdy = np.std(y, ddof=1); semy = stdy/np.sqrt(ny);
meancix = [meanx+(1.96*i*semx) for i in [-1, 1]]
meanciy = [meany+(1.96*i*semy) for i in [-1, 1]]
dof = nx + ny - 2
d = (meanx - meany) / np.sqrt(((nx-1)*stdx ** 2 +
(ny-1)*stdy ** 2) / dof)
vard = (((nx+ny)/(nx*ny))+((d**2)/(2*(nx+ny-2))))*((nx+ny)/(nx+ny-2))
sed = np.sqrt(vard)
cid = [d+(1.96*i*sed) for i in [-1, 1]]
stats['d'] = d
stats['cid'] = cid
stats['mean'] = [meanx, meany]
stats['std'] = [stdx, stdy]
stats['sem'] = [semx, semy]
return d, stats
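# --- Illustrative sketch (synthetic data, not drawn from the SNSS dataset): two samples of 200
# values from N(0, 1) and N(0.5, 1) should give a Cohen's d near -0.5, returned together with
# its confidence interval and the per-group descriptive statistics.
def _example_cohen_d():
    rng = np.random.RandomState(0)
    a = rng.normal(0.0, 1.0, size=200)
    b = rng.normal(0.5, 1.0, size=200)
    return cohen_d(a, b)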
def cramersV(nrows, ncols, chisquared, correct_bias=True):
nobs = nrows*ncols
if correct_bias is True:
phi = 0
else:
phi = chisquared/nobs
V = np.sqrt((phi**2)/(min(nrows-1, ncols-1)))
return V, phi
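# For reference, the textbook (uncorrected) effect size is V = sqrt((chi2 / N) / min(r - 1, c - 1)),
# where N is the total number of observations in the contingency table; the helper above instead
# works from the chi-squared statistic and table dimensions supplied by the caller.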
def partitionData(df, partitionRatio=0.7):
""" Partition data into training and evaluation sets
Takes a dataframe and returns two arrays with the proportion to use for
training declared as the partition ratio and the other as evaluation of
(1-partitionRatio) size.
Args:
df: Pandas DataFrame to be partitioned.
partitionRatio: Ratio of the data to be used for training.
Returns:
trainIdx: The indices of data asssigned to training set.
evalIdx: The indices of data asssigned to eval set.
Raises:
NONE
"""
randIdx = np.linspace(0, df.shape[0]-1, df.shape[0]).astype(int)
np.random.shuffle(randIdx)
trainIdx = randIdx[0:round(df.shape[0]*partitionRatio)]
evalIdx = randIdx[round(df.shape[0]*(partitionRatio)):len(randIdx)]
return trainIdx, evalIdx
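# --- Illustrative sketch (synthetic frame): a 0.7 ratio on 10 rows yields 7 training and 3
# evaluation row positions, which together cover every row exactly once.
def _example_partition():
    df_demo = pd.DataFrame({'x': np.arange(10)})
    train_idx, eval_idx = partitionData(df_demo, partitionRatio=0.7)
    return train_idx, eval_idx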
def FollowUpandBaselineComparison(df):
""" A group-wise and follow-up wise comparison of declared Vars
Takes a pandas dataframe and as per the declared variables of interest below,
compares between groups and between lost to follow up and retained.
Args:
df: Pandas DataFrame to be assessed.
Returns:
NONE: All relevant tables are exported to CSV in the function.
Raises:
NONE
"""
def sigTest(G, varList, vType, df):
sigDict = {}
if vType == 'cat':
for v in varList:
T = pd.crosstab(index=df[G], columns=df[v],
margins=False, normalize=False)
chi2Stat, chi2p, _, _ = stats.chi2_contingency(T, correction=True)
cats = np.unique(df[v].dropna())
if len(cats) == 2:
LOR = np.log((T.iloc[0,0]*T.iloc[1,1])/(T.iloc[1,0]*T.iloc[0,1]))
SE = np.sqrt((1/T.iloc[0,0])+(1/T.iloc[1,0])+(1/T.iloc[0,1])+(1/T.iloc[1,1]))
CI = [np.exp(LOR-1.96*SE), np.exp(LOR+1.96*SE)]
OR = np.exp(LOR)
else:
OR = np.nan
CI = np.nan
sigDict[v] = [chi2p, chi2Stat, OR, CI]
elif vType == 'cont':
for v in varList:
if G == 'ExpGroups':
Gi = [1, 2]
elif G == 'T2_HCData':
Gi = [0, 1]
elif G == 'T2_poorCGI':
Gi = [0, 1]
cm = CompareMeans.from_data(df[v][(df[G] == Gi[0]) & (df[v].notna())],
df[v][(df[G] == Gi[1]) & (df[v].notna())])
tStat, tp, _ = cm.ttest_ind()
cohend, cohenstat = cohen_d(cm.d1.data, cm.d2.data)
sigDict[v] = [tp, tStat, cohend, cohenstat['cid']]
        sigT = pd.DataFrame.from_dict(sigDict, orient='index', columns=['p', 'stat', 'effect', 'effectCI'])
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = | tm.makeTimeSeries() | pandas.util.testing.makeTimeSeries |
import requests
from bs4 import BeautifulSoup
import pandas as pd
import datetime
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# Get all foodpanda orders
def get_foodpanda_orders(orders, cookie):
url = "https://www.foodpanda.sg/orders"
cookies = {
'cookie': cookie
}
resp = requests.get(url, cookies=cookies)
html = resp.content
soup = BeautifulSoup(html, 'html.parser')
order_entry = soup.findAll('li')
order_entry = list(set(order_entry))
for order in order_entry:
if order is not None:
order_plc_date = order.find("h4", {"class": "vendor-name"})
if order_plc_date is not None:
vendor = order_plc_date.getText().strip().replace("\n", "").split(",", 1)[0]
order_date = order_plc_date.find("span", {"class": "order-date"}).getText().strip()
order_date = datetime.datetime.strptime(order_date, "%d %b, %Y")
else:
continue
bill = order.find("div", {"class": "total"})
if bill is not None:
cost = bill.getText().replace(" ", "").replace("\n", "").split(':')[1]
contents = order.find("ul", {"class": "order-product-list"})
if contents is not None:
food = contents.getText().split("\n")
items = [i.strip() for i in food if i.strip()]
orders.append({'Restaurant': vendor, 'Date': order_date,
'Cost': cost, 'Items': items,
'Service': 'foodpanda'})
return orders
if __name__ == "__main__":
# Store all orders in a list
orders = []
# Store cookies for all your users in this python list
# If only 1 user then just place cookie for that user
cookies = ['cookies for user 1', 'cookies for user 2']
# Get foodpanda order for all users
for cookie in cookies:
foodpanda_orders = get_foodpanda_orders(orders, cookie)
# Convert orders to pandas dataframe
    df = pd.DataFrame(orders)
#!/usr/bin/env python3.6
import pandas as pd
from collections import defaultdict, Counter
import argparse
import sys
import os
import subprocess
import re
import numpy as np
from datetime import datetime
from itertools import chain
from pyranges import PyRanges
from SV_modules import *
pd.set_option('display.max_columns', None)
pd.set_option('display.expand_frame_repr', False)
pd.set_option('max_colwidth', None)
pd.options.display.max_rows = 999
class Namespace:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def createGeneSyndromeDict(database_df):
dict = defaultdict(list)
for var, hpo in database_df.itertuples(index=False): # var can either be gene or syndrome
dict[var].append(hpo)
return(dict)
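# createWeightDict reads a whitespace-separated file of "HPO_id weight" pairs (lines starting
# with '#' are ignored) and returns a dict mapping each HPO term to its numeric weight.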
def createWeightDict(weights):
try:
w_df = pd.read_csv(weights, sep = ' ', names=["HPO_id", "weight"], comment = '#')
except OSError:
print("Count not open/read the input file:" + weights)
sys.exit()
weightDict = dict(zip(w_df.HPO_id, w_df.weight))
return(weightDict)
def getClinicalPhenome(args):
# Get the clinical phenome and store as a set
try:
clinical_phenome = set(open("./results/" + args.sampleid + "/" + args.sampleid + "_hpo_inexact.txt").read().splitlines())
except OSError:
print("Count not open/read the input file:" + "./results/" + args.sampleid + "/" + args.sampleid + "_hpo_inexact.txt")
sys.exit()
return(clinical_phenome)
def calculateGeneSumScore(args, hpo_gene_dict, weightDict, clinical_phenome, omim_gene):
# Go through genes in genelist found in the patients
try:
genes = open("./results/" + args.sampleid + "/" + args.sampleid + "_gene_list.txt", 'r')
except OSError:
print("Count not open/read the input file:" + "./results/" + args.sampleid + "/" + args.sampleid + "_gene_list.txt")
sys.exit()
with genes:
gene = genes.read().splitlines()
gene_sum_score = 0
gene_score_result = pd.DataFrame(columns=['gene', 'score'])
for query in gene:
#print(query)
hpo_pheno = set(hpo_gene_dict[query]) # To get the phenotypic features for a given gene
overlap = hpo_pheno.intersection(clinical_phenome) # overlap all the phenotypic features with the clinical phenomes
for term in overlap:
gene_sum_score += weightDict[term]
gene_score_result = gene_score_result.append({'gene':query, 'score':gene_sum_score}, ignore_index=True)
gene_score_result_r = gene_score_result.iloc[::-1]
gene_score_result_r = pd.concat([gene_score_result_r, omim_gene])
gene_score_result_r = normalizeRawScore(args, gene_score_result_r, 'gene')
return(gene_score_result_r)
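# getParentsGeno: for every proband variant whose Start position is also present in the parent's
# overlapping-allele table, copy the parent's genotype (sum of the two alleles: 0/1/2) into the
# 'paternal' or 'maternal' column; positions not seen in that parent keep the initial value of 0.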
def getParentsGeno(filtered_intervar, inheritance_mode, ov_allele):
# Create two new columns and initialize to 0
filtered_intervar[inheritance_mode] = 0
filtered_intervar = filtered_intervar.reset_index(drop=True)
for idx, row in enumerate(filtered_intervar.itertuples(index=False)):
if int(getattr(row, 'Start')) in set(ov_allele['Start']):
#parents_geno = ov_allele.loc[ov_allele['Start'] == getattr(row, 'Start'), 'geno'].head(1)
#print(parents_geno)
parents_geno = ov_allele.loc[ov_allele['Start']==getattr(row,'Start'),'geno'].head(1).item()
filtered_intervar.loc[idx, inheritance_mode] = parents_geno
return(filtered_intervar)
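# rerankSmallVariant: parse the ClinVar and InterVar annotation strings, map each classification
# to a 1-5 score (benign=1, likely benign=2, conflicting/unknown=3, likely pathogenic=4,
# pathogenic=5), sum the two into Patho_score, sort by (Patho_score, phenotype score) descending,
# and drop the helper columns.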
def rerankSmallVariant(df):
df['Clinvar_idx'] = df.Clinvar.str[9:-1]
df['InterVar_idx'] = df.InterVar_InterVarandEvidence.str[10:].str.split('PVS1').str[0]
df[['Clinvar_idx', 'InterVar_idx']] = df[['Clinvar_idx', 'InterVar_idx']].apply(lambda x:x.astype(str).str.lower())
df['Clinvar_score'], df['InterVar_score'] = 3, 3
# Calculate Clinvar score
df.loc[(df['Clinvar_idx'].str.contains('benign')), 'Clinvar_score'] = 1
df.loc[((df['Clinvar_idx'].str.contains('benign')) & (df['Clinvar_idx'].str.contains('likely'))), 'Clinvar_score'] = 2
df.loc[(df['Clinvar_idx'].str.contains('pathogenic')), 'Clinvar_score'] = 5
df.loc[((df['Clinvar_idx'].str.contains('pathogenic')) & (df['Clinvar_idx'].str.contains('likely'))), 'Clinvar_score'] = 4
df.loc[(df['Clinvar_idx'].str.contains('conflicting')), 'Clinvar_score'] = 3
# Calculate Intervar score
df.loc[(df['InterVar_idx'].str.contains('benign')), 'InterVar_score'] = 1
df.loc[((df['InterVar_idx'].str.contains('benign')) & (df['InterVar_idx'].str.contains('likely'))), 'InterVar_score'] = 2
df.loc[(df['InterVar_idx'].str.contains('pathogenic')), 'InterVar_score'] = 5
df.loc[((df['InterVar_idx'].str.contains('pathogenic')) & (df['InterVar_idx'].str.contains('likely'))), 'InterVar_score'] = 4
# Add them up
df['Patho_score'] = df['Clinvar_score'] + df['InterVar_score']
# Sort by the total patho_score
df = df.sort_values(by=['Patho_score', 'score'], ascending=False)
df = df.drop(['Clinvar_idx', 'InterVar_idx', 'Clinvar_score', 'InterVar_score', 'Patho_score'], axis=1)
return df
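# smallVariantGeneOverlapCheckInheritance: restrict the phenotype-ranked genes to those carrying
# small variants in the proband, merge with the InterVar annotations, drop known artifact genes,
# write candidate and BED files, optionally extract parental genotypes with bcftools, and (for
# trios) split the candidates into recessive, dominant, de novo, compound-het and X-linked sets.
# Returns the reranked table (minus recessive hits when run on a trio).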
def smallVariantGeneOverlapCheckInheritance(args, smallVariantFile, interVarFinalFile, gene_score_result_r, famid):
# Overlap gene_score_result_r with small variants genes found in the proband
gene_score_result_r = gene_score_result_r[gene_score_result_r.gene.isin(smallVariantFile.gene)]
# Subset the intervar files further to store entries relevant to these set of genes
filtered_intervar = pd.merge(interVarFinalFile, gene_score_result_r, left_on='Ref_Gene', right_on='gene',how='inner')
# Remove common artifacts
try:
artifacts = pd.read_csv("./common_artifacts_20.txt", names = ["gene"])
filtered_intervar = filtered_intervar.loc[~filtered_intervar['Ref_Gene'].isin(artifacts['gene'])]
except OSError:
print("Could not open/read the input file: common_artifacts_20.txt")
sys.exit()
# If custom artifact bed file is provided, filter dataframe
if os.path.exists(args.artifact):
#print(filtered_intervar)
custom_artifact = pd.read_csv(args.artifact, sep='\t', usecols=[0, 2] ,names=["Chr", "End"])
keys = list(custom_artifact.columns.values)
i1 = filtered_intervar.set_index(keys).index
i2 = custom_artifact.set_index(keys).index
filtered_intervar = filtered_intervar.loc[~i1.isin(i2)]
# Create a bed file and write it out
pd.DataFrame(filtered_intervar).to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_smallVariant_candidates.txt', index=False, sep='\t',header=False) # Write out a subset of the variant first
filtered_intervar_bed = filtered_intervar[['Chr', 'Start', 'End']]
filtered_intervar_bed.loc[:,'Chr'] = 'chr' + filtered_intervar_bed.loc[:,'Chr'].astype(str)
filtered_intervar_bed.loc[:,'Start'] -= 1
pd.DataFrame(filtered_intervar_bed).to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_target.bed', index=False, sep='\t', header=False)
# Create two new columns and initialize to -1
# will later get overwritten to 0/1/2 if parents vcf files are provided
filtered_intervar['paternal'] = -1
filtered_intervar['maternal'] = -1
if args.type != 'singleton':
# Get overlapping variants from the parents so we know which variants are inherited
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Comparing small variants (SNPs/indels) inheritance')
cmd1 = "bcftools view -R ./results/" + args.sampleid + "/" + args.sampleid + "_target.bed " + args.fathervcf + " > ./results/" + args.sampleid + "/" + args.sampleid + "_paternal_inherited_smallVariants.vcf"
cmd2 = "bcftools view -R ./results/" + args.sampleid + "/" + args.sampleid + "_target.bed " + args.mothervcf + " > ./results/" + args.sampleid + "/" + args.sampleid + "_maternal_inherited_smallVariants.vcf"
if args.type == 'duo':
if args.father_duo:
cmds = [cmd1]
else:
cmds = [cmd2]
else:
cmds = [cmd1, cmd2]
for cmd in cmds:
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise Exception(stderr)
# Go through every row in filtered_intervar and see if the same variant is found in either of the parents
# We will only compare allele start position (we always assume the alt allele is the same)
if args.type=='trio' or args.father_duo:
try:
paternal_ov_allele = pd.read_csv("./results/" + args.sampleid + "/" + args.sampleid + "_paternal_inherited_smallVariants.vcf", sep='\t',usecols=[1,9], names=["Start", "geno"], comment='#')
paternal_ov_allele['geno'] = paternal_ov_allele['geno'].str[:1].astype(int) + paternal_ov_allele['geno'].str[2:3].astype(int)
filtered_intervar = getParentsGeno(filtered_intervar, 'paternal', paternal_ov_allele)
except OSError:
print("Could not open/read the input file: ./results/" + args.sampleid + "/" + args.sampleid + "_paternal_inherited_smallVariants.vcf")
sys.exit()
if args.type=="trio" or args.mother_duo:
try:
maternal_ov_allele = pd.read_csv("./results/" + args.sampleid + "/" + args.sampleid + "_maternal_inherited_smallVariants.vcf", sep='\t',usecols=[1,9], names=["Start", "geno"], comment='#')
maternal_ov_allele['geno'] = maternal_ov_allele['geno'].str[:1].astype(int) + maternal_ov_allele['geno'].str[2:3].astype(int)
filtered_intervar = getParentsGeno(filtered_intervar, 'maternal', maternal_ov_allele)
except OSError:
print("Could not open/read the input file: ./results/" + args.sampleid + "/" + args.sampleid + "_maternal_inherited_smallVariants.vcf")
sys.exit()
# Rerank variants based on reported or predicted pathogenicity
filtered_intervar = rerankSmallVariant(filtered_intervar)
if args.type=='trio':
# Divide the dataset into recessive, dominant, de novo, compound het
## Recessive
recessive = filtered_intervar[(filtered_intervar['paternal'] == 1) & (filtered_intervar['maternal'] == 1) & (filtered_intervar['Otherinfo'] == 'hom')]
## Dominant
dominant_inherited = filtered_intervar[((filtered_intervar['paternal'] == 1) & (filtered_intervar['maternal'] == 0)) | ((filtered_intervar['maternal'] == 1) & (filtered_intervar['paternal'] == 0))]
## De novo
denovo = filtered_intervar[(filtered_intervar['paternal'] == 0) & (filtered_intervar['maternal'] == 0)]
#Compound het
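# Keep het calls not seen as homozygous (genotype 2) in either parent, require at least two such
# variants in the same gene, then discard genes whose candidate variants all come from one parent.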
filtered_intervar_compoundhet = filtered_intervar[(filtered_intervar['Otherinfo'] == 'het')]
filtered_intervar_compoundhet = filtered_intervar_compoundhet[(filtered_intervar_compoundhet['maternal'] != 2) & (filtered_intervar_compoundhet['paternal'] != 2) & ((filtered_intervar_compoundhet['paternal'] == 1) & (filtered_intervar_compoundhet['maternal'] == 0)) | ((filtered_intervar_compoundhet['maternal'] == 1) & (filtered_intervar_compoundhet['paternal'] == 0)) | ((filtered_intervar_compoundhet['maternal'] == 0) & (filtered_intervar_compoundhet['paternal'] == 0))]
count = Counter(filtered_intervar_compoundhet['Ref_Gene'])
compoundhet_genes = [x for x, cnt in count.items() if cnt > 1]
compoundhet = filtered_intervar_compoundhet[filtered_intervar_compoundhet['Ref_Gene'].isin(compoundhet_genes)]
discard = []
for gene in compoundhet_genes:
df = compoundhet[compoundhet['Ref_Gene'].str.contains(gene)]
row_count = len(df.index)
col_list = ['paternal', 'maternal']
res = df[col_list].sum(axis=0)
if ((res[0] == 0) & (res[1] == row_count)) or (res[1] == 0 & (res[0] == row_count)):
discard.append(gene)
compoundhet = compoundhet[~compoundhet['Ref_Gene'].isin(discard)]
# Print all the variants according to inheritance mode
# Recessive
pd.DataFrame(recessive).to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_smallVariants_recessive_candidates.txt', index=False, sep='\t', header=True)
# Dominant
pd.DataFrame(dominant_inherited).to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_dominant_inherited_smallVariants_candidates.txt', index=False, sep='\t', header=True)
# De novo
pd.DataFrame(denovo).to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_smallVariants_denovo_candidates.txt', index=False, sep='\t', header=True)
# Compound het
pd.DataFrame(compoundhet).to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_smallVariants_compoundhet_candidates.txt', index=False, sep='\t', header=True)
if args.xlink:
xlink = filtered_intervar.loc[(filtered_intervar['maternal']!=2) & (filtered_intervar['paternal']==0) & (filtered_intervar['Chr'] == 'X')]
pd.DataFrame(xlink).to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_smallVariants_xlink_candidates.txt', index=False, sep='\t', header=True)
# All
filtered_intervar = rerankSmallVariant(filtered_intervar)
pd.DataFrame(filtered_intervar).to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_smallVariants_ALL_candidates.txt', index=False, sep='\t', header=True)
if args.type=='trio':
# We want to return everything except recessive variants
filtered_intervar = filtered_intervar.loc[~filtered_intervar['Start'].isin(recessive['Start'])] # don't have recessive if singleton or duo
return filtered_intervar
def differentialDiangosis(hpo_syndrome_dict, weightSyndromeDict, clinical_phenome, args, cyto_10x_del, cyto_10x_del_largeSV, cyto_10x_dup_largeSV, cyto_BN_del, cyto_BN_dup,hpo_syndromes_mim_df):
syndrome_score_result = pd.DataFrame(columns=['syndrome', 'score'])
# Check every syndrome and its overlapping hpo terms
for syndrome in hpo_syndrome_dict:
hpo_terms = set(hpo_syndrome_dict[syndrome])
score = 0
for term in hpo_terms:
if term in clinical_phenome:
score += weightSyndromeDict[term]
if score != 0:
syndrome_score_result = syndrome_score_result.append({'syndrome': syndrome, 'score': score}, ignore_index=True)
syndrome_score_result_r = syndrome_score_result.sort_values(by='score', ascending=False)
syndrome_score_result_r['syndrome'] = syndrome_score_result_r['syndrome'].str.upper()
# Add a normalized score column
syndrome_score_result_r = normalizeRawScore(args, syndrome_score_result_r, 'syndrome')
# Specifically look for deletion/duplication syndrome
delDupSyndrome(syndrome_score_result_r, args, cyto_10x_del, cyto_10x_del_largeSV, cyto_10x_dup_largeSV, cyto_BN_del, cyto_BN_dup, hpo_syndromes_mim_df)
return(syndrome_score_result_r)
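# findGenomicLocation: when a cytoband key has no exact entry in cytobandDict, collect all keys
# sharing the prefix (trimming the last character once if nothing matches) and return the
# min-max coordinate range across those bands as "start-end".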
def findGenomicLocation(cytoband_key, cytobandDict):
#print(cytoband_key)
keys = [key for key in cytobandDict if key.startswith(cytoband_key)]
#print(keys)
if len(keys)==0:
cytoband_key = cytoband_key[:-1]
keys = [key for key in cytobandDict if key.startswith(cytoband_key)]
genomic_coords_list = []
for key in keys:
genomic_coords_list.append(str(cytobandDict[key]).split('-'))
#print(genomic_coords_list)
genomic_coords_list = list(chain.from_iterable(genomic_coords_list))
min_coords = min(genomic_coords_list)
max_coords = max(genomic_coords_list)
genomic_range = str(min_coords) + '-' + str(max_coords)
return genomic_range
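# parseSyndromeNameToCytoband: for type 'deldup', extract the cytoband directly from the syndrome
# name with a regex; for type 'all', map syndromes to cytobands through their OMIM MIM number and
# morbidmap.txt. The cytoband strings are then normalised, split into start/stop bands, and
# converted to genomic Start/End coordinates via cytobandDict (falling back to
# findGenomicLocation for bands without an exact entry).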
def parseSyndromeNameToCytoband(df, cytobandDict, type, hpo_syndromes_mim_df,args):
if type=='deldup':
df['cytoband'] = float('Nan')
regex = r'((^|\W)[0-9XY]{1,2}[PQ]{1}[\w\\.\\-]{1,15}[\s$])'
for index, row in df.iterrows():
m = re.search(regex, str(row))
if m is not None:
df.loc[index, 'cytoband'] = m.group(1)
df.dropna(subset=['cytoband'], inplace=True)
if df.empty: # df can be empty after dropping NA
return pd.DataFrame()
if type=='all':
df = df.merge(hpo_syndromes_mim_df, on=['syndrome'])
try:
morbid = pd.read_csv(args.workdir + '/morbidmap.txt', sep='\t', usecols=[2, 3], names=["MIM", "cytoband"], comment='#')
df = df.merge(morbid, on='MIM')
df = df.loc[~df['cytoband'].astype(str).str.contains("Chr")]
end_string = ('p','q')
df = df.loc[~df['cytoband'].str.endswith(end_string)] #Remove cytoband entries that span the whole chromosomal arm like 2p
except OSError:
print("Could not open/read the input file: " + args.workdir + '/morbidmap.txt')
sys.exit()
df['cytoband'] = df['cytoband'].astype(str).str.lower()
df['cytoband'] = df['cytoband'].str.replace('x', 'X')
df['cytoband'] = df['cytoband'].str.replace('y', 'Y')
df['cytoband'] = df['cytoband'].str.strip('\(\)')
df[['Chromosome', 'discard']] = df.cytoband.str.split('p|q', 1, expand=True)
df = df.drop('discard', axis=1)
if df.cytoband.str.contains('-').any():
df[['cytoband_start', 'cytoband_stop']] = df.cytoband.str.split('-', expand=True)
else:
df['cytoband_start'] = df.cytoband
df['cytoband_stop'] = None
df['arm'] = np.where(df['cytoband_start'].str.contains('p'), 'p', 'q')
df['cytoband_stop'] = np.where(df['cytoband_start'].str.count('p|q')>1, df['arm'] + df['cytoband_start'].str.split('p|q').str[2], df['cytoband_stop'])
df['cytoband_start'] = np.where(df['cytoband_start'].str.count('p|q')>1, df['cytoband_start'].str.split('p|q').str[0] + df['arm'] + df['cytoband_start'].str.split('p|q').str[1], df['cytoband_start'])
for idx, row in df.iterrows():
cytoband_start_key = row['cytoband_start'].replace(" ","")
if cytoband_start_key in cytobandDict:
coords_start = cytobandDict[cytoband_start_key]
else:
genomic_range = findGenomicLocation(cytoband_start_key, cytobandDict)
coords_start = genomic_range
if row['cytoband_stop'] is not None: # Fix cytoband_stop column for quick cytobandDict lookup
current_chr = np.where(('p' in str(row['cytoband_stop'])) or ('q' in str(row['cytoband_stop'])), str(row['Chromosome']), str(row['Chromosome']) + str(row['arm']))
edited_cytoband_stop = str(current_chr) + row['cytoband_stop']
edited_cytoband_stop = edited_cytoband_stop.replace(" ", "")
df.at[idx, 'cytoband_stop'] = edited_cytoband_stop
if edited_cytoband_stop in cytobandDict:
coords_stop = cytobandDict[edited_cytoband_stop]
else:
genomic_range = findGenomicLocation(edited_cytoband_stop, cytobandDict)
coords_stop = genomic_range
# New coords will be the beginning of coords_start and the end of coords_stop
df.at[idx, 'Start'] = coords_start.split('-')[0]
df.at[idx, 'End'] = coords_stop.split('-')[1]
else:
df.at[idx, 'Start'] = coords_start.split('-')[0]
df.at[idx, 'End'] = coords_start.split('-')[1]
return df
def createCytobandDict(args):
try:
cyto = pd.read_csv(args.workdir + '/cytoband.txt', sep = '\t', names=["cytoband", "coords"], comment = '#')
except OSError:
print("Count not open/read the input file:" + args.workdir + '/cytoband.txt')
sys.exit()
cytobandDict = dict(zip(cyto.cytoband, cyto.coords))
return(cytobandDict)
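# delDupSyndrome: write the ranked syndrome table, build the cytoband lookup, split deletion and
# duplication syndromes by name, convert them to genomic intervals, and intersect them with the
# Bionano and/or linked-read SV calls; when both platforms are available, the overlap of the two
# call sets is written to the confident_set outputs.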
def delDupSyndrome(syndrome_score_result_r, args, cyto_10x_del, cyto_10x_del_largeSV, cyto_10x_dup_largeSV, cyto_BN_del, cyto_BN_dup, hpo_syndromes_mim_df):
#print(syndrome_score_result_r)
syndrome_score_result_r.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_syndrome_score_result_r.txt', sep='\t', index=False)
# Create cytoband <-> genomic coordinates dict
cytobandDict = createCytobandDict(args)
del_cond = syndrome_score_result_r['syndrome'].str.contains('DELETION')
dup_cond = syndrome_score_result_r['syndrome'].str.contains('DUPLICATION')
del_df = syndrome_score_result_r[del_cond]
dup_df = syndrome_score_result_r[dup_cond]
del_df = parseSyndromeNameToCytoband(del_df, cytobandDict,'deldup',hpo_syndromes_mim_df, args)
dup_df = parseSyndromeNameToCytoband(dup_df, cytobandDict,'deldup',hpo_syndromes_mim_df, args)
all_omim_syndromes = parseSyndromeNameToCytoband(syndrome_score_result_r, cytobandDict,'all', hpo_syndromes_mim_df, args)
if args.bionano:
cols = ['Chromosome', 'Start', 'End', 'SmapEntryID', 'Confidence', 'Type', 'Zygosity', 'Genotype', 'SV_size', 'Found_in_Father', 'Found_in_Mother', 'syndrome', 'cytoband', 'score', 'normalized_score']
# Overlap with del/dup syndromes
if cyto_BN_dup is not None: # It can be None because old Bionano pipeline doesn't call duplications...
# dup_df.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_input1.txt', sep='\t', index=False)
# cyto_BN_dup.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_input2.txt', sep='\t',index=False)
# cyto_10x_dup_largeSV.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_input2.txt', sep='\t',index=False)
overlap_dup_BN = delDupSyndromeSVOverlap(dup_df, cyto_BN_dup, cols)
overlap_dup_BN.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_Bionano_duplication_syndrome.txt', sep='\t', index=False)
else:
overlap_dup_BN = None
pd.DataFrame().to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_Bionano_duplication_syndrome.txt', sep='\t', index=False)
overlap_del_BN = delDupSyndromeSVOverlap(del_df, cyto_BN_del, cols)
overlap_del_BN.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_Bionano_deletion_syndrome.txt', sep='\t', index=False)
all_BN = pd.concat([cyto_BN_dup, cyto_BN_del], ignore_index=True)
overlap_all_BN = delDupSyndromeSVOverlap(all_omim_syndromes, all_BN, cols)
overlap_all_BN.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_Bionano_all_syndrome.txt', sep='\t', index=False)
if args.linkedreadSV:
cols = ['Chromosome', 'Start', 'End', 'ID', 'REF', 'ALT_1', 'QUAL', 'FILTER_PASS', 'SVLEN', 'Found_in_Father', 'Found_in_Mother', 'syndrome', 'cytoband', 'score', 'normalized_score']
# dup_df.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_input1.txt', sep='\t', index=False)
# cyto_10x_dup_largeSV.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_input2.txt', sep='\t', index=False)
overlap_dup_largeSV_10x = delDupSyndromeSVOverlap(dup_df, cyto_10x_dup_largeSV, cols)
overlap_dup_largeSV_10x.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_10x_duplication_largeSV_syndrome.txt', sep='\t', index=False)
overlap_del_largeSV_10x = delDupSyndromeSVOverlap(del_df, cyto_10x_del_largeSV, cols)
overlap_del_largeSV_10x.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_10x_deletion_largeSV_syndrome.txt', sep='\t', index=False)
overlap_del_10x = delDupSyndromeSVOverlap(del_df, cyto_10x_del, cols)
overlap_del_10x.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_10x_deletion_syndrome.txt', sep='\t', index=False)
all_10x = pd.concat([cyto_10x_dup_largeSV, cyto_10x_del_largeSV, cyto_10x_del], ignore_index=True)
overlap_all_10x = delDupSyndromeSVOverlap(all_omim_syndromes, all_10x, cols)
overlap_all_10x.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_10x_all_syndrome.txt', sep='\t', index=False)
if args.linkedreadSV and args.bionano:
cols = ['Chromosome', 'Start', 'End', 'ID', 'REF', 'ALT_1', 'QUAL', 'FILTER_PASS', 'SVLEN', 'Found_in_Father', 'Found_in_Mother', 'syndrome', 'cytoband', 'SmapEntryID', 'Confidence', 'Type', 'Zygosity', 'Genotype', 'SV_size', 'Found_in_Father_b', 'Found_in_Mother_b', 'score', 'normalized_score',]
# syndrome appearing in both 10x and bionano --> confident set
## for duplications
if ((overlap_dup_BN is not None) and (not overlap_dup_BN.empty) and (not overlap_dup_largeSV_10x.empty)):
overlap_dup_largeSV_10x = overlap_dup_largeSV_10x.loc[overlap_dup_largeSV_10x['SVLEN'] >= 1000]
confident_dup_syndrome = delDupSyndromeSVOverlap(overlap_dup_largeSV_10x, overlap_dup_BN, cols)
if not confident_dup_syndrome.empty:
confident_dup_syndrome.to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_confident_duplication_syndrome.txt', sep='\t',index=False)
else: # Write an empty dataframe
pd.DataFrame().to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_confident_duplication_syndrome.txt', sep='\t',index=False)
## for deletions
del_10x = pd.concat([overlap_del_largeSV_10x, overlap_del_10x])
if ((not overlap_del_BN.empty) and (not del_10x.empty)):
del_10x = del_10x.loc[del_10x['SVLEN'] <= (-1000)]
confidnet_del_syndrome = delDupSyndromeSVOverlap(del_10x, overlap_del_BN, cols)
#confidnet_del_syndrome = pd.merge(del_10x, overlap_del_BN, on='syndrome', how='inner')
if not confidnet_del_syndrome.empty:
confidnet_del_syndrome.to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_confident_deletion_syndrome.txt', sep='\t',index=False)
else:
pd.DataFrame().to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_confident_deletion_syndrome.txt', sep='\t',index=False)
# for all omim syndromes
if ((not overlap_all_BN.empty) and (not overlap_all_10x.empty)):
overlap_all_10x = overlap_all_10x.loc[(overlap_all_10x['SVLEN'] <= (-1000)) | (overlap_all_10x['SVLEN'] >=1000)]
confident_all_syndrome = delDupSyndromeSVOverlap(overlap_all_10x, overlap_all_BN, cols)
if not confident_all_syndrome.empty:
confident_all_syndrome.to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_confident_all_syndrome.txt', sep='\t',index=False)
else:
pd.DataFrame().to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_confident_all_syndrome.txt', sep='\t',index=False)
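# delDupSyndromeSVOverlap: PyRanges join of the SV calls against the syndrome intervals; keeps
# only pairs with a positive overlap length, sorts them by phenotype score, and returns the
# de-duplicated columns listed in `cols`.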
def delDupSyndromeSVOverlap(del_df, cyto_BN_del, cols):
if del_df.empty:
return pd.DataFrame()
del_df['Chromosome'] = del_df['Chromosome'].str.strip()
if 'cytoband_stop' in list(del_df.columns):
del_df = del_df.drop(['cytoband_start','cytoband_stop'], axis=1)
del_df.dropna( inplace=True)
overlap_del_BN = PyRanges(cyto_BN_del).join(PyRanges(del_df))
if not overlap_del_BN.df.empty:
overlap_del_BN = overlap_del_BN.df
overlap_del_BN['overlap_len'] = np.maximum(0, np.minimum(overlap_del_BN.End, overlap_del_BN.End_b) - np.maximum(overlap_del_BN.Start,overlap_del_BN.Start_b))
#overlap_del_BN = overlap_del_BN.drop(like="_b")
overlap_del_BN = overlap_del_BN.sort_values(by='score', ascending=False)
overlap_del_BN = overlap_del_BN.loc[overlap_del_BN['overlap_len'] > 0]
# print(overlap_del_BN)
#overlap_del_BN = overlap_del_BN.df.sort_values(by='score', ascending=False)
# Rearrange the column
overlap_del_BN = overlap_del_BN[cols].drop_duplicates()
return overlap_del_BN
else:
return overlap_del_BN.df
def normalizeRawScore(args, raw_score, mode):
# Normalize all the scores to 1-100
max_score = max(raw_score['score'])
raw_score.loc[:,'normalized_score'] = raw_score.loc[:,'score']/max_score * 100
return(raw_score)
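# compileControlFiles: walk each control directory and collect every sample path except the
# family of interest (IDs matching BC<famid>..) and all probands (IDs matching BC...03 or BC...04).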
def compileControlFiles(control_files_path, famid):
full_paths = []
for path in control_files_path:
control_files = os.listdir(path)
for file in control_files:
if not (re.match('BC...0[34]{1}', file) or re.match(rf"BC{famid}..", file)): # Discard trio of interest and all probands
full_paths.append(os.path.join(path, file))
full_paths.append(os.path.join(path, file))
return full_paths
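# bionanoSV: concatenate the control smap files into bionano_control.smap.gz, build a Namespace
# pointing at the proband and parental smaps, run the translocation / deletion / insertion /
# duplication / inversion callers from SV_modules, and finally cross-reference exon-level del/dup
# calls with the small-variant table to flag potential SV + SNV/indel compound hets.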
def bionanoSV(args, famid, gene_score_result_r, all_small_variants):
# Generate control files (1KGP BN samples + CIAPM parents, excluding parents of the proband of interest)
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Generating bionano control file...')
control_files_path = [args.workdir + "/bionano_sv/controls/DLE", args.workdir + "/bionano_sv/controls/BspQI", args.workdir + "/bionano_sv/cases/DLE", args.workdir + "/bionano_sv/cases/BspQI"]
full_paths = compileControlFiles(control_files_path, famid)
## Write an empty file
with open(args.workdir + "/results/" + args.sampleid + "/bionano_control.smap.gz", 'w'): # So it will overwrite the old file
pass
for path in full_paths:
cmd = "cat " + path + "/exp_refineFinal1_merged_filter.smap | gzip >> " + args.workdir + "/results/" + args.sampleid + "/bionano_control.smap.gz"
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise Exception(stderr)
# Create a BN arg object
BN_args = Namespace(sampleID = args.sampleid,
samplepath = args.workdir + "/bionano_sv/cases/" + args.enzyme + "/" + args.sampleid + "/exp_refineFinal1_merged_filter.smap",
fpath = args.workdir + "/bionano_sv/cases/" + args.enzyme + "/BC" + famid + "01/exp_refineFinal1_merged_filter.smap",
mpath = args.workdir + "/bionano_sv/cases/" + args.enzyme + "/BC" + famid + "02/exp_refineFinal1_merged_filter.smap",
referencepath = args.workdir + "/results/" + args.sampleid + "/bionano_control.smap.gz",
outputdirectory = args.workdir + '/results/' + args.sampleid,
exons = args.workdir + '/annotatedExon.bed',
genes=args.workdir + '/annotatedGene.bed',
genelist = gene_score_result_r,
type = args.type,
father_duo = args.father_duo,
mother_duo = args.mother_duo)
# Call bionano translocation
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Detecting bionano translocations on ' + args.sampleid + '...')
BN_translocation(BN_args)
# Call bionano deletion
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Detecting bionano deletions on ' + args.sampleid + '...')
cyto_BN_del, exon_calls_BN_del = BN_deletion(BN_args)
# Call bionano insertion
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Detecting bionano insertions on ' + args.sampleid + '...')
BN_insertion(BN_args)
# Call bionano duplications
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Detecting bionano duplications on ' + args.sampleid + '...')
cyto_BN_dup, exon_calls_BN_dup = BN_duplication(BN_args)
# Call bionano inversions
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Detecting bionano inversions on ' + args.sampleid + '...')
BN_inversion(BN_args)
# Check potential compoundhets with SNPs and indels
BN_exons = pd.concat([exon_calls_BN_del, exon_calls_BN_dup])
if BN_exons.empty:
pd.DataFrame().to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_Bionano_SV_SNPsIndels_compoundhet_candidates.txt', sep='\t', index=False)
else:
BN_exons = pd.merge(BN_exons, all_small_variants, left_on='gene', right_on='Ref_Gene', how='inner')
BN_exons.to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_Bionano_SV_SNPsIndels_compoundhet_candidates.txt', sep='\t', index=False)
return cyto_BN_del, cyto_BN_dup, exon_calls_BN_del, exon_calls_BN_dup
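# linkedreadSV: the same idea for 10x linked-read VCFs; build control files for medium deletions
# (dels.vcf.gz) and large SVs (large_svs.vcf.gz), run the tenx* callers for deletions,
# duplications, inversions, breakends and unknown calls, and check SV + SNV/indel compound hets.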
def linkedreadSV(args, famid, gene_score_result_r, all_small_variants):
# Need to generate a reference file for all the medium size deletions
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Generating linked-reads control files...')
control_files_path = [args.workdir + "/linkedRead_sv/controls", args.workdir + "/linkedRead_sv/cases"]
full_paths = compileControlFiles(control_files_path, famid)
## Write an empty file
with open(args.workdir + "/results/" + args.sampleid + "/10x_del_control.vcf.gz",'w'): # So it will overwrite the old file
pass
for path in full_paths:
cmd = "zcat " + path + "/dels.vcf.gz | gzip >> " + args.workdir + "/results/" + args.sampleid + "/10x_del_control.vcf.gz"
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise Exception(stderr)
# Need to generate another reference file for large SVs
with open(args.workdir + "/results/" + args.sampleid + "/10x_largeSV_control.vcf.gz",'w'): # So it will overwrite the old file
pass
for path in full_paths:
cmd = "zcat " + path + "/large_svs.vcf.gz | gzip >> " + args.workdir + "/results/" + args.sampleid + "/10x_largeSV_control.vcf.gz"
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise Exception(stderr)
tenx_args_del = Namespace(sampleID = args.sampleid,
samplepath = args.workdir + "/linkedRead_sv/cases/" + args.sampleid + "/dels.vcf.gz",
fpath = args.workdir + "/linkedRead_sv/cases/BC" + famid + "01/dels.vcf.gz",
mpath = args.workdir + "/linkedRead_sv/cases/BC" + famid + "02/dels.vcf.gz",
referencepath = args.workdir + "/results/" + args.sampleid + "/10x_del_control.vcf.gz",
outputdirectory = args.workdir + '/results/' + args.sampleid,
exons = args.workdir + '/annotatedExon.bed',
genes = args.workdir + '/annotatedGene.bed',
genelist = gene_score_result_r,
type = args.type,
father_duo = args.father_duo,
mother_duo = args.mother_duo)
tenx_args_largeSV = Namespace(sampleID = args.sampleid,
samplepath = args.workdir + "/linkedRead_sv/cases/" + args.sampleid + "/large_svs.vcf.gz",
fpath = args.workdir + "/linkedRead_sv/cases/BC" + famid + "01/large_svs.vcf.gz",
mpath = args.workdir + "/linkedRead_sv/cases/BC" + famid + "02/large_svs.vcf.gz",
referencepath = args.workdir + "/results/" + args.sampleid + "/10x_largeSV_control.vcf.gz",
outputdirectory = args.workdir + '/results/' + args.sampleid,
exons = args.workdir + '/annotatedExon.bed',
genes=args.workdir + '/annotatedGene.bed',
genelist = gene_score_result_r,
type = args.type,
father_duo = args.father_duo,
mother_duo = args.mother_duo)
# Call medium size deletions
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Detecting linked-reads medium deletions on ' + args.sampleid + '...')
cyto_10x_del, exon_calls_10x_del = tenxdeletions(tenx_args_del)
# Call large deletions
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Detecting linked-reads large deletions on ' + args.sampleid + '...')
cyto_10x_del_largeSV, exon_calls_10x_largeSV_del = tenxlargesvdeletions(tenx_args_largeSV)
# Call large duplications
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Detecting linked-reads large duplications on ' + args.sampleid + '...')
cyto_10x_dup_largeSV, exon_calls_10x_largeSV_dup = tenxlargesvduplications(tenx_args_largeSV)
# Call large inversions
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Detecting linked-reads large inversions on ' + args.sampleid + '...')
tenxlargesvinversions(tenx_args_largeSV)
# Call large breakends
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Detecting linked-reads large breakends on ' + args.sampleid + '...')
tenxlargesvbreakends(tenx_args_largeSV)
# Call large unknown calls
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Detecting linked-reads large unknown on ' + args.sampleid + '...')
tenxlargesvunknown(tenx_args_largeSV)
# Check potential compoundhets with SNPs and indels
tenx_exons = pd.concat([exon_calls_10x_del, exon_calls_10x_largeSV_del, exon_calls_10x_largeSV_dup])
if tenx_exons.empty:
pd.DataFrame().to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_10x_SV_SNPsIndels_compoundhet_candidates.txt', sep='\t', index=False)
else:
tenx_exons = pd.merge(tenx_exons, all_small_variants, left_on='gene', right_on='Ref_Gene', how='inner')
tenx_exons.to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_10x_SV_SNPsIndels_compoundhet_candidates.txt', sep='\t', index=False)
return cyto_10x_del, cyto_10x_del_largeSV, cyto_10x_dup_largeSV, exon_calls_10x_del, exon_calls_10x_largeSV_del, exon_calls_10x_largeSV_dup
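# pyrangeJoin: standardise both frames to Chromosome/Start/End, join them with PyRanges, compute
# the overlap length, and return only the truly overlapping pairs (helper coordinate columns are
# dropped from the result).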
def pyrangeJoin(df1_10x, df2_BN):
if df1_10x.empty or df2_BN.empty:
return pd.DataFrame()
df1_10x['Chromosome'], df1_10x['Start'], df1_10x['End'] = [df1_10x['CHROM'], df1_10x['POS'], df1_10x['END']]
df2_BN['Chromosome'], df2_BN['Start'], df2_BN['End'] = [df2_BN['RefcontigID1'], df2_BN['RefStartPos'], df2_BN['RefEndPos']]
overlap = PyRanges(df1_10x).join(PyRanges(df2_BN))
#print(overlap)
if not overlap.df.empty:
overlap = overlap.df
overlap['overlap_len'] = np.maximum(0, np.minimum(overlap.End, overlap.End_b) - np.maximum(overlap.Start,overlap.Start_b))
#overlap = overlap.drop(like="_b")
overlap = overlap.drop(['Chromosome', 'Start', 'End'], axis = 1)
overlap = overlap.loc[overlap['overlap_len'] > 0]
return overlap
else:
return overlap.df
def findConfDelDup(args, exon_calls_10x_del, exon_calls_10x_largeSV_del, exon_calls_10x_largeSV_dup, exon_calls_BN_del, exon_calls_BN_dup):
tenx_del = pd.concat([exon_calls_10x_del, exon_calls_10x_largeSV_del])
overlap_del_10x_BN = pyrangeJoin(tenx_del, exon_calls_BN_del)
overlap_del_10x_BN.to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_confident_deletion_exons.txt', sep='\t', index=False)
if exon_calls_BN_dup is not None: # some bionano assemblies were generated with old pipelines
overlap_dup_10x_BN = pyrangeJoin(exon_calls_10x_largeSV_dup, exon_calls_BN_dup)
overlap_dup_10x_BN.to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_confident_duplication_exons.txt', sep='\t', index=False)
else:
| pd.DataFrame() | pandas.DataFrame |
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
# #find parent directory and import model
# parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parent_dir)
from ..trex_exe import Trex
test = {}
class TestTrex(unittest.TestCase):
"""
Unit tests for T-Rex model.
"""
print("trex unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for trex unit tests.
:return:
"""
pass
# setup the test as needed
# e.g. pandas to open trex qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for trex unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_trex_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty trex object
trex_empty = Trex(df_empty, df_empty)
return trex_empty
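# Each unit test below follows the same pattern: build an empty Trex object, attach the required
# inputs as pandas Series, call the method under test, and compare against hand-computed expected
# values with npt.assert_allclose; the finally block prints a tabulated result/expected comparison.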
def test_app_rate_parsing(self):
"""
unittest for function app_rate_parsing:
method extracts the first and maximum application rate from each list in a series of lists of app rates
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([], dtype="object")
result = pd.Series([], dtype="object")
expected_results = [[0.34, 0.78, 2.34], [0.34, 3.54, 2.34]]
try:
trex_empty.app_rates = pd.Series([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]], dtype='object')
# trex_empty.app_rates = ([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]])
# parse app_rates Series of lists
trex_empty.app_rate_parsing()
result = [trex_empty.first_app_rate, trex_empty.max_app_rate]
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial(self):
"""
unittest for function conc_initial:
conc_0 = (app_rate * self.frac_act_ing * food_multiplier)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [12.7160, 9.8280, 11.2320]
try:
# specify an app_rates Series (that is a series of lists, each list representing
# a set of application rates for 'a' model simulation)
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.food_multiplier_init_sg = pd.Series([110., 15., 240.], dtype='float')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
for i in range(len(trex_empty.frac_act_ing)):
result[i] = trex_empty.conc_initial(i, trex_empty.app_rates[i][0], trex_empty.food_multiplier_init_sg[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_timestep(self):
"""
unittest for function conc_timestep:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [6.25e-5, 0.039685, 7.8886e-30]
try:
trex_empty.foliar_diss_hlife = pd.Series([.25, 0.75, 0.01], dtype='float')
conc_0 = pd.Series([0.001, 0.1, 10.0])
for i in range(len(conc_0)):
result[i] = trex_empty.conc_timestep(i, conc_0[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_percent_to_frac(self):
"""
unittest for function percent_to_frac:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([.04556, .1034, .9389], dtype='float')
try:
trex_empty.percent_incorp = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.percent_to_frac(trex_empty.percent_incorp)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_inches_to_feet(self):
"""
unittest for function inches_to_feet:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([0.37966, 0.86166, 7.82416], dtype='float')
try:
trex_empty.bandwidth = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.inches_to_feet(trex_empty.bandwidth)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird(self):
"""
unittest for function at_bird:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
# following variable is unique to at_bird and is thus sent via arg list
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
for i in range(len(trex_empty.aw_bird_sm)):
result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird1(self):
"""
unittest for function at_bird1; alternative approach using more vectorization:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
# for i in range(len(trex_empty.aw_bird_sm)):
# result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
result = trex_empty.at_bird1(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fi_bird(self):
"""
unittest for function fi_bird:
food_intake = (0.648 * (aw_bird ** 0.651)) / (1 - mf_w_bird)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.19728, 22.7780, 59.31724], dtype='float')
try:
#?? 'mf_w_bird_1' is a constant (i.e., not an input whose value changes per model simulation run); thus it should
#?? be specified here as a constant and not a pd.series -- if this is correct then go ahead and change next line
trex_empty.mf_w_bird_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.fi_bird(trex_empty.aw_bird_sm, trex_empty.mf_w_bird_1)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_sc_bird(self):
"""
unittest for function sc_bird:
m_s_a_r = ((self.app_rate * self.frac_act_ing) / 128) * self.density * 10000 # maximum seed application rate=application rate*10000
risk_quotient = m_s_a_r / self.noaec_bird
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([6.637969, 77.805, 34.96289, np.nan], dtype='float')
try:
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4], [3.]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.frac_act_ing = pd.Series([0.15, 0.20, 0.34, np.nan], dtype='float')
trex_empty.density = pd.Series([8.33, 7.98, 6.75, np.nan], dtype='float')
trex_empty.noaec_bird = pd.Series([5., 1.25, 12., np.nan], dtype='float')
result = trex_empty.sc_bird()
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_sa_bird_1(self):
"""
# unit test for function sa_bird_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm = pd.Series([0.228229, 0.704098, 0.145205], dtype = 'float')
expected_results_md = pd.Series([0.126646, 0.540822, 0.052285], dtype = 'float')
expected_results_lg = pd.Series([0.037707, 0.269804, 0.01199], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_bird_1 = 0.1
trex_empty.nagy_bird_coef_sm = 0.02
trex_empty.nagy_bird_coef_md = 0.1
trex_empty.nagy_bird_coef_lg = 1.0
result_sm = trex_empty.sa_bird_1("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_bird_1("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_bird_1("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_bird_2(self):
"""
# unit test for function sa_bird_2
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([0.018832, 0.029030, 0.010483], dtype = 'float')
expected_results_md = pd.Series([2.774856e-3, 6.945353e-3, 1.453192e-3], dtype = 'float')
expected_results_lg =pd.Series([2.001591e-4, 8.602729e-4, 8.66163e-5], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
trex_empty.max_seed_rate = pd.Series([33.19, 20.0, 45.6])
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.nagy_bird_coef_sm = 0.02
trex_empty.nagy_bird_coef_md = 0.1
trex_empty.nagy_bird_coef_lg = 1.0
result_sm = trex_empty.sa_bird_2("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_bird_2("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_bird_2("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_mamm_1(self):
"""
# unit test for function sa_mamm_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([0.022593, 0.555799, 0.010178], dtype = 'float')
expected_results_md = pd.Series([0.019298, 0.460911, 0.00376], dtype = 'float')
expected_results_lg =pd.Series([0.010471, 0.204631, 0.002715], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.ld50_mamm = pd.Series([321., 100., 400.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_bird_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sa_mamm_1("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_mamm_1("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_mamm_1("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_mamm_2(self):
"""
# unit test for function sa_mamm_2
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([2.46206e-3, 3.103179e-2, 1.03076e-3], dtype = 'float')
expected_results_md = pd.Series([1.304116e-3, 1.628829e-2, 4.220702e-4], dtype = 'float')
expected_results_lg =pd.Series([1.0592147e-4, 1.24391489e-3, 3.74263186e-5], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
trex_empty.max_seed_rate = pd.Series([33.19, 20.0, 45.6])
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.ld50_mamm = pd.Series([321., 100., 400.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_mamm_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sa_mamm_2("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_mamm_2("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_mamm_2("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sc_mamm(self):
"""
# unit test for function sc_mamm
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([2.90089, 15.87995, 8.142130], dtype = 'float')
expected_results_md = pd.Series([2.477926, 13.16889, 3.008207], dtype = 'float')
expected_results_lg =pd.Series([1.344461, 5.846592, 2.172211], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.noael_mamm = pd.Series([2.5, 3.5, 0.5], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_mamm_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sc_mamm("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sc_mamm("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sc_mamm("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_bird(self):
"""
# unit test for function ld50_rg_bird (LD50ft-2 for Row/Band/In-furrow granular birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([346.4856, 25.94132, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_rg_bird
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'max app rate' per model simulation run
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rg_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
equal_nan=True, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_bird1(self):
"""
# unit test for function ld50_rg_bird1 (LD50ft-2 for Row/Band/In-furrow granular birds)
this is a duplicate of the 'test_ld50_rg_bird' method using a more vectorized approach to the
calculations; if desired, other routines could be modified similarly.
Comparing this method with 'test_ld50_rg_bird', it appears (for this test) that both run in the same
time, but that is unlikely to hold when hundreds of model simulation runs are executed (and only a
small number of the application_types apply to this method); thus we continue to use the
non-vectorized approach. This should be revisited when we have a large run to execute.
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([346.4856, 25.94132, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_rg_bird
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'max app rate' per model simulation run
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rg_bird1(trex_empty.aw_bird_sm)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, equal_nan=True, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
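    # Hedged sketch of the kind of vectorization the docstring above refers to; this is
    # illustrative only and not necessarily the model's actual implementation (names are assumptions):
    #   mask = trex_empty.application_type == 'Row/Band/In-furrow-Granular'
    #   result = pd.Series(np.nan, index=mask.index, dtype='float')
    #   result[mask] = per_row_ld50_ft2[mask]   # per_row_ld50_ft2: the LD50 ft-2 expression evaluated per row
    # Non-matching rows stay NaN, which is consistent with the np.nan expected above.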
def test_ld50_bl_bird(self):
"""
# unit test for function ld50_bl_bird (LD50ft-2 for broadcast liquid birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([46.19808, 33.77777, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bl_bird
trex_empty.application_type = pd.Series(['Broadcast-Liquid', 'Broadcast-Liquid',
'Non-Broadcast'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bl_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bg_bird(self):
"""
# unit test for function ld50_bg_bird (LD50ft-2 for broadcast granular)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([46.19808, np.nan, 0.4214033], dtype='float')
try:
# following parameter values are unique for ld50_bg_bird
trex_empty.application_type = pd.Series(['Broadcast-Granular', 'Broadcast-Liquid',
'Broadcast-Granular'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bg_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rl_bird(self):
"""
# unit test for function ld50_rl_bird (LD50ft-2 for Row/Band/In-furrow liquid birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([np.nan, 2.20701, 0.0363297], dtype='float')
try:
# following parameter values are unique for ld50_rl_bird
trex_empty.application_type = pd.Series(['Broadcast-Granular', 'Row/Band/In-furrow-Liquid',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rl_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_mamm(self):
"""
unittest for function at_mamm:
adjusted_toxicity = self.ld50_mamm * ((self.tw_mamm / aw_mamm) ** 0.25)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([705.5036, 529.5517, 830.6143], dtype='float')
try:
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
for i in range(len(trex_empty.ld50_mamm)):
result[i] = trex_empty.at_mamm(i, trex_empty.aw_mamm_sm[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
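    # Worked check of the at_mamm formula quoted in the docstring above, using the first
    # test row (ld50_mamm=321., tw_mamm=350., aw_mamm_sm=15.); this is an illustrative
    # recomputation only, not part of the TRex API:
    #   >>> 321. * (350. / 15.) ** 0.25
    #   705.503...   # matches expected_results[0] = 705.5036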
def test_anoael_mamm(self):
"""
unittest for function anoael_mamm:
adjusted_toxicity = self.noael_mamm * ((self.tw_mamm / aw_mamm) ** 0.25)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([5.49457, 9.62821, 2.403398], dtype='float')
try:
trex_empty.noael_mamm = pd.Series([2.5, 5.0, 1.25], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.anoael_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fi_mamm(self):
"""
unittest for function fi_mamm:
food_intake = (0.621 * (aw_mamm ** 0.564)) / (1 - mf_w_mamm)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([3.17807, 16.8206, 42.28516], dtype='float')
try:
trex_empty.mf_w_mamm_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.fi_mamm(trex_empty.aw_mamm_sm, trex_empty.mf_w_mamm_1)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
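    # Worked check of the fi_mamm formula quoted in the docstring above, using the first
    # test row (aw_mamm_sm=15., mf_w_mamm_1=0.1); illustrative recomputation only:
    #   >>> (0.621 * (15. ** 0.564)) / (1. - 0.1)
    #   3.178...   # matches expected_results[0] = 3.17807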
def test_ld50_bl_mamm(self):
"""
# unit test for function ld50_bl_mamm (LD50ft-2 for broadcast liquid)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.52983, 9.36547, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bl_mamm
trex_empty.application_type = pd.Series(['Broadcast-Liquid', 'Broadcast-Liquid',
'Non-Broadcast'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "test_at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bl_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='',
verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bg_mamm(self):
"""
# unit test for function ld50_bg_mamm (LD50ft-2 for broadcast granular)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.52983, 9.36547, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bg_mamm
trex_empty.application_type = pd.Series(['Broadcast-Granular', 'Broadcast-Granular',
'Broadcast-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bg_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rl_mamm(self):
"""
# unit test for function ld50_rl_mamm (LD50ft-2 for Row/Band/In-furrow liquid mammals)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([np.nan, 0.6119317, 0.0024497], dtype='float')
try:
# following parameter values are unique for ld50_rl_mamm
trex_empty.application_type = pd.Series(['Broadcast-Granular',
'Row/Band/In-furrow-Liquid',
'Row/Band/In-furrow-Liquid',], dtype='object')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
# following parameter values are needed for internal call to "at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
result = trex_empty.ld50_rl_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_mamm(self):
"""
# unit test for function ld50_rg_mamm
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([33.9737, 7.192681, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_rg_mamm
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid',], dtype='object')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rg_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_eec_diet_max(self):
"""
combined unit test for methods eec_diet_max & eec_diet_timeseries;
* this test calls eec_diet_max, which in turn calls eec_diet_timeseries (which produces
concentration timeseries), which in turn calls conc_initial and conc_timestep
* eec_diet_max processes the timeseries and extracts the maximum values
* this test exercises both eec_diet_max & eec_diet_timeseries together (ok, so this violates the exact
definition of 'unittest'; get over it)
* the assertion check is that the maximum values from the timeseries match expectations
* the assumption is that if the maximums are 'as expected' then the timeseries are as well
* note: the 1st application day ('day_out') for the 2nd model simulation run is set to 0 here
to make sure the timeseries processing works when an application occurs on the 1st day of the year
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([1.734, 145.3409, 0.702], dtype='float')
num_app_days = pd.Series([], dtype='int')
try:
#specifying 3 different application scenarios of 1, 4, and 2 applications
trex_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
trex_empty.day_out = pd.Series([[5], [0, 10, 20, 50], [150, 250]], dtype='object')
for i in range(len(trex_empty.app_rates)):
trex_empty.num_apps[i] = len(trex_empty.app_rates[i])
num_app_days[i] = len(trex_empty.day_out[i])
assert (trex_empty.num_apps[i] == num_app_days[i]), 'series of app-rates and app_days do not match'
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02])
trex_empty.food_multiplier_init_sg = 15.
trex_empty.foliar_diss_hlife = pd.Series([25., 5., 45.])
result = trex_empty.eec_diet_max(trex_empty.food_multiplier_init_sg)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_eec_dose_bird(self):
"""
unit test for function eec_dose_bird;
internal calls to 'eec_diet_max' --> 'eec_diet_timeseries' --> 'conc_initial' and 'conc_timestep' are included;
internal call to 'fi_bird' included
unit tests of this routine include the following approach:
* this test verifies that the logic & calculations performed within the 'eec_dose_bird' are correctly implemented
* methods called inside of 'eec_dose_bird' are not retested/recalculated
* only the correct passing of variables/values is verified (calculations having been verified in previous unittests)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([7.763288, 2693.2339, 22.20837], dtype='float')
num_app_days = pd.Series([], dtype='int')
try:
trex_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
trex_empty.day_out = pd.Series([[5], [5, 10, 20, 50], [150, 250]], dtype='object')
for i in range(len(trex_empty.app_rates)):
trex_empty.num_apps[i] = len(trex_empty.app_rates[i])
num_app_days[i] = len(trex_empty.day_out[i])
assert (trex_empty.num_apps[i] == num_app_days[i]), 'list of app-rates and app_days do not match'
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02])
trex_empty.food_multiplier_init_sg = 240.
trex_empty.foliar_diss_hlife = pd.Series([25., 5., 45.])
# variables for 'fi_bird' (values reflect unittest for 'at_bird')
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.mf_w_bird_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
result = trex_empty.eec_dose_bird(trex_empty.aw_bird_sm, trex_empty.mf_w_bird_1,
trex_empty.food_multiplier_init_sg)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_arq_dose_bird(self):
"""
unit test for function arq_dose_bird;
internal calls to 'eec_diet_max' --> 'eec_diet_timeseries' --> 'conc_initial' and 'conc_timestep' are included
unit tests of this routine include the following approach:
* this test verifies that the logic & calculations performed within the 'arq_dose_bird' are correctly implemented
* methods called inside of 'arq_dose_bird' are not retested/recalculated
* only the correct passing of variables/values is verified (calculations having been verified in previous unittests)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([0.007014, 1.146429, 0.02478172], dtype='float')
num_app_days = pd.Series([], dtype='int')
try:
#specifying 3 different application scenarios of 1, 4, and 2 applications
trex_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
trex_empty.day_out = pd.Series([[5], [5, 10, 20, 50], [150, 250]], dtype='object')
for i in range(len(trex_empty.app_rates)):
trex_empty.num_apps[i] = len(trex_empty.app_rates[i])
num_app_days[i] = len(trex_empty.day_out[i])
assert (trex_empty.num_apps[i] == num_app_days[i]), 'list of app-rates and app_days do not match'
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02])
trex_empty.food_multiplier_init_sg = 15.
trex_empty.foliar_diss_hlife = pd.Series([25., 5., 45.])
# variables for 'at_bird' (values reflect unittest for 'fi_bird')
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.mf_w_bird_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
result = trex_empty.arq_dose_bird(trex_empty.aw_bird_sm, trex_empty.mf_w_bird_1,
trex_empty.food_multiplier_init_sg)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_arq_diet_bird(self):
"""
unit test for function arq_diet_bird;
internal calls to 'eec_diet_max' --> 'eec_diet_timeseries' --> 'conc_initial' and 'conc_timestep' are included
unit tests of this routine include the following approach:
* this test verifies that the logic & calculations performed within the 'arq_diet_bird' are correctly implemented
* methods called inside of 'arq_diet_bird' are not retested/recalculated
* only the correct passing of variables/values is verified (calculations having been verified in previous unittests)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([0.019563, 1.509543, 0.0046715], dtype='float')
result = pd.Series([], dtype = 'float')
num_app_days = pd.Series([], dtype='int')
try:
#specifying 3 different application scenarios of 1, 4, and 2 applications
trex_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
trex_empty.day_out = pd.Series([[5], [5, 10, 20, 50], [150, 250]], dtype='object')
for i in range(len(trex_empty.app_rates)):
trex_empty.num_apps[i] = len(trex_empty.app_rates[i])
num_app_days[i] = len(trex_empty.day_out[i])
assert (trex_empty.num_apps[i] == num_app_days[i]), 'list of app-rates and app_days do not match'
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
#trex_empty.food_multiplier_init_sg = pd.Series([110., 15., 240.], dtype='float')
trex_empty.food_multiplier_init_sg = 110.
trex_empty.foliar_diss_hlife = pd.Series([25., 5., 45.], dtype='float')
trex_empty.lc50_bird = pd.Series([650., 718., 1102.], dtype='float')
#for i in range (len(trex_empty.food_multiplier_init_sg)):
# result[i] = trex_empty.arq_diet_bird(trex_empty.food_multiplier_init_sg[i])
result = trex_empty.arq_diet_bird(trex_empty.food_multiplier_init_sg)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_crq_diet_bird(self):
"""
unit test for function crq_diet_bird;
internal calls to 'eec_diet_max' --> 'eec_diet_timeseries' --> 'conc_initial' and 'conc_timestep' are included
unit tests of this routine include the following approach:
* this test verifies that the logic & calculations performed within the 'crq_diet_bird' are correctly implemented
* methods called inside of 'crq_diet_bird' are not retested/recalculated
* only the correct passing of variables/values is verified (calculations having been verified in previous unittests)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([2.5432, 60.214, 0.050471], dtype='float')
num_app_days = pd.Series([], dtype='int')
try:
#specifying 3 different application scenarios of 1, 4, and 2 applications
trex_empty.app_rates = | pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object') | pandas.Series |
import warnings
from copy import deepcopy
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Set
from typing import Tuple
from typing import Union
import numpy as np
import pandas as pd
from joblib import Parallel
from joblib import delayed
from sklearn.base import RegressorMixin
from sklearn.linear_model import LinearRegression
from typing_extensions import Literal
from etna.datasets import TSDataset
from etna.ensembles import EnsembleMixin
from etna.loggers import tslogger
from etna.metrics import MAE
from etna.pipeline.base import BasePipeline
class StackingEnsemble(BasePipeline, EnsembleMixin):
"""StackingEnsemble is a pipeline that forecast future using the metamodel to combine the forecasts of the base models.
Examples
--------
>>> from etna.datasets import generate_ar_df
>>> from etna.datasets import TSDataset
>>> from etna.ensembles import StackingEnsemble
>>> from etna.models import NaiveModel
>>> from etna.models import MovingAverageModel
>>> from etna.pipeline import Pipeline
>>> import pandas as pd
>>> pd.options.display.float_format = '{:,.2f}'.format
>>> df = generate_ar_df(periods=100, start_time="2021-06-01", ar_coef=[0.8], n_segments=3)
>>> df_ts_format = TSDataset.to_dataset(df)
>>> ts = TSDataset(df_ts_format, "D")
>>> ma_pipeline = Pipeline(model=MovingAverageModel(window=5), transforms=[], horizon=7)
>>> naive_pipeline = Pipeline(model=NaiveModel(lag=10), transforms=[], horizon=7)
>>> ensemble = StackingEnsemble(pipelines=[ma_pipeline, naive_pipeline])
>>> _ = ensemble.fit(ts=ts)
>>> forecast = ensemble.forecast()
>>> forecast[:,:,"target"]
segment segment_0 segment_1 segment_2
feature target target target
timestamp
2021-09-09 0.70 1.47 0.20
2021-09-10 0.62 1.53 0.26
2021-09-11 0.50 1.78 0.36
2021-09-12 0.37 1.88 0.21
2021-09-13 0.46 1.87 0.25
2021-09-14 0.44 1.49 0.21
2021-09-15 0.36 1.56 0.30
"""
def __init__(
self,
pipelines: List[BasePipeline],
final_model: RegressorMixin = LinearRegression(),
n_folds: int = 3,
features_to_use: Union[None, Literal["all"], List[str]] = None,
n_jobs: int = 1,
joblib_params: Optional[Dict[str, Any]] = None,
):
"""Init StackingEnsemble.
Parameters
----------
pipelines:
List of pipelines that should be used in ensemble.
final_model:
Regression model with fit/predict interface which will be used to combine the base estimators.
n_folds:
Number of folds to use in the backtest. Backtest is not used for model evaluation but for prediction.
features_to_use:
Features to use in the ``final_model`` in addition to the base models' forecasts.
n_jobs:
Number of jobs to run in parallel.
joblib_params:
Additional parameters for :py:class:`joblib.Parallel`.
Raises
------
ValueError:
If the number of the pipelines is less than 2 or pipelines have different horizons.
"""
self._validate_pipeline_number(pipelines=pipelines)
self.pipelines = pipelines
self.final_model = final_model
self._validate_backtest_n_folds(n_folds)
self.n_folds = n_folds
self.features_to_use = features_to_use
self.filtered_features_for_final_model: Union[None, Set[str]] = None
self.n_jobs = n_jobs
if joblib_params is None:
self.joblib_params = dict(verbose=11, backend="multiprocessing", mmap_mode="c")
else:
self.joblib_params = joblib_params
super().__init__(horizon=self._get_horizon(pipelines=pipelines))
def _filter_features_to_use(self, forecasts: List[TSDataset]) -> Union[None, Set[str]]:
"""Return all the features from ``features_to_use`` which can be obtained from base models' forecasts."""
features_df = pd.concat([forecast.df for forecast in forecasts], axis=1)
available_features = set(features_df.columns.get_level_values("feature")) - {"fold_number"}
features_to_use = self.features_to_use
if features_to_use is None:
return None
elif features_to_use == "all":
return available_features - {"target"}
elif isinstance(features_to_use, list):
features_to_use_unique = set(features_to_use)
if len(features_to_use_unique) == 0:
return None
elif features_to_use_unique.issubset(available_features):
return features_to_use_unique
else:
unavailable_features = features_to_use_unique - available_features
warnings.warn(f"Features {unavailable_features} are not found and will be dropped!")
return features_to_use_unique.intersection(available_features)
else:
warnings.warn(
"Feature list is passed in the wrong format."
"Only the base models' forecasts will be used for the final forecast."
)
return None
def _backtest_pipeline(self, pipeline: BasePipeline, ts: TSDataset) -> TSDataset:
"""Get forecasts from backtest for given pipeline."""
with tslogger.disable():
_, forecasts, _ = pipeline.backtest(ts=ts, metrics=[MAE()], n_folds=self.n_folds)
forecasts = TSDataset(df=forecasts, freq=ts.freq)
return forecasts
def fit(self, ts: TSDataset) -> "StackingEnsemble":
"""Fit the ensemble.
Parameters
----------
ts:
TSDataset to fit ensemble.
Returns
-------
self:
Fitted ensemble.
"""
self.ts = ts
# Get forecasts from base models on backtest to fit the final model on
forecasts = Parallel(n_jobs=self.n_jobs, **self.joblib_params)(
delayed(self._backtest_pipeline)(pipeline=pipeline, ts=deepcopy(ts)) for pipeline in self.pipelines
)
# Fit the final model
self.filtered_features_for_final_model = self._filter_features_to_use(forecasts)
x, y = self._make_features(forecasts=forecasts, train=True)
self.final_model.fit(x, y)
# Fit the base models
self.pipelines = Parallel(n_jobs=self.n_jobs, **self.joblib_params)(
delayed(self._fit_pipeline)(pipeline=pipeline, ts=deepcopy(ts)) for pipeline in self.pipelines
)
return self
def _make_features(
self, forecasts: List[TSDataset], train: bool = False
) -> Tuple[pd.DataFrame, Optional[pd.Series]]:
"""Prepare features for the ``final_model``."""
if self.ts is None:
raise ValueError("StackingEnsemble is not fitted! Fit the StackingEnsemble before calling forecast method.")
# Stack targets from the forecasts
targets = [
forecast[:, :, "target"].rename({"target": f"regressor_target_{i}"}, axis=1)
for i, forecast in enumerate(forecasts)
]
targets = pd.concat(targets, axis=1)
# Get features from filtered_features_for_final_model
features = pd.DataFrame()
if self.filtered_features_for_final_model is not None:
features_in_forecasts = [
list(
set(forecast.columns.get_level_values("feature")).intersection(
self.filtered_features_for_final_model
)
)
for forecast in forecasts
]
features = pd.concat(
[forecast[:, :, features_in_forecasts[i]] for i, forecast in enumerate(forecasts)], axis=1
)
features = features.loc[:, ~features.columns.duplicated()]
features_df = pd.concat([features, targets], axis=1)
# Flatten the features to fit the sklearn interface
x = | pd.concat([features_df.loc[:, segment] for segment in self.ts.segments], axis=0) | pandas.concat |
""" Module for data preprocessing.
"""
import datetime
import warnings
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Set
from typing import Union
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
from sklearn.utils.validation import check_is_fitted
__all__ = [
'ColumnSelector',
'ColumnDropper',
'ColumnRename',
'NaDropper',
'Clip',
'DatetimeTransformer',
'NumericTransformer',
'TimeframeExtractor',
'DateExtractor',
'ValueMapper',
'Sorter',
'Fill',
'TimeOffsetTransformer',
'ConditionedDropper',
'ZeroVarianceDropper',
'SignalSorter',
'ColumnSorter',
'DifferentialCreator'
]
class ColumnSelector(BaseEstimator, TransformerMixin):
"""Transformer to select a list of columns by their name.
Example:
>>> data = pd.DataFrame({'a': [0], 'b': [0]})
>>> ColumnSelector(keys=['a']).transform(data)
pd.DataFrame({'a': [0]})
"""
def __init__(self, keys: List[str]):
"""Creates ColumnSelector.
Transformer to select a list of columns for further processing.
Args:
keys (List[str]): List of columns to extract.
"""
self._keys = keys
def fit(self, X, y=None):
return self
def transform(self, X):
"""Extracts the columns from `X`.
Args:
X (pd.DataFrame): Dataframe.
Returns:
pd.DataFrame: Returns a DataFrame only containing the selected
features.
"""
return X.loc[:, self._keys]
class ColumnDropper(BaseEstimator, TransformerMixin):
"""Transformer to drop a list of columns by their name.
Example:
>>> data = pd.DataFrame({'a': [0], 'b': [0]})
>>> ColumnDropper(columns=['b']).transform(data)
pd.DataFrame({'a': [0]})
"""
def __init__(self,
*,
columns: Union[List[str], Set[str]],
verbose: bool = False):
"""Creates ColumnDropper.
Transformer to drop a list of columns from the data frame.
Args:
columns (list): List of column names to drop.
"""
self.columns = set(columns)
self.verbose = verbose
def fit(self, X, y=None):
return self
def transform(self, X):
"""Drops a list of columns of `X`.
Args:
X (pd.DataFrame): Dataframe.
Returns:
pd.DataFrame: Returns the dataframe without the dropped features.
"""
cols = set(X.columns.to_list())
if len(m := self.columns - cols) > 0:
warnings.warn(f'Columns {m} not found in dataframe.')
if self.verbose:
print(f'New columns: {cols - self.columns}. '
f'Removed: {self.columns}.')
return X.drop(self.columns, axis=1, errors='ignore')
class ColumnRename(BaseEstimator, TransformerMixin):
"""Transformer to rename column with a function.
Example:
>>> data = pd.DataFrame({'a.b.c': [0], 'd.e.f': [0]})
>>> ColumnRename(lambda x: x.split('.')[-1]).transform(data)
pd.DataFrame({'c': [0], 'f': [0]})
"""
def __init__(self, mapper: Callable[[str], str]):
"""Create ColumnRename.
Transformer to rename columns by a mapper function.
Args:
mapper (lambda): Mapper rename function.
Example:
Given column with name: a.b.c
lambda x: x.split('.')[-1]
Returns c
"""
self.mapper = mapper
def fit(self, X, y=None):
return self
def transform(self, X):
"""Renames a columns in `X` with a mapper function.
Args:
X (pd.DataFrame): Dataframe.
Returns:
pd.DataFrame: Returns the dataframe with the renamed columns.
"""
# split the column name
# use the last item as new name
return X.rename(columns=self.mapper)
class NaDropper(BaseEstimator, TransformerMixin):
"""Transformer that drops rows with na values.
Example:
>>> data = pd.DataFrame({'a': [0, 1], 'b': [0, np.nan]})
>>> NaDropper().transform(data)
pd.DataFrame({'a': [0], 'b': [0]})
"""
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X):
return X.dropna()
class Clip(BaseEstimator, TransformerMixin):
"""Transformer that clips values by a lower and upper bound.
Example:
>>> data = pd.DataFrame({'a': [-0.1, 1.2], 'b': [0.5, 0.6]})
>>> Clip().transform(data)
pd.DataFrame({'a': [0, 1], 'b': [0.5, 0.6]})
"""
def __init__(self, lower: float = 0.0, upper: float = 1.0):
"""Creates Clip.
Transformer that clips a numeric column to the threshold if the
threshold is exceeded. Works with an upper and lower threshold. Wrapper
for pd.DataFrame.clip.
Args:
lower (float, optional): lower limit. Defaults to 0.
upper (float, optional): upper limit. Defaults to 1.
"""
self.upper = upper
self.lower = lower
def fit(self, X, y=None):
return self
def transform(self, X):
return X.clip(lower=self.lower, upper=self.upper, axis=0)
class ColumnTSMapper(BaseEstimator, TransformerMixin):
def __init__(self,
cols: List[str],
timedelta: pd.Timedelta = pd.Timedelta(250, 'ms'),
classes: List[str] = None,
verbose: bool = False):
"""Creates ColumnTSMapper.
Expects the timestamp column to be of type pd.Timestamp.
Args:
cols (List[str]): names of [0] timestamp column, [1] sensor names,
[2] sensor values.
timedelta (pd.Timedelta): Timedelta to resample with.
classes (List[str]): List of sensor names.
verbose (bool, optional): Whether to allow prints.
"""
super().__init__()
self._cols = cols
self._timedelta = timedelta
self._verbose = verbose
if classes is not None:
self.classes_ = classes
def fit(self, X, y=None):
"""Gets the unique values in the sensor name column that
are needed to expand the dataframe.
Args:
X (pd.DataFrame): Dataframe.
y (array-like, optional): Labels. Defaults to None.
Returns:
ColumnTSMapper: Returns this.
"""
classes = X[self._cols[1]].unique()
self.classes_ = np.hstack(['Timestamp', classes])
return self
def transform(self, X):
"""Performs the mapping to equidistant timestamps.
Args:
X (pd.DataFrame): Dataframe.
Raises:
ValueError: Raised if column is not found in `X`.
Returns:
pd.DataFrame: Returns the remapped dataframe.
"""
# check is fit had been called
check_is_fitted(self)
# check if all columns exist
if not all([item in X.columns for item in self._cols]):
raise ValueError(
f'Columns {self._cols} not found in DataFrame '
f'{X.columns.to_list()}.')
# split sensors into individual columns
# create new dataframe with all _categories
# use timestamp index, to use resample later on
# initialized with na
sensors = pd.DataFrame(
None, columns=self.classes_, index=X[self._cols[0]])
# group by sensor
groups = X.groupby([self._cols[1]])
# write sensor values to sensors which is indexed by the timestamp
for g in groups:
sensors.loc[g[1][self._cols[0]], g[0]
] = g[1][self._cols[2]].to_numpy()
sensors = sensors.apply(pd.to_numeric, errors='ignore')
# fill na, important before resampling
# otherwise mean affects more samples than necessary
# first: forward fill to next valid observation
# second: backward fill first missing rows
sensors = sensors.fillna(method='ffill').fillna(method='bfill')
# resamples to equidistant timeframe
# take avg if multiple samples in the same timeframe
sensors = sensors.resample(self._timedelta).mean()
sensors = sensors.fillna(method='ffill').fillna(method='bfill')
# FIXME: to avoid nans in model, but needs better fix
sensors = sensors.fillna(value=0.0)
# move index to column and use rangeindex
sensors['Timestamp'] = sensors.index
sensors.index = pd.RangeIndex(stop=sensors.shape[0])
if self._verbose:
start, end = sensors.iloc[0, 0], sensors.iloc[-1, 0]
print('ColumnTSMapper: ')
print(f'{sensors.shape[0]} rows. '
f'Mapped to {self._timedelta.total_seconds()}s interval '
f'from {start} to {end}.')
return sensors
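# Hedged usage sketch for ColumnTSMapper (column names are illustrative): given long-format
# rows of (timestamp, sensor name, sensor value), the transformer pivots each sensor into
# its own column and resamples to one row per `timedelta`:
#   mapper = ColumnTSMapper(cols=['Timestamp', 'SensorName', 'Value'],
#                           timedelta=pd.Timedelta(500, 'ms'))
#   wide_df = mapper.fit_transform(long_df)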
class DatetimeTransformer(BaseEstimator, TransformerMixin):
"""Transforms a list of columns to datetime.
Example:
>>> data = pd.DataFrame({'dt': ['2021-07-02 16:30:00']})
>>> data = DatetimeTransformer(columns=['dt']).transform(data)
>>> data.dtypes
dt datetime64[ns]
"""
def __init__(self, *, columns: List[str], dt_format: str = None):
"""Creates DatetimeTransformer.
Parses a list of columns to pd.Timestamp.
Args:
columns (list): List of column names.
dt_format (str): Optional format string.
"""
super().__init__()
self._columns = columns
self._format = dt_format
def fit(self, X, y=None):
return self
def transform(self, X):
"""Parses `columns` to datetime.
Args:
X (pd.DataFrame): Dataframe.
Raises:
ValueError: Raised if columns are missing in `X`.
Returns:
pd.DataFrame: Returns the dataframe with datetime columns.
"""
X = X.copy()
# check if columns in dataframe
if len(diff := set(self._columns) - set(X.columns)):
raise ValueError(
f'Columns {diff} not found in DataFrame with columns'
f'{X.columns.to_list()}.')
# parse to pd.Timestamp
X[self._columns] = X[self._columns].apply(
lambda x: pd.to_datetime(x, format=self._format), axis=0)
# column wise
return X
class NumericTransformer(BaseEstimator, TransformerMixin):
"""Transforms a list of columns to numeric datatype.
Example:
>>> data = pd.DataFrame({'a': [0], 'b': ['1']})
>>> data.dtypes
a int64
b object
>>> data = NumericTransformer().transform(data)
>>> data.dtypes
a int64
b int64
"""
def __init__(self, *, columns: Optional[List[str]] = None):
"""Creates NumericTransformer.
Parses a list of columns to a numeric datatype. If None, all columns are
attempted to be parsed.
Args:
columns (list): List of column names.
"""
super().__init__()
self._columns = columns
def fit(self, X, y=None):
return self
def transform(self, X):
"""Parses `columns` to numeric.
Args:
X (pd.DataFrame): Dataframe.
Raises:
ValueError: Raised if columns are missing in `X`.
Returns:
pd.DataFrame: Returns the dataframe with numeric columns.
"""
X = X.copy()
# transform all columns
if self._columns is None:
columns = X.columns.to_list()
else:
columns = self._columns
if len((diff := list(set(columns) - set(cols := X.columns)))):
raise ValueError(f'Columns found: {cols.to_list()}. '
f'Columns missing: {diff}.')
# parse to numeric
# column wise
X[columns] = X[columns].apply(pd.to_numeric, axis=0)
return X
class TimeframeExtractor(BaseEstimator, TransformerMixin):
"""Drops sampes that are not between a given start and end time.
Limits are inclusive.
Example:
>>> data = pd.DataFrame(
{'dates': [datetime.datetime(2021, 7, 2, 9, 50, 0),
datetime.datetime(2021, 7, 2, 11, 0, 0),
datetime.datetime(2021, 7, 2, 12, 10, 0)],
'values': [0, 1, 2]})
>>> TimeframeExtractor(time_column='dates',
start_time= datetime.time(10, 0, 0),
end_time=datetime.time(12, 0, 0)
).transform(data)
pd.DataFrame({'dates': datetime.datetime(2021, 7, 2, 11, 0, 0),
'values': [1]})
"""
def __init__(self,
*,
time_column: str,
start_time: datetime.time,
end_time: datetime.time,
invert: bool = False,
verbose: bool = False):
"""Creates TimeframeExtractor.
Drops samples that are not in between `start_time` and `end_time` in
`time_column`.
Args:
time_column (str): Column name of the timestamp column.
start_time (datetime.time): Start time.
end_time (datetime.time): End time.
invert(bool): Whether to invert the range.
verbose (bool, optional): Whether to allow prints.
"""
super().__init__()
self._start = start_time
self._end = end_time
self._column = time_column
self._negate = invert
self._verbose = verbose
def fit(self, X, y=None):
return self
def transform(self, X):
"""Drops rows from the dataframe if they are not in between
`start_time` and `end_time`. Limits are inclusive. Reindexes the
dataframe.
Args:
X (pd.DataFrame): Dataframe.
Returns:
pd.DataFrame: Returns the new dataframe.
"""
X = X.copy()
rows_before = X.shape[0]
dates = pd.to_datetime(X[self._column])
if self._negate:
X = X.loc[~((dates.dt.time >= self._start) &
(dates.dt.time <= self._end)), :]
else:
X = X.loc[(dates.dt.time >= self._start) &
(dates.dt.time <= self._end), :]
X.index = pd.RangeIndex(0, X.shape[0])
rows_after = X.shape[0]
if self._verbose:
print(
'TimeframeExtractor: \n'
f'{rows_after} rows. Dropped {rows_before - rows_after} '
f'rows which are {"in" if self._negate else "not in"} between '
f'{self._start} and {self._end}.'
)
return X
class DateExtractor(BaseEstimator, TransformerMixin):
""" Drops rows that are not between a start and end date.
Limits are inclusive.
Example:
>>> data = pd.DataFrame(
{'dates': [datetime.datetime(2021, 7, 1, 9, 50, 0),
datetime.datetime(2021, 7, 2, 11, 0, 0),
datetime.datetime(2021, 7, 3, 12, 10, 0)],
'values': [0, 1, 2]})
>>> DateExtractor(date_column='dates',
start_date=datetime.date(2021, 7, 2),
end_date=datetime.date(2021, 7, 2)).transform(data)
pd.DataFrame({'dates': datetime.datetime(2021, 7, 2, 11, 0, 0),
'values': [1]})
"""
def __init__(self,
*,
date_column: str,
start_date: datetime.date,
end_date: datetime.date,
invert: bool = False,
verbose: bool = False):
"""Initializes `DateExtractor`.
Args:
date_column (str): Name of timestamp column.
start_date (datetime.date): Start date.
end_date (datetime.date): End date.
invert (bool): Whether to invert the range.
verbose (bool, optional): Whether to allow prints.
"""
super().__init__()
self._start = start_date
self._end = end_date
self._column = date_column
self._negate = invert
self._verbose = verbose
def fit(self, X, y=None):
return self
def transform(self, X):
"""Drops rows which date is not between `start` and end date.
Bounds are inclusive. Dataframe is reindexed.
Args:
X (pd.Dataframe): Dataframe.
Returns:
pd.Dataframe: Returns the new dataframe.
"""
rows_before = X.shape[0]
dates = pd.to_datetime(X[self._column])
if self._negate:
X = X.loc[~((dates.dt.date >= self._start) &
(dates.dt.date <= self._end)), :]
else:
X = X.loc[(dates.dt.date >= self._start) &
(dates.dt.date <= self._end), :]
X.index = | pd.RangeIndex(0, X.shape[0]) | pandas.RangeIndex |
"""
This script does a quick sanity check about how the communities are disconnected (i.e., how many connections exist
among different communities), using the pickle files generated in script `04_01`.
"""
import pickle
import numpy as np
import pandas as pd
from definitions import TISSUES
# python -u 04_02_quick_summary.py | tee outputs/output_04_02.txt
# Just to check whether communities are disconnected
for tissue_name in TISSUES:
print("## " + tissue_name)
communities, _ = pickle.load(open("results/louvain_modules_" + tissue_name + ".pkl", "rb"))
corr_mat = pd.read_pickle("data/corr_" + tissue_name + ".pkl")
corr_mat = corr_mat.replace([np.inf], np.nan).fillna(0)
corr_arr = corr_mat.values.copy()
corr_arr[(corr_arr > -0.8) & (corr_arr < 0.8)] = 0
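    # Entries with |correlation| < 0.8 are zeroed above, so the checks below look for any
    # remaining strong correlations that would connect nodes across different communities.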
size_connections = {}
uniqs = np.unique(communities, return_counts=True)
for ind, size in enumerate(uniqs[1]):
if size == 1:
id_comunity = uniqs[0][ind]
pos = np.where(communities == id_comunity)
sum_val = np.sum(corr_arr[pos, :])
# If it is connected to other communities, it will print
if sum_val > 0:
print("Community of size 1 connected! ... " + str(sum_val))
# Checking whether communities are disconnected among them
else:
# Community IDs of a certain size
com_sizes = [uniqs[0][i] for i, elem in enumerate(uniqs[1]) if elem == size]
com_others = [uniqs[0][i] for i, elem in enumerate(uniqs[1]) if elem != size and elem != 1]
pos_sizes = np.where(np.isin(communities, com_sizes))
pos_others = np.where(np.isin(communities, com_others))
all_df = | pd.DataFrame(corr_arr, index=corr_mat.index, columns=corr_mat.columns) | pandas.DataFrame |
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
def test_group_by(c):
df = c.sql(
"""
SELECT
user_id, SUM(b) AS "S"
FROM user_table_1
GROUP BY user_id
"""
)
df = df.compute()
expected_df = pd.DataFrame({"user_id": [1, 2, 3], "S": [3, 4, 3]})
assert_frame_equal(df.sort_values("user_id").reset_index(drop=True), expected_df)
def test_group_by_all(c, df):
result_df = c.sql(
"""
SELECT
SUM(b) AS "S", SUM(2) AS "X"
FROM user_table_1
"""
)
result_df = result_df.compute()
expected_df = pd.DataFrame({"S": [10], "X": [8]})
expected_df["S"] = expected_df["S"].astype("int64")
expected_df["X"] = expected_df["X"].astype("int32")
assert_frame_equal(result_df, expected_df)
result_df = c.sql(
"""
SELECT
SUM(a) AS sum_a,
AVG(a) AS avg_a,
SUM(b) AS sum_b,
AVG(b) AS avg_b,
SUM(a)+AVG(b) AS mix_1,
SUM(a+b) AS mix_2,
AVG(a+b) AS mix_3
FROM df
"""
)
result_df = result_df.compute()
expected_df = pd.DataFrame(
{
"sum_a": [df.a.sum()],
"avg_a": [df.a.mean()],
"sum_b": [df.b.sum()],
"avg_b": [df.b.mean()],
"mix_1": [df.a.sum() + df.b.mean()],
"mix_2": [(df.a + df.b).sum()],
"mix_3": [(df.a + df.b).mean()],
}
)
assert_frame_equal(result_df, expected_df)
def test_group_by_filtered(c):
df = c.sql(
"""
SELECT
SUM(b) FILTER (WHERE user_id = 2) AS "S1",
SUM(b) "S2"
FROM user_table_1
"""
)
df = df.compute()
expected_df = pd.DataFrame({"S1": [4], "S2": [10]}, dtype="int64")
assert_frame_equal(df, expected_df)
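# The FILTER clause above restricts an aggregate to the rows matching its predicate; a hedged
# pandas equivalent (same fixture assumption as above):
#   s1 = user_table_1.loc[user_table_1["user_id"] == 2, "b"].sum()
#   s2 = user_table_1["b"].sum()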
def test_group_by_filtered2(c):
df = c.sql(
"""
SELECT
user_id,
SUM(b) FILTER (WHERE user_id = 2) AS "S1",
SUM(b) "S2"
FROM user_table_1
GROUP BY user_id
"""
)
df = df.compute()
expected_df = pd.DataFrame(
{"user_id": [1, 2, 3], "S1": [np.NaN, 4.0, np.NaN], "S2": [3, 4, 3],},
)
assert_frame_equal(df, expected_df)
def test_group_by_case(c):
df = c.sql(
"""
SELECT
user_id + 1, SUM(CASE WHEN b = 3 THEN 1 END) AS "S"
FROM user_table_1
GROUP BY user_id + 1
"""
)
df = df.compute()
user_id_column = '"user_table_1"."user_id" + 1'
expected_df = | pd.DataFrame({user_id_column: [2, 3, 4], "S": [1, 1, 1]}) | pandas.DataFrame |
import numpy as np
import pandas as pd
import pytest
import orca
from urbansim_templates import utils
def test_parse_version():
assert utils.parse_version('0.1.0.dev0') == (0, 1, 0, 0)
assert utils.parse_version('0.115.3') == (0, 115, 3, None)
assert utils.parse_version('3.1.dev7') == (3, 1, 0, 7)
assert utils.parse_version('5.4') == (5, 4, 0, None)
def test_version_greater_or_equal():
assert utils.version_greater_or_equal('2.0', '0.1.1') == True
assert utils.version_greater_or_equal('0.1.1', '2.0') == False
assert utils.version_greater_or_equal('2.1', '2.0.1') == True
assert utils.version_greater_or_equal('2.0.1', '2.1') == False
assert utils.version_greater_or_equal('1.1.3', '1.1.2') == True
assert utils.version_greater_or_equal('1.1.2', '1.1.3') == False
assert utils.version_greater_or_equal('1.1.3', '1.1.3') == True
assert utils.version_greater_or_equal('1.1.3.dev1', '1.1.3.dev0') == True
assert utils.version_greater_or_equal('1.1.3.dev0', '1.1.3') == False
###############################
## get_df
@pytest.fixture
def df():
d = {'id': [1,2,3], 'val1': [4,5,6], 'val2': [7,8,9]}
return pd.DataFrame(d).set_index('id')
def test_get_df_dataframe(df):
"""
Confirm that get_df() works when passed a DataFrame.
"""
df_out = utils.get_df(df)
pd.testing.assert_frame_equal(df, df_out)
def test_get_df_str(df):
"""
Confirm that get_df() works with str input.
"""
orca.add_table('df', df)
df_out = utils.get_df('df')
pd.testing.assert_frame_equal(df, df_out)
def test_get_df_dataframewrapper(df):
"""
Confirm that get_df() works with orca.DataFrameWrapper input.
"""
dfw = orca.DataFrameWrapper('df', df)
df_out = utils.get_df(dfw)
pd.testing.assert_frame_equal(df, df_out)
def test_get_df_tablefuncwrapper(df):
"""
Confirm that get_df() works with orca.TableFuncWrapper input.
"""
def df_callable():
return df
tfw = orca.TableFuncWrapper('df', df_callable)
df_out = utils.get_df(tfw)
pd.testing.assert_frame_equal(df, df_out)
def test_get_df_columns(df):
"""
Confirm that get_df() limits columns, and filters out duplicates and invalid ones.
"""
dfw = orca.DataFrameWrapper('df', df)
df_out = utils.get_df(dfw, ['id', 'val1', 'val1', 'val3'])
| pd.testing.assert_frame_equal(df[['val1']], df_out) | pandas.testing.assert_frame_equal |
##############################################################################
# Copyright 2020 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import pytest
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
import dfpipeline as dfp
df = pd.DataFrame({
'sex': ["male", "female", "female", "male", "female", "male", "female", "female"],
'C2': [3, 4, 6, 9, None, 17, 20, 100]
})
result_df = df.copy()
result_df['C2'] = result_df['C2'].astype(np.float64)
def test_typeconv():
conv = dfp.TypeConverter(columns=['C2'], type=np.float64)
out = conv.fit_transform(df.copy())
| assert_frame_equal(out, result_df) | pandas.testing.assert_frame_equal |
# -*- coding: utf-8 -*-
# !/usr/bin/env python
#
# @file multi_md_analysis.py
# @brief multi_md_analysis object
# @author <NAME>
#
# <!--------------------------------------------------------------------------
# Copyright (c) 2016-2019,<NAME>.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the molmolpy Developers nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ------------------------------------------------------------------------ -->
import itertools
import hdbscan
import matplotlib
import matplotlib.cm as cm
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.metrics import silhouette_samples, silhouette_score, calinski_harabaz_score
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
'darkorange'])
import os
import sys
import pickle
import time
import pylab as plt
from scipy import linalg
from pandas import HDFStore, DataFrame
import matplotlib as mpl
import mdtraj as md
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.ticker as ticker
from sklearn.decomposition import PCA
from sklearn import mixture
from multiprocessing import Pool
from molmolpy.utils.cluster_quality import *
from molmolpy.utils import converters
from molmolpy.utils import plot_tools
from molmolpy.utils import pdb_tools
from molmolpy.utils import folder_utils
from molmolpy.utils import protein_analysis
from molmolpy.utils import nucleic_analysis
from molmolpy.utils import helper as hlp
from itertools import combinations
import seaborn as sns
import numba
matplotlib.rcParams.update({'font.size': 12})
# matplotlib.style.use('ggplot')
sns.set(style="white", context='paper')
# font = {'family' : 'normal',
# 'weight' : 'bold',
# 'size' : 18}
#
# matplotlib.rc('font', **font)
class MultiMDAnalysisObject(object):
"""
Molecule object loading of pdb and pbdqt file formats.
Then converts to pandas dataframe.
Create MoleculeObject by parsing pdb or pdbqt file.
2 types of parsers can be used: 1.molmolpy 2. pybel
Stores molecule information in pandas dataframe as well as numpy list.
Read more in the :ref:`User Guide <MoleculeObject>`.
Parameters
----------
filename : str, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
Attributes
----------
core_sample_indices_ : array, shape = [n_core_samples]
Indices of core samples.
components_ : array, shape = [n_core_samples, n_features]
Copy of each core sample found by training.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Convert gro to PDB so mdtraj recognises topology
YEAH
gmx editconf -f npt.gro -o npt.pdb
"""
def __init__(self, file_list=None):
self.simulation_data = {}
self.sim_indexes = []
if file_list is not None:
if len(file_list) > 0:
for i in range(len(file_list)):
self.add_simulation_pickle_data(i + 1, file_list[i])
self.sim_indexes.append(i + 1)
colors = sns.cubehelix_palette(n_colors=len(file_list), rot=.7, dark=0, light=0.85)
self.colors_ = colors
test = 1
def add_simulation_pickle_data(self, index, filename):
temp_data = pickle.load(open(filename, "rb"))
self.simulation_data.update({str(index): temp_data})
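# Usage sketch (not part of the original file): the constructor takes a list of
# pickle files produced by the per-simulation analysis step; the file names and
# the 'backbone' selection below are illustrative assumptions only.
#
#   analysis = MultiMDAnalysisObject(file_list=['cluster1_analysis.pickle',
#                                               'cluster2_analysis.pickle'])
#   analysis.plot_rmsd_multi('backbone', custom_labels=['Cluster 1', 'Cluster 2'])
#
# Each pickle is expected to hold a dict with keys such as 'time', 'rmsd', 'Rg',
# 'rmsf' and 'hbondFrames', which is exactly what the plotting methods below read.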
@hlp.timeit
def plot_rmsd_multi(self, selection,
title='Simulation',
xlabel=r"Time $t$ (ns)",
ylabel=r"RMSD(nm)",
custom_dpi=1200,
custom_labels=None,
position='best',
noTitle=True,
size_x=8.4,
size_y=7):
import pylab as plt
sns.set(style="ticks", context='paper')
sns.set(font_scale=0.8)
'''
ylabel=r"C$_\alpha$ RMSD from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)",
'''
title = 'Cluster Simulation {0}-{1}'.format(self.sim_indexes[0], self.sim_indexes[-1])
# fig = plt.figure(figsize=(10, 7))
# fig.suptitle(title, fontsize=16)
# ax = fig.add_axes([2, 2, 2, 2])
# plt.plot(time_sim1_np, rmsd_sim1_np, 'b')
# fig = plt.figure(figsize=(10, 7))
fig = plt.figure(figsize=plot_tools.cm2inch(size_x, size_y))
# fig.suptitle(title, fontsize=16)
if noTitle is False:
fig.suptitle(title)
for i in self.sim_indexes:
self.sim_time = self.simulation_data[str(i)]['time']
traj_rmsd = self.simulation_data[str(i)]['rmsd'][selection]
if custom_labels is None:
curr_label = 'Simulation {0}'.format(i)
else:
curr_label = '{0}'.format(custom_labels[i-1])
curr_color = self.colors_[i - 1]
plt.plot(self.sim_time, traj_rmsd, color=curr_color,
linewidth=0.52, label=curr_label)
# plt.legend(loc="best", prop={'size': 8})
# plt.xlabel(xlabel, fontsize=16)
# plt.ylabel(ylabel, fontsize=16) # fix Angstrom need to change to nm
plt.xlabel(xlabel)
plt.ylabel(ylabel) # fix Angstrom need to change to nm
# leg = plt.legend(loc='best', shadow=True, prop={'size': 16})
leg = plt.legend(loc=position, shadow=True, ncol=2)
# set the linewidth of each legend object
for legobj in leg.legendHandles:
legobj.set_linewidth(6.0)
# remove part of ticks
sns.despine()
fig.savefig('Multi_Plot_RMSD_' + '_' + title + '_' + selection + '.png', dpi=custom_dpi, bbox_inches='tight')
print('RMSD plot created')
print('-----------------------------------\n')
@hlp.timeit
def plot_rg_multi(self,
selection,
title='LasR Rg',
xlabel=r"time $t$ (ns)",
ylabel=r"C$_\alpha$ Rg from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)",
custom_dpi=600):
import pylab as plt
sns.set(style="ticks", context='paper')
# sns.set(font_scale=2)
# In[27]:
fig = plt.figure(figsize=(10, 7))
# ax = fig.add_axes([2, 2, 2, 2])
# plt.plot(time_sim1_np, rmsd_sim1_np, 'b')
title = 'Cluster Simulation {0}-{1}'.format(self.sim_indexes[0], self.sim_indexes[-1])
# fig = plt.figure(figsize=(10, 7))
fig.suptitle(title, fontsize=16)
# ax = fig.add_axes([2, 2, 2, 2])
# plt.plot(time_sim1_np, rmsd_sim1_np, 'b')
for i in self.sim_indexes:
self.sim_time = self.simulation_data[str(i)]['time']
traj_rmsd = self.simulation_data[str(i)]['Rg'][selection]
curr_label = 'Simulation {0}'.format(i)
curr_color = self.colors_[i - 1]
plt.plot(self.sim_time, traj_rmsd, color=curr_color,
linewidth=0.6, label=curr_label)
# plt.legend(loc="best", prop={'size': 8})
plt.xlabel(xlabel, fontsize=16)
plt.ylabel(ylabel, fontsize=16) # fix Angstrom need to change to nm
leg = plt.legend(loc='best', shadow=True, prop={'size': 16})
# set the linewidth of each legend object
for legobj in leg.legendHandles:
legobj.set_linewidth(9.0)
# remove part of ticks
sns.despine()
# In[28]:
fig.savefig('Multi_Plot_Rg_' + '_' + title + '_' + selection + '.png', dpi=custom_dpi, bbox_inches='tight')
print('Rg plot created')
print('-----------------------------------\n')
# TODO calculate confidence intervals
@hlp.timeit
def plot_rmsf_plus_confidence_multi(self, selection,
title='LasR RMSF',
xlabel=r"Residue",
ylabel=r"RMSF(nm)",
custom_dpi=600):
'''
ylabel=r"C$_\alpha$ RMSF from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)",
:param title:
:param xlabel:
:param ylabel:
:param custom_dpi:
:return:
'''
sns.set(style="ticks", context='paper')
# sns.set(font_scale=2)
fig = plt.figure(figsize=(14, 7))
title = 'Cluster Simulation {0}-{1}'.format(self.sim_indexes[0], self.sim_indexes[-1])
# fig = plt.figure(figsize=(10, 7))
fig.suptitle(title, fontsize=16)
for i in self.sim_indexes:
self.sim_time = self.simulation_data[str(i)]['time']
curr_label = 'Simulation {0}'.format(i)
traj_rmsf = self.simulation_data[str(i)]['rmsf'][selection]['rmsf']
atom_indices_rmsf = self.simulation_data[str(i)]['rmsf'][selection]['ref_atom_indices']
curr_color = self.colors_[i - 1]
conv_data = converters.convert_data_to_pandas(atom_indices_rmsf, traj_rmsf, x_axis_name='Residue',
y_axis_name='RMSF')
conv_data['Residue'] += 1
confidence = hlp.mean_confidence_interval(conv_data['RMSF'])
# plt.plot(self.sim_time, traj_rmsd, color=curr_color,
# linewidth=0.6, label=curr_label)
# Plot the response with standard error
sns.tsplot(data=conv_data, ci=[95], color="m")
# plt.plot(conv_data['x'], conv_data['y'], color=curr_color,
# linewidth=0.6, label=curr_label)
# plt.xlim(min(conv_data['x']) - 100, max(conv_data['x']) + 100)
# traj_rmsf = self.rmsf_analysis_data[selection]['rmsf']
# atom_indices_rmsf = self.rmsf_analysis_data[selection]['atom_indices']
# sns.tsplot(time="x", unit="y", data=conv_data,
# size=4, fit_reg=False,
# scatter_kws={"s": 50, "alpha": 1})
# sns.plt.show()
plt.xlabel(xlabel, fontsize=16)
plt.ylabel(ylabel, fontsize=16) # fix Angstrom need to change to nm
leg = plt.legend(loc='best', shadow=True, prop={'size': 16})
# set the linewidth of each legend object
for legobj in leg.legendHandles:
legobj.set_linewidth(9.0)
# plt.title(title)
# remove part of ticksg
sns.despine()
fig.savefig('Multi_Plot_RMSF_confidence_' + '_' + title + '_' + selection + '.png', dpi=custom_dpi, bbox_inches='tight')
print('RMSF +confidence plot created')
@hlp.timeit
def prep_mdtraj_object(self, filename):
'''
Prepare receptor mdtraj object
get mdtraj topology and save as pandas dataframe
Calculate pdb receptor center of mass
:return:
'''
self.receptor_file = filename
self.receptor_mdtraj = md.load_pdb(self.receptor_file)
self.receptor_mdtraj_topology = self.receptor_mdtraj.topology
self.receptor_mdtraj_topology_dataframe = self.receptor_mdtraj.topology.to_dataframe()
topology = self.receptor_mdtraj.topology
atom_indices = topology.select('backbone')
test = 1
# self.center_of_mass_receptor = md.compute_center_of_mass(self.receptor_mdtraj)[0]
#
# self.x_center = math.ceil(self.center_of_mass_receptor[0] * 10)
# self.y_center = math.ceil(self.center_of_mass_receptor[1] * 10)
# self.z_center = math.ceil(self.center_of_mass_receptor[2] * 10)
#
# self.receptor_pybel = pybel.readfile("pdb", self.receptor_file).__next__()
# self.ligand_pybel = pybel.readfile("pdb", self.ligand_file).__next__()
test = 1
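# Example call (sketch): the topology PDB can be the file produced by the
# 'gmx editconf -f npt.gro -o npt.pdb' conversion noted in the class docstring;
# the file name here is illustrative only.
#
#   analysis.prep_mdtraj_object('npt.pdb')
#
# This fills self.receptor_mdtraj_topology, which plot_rmsf_multi later uses to
# map RMSF atom indices back to residue numbers.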
@hlp.timeit
def plot_rmsf_multi(self, selection,
title='LasR RMSF',
xlabel=r"Residue",
ylabel=r"RMSF(nm)",
custom_dpi=1200):
'''
ylabel=r"C$_\alpha$ RMSF from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)",
:param title:
:param xlabel:
:param ylabel:
:param custom_dpi:
:return:
'''
sns.set(style="ticks", context='paper')
sns.set(font_scale=0.8)
# fig = plt.figure(figsize=(14, 7))
title = 'Cluster Simulation {0}-{1}'.format(self.sim_indexes[0], self.sim_indexes[-1])
# fig = plt.figure(figsize=(10, 7))
fig = plt.figure(figsize=plot_tools.cm2inch(8.4, 8.4))
# fig.suptitle(title, fontsize=16)
fig.suptitle(title)
# self.receptor_mdtraj_topology.atom(3000).residue.resSeq
for i in self.sim_indexes:
self.sim_time = self.simulation_data[str(i)]['time']
curr_label = 'Simulation {0}'.format(i)
traj_rmsf = self.simulation_data[str(i)]['rmsf'][selection]['rmsf']
atom_indices_rmsf = self.simulation_data[str(i)]['rmsf'][selection]['ref_atom_indices']
curr_color = self.colors_[i - 1]
converted_resseq,converted_index = converters.convert_mdtraj_atom_nums_to_resseq(self.receptor_mdtraj_topology,
atom_indices_rmsf)
conv_data_temp = converters.convert_data_to_pandas(atom_indices_rmsf, traj_rmsf)
conv_data = conv_data_temp.loc[converted_index]  # .ix was removed from pandas; .loc gives the same label-based lookup here
conv_data['x'] = converted_resseq
test = 1
# plt.plot(self.sim_time, traj_rmsd, color=curr_color,
# linewidth=0.6, label=curr_label)
plt.plot(conv_data['x'], conv_data['y'], color=curr_color,
linewidth=0.52, label=curr_label)
#plt.xlim(min(conv_data['x']) - 100, max(conv_data['x']) + 100)
# traj_rmsf = self.rmsf_analysis_data[selection]['rmsf']
# atom_indices_rmsf = self.rmsf_analysis_data[selection]['atom_indices']
# sns.tsplot(time="x", unit="y", data=conv_data,
# size=4, fit_reg=False,
# scatter_kws={"s": 50, "alpha": 1})
# sns.plt.show()
# plt.xlabel(xlabel, fontsize=16)
# plt.ylabel(ylabel, fontsize=16) # fix Angstrom need to change to nm
plt.xlabel(xlabel)
plt.ylabel(ylabel) #
# leg = plt.legend(loc='best', shadow=True, prop={'size': 16})
leg = plt.legend(loc='best', shadow=True)
# set the linewidth of each legend object
for legobj in leg.legendHandles:
legobj.set_linewidth(6.0)
# plt.title(title)
# remove part of ticksg
sns.despine()
fig.savefig('Multi_Plot_RMSF_' + '_' + title + '_' + selection + '.png', dpi=custom_dpi, bbox_inches='tight')
print('RMSF plot created')
def count_lig_hbond(self, t, hbonds, ligand):
label = lambda hbond: '%s -- %s' % (t.topology.atom(hbond[0]), t.topology.atom(hbond[2]))
hbond_atoms = []
hbond_indexes_sel = []
hbond_count = 0
for hbond in hbonds:
res = label(hbond)
# print('res ', res)
if ligand in res:
# print("res is ", res)
hbond_atoms.append(res)
hbond_indexes_sel.append(hbond)
hbond_count += 1
test=1
# print('------------------------------------------------')
test = 1
return hbond_atoms, hbond_count, hbond_indexes_sel
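# Note: mdtraj's baker_hubbard / wernet_nilsson return hydrogen bonds as index
# triplets [donor_atom, hydrogen_atom, acceptor_atom]; count_lig_hbond keeps only
# the triplets whose donor -- acceptor label mentions the ligand residue.
# Minimal usage sketch (ligand residue name 'HSL' is taken from the defaults below):
#
#   hbond_atoms, n_hbonds, hbond_idx = self.count_lig_hbond(t, hbonds, 'HSL')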
@hlp.timeit
def hbond_lig_count_analysis(self,
ligand_name='HSL',
title='Simulation',
xlabel=r"Time $t$ (ns)",
ylabel=r"Number of Hydrogen Bonds",
custom_dpi=600):
sns.set(style="ticks", context='paper')
# sns.set(font_scale=2)
fig = plt.figure(figsize=(14, 7))
title = 'Simulations of Clusters {0}-{1}'.format(self.sim_indexes[0], self.sim_indexes[-1])
# fig = plt.figure(figsize=(10, 7))
fig.suptitle(title, fontsize=16)
traj_frame = self.simulation_data[str(self.sim_indexes[0])]['clustersCentroid']
self.sim_time = self.simulation_data[str(self.sim_indexes[0])]['time']
t = traj_frame[0]
for i in self.sim_indexes:
self.sim_time = self.simulation_data[str(i)]['time']
hbonds_frames = self.simulation_data[str(i)]['hbondFrames']
sim_hbond_atoms = []
sim_hbond_count = []
for hbonds in hbonds_frames:
hbond_atoms, hbond_count, hbond_indexes_sel = self.count_lig_hbond(t, hbonds, ligand_name)
sim_hbond_atoms.append(hbond_atoms)
sim_hbond_count.append(hbond_count)
sim_hbound_np = np.array(sim_hbond_count)
self.simulation_data[str(i)].update({'hbond_atoms':sim_hbond_atoms})
self.simulation_data[str(i)].update({'hbond_count':sim_hbond_count})
curr_color = self.colors_[i - 1]
# curr_label = 'Simulation {0}'.format(i)
curr_label = "Simulation of Cluster {0} mean: {1}±{2}".format(i, round(np.mean(sim_hbound_np),3),
round(np.std(sim_hbond_count),3))
# Version 1
plt.plot(self.sim_time, sim_hbond_count, color=curr_color, marker = 'x',
linewidth=0.2, label=curr_label)
# Version 2
# plt.scatter(self.sim_time, sim_hbond_count, color=curr_color, marker = 'x',
# linewidth=0.3, label=curr_label)
# data_frame = converters.convert_data_to_pandas(self.sim_time, self.hbond_count)
#
# y_average_mean = data_frame['y'].rolling(center=False, window=20).mean()
# atom_indices_rmsf = self.simulation_data[str(i)]['rmsf'][selection]['ref_atom_indices']
# curr_color = self.colors_[i - 1]
#
# conv_data = converters.convert_data_to_pandas(atom_indices_rmsf, traj_rmsf)
#
# # plt.plot(self.sim_time, traj_rmsd, color=curr_color,
# # linewidth=0.6, label=curr_label)
#
# plt.plot(conv_data['x'], conv_data['y'], color=curr_color,
# linewidth=0.6, label=curr_label)
# plt.xlim(min(conv_data['x']) - 100, max(conv_data['x']) + 100)
test = 1
# traj_rmsf = self.rmsf_analysis_data[selection]['rmsf']
# atom_indices_rmsf = self.rmsf_analysis_data[selection]['atom_indices']
# sns.tsplot(time="x", unit="y", data=conv_data,
# size=4, fit_reg=False,
# scatter_kws={"s": 50, "alpha": 1})
# sns.plt.show()
plt.xlabel(xlabel, fontsize=16)
plt.ylabel(ylabel, fontsize=16) # fix Angstrom need to change to nm
leg = plt.legend(loc='best', shadow=True, prop={'size': 16})
# set the linewidth of each legend object
for legobj in leg.legendHandles:
legobj.set_linewidth(9.0)
# plt.title(title)
# remove part of ticksg
sns.despine()
fig.savefig('Multi_Plot_HBOND_count_Lig_' + '_' + title + '_' + ligand_name + '.png', dpi=custom_dpi, bbox_inches='tight')
print('Multi HBond lig count plot created')
@hlp.timeit
def hbond_freq_plot_analysis(self,
ligand_name='HSL',
title='Simulation',
xlabel=r"Time $t$ (ns)",
ylabel=r"Number of Hydrogen Bonds",
custom_dpi=600):
sns.set(style="ticks", context='paper')
# sns.set(font_scale=2)
traj_frame = self.simulation_data[str(self.sim_indexes[0])]['clustersCentroid']
self.sim_time = self.simulation_data[str(self.sim_indexes[0])]['time']
t = traj_frame[0]
for i in self.sim_indexes:
plt.clf()
fig = plt.figure(figsize=(14, 7))
title = 'Simulations of Clusters {0}-{1}'.format(self.sim_indexes[0], self.sim_indexes[-1])
# fig = plt.figure(figsize=(10, 7))
fig.suptitle(title, fontsize=16)
self.sim_time = self.simulation_data[str(i)]['time']
hbonds_frames = self.simulation_data[str(i)]['hbondFrames']
sim_hbond_atoms = []
sim_hbond_count = []
sim_hbond_sel = []
for hbonds in hbonds_frames:
hbond_atoms, hbond_count, hbond_indexes_sel = self.count_lig_hbond(t, hbonds, ligand_name)
sim_hbond_atoms.append(hbond_atoms)
sim_hbond_count.append(hbond_count)
if len( hbond_indexes_sel) > 0:
sim_hbond_sel+= hbond_indexes_sel
sim_hbound_np = np.array(sim_hbond_count)
sim_hbound_sel_np = np.array(sim_hbond_sel)
# self.simulation_data[str(i)].update({'hbond_atoms':sim_hbond_atoms})
# self.simulation_data[str(i)].update({'hbond_count':sim_hbond_count})
# curr_color = self.colors_[i - 1]
# curr_label = 'Simulation {0}'.format(i)
curr_label = "Simulation of Cluster {0} mean: {1}±{2}".format(i, round(np.mean(sim_hbound_np),3),
round(np.std(sim_hbond_count),3))
# This won't work here
da_distances = md.compute_distances(t, sim_hbound_sel_np[:, [0, 2]], periodic=False)
# Version 1
# plt.plot(self.sim_time, sim_hbond_count, color=curr_color, marker = 'x',
# linewidth=0.2, label=curr_label)
# color = itertools.cycle(['r', 'b', 'gold'])
colors = sns.cubehelix_palette(n_colors=len(da_distances), rot=-.4)
# self.colors_ = colors
label = lambda hbond: '%s -- %s' % (t.topology.atom(hbond[0]), t.topology.atom(hbond[2]))
color = itertools.cycle(['r', 'b', 'gold'])
# use a separate loop variable so the simulation index i (used in the file name below) is not clobbered
for j in [0]:
plt.hist(da_distances[:, j], color=colors[j], label=label(sim_hbound_sel_np[j]), alpha=0.5)
plt.legend()
plt.ylabel('Freq');
plt.xlabel('Donor-acceptor distance [nm]')
# plt.xlabel(xlabel, fontsize=16)
# plt.ylabel(ylabel, fontsize=16) # fix Angstrom need to change to nm
#
# leg = plt.legend(loc='best', shadow=True, prop={'size': 16})
#
# # set the linewidth of each legend object
# for legobj in leg.legendHandles:
# legobj.set_linewidth(9.0)
sns.despine()
fig.savefig('Multi_Plot_HBOND_frequency_' + '_' + title + '_' + str(i)+ '_'+ ligand_name + '.png', dpi=custom_dpi, bbox_inches='tight')
print('Multi HBond frequency lig plot created')
@hlp.timeit
def plot_solvent_area_multi(self, show=False):
fig = plt.figure(figsize=(10, 10))
plt.plot(self.sasa_traj.time, self.total_sasa)
plt.xlabel('Time [ps]', size=16)
plt.ylabel('Total SASA (nm)^2', size=16)
if show is True:
plt.show()
fig.savefig(self.simulation_name + '_' + 'SASA_plot.png', dpi=300, bbox_inches='tight')
@hlp.timeit
def plot_solvent_area_frame_multi(self, frame, show=False):
fig = plt.figure(figsize=(10, 10))
plt.plot(self.sasa_traj.time, self.sasa[frame])
plt.xlabel('Time [ps]', size=16)
plt.ylabel('Total SASA (nm)^2', size=16)
if show is True:
plt.show()
fig.savefig(self.simulation_name + '_' + 'SASA_plot_{0}.png'.format(frame), dpi=300, bbox_inches='tight')
@hlp.timeit
def plot_solvent_area_autocorr_multi(self, show=False):
self.sasa_autocorr = protein_analysis.autocorr(self.total_sasa)
fig = plt.figure(figsize=(10, 10))
plt.semilogx(self.sasa_traj.time, self.sasa_autocorr)
plt.xlabel('Time [ps]', size=16)
plt.ylabel('SASA autocorrelation', size=16)
if show is True:
plt.show()
fig.savefig(self.simulation_name + '_' + 'SASA_autocorrelation.png', dpi=300, bbox_inches='tight')
@hlp.timeit
def plot_rmsd_cluster_color_multi(self, selection,
title='LasR RMSD',
xlabel=r"Time $t$ (ns)",
ylabel=r"RMSD(nm)",
custom_dpi=300,
lang='rus'):
import pylab as plt
sns.set(style="ticks", context='paper')
'''
ylabel=r"C$_\alpha$ RMSD from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)",
'''
fig = plt.figure(figsize=(14, 7))
# ax = fig.add_axes([2, 2, 2, 2])
# plt.plot(time_sim1_np, rmsd_sim1_np, 'b')
# plt.plot(self.sim_time, self.sim_rmsd, color=self.cluster_colors,
# linewidth=0.6, label='LasR')
if lang == 'rus':
title = 'Симуляция'
xlabel = r"Время $t$ (нс)"
ylabel = r"RMSD(нм)"
else:
title = 'Simulation'
xlabel = r"Time $t$ (ns)"
ylabel = r"RMSD(nm)"
sns.set(font_scale=2)
plt.plot(self.sim_time, self.sim_rmsd, zorder=1)
traj_rmsd = self.rmsd_analysis_data[selection]
plt.scatter(self.sim_time, traj_rmsd, marker='o', s=30, facecolor='0.5', lw=0,
c=self.cluster_colors, zorder=2)
# plt.legend(loc="best", prop={'size': 8})
plt.xlabel(xlabel)
plt.xlim(self.sim_time[0], self.sim_time[-1])
plt.ylabel(ylabel) # fix Angstrom need to change to nm
plt.title(title)
fig.tight_layout()
# remove part of ticks
sns.despine()
# plt.show()
fig.savefig(self.simulation_name + '_' + title + '_' + selection + '_cluster_color' + '_' + lang + '.png',
dpi=custom_dpi, bbox_inches='tight')
print('RMSD plot created')
print('-----------------------------------\n')
@hlp.timeit
def find_best_fit_regressor(self):
# from sklearn.tree import DecisionTreeRegressor
self.best = 100
self.index = 100
self.best_rg = 100
self.index_rg = 100
self.regr_index = []
self.regr_scores = {}
self.regr_index_rg = []
self.regr_scores_rg = {}
self.reshaped_time = self.sim_time.reshape(-1, 1)
for i in list(range(1, self.regression_fit_range + 1)):
self.create_fit(i)
print('best score is ', self.best)
print('best index is', self.index)
print('-=-' * 10)
print('best score Rg is ', self.best_rg)
print('best index Rg is', self.index_rg)
@hlp.timeit
def create_fit(self, i):
from sklearn import tree
from sklearn.model_selection import cross_val_score
self.reshaped_time = self.sim_time.reshape(-1, 1)
regressor = tree.DecisionTreeRegressor(max_depth=i) # interesting absolutely
fitVal = regressor.fit(self.reshaped_time, self.sim_rmsd)
print('fitVal ', fitVal)
rmsd_pred = regressor.predict(self.reshaped_time)
# cv how is it determined?
# A good compromise is ten-fold cross-validation. 10ns
# Maybe mse better?
cross_val = cross_val_score(regressor,
self.reshaped_time,
self.sim_rmsd,
scoring="neg_mean_squared_error",
cv=10)
regressor_rg = tree.DecisionTreeRegressor(max_depth=i) # interesting absolutely
fitVal_rg = regressor_rg.fit(self.reshaped_time, self.rg_res)
print('fitVal_rg ', fitVal_rg)
rmsd_pred_rg = regressor_rg.predict(self.reshaped_time)
# cv how is it determined?
# A good compromise is ten-fold cross-validation. 10ns
cross_val_rg = cross_val_score(regressor_rg,
self.reshaped_time,
self.rg_res,
scoring="neg_mean_squared_error",
cv=10)
self.regr_scores.update({i: cross_val})
self.regr_index.append(i)
self.regr_scores_rg.update({i: cross_val_rg})
self.regr_index_rg.append(i)
cross_val_score = -cross_val.mean()
cross_val_std = cross_val.std()
cross_val_score_rg = -cross_val_rg.mean()
cross_val_std_rg = cross_val_rg.std()
print('Cross validation score is ', cross_val)
print("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(i, -cross_val.mean(), cross_val.std()))
print('-=-' * 10)
print('Cross validation Rg score is ', cross_val_rg)
print("Rg Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(i, -cross_val_rg.mean(), cross_val_rg.std()))
# r2_score = regressor.score(self.sim_time.reshape(-1, 1), self.sim_rmsd)
# if r2_score > self.r2_best:
# self.r2_best = r2_score
# self.r2_index = i
if cross_val_score < self.best:
self.best = cross_val_score
self.index = i
if cross_val_score_rg < self.best_rg:
self.best_rg = cross_val_score_rg
self.index_rg = i
del regressor
del fitVal
del rmsd_pred
time.sleep(2)
# print('R2 score is ', r2_score)
print('---------------------------------------------------------------\n')
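# Summary of the model selection above: for each max_depth i a
# DecisionTreeRegressor is fitted to RMSD(t) and Rg(t) and scored by 10-fold
# cross-validated negative MSE; find_best_fit_regressor then keeps the depth
# with the lowest mean MSE. A stand-alone sketch of the same idea (the arrays
# time_ns and rmsd are illustrative stand-ins for self.sim_time / self.sim_rmsd):
#
#   from sklearn import tree
#   from sklearn.model_selection import cross_val_score
#   scores = {d: -cross_val_score(tree.DecisionTreeRegressor(max_depth=d),
#                                 time_ns.reshape(-1, 1), rmsd,
#                                 scoring='neg_mean_squared_error', cv=10).mean()
#             for d in range(1, 11)}
#   best_depth = min(scores, key=scores.get)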
@hlp.timeit
def error_bar_rmsd_fit(self):
import matplotlib.pyplot as plt
x = self.regr_index
y = []
yerr_list = []
for i in self.regr_index:
# plt.boxplot(self.regr_scores[i])
cross_val_score = -self.regr_scores[i].mean()
cross_val_std = self.regr_scores[i].std()
y.append(cross_val_score)
yerr_list.append(cross_val_std)
fig = plt.figure(figsize=(10, 10))
plt.errorbar(x, y, yerr=yerr_list)
plt.scatter(x, y, s=160, c='b', marker='h',
label="Best score at Max Depth={}\nMSE = {:.2e}(+/- {:.2e})".format(self.index,
-self.regr_scores[
self.index].mean(),
self.regr_scores[
self.index].std()))
plt.legend(loc="best", prop={'size': 20})
plt.title("Mean squared error (MSE) averages for RMSD")
fig.savefig(self.simulation_name + '_errorBar_rmsd.png', dpi=300, bbox_inches='tight')
# plt.show()
print('Errorbar created ')
print('---------------------------------------------------------------\n')
@hlp.timeit
def error_bar_Rg_fit(self):
import matplotlib.pyplot as plt
x = self.regr_index
y = []
yerr_list = []
for i in self.regr_index:
# plt.boxplot(self.regr_scores[i])
cross_val_score = -self.regr_scores_rg[i].mean()
cross_val_std = self.regr_scores_rg[i].std()
y.append(cross_val_score)
yerr_list.append(cross_val_std)
fig = plt.figure(figsize=(10, 10))
plt.errorbar(x, y, yerr=yerr_list)
plt.scatter(x, y, s=160, c='b', marker='h',
label="Best score at Max Depth={}\nMSE = {:.2e}(+/- {:.2e})".format(self.index_rg,
-self.regr_scores_rg[
self.index_rg].mean(),
self.regr_scores_rg[
self.index_rg].std()))
plt.legend(loc="best", prop={'size': 20})
plt.title("Mean squared error (MSE) averages for Rg")
fig.savefig(self.simulation_name + '_errorBar_Rg.png', dpi=300, bbox_inches='tight')
# plt.show()
print('Errorbar created ')
print('---------------------------------------------------------------\n')
@hlp.timeit
def error_bar_fit_test(self):
import numpy as np
import matplotlib.pyplot as plt
# example data
x = np.arange(0.1, 4, 0.5)
y = np.exp(-x)
# example variable error bar values
yerr = 0.1 + 0.2 * np.sqrt(x)
xerr = 0.1 + yerr
# First illustrate basic pyplot interface, using defaults where possible.
plt.figure()
plt.errorbar(x, y, xerr=0.2, yerr=0.4)
plt.title("Simplest errorbars, 0.2 in x, 0.4 in y")
# Now switch to a more OO interface to exercise more features.
fig, axs = plt.subplots(nrows=2, ncols=2, sharex=True)
ax = axs[0, 0]
ax.errorbar(x, y, yerr=yerr, fmt='o')
ax.set_title('Vert. symmetric')
# With 4 subplots, reduce the number of axis ticks to avoid crowding.
ax.locator_params(nbins=4)
ax = axs[0, 1]
ax.errorbar(x, y, xerr=xerr, fmt='o')
ax.set_title('Hor. symmetric')
ax = axs[1, 0]
ax.errorbar(x, y, yerr=[yerr, 2 * yerr], xerr=[xerr, 2 * xerr], fmt='--o')
ax.set_title('H, V asymmetric')
ax = axs[1, 1]
ax.set_yscale('log')
# Here we have to be careful to keep all y values positive:
ylower = np.maximum(1e-2, y - yerr)
yerr_lower = y - ylower
ax.errorbar(x, y, yerr=[yerr_lower, 2 * yerr], xerr=xerr,
fmt='o', ecolor='g', capthick=2)
ax.set_title('Mixed sym., log y')
fig.suptitle('Variable errorbars')
plt.show()
@hlp.timeit
def plot_boxplot_fit_regr(self):
data_to_plot = []
for i in self.regr_index:
# plt.boxplot(self.regr_scores[i])
data_to_plot.append(self.regr_scores[i])
# Create a figure instance
fig = plt.figure(figsize=(10, 10))
# Create an axes instance
ax = fig.add_subplot(111)
# Create the boxplot
# change outlier to hexagon
# bp = ax.boxplot(data_to_plot, 0, 'gD')
# dont show outlier
bp = ax.boxplot(data_to_plot, 0, '')
# Save the figure
fig.savefig(self.simulation_name + '_boxplot.png', dpi=600, bbox_inches='tight')
# plt.show()
print('Box plot created ')
print('---------------------------------------------------------------\n')
@hlp.timeit
def example_test(self):
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
degrees = [1, 4, 8, 15, 20]
# true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = self.sim_time
y = self.sim_rmsd
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation (sklearn expects a 2-D feature array)
scores = cross_val_score(pipeline, X[:, np.newaxis], y,
scoring="neg_mean_squared_error", cv=10)
X_test = self.sim_time
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, self.sim_rmsd, label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
@hlp.timeit
def plot_rmsd_with_regressor(self, title='LasR Simulation RMSD',
xlabel=r"time $t$ (ns)",
ylabel=r"C$_\alpha$ RMSD from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)"):
import pylab as plt
from sklearn import tree
rfc = tree.DecisionTreeRegressor(max_depth=self.index) # interesting absolutely
fitVal = rfc.fit(self.sim_time.reshape(-1, 1), self.sim_rmsd)
print('fitVal ', fitVal)
self.rmsd_pred = rfc.predict(self.sim_time.reshape(-1, 1))
fig = plt.figure(figsize=(7, 7))
ax = fig.add_axes([2, 2, 2, 2])
# plt.plot(time_sim1_np, rmsd_sim1_np, 'b')
plt.plot(self.sim_time, self.sim_rmsd, color='b',
linewidth=0.6, label='Original Data')
plt.plot(self.sim_time, self.rmsd_pred, color='r',
linewidth=4, label='Fitted Data')
plt.legend(loc="best", prop={'size': 30})
plt.xlabel(xlabel)
plt.ylabel(ylabel) # fix Angstrom need to change to nm
plt.title(title)
# In[28]:
fig.savefig(self.simulation_name + '_' + title + '_tree' + '.png', dpi=300, bbox_inches='tight')
print('RMSD plot created with regressor')
print('-----------------------------------\n')
@hlp.timeit
def plot_Rg_with_regressor(self, title='LasR Radius of Gyration',
xlabel=r"time $t$ (ns)",
ylabel=r"C$_\alpha$ RMSD from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)"):
import pylab as plt
from sklearn import tree
rfc = tree.DecisionTreeRegressor(max_depth=self.index_rg) # interesting absolutely
fitVal = rfc.fit(self.sim_time.reshape(-1, 1), self.rg_res)
print('fitVal ', fitVal)
self.rmsd_pred_rg = rfc.predict(self.sim_time.reshape(-1, 1))
fig = plt.figure(figsize=(7, 7))
ax = fig.add_axes([2, 2, 2, 2])
# plt.plot(time_sim1_np, rmsd_sim1_np, 'b')
plt.plot(self.sim_time, self.rg_res, color='b',
linewidth=0.6, label='Original Data')
plt.plot(self.sim_time, self.rmsd_pred_rg, color='r',
linewidth=4, label='Fitted Data')
plt.legend(loc="best", prop={'size': 30})
plt.xlabel(xlabel)
plt.ylabel(ylabel) # fix Angstrom need to change to nm
plt.title(title)
# In[28]:
fig.savefig(self.simulation_name + '_' + title + '_tree' + '.png', dpi=300, bbox_inches='tight')
print('Rg plot created with regressor')
print('-----------------------------------\n')
@hlp.timeit
def md_full_load(self, custom_stride=10):
print('MD Load has been called\n')
print('-------------------------------\n')
self.full_traj = md.load(self.md_trajectory_file, top=self.md_topology_file,
stride=custom_stride)
self.sim_time = self.full_traj.time / 1000
print("Full trajectory loaded successfully")
print('-----------------------------------\n')
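# Minimal sketch of the attributes md_full_load expects (assumption: they are
# assigned elsewhere before the call; the file names are hypothetical):
#
#   self.md_trajectory_file = 'md_0_3_clear.xtc'
#   self.md_topology_file = 'npt.pdb'
#   self.md_full_load(custom_stride=10)
#
# mdtraj reports trajectory time in ps, so dividing by 1000 gives ns for plotting.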
@hlp.timeit
def rg_analysis(self, selection='protein'):
self.called_rg_analysis = True
# self.rg_traj = self.full_traj[:]
#
# self.topology = self.rmsd_traj.topology
#
# self.selection = self.topology.select(selection)
#
# # self.selection = self.topology.select(selection)
# # print('selection is ', self.selection)
#
# self.rg_traj.restrict_atoms(self.selection)
self.topology = self.full_traj.topology
self.selection = self.topology.select(selection)
self.rg_traj = self.full_traj.atom_slice(atom_indices=self.selection)
self.rg_res = md.compute_rg(self.rg_traj)
self.rg_analysis_data.update({selection: self.rg_res})
print("Rg has been calculated")
print('-----------------------------------\n')
@hlp.timeit
def hbond_analysis_count(self, selection='protein',
title='LasR H-Bonds',
xlabel=r"Time $t$ (ns)",
ylabel=r"Number of Hydrogen Bonds",
custom_dpi=300):
sns.set(style="ticks", context='paper')
self.called_hbond_analysis_count = True
print('HBonds analysis has been called\n')
print('-------------------------------\n')
self.topology = self.full_traj.topology
self.selection = self.topology.select(selection)
print('selection is ', self.selection)
# this is for keeping selection from trajectory
# self.full_traj.restrict_atoms(self.selection)
self.hbond_count = []
self.sim_time = self.full_traj.time / 1000
# paral = Pool(processes=16)
# data_count = list(map(self.hbond_frame_calc, self.full_traj))
#
# print('data count ',data_count)
# hbonds = md.baker_hubbard(self.full_traj, exclude_water=True, periodic=False)
# print('count of hbonds is ', len(hbonds))
# self.hbond_count.append(len(hbonds))
hbonds_frames = md.wernet_nilsson(self.full_traj, exclude_water=True, periodic=False)
self.hbonds_frames = hbonds_frames
for hbonds in hbonds_frames:
self.hbond_count.append(len(hbonds))
data_frame = converters.convert_data_to_pandas(self.sim_time, self.hbond_count)
y_average_mean = data_frame['y'].rolling(center=False, window=20).mean()
fig = plt.figure(figsize=(7, 7))
# ax = fig.add_axes([2, 2, 2, 2])
# plt.plot(time_sim1_np, rmsd_sim1_np, 'b')
plt.plot(data_frame['x'], data_frame['y'], color='b',
linewidth=0.6, label='LasR')
# Dont plot rolling mean
plt.plot(data_frame['x'], y_average_mean, color='r',
linewidth=0.9, label='LasR rolling mean')
# plt.legend(loc="best", prop={'size': 8})
plt.xlabel(xlabel)
plt.ylabel(ylabel) # fix Angstrom need to change to nm
plt.title(title)
# remove part of ticks
sns.despine()
fig.savefig(self.simulation_name + '_' + title + '.png', dpi=custom_dpi, bbox_inches='tight')
print('HBond count plot created')
print('-----------------------------------\n')
# for hbond in hbonds:
# print(hbond)
# print(label(hbond))
# atom1 = self.full_traj.topology.atom(hbond[0])
# atom2 = self.full_traj.topology.atom(hbond[2])
# # atom3 = traj_sim1_hbonds.topology.atom(hbond[2])
# if atom1.residue.resSeq != atom2.residue.resSeq:
# if atom1.residue.resSeq + 1 != atom2.residue.resSeq:
# # for domain reside analysis
# if atom1.residue.resSeq < 171 and atom2.residue.resSeq > 172:
# diff_hbonds.append(hbond)
@hlp.timeit
def hbond_analysis(self, selection='protein'):
self.topology = self.full_traj.topology
self.selection = self.topology.select(selection)
print('selection is ', self.selection)
# this is for keeping selection from trajectory
self.full_traj.restrict_atoms(self.selection)
if self.save_pdb_hbond is True:
traj_sim1_hbonds = md.load_pdb(self.pdb_file_name)
hbonds = md.baker_hubbard(traj_sim1_hbonds, periodic=False)
# hbonds = md.wernet_nilsson(traj_sim1_hbonds, periodic=True)[0]
label = lambda hbond: '%s -- %s' % (traj_sim1_hbonds.topology.atom(hbond[0]),
traj_sim1_hbonds.topology.atom(hbond[2]))
diff_hbonds = []
for hbond in hbonds:
# print(hbond)
# print(label(hbond))
atom1 = traj_sim1_hbonds.topology.atom(hbond[0])
atom2 = traj_sim1_hbonds.topology.atom(hbond[2])
# atom3 = traj_sim1_hbonds.topology.atom(hbond[2])
if atom1.residue.resSeq != atom2.residue.resSeq:
if atom1.residue.resSeq + 1 != atom2.residue.resSeq:
# domain reside analysis
if atom1.residue.resSeq < 171 and atom2.residue.resSeq > 172:
diff_hbonds.append(hbond)
for hbond in diff_hbonds:
print(hbond)
print(label(hbond))
print('Diff hbonds printed\n')
diff_hbonds = np.asarray(diff_hbonds)
self.da_distances = md.compute_distances(traj_sim1_hbonds, diff_hbonds[:, [0, 2]], periodic=False)
import itertools
# color = itertools.cycle(['r', 'b', 'gold'])
# fig = plt.figure(figsize=(7, 7))
# color = np.linspace(0, len(diff_hbonds),len(diff_hbonds))
#
# # color = itertools.cycle(['r', 'b','g','gold'])
# for i in list(range(0,len(diff_hbonds))):
# plt.hist(self.da_distances[:, i], color=next(color), label=label(diff_hbonds[i]), alpha=0.5)
# plt.legend()
# plt.ylabel('Freq');
# plt.xlabel('Donor-acceptor distance [nm]')
# plt.show()
# this works wel, but needs to be modified
fig = plt.figure(figsize=(7, 7))
color = np.linspace(0, len(diff_hbonds), len(diff_hbonds))
color = itertools.cycle(['r', 'b', 'g', 'tan', 'black', 'grey', 'yellow', 'gold'])
for i in list(range(0, len(diff_hbonds))):
plt.hist(self.da_distances[:, i], color=next(color), label=label(diff_hbonds[i]), alpha=0.5)
plt.legend()
plt.ylabel('Freq');
plt.xlabel('Donor-acceptor distance [nm]')
plt.show()
fig.savefig(self.simulation_name + '_hbonds.png', dpi=600, bbox_inches='tight')
print("Hbonds have been calculated")
print('-----------------------------------\n')
@hlp.timeit
def rmsd_analysis(self, selection):
'''
:param selection: has to be mdtraj compatible
:return:
'''
self.called_rmsd_analysis = True
# self.rmsd_traj = self.full_traj[:]
#
# self.topology = self.rmsd_traj.topology
#
# self.selection = self.topology.select(selection)
#
# # self.selection = self.topology.select(selection)
# # print('selection is ', self.selection)
#
# self.rmsd_traj.restrict_atoms(self.selection)
# self.full_traj.save(selection +'.pdb')
# this is for keeping selection from trajectory
# self.rmsd_traj.restrict_atoms(self.selection)
# self.rmsd_traj = self.full_traj[:]
self.topology = self.full_traj.topology
self.selection = self.topology.select(selection)
# self.selection = self.topology.select(selection)
# print('selection is ', self.selection)
self.rmsd_traj = self.full_traj.atom_slice(atom_indices=self.selection)
self.sim_rmsd = md.rmsd(self.rmsd_traj, self.rmsd_traj, 0)
self.sim_time = self.rmsd_traj.time / 1000
self.rmsd_analysis_data.update({selection: self.sim_rmsd})
self.regression_fit_range = 10
print('RMSD analysis has been called on selection {0}\n'.format(selection))
print('-----------------------------\n')
@hlp.timeit
def plot_rmsd_cluster_color(self, selection,
title='LasR RMSD',
xlabel=r"Time $t$ (ns)",
ylabel=r"RMSD(nm)",
custom_dpi=300,
lang='rus'):
import pylab as plt
sns.set(style="ticks", context='paper')
'''
ylabel=r"C$_\alpha$ RMSD from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)",
'''
fig = plt.figure(figsize=(14, 7))
# ax = fig.add_axes([2, 2, 2, 2])
# plt.plot(time_sim1_np, rmsd_sim1_np, 'b')
# plt.plot(self.sim_time, self.sim_rmsd, color=self.cluster_colors,
# linewidth=0.6, label='LasR')
if lang == 'rus':
title = 'Симуляция'
xlabel = r"Время $t$ (нс)"
ylabel = r"RMSD(нм)"
else:
title = 'Simulation'
xlabel = r"Time $t$ (ns)"
ylabel = r"RMSD(nm)"
sns.set(font_scale=2)
plt.plot(self.sim_time, self.sim_rmsd, zorder=1)
traj_rmsd = self.rmsd_analysis_data[selection]
plt.scatter(self.sim_time, traj_rmsd, marker='o', s=30, facecolor='0.5', lw=0,
c=self.cluster_colors, zorder=2)
# plt.legend(loc="best", prop={'size': 8})
plt.xlabel(xlabel)
plt.xlim(self.sim_time[0], self.sim_time[-1])
plt.ylabel(ylabel) # fix Angstrom need to change to nm
plt.title(title)
fig.tight_layout()
# remove part of ticks
sns.despine()
# plt.show()
fig.savefig(self.simulation_name + '_' + title + '_' + selection + '_cluster_color' + '_' + lang + '.png',
dpi=custom_dpi, bbox_inches='tight')
print('RMSD plot created')
print('-----------------------------------\n')
@hlp.timeit
def plot_rmsf(self, selection,
title='LasR RMSF',
xlabel=r"Residue",
ylabel=r"RMSF(nm)",
custom_dpi=300):
'''
ylabel=r"C$_\alpha$ RMSF from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)",
:param title:
:param xlabel:
:param ylabel:
:param custom_dpi:
:return:
'''
sns.set(style="ticks", context='paper')
sns.set(font_scale=2)
traj_rmsf = self.rmsf_analysis_data[selection]['rmsf']
atom_indices_rmsf = self.rmsf_analysis_data[selection]['atom_indices']
conv_data = converters.convert_data_to_pandas(atom_indices_rmsf, traj_rmsf)
# sns.tsplot(time="x", unit="y", data=conv_data,
# size=4, fit_reg=False,
# scatter_kws={"s": 50, "alpha": 1})
# sns.plt.show()
fig = plt.figure(figsize=(14, 7))
plt.plot(conv_data['x'], conv_data['y'], color='b',
linewidth=0.6, label=title)
plt.xlabel(xlabel)
plt.xlim(min(conv_data['x']) - 100, max(conv_data['x']) + 100)
plt.ylabel(ylabel) # fix Angstrom need to change to nm
plt.title(title)
# remove part of ticks
sns.despine()
fig.savefig(self.simulation_name + '_' + title + '_rmsf.png', dpi=custom_dpi, bbox_inches='tight')
print('RMSF plot created')
@hlp.timeit
def plot_rg(self,
selection,
title='LasR Rg',
xlabel=r"time $t$ (ns)",
ylabel=r"C$_\alpha$ Rg from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)",
custom_dpi=600):
import pylab as plt
sns.set(style="ticks", context='paper')
sns.set(font_scale=2)
# In[27]:
fig = plt.figure(figsize=(7, 7))
ax = fig.add_axes([2, 2, 2, 2])
# plt.plot(time_sim1_np, rmsd_sim1_np, 'b')
traj_rg = self.rg_analysis_data[selection]
plt.plot((self.sim_time), traj_rg, color='b',
linewidth=0.6, label='LasR')
plt.legend(loc="best", prop={'size': 8})
plt.xlabel(xlabel)
plt.ylabel(ylabel) # fix Angstrom need to change to nm
plt.title(title)
# In[28]:
fig.savefig(self.simulation_name + '_' + title + '_' + selection + '.png', dpi=custom_dpi, bbox_inches='tight')
print('Rg plot created')
print('-----------------------------------\n')
# need to select only protein for analysis
@hlp.timeit
def find_centroid(self):
atom_indices = [a.index for a in self.full_traj.topology.atoms if a.element.symbol != 'H']
distances = np.empty((self.full_traj.n_frames, self.full_traj.n_frames))
for i in range(self.full_traj.n_frames):
distances[i] = md.rmsd(self.full_traj, self.full_traj, i, atom_indices=atom_indices)
beta = 1
index = np.exp(-beta * distances / distances.std()).sum(axis=1).argmax()
print(index)
centroid = self.full_traj[index]
print(centroid)
centroid.save('centroid.pdb')
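# The centroid above is picked with the standard mdtraj recipe: given the
# pairwise heavy-atom RMSD matrix d, each frame i gets the similarity score
#   s_i = sum_j exp(-beta * d_ij / std(d)),   with beta = 1,
# and the frame with the largest s_i (closest on average to all other frames)
# is saved as centroid.pdb.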
####################################################################################################################
# TODO do PCA transformation of MD simulation
@hlp.timeit
def md_pca_analysis(self, selection='protein'):
self.called_md_pca_analysis = True
print('PCA analysis has been called\n')
print('-------------------------------\n')
pca1 = PCA(n_components=2)
# this is for keeping selection from trajectory
# self.pca_traj = self.full_traj[:]
#
# self.topology = self.pca_traj.topology
#
# self.selection = self.topology.select(selection)
#
# # self.selection = self.topology.select(selection)
# # print('selection is ', self.selection)
#
# self.pca_traj.restrict_atoms(self.selection)
# self.full_traj.save(selection +'.pdb')
self.topology = self.full_traj.topology
self.selection = self.topology.select(selection)
self.pca_traj = self.full_traj.atom_slice(atom_indices=self.selection)
self.pca_traj.superpose(self.pca_traj, 0)
self.reduced_cartesian = pca1.fit_transform(
self.pca_traj.xyz.reshape(self.pca_traj.n_frames, self.pca_traj.n_atoms * 3))
print(self.reduced_cartesian.shape)
print("PCA transformation finished successfully")
print('-----------------------------------\n')
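# The PCA input above is the aligned Cartesian trajectory flattened to shape
# (n_frames, 3 * n_atoms), so the two components capture the dominant collective
# motions of the selection. Equivalent stand-alone sketch (traj is any mdtraj
# Trajectory; the name is illustrative):
#
#   from sklearn.decomposition import PCA
#   traj.superpose(traj, 0)
#   xyz_2d = traj.xyz.reshape(traj.n_frames, traj.n_atoms * 3)
#   reduced = PCA(n_components=2).fit_transform(xyz_2d)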
####################################################################################################################
@hlp.timeit
def extract_info_cluster_data(self, cluster_data, key):
temp_data = []
for clust_num in self.range_n_clusters:
temp_data.append(cluster_data[clust_num][key])
return temp_data
@hlp.timeit
def silhouette_graph_pca(self):
cluster_range = self.range_n_clusters
score = self.sil_pca
criteria_name = 'Mean Silhouette Coefficient for all samples'
score_text = 'Objects with a high silhouette value are considered well clustered'
plot_tools.plot_cluster_analysis(cluster_range, score, criteria_name, score_text)
def calinski_graph_pca(self):
cluster_range = self.range_n_clusters
score = self.calinski_pca
criteria_name = 'Calinski-Harabaz score'
score_text = 'Objects with a high Calinski-Harabaz score value are considered well clustered'
plot_tools.plot_cluster_analysis(cluster_range, score, criteria_name, score_text)
@hlp.timeit
def dunn_graph_pca(self):
cluster_range = self.range_n_clusters
score = self.dunn_pca
criteria_name = "Dunn's Index"
score_text = "Maximum value of the index represents the right partitioning given the index"
plot_tools.plot_cluster_analysis(cluster_range, score, criteria_name, score_text)
@hlp.timeit
def dbi_graph_pca(self):
cluster_range = self.range_n_clusters
score = self.dbi_pca
criteria_name = 'Davies-Bouldin Index'
score_text = 'The optimal clustering solution has the smallest Davies-Bouldin index value.'
plot_tools.plot_cluster_analysis(cluster_range, score, criteria_name, score_text)
@hlp.timeit
def select_number_of_clusters(self):
# ["foo", "bar", "baz"].index("bar")
max_silhouette = max(self.sil_pca)
max_dunn = max(self.dunn_pca)
min_dbi = min(self.dbi_pca)
sil_index = self.sil_pca.index(max_silhouette)
dunn_index = self.dunn_pca.index(max_dunn)
dbi_index = self.dbi_pca.index(min_dbi)
cluster_quantity = []
cluster_quantity.append(self.range_n_clusters[sil_index])
cluster_quantity.append(self.range_n_clusters[dunn_index])
cluster_quantity.append(self.range_n_clusters[dbi_index])
print('------------------------------------------------')
print('verify yolo', cluster_quantity)
cluster_set = set(cluster_quantity)
cluster_dict = {}
for n_set in cluster_set:
count = cluster_quantity.count(n_set)
cluster_dict.update({n_set: count})
print('verify yolo ', cluster_dict)
import operator
clust_num = max(cluster_dict.items(), key=operator.itemgetter(1))[0]
print("number of clusters is ", clust_num)
return clust_num
# def write_model_to_file(self, model, resnum=None, filename_pdb=None):
# curr_df = model['molDetail']['dataframe']
# pdb_tools.write_lig(curr_df, resnum, filename_pdb)
# need to select only protein for analysis
@hlp.timeit
def find_max_cluster(self):
length = 0
clust_temp_data = []
for k in self.clusterized_data:
data = self.clusterized_data[k]
if len(data) > length:
length = len(data)
clust_temp_data = data
self.max_clust_temp_data = clust_temp_data
return self.max_clust_temp_data
@hlp.timeit
def find_clusters_centroid(self):
print('Find Clusters centroids is called\n')
print('-----------------------------------\n')
self.called_find_clusters_centroid = True
self.clusters_centroids = []
for k in self.clusterized_data:
print('Finding centroid for cluster {0}'.format(k))
clust_temp_data = self.clusterized_data[k]
atom_indices = [a.index for a in clust_temp_data.topology.atoms if a.element.symbol != 'H']
distances = np.empty((clust_temp_data.n_frames, clust_temp_data.n_frames))
for i in range(clust_temp_data.n_frames):
distances[i] = md.rmsd(clust_temp_data, clust_temp_data, i, atom_indices=atom_indices)
beta = 1
index = np.exp(-beta * distances / distances.std()).sum(axis=1).argmax()
print(index)
centroid = clust_temp_data[index]
# self.centroid_conf = centroid
# print(centroid)
# self.centroid_conf = centroid
self.clusters_centroids.append(centroid)
centroid.save(self.simulation_name + '_' + '{0}_cluster_centroid.pdb'.format(k))
print('-----------------------------------\n')
@hlp.timeit
def find_max_cluster_centroid(self):
print('Find Max Cluster centroid is called\n')
print('-----------------------------------\n')
self.called_find_max_cluster_centroid = True
clust_temp_data = self.max_clust_temp_data
atom_indices = [a.index for a in clust_temp_data.topology.atoms if a.element.symbol != 'H']
distances = np.empty((clust_temp_data.n_frames, clust_temp_data.n_frames))
for i in range(clust_temp_data.n_frames):
distances[i] = md.rmsd(clust_temp_data, clust_temp_data, i, atom_indices=atom_indices)
beta = 1
index = np.exp(-beta * distances / distances.std()).sum(axis=1).argmax()
print(index)
centroid = clust_temp_data[index]
self.centroid_conf = centroid
print(centroid)
self.centroid_conf = centroid
centroid.save(self.simulation_name + '_' + 'max_cluster_centroid.pdb')
print('-----------------------------------\n')
# need to find a way to extract models correctrly
@hlp.timeit
def export_cluster_models(self,
selection_obj='protein',
select_lig=None,
save_data=False, nth_frame=1):
'''
Save cluster data to pdb files in cluster_traj directory
:return:
'''
n_clusters = self.select_number_of_clusters()
cluster_labels = self.clusters_info[n_clusters]['labels']
labels = cluster_labels
sample_silhouette_values = self.clusters_info[n_clusters]['silhouette_values']
silhouette_avg = self.clusters_info[n_clusters]['silhouette']
centers = self.clusters_info[n_clusters]['centers']
unique_labels = list(set(cluster_labels))
print('Unique labels ', unique_labels)
original_data = self.full_traj
self.clusterized_data = {}
for k in unique_labels: # Need to modify WORKS
# print('k is ',k)
# k == -1 then it is an outlier
if k != -1:
cluster_data = []
xyz = original_data[labels == k]
# sel_traj = xyz[:]
topology = xyz.topology
selection_name = selection_obj
selection_final_name = selection_obj
selection = topology.select(selection_obj)
selection_final = selection
if select_lig is not None:
# selection1 = topology.select(select_lig)
# selection_final = np.concatenate((selection, selection1))
# selection_name = selection_name + ' and ' + select_lig
#
# selection_final = list(topology.select(selection_obj)) + list(topology.select(select_lig))
selection_final_name = selection_obj + '+' + select_lig
selection_final = topology.select(selection_obj + ' or ' + select_lig)
# list(topology.select(selection_obj)) + list(topology.select(select_lig))
sel_traj = xyz.atom_slice(atom_indices=selection_final)
# sel_traj.restrict_atoms(selection_final)
clust_num = int(k) + 1
if save_data is True:
temp_data = sel_traj[::nth_frame]
temp_data[0].save(self.simulation_name + '_' + 'cluster_' + str(
clust_num) + '_' + selection_final_name + '_frame_0.pdb')
temp_data.save(
self.simulation_name + '_' + 'cluster_' + str(clust_num) + '_' + selection_final_name + '.xtc')
self.clusterized_data.update({k: sel_traj})
self.save_pdb_hbond = True
def save_analysed_data(self, filename):
'''
:param filename: Saves clustered data to pickle file
:return:
'''
# import json
# with open(filename, 'w') as outfile:
# json.dump(self.cluster_models, outfile)
import pickle
# pickle.dump(self.cluster_models, open(filename, "wb"))
pickle.dump(self, open(filename, "wb"))
# should I add json saving of information or not?
def load_analysed_data(self, filename):
'''
:param filename: load pickle file
:return:
'''
self.analysed_data = pickle.load(open(filename, "rb"))
print('test')
####################################################################################################################
# TODO calc ramachandran part
@hlp.timeit
def ramachandran_calc(self):
self.atoms, self.bonds = self.full_traj.topology.to_dataframe()
self.phi_indices, self.phi_angles = md.compute_phi(self.full_traj, periodic=False)
self.psi_indices, self.psi_angles = md.compute_psi(self.full_traj, periodic=False)
self.angles_calc = md.compute_dihedrals(self.full_traj, [self.phi_indices[0], self.psi_indices[0]])
@hlp.timeit
def ramachandran_plot(self):
from math import pi
fig = plt.figure(figsize=(7, 7))
plt.title('Dihedral Map:')
plt.scatter(self.angles_calc[:, 0], self.angles_calc[:, 1], marker='x', c=self.full_traj.time)
cbar = plt.colorbar()
cbar.set_label('Time [ps]')
plt.xlabel(r'$\Phi$ Angle [radians]')
plt.xlim(-pi, pi)
plt.ylabel(r'$\Psi$ Angle [radians]')
plt.ylim(-pi, pi)
fig.savefig(self.simulation_name + '_' + 'Ramachandran_analysis' + '.png', dpi=600, bbox_inches='tight')
print("Ramachandran plot created")
print('-----------------------------------\n')
@hlp.timeit
def ramachandran_calc_centroid(self, selection='backbone'):
print('Ramachandran centroid calc has been called\n')
print('------------------------------------------\n')
self.called_ramachandran_centroid_calc = True
self.centroid_topology = self.centroid_conf.topology
self.centroid_selection = self.centroid_topology.select(selection)
self.centroid_new_traj = self.centroid_conf.atom_slice(atom_indices=self.centroid_selection)
self.atoms_centroid, self.bonds_centroid = self.centroid_new_traj.topology.to_dataframe()
self.phi_indices_centroid, self.phi_angles_centroid = md.compute_phi(self.centroid_conf, periodic=False)
self.psi_indices_centroid, self.psi_angles_centroid = md.compute_psi(self.centroid_conf, periodic=False)
self.angles_calc_centroid_list = []
for i, y in zip(self.phi_indices_centroid, self.psi_indices_centroid):
temp = md.compute_dihedrals(self.centroid_conf, [i, y])
self.angles_calc_centroid_list.append(temp[0])
self.angles_calc_centroid = np.array(self.angles_calc_centroid_list, dtype=np.float64)
print('------------------------------------------\n')
@hlp.timeit
def ramachandran_plot_centroid(self):
from math import pi
fig = plt.figure(figsize=(7, 7))
plt.title('Dihedral Map:')
plt.scatter(self.angles_calc_centroid[:, 0], self.angles_calc_centroid[:, 1], marker='x')
# cbar = plt.colorbar()
# cbar.set_label('Time [ps]')
plt.xlabel(r'$\Phi$ Angle [radians]')
plt.xlim(-pi, pi)
plt.ylabel(r'$\Psi$ Angle [radians]')
plt.ylim(-pi, pi)
fig.savefig(self.simulation_name + '_' + 'Ramachandran_analysis_centroid' + '.png', dpi=600,
bbox_inches='tight')
print("Ramachandran plot created")
print('-----------------------------------\n')
####################################################################################################################
# gmx trjconv -s md_0_1.tpr -f md_0_1.xtc -o md_0_1_noPBC.xtc -pbc mol -ur compact
# gmx trjconv -s md_0_3.tpr -f md_0_3_noPBC.xtc -o md_0_3_clear.xtc -fit rot+trans
# def get_gmx_command(self):
# sim1_file_tpr = sim1 + '/md_0_3.tpr'
#
# # In[39]:
#
# sim1_out = sim1 + '/md_sim1.pdb'
#
# # In[40]:
#
# index = sim1 + '/index.ndx'
#
# # In[41]:
#
# trj_conv = 'gmx trjconv -f {0} -s {1} -n {2} -o {3} -dt 500'.format(sim1_file_traj, sim1_file_tpr, index,
# sim1_out)
#
# # traj_sim1_hbonds = md.load(sim1_out)
#
#
# # In[44]:
#
# # traj_sim1_hbonds
#
#
# # In[45]:
#
# sim1_clear = sim1 + '/md_sim1_clear.pdb'
#
# # In[46]:
#
# traj_sim1_hbonds = md.load_pdb(sim1_clear)
#
# # In[47]:
#
# traj_sim1_hbonds
#
# # In[48]:
#
# traj_sim1_hbonds[-1].save('QRC_sim0_lastFrame.pdb')
#
# # In[49]:
#
# traj_sim1_hbonds[0].save('QRC_sim0_firstFrame.pdb')
#
# # In[50]:
#
# traj_sim1_hbonds[0:-1:30].save('QRC_sim0_shortAnimation.pdb')
#
# # In[51]:
#
# hbonds = md.baker_hubbard(traj_sim1_hbonds, freq=0.8, periodic=False)
#
# # In[52]:
#
# hbonds = md.wernet_nilsson(traj_sim1_hbonds[-1], periodic=True)[0]
#
# # In[53]:
#
# sel
#
# # In[54]:
#
# # for hbond in hbonds:
# # # print(hbond)
# # print(label(hbond))
#
#
# # In[55]:
#
# da_distances = md.compute_distances(traj_sim1_hbonds, hbonds[:, [0, 2]], periodic=False)
#
# # In[56]:
#
# import itertools
#
# # In[57]:
#
# color = itertools.cycle(['r', 'b', 'gold'])
# for i in [2, 3, 4]:
# plt.hist(da_distances[:, i], color=next(color), label=label(hbonds[i]), alpha=0.5)
# plt.legend()
# plt.ylabel('Freq');
# plt.xlabel('Donor-acceptor distance [nm]')
#
# # TEST ORIGIANL EXAMPLE
# #
#
# # Check for HSL_LasR_1
#
# # In[ ]:
def get_data_for_analysis(self):
return self.analysis_structure
def drawVectors(self, transformed_features, components_, columns, plt, scaled):
if not scaled:
return plt.axes() # No cheating ;-)
num_columns = len(columns)
# This function will project your *original* features (columns)
# onto your principal component feature-space, so that you can
# visualize how "important" each one was in the
# multi-dimensional scaling
# Scale the principal components by the max value in
# the transformed set belonging to that component
xvector = components_[0] * max(transformed_features[:, 0])
yvector = components_[1] * max(transformed_features[:, 1])
## visualize projections
# Sort each column by its length. These are your *original*
# columns, not the principal components.
important_features = {columns[i]: math.sqrt(xvector[i] ** 2 + yvector[i] ** 2) for i in range(num_columns)}
important_features = sorted(zip(important_features.values(), important_features.keys()), reverse=True)
print("Features by importance:\n", important_features)
ax = plt.axes()
for i in range(num_columns):
# Use an arrow to project each original feature as a
# labeled vector on your principal component axes
plt.arrow(0, 0, xvector[i], yvector[i], color='b', width=0.0005, head_width=0.02, alpha=0.75)
plt.text(xvector[i] * 1.2, yvector[i] * 1.2, list(columns)[i], color='b', alpha=0.75)
return ax
# test code
@hlp.timeit
def rmsf_calc(self, target=None, reference=None, frame=0, wrt=False, atom_indices=None, ref_atom_indices=None):
'''
use backbone for selection
Looks like GROMACS uses WRT
'''
self.called_rmsf_calc = True
print('RMSF analysis has been called\n')
print('-----------------------------\n')
self.topology = self.full_traj.topology
atom_indices = self.topology.select(atom_indices)
ref_atom_indices_name = ref_atom_indices
ref_atom_indices = self.topology.select(ref_atom_indices)
self.atom_indices = atom_indices
self.ref_atom_indices = ref_atom_indices
# this is for keeping selection from trajectory
# self.full_traj.restrict_atoms(self.selection)
self.sim_time = self.full_traj.time / 1000
trajectory = self.full_traj
trajectory.superpose(self.full_traj[frame], atom_indices=atom_indices, ref_atom_indices=ref_atom_indices)
if wrt is True:
avg_xyz = np.mean(trajectory.xyz[:, atom_indices, :], axis=0)
self.avg_xyz = avg_xyz
self.sim_rmsf = np.sqrt(3 * np.mean((trajectory.xyz[:, atom_indices, :] - avg_xyz) ** 2, axis=(0, 2)))
else:
reference = trajectory[frame]
self.sim_rmsf = np.sqrt(
3 * np.mean((trajectory.xyz[:, atom_indices, :] - reference.xyz[:, ref_atom_indices, :]) ** 2,
axis=(0, 2)))
self.rmsf_analysis_data.update({ref_atom_indices_name: {'atom_indices': self.atom_indices,
'ref_atom_indices': self.ref_atom_indices,
'rmsf': self.sim_rmsf}})
print('-----------------------------\n')
return self.sim_rmsf
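# RMSF definition used above (GROMACS-like when wrt=True): each atom fluctuates
# around its time-averaged position,
#   RMSF_k = sqrt( mean_t |x_k(t) - <x_k>|^2 ),
# where the factor 3 combined with the mean over the x/y/z axis rebuilds the
# squared vector norm from per-axis averages. With wrt=False a single reference
# frame is used instead of the time average.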
@hlp.timeit
def pca_analysis(self):
scaleFeatures = False
df = self.data_for_analysis
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
pca.fit(df)
T = pca.transform(df)
# ax = self.drawVectors(T, pca.components_, df.columns.values, plt, scaleFeatures)
T = pd.DataFrame(T)
T.columns = ['component1', 'component2']
# T.plot.scatter(x='component1', y='component2', marker='o', s=300, alpha=0.75) # , ax=ax)
# plt.show()
return T
@hlp.timeit
def pca_analysis_reshape(self):
scaleFeatures = False
df = self.data_for_analysis
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
pca.fit(df)
T = pca.transform(df)
# ax = self.drawVectors(T, pca.components_, df.columns.values, plt, scaleFeatures)
T = pd.DataFrame(T)
T.columns = ['component1', 'component2']
# T.plot.scatter(x='component1', y='component2', marker='o', s=300, alpha=0.75) # , ax=ax)
# plt.show()
return T
@hlp.timeit
def iso_analysis(self, n_neighbours=3):
scaleFeatures = False
df = self.data_for_analysis
from sklearn import manifold
iso = manifold.Isomap(n_neighbors=n_neighbours, n_components=2)
iso.fit(df)
# use a separate name so the sklearn.manifold module is not shadowed by the transformed data
embedding = iso.transform(df)
# Plot2D(embedding, 'ISOMAP 0 1', 0, 1, num_to_plot=40)
# Plot2D(embedding, 'ISOMAP 1 2', 1, 2, num_to_plot=40)
# ax = self.drawVectors(embedding, iso.components_, df.columns.values, plt, scaleFeatures)
T = pd.DataFrame(embedding)
T.columns = ['component1', 'component2']
T.plot.scatter(x='component1', y='component2', marker='o', alpha=0.75) # , ax=ax)
plt.show()
@hlp.timeit
def hdbscan_pca(self):
# fignum = 2
# fig = plt.figure(fignum)
# plt.clf()
# plt.subplot(321)
X = self.pca_data
db = hdbscan.HDBSCAN(min_cluster_size=200)
labels = db.fit_predict(X)
print('labels ', labels)
#
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
# core_samples_mask[db.core_sample_indices_] = True
# labels = db.labels_
# print('labels is ',labels)
print('labels shape is ', labels.shape[0])
# print('db are ',db.components_)
labelsShape = labels.shape[0]
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
# plot_frequency(labels)
print('Estimated number of clusters: %d' % n_clusters_)
unique_labels = list(set(labels))
print('Unique labels ', unique_labels)
worthy_data = labels[labels != -1]
notWorthy_data = labels[labels == -1]
real_labels = set(worthy_data)
# print("Worthy Data ",worthy_data)
print("Real Labels man ", real_labels)
shape_worthy = worthy_data.shape[0]
print("All Worthy data points ", int(shape_worthy))
print("Not Worthy data points ", int(notWorthy_data.shape[0]))
# plt.cla()
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
# print("Colors is ",colors)
# Here could be the solution
dtype = [('label', np.int8), ('CLx', np.float64), ('CLy', np.float64), ('CLz', np.float64),
('bindMean', np.float64),
('bindStd', np.float64), ('quantity', int), ('percentage', np.float64), ('rmsd', np.float64), ]
cluster_Center_Data = np.empty((0,), dtype=dtype) # This is for clusters
# print("cluster_Center_Data ",clean_Data, clean_Data.shape)
# print("clean Data dtype ", clean_Data.dtype)
# print("clean Data [0] dtype" ,dtype[0])
label_percent = {}
# Need to return X, clean_data, and another dict for best position
molOrder = {}
for k in unique_labels: # Need to modify WORKS
# print('k is ',k)
xyz = X[labels == k]
if k == -1:
color = 'b'
# print('what the hell ', xyz[:, 4])
plt.scatter(xyz['component1'], xyz['component2'], facecolor=(0, 0, 0, 0), marker='^', s=80, c=color,
label='Outlier size={0}'.format(xyz.shape[0]))
# xyz.plot.scatter(x='component1', y='component2', marker='^',s=100, alpha=0.75)
else:
# Need to make this function a lot better
print('xyz is ', xyz)
plt.scatter(xyz['component1'], xyz['component2'], marker='o', s=120, c=colors[k], edgecolor='g',
label="size={0}".format(xyz.shape[0]))
# label="deltaG = %s±%s (%s%%) label=%s rmsd = %s A" % (
# round(bind_mean, 2), round(bind_std, 2), percentage, k, curr_rmsd))
# xyz.plot.scatter(x='component1', y='component2', marker='o', s=100, c=alpha=0.75)
# plt.set_xlabel('X')
# plt.set_ylabel('Y')
# plt.set_zlabel('Z')
plt.legend(loc='lower left', ncol=3, fontsize=8, bbox_to_anchor=(0, 0))
plt.title('Estimated number of clusters: %d (%d/%d)' % (n_clusters_, shape_worthy, X.shape[0]))
plt.show() # not now
@hlp.timeit
def silhouette_graph_pca(self):
cluster_range = self.range_n_clusters
score = self.sil_pca
criteria_name = 'Mean Silhouette Coefficient for all samples'
score_text = 'Objects with a high silhouette value are considered well clustered'
plot_tools.plot_cluster_analysis(cluster_range, score, criteria_name, score_text)
@hlp.timeit
def calinski_graph_pca(self):
cluster_range = self.range_n_clusters
score = self.calinski_pca
criteria_name = 'Calinski-Harabasz score'
score_text = 'Objects with a high Calinski-Harabasz score are considered well clustered'
plot_tools.plot_cluster_analysis(cluster_range, score, criteria_name, score_text)
@hlp.timeit
def dunn_graph_pca(self):
cluster_range = self.range_n_clusters
score = self.dunn_pca
criteria_name = "Dunn's Index"
score_text = "Maximum value of the index represents the right partitioning given the index"
plot_tools.plot_cluster_analysis(cluster_range, score, criteria_name, score_text)
@hlp.timeit
def dbi_graph_pca(self):
cluster_range = self.range_n_clusters
score = self.dbi_pca
criteria_name = 'Davies-Bouldin Index'
score_text = 'The optimal clustering solution has the smallest Davies-Bouldin index value.'
plot_tools.plot_cluster_analysis(cluster_range, score, criteria_name, score_text)
@hlp.timeit
def select_number_of_clusters(self):
# ["foo", "bar", "baz"].index("bar")
max_silhouette = max(self.sil_pca)
max_dunn = max(self.dunn_pca)
min_dbi = min(self.dbi_pca)
sil_index = self.sil_pca.index(max_silhouette)
dunn_index = self.dunn_pca.index(max_dunn)
dbi_index = self.dbi_pca.index(min_dbi)
cluster_quantity = []
cluster_quantity.append(self.range_n_clusters[sil_index])
cluster_quantity.append(self.range_n_clusters[dunn_index])
cluster_quantity.append(self.range_n_clusters[dbi_index])
print('------------------------------------------------')
print('Cluster counts suggested by each criterion:', cluster_quantity)
cluster_set = set(cluster_quantity)
cluster_dict = {}
for n_set in cluster_set:
count = cluster_quantity.count(n_set)
cluster_dict.update({n_set: count})
print('Votes per cluster count:', cluster_dict)
import operator
clust_num = max(cluster_dict.items(), key=operator.itemgetter(1))[0]
print("number of clusters is ", clust_num)
return clust_num
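# Illustrative walk-through of the vote above (hypothetical scores, not real output): with
# range_n_clusters = [2, 3, 4, 5], if the silhouette and Dunn criteria both peak at index 1 and
# the Davies-Bouldin criterion at index 2, cluster_quantity becomes [3, 3, 4], the counts are
# {3: 2, 4: 1}, and 3 is returned as the number of clusters.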
@hlp.timeit
def collect_cluster_info(self):
data = self.clusters_info[self.clust_num]
print(data)
labels = data['labels']
# Make more flexible whether pca_data or not
pca_data = self.full_traj
original_data = self.analysis_structure # self.pca_data
cluster_list = {}
unique_labels = list(set(labels))
for k in unique_labels: # Need to modify WORKS
# print('k is ',k)
# k == -1 then it is an outlier
if k != -1:
cluster_data = []
xyz = original_data[labels == k]
model_num = xyz['ModelNum']
for i in model_num:
# print(i)
temp_data = self.equiv_models[i]
cluster_data.append(temp_data)
# print(xyz.describe())
cluster_list.update({k: cluster_data})
# print(cluster_list)
return cluster_list
# def write_model_to_file(self, model, resnum=None, filename_pdb=None):
# curr_df = model['molDetail']['dataframe']
# pdb_tools.write_lig(curr_df, resnum, filename_pdb)
def save_analysed_data(self, filename):
'''
:param filename: Saves clustered data to pickle file
:return:
'''
# import json
# with open(filename, 'w') as outfile:
# json.dump(self.cluster_models, outfile)
import pickle
# pickle.dump(self.cluster_models, open(filename, "wb"))
pickle.dump(self, open(filename, "wb"))
# should I add json saving of information or not?
def load_analysed_data(self, filename):
'''
:param filename: load pickle file
:return:
'''
import pickle
self.analysed_data = pickle.load(open(filename, "rb"))
print('Loaded analysed data from', filename)
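# Minimal usage sketch for the two methods above (the object name is illustrative only):
#   md_analysis.save_analysed_data('analysis.pickle')
#   md_analysis.load_analysed_data('analysis.pickle')
#   restored = md_analysis.analysed_data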
# create another function that shows only the best plot for kmeans
@hlp.timeit
def show_silhouette_analysis_pca_best(self, show_plot=False, custom_dpi=300):
# self.clusters_info.update({n_clusters: {'dunn': dunn_avg, 'dbi': david_bouldain,
# 'calinski': calinski_avg, 'silhouette': silhouette_avg,
# 'labels': cluster_labels, 'centers': centers,
# 'silhouette_values': sample_silhouette_values}})
n_clusters = self.select_number_of_clusters()
cluster_labels = self.clusters_info[n_clusters]['labels']
sample_silhouette_values = self.clusters_info[n_clusters]['silhouette_values']
silhouette_avg = self.clusters_info[n_clusters]['silhouette']
centers = self.clusters_info[n_clusters]['centers']
X = self.reduced_cartesian
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
sns.set(font_scale=2)
# sns.axes_style()
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
y_lower = 10
# TODO a new try
colors = sns.cubehelix_palette(n_colors=n_clusters, rot=-.4)
self.colors_ = colors
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
# color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=colors[i], edgecolor=colors[i], alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = converters.convert_to_colordata(cluster_labels, colors)
# colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
#
#
# my_cmap = sns.cubehelix_palette(n_colors=n_clusters)
self.cluster_colors = colors
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=250, lw=0, alpha=0.7,
c=colors)
# ax2.scatter(X[:, 0], X[:, 1], marker='.', s=250, lw=0, alpha=0.7,
# c=self.full_traj.time)
# Labeling the clusters
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=100)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=100)
ax2.set_title("The visualization of the clustered data")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on conformation data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
fig.savefig(self.simulation_name + '_' + 'Best_cluster_analysis_md_' + '.png', dpi=custom_dpi,
bbox_inches='tight')
if show_plot is True:
plt.show()
@hlp.timeit
def show_cluster_analysis_pca_best(self, show_plot=False, custom_dpi=600):
# self.clusters_info.update({n_clusters: {'dunn': dunn_avg, 'dbi': david_bouldain,
# 'calinski': calinski_avg, 'silhouette': silhouette_avg,
# 'labels': cluster_labels, 'centers': centers,
# 'silhouette_values': sample_silhouette_values}})
n_clusters = self.select_number_of_clusters()
cluster_labels = self.clusters_info[n_clusters]['labels']
sample_silhouette_values = self.clusters_info[n_clusters]['silhouette_values']
silhouette_avg = self.clusters_info[n_clusters]['silhouette']
centers = self.clusters_info[n_clusters]['centers']
X = self.reduced_cartesian
# Create a subplot with 1 row and 2 columns
fig = plt.figure(figsize=(10, 10))
# fig.set_size_inches(18, 7)
sns.set(font_scale=2)
# TODO a new try
colors = self.colors_
# 2nd Plot showing the actual clusters formed
colors = converters.convert_to_colordata(cluster_labels, colors)
# colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
#
#
# my_cmap = sns.cubehelix_palette(n_colors=n_clusters)
self.cluster_colors = colors
plt.scatter(X[:, 0], X[:, 1], marker='.', s=250, lw=0, alpha=0.7,
c=colors)
# ax2.scatter(X[:, 0], X[:, 1], marker='.', s=250, lw=0, alpha=0.7,
# c=self.full_traj.time)
# Labeling the clusters
# Draw white circles at cluster centers
plt.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=800)
for i, c in enumerate(centers):
clust_num = i + 1
plt.scatter(c[0], c[1], marker='$%d$' % clust_num, alpha=1, s=800)
plt.title("The visualization of the clustered data")
plt.xlabel("Feature space for the 1st feature")
plt.ylabel("Feature space for the 2nd feature")
# plt.suptitle(("Silhouette analysis for KMeans clustering on conformation data "
# "with n_clusters = %d" % n_clusters),
# fontsize=14, fontweight='bold')
fig.savefig(self.simulation_name + '_' + 'Best_cluster_analysis_simple_md_' + '.png', dpi=custom_dpi,
bbox_inches='tight')
if show_plot is True:
plt.show()
@hlp.timeit
def silhouette_analysis_pca(self, show_plots=False):
self.sil_pca = []
self.calinski_pca = []
self.dunn_pca = []
self.dbi_pca = []
X = self.pca_data
for n_clusters in self.range_n_clusters:
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
centers = clusterer.cluster_centers_
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
calinski_avg = calinski_harabaz_score(X, cluster_labels)  # renamed to calinski_harabasz_score in newer scikit-learn
# looks like this is ok
dunn_avg = dunn_fast(X, cluster_labels)
converted_values = converters.convert_pandas_for_dbi_analysis(X, cluster_labels)
david_bouldain = davisbouldin(converted_values, centers)
# pseudo_f = pseudoF_permanova(X, cluster_labels)
# print("For n_clusters =", n_clusters,
# "The pseudo_f is :", pseudo_f)
print("For n_clusters =", n_clusters,
"The average dunn is :", dunn_avg)
print("For n_clusters =", n_clusters,
"The average dbd is :", david_bouldain)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
print("For n_clusters =", n_clusters,
"The average calinski_harabaz_score is :", calinski_avg)
# Store info for each n_clusters
# self.clusters_info.update({n_clusters: {'dunn': dunn_avg, 'dbi': david_bouldain,
# 'calinski': calinski_avg, 'silhouette': silhouette_avg,
# 'labels': cluster_labels, 'centers': centers}})
# Make a decision based on the average, then round the value; that would be your cluster quantity
print('------------------------------------------------------------')
self.sil_pca.append(silhouette_avg)
self.calinski_pca.append(calinski_avg)
self.dunn_pca.append(dunn_avg)
self.dbi_pca.append(david_bouldain)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
self.clusters_info.update({n_clusters: {'dunn': dunn_avg, 'dbi': david_bouldain,
'calinski': calinski_avg, 'silhouette': silhouette_avg,
'labels': cluster_labels, 'centers': centers,
'silhouette_values': sample_silhouette_values}})
if show_plots is True:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.nipy_spectral(float(i) / n_clusters)  # cm.spectral was removed from newer matplotlib
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)  # cm.spectral was removed from newer matplotlib
ax2.scatter(X['component1'], X['component2'], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=100)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=100)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
@hlp.timeit
def silhouette_analysis(self):
range_n_clusters = [2, 3, 4, 5, 6, 7, 8, 9, 10]
X = self.pca_data
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.nipy_spectral(float(i) / n_clusters)  # cm.spectral was removed from newer matplotlib
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)  # cm.spectral was removed from newer matplotlib
# The axes here are 2D, so plot only the first two coordinates (X['Z'] was being passed as the size argument)
ax2.scatter(X['X'], X['Y'], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
@hlp.timeit
def plotHist(self):
self.analysis_structure['BindingEnergy'].plot.hist()
plt.show()
@hlp.timeit
def MeanShift(self):
X = self.pca_data  # assume the PCA-reduced data, consistent with the other clustering methods
# print(X.describe)
bandwidth = estimate_bandwidth(X)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
@hlp.timeit
def plot_results(self, X, Y_, means, covariances, index, title):
splot = plt.subplot(2, 1, 1 + index)
for i, (mean, covar, color) in enumerate(zip(
means, covariances, color_iter)):
v, w = linalg.eigh(covar)
v = 2. * np.sqrt(2.) * np.sqrt(v)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180. * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], angle=180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-9., 5.)
plt.ylim(-3., 6.)
plt.xticks(())
plt.yticks(())
plt.title(title)
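# Geometry behind the ellipses above (derivation sketch): eigh(covar) gives eigenvalues v and
# eigenvectors w of the covariance matrix, so sqrt(v) are the standard deviations along the
# principal axes. The full axis lengths 2*sqrt(2)*sqrt(v) drawn here trace the Mahalanobis
# contour d^2 = 2, which for a 2D Gaussian encloses about 1 - exp(-1), roughly 63% of the
# probability mass of that component.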
@hlp.timeit
def VBGMM(self):
X = self.pca_data
gmm = mixture.GaussianMixture(n_components=5, covariance_type='full').fit(X)
self.plot_results(X, gmm.predict(X), gmm.means_, gmm.covariances_, 0,
'Gaussian Mixture')
# Fit a Dirichlet process Gaussian mixture using five components
dpgmm = mixture.BayesianGaussianMixture(n_components=5,
covariance_type='full').fit(X)
self.plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 1,
'Bayesian Gaussian Mixture with a Dirichlet process prior')
plt.show()
@hlp.timeit
def transform_for_analysis(self):
model = 1
columns_dock_center = ['ModelNum', 'X', 'Y', 'Z', 'BindingEnergy']
dock_df = pd.DataFrame(columns=columns_dock_center)
for i in sorted(self.samples_data.keys()):
models = self.samples_data[i]
# print(model)
for y in models.mol_data__:
# This should be the structure for equivalency of models
# print(model, i, y)
self.equivalent_models.update({model: {'file': i, 'modelNum': y,
'molDetail': models.mol_data__[y]}})
curr_model = models.mol_data__[y]
curr_frame = curr_model['dataframe']
curr_x = curr_frame['X'].mean()
curr_y = curr_frame['Y'].mean()
curr_z = curr_frame['Z'].mean()
curr_bind = curr_model['vina_info'][0]
dock_df.loc[model] = [int(model), curr_x, curr_y, curr_z, curr_bind]
# print(y, models.mol_data__[y]['dataframe'])
model += 1
# print(self.equivalent_models)
dock_df['ModelNum'] = dock_df['ModelNum'].astype(int)
return dock_df
def get_mol_data(self):
return self.mol_data__
@hlp.timeit
def transform_data(self):
mol_data = {}
for model, model_info in zip(self.object, self.info):
# print(model_info)
pandas_model = self.pandas_transformation(model)
mol_data.update({model_info[0]: {'dataframe': pandas_model, 'vina_info': model_info[1:]}})
return mol_data
@hlp.timeit
def pandas_transformation(self, list_object_mol):
columns_pdbqt = ['ATOM', 'SerialNum', 'AtomName', 'ResidueName', 'ChainId',
'ChainNum', 'X', 'Y', 'Z', 'Occupancy', 'TempFactor', 'Charge', 'ElemSymbol']
self.df = pd.DataFrame(list_object_mol, columns=columns_pdbqt)
self.df['X'] = pd.to_numeric(self.df['X'])
self.df['Y'] = | pd.to_numeric(self.df['Y']) | pandas.to_numeric |
#Library of functions called by SimpleBuildingEngine
import pandas as pd
import numpy as np
def WALLS(Btest=None):
#Building height
h_building = 2.7  # [m]
h_m_building = h_building / 2
h_cl = 2.7  # height of a storey [m]
#number of walls
n_walls = 7
A_fl = 48
#WALLS CHARACTERISTICS
#Orientation
ori = pd.Series([('S'), ('W'), ('N'), ('E'), ('R'), ('F'), ('C')])
#Surface azimuth
surf_az = pd.Series([0, 90, 180, -90, 0, 0, 0])  # one value per wall; the original line dropped the comma between 180 and -90
#Slopes (90:vertical; 0:horizontal)
slope = pd.Series([90, 90, 90, 90, 0, 0, 0])
#Masks
f_low_diff = pd.Series([1, 1, 1, 1, 1, 1, 1])
f_low_dir = pd.Series([1, 1, 1, 1, 1, 1, 1])
#U VALUES
U_hopw = pd.Series([0.5144, 0.5144, 0.5144, 0.5144, 0.3177, 0, 0])
U_lopw = pd.Series([3, 3, 3, 3, 3, 3, 3])
U_fr = pd.Series([2.4, 2.4, 2.4, 2.4, 2.4, 2.4, 2.4])
U_gl = pd.Series([3, 3, 3, 3, 3, 3, 3])
if (Btest == 195 or Btest == 395):
#SURFACES
#Heavy Opaque walls
A_hopw = pd.Series([21.6, 16.2, 21.6, 16.2, 48, 48, 48])
#Windows
A_wd = pd.Series([0, 0, 0, 0, 0, 0, 0])
#Frame
FWR = pd.Series([0, 0, 0, 0, 0, 0, 0])
A_fr = FWR * A_wd
#Glazing
A_gl = A_wd - A_fr
#Light Opaque walls
A_lopw = pd.Series([0, 0, 0, 0, 0, 0, 0])
elif (Btest == 200 or Btest == 210 or Btest == 230 or Btest == 240 or Btest == 250 or Btest == 400 or Btest == 410
or Btest == 420 or Btest == 430 or Btest == 800):
#Heavy Opaque walls
A_hopw = pd.Series([9.6, 16.2, 21.6, 16.2, 48, 48, 48])
#Windows
A_wd = pd.Series([0, 0, 0, 0, 0, 0, 0])
#Frame
FWR = pd.Series([0, 0, 0, 0, 0, 0, 0])
A_fr = FWR * A_wd
#Glazing
A_gl = A_wd - A_fr
#Light Opaque walls
A_lopw = | pd.Series([12, 0, 0, 0, 0, 0, 0]) | pandas.Series |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import os
import argparse
from pathlib import Path
import joblib
import scipy.sparse
import string
import nltk
from nltk import word_tokenize
nltk.download('punkt')
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelBinarizer
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
'''
Preprocessing and preparation of data:
The purpose of this script is to prepare and preprocess the raw textual data and the admission data needed for training and testing the classification model. This process includes the following steps:
1. Clean and prepare admission data
2. Extract discharge summaries from note data
3. Remove newborn cases and in-hospital deaths
4. Bind note-data to 30-day readmission information
5. Split into train, validation and test set and balance training data by oversampling positive cases
6. Removal of special characters, numbers and de-identified brackets
7. Vectorise all discharge notes:
7a. Remove stop-words, most common words and very rare words (benchmarks need to be defined)
7b. Create set of TF-IDF weighted tokenised discharge notes
8. Output datasets and labels as CSV-files
'''
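# Minimal sketch of the vectorisation described in step 7 (illustrative only; the thresholds for
# rare and common words are assumptions, not values taken from this script):
#   vectoriser = TfidfVectorizer(tokenizer=word_tokenize, stop_words='english', max_df=0.95, min_df=5)
#   X_train = vectoriser.fit_transform(train_notes)   # fit the vocabulary on training notes only
#   X_val = vectoriser.transform(validation_notes)    # reuse the fitted vocabulary for validation/test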
# Defining main function
def main(args):
notes_file = args.nf
admissions_file = args.af
NotePreprocessing(notes_file = notes_file, admissions_file = admissions_file)
# Defining class 'NotePreprocessing'
class NotePreprocessing:
def __init__(self, notes_file, admissions_file):
# Setting directory of input data
data_dir = self.setting_data_directory()
# Setting directory of output plots
out_dir = self.setting_output_directory()
# Loading notes
if notes_file is None:
notes = pd.read_csv(data_dir / "NOTEEVENT.csv")
else:
notes = pd.read_csv(data_dir / notes_file)
# Loading general admission data
if admissions_file is None:
admissions = pd.read_csv(data_dir / "ADMISSIONS.csv")
else:
admissions = pd.read_csv(data_dir / admissions_file)
#-#-# PREPROCESSING ADMISSIONS DATA #-#-#
# Convert to datetime
admissions.ADMITTIME = pd.to_datetime(admissions.ADMITTIME, format = '%Y-%m-%d %H:%M:%S', errors = 'coerce')
admissions.DISCHTIME = pd.to_datetime(admissions.DISCHTIME, format = '%Y-%m-%d %H:%M:%S', errors = 'coerce')
admissions.DEATHTIME = pd.to_datetime(admissions.DEATHTIME, format = '%Y-%m-%d %H:%M:%S', errors = 'coerce')
# Sort by subject ID and admission date
admissions = admissions.sort_values(['SUBJECT_ID','ADMITTIME'])
admissions = admissions.reset_index(drop = True)
# Create column containing next admission time (if one exists)
admissions['NEXT_ADMITTIME'] = admissions.groupby('SUBJECT_ID').ADMITTIME.shift(-1)
# Create column containing next admission type
admissions['NEXT_ADMISSION_TYPE'] = admissions.groupby('SUBJECT_ID').ADMISSION_TYPE.shift(-1)
# Replace values with NaN or NaT if readmissions are planned (Category = 'Elective')
rows = admissions.NEXT_ADMISSION_TYPE == 'ELECTIVE'
admissions.loc[rows,'NEXT_ADMITTIME'] = pd.NaT
admissions.loc[rows,'NEXT_ADMISSION_TYPE'] = np.NaN
# It is important that we replace the removed planned admissions with the next unplanned readmission.
# Therefore, we backfill the removed values with the values from the next row that contains data about an unplanned readmission
# Sort by subject ID and admission date just to make sure the order is correct
admissions = admissions.sort_values(['SUBJECT_ID','ADMITTIME'])
# Back fill removed values with next row that contains data about an unplanned readmission
admissions[['NEXT_ADMITTIME','NEXT_ADMISSION_TYPE']] = admissions.groupby(['SUBJECT_ID'])[['NEXT_ADMITTIME','NEXT_ADMISSION_TYPE']].fillna(method = 'bfill')
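# Illustrative behaviour of the groupby + backfill above (toy values): for one subject whose
# NEXT_ADMITTIME column reads [NaT, 2150-01-01, NaT] after blanking elective readmissions,
# backfilling yields [2150-01-01, 2150-01-01, NaT], so the blanked elective row inherits the date
# of the next unplanned readmission.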
# Add column containing the calculated number of days until the next admission
admissions['DAYS_NEXT_ADMIT']= (admissions.NEXT_ADMITTIME - admissions.DISCHTIME).dt.total_seconds()/(24*60*60)
# Negative values occur because some patients are recorded as readmitted before being discharged from their first admission.
# Quick fix for now: remove the rows with negative values
# Removing rows for which value in DAYS_NEXT_ADMIT is negative
admissions = admissions.drop(admissions[admissions.DAYS_NEXT_ADMIT < 0].index)
# Change data type of DAYS_NEXT_ADMIT to float
admissions['DAYS_NEXT_ADMIT'] = | pd.to_numeric(admissions['DAYS_NEXT_ADMIT']) | pandas.to_numeric |
"""
Demultiplexing of BAM files.
Input: BAM file, fasta file of terminal barcodes and/or internal barcodes
Output: Multiple BAM files containing demultiplexed reads, with the file name indicating the distinguishing barcodes.
If terminal barcodes are not used, output name will be all_X.input.bam, where X is the internal barcode
If internal barcode is not used, output name will be Y_all.input.bam, where Y is the terminal barcode pair used
"""
import datetime
import multiprocessing as mp
import os
import re
import shutil
import time
import gzip
import itertools
import numpy as np
import pandas as pd
import pysam
from Bio import Align, pairwise2, Seq
from Bio.pairwise2 import format_alignment
from Bio import SeqIO
def main():
### Asign variables from config file and input
config = snakemake.config
tag = snakemake.wildcards.tag
BAMin = snakemake.input.aln
### Output variables
outputDir = str(snakemake.output).split(f'/{tag}_demultiplex_complete')[0]
bcp = BarcodeParser(config, tag)
bcp.demux_BAM(BAMin, outputDir)
class BarcodeParser:
def __init__(self, config, tag):
"""
arguments:
config - snakemake config dictionary
tag - tag for which all BAM files will be demultiplexed, defined in config file
"""
self.config = config
self.tag = tag
self.refSeqfasta = config['runs'][tag]['reference']
self.reference = list(SeqIO.parse(self.refSeqfasta, 'fasta'))[0]
self.reference.seq = self.reference.seq.upper()
self.barcodeInfo = config['runs'][tag]['barcodeInfo']
self.barcodeGroups = config['runs'][tag]['barcodeGroups']
@staticmethod
def create_barcodes_dict(bcFASTA, revComp):
"""
creates a dictionary of barcodes from fasta file
barcodes in fasta must be in fasta form: >barcodeName
NNNNNNNN
function will then create a dictionary of these barcodes in the form {NNNNNNNN: barcodeName}
inputs:
bcFASTA: string, file name of barcode fasta file used for reference
revComp: bool, if set to True, barcode sequences will be stored as reverse complements of the sequences in the fasta file
"""
bcDict = {}
for entry in SeqIO.parse(bcFASTA, 'fasta'):
bcName = entry.id
bc = str(entry.seq).upper()
if revComp:
bc = Seq.reverse_complement(bc)
bcDict[bc]=bcName
return bcDict
@staticmethod
def find_barcode_context(sequence, context):
"""given a sequence with barcode locations marked as Ns, and a barcode sequence context (e.g. ATCGNNNNCCGA),
this function will identify the beginning and end of the Ns within the appropriate context if it exists only once.
If the context does not exist or if the context appears more than once, then will return None as the first output
value, and the reason why the barcode identification failed as the second value"""
location = sequence.find(context)
if location == -1:
return None, 'barcode context not present in reference sequence'
N_start = location + context.find('N')
N_end = location + len(context) - context[::-1].find('N')
if sequence[N_end:].find(context) == -1:
return N_start, N_end
else:
return None, 'barcode context appears in reference more than once'
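# Illustrative example for find_barcode_context (hypothetical sequence):
#   find_barcode_context('GGATCGNNNNCCGATT', 'ATCGNNNNCCGA') returns (6, 10), the slice covering
#   the four Ns; if the context is absent, or occurs more than once, the first return value is
#   None and the second is the failure reason.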
def add_barcode_contexts(self):
"""
adds dictionary of context strings for each type of barcode provided
if no context is specified for a barcode type or
if context cannot be found in the reference sequence, throws an error
"""
dictOfContexts = {}
for bcType in self.barcodeInfo:
try:
dictOfContexts[bcType] = self.barcodeInfo[bcType]['context']
if str(self.reference.seq).find(dictOfContexts[bcType]) == -1:
raise ValueError
except KeyError:
raise ValueError(f'Barcode type not assigned a sequence context. Add context to barcode type or remove barcode type from barcodeInfo for this tag.\n\nRun tag: `{self.tag}`\nbarcode type: `{bcType}`\nreference sequence: `{self.reference.id}`\nreference sequence fasta file: `{self.refSeqfasta}`')
except ValueError:
raise ValueError(f'Barcode context not found in reference sequence. Modify context or reference sequence to ensure an exact match is present.\n\nRun tag: `{self.tag}`\nbarcode type: `{bcType}`\nbarcode sequence context: `{dictOfContexts[bcType]}`\nreference sequence: `{self.reference.id}`\nreference sequence fasta file: `{self.refSeqfasta}`')
self.barcodeContexts = dictOfContexts
def add_barcode_dicts(self):
"""
adds dictionary for each type of barcodes using create_barcodes_dict()
if no .fasta file is specified for a barcode type, or if the provided fasta file
cannot be found, throws an error
"""
dictOfDicts = {}
for bcType in self.barcodeInfo:
try:
fastaFile = self.barcodeInfo[bcType]['fasta']
if self.barcodeInfo[bcType]['reverseComplement']:
RC = True
else:
RC = False
dictOfDicts[bcType] = self.create_barcodes_dict(fastaFile, RC)
except KeyError:
raise ValueError(f'barcode type not assigned a fasta file. Add fasta file to barcode type or remove barcode type from barcodeInfo for this tag.\n\nRun tag: `{self.tag}`\nbarcode type: `{bcType}`\nreference sequence: `{self.reference.id}`')
except FileNotFoundError:
fastaFile = self.barcodeInfo[bcType]['fasta']
raise FileNotFoundError(f'barcode fasta file not found.\n\nRun tag: `{self.tag}`\nbarcode type: `{bcType}`\nreference sequence: `{self.reference.id}`\nfasta file: `{fastaFile}`')
self.barcodeDicts = dictOfDicts
@staticmethod
def hamming_distance(string1, string2):
"""calculates hamming distance, taken from https://stackoverflow.com/questions/54172831/hamming-distance-between-two-strings-in-python"""
return sum(c1 != c2 for c1, c2 in zip(string1.upper(), string2.upper()))
def add_barcode_hamming_distance(self):
"""adds barcode hamming distance for each type of barcode (default value = 0), and ensures that
(1) hamming distances of all possible pairs of barcodes within the fasta file of each barcode
type are greater than the set hamming distance and (2) that each barcode is the same length as
the length of Ns in the provided sequence context"""
hammingDistanceDict = {}
for barcodeType in self.barcodeDicts:
if 'hammingDistance' in self.barcodeInfo[barcodeType]:
hammingDistanceDict[barcodeType] = self.barcodeInfo[barcodeType]['hammingDistance']
else:
hammingDistanceDict[barcodeType] = 0 #default Hamming distance of 0
barcodes = [bc for bc in self.barcodeDicts[barcodeType]]
barcodeLength = self.barcodeContexts[barcodeType].count('N')
for i,bc in enumerate(barcodes):
assert (barcodes.count(bc) == 1), f'Barcode {bc} present more than once in {self.barcodeInfo[barcodeType]["fasta"]} Duplicate barcodes are not allowed.'
assert (len(bc) == barcodeLength), f'Barcode {bc} does not match the expected length of {barcodeLength} based on {self.barcodeContexts[barcodeType]}'
otherBCs = barcodes[:i]+barcodes[i+1:]
for otherBC in otherBCs:
hamDist = self.hamming_distance(bc, otherBC)
assert (hamDist > hammingDistanceDict[barcodeType]), f'Barcode {bc} is within hammingDistance {hamDist} of barcode {otherBC}'
self.hammingDistances = hammingDistanceDict
@staticmethod
def hamming_distance_dict(sequence, hamming_distance):
"""given a sequence as a string (sequence) and a desired hamming distance,
finds all sequences that are at or below the specified hamming distance away
from the sequence and returns a dictionary where values are the given sequence
and keys are every sequence whose hamming distance from `sequence` is less
than or equal to `hamming_distance`"""
hammingDistanceSequences = [[sequence]] # list to be populated with lists of sequences that are the index hamming distance from the first sequence
for hd in range(0, hamming_distance):
new_seqs = []
for seq in hammingDistanceSequences[hd]:
for i, a in enumerate(seq):
if a != sequence[i]: continue
for nt in list('ATGC'):
if nt == a: continue
else: new_seqs.append(seq[:i]+nt+seq[i+1:])
hammingDistanceSequences.append(new_seqs)
allHDseqs = list(itertools.chain.from_iterable(hammingDistanceSequences)) # combine all sequences into a single list
outDict = {}
for seq in allHDseqs:
outDict[seq] = sequence
return outDict
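# Illustrative example for hamming_distance_dict (hypothetical barcode):
#   hamming_distance_dict('ACGT', 1) maps 'ACGT' and every sequence exactly one substitution away
#   (e.g. 'TCGT', 'AGGT', 'ACAT') back to 'ACGT': 13 keys in total, the original plus
#   4 positions * 3 alternative bases.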
def add_hamming_distance_barcode_dict(self):
"""Adds a second barcode dictionary based upon hamming distance. For each barcode type, if the
hamming distance is >0, adds a dictionary where values are barcodes defined in the barcode fasta file,
and keys are all sequences of the same length with hamming distance from each specific barcode less than
or equal to the specified hamming distance for that barcode type. Only utilized when a barcode cannot
be found in the provided barcode fasta file and hamming distance for the barcode type is >0"""
hammingDistanceBarcodeLookup = {}
for barcodeType in self.hammingDistances:
hamDist = self.hammingDistances[barcodeType]
if hamDist > 0:
barcodeTypeHDdict = {}
for barcode in self.barcodeDicts[barcodeType]:
barcodeTypeHDdict.update(self.hamming_distance_dict(barcode,hamDist))
hammingDistanceBarcodeLookup[barcodeType] = barcodeTypeHDdict
self.hammingDistanceBarcodeDict = hammingDistanceBarcodeLookup
def find_N_start_end(self, sequence, context):
"""given a sequence with barcode locations marked as Ns, and a barcode sequence context (e.g. ATCGNNNNCCGA),
this function will return the beginning and end of the Ns within the appropriate context if it exists only once.
If the context does not exist or if the context appears more than once, then will return None, None"""
location = sequence.find(context)
if location == -1:
self.failureReason = 'context_not_present_in_reference_sequence'
return 'fail', 'fail'
N_start = location + context.find('N')
N_end = location + len(context) - context[::-1].find('N')
if sequence[N_end:].find(context) == -1:
return N_start, N_end
else:
self.failureReason = 'context_appears_in_reference_more_than_once'
return 'fail', 'fail'
def align_reference(self, BAMentry):
"""given a pysam.AlignmentFile BAM entry,
builds the reference alignment string with indels accounted for"""
index = BAMentry.reference_start
refAln = ''
for cTuple in BAMentry.cigartuples:
if cTuple[0] == 0: #match
refAln += self.reference.seq[index:index+cTuple[1]]
index += cTuple[1]
elif cTuple[0] == 1: #insertion
refAln += '-'*cTuple[1]
elif cTuple[0] == 2: #deletion
index += cTuple[1]
return refAln
def add_group_barcode_type(self):
"""adds a list of the barcodeTypes that are used for grouping, and a list of barcodeTypes that are not
used for grouping, and throws an error if there are different types of groups
defined within the barcodeGroups dictionary
Example: if self.barcodeGroups = {'group1':{'fwd':'barcode1','rvs':'barcode2'},'group2':{'fwd':'barcode3','rvs':'barcode4'}},
will set self.barcodeGroupType as ['fwd', 'rvs']"""
groupedBarcodeTypes = []
ungroupedBarcodeTypes = []
first = True
for group in self.barcodeGroups:
groupDict = self.barcodeGroups[group]
if first:
for barcodeType in self.barcodeInfo: # add barcode types in the order they appear in barcodeInfo
if barcodeType in groupDict:
groupedBarcodeTypes.append(barcodeType)
else:
ungroupedBarcodeTypes.append(barcodeType)
first = False
firstGroup = group
firstGroupDict = groupDict
else:
assert (all([bcType in groupDict for bcType in firstGroupDict])), f'All barcode groups do not use the same set of barcode types. Group {group} differs from group {firstGroup}'
assert (all([bcType in firstGroupDict for bcType in groupDict])), f'All barcode groups do not use the same set of barcode types. Group {group} differs from group {firstGroup}'
self.groupedBarcodeTypes = groupedBarcodeTypes
self.ungroupedBarcodeTypes = ungroupedBarcodeTypes
def add_barcode_name_dict(self):
"""adds the inverse of dictionary of barcodeGroups, where keys are tuples of
barcode names in the order specified in barcodeInfo, and values are group names, as defined in barcodeGroups.
used to name files that contain sequences containing specific barcode combinations"""
groupNameDict = {}
for group in self.barcodeGroups:
groupDict = self.barcodeGroups[group]
key = tuple()
for barcodeType in self.groupedBarcodeTypes:
if barcodeType in groupDict:
key += tuple([groupDict[barcodeType]])
groupNameDict[key] = group
self.barcodeGroupNameDict = groupNameDict
def get_demux_output_prefix(self, sequenceBarcodesDict):
"""given a dictionary of barcode names for a particular sequence, as specified in the barcodeInfo config dictionary,
uses the barcode group name dictionary `self.barcodeGroupNameDict` created by add_barcode_name_dict
to generate a file name prefix according to their group name if it can be identified, or will
produce a file name prefix simply corresponding to the barcodes if a group can't be identified.
examples:
1)
if the order of barcodes in barcodeInfo is 'fwd','rvs','alt' and
barcodeNamesDict == {'fwd':'one', 'rvs':'two', 'alt':'three'} and self.barcodeGroups == {'group1':{'fwd':'one','rvs':'two'}}:
self.get_demux_output_prefix(barcodeNamesDict) will return 'group1-three'
2)
if the order of barcodes in barcodeInfo is 'fwd','rvs','alt' and
barcodeNamesDict == {'fwd':'one', 'rvs':'two', 'alt':'three'} and self.barcodeGroups == {'group1':{'fwd':'two','rvs':'two'}}:
self.get_demux_output_prefix(barcodeNamesDict) will return 'one-two-three'
"""
# split barcodes into those utilized by groups and those not utilized by groups. Group tuple used as dictionary key to look up group name
ungroupSequenceBarcodes = []
groupBarcodes = tuple()
for barcodeType in sequenceBarcodesDict:
bc = sequenceBarcodesDict[barcodeType]
if barcodeType in self.groupedBarcodeTypes:
if bc == 'fail':
return '-'.join(sequenceBarcodesDict.values())
groupBarcodes += tuple([bc])
else:
ungroupSequenceBarcodes.append(bc)
try:
groupName = self.barcodeGroupNameDict[groupBarcodes]
return '-'.join([groupName]+ungroupSequenceBarcodes)
except KeyError:
return '-'.join(sequenceBarcodesDict.values())
def id_seq_barcodes(self, refAln, BAMentry):
"""Inputs:
refAln: aligned reference string from align_reference()
BAMentry: pysam.AlignmentFile entry
Returns a list of information for a provided `BAMentry` that will be used for demuxing
and will be added as a row for a demux stats DataFrame"""
sequenceBarcodesDict = {}
outList = []
for barcodeType in self.barcodeDicts:
self.failureReason = None
barcodeName = None
notExactMatch = 0
failureReason = {'context_not_present_in_reference_sequence':0, 'context_appears_in_reference_more_than_once':0, 'barcode_not_in_fasta':0}
start,stop = self.find_N_start_end(refAln, self.barcodeContexts[barcodeType])
try:
barcode = BAMentry.query_alignment_sequence[ start:stop ]
except TypeError:
barcodeName = 'fail'
failureReason[self.failureReason] = 1 # failure reason is set by self.find_N_start_end
if barcodeName != 'fail':
try:
barcodeName = self.barcodeDicts[barcodeType][barcode]
except KeyError:
if barcodeType in self.hammingDistanceBarcodeDict:
try:
closestBarcode = self.hammingDistanceBarcodeDict[barcodeType][barcode]
barcodeName = self.barcodeDicts[barcodeType][closestBarcode]
notExactMatch = 1
except KeyError:
barcodeName = 'fail'
failureReason['barcode_not_in_fasta'] = 1
else:
barcodeName = 'fail'
failureReason['barcode_not_in_fasta'] = 1
sequenceBarcodesDict[barcodeType] = barcodeName
outList += [barcodeName, notExactMatch] + list(failureReason.values())
return self.get_demux_output_prefix(sequenceBarcodesDict), outList
def demux_BAM(self, BAMin, outputDir):
bamfile = pysam.AlignmentFile(BAMin, 'rb')
self.add_barcode_contexts()
self.add_barcode_dicts()
self.add_barcode_hamming_distance()
self.add_hamming_distance_barcode_dict()
self.add_group_barcode_type()
self.add_barcode_name_dict()
# dictionary where keys are file name parts indicating the barcodes and values are file objects corresponding to those file name parts
# file objects are only created if the specific barcode combination is seen
outFileDict = {}
# columns names for dataframe to be generated from rows output by id_seq_barcodes
colNames = ['output_file_barcodes', 'count']
# column names and dictionary for grouping rows in final dataframe
groupByColNames = ['output_file_barcodes']
sumColsDict = {'count':'sum'}
for barcodeType in self.barcodeDicts:
groupByColNames.append(barcodeType)
intCols = [f'{barcodeType}:not_exact_match', f'{barcodeType}_failed:context_not_present_in_reference_sequence', f'{barcodeType}_failed:context_appears_in_reference_more_than_once', f'{barcodeType}_failed:barcode_not_in_fasta']
colNames.extend( [barcodeType] + intCols )
for col in intCols:
sumColsDict[col] = 'sum'
rows = []
os.makedirs(outputDir, exist_ok=True)
for BAMentry in bamfile.fetch(self.reference.id):
refAln = self.align_reference(BAMentry)
outputBarcodes, BAMentryBarcodeData = self.id_seq_barcodes(refAln, BAMentry)
try:
outFileDict[outputBarcodes].write(BAMentry)
except KeyError:
fName = os.path.join(outputDir, f'{self.tag}_{outputBarcodes}.bam')
outFileDict[outputBarcodes] = pysam.AlignmentFile(fName, 'wb', template=bamfile)
outFileDict[outputBarcodes].write(BAMentry)
count = 1
rows.append([outputBarcodes,count] + BAMentryBarcodeData)
for sortedBAM in outFileDict:
outFileDict[sortedBAM].close()
demuxStats = | pd.DataFrame(rows, columns=colNames) | pandas.DataFrame |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import lightgbm as lgb
from catboost import CatBoostRegressor
def featureModify(isTrain):
rowstoread = None
if isTrain:
train = pd.read_csv('../input/train.csv',nrows=rowstoread)
test = | pd.read_csv('../input/test.csv',nrows=rowstoread) | pandas.read_csv |
"""Tests for the sdv.constraints.base module."""
import warnings
from unittest.mock import Mock, patch
import pandas as pd
import pytest
from copulas.multivariate.gaussian import GaussianMultivariate
from copulas.univariate import GaussianUnivariate
from rdt.hyper_transformer import HyperTransformer
from sdv.constraints.base import Constraint, _get_qualified_name, get_subclasses, import_object
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import ColumnFormula, UniqueCombinations
def test__get_qualified_name_class():
"""Test the ``_get_qualified_name`` function, if a class is passed.
The ``_get_qualified_name`` function is expected to:
- Return the Fully Qualified Name from a class.
Input:
- A class.
Output:
- The class qualified name.
"""
# Run
fully_qualified_name = _get_qualified_name(Constraint)
# Assert
expected_name = 'sdv.constraints.base.Constraint'
assert fully_qualified_name == expected_name
def test__get_qualified_name_function():
"""Test the ``_get_qualified_name`` function, if a function is passed.
The ``_get_qualified_name`` function is expected to:
- Return the Fully Qualified Name from a function.
Input:
- A function.
Output:
- The function qualified name.
"""
# Run
fully_qualified_name = _get_qualified_name(_get_qualified_name)
# Assert
expected_name = 'sdv.constraints.base._get_qualified_name'
assert fully_qualified_name == expected_name
def test_get_subclasses():
"""Test the ``get_subclasses`` function.
The ``get_subclasses`` function is expected to:
- Recursively find subclasses for the class object passed.
Setup:
- Create three classes, Parent, Child and GrandChild,
which inherit of each other hierarchically.
Input:
- The Parent class.
Output:
- Dict of the subclasses of the class: ``Child`` and ``GrandChild`` classes.
"""
# Setup
class Parent:
pass
class Child(Parent):
pass
class GrandChild(Child):
pass
# Run
subclasses = get_subclasses(Parent)
# Assert
expected_subclasses = {
'Child': Child,
'GrandChild': GrandChild
}
assert subclasses == expected_subclasses
def test_import_object_class():
"""Test the ``import_object`` function, when importing a class.
The ``import_object`` function is expected to:
- Import a class from its qualified name.
Input:
- Qualified name of the class.
Output:
- The imported class.
"""
# Run
obj = import_object('sdv.constraints.base.Constraint')
# Assert
assert obj is Constraint
def test_import_object_function():
"""Test the ``import_object`` function, when importing a function.
The ``import_object`` function is expected to:
- Import a function from its qualifed name.
Input:
- Qualified name of the function.
Output:
- The imported function.
"""
# Run
imported = import_object('sdv.constraints.base.import_object')
# Assert
assert imported is import_object
class TestConstraint():
def test__identity(self):
"""Test ```Constraint._identity`` method.
``_identity`` method should return whatever it is passed.
Input:
- anything
Output:
- Input
"""
# Run
instance = Constraint('all')
output = instance._identity('input')
# Asserts
assert output == 'input'
def test___init___transform(self):
"""Test ```Constraint.__init__`` method when 'transform' is passed.
If 'transform' is given, the ``__init__`` method should replace the ``is_valid`` method
with an identity and leave ``transform`` and ``reverse_transform`` untouched.
Input:
- transform
Side effects:
- is_valid == identity
- transform != identity
- reverse_transform != identity
"""
# Run
instance = Constraint(handling_strategy='transform')
# Asserts
assert instance.filter_valid == instance._identity
assert instance.transform != instance._identity
assert instance.reverse_transform != instance._identity
def test___init___reject_sampling(self):
"""Test ``Constraint.__init__`` method when 'reject_sampling' is passed.
If 'reject_sampling' is given, the ``__init__`` method should replace the ``transform``
and ``reverse_transform`` methods with an identity and leave ``is_valid`` untouched.
Input:
- reject_sampling
Side effects:
- is_valid != identity
- transform == identity
- reverse_transform == identity
"""
# Run
instance = Constraint(handling_strategy='reject_sampling')
# Asserts
assert instance.filter_valid != instance._identity
assert instance.transform == instance._identity
assert instance.reverse_transform == instance._identity
def test___init___all(self):
"""Test ``Constraint.__init__`` method when 'all' is passed.
If 'all' is given, the ``__init__`` method should leave ``transform``,
``reverse_transform`` and ``is_valid`` untouched.
Input:
- all
Side effects:
- is_valid != identity
- transform != identity
- reverse_transform != identity
"""
# Run
instance = Constraint(handling_strategy='all')
# Asserts
assert instance.filter_valid != instance._identity
assert instance.transform != instance._identity
assert instance.reverse_transform != instance._identity
def test___init___not_known(self):
"""Test ``Constraint.__init__`` method when an unknown ``handling_strategy`` is passed.
If an unknown ``handling_strategy`` is given, a ValueError is raised.
Input:
- not_known
Side effects:
- ValueError
"""
# Run
with pytest.raises(ValueError):
Constraint(handling_strategy='not_known')
def test_fit(self):
"""Test the ``Constraint.fit`` method.
The base ``Constraint.fit`` method is expected to:
- Call ``_fit`` method.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
table_data = pd.DataFrame({
'a': [1, 2, 3]
})
instance = Constraint(handling_strategy='transform', fit_columns_model=False)
instance._fit = Mock()
# Run
instance.fit(table_data)
# Assert
instance._fit.assert_called_once_with(table_data)
@patch('sdv.constraints.base.GaussianMultivariate', spec_set=GaussianMultivariate)
def test_fit_gaussian_multivariate_correct_distribution(self, gm_mock):
"""Test the ``GaussianMultivariate`` from the ``Constraint.fit`` method.
The ``GaussianMultivariate`` is expected to be called with default distribution
set as ``GaussianUnivariate``.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [1, 2, 3]
})
instance = Constraint(handling_strategy='transform', fit_columns_model=True)
instance.constraint_columns = ('a', 'b')
# Run
instance.fit(table_data)
# Assert
gm_mock.assert_called_once_with(distribution=GaussianUnivariate)
@patch('sdv.constraints.base.GaussianMultivariate', spec_set=GaussianMultivariate)
@patch('sdv.constraints.base.HyperTransformer', spec_set=HyperTransformer)
def test_fit_trains_column_model(self, ht_mock, gm_mock):
"""Test the ``Constraint.fit`` method trains the column model.
When ``fit_columns_model`` is True and there are multiple ``constraint_columns``,
the ``Constraint.fit`` method is expected to:
- Call ``_fit`` method.
- Create ``_hyper_transformer``.
- Create ``_column_model`` and train it.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance = Constraint(handling_strategy='transform', fit_columns_model=True)
instance.constraint_columns = ('a', 'b')
# Run
instance.fit(table_data)
# Assert
gm_mock.return_value.fit.assert_called_once()
calls = ht_mock.return_value.fit_transform.mock_calls
args = calls[0][1]
assert len(calls) == 1
pd.testing.assert_frame_equal(args[0], table_data)
def test_transform(self):
"""Test the ``Constraint.transform`` method.
It is an identity method for completion, to be optionally
overwritten by subclasses.
The ``Constraint.transform`` method is expected to:
- Return the input data unmodified.
Input:
- Anything
Output:
- Input
"""
# Run
instance = Constraint(handling_strategy='transform')
output = instance.transform('input')
# Assert
assert output == 'input'
def test_transform_calls__transform(self):
"""Test that the ``Constraint.transform`` method calls ``_transform``.
The ``Constraint.transform`` method is expected to:
- Return value returned by ``_transform``.
Input:
- Anything
Output:
- Result of ``_transform(input)``
"""
# Setup
constraint_mock = Mock()
constraint_mock.fit_columns_model = False
constraint_mock._transform.return_value = 'the_transformed_data'
constraint_mock._validate_columns.return_value = pd.DataFrame()
# Run
output = Constraint.transform(constraint_mock, 'input')
# Assert
assert output == 'the_transformed_data'
def test_transform_model_disabled_any_columns_missing(self):
"""Test the ``Constraint.transform`` method with invalid data.
If ``table_data`` is missing any columns and ``fit_columns_model``
is False, it should raise a ``MissingConstraintColumnError``.
The ``Constraint.transform`` method is expected to:
- Raise ``MissingConstraintColumnError``.
"""
# Run
instance = Constraint(handling_strategy='transform', fit_columns_model=False)
instance._transform = lambda x: x
instance.constraint_columns = ('a',)
# Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform( | pd.DataFrame([[1, 2], [3, 4]], columns=['b', 'c']) | pandas.DataFrame |
import dash
from dash import dcc, html, dash_table, callback
from dash.dependencies import Input, Output
import dash_bootstrap_components as dbc
import plotly.graph_objects as go
import pandas as pd
df = | pd.read_csv("Amazon.csv") | pandas.read_csv |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2021/3/25 12:42 PM
# @Author : <NAME>
# @File : getwordC.py
import jieba
import pandas as pd
import wordcloud
import matplotlib.pyplot as plt
from imageio import imread
# Read the data
file_name = '/Users/zhihuyang/IdeaProjects/ddmcData/dataset/result.csv'
df = pd.read_csv(file_name, encoding='utf-8', names=['content'])
backgroud_Image = imread('/Users/zhihuyang/IdeaProjects/ddmcData/wordCloud/img_1.png')
img_colors = wordcloud.ImageColorGenerator(backgroud_Image)
# Clean the data
# Use try/except/continue so that a single bad record does not stop the whole job
df = df.dropna()
content = df["content"].values.tolist()
segment = []
for line in content:
try:
segs = jieba.lcut(line)
for seg in segs:
if len(seg) > 1 and seg != '\r\n\t':
segment.append(seg)
except:
print(line)
continue
# Remove stopwords
words_df = pd.DataFrame({'segment': segment})
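# --- Hedged continuation: the original script is cut off here. ---------------
# A typical next step filters stopwords, counts word frequencies and renders
# the word cloud with the mask/colours loaded above. The stopword set and the
# font path are placeholders, not values from the original project.
stopwords = set()  # e.g. loaded from a stopwords file
words_df = words_df[~words_df['segment'].isin(stopwords)]
word_freq = words_df['segment'].value_counts().to_dict()
wc = wordcloud.WordCloud(
    font_path='/System/Library/Fonts/PingFang.ttc',  # assumed CJK-capable font
    background_color='white',
    mask=backgroud_Image,
    max_words=200,
)
wc.generate_from_frequencies(word_freq)
wc.recolor(color_func=img_colors)
plt.figure(figsize=(10, 10))
plt.imshow(wc)
plt.axis('off')
plt.show()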
from backend.lib import sql_queries
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
def test_get_user_info_for_existing_user(refresh_db_once, db_connection_sqlalchemy):
engine = db_connection_sqlalchemy
user_id = sql_queries.get_user_id(engine, email='<EMAIL>', password='<PASSWORD>')
assert user_id == 1
def test_get_user_info_with_wrong_password_results_in_no_users_found(refresh_db_once, db_connection_sqlalchemy):
engine = db_connection_sqlalchemy
user_id = sql_queries.get_user_id(engine, email='<EMAIL>', password='<PASSWORD>')
assert user_id is None
def test_get_user_info_with_wildcard_email_address_results_in_no_users_found(refresh_db_once, db_connection_sqlalchemy):
engine = db_connection_sqlalchemy
user_id = sql_queries.get_user_id(engine, email='*', password='<PASSWORD>')
assert user_id is None
def test_get_user_info_with_wildcard_password_results_in_no_users_found(refresh_db_once, db_connection_sqlalchemy):
engine = db_connection_sqlalchemy
user_id = sql_queries.get_user_id(engine, email='<EMAIL>', password='*')
assert user_id is None
def test_get_user_info_for_non_existant_user(refresh_db_once, db_connection_sqlalchemy):
engine = db_connection_sqlalchemy
user_id = sql_queries.get_user_id(engine, email='<EMAIL>', password='<PASSWORD>')
assert user_id is None
def test_count_input_data_items_for_all_users_and_label_tasks(refresh_db_once, db_connection_sqlalchemy):
df_test = pd.DataFrame()
df_test['user_id'] = [1, 1, 1, 1, 2, 2, 2, 3, None, None]
df_test['label_task_id'] = [1, 2, 3, 5, 1, 2, 5, 1, 4, 6]
df_test['total_items'] = [5, 3, 1, 5, 5, 2, 5, 1, 1, 1]
df_test['num_unlabeled'] = [2, 2, 1, 5, 4, 2, 5, 0, 1, 1]
df_test['num_labeled'] = [3, 1, 0, 0, 1, 0, 0, 1, 0, 0]
engine = db_connection_sqlalchemy
df = sql_queries.count_input_data_items_per_user_per_label_task(engine, label_task_id=None, user_id=None)
assert_series_equal(df['user_id'], df_test['user_id'])
assert_series_equal(df['label_task_id'], df_test['label_task_id'])
assert_series_equal(df['total_items'], df_test['total_items'])
assert_series_equal(df['num_labeled'], df_test['num_labeled'])
def test_get_all_input_data_items(refresh_db_once, db_connection_sqlalchemy):
df_test = pd.DataFrame()
df_test['input_data_id'] = [1, 2, 3, 4, 5]
df_test['dataset_group_id'] = [1, 1, 1, 1, 1]
df_test['dataset_id'] = [1, 1, 2, 2, 2]
engine = db_connection_sqlalchemy
df = sql_queries.get_all_input_data_items(engine, label_task_id=1)
assert_frame_equal(df, df_test)
def test_get_all_user_input_data(refresh_db_once, db_connection_sqlalchemy):
df_test = pd.DataFrame()
df_test['label_id'] = [3, 2, 1]
df_test['input_data_id'] = [3, 2, 1]
df_test['user_id'] = [1, 1, 1]
df_test['label_task_id'] = [1, 1, 1]
engine = db_connection_sqlalchemy
df = sql_queries.get_all_user_input_data(engine, user_id=1, label_task_id=1, n=None)
assert_series_equal(df['label_id'], df_test['label_id'])
assert_series_equal(df['input_data_id'], df_test['input_data_id'])
assert_series_equal(df['user_id'], df_test['user_id'])
assert_series_equal(df['label_task_id'], df_test['label_task_id'])
def test_get_all_user_input_data_filtered(refresh_db_once, db_connection_sqlalchemy):
#filter incomplete
df_test = pd.DataFrame()
df_test['label_id'] = [1]
df_test['input_data_id'] = [1]
df_test['user_id'] = [1]
df_test['label_task_id'] = [1]
engine = db_connection_sqlalchemy
df = sql_queries.get_first_user_input_data(engine, user_id=1, label_task_id=1, label_filter = "filter_incomplete")
print("Received first incomplete entry")
assert_series_equal(df['label_id'], df_test['label_id'])
assert_series_equal(df['input_data_id'], df_test['input_data_id'])
assert_series_equal(df['user_id'], df_test['user_id'])
assert_series_equal(df['label_task_id'], df_test['label_task_id'])
#filter complete
df_test = pd.DataFrame()
df_test['label_id'] = [6]
df_test['input_data_id'] = [4]
df_test['user_id'] = [1]
df_test['label_task_id'] = [3]
engine = db_connection_sqlalchemy
df = sql_queries.get_first_user_input_data(engine, user_id=3, label_task_id=1, label_filter = "filter_complete")
assert len(df) == 0
def test_get_preceding_user_data_item(refresh_db_once, db_connection_sqlalchemy):
df_test = pd.DataFrame()
df_test['label_id'] = [2]
df_test['input_data_id'] = [2]
df_test['user_id'] = [1]
df_test['label_task_id'] = [1]
engine = db_connection_sqlalchemy
df = sql_queries.get_preceding_user_data_item(engine, user_id=1, label_task_id=1, current_input_data_id=3)
assert df['label_id'].values == df_test['label_id'].values
assert df['input_data_id'].values == df_test['input_data_id'].values
assert df['user_id'].values == df_test['user_id'].values
assert df['label_task_id'].values == df_test['label_task_id'].values
def test_get_preceding_user_data_item_for_another_input_data_id(refresh_db_once, db_connection_sqlalchemy):
df_test = pd.DataFrame()
df_test['label_id'] = [1]
df_test['input_data_id'] = [1]
df_test['user_id'] = [1]
df_test['label_task_id'] = [1]
engine = db_connection_sqlalchemy
df = sql_queries.get_preceding_user_data_item(engine, user_id=1, label_task_id=1, current_input_data_id=2)
assert df['label_id'].values == df_test['label_id'].values
assert df['input_data_id'].values == df_test['input_data_id'].values
assert df['user_id'].values == df_test['user_id'].values
assert df['label_task_id'].values == df_test['label_task_id'].values
def test_get_preceding_user_data_item_if_last_item(refresh_db_once, db_connection_sqlalchemy):
engine = db_connection_sqlalchemy
df = sql_queries.get_preceding_user_data_item(engine, user_id=1, label_task_id=1, current_input_data_id=1)
assert len(df) == 0
def test_get_preceding_user_data_item_if_input_data_id_not_found(refresh_db_once, db_connection_sqlalchemy):
engine = db_connection_sqlalchemy
df = sql_queries.get_preceding_user_data_item(engine, user_id=1, label_task_id=1, current_input_data_id=28)
assert len(df) == 0
def test_get_next_user_data_item(refresh_db_once, db_connection_sqlalchemy):
df_test = pd.DataFrame()
df_test['label_id'] = [3]
df_test['input_data_id'] = [3]
df_test['user_id'] = [1]
df_test['label_task_id'] = [1]
engine = db_connection_sqlalchemy
df = sql_queries.get_next_user_data_item(engine, user_id=1, label_task_id=1, current_input_data_id=2)
assert df['label_id'].values == df_test['label_id'].values
assert df['input_data_id'].values == df_test['input_data_id'].values
assert df['user_id'].values == df_test['user_id'].values
assert df['label_task_id'].values == df_test['label_task_id'].values
def test_get_next_user_data_item_for_another_input_data_id(refresh_db_once, db_connection_sqlalchemy):
df_test = pd.DataFrame()
df_test['label_id'] = [2]
df_test['input_data_id'] = [2]
df_test['user_id'] = [1]
df_test['label_task_id'] = [1]
engine = db_connection_sqlalchemy
df = sql_queries.get_next_user_data_item(engine, user_id=1, label_task_id=1, current_input_data_id=1)
assert df['label_id'].values == df_test['label_id'].values
assert df['input_data_id'].values == df_test['input_data_id'].values
assert df['user_id'].values == df_test['user_id'].values
assert df['label_task_id'].values == df_test['label_task_id'].values
def test_get_next_user_data_item_if_first_item(refresh_db_once, db_connection_sqlalchemy):
engine = db_connection_sqlalchemy
df = sql_queries.get_next_user_data_item(engine, user_id=1, label_task_id=1, current_input_data_id=3)
assert len(df) == 0
def test_get_next_user_data_item_if_input_data_id_not_found(refresh_db_once, db_connection_sqlalchemy):
engine = db_connection_sqlalchemy
df = sql_queries.get_next_user_data_item(engine, user_id=1, label_task_id=1, current_input_data_id=28)
assert len(df) == 0
def test_get_next_unlabeled_input_data_item(refresh_db_once, db_connection_sqlalchemy):
engine = db_connection_sqlalchemy
df_unlabeled = sql_queries.get_next_unlabeled_input_data_item(engine, label_task_id=5, shuffle=False, n=None)
assert len(df_unlabeled) == 5
assert df_unlabeled['input_data_id'][0] == 1
assert df_unlabeled['input_data_id'][1] == 2
assert df_unlabeled['input_data_id'][2] == 3
assert df_unlabeled['input_data_id'][3] == 4
assert df_unlabeled['input_data_id'][4] == 5
def test_get_next_unlabeled_input_data_item_with_limit(refresh_db_once, db_connection_sqlalchemy):
engine = db_connection_sqlalchemy
df_unlabeled = sql_queries.get_next_unlabeled_input_data_item(engine, label_task_id=5, shuffle=False, n=2)
assert len(df_unlabeled) == 2
assert df_unlabeled['input_data_id'][0] == 1
assert df_unlabeled['input_data_id'][1] == 2
def test_get_next_unlabeled_input_data_item_when_some_images_already_labeled_for_a_label_task(refresh_db_once,
db_connection_sqlalchemy):
engine = db_connection_sqlalchemy
df_unlabeled = sql_queries.get_next_unlabeled_input_data_item(engine, label_task_id=1, shuffle=False, n=None)
assert len(df_unlabeled) == 1
assert df_unlabeled['input_data_id'][0] == 5
def test_get_next_unlabeled_input_data_item_when_no_images_available(refresh_db_once, db_connection_sqlalchemy):
engine = db_connection_sqlalchemy
df_unlabeled = sql_queries.get_next_unlabeled_input_data_item(engine, label_task_id=6, shuffle=False, n=None)
assert df_unlabeled is None
def test_get_all_input_data_items_if_input_data_id_does_not_exist(refresh_db_once, db_connection_sqlalchemy):
df_test = pd.DataFrame()
df_test['input_data_id'] = []
df_test['dataset_group_id'] = []
df_test['dataset_id'] = []
engine = db_connection_sqlalchemy
df = sql_queries.get_all_input_data_items(engine, label_task_id=27)
assert len(df) == 0
assert df.columns.tolist() == df_test.columns.tolist()
def test_get_input_data_path(refresh_db_once, db_connection_sqlalchemy):
engine = db_connection_sqlalchemy
input_data_path = sql_queries.get_input_data_path(engine, input_data_id=1)
assert input_data_path == 'test_images/image.jpg'
def test_get_input_data_path_if_input_data_id_does_not_exist(refresh_db_once, db_connection_sqlalchemy):
engine = db_connection_sqlalchemy
input_data_path = sql_queries.get_input_data_path(engine, input_data_id=27)
assert input_data_path is None
def test_get_label_tasks(refresh_db_once, db_connection_sqlalchemy):
df_test = pd.DataFrame()
df_test['label_task_id'] = [1, 2, 3, 4, 5, 6]
df_test['dataset_group_id'] = [1, 2, 3, 3, 1, 4]
engine = db_connection_sqlalchemy
df = sql_queries.get_label_tasks(engine)
expected_cols = ['label_task_id',
'dataset_group_id',
'title',
'description',
'type',
'default_tool',
'allowed_tools',
'permit_overlap',
'label_classes',
'enable_advanced_tools']
assert df.columns.tolist() == expected_cols
assert_series_equal(df['label_task_id'], df_test['label_task_id'])
def test_get_label_tasks_for_specific_user(refresh_db_once, db_connection_sqlalchemy):
df_test = pd.DataFrame()
df_test['label_task_id'] = [1, 2, 3, 5]
df_test['dataset_group_id'] = [1, 2, 3, 1]
df_test['title'] = ['Rock particle segmentation',
'Rock particle segmentation subset',
'Froth segmentation',
'Rock particle segmentation: Initially unlabeled']
engine = db_connection_sqlalchemy
df = sql_queries.get_label_tasks(engine, user_id=1)
expected_cols = ['label_task_id',
'dataset_group_id',
'title',
'description',
'type',
'default_tool',
'allowed_tools',
'permit_overlap',
'label_classes',
'enable_advanced_tools']
assert df.columns.tolist() == expected_cols
assert_series_equal(df['label_task_id'], df_test['label_task_id'])
def test_get_label_task(refresh_db_once, db_connection_sqlalchemy):
df_test = pd.DataFrame()
df_test['label_task_id'] = [1]
df_test['dataset_group_id'] = [1]
engine = db_connection_sqlalchemy
df = sql_queries.get_label_task(engine, label_task_id=1)
expected_cols = ['label_task_id',
'dataset_group_id',
'title',
'description',
'type',
'default_tool',
'allowed_tools',
'permit_overlap',
'label_classes',
'enable_advanced_tools']
assert df.columns.tolist() == expected_cols
assert_series_equal(df['label_task_id'], df_test['label_task_id'])
def test_get_label_id(refresh_db_once, db_connection_sqlalchemy):
engine = db_connection_sqlalchemy
label_id = sql_queries.get_label_id(engine, user_id=1, label_task_id=1, input_data_id=1)
assert label_id == 1
def test_get_label_id_if_label_does_not_exist(refresh_db_once, db_connection_sqlalchemy):
engine = db_connection_sqlalchemy
label_id = sql_queries.get_label_id(engine, user_id=1, label_task_id=1, input_data_id=27)
assert label_id is None
def test_get_label(refresh_db_once, db_connection_sqlalchemy):
df_test = pd.DataFrame()
df_test['label_id'] = [3]
df_test['input_data_id'] = [3]
df_test['in_progress'] = [False]
df_test['user_complete'] = [False]
df_test['needs_improvement'] = [False]
df_test['admin_complete'] = [False]
df_test['paid'] = [False]
df_test['user_comment'] = [None]
df_test['admin_comment'] = [None]
engine = db_connection_sqlalchemy
df = sql_queries.get_label(engine, user_id=1, label_task_id=1, input_data_id=3)
assert_series_equal(df['label_id'], df_test['label_id'])
assert_series_equal(df['input_data_id'], df_test['input_data_id'])
assert_series_equal(df['in_progress'], df_test['in_progress'])
assert_series_equal(df['user_complete'], df_test['user_complete'])
assert_series_equal(df['needs_improvement'], df_test['needs_improvement'])
assert_series_equal(df['admin_complete'], df_test['admin_complete'])
assert_series_equal(df['paid'], df_test['paid'])
assert_series_equal(df['user_comment'], df_test['user_comment'])
assert_series_equal(df['admin_comment'], df_test['admin_comment'])
def test_get_label_by_id(refresh_db_once, db_connection_sqlalchemy):
df_test = pd.DataFrame()
df_test['label_id'] = [3]
df_test['input_data_id'] = [3]
df_test['in_progress'] = [False]
df_test['user_complete'] = [False]
df_test['needs_improvement'] = [False]
df_test['admin_complete'] = [False]
df_test['paid'] = [False]
df_test['user_comment'] = [None]
df_test['admin_comment'] = [None]
engine = db_connection_sqlalchemy
df = sql_queries.get_label_by_id(engine, label_id=3)
assert_series_equal(df['label_id'], df_test['label_id'])
assert_series_equal(df['input_data_id'], df_test['input_data_id'])
    assert_series_equal(df['in_progress'], df_test['in_progress'])
import numpy as np
from tenbagger.src.passiveIncome.calculator import PassiveIncomeCalculator
import pandas as pd
class PassiveDividends(PassiveIncomeCalculator):
def __init__(self, port):
super().__init__(port=port)
def calulate_dividends(self, n: int, growth_stock, growth_dividend, monthly_payment, method='proportional',
only_dividend_stocks=False, generate_report: bool = False):
"""
:param n: number of months
:return:
"""
df = self.dist.df.copy()
rate = 1 + growth_stock
rate_dividend = 1 + growth_dividend
if growth_stock < 0 or growth_stock > 1:
raise ValueError("Growth should ben between [0, 1]")
if only_dividend_stocks:
df = df[df['yield'].notna()].copy()
df['percentage'] = df['value'].apply(lambda x: x / sum(df['value']))
else:
df['percentage'] = np.float32(df['percentage'].apply(lambda x: x.replace('%', ''))) / 100
df['monthly'] = monthly_payment
yearly_div = 0
for m in range(0, n + 1):
month = (m % 12) + 1
df['adding_shares'] = (df['percentage'] * monthly_payment) / df['price']
# add the dividends
tmp = self.payout[self.payout.month == month].copy()
df['monthly_dividend'] = 0
df['dripping'] = 0
for ticker in set(tmp.ticker):
amount = df.loc[df.ticker == ticker, 'amount'].values[0]
div = tmp.loc[tmp.ticker == ticker, 'Dividends'].values[0]
df.loc[df.ticker == ticker, 'monthly_dividend'] = amount * div
df['dripping'] = np.where(df.monthly_dividend > 0, df.monthly_dividend / df.price, 0)
df['amount'] += df['adding_shares'] + df['dripping']
yearly_div += df['monthly_dividend'].sum()
df['value'] = df.amount * df.price
data = {'month': [m],
'paid_dividends': [df.monthly_dividend.sum()],
'staking_rewards': [None],
'portfolio_value': [df.value.sum()],
'growth_stock': [growth_stock],
'dividend_growth': [growth_dividend]}
            self.df_report = pd.concat([self.df_report, pd.DataFrame.from_dict(data)])
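            # Hedged usage sketch (illustrative values; the portfolio path is an assumption):
            #   calc = PassiveDividends(port="portfolio.yaml")
            #   calc.calulate_dividends(n=120, growth_stock=0.05, growth_dividend=0.03,
            #                           monthly_payment=500)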
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas as pd
import datetime as dt
import numpy as np
from collections import OrderedDict
import os
import pickle
from errorplots import ErrorPlots
class ErrorAnalysis(object):
""" Reads log and output files to analyze errors"""
def __init__(self, train_log_file=None, pred_file=None, period=1, output_field=3):
""" Instantiates the class with the log file and prediction output file
period : prediction period i.e how far out are the predictions in years (1,2,3 etc)
output_field : column to grab in the output file. EBIT is 3
"""
self.train_log_file = train_log_file
self.pred_file = pred_file
self.period = period
self.output_field = output_field
return
def read_train_log(self):
""" Returns mse data from training log file
mse is an ordered dict with epoch as key and (train_mse,validation_mse) as value
"""
if self.train_log_file is None:
print("train log file not provided")
return
mse_data = OrderedDict()
# Iterate through the file
with open(self.train_log_file) as f:
lines = f.readlines()
for line in lines:
line = line.split(' ')
if line[0] == 'Epoch:':
epoch = int(line[1])
train_mse = float(line[4])
valid_mse = float(line[7])
# Add to the mse dict
mse_data[epoch] = (train_mse, valid_mse)
return mse_data
def read_predictions(self):
""" Returns a dict of companies with output and target values# Structure of companies dict
companies : {
gvkey:
period: {
output : { date: output }
target : { date: target}
}
"""
if self.pred_file is None:
print('Predictions file not provided')
return
else:
print('Reading '+self.pred_file)
# initialize the dicts
companies={}
with open(self.pred_file, 'rb') as f:
lines = f.readlines()
for i, line in enumerate(lines):
row = line.split(' ')
try:
date = dt.datetime.strptime(str(row[0]), "%Y%m")
mse_val = float(row[-1].split('=')[-1])
cur_output = float(lines[i + 6].split(' ')[self.output_field])
cur_target = float(lines[i + 7].split(' ')[self.output_field])
                    if np.isnan(cur_target):
cur_target = 0.0
gvkey = row[1]
try:
companies[gvkey][self.period]['output'][date] = cur_output
companies[gvkey][self.period]['target'][date] = cur_target
companies[gvkey][self.period]['mse'][date] = mse_val
except KeyError:
                        companies[gvkey] = {}
                        companies[gvkey][self.period] = {}
                        companies[gvkey][self.period]['output'] = {date: cur_output}
                        companies[gvkey][self.period]['target'] = {date: cur_target}
                        companies[gvkey][self.period]['mse'] = {date: mse_val}
except (ValueError, IndexError):
pass
return companies
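    # Hedged illustration of the structure returned above (values are made up):
    #   companies = {
    #       '001690': {                                  # gvkey
    #           1: {                                     # prediction period
    #               'output': {datetime(2015, 1, 1): 0.42},
    #               'target': {datetime(2015, 1, 1): 0.40},
    #               'mse':    {datetime(2015, 1, 1): 0.0004},
    #           }
    #       }
    #   }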
def get_errors(self, save_csv=False, rel_err_filename='rel_error.csv',mse_err_filename='mse_error.csv'):
""" Returns a dataframe of relative errors where rows are dates and columns are companies
INPUTS
companies: dict returned from read_predictions method
"""
# Read the predictions files to generate company errors
companies = self.read_predictions()
pickle.dump(companies,open('companies.pkl','wb'))
# Initialize dict
rel_err = {}
mse_err = {}
print("Processing Errors...")
for i, key in enumerate(sorted(companies)):
# print(key)
try:
company = companies[key]
p1 = company[1]
out_p1 = sorted(p1['output'].items())
tar_p1 = sorted(p1['target'].items())
mse_p1 = sorted(p1['mse'].items())
x1, y1 = zip(*out_p1)
xt1, yt1 = zip(*tar_p1)
x_mse_1,y_mse_1 = zip(*mse_p1)
rel_err[key] = abs(np.divide(np.array(y1) - np.array(yt1), np.array(yt1)))
mse_err[key] = np.array(y_mse_1)
df_tmp = pd.DataFrame(data=rel_err[key], index=x1, columns=[key])
df_tmp_mse = pd.DataFrame(data=mse_err[key], index=x1, columns=[key])
df_tmp = df_tmp.replace([np.inf, -np.inf], np.nan)
df_tmp_mse = df_tmp_mse.replace([np.inf, -np.inf], np.nan)
df_tmp = df_tmp.dropna()
df_tmp_mse = df_tmp_mse.dropna()
if i == 0:
df = df_tmp
df_mse = df_tmp_mse
else:
                    df = pd.merge(df, df_tmp, how='outer', left_index=True, right_index=True)
import sciwing.constants as constants
from sciwing.metrics.precision_recall_fmeasure import PrecisionRecallFMeasure
from sciwing.infer.classification.BaseClassificationInference import (
BaseClassificationInference,
)
from sciwing.data.datasets_manager import DatasetsManager
from deprecated import deprecated
from torch.utils.data import DataLoader
import torch
import torch.nn as nn
from typing import Any, Dict, List
import pandas as pd
from sciwing.data.line import Line
from sciwing.data.label import Label
from wasabi.util import MESSAGES
FILES = constants.FILES
SECT_LABEL_FILE = FILES["SECT_LABEL_FILE"]
class ClassificationInference(BaseClassificationInference):
"""
The sciwing engine runs the test lines through the classifier
and returns the predictions/probabilities for different classes
At a later point in time this method should be able to take any
context of lines (may be from a file) and produce the output.
This class also helps in performing various interactions with
the results on the test dataset.
Some features are
1) Show confusion matrix
2) Investigate a particular example in the test dataset
3) Get instances that were classified as 2 when their true label is 1 and others
All it needs is the configuration file stored under every experiment to have a
vocab already stored in the experiment folder
"""
def __init__(
self,
model: nn.Module,
model_filepath: str,
datasets_manager: DatasetsManager,
tokens_namespace: str = "tokens",
normalized_probs_namespace: str = "normalized_probs",
):
super(ClassificationInference, self).__init__(
model=model,
model_filepath=model_filepath,
datasets_manager=datasets_manager,
)
self.batch_size = 32
self.tokens_namespace = tokens_namespace
self.normalized_probs_namespace = normalized_probs_namespace
self.label_namespace = self.datasets_manager.label_namespaces[0]
self.labelname2idx_mapping = self.datasets_manager.get_label_idx_mapping(
label_namespace=self.label_namespace
)
self.idx2labelname_mapping = self.datasets_manager.get_idx_label_mapping(
label_namespace=self.label_namespace
)
self.load_model()
self.metrics_calculator = PrecisionRecallFMeasure(
datasets_manager=datasets_manager
)
self.output_analytics = None
# create a dataframe with all the information
self.output_df = None
def run_inference(self) -> Dict[str, Any]:
with self.msg_printer.loading(text="Running inference on test data"):
loader = DataLoader(
dataset=self.datasets_manager.test_dataset,
batch_size=self.batch_size,
shuffle=False,
collate_fn=list,
)
output_analytics = {}
# contains the predicted class names for all the instances
pred_class_names = []
true_class_names = [] # contains the true class names for all the instances
sentences = [] # batch sentences in english
true_labels_indices = []
predicted_labels_indices = []
all_pred_probs = []
self.metrics_calculator.reset()
for lines_labels in loader:
lines_labels = list(zip(*lines_labels))
lines = lines_labels[0]
labels = lines_labels[1]
batch_sentences = [line.text for line in lines]
model_output_dict = self.model_forward_on_lines(lines=lines)
normalized_probs = model_output_dict[self.normalized_probs_namespace]
self.metrics_calculator.calc_metric(
lines=lines, labels=labels, model_forward_dict=model_output_dict
)
true_label_ind, true_label_names = self.get_true_label_indices_names(
labels=labels
)
(
pred_label_indices,
pred_label_names,
) = self.model_output_dict_to_prediction_indices_names(
model_output_dict=model_output_dict
)
true_label_ind = torch.LongTensor(true_label_ind)
true_labels_indices.append(true_label_ind)
true_class_names.extend(true_label_names)
predicted_labels_indices.extend(pred_label_indices)
pred_class_names.extend(pred_label_names)
sentences.extend(batch_sentences)
all_pred_probs.append(normalized_probs)
# contains predicted probs for all the instances
all_pred_probs = torch.cat(all_pred_probs, dim=0)
true_labels_indices = torch.cat(true_labels_indices, dim=0).squeeze()
# torch.LongTensor N, 1
output_analytics["true_labels_indices"] = true_labels_indices
output_analytics["predicted_labels_indices"] = predicted_labels_indices
output_analytics["pred_class_names"] = pred_class_names
output_analytics["true_class_names"] = true_class_names
output_analytics["sentences"] = sentences
output_analytics["all_pred_probs"] = all_pred_probs
self.msg_printer.good(title="Finished running inference")
return output_analytics
def model_forward_on_lines(self, lines: List[Line]):
with torch.no_grad():
model_output_dict = self.model(
lines=lines, is_training=False, is_validation=False, is_test=True
)
return model_output_dict
def get_misclassified_sentences(self, true_label_idx: int, pred_label_idx: int):
"""This returns the true label misclassified as
pred label idx
Parameters
----------
true_label_idx : int
The label index of the true class name
pred_label_idx : int
The label index of the predicted class name
Returns
-------
List[str]
A list of strings where the true class is classified as pred class.
"""
instances_idx = self.output_df[
self.output_df["true_labels_indices"].isin([true_label_idx])
& self.output_df["predicted_labels_indices"].isin([pred_label_idx])
].index.tolist()
for idx in instances_idx:
sentence = self.output_analytics["sentences"][idx]
if true_label_idx != pred_label_idx:
stylized_sentence = self.msg_printer.text(
title=sentence,
icon=MESSAGES.FAIL,
color=MESSAGES.FAIL,
no_print=True,
)
else:
stylized_sentence = self.msg_printer.text(
title=sentence,
icon=MESSAGES.GOOD,
color=MESSAGES.GOOD,
no_print=True,
)
print(stylized_sentence)
def print_confusion_matrix(self) -> None:
""" Prints the confusion matrix for the test dataset
"""
self.metrics_calculator.print_confusion_metrics(
predicted_probs=self.output_analytics["all_pred_probs"],
labels=self.output_analytics["true_labels_indices"].unsqueeze(1),
)
def report_metrics(self):
metrics = self.metrics_calculator.report_metrics()
for namespace, table in metrics.items():
self.msg_printer.divider(f"Results for {namespace.upper()}")
print(table)
@deprecated(reason="This method is deprecated. It will be removed in version 0.1")
def generate_report_for_paper(self):
""" Generates just the fscore to be used in reporting on print
"""
paper_report = self.metrics_calculator.report_metrics(report_type="paper")
class_numbers = sorted(self.idx2labelname_mapping.keys(), reverse=False)
row_names = [
f"class_{class_num} - ({self.idx2labelname_mapping[class_num]})"
for class_num in class_numbers
]
row_names.extend([f"Micro-Fscore", f"Macro-Fscore"])
return paper_report, row_names
def model_output_dict_to_prediction_indices_names(
self, model_output_dict: Dict[str, Any]
) -> (List[int], List[str]):
normalized_probs = model_output_dict["normalized_probs"]
pred_probs, pred_indices = torch.topk(normalized_probs, k=1, dim=1)
pred_indices = pred_indices.squeeze(1).tolist()
pred_classnames = [
self.idx2labelname_mapping[pred_index] for pred_index in pred_indices
]
return pred_indices, pred_classnames
def infer_batch(self, lines: List[str]) -> List[str]:
""" Runs inference on a batch of lines
This method can be used for applications. When APIS are being developed
to serve over the web or when terminal applications are being written
to read from files and infer, this method comes in handy
Parameters
----------
lines : List[str]
            List of text spans to be inferred
Returns
-------
List[str]
            Returns the class names for all the sentences in the input
"""
lines = [self.datasets_manager.make_line(line=line) for line in lines]
model_output_dict = self.model_forward_on_lines(lines=lines)
_, pred_classnames = self.model_output_dict_to_prediction_indices_names(
model_output_dict=model_output_dict
)
return pred_classnames
def on_user_input(self, line: str) -> str:
""" Runs the inference when the user inputs a single sentence either on the terminal
or some other application
Parameters
----------
line : str
The line entered by the user
Returns
-------
str
            The class label that is inferred for the user input
"""
return self.infer_batch(lines=[line])[0]
def get_true_label_indices_names(
self, labels: List[Label]
) -> (List[int], List[str]):
label_names = [label.text for label in labels]
label_indices = [
self.labelname2idx_mapping[label_name] for label_name in label_names
]
return label_indices, label_names
def run_test(self):
""" Runs inference and reports test metrics
"""
self.output_analytics = self.run_inference()
        self.output_df = pd.DataFrame(self.output_analytics)
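        # Hedged usage sketch (requires a trained checkpoint; the file name is an assumption):
        #   infer = ClassificationInference(model, "best_model.pt", datasets_manager)
        #   infer.run_test()
        #   infer.report_metrics()
        #   infer.infer_batch(["We evaluate the method on the benchmark dataset."])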
import warnings
from collections import OrderedDict
from datetime import time
import tables as tb
import pandas as pd
import pandas.lib as lib
import numpy as np
import pandas.io.pytables as pdtables
from trtools.compat import izip, pickle
from trtools.io.common import _filename
from trtools.io.table_indexing import create_slices
MIN_ITEMSIZE = 10
class MismatchColumnsError(Exception):
pass
def convert_frame(df):
"""
Input: DataFrame
Output: pytable table description and pytable compatible recarray
"""
sdict = OrderedDict()
atoms = OrderedDict()
types = OrderedDict()
#index
index_name = df.index.name or 'pd_index'
converted, inferred_type, atom = _convert_obj(df.index)
atoms[index_name] = atom
sdict[index_name] = converted
types[index_name] = inferred_type
# columns
for col in df.columns:
converted, inferred_type, atom = _convert_obj(df[col])
atoms[col] = atom
sdict[col] = converted
types[col] = inferred_type
# create table desc
desc = OrderedDict()
for pos, data in enumerate(atoms.items()):
k, atom = data
col = tb.Col.from_atom(atom, pos=pos)
desc[str(k)] = col
# create recarray
dtypes = [(str(k), v.dtype) for k, v in list(sdict.items())]
recs = np.recarray(shape=len(df), dtype=dtypes)
for k, v in list(sdict.items()):
recs[str(k)] = v
return desc, recs, types
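# Hedged illustration of convert_frame (not part of the original module):
# for a small frame with a DatetimeIndex and one float column it returns a
# pytables description, a recarray holding the index plus the columns, and the
# inferred type per column.
#
#   df = pd.DataFrame({'price': [1.5, 2.0]},
#                     index=pd.date_range('2020-01-01', periods=2, name='date'))
#   desc, recs, types = convert_frame(df)
#   # types -> {'date': 'datetime64', 'price': 'floating'}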
def _convert_obj(obj):
"""
Convert a series to pytables values and Atom
"""
if isinstance(obj, pd.DatetimeIndex):
converted = obj.asi8
return converted, 'datetime64', tb.Int64Atom()
elif isinstance(obj, pd.PeriodIndex):
converted = obj.values
return converted, 'periodindex', tb.Int64Atom()
elif isinstance(obj, pd.PeriodIndex):
converted = obj.values
return converted, 'int64', tb.Int64Atom()
inferred_type = lib.infer_dtype(obj)
values = np.asarray(obj)
if inferred_type == 'datetime64':
converted = values.view('i8')
return converted, inferred_type, tb.Int64Atom()
if inferred_type == 'string':
# TODO, am I doing this right?
converted = np.array(list(values), dtype=np.bytes_)
itemsize = converted.dtype.itemsize
# for OBT, can't assume value will be right for future
# frame keys
if itemsize < MIN_ITEMSIZE:
itemsize = MIN_ITEMSIZE
converted = converted.astype("S{0}".format(itemsize))
return converted, inferred_type, tb.StringAtom(itemsize)
elif inferred_type == 'unicode':
# table's don't seem to support objects
raise Exception("Unsupported inferred_type {0}".format(inferred_type))
converted = np.asarray(values, dtype='O')
return converted, inferred_type, tb.ObjectAtom()
elif inferred_type == 'datetime':
converted = np.array([(time.mktime(v.timetuple()) +
v.microsecond / 1E6) for v in values],
dtype=np.float64)
return converted, inferred_type, tb.Time64Atom()
elif inferred_type == 'integer':
converted = np.asarray(values, dtype=np.int64)
return converted, inferred_type, tb.Int64Atom()
elif inferred_type == 'floating':
converted = np.asarray(values, dtype=np.float64)
return converted, inferred_type, tb.Float64Atom()
raise Exception("Unsupported inferred_type {0} {1}".format(inferred_type, str(values[-5:])))
def _handle(obj):
if isinstance(obj, tb.file.File):
handle = obj
else:
handle = obj._v_file
return _wrap(handle)
def _meta(obj, meta=None):
obj = _unwrap(obj)
if isinstance(obj, tb.file.File):
obj = obj.root
return _meta_file(obj, meta)
handle = _handle(obj)
type = handle.type
if type == 'directory':
return _meta_dir(obj, meta)
return _meta_file(obj, meta)
def _meta_file(obj, meta):
if meta:
obj._v_attrs.pd_meta = meta
return
try:
meta = obj._v_attrs.pd_meta
if isinstance(meta, str):
meta = pickle.loads(meta)
return meta
except:
return {}
def _meta_path(obj):
import os.path
dir = os.path.dirname(obj._v_file.filename)
filename = obj._v_pathname[1:]
bits = filename.split('/')
bits.append('meta')
filename = ".".join(bits)
filepath = os.path.join(dir, filename)
return filepath
def _meta_dir(obj, meta=None):
filepath = _meta_path(obj)
if meta:
with open(filepath, 'wb') as f:
pickle.dump(meta, f)
return
try:
with open(filepath, 'rb') as f:
meta = pickle.load(f)
return meta
except:
return {}
def _name(table):
try:
name = table.attrs.pandas_name
except:
name = table._v_name
return name
def _columns(table):
try:
columns = list(_meta(table)['columns'])
except:
# assume first is index
columns = table.colnames[1:]
return columns
def _index_name(obj):
if isinstance(obj, pd.DataFrame):
return _index_name_frame(obj)
return _index_name_table(obj)
def _index_name_table(table):
try:
index_name = _meta(table)['index_name']
except:
# assume first is index
index_name = table.colnames[0]
return index_name
def _index_name_frame(df):
#TODO support multiindex
index = df.index
def unconvert_obj(values, type):
if type == 'datetime64':
return values.astype("M8[ns]")
if type == 'string':
return values.astype(np.unicode_)
return values
def unconvert_index(index_values, type):
return pdtables._unconvert_index(index_values, type)
def create_table(group, name, desc, types, filters=None, expectedrows=None, title=None, columns=None, index_name=None, extra_meta=None):
if title is None:
title = name
with warnings.catch_warnings(): # ignore the name warnings
table = group._v_file.createTable(group, name, desc, title,
expectedrows=expectedrows, filters=filters)
meta = {}
meta['columns'] = columns or list(desc.keys())
meta['value_types'] = types
meta['index_name'] = index_name
meta['name'] = name
if extra_meta:
meta.update(extra_meta)
_meta(table, meta)
return table
def frame_to_table(name, df, group, filters=None, expectedrows=None, create_only=False, *args, **kwargs):
"""
create_only will create the table but not appending the DF.
Since the machinery for figuring out a table definition and converting values for
appending are the same.
"""
# TODO: potentially could change this to subset the DF so we don't convert and iterate over all
# the values
hfile = group._v_file
# kind of a kludge to get series to work
if isinstance(df, pd.Series):
series_name = 'vals'
df = pd.DataFrame({series_name:df}, index=df.index)
desc, recs, types = convert_frame(df)
columns = list(df.columns)
index_name = df.index.name or 'pd_index'
table = create_table(group, name, desc, types, filters=filters, columns=columns,
expectedrows=expectedrows, index_name=index_name,*args, **kwargs)
if not create_only:
table.append(recs)
hfile.flush()
def table_to_frame(table, where=None):
"""
Simple converison of table to DataFrame
"""
if where:
try:
data = table_where(table, where)
except Exception as err:
raise Exception("readWhere error: {0} {1}".format(where, str(err)))
else:
data = table.read()
df = table_data_to_frame(data, table)
return df
def copy_table_def(group, name, orig):
table_meta = _meta(orig)
desc = orig.description
types = table_meta['value_types']
index_name = table_meta['index_name']
columns = table_meta['columns']
expectedrows = orig.nrows
table = group.create_table(name, desc, types, columns=columns, index_name=index_name, expectedrows=expectedrows)
return table
def table_where(table, where):
"""
Optimized Where
"""
return table.readWhere(where)
def get_table_index(table, index_name=None, types=None):
"""
Get the pandas index from a pytable
"""
if index_name is None:
index_name = _index_name(table)
if index_name is None: #neither passed in or set in meta
return None
if types is None:
meta = _meta(table)
types = meta.setdefault('value_types', {})
index_values = table.col(index_name)
index = unconvert_index(index_values, types[index_name])
return index
def _data_names(data):
if hasattr(data, 'keys'):
return list(data.keys())
if hasattr(data, 'dtype'):
return data.dtype.names
def table_data_to_frame(data, table, columns=None):
"""
Given the pytables.recarray data and the metadata taken from table,
create a DataFrame
"""
columns = columns or _columns(table)
index_name = _index_name(table)
name = _name(table)
meta = _meta(table)
types = meta.setdefault('value_types', {})
index = None
if index_name:
if index_name not in _data_names(data): # handle case where we dont send index with data
index_values = table.col(index_name)
else:
index_values = data[index_name]
index = unconvert_index(index_values, types[index_name])
try:
columns.remove(index_name)
except ValueError:
pass
sdict = {}
for col in columns:
# recarrays have only str columns
temp = data[str(col)]
temp = unconvert_obj(temp, types[col])
sdict[col] = temp
    df = pd.DataFrame(sdict, columns=columns, index=index)
    return df
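
if __name__ == "__main__":
    # Hedged round-trip sketch, not part of the original module. The file name
    # 'example_store.h5' is a placeholder; the block only demonstrates how
    # frame_to_table and table_to_frame are meant to be paired.
    example_df = pd.DataFrame(
        {'price': [1.5, 2.0, 2.5]},
        index=pd.date_range('2020-01-01', periods=3, name='date'))
    h5 = tb.open_file('example_store.h5', mode='w')
    try:
        frame_to_table('prices', example_df, h5.root)
        print(table_to_frame(h5.root.prices))
    finally:
        h5.close()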
# -*- coding: utf-8 -*-
"""
Routine to read water quality data of different formats and transform to HGC format
<NAME>, <NAME>
KWR, April-July 2020
Edit history: 24-08-2020: by Xin, check unit conversion,
"""
import copy
import logging
import numpy as np
import pandas as pd
# from unit_converter.converter import convert, converts
import molmass
from pathlib import Path
from hgc import constants # for reading the default csv files
from hgc.constants.constants import mw, units_wt_as
# %% hgc.io.defaults
def default_feature_units():
"""
Generate a dictionary with the desired prefixes-units (values) for all feature defined in HGC (keys).
Returns
-------
Dictionary
Example
-------
{'Fe': 'mg/L', 'Al': 'µg/L'}
"""
# load default alias table
df = pd.read_csv(Path(constants.__file__).parent / 'default_features_alias.csv', encoding='utf-8', header=0)
mask = ~(df['DefaultUnits'].isnull())
dct_alias = dict(zip(df['Feature'][mask], df['DefaultUnits'][mask]))
# extract feature names and get units defined in HGC
feature = df['Feature']
DefaultUnits = [units_wt_as(key) for key in feature] # OR use command: list(map(units_wt_as, feature))
dct_hgc = {k: v for k, v in dict(zip(feature, DefaultUnits)).items() if v is not None}
    # combine dictionaries for default units. If defined in HGC, use it. Otherwise use whatever is defined in the alias table.
dct = {**dct_alias,
**dct_hgc,
'ph_field': '1', # give pH unit 1 to prevent error --> check in the future release
'ph_lab': '1',
'ph': '1',
}
return dct
def default_unit_conversion_factor():
"""
Generate a dictionary of conversion factors for several unit-prefixes.
Returns
-------
Dictionary :
Conversion factors (values) for several unit-prefixes (keys).
Example
-------
{'g/L': 1., 'mg/L': 0.001, 'μg/L': 1e-6}
"""
# units, prefix and conversion factors generated by this function
units = ['1', 'm', 'g', 'L', 'S', 'V', 'mol']
prefixes = {'p': 1e-12, 'n': 1e-9, 'μ': 1e-6, 'm': 1e-3, 'c': 0.01, 'k': 1000,
'100m' : 0.1, '50m': 0.05}
# make a dictionary of all combinations of units/ prefixes (keys) and
# conversion values (values) {mL: 0.001, L: 1, etc.}
dct = dict(zip(units, len(units) * [1.])) # units without prefix get conversion factor "1"
for unit in units: # loop through all combinations of units and prefixes
for key, value in prefixes.items():
dct[key + unit] = value
dct = {**dct, '%': .01} # for correction percentage
return dct
# @Tin, MartinK:
# The following defaults need to be called (and adjusted) by the user.
# For example: header_format = {**default_header_format(), locationID: 'integer'}}.
# Is it necessary to define them as functions e.g. default_na_values()?
# or can they be constants e.g. DEFAULT_NA_VALUES?
def default_column_dtype():
"""
Generate a dictionary with the default data type of columns.
Returns
-------
Dictionary
Header or column name (keys) and dtype (values)
Default
-------
{
'LocationID': 'str',
'Datetime': 'datetime',
'SampleID': 'str',
'Feature': 'str',
'Unit': 'str',
'Value': 'float64',
}
"""
dct = {
'LocationID': 'str',
'Datetime': 'datetime',
'SampleID': 'str',
'Feature': 'str',
'Unit': 'str',
'Value': 'float64',
}
return dct
def default_map_header():
"""
Generate a dictionary with the mapping of headers in the original file (keys) equal headings in output df (values).
Returns
-------
Dictionary
Header or column name in original file (keys) and header of output df (values).
Default
-------
{
'LocationID': 'LocationID',
'Datetime': 'Datetime',
'SampleID': 'SampleID',
'Feature': 'Feature',
'Unit': 'Unit',
'Value': 'Value',
}
"""
dct = {
'LocationID': 'LocationID',
'Datetime': 'Datetime',
'SampleID': 'SampleID',
'Feature': 'Feature',
'Unit': 'Unit',
'Value': 'Value',
}
return dct
def default_na_values():
"""
Generate list of values that are recognized as NaN.
The list is based on the default values of python, but with 'NA' left
out to prevent NA (Sodium) being read as NaN
Returns
-------
List
"""
lst = ['#N/A', '#N/A N/A', '#NA', '-1.#IND', '-1.#QNAN', '-NaN', '-nan',
'1.#IND', '1.#QNAN', 'N/A', 'NULL', 'NaN', 'n/a', 'nan', 'null']
return lst
# %% HGC.IO utils
def read_file(file_path='', sheet_name=0, na_values=[], encoding='', delimiter=None, **kwargs):
"""Read excel of csv file."""
file_extension = file_path.split('.')[-1]
if (file_extension == 'xlsx') or (file_extension == 'xls'):
try:
df = pd.read_excel(file_path,
sheet_name=sheet_name,
header=None,
index_col=None,
na_values=na_values,
keep_default_na=False)
# logger.info('file read: ' + file_path)
except:
            df = pd.DataFrame()
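    # Hedged assumption: the original function continues below this point; a csv
    # branch analogous to the Excel one might read
    #   elif file_extension == 'csv':
    #       try:
    #           df = pd.read_csv(file_path, header=None, index_col=None,
    #                            na_values=na_values, keep_default_na=False,
    #                            encoding=encoding, delimiter=delimiter)
    #       except:
    #           df = pd.DataFrame()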
"""Unittests for the functions in raw, using example datasets."""
import unittest
import pandas.testing as pt
import pandas as pd
from io import StringIO
from gnssmapper import log
import gnssmapper.common.time as tm
import gnssmapper.common.constants as cn
class TestReadCSV(unittest.TestCase):
def setUp(self):
self.filedir = "./tests/data/"
self.filepath = self.filedir+"log_20200211.txt"
def test_read_csv_(self) -> None:
raw_var,fix = log.read_csv_(self.filepath)
raw_expected = pd.DataFrame({
'TimeNanos': [34554000000],
'FullBiasNanos':[-1265446151445559028],
'Svid': [2],
'ConstellationType': [1],
'State': [16431],
'Cn0DbHz': [22.340620040893555]}).convert_dtypes(convert_floating=False,convert_boolean=False,convert_string=False)
fix_expected = pd.DataFrame({
'Latitude': [51.524707],
'Longitude': [-0.134140],
'Altitude': [114.858938],
'(UTC)TimeInMs': [1581410967999]
}).convert_dtypes(convert_floating=False,convert_boolean=False,convert_string=False)
pt.assert_frame_equal(
raw_var.loc[0:0, ['TimeNanos','FullBiasNanos','Svid', 'ConstellationType','State','Cn0DbHz']],
raw_expected)
pt.assert_frame_equal(
fix.loc[0:0, ['Latitude', 'Longitude', 'Altitude', '(UTC)TimeInMs']],
fix_expected
)
def test_platform(self) -> None:
#copy of log.txt with platform replaced by 6
wrong_platform = self.filedir+"wrong_platform.txt"
self.assertWarnsRegex(UserWarning,"Platform 6 found in log file",log.read_csv_,wrong_platform)
def test_version(self) -> None:
#copy of log.txt with version replaced by 1.3.9.9
wrong_version = self.filedir+"wrong_version.txt"
self.assertRaisesRegex(ValueError,"Version 1.3.9.9 found in log file",log.read_csv_,wrong_version)
def test_compare_version(self) -> None:
low = "1.3"
high = "1.4"
expected = "1.4.0.0"
self.assertTrue(log._compare_version(high, expected))
self.assertFalse(log._compare_version(low, expected))
def test_compare_platform(self) -> None:
low = set(["6","M",6])
high = set(["7","N",7,"O",10,"10"])
expected = "7"
self.assertTrue(all([log._compare_platform(x, expected) for x in high]))
self.assertFalse(any([log._compare_platform(x, expected) for x in low]))
class TestProcessRaw(unittest.TestCase):
def setUp(self):
# received 0.1 second after start of week
rx_ = pd.DataFrame(
{'week': [2000], 'day': [0], 'time': [1 * 10 ** 8]})
# transmitted at 0.01 second after start of week
tx_ = pd.DataFrame(
{'week': [2000], 'day': [0], 'time': [1 * 10 ** 7]})
d = {'ConstellationType': [1],
'Svid': [1],
'TimeNanos': tm.gpsweek_to_gps(rx_.week, rx_.day, rx_.time),
'FullBiasNanos': [0],
'ReceivedSvTimeNanos': tm.gpsweek_to_gps(0, tx_.day, tx_.time),
'State': [9]}
self.input = pd.DataFrame(d)
self.tx_gps = tm.gpsweek_to_gps(tx_.week, tx_.day, tx_.time).convert_dtypes()
self.rx = rx_
def test_galileo_ambiguity(self) -> None:
import numpy as np
expected = np.array([6, 7, 8, 9, 10])*cn.nanos_in_period['E']
testdata = np.array([1, 2, 3, 4, 5])+expected
np.testing.assert_array_equal(
log.galileo_ambiguity(testdata),
expected)
def test_period_start_time(self) -> None:
import numpy as np
rx = self.input.TimeNanos[0]
state = 9
constellation = 'G'
expected = tm.gpsweek_to_gps(self.rx.week,
                                     pd.Series([0])
import copy
import logging
import pandas as pd
from pandas import DataFrame, Series, Int64Index
from roughsets_base.roughset_si import RoughSetSI
class RoughSetDT(RoughSetSI):
"""Class RoughSet to model a decision table (DT).
DT = f(X, A, y),
where:
X - objects of universe,
A - attributes describing objects of X,
y - a decision attribute related to X.
"""
def __init__(self, X: DataFrame, y: Series = None, ind_index_name="IND_INDEX"):
"""Initialize object of class RoughSet
Parameters
----------
X: DataFrame
Objects of universe of type: pandas DataFrame
y: Series
Decision set related to X or None if used simple SI
ind_index_name: string, default 'IND_INDEX'
Name of a special column to store index of discernibilty relation,
computed by the function: get_indiscernibility_relations function.
Note: X and y are computed as data structures with nominal values.
References
----------
pandas array: https://pandas.pydata.org/docs/reference/arrays.html
"""
super().__init__(X, ind_index_name)
self.default_class_attr = "target"
if isinstance(y, list):
y = pd.Series(y, name=self.default_class_attr)
self.__assert_X_y(X, y)
self.y = y
if self.ind_rel_column_index_name in self.X.columns:
raise ValueError(f"You can not use {self.ind_rel_column_index_name} as a column name.")
        self.ind_index_name = ind_index_name  # name of the auxiliary column for the indiscernibility relation
def __assert_X_y(self, X, y):
if not isinstance(y, Series):
raise Exception("y must be a type of list or Pandas Series. See more: https://pandas.pydata.org/docs/reference/api/pandas.Series.html")
if not isinstance(X, DataFrame):
raise Exception("X must be a type of Pandas DataFrame. See more: https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html")
if not len(X.index) == len(y.index):
raise Exception("Number of objects in X does not match number of decisions in y.")
def concat_X_and_y(self, X, y) -> DataFrame:
"""Add y series as a column to X DataFrame"""
        Xy = pd.concat([X, y], axis=1)
        return Xy
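
if __name__ == "__main__":
    # Hedged usage sketch with made-up data (not from the original tests):
    # builds a tiny decision table and appends the decision attribute to it.
    X_demo = pd.DataFrame({'outlook': ['sunny', 'rain', 'rain'],
                           'windy': ['no', 'yes', 'no']})
    y_demo = pd.Series(['play', 'stay', 'play'], name='target')
    rs = RoughSetDT(X_demo, y_demo)
    print(rs.concat_X_and_y(rs.X, rs.y))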
import numpy as np
import pandas as pd
from sklearn.metrics import roc_curve, precision_recall_curve, auc
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def viz_train_val_data(hist_scores, model_str, model_timestamp):
# Plot training & validation metrics
loss_train, kl_train, acc_train, ap_train, roc_train, loss_val, acc_val, ap_val, roc_val = hist_scores
figure, axis = plt.subplots(2,2)
axis[0, 0].plot(loss_train)
axis[0, 0].plot(loss_val, color='tab:orange')
axis[0, 0].set_title('Total loss')
axis[0, 0].set_xlabel('Epoch')
axis[0, 1].plot(ap_train)
axis[0, 1].plot(ap_val, color='tab:orange')
#axis[0, 1].set_ylim([0.5, 1.0])
axis[0, 1].set_title('Average Precision')
axis[0, 1].set_xlabel('Epoch')
axis[1, 0].plot([np.subtract(x1, x2) for (x1, x2) in zip(loss_train[1:], kl_train[1:])])
axis[1, 0].set_title('Recon loss')
axis[1, 0].set_xlabel('Epoch')
if model_str == 'gcn_vae':
axis2 = axis[1, 0].twinx()
axis2.plot(kl_train, color='tab:orange')
axis[1, 0].set_title('Recon/KL loss')
axis[1, 1].plot(roc_train)
axis[1, 1].plot(roc_val, color='tab:orange')
#axis[1, 1].set_ylim([0.5, 1.0])
axis[1, 1].set_title('ROC AUC')
axis[1, 1].set_xlabel('Epoch')
figure.tight_layout()
figure.savefig('logs/training_plots/' + model_timestamp + '_training_history.png', dpi=300)
plt.close(figure)
def viz_roc_pr_curve(y_pred, y_true, model_timestamp):
figure, axis = plt.subplots(1,3)
figure.set_size_inches(12.8, 4.8)
#plot ROC curve
fpr, tpr, thresholds = roc_curve(1-y_true, 1-y_pred)
axis[0].plot(fpr, tpr, label="negative class: auc="+str(np.round(auc(fpr, tpr),2)))
fpr, tpr, thresholds = roc_curve(y_true, y_pred)
axis[0].plot(fpr, tpr, label="positive class: auc="+str(np.round(auc(fpr, tpr),2)))
thresholdOpt, _, fprOpt, tprOpt = max_gmean_thresh(fpr, tpr, thresholds)
axis[0].plot(fprOpt, tprOpt, 'ro', label=('max g-mean threshold: ' + str(thresholdOpt)))
axis[0].set_title("ROC curve")
axis[0].set_xlabel("False Positive Rate")
axis[0].set_ylabel("True Positive Rate")
axis[0].set_xlim([0, 1])
axis[0].set_ylim([0, 1])
axis[0].legend(loc=4)
#plot PR curve
precision, recall, thresholds = precision_recall_curve(1-y_true, 1-y_pred)
axis[1].plot(recall, precision, label="negative class: auc="+str(np.round(auc(recall, precision),2)))
precision, recall, thresholds = precision_recall_curve(y_true, y_pred)
axis[1].plot(recall, precision, label="positive class: auc="+str(np.round(auc(recall, precision),2)))
axis[1].set_title("PR curve")
axis[1].set_xlabel("Recall")
axis[1].set_ylabel("Precision")
axis[1].set_xlim([0, 1])
axis[1].set_ylim([0, 1])
axis[1].legend(loc=2)
#plot histogram
axis[2].hist(y_pred, bins=100)
axis[2].set_title("Histogram of predictions (" + str(len(y_pred)) + ")")
axis[2].set_xlabel("Prediction")
axis[2].set_ylabel("Count")
axis[2].set_xlim([0, 1])
figure.tight_layout()
figure.savefig('logs/training_plots/' + model_timestamp + '_ROC_PR_curve.png', dpi=300)
plt.close(figure)
def max_gmean_thresh(fpr, tpr, thresholds):
gmean = np.sqrt(tpr * (1 - fpr))
index = np.argmax(gmean)
thresholdOpt = np.round(thresholds[index], 2)
gmeanOpt = np.round(gmean[index], 2)
fprOpt = np.round(fpr[index], 2)
tprOpt = np.round(tpr[index], 2)
return thresholdOpt, gmeanOpt, fprOpt, tprOpt
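# Hedged worked example for max_gmean_thresh (illustrative arrays):
#   fpr = np.array([0.0, 0.2, 0.5]); tpr = np.array([0.4, 0.8, 0.9])
#   thresholds = np.array([0.9, 0.6, 0.3])
#   g-means = sqrt(tpr * (1 - fpr)) = [0.63, 0.80, 0.67], so the optimum is at
#   index 1: threshold 0.6 with fpr 0.2 and tpr 0.8.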
def save_adj(adj, outPath, model_timestamp, gene_names):
if outPath is None:
outPath = "logs/outputs/" + model_timestamp + '_'
#save adjacency matrix
np.savetxt(outPath + 'adj_pred.csv', adj, delimiter=",")
#save gene interaction list
    adj_df = pd.DataFrame(data=adj, index=gene_names, columns=gene_names)
import os
import requests
import warnings
import numpy as np
import pandas as pd
from functools import reduce
from io import BytesIO
import sys
sys.path.append("..")
NUM_TESS_SECTORS = 27
TESS_DATAPATH = os.path.abspath(os.path.dirname(os.getcwd())) + "/data/tesstargets/" # or change
assert TESS_DATAPATH[-1] == os.path.sep, "must end datapath with {}".format(os.path.sep)
def get_tess_stars_from_sector(sector_num, datapath=TESS_DATAPATH, subpath=None, force_redownload=False, verbose=True):
'''
Queries https://tess.mit.edu/observations/target-lists/ for the input catalog from TESS sector 'sector_num',
and for each target in that list, gets its data from astroquery and joins the two catalogs.
Arguments
---------
sector_num : int
The TESS sector number for which information is being requested.
datapath : str
The top-level path to which data should be stored.
subpath : str
The subdirectory (datapath/subpath) to which data should be stored; will create it if it doesn't already exist.
verbose : bool
Whether to print statements on the script's progress.
Returns
-------
stars : pd.DataFrame
The joined TIC and target-list data.
'''
from astroquery.mast import Catalogs
# sets up file paths and names
sector = str(sector_num).zfill(3)
if datapath is None:
datapath = os.getcwd()
if subpath is None:
subpath = "TESS_targets_S{}.csv".format(sector)
fullpath = os.path.join(datapath, subpath)
if (not os.path.exists(fullpath)) or force_redownload:
# queries the target list
url = 'https://tess.mit.edu/wp-content/uploads/all_targets_S{}_v1.csv'.format(sector)
if verbose:
print("Getting sector {0} observed targets from {1}.".format(sector_num, url))
req = requests.get(url)
if not req.ok:
raise requests.exceptions.HTTPError("Data from sector {} is not available.".format(sector_num))
observations = pd.read_csv(BytesIO(req.content), comment='#')[['TICID', 'Camera', 'CCD']] # MAST has Tmag, RA, Dec at higher precision
observed_ticids = observations['TICID'].values
# queries MAST for stellar data
if verbose:
print("Querying MAST for sector {0} observed targets.".format(sector_num))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
tic_data = Catalogs.query_criteria(catalog='Tic', ID=observed_ticids).to_pandas()
# tic_data = tic_data.fillna(-1).astype({'ID': int, 'HIP' : int, 'KIC' : int, 'numcont' : int})
tic_data = tic_data.astype({"ID" : int})
merged_data = tic_data.merge(observations, left_on='ID', right_on='TICID')
noises_path = os.path.join(datapath, "TESS_noise_S{}.csv".format(sector))
if os.path.exists(noises_path):
merged_data = merged_data.merge(pd.read_csv(noises_path, index_col=0, comment='#'), on="ID")
else:
print("Noise values not found on path: change file location or download using get_tess_photometric_noise.py.")
        merged_data = merged_data.rename(columns={"ID": "ticid"})
merged_data.to_csv(fullpath)
if verbose:
print("Saved TIC data from TESS sector {0} to path {1}.".format(sector_num, fullpath))
return merged_data
else:
return pd.read_csv(fullpath, index_col=0)
def get_stellar_data(sectors=True):
'''
Utility function to call get_tess_stars_from_sector on a specific directory.
Arguments
---------
sectors : bool, int, or list of ints
True for 'all available', an int for just one sector, or a list of ints for a subset of sectors.
'''
if sectors is True:
i = 1
while True:
try:
get_tess_stars_from_sector(i, datapath=TESS_DATAPATH)
print()
i += 1
except requests.exceptions.HTTPError:
break
elif isinstance(sectors, int):
get_tess_stars_from_sector(sectors, datapath=TESS_DATAPATH)
elif isinstance(sectors, list):
for s in sectors:
get_tess_stars_from_sector(s, datapath=TESS_DATAPATH)
print()
else:
print("Datatype of 'sectors' not understood: set to either True, an integer, or a list of integers.")
def check_num_tess_sectors():
i = 1
has_data = True
while has_data:
url = 'https://tess.mit.edu/wp-content/uploads/all_targets_S{}_v1.csv'.format(str(i).zfill(3))
r = requests.get(url)
has_data = r.ok
if has_data:
i += 1
if i - 1 != NUM_TESS_SECTORS:
print("NUM_TESS_SECTORS is listed as {0}, but data was found for {1} sectors: update the variable NUM_TESS_SECTORS for the full data.".format(NUM_TESS_SECTORS, i))
def get_tess_stellar(sectors=None, unique=True, force_resave=False, force_redownload=False):
'''
Wrapper around tess_target_stars.py to merge all sectors.
Arguments
---------
sectors : list
A list of sector IDs to query.
unique : bool
If true, this function cuts down the stellar dataframe to only unique entries, and adds a few columns.
force_resave : bool
        If true, forces a reread of the constituent files from the URL (rerun of get_tess_stars_from_sector)
Returns
-------
stlr : pd.DataFrame
The stellar dataframe. If `unique`, the returned value is instead:
stlr : pd.DataFrame
The stellar dataframe, with duplicates dropped and the following columns added:
sectors, str : the sectors in which the target was observed.
dataspan, scalar : 27.4 days times the number of sectors in which the target was observed.
dutycycle, scalar : the fraction 13.0/13.6 (for the 0.6 day downlink)
noise, scalar : the 1-hour photometric noise (replacement for CDPP but not averaged over timescales)
'''
if sectors is None:
sectors = list(range(1, NUM_TESS_SECTORS + 1))
frames = []
sector_obs = {}
sector_cnt = {}
noises = {}
for s in sectors:
datapath = os.path.join(TESS_DATAPATH, "TESS_targets_S{}.csv".format(str(s).zfill(3)))
if os.path.exists(datapath) and (not force_resave):
df = pd.read_csv(datapath, comment='#', index_col=0)
else:
df = get_tess_stars_from_sector(s, force_redownload=force_redownload)
if unique:
for ticid, noise in zip(df["ticid"].values, df["noise"].values):
if ticid not in sector_obs:
sector_obs[ticid] = str(s)
sector_cnt[ticid] = 1
noises[ticid] = str(noise)
else:
sector_obs[ticid] += ',' + str(s)
sector_cnt[ticid] += 1
noises[ticid] += ',' + str(noise)
frames.append(df)
stlr = pd.concat(frames)
if unique:
stlr.drop_duplicates(subset="ticid", inplace=True)
stlr["sectors"] = [sector_obs.get(ticid) for ticid in stlr["ticid"].values]
stlr["noise"] = [noises.get(ticid) for ticid in stlr["ticid"].values]
stlr["dataspan"] = 27.4 * np.array([sector_cnt.get(ticid) for ticid in stlr["ticid"].values])
stlr["dutycycle"] = 13.0/13.7 * np.ones_like(stlr["dataspan"])
return stlr
def get_tois(subpath="toi_catalog.csv", force_redownload=False):
'''
Request a pandas dataframe of all the TESS objects of interest.
'''
fullpath = os.path.join(TESS_DATAPATH, subpath)
if (not force_redownload) and os.path.exists(fullpath):
        return pd.read_csv(fullpath, comment='#')
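    # Hedged usage sketch (network access and a populated TESS_DATAPATH required):
    #   stlr = get_tess_stellar(sectors=[1, 2])   # merged, de-duplicated stellar table
    #   tois = get_tois()                         # TOI catalog as a DataFrame
    #   print(stlr[['ticid', 'sectors', 'dataspan', 'noise']].head())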
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 1 14:13:20 2022
@author: scott
Visualizations
--------------
Plotly-based interactive visualizations
"""
import pandas as pd
import numpy as np
import spiceypy as spice
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import plotly.graph_objects as go
import plotly
import plotly.express as px
import pdb
from Ephem import *
from Events import *
#%% Visualizing Orbital Angular Momentum Space
def plot_h_space_numeric(df,color='i',logColor=False,colorscale='Blackbody'):
'''
Plot the catalog of objects in angular momentum space.
Color by a numeric parameter.
'''
method = 'plotly'
if method == 'matplotlib':
# Simple matplotlib scatter plot
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(12, 12))
ax = fig.add_subplot(projection='3d')
ax.scatter(df.hx,df.hy,df.hz,s=1)
plt.show()
elif method == 'plotly':
# Plotly scatter
import plotly.graph_objects as go
import plotly
import plotly.express as px
# Select color data
c = df[color]
color_label = color
if logColor == True:
# Log of color
c = np.log10(c)
color_label = 'log('+color+')'
fig = go.Figure(data=[go.Scatter3d(
x=df.hx,
y=df.hy,
z=df.hz,
customdata=df[['Name','a','e','i','om','w']],
hovertext = df.Name,
hoverinfo = 'text+x+y+z',
hovertemplate=
"<b>%{customdata[0]}</b><br><br>" +
"hx: %{x:.2f}<br>" +
"hy: %{y:.2f}<br>" +
"hz: %{z:.2f}<br>" +
"a: %{customdata[1]:.2f} km<br>" +
"e: %{customdata[2]:.2f}<br>" +
"i: %{customdata[3]:.2f} deg<br>" +
"om: %{customdata[4]:.2f} deg<br>" +
"w: %{customdata[5]:.2f} deg<br>" +
"",
mode='markers',
marker=dict(
size=1,
color=c, # set color to an array/list of desired values
colorscale=colorscale, # choose a colorscale 'Viridis'
opacity=0.8,
colorbar=dict(thickness=20,title=color_label)
),
)])
# Update figure title and layout
fig.update_layout(
# title='2D Scatter',
title_x = 0.5,
xaxis=dict(
title='hx',
gridcolor='white',
gridwidth=1,
# type="log",
# exponentformat = "power",
# range = [-1, 2],
),
yaxis=dict(
title='hy',
gridcolor='white',
gridwidth=1,
# autorange = True,
# type="log",
# exponentformat = "power",
# autorange='reversed',
# range=[0,1],
),
# paper_bgcolor='rgb(243, 243, 243)',
# plot_bgcolor='rgb(243, 243, 243)',
# paper_bgcolor='rgb(0, 0, 0)',
# plot_bgcolor='rgb(0, 0, 0)',
)
# Render
plotly.offline.plot(fig, validate=False, filename='AngMomentumScatter.html')
return
def plot_h_space_cat(df,cat='vishnu_cluster'):
'''
Plot the catalog of objects in angular momentum space.
Color by a categorical parameter
'''
import plotly.graph_objects as go
import plotly
# Check if data is timeseries (from multiple months)
timeseries = False
filename = 'AngMomentumScatter.html'
mode = 'markers'
if len(df[df.duplicated(subset='NoradId')]) > 0:
# Timeseries plots need to add blank line of None values between lines
# see: https://stackoverflow.com/questions/56723792/how-to-efficiently-plot-a-large-number-of-line-shapes-where-the-points-are-conne
timeseries = True
filename = 'AngMomentumScatterTimeseries.html'
mode = 'lines+markers'
# Create figure
fig = go.Figure()
# Extract region data
from natsort import natsorted
region_names = natsorted(list(df[cat].unique())) # Names of regions
# Ensure region names are strings
region_names = [str(x) for x in region_names]
df[cat] = df[cat].astype(str)
if timeseries == False:
region_data = {region:df.query(cat+" == '%s'" %region)
for region in region_names}
else:
# Timeseries data
# Loop through regions
region_data = {} # Instantiate region data dict
for region in region_names:
# Extract the data
data = df.query(cat+" == '%s'" %region) # Get the data
data = data.sort_values(by=['NoradId','Epoch']).reset_index(drop=True)
# Add blank rows between groups of objects
grouped = data.groupby('NoradId')
data = pd.concat([i.append({'NoradId': None}, ignore_index=True) for _, i in grouped]).reset_index(drop=True)
# Append to dict
region_data.update({region : data})
# Add traces
for region_name, region in region_data.items():
# Get the coordinates
x = region['hx']
y = region['hy']
z = region['hz']
fig.add_trace(go.Scatter3d(
x=x,
y=y,
z=z,
name = region_name,
customdata=region[['Name','a','e','i','om','w']],
hovertext = region['Name'],
hoverinfo = 'text+x+y+z',
hovertemplate=
"<b>%{customdata[0]}</b><br><br>" +
"hx: %{x:.2f}<br>" +
"hy: %{y:.2f}<br>" +
"hz: %{z:.2f}<br>" +
"a: %{customdata[1]:.2f} km<br>" +
"e: %{customdata[2]:.2f}<br>" +
"i: %{customdata[3]:.2f} deg<br>" +
"om: %{customdata[4]:.2f} deg<br>" +
"w: %{customdata[5]:.2f} deg<br>" +
"",
mode=mode,
marker=dict(
size=1,
# color = color_dict[region_name],
opacity=0.8,
# colorbar=dict(thickness=20,title=cat)
),
)
)
if timeseries == True:
# Do not connect separate timeseries across gaps
fig.update_traces(connectgaps=False)
# Update figure title and layout
fig.update_layout(
# title='2D Scatter',
title_x = 0.5,
xaxis=dict(
title='hx',
gridcolor='white',
gridwidth=1,
# type="log",
# exponentformat = "power",
# range = [-1, 2],
),
yaxis=dict(
title='hy',
gridcolor='white',
gridwidth=1,
# autorange = True,
# type="log",
# exponentformat = "power",
# autorange='reversed',
# range=[0,1],
),
# paper_bgcolor='rgb(243, 243, 243)',
# plot_bgcolor='rgb(243, 243, 243)',
# paper_bgcolor='rgb(0, 0, 0)',
# plot_bgcolor='rgb(0, 0, 0)',
)
# Update figure layout
fig.update_layout(legend=dict(
title='Clusters: {}'.format(cat),
itemsizing='constant',
itemdoubleclick="toggleothers",
# yanchor="top",
# y=0.99,
# xanchor="right",
# x=0.01,
))
# Update ranges
fig.update_layout(
scene = dict(
xaxis = dict(nticks=4, range=[-20*1E4,20*1E4],),
yaxis = dict(nticks=4, range=[-20*1E4,20*1E4],),
zaxis = dict(nticks=4, range=[-20*1E4,20*1E4],),
aspectmode = 'cube',
),
# width=700,
# margin=dict(r=20, l=10, b=10, t=10)
)
# Render
plotly.offline.plot(fig, validate=False, filename=filename)
return
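# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A minimal, made-up catalog with the columns the two plotting functions above expect
# (hx/hy/hz, orbital elements, Name, NoradId, and a cluster label). Values are arbitrary
# and the module's own dependencies (plotly, natsort) are assumed to be installed.
def _example_h_space_plots():
    import numpy as np
    import pandas as pd
    rng = np.random.default_rng(0)
    n = 50
    df = pd.DataFrame({
        'Name': ['OBJ{}'.format(k) for k in range(n)],
        'NoradId': np.arange(n),
        'hx': rng.normal(0, 5e4, n), 'hy': rng.normal(0, 5e4, n), 'hz': rng.normal(6e4, 1e4, n),
        'a': rng.uniform(6800, 42000, n), 'e': rng.uniform(0, 0.1, n),
        'i': rng.uniform(0, 100, n), 'om': rng.uniform(0, 360, n), 'w': rng.uniform(0, 360, n),
        'vishnu_cluster': rng.integers(0, 3, n),
    })
    plot_h_space_numeric(df, color='i')         # color by a numeric field
    plot_h_space_cat(df, cat='vishnu_cluster')  # color by a categorical field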
#%% Scatter Plots
def plot_2d_scatter_numeric(df,xlabel,ylabel,color,logColor=False,size=1.):
'''
Generate a 2D scatter plot using any available numeric fields as the x, y,
and color coordinates. Returns an interactive scatter plot with hover data
showing information on each satellite.
Example:
>> plot_2d_scatter_numeric(df,'h','hz','i')
'''
import plotly.graph_objects as go
import plotly
import plotly.express as px
# Error checking
if xlabel not in list(df.columns):
raise ValueError('xlabel not in dataset')
if ylabel not in list(df.columns):
raise ValueError('ylabel not in dataset')
if color not in list(df.columns):
raise ValueError('color not in dataset')
X = df[[xlabel,ylabel]].to_numpy()
# Create grid to evaluate
Nx = 20
Ny = 20
xmin, xmax = (df[xlabel].min(), df[xlabel].max())
ymin, ymax = (df[ylabel].min(), df[ylabel].max())
# Xgrid = np.vstack(map(np.ravel, np.meshgrid(np.linspace(xmin, xmax, Nx),
# np.linspace(ymin, ymax, Ny)))).T
# Evaluate density
# from sklearn.neighbors import KernelDensity
# kde1 = KernelDensity(bandwidth=5, kernel='gaussian')
# log_dens1 = kde1.fit(X).score_samples(Xgrid)
# dens1 = X.shape[0] * np.exp(log_dens1).reshape((Ny, Nx))
# Select color data
c = df[color]
color_label = color
if logColor == True:
# Log of color
c = np.log10(c)
color_label = 'log('+color+')'
# Construct figure
fig = go.Figure()
# Add trace
fig.add_trace(
go.Scattergl(
x = df[xlabel],
y = df[ylabel],
customdata=df[['Name','a','e','i','om','w','h','hx','hy','hz']],
hovertext = df.Name,
hoverinfo = 'text+x+y+z',
hovertemplate=
"<b>%{customdata[0]}</b><br><br>" +
"x: %{x:.2f}<br>" +
"y: %{y:.2f}<br>" +
"a: %{customdata[1]:.2f} km<br>" +
"e: %{customdata[2]:.2f}<br>" +
"i: %{customdata[3]:.2f} deg<br>" +
"om: %{customdata[4]:.2f} deg<br>" +
"w: %{customdata[5]:.2f} deg<br>" +
"h: %{customdata[6]:.2f}<br>" +
"hx: %{customdata[7]:.2f}<br>" +
"hy: %{customdata[8]:.2f}<br>" +
"hz: %{customdata[9]:.2f}<br>" +
"",
mode = 'markers',
marker = dict(
color = c,
size = size,
colorscale='Blackbody', # choose a colorscale 'Viridis'
opacity=0.99,
colorbar=dict(thickness=20,title=color_label)
)
)
)
# Add density trace
# from skimage import data
# img = data.camera()
# fig.add_trace(go.Contour(
# z=dens1,
# x=np.linspace(xmin,xmax,Nx), # horizontal axis
# y=np.linspace(ymin,ymax,Ny) # vertical axis
# )
# )
# Update figure title and layout
fig.update_layout(
title='2D Scatter',
title_x = 0.5,
xaxis=dict(
title=xlabel,
gridcolor='white',
gridwidth=1,
# type="log",
# exponentformat = "power",
# range = [-1, 2],
),
yaxis=dict(
title=ylabel,
gridcolor='white',
gridwidth=1,
# autorange = True,
# type="log",
# exponentformat = "power",
# autorange='reversed',
# range=[0,1],
),
# paper_bgcolor='rgb(243, 243, 243)',
# plot_bgcolor='rgb(243, 243, 243)',
# paper_bgcolor='rgb(0, 0, 0)',
# plot_bgcolor='rgb(0, 0, 0)',
)
# Render
plotly.offline.plot(fig, validate=False, filename='Scatter.html')
return
def plot_kde(df,xlabel,ylabel):
# Error checking
if xlabel not in list(df.columns):
raise ValueError('xlabel not in dataset')
if ylabel not in list(df.columns):
raise ValueError('ylabel not in dataset')
# if color not in list(df.columns):
# raise ValueError('color not in dataset')
# Extract data
X = df[[xlabel,ylabel]].to_numpy()
Nx = 50
Ny = 50
bandwidth = 10000
xmin, xmax = (df[xlabel].min(), df[xlabel].max())
ymin, ymax = (df[ylabel].min(), df[ylabel].max())
Xgrid = np.vstack(list(map(np.ravel, np.meshgrid(np.linspace(xmin, xmax, Nx),
np.linspace(ymin, ymax, Ny))))).T
# # Create grid to evaluate
# from astroML.datasets import fetch_great_wall
# X = fetch_great_wall()
# Nx = 50
# Ny = 125
# bandwidth = 5
# xmin, xmax = (-375, -175)
# ymin, ymax = (-300, 200)
# Xgrid = np.vstack(map(np.ravel, np.meshgrid(np.linspace(xmin, xmax, Nx),
# np.linspace(ymin, ymax, Ny)))).T
# Evaluate density
from sklearn.neighbors import KernelDensity
kde1 = KernelDensity(bandwidth=bandwidth, kernel='gaussian')
log_dens1 = kde1.fit(X).score_samples(Xgrid)
dens1 = X.shape[0] * np.exp(log_dens1).reshape((Ny, Nx))
# Plot the figure
fig, ax = plt.subplots(figsize=(8, 8))
plt.imshow(dens1, origin='lower',
# norm=LogNorm(),
# cmap=plt.cm.binary,
cmap=plt.cm.hot_r,
extent=(xmin, xmax, ymin, ymax), )
plt.colorbar(label='density')
ax.scatter(X[:, 0], X[:, 1], s=1, lw=0, c='k') # Add points
# Colorbar created above; show the figure
plt.show()
return
#%% Main DIT Analysis Figures
def plot_time_windows(wins,groups,Types,
colors=None,filename=None,group_label='group',title="Time Windows"):
'''
Plot a Gantt chart displaying a set of time windows.
'''
df_list = []
for i in range(len(wins)):
# Convert window to dataframe
win = wins[i] # Extract window
dfi = window_to_dataframe(win,timefmt='datetime') # Access times (datetime)
dfi[group_label] = groups[i] # y-labels
dfi['Type'] = Types[i] # Types
df_list.append(dfi) # Append to list
# Concat all dataframes
df = pd.concat(df_list)
# Generate colors
if colors is None:
# colors = px.colors.qualitative.Plotly[:len(groups)]
colors = px.colors.qualitative.Plotly
# Create Gantt chart
fig = px.timeline(df, x_start="Start", x_end="Stop", y=group_label, color="Type",
color_discrete_sequence=colors,
title=title,
)
# Update bar height
BARHEIGHT = .1
fig.update_layout(
yaxis={"domain": [max(1 - (BARHEIGHT * len(fig.data)), 0), 1]}, margin={"t": 0, "b": 0}
)
# Add range slider
fig.update_layout(
xaxis=dict(
rangeselector=dict(
),
rangeslider=dict(
visible=True
),
type="date"
)
)
# # Add title to figure
# fig.update_layout(
# title = {'text':title}
# )
# Render
if filename is None:
filename = 'temp-plot.html'
plotly.offline.plot(fig, filename = str(filename), validate=False)
return
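# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Assumes `access_win` and `dark_win` are SPICE window cells such as those produced by
# the Events module imported above; the station labels and filename are arbitrary.
def _example_time_windows(access_win, dark_win):
    plot_time_windows(
        wins=[access_win, dark_win],
        groups=['GS-1', 'GS-1'],           # y-axis label per window
        Types=['Access', 'Station dark'],  # legend entry per window
        title='Station GS-1 access vs. darkness',
        filename='GS1_timeline.html',
    )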
def plot_visibility(dftopo,filename=None,title=None):
''' Plot the visibility data for a single ground station '''
from plotly.subplots import make_subplots
import plotly.graph_objects as go
# Constraints
cutoff_mag = 15. # Maximum magnitude for visibility
# Compute contrained stats
msat = dftopo.Vmag.to_numpy()
max_mag = np.nanmax(msat[msat<=cutoff_mag]) # Maximum (dimmest) magnitude
min_mag = np.nanmin(msat[msat<=cutoff_mag]) # Minimum (brightest) magnitude
avg_mag = np.nanmean(msat[msat<=cutoff_mag]) # Mean magnitude
start_et = dftopo.ET.min()
stop_et = dftopo.ET.max()
# Copy original dataframe
dftopo1 = dftopo.copy()
# Insert blank rows at time gaps so the line traces are broken
et = dftopo.ET.to_numpy() # Extract ephemeris time
ind = np.where(np.diff(et)>100.)[0]
df_new = pd.DataFrame(index=ind + 0.5) # New dataframe at half integer indices
dftopo = pd.concat([dftopo, df_new]).sort_index()
# Generate a subplot
fig = make_subplots(rows=3, cols=1, shared_xaxes=True)
# First trace. Solar and Sat Elevation.
fig.add_trace(
go.Scatter(x=dftopo.ET, y= np.rad2deg(dftopo['Sun.El']),
mode='lines',name='Sun.El',legendgroup = '1' ),
row=1, col=1
)
fig.add_trace(
go.Scatter(x=dftopo.ET, y= np.rad2deg(dftopo['Sat.El']),
mode='lines',name='Sat.El',legendgroup = '1' ),
row=1, col=1
)
# Second trace. Sat Range.
fig.add_trace(
go.Scatter(x=dftopo.ET, y=dftopo['Sat.R'],
mode='lines',name='Sat.Range',legendgroup = '2' ),
row=2, col=1
)
# Third trace. Visual Magnitude.
fig.add_trace(
go.Scatter(x=dftopo.ET, y=dftopo['Vmag'],
mode='lines',name='Vmag',legendgroup = '3' ),
row=3, col=1
)
fig.add_trace(
go.Scatter(x=dftopo.ET, y=dftopo['Vmag2'],
mode='lines',name='Vmag2',legendgroup = '3' ),
row=3, col=1
)
# Add shape regions
fig.add_hrect(
y0=min_mag, y1=max_mag,
fillcolor="LightSalmon", opacity=0.3,
layer="below", line_width=0,
row=3, col=1
),
# Update yaxis properties
fig.update_xaxes(title_text="Epoch (ET)", row=3, col=1)
# Update yaxis properties
fig.update_yaxes(title_text="Elevation (deg)", row=1, col=1)
fig.update_yaxes(title_text="Range (km)", row=2, col=1)
fig.update_yaxes(title_text="Visual Magnitude (mag)", row=3, col=1)
# Reverse Vmag axes
fig.update_yaxes(autorange="reversed", row=3, col=1)
# Add gap in legend groups
fig.update_layout(legend_tracegroupgap = 300)
# Update title
fig.update_layout(title_text=title)
# Render
if filename is None:
filename = 'temp-plot.html'
plotly.offline.plot(fig, filename = str(filename), validate=False)
# Reset topo
dftopo = dftopo1
return
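# --- Hedged sketch of the gap-breaking idiom used above (added for illustration) ---
# Inserting all-NaN rows at half-integer indices wherever the time step exceeds a
# threshold makes plotly break the line instead of connecting across data gaps.
# Assumes the input frame has a default RangeIndex, as in plot_visibility above.
def _example_break_time_gaps(df, time_col='ET', max_gap=100.):
    import numpy as np
    import pandas as pd
    t = df[time_col].to_numpy()
    gap_idx = np.where(np.diff(t) > max_gap)[0]   # last index before each gap
    blanks = pd.DataFrame(index=gap_idx + 0.5)    # NaN rows between the gaps
    return pd.concat([df, blanks]).sort_index()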
def plot_overpass_skyplot(dftopo, dfa, filename=None,title=None):
''' Generate a skyplot of the visible passes for a single station '''
# Bin data based on access time intervals
# See: https://towardsdatascience.com/how-i-customarily-bin-data-with-pandas-9303c9e4d946
dftopo1 = dftopo.copy()
if 'Sat.Vmag' not in dftopo1.columns:
# Compute visual magnitudes
Rsat = 1 # Radius of satellite (m)
msat = compute_visual_magnitude(dftopo1,Rsat,p=0.25,k=0.12) # With airmass
dftopo1['Sat.Vmag'] = msat
# Remove nan
dftopo1 = dftopo1[pd.notnull(dftopo1['Sat.Vmag'])]
# Create bins of ranges for each access interval
ranges = pd.IntervalIndex.from_tuples(list(zip(dfa['Start'], dfa['Stop'])),closed='both')
labels = dfa.Access.astype(str).to_list()
# Apply cut to label access periods
dftopo1['Access'] = pd.cut(dftopo1['ET'], bins=ranges, labels=labels).map(dict(zip(ranges,labels)))
# Remove non-access
dftopo1 = dftopo1[pd.notnull(dftopo1.Access)]
# Add blank rows between groups of objects
grouped = dftopo1.groupby('Access')
dftopo1 = pd.concat([i.append({'Access': None}, ignore_index=True) for _, i in grouped]).reset_index(drop=True)
# Forward fill na in Access
dftopo1.Access = dftopo1.Access.fillna(method="ffill")
import plotly.graph_objects as go
import plotly.express as px
import plotly
# Convert angles to degrees
dftopo1['Sat.El'] = np.rad2deg(dftopo1['Sat.El'])
dftopo1['Sat.Az'] = np.rad2deg(dftopo1['Sat.Az'])
# Plotly express (color by access)
fig = px.line_polar(dftopo1, r="Sat.El", theta="Sat.Az",
color="Access",
color_discrete_sequence=px.colors.sequential.Plasma_r)
# Multicolored lines
# See: https://stackoverflow.com/questions/69705455/plotly-one-line-different-colors
# Remove gaps
fig.update_traces(connectgaps=False)
# Reverse polar axis
fig.update_layout(
polar = dict(
radialaxis = dict(range = [90,0]),
angularaxis = dict(
tickfont_size=10,
rotation=90, # start position of angular axis
direction="clockwise",
showticklabels = True,
ticktext = ['0','1','2','3','4','5','6','7']
)
),
)
# # Add button to toggle traces on/off
# button2 = dict(method='restyle',
# label='All',
# visible=True,
# args=[{'visible':True}],
# args2 = [{'visible': False}],
# )
# # Create menu item
# um = [{'buttons':button2, 'label': 'Show', 'showactive':True,
# # 'x':0.3, 'y':0.99,
# }]
# pdb.set_trace()
# # add dropdown menus to the figure
# fig.update_layout(showlegend=True, updatemenus=um)
# Render
if filename is None:
filename = 'temp-plot.html'
plotly.offline.plot(fig, filename = str(filename), validate=False)
del dftopo1
return
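# --- Hedged sketch of the interval-binning idiom used above (added for illustration) ---
# Labels each sample time with the access interval (from dfa) that contains it, then
# drops samples outside every interval. Column names follow the functions above.
def _example_label_by_access(dftopo, dfa, time_col='ET'):
    import pandas as pd
    ranges = pd.IntervalIndex.from_tuples(list(zip(dfa['Start'], dfa['Stop'])), closed='both')
    labels = dfa.Access.astype(str).to_list()
    binned = pd.cut(dftopo[time_col], bins=ranges)              # assign each time to an interval
    dftopo = dftopo.assign(Access=binned.map(dict(zip(ranges, labels))))
    return dftopo[pd.notnull(dftopo.Access)]                    # keep in-access samples only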
#%% Overpass plots
def plot_access_times(access,gslight,gsdark,satlight, satpartial, satdark):
'''
Generate a timeline plot showing the access intervals and lighting conditions
of the satellite as seen from a groundstation.
Parameters
----------
access : SpiceCell
Window containing line-of-sight access intervals.
gsdark : SpiceCell
Window containing time intervals of station darkness.
satlight : SpiceCell
Window containing time intervals of sat full sunlight.
satpartial : SpiceCell
Window containing time intervals of sat partial sunlight.
'''
# Process interval sets
# Line-of-sight Access
dfa = window_to_dataframe(access,timefmt='datetime') # Access times (datetime)
dfa['trace'] = 'Viewing Geometry' # Trace label
dfa['Type'] = 'Above horizon' # Access type
# Visible Access
# Compute set difference
# visaccess = access - gslight -satdark
vis = spice.wndifd(access,gslight) # Subtract station daylight
vis = spice.wndifd(vis,satdark) # Subtract sat darkness
dfvis = window_to_dataframe(vis,timefmt='datetime') # Access times (datetime)
dfvis['trace'] = 'Visibility' # Trace label
dfvis['Type'] = 'Visible Access' # Access type
# Groundstation dark
dfgs = window_to_dataframe(gsdark,timefmt='datetime') # Ground station dark times (datetime)
dfgs['trace'] = 'Station Lighting' # Trace label
dfgs['Type'] = 'GS Dark' # Trace label
# Satellite Sunlight
dfss = window_to_dataframe(satlight,timefmt='datetime') # Sat light times (datetime)
dfss['trace'] = 'Sat Lighting' # Trace label
dfss['Type'] = 'Sat Sun' # Trace label
# Satellite Penumbra
dfsp = window_to_dataframe(satpartial,timefmt='datetime') # Sat light times (datetime)
dfsp['trace'] = 'Sat Lighting' # Trace label
dfsp['Type'] = 'Sat Penumbra' # Trace label
# Combine dataframes
df = pd.concat( [dfgs[['Start', 'Stop', 'Duration','Type','trace']],
dfss[['Start', 'Stop', 'Duration','Type','trace']],
dfsp[['Start', 'Stop', 'Duration','Type','trace']],
dfa[['Start', 'Stop', 'Duration','Type','trace']],
dfvis[['Start', 'Stop', 'Duration','Type','trace']],
])
# Create Gantt chart
fig = px.timeline(df, x_start="Start", x_end="Stop", y="trace", color="Type",
color_discrete_sequence=["black","goldenrod","grey","blue","red"],
)
# Update bar height
BARHEIGHT = .1
fig.update_layout(
yaxis={"domain": [max(1 - (BARHEIGHT * len(fig.data)), 0), 1]}, margin={"t": 0, "b": 0}
)
# Add range slider
fig.update_layout(
xaxis=dict(
rangeselector=dict(
),
rangeslider=dict(
visible=True
),
type="date"
)
)
# Render
filename = 'AccessPeriods.html'
plotly.offline.plot(fig, validate=False, filename=filename)
return
def plot_overpass_magnitudes(dftopo, dfa):
# Bin data based on access time intervals
# See: https://towardsdatascience.com/how-i-customarily-bin-data-with-pandas-9303c9e4d946
dftopo1 = dftopo.copy()
# Compute visual magnitudes
Rsat = 1 # Radius of satellite (m)
msat = compute_visual_magnitude(dftopo1,Rsat,p=0.25,k=0.12,include_airmass=True) # With airmass
# msat = compute_visual_magnitude(dftopo1,Rsat,p=0.25,k=0.12,include_airmass=False) # Without airmass
dftopo1['Sat.Vmag'] = msat
# Remove nan
dftopo1 = dftopo1[pd.notnull(dftopo1['Sat.Vmag'])]
# Create bins of ranges for each access interval
ranges = pd.IntervalIndex.from_tuples(list(zip(dfa['Start'], dfa['Stop'])),closed='both')
labels = dfa.Access.astype(str).to_list()
# Apply cut to label access periods
dftopo1['Access'] = pd.cut(dftopo1['UTCG'], bins=ranges, labels=labels).map(dict(zip(ranges,labels)))
# Remove non-access
dftopo1 = dftopo1[pd.notnull(dftopo1.Access)]
# Remove -ve elevations
# dftopo1 = dftopo1[]
# Add blank rows between groups of objects
grouped = dftopo1.groupby('Access')
dftopo1 = pd.concat([i.append({'Access': None}, ignore_index=True) for _, i in grouped]).reset_index(drop=True)
# Forward fill na in Access
dftopo1.Access = dftopo1.Access.fillna(method="ffill")
# Generate ticks for colorscale
Vmin = dftopo1['Sat.Vmag'].min() # Min (brightest)
Vmax = +30 # Limiting magnitude
cticks = np.arange(int((Vmin//5)*5.),int(Vmax)+5, 5)
# Assign markersize
# Want to scale size of markers based on magnitude
# Magnitudes range from about -2 (brightest) to +70 (dimmest);
# map magnitude 0 to the largest marker and magnitude 30 to the smallest.
# Size range
y1 = 5 # Max marker size
y2 = 0.1 # Min marker size
# Mag range
x1 = 0 # Min mag (brightest)
x2 = 30 # Max mag (dimmest)
# Set size
# See: https://github.com/eleanorlutz/western_constellations_atlas_of_space/blob/main/6_plot_maps.ipynb
dftopo1['size'] = np.nan # Initialize
dftopo1['size'] = y1 + ((y2-y1)/(x2-x1))*(dftopo1['Sat.Vmag'] - x1)
dftopo1['size'][dftopo1['size']<1] = 1 # Limit minimum size
dftopo1['size'][ | pd.isnull(dftopo1['size']) | pandas.isnull |
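# --- Hedged sketch of the magnitude-to-marker-size mapping above (added for illustration) ---
# Linear map: Vmag = 0 (bright) -> size 5, Vmag = 30 (dim) -> size 0.1, floored at 1
# as in the code above. Example magnitudes are arbitrary.
def _example_size_mapping():
    import numpy as np
    y1, y2 = 5.0, 0.1        # marker-size range
    x1, x2 = 0.0, 30.0       # magnitude range (bright -> dim)
    vmag = np.array([0.0, 6.0, 15.0, 30.0])
    size = y1 + ((y2 - y1) / (x2 - x1)) * (vmag - x1)
    size = np.clip(size, 1.0, None)   # same floor as the code above
    print(dict(zip(vmag, size)))      # e.g. Vmag 6 -> ~4.0, Vmag 15 -> ~2.55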
"""
.. module:: projectdirectory
:platform: Unix, Windows
:synopsis: A module for examining collections of git repositories as a whole
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import math
import sys
import os
import numpy as np
import pandas as pd
from git import GitCommandError
from gitpandas.repository import Repository
__author__ = 'willmcginnis'
class ProjectDirectory(object):
"""
An object that refers to a directory full of git repositories, for bulk analysis. It contains a collection of
git-pandas repository objects, created by os.walk-ing a directory to find all child .git subdirectories.
:param working_dir: (optional, default=None), the working directory to search for repositories in, None for cwd, or an explicit list of directories containing git repositories
:param ignore: (optional, default=None), a list of directories to ignore when searching for git repos.
:param verbose: (default=True), if True, will print out verbose logging to terminal
:return:
"""
def __init__(self, working_dir=None, ignore=None, verbose=True):
if working_dir is None:
self.repo_dirs = set([x[0].split('.git')[0] for x in os.walk(os.getcwd()) if '.git' in x[0]])
elif isinstance(working_dir, list):
self.repo_dirs = working_dir
else:
self.repo_dirs = set([x[0].split('.git')[0] for x in os.walk(working_dir) if '.git' in x[0]])
self.repos = [Repository(r, verbose=verbose) for r in self.repo_dirs]
if ignore is not None:
self.repos = [x for x in self.repos if x._repo_name not in ignore]
def _repo_name(self):
"""
Returns a DataFrame of the repo names present in this project directory
:return: DataFrame
"""
ds = [[x._repo_name()] for x in self.repos]
df = pd.DataFrame(ds, columns=['repository'])
return df
def is_bare(self):
"""
Returns a dataframe of repo names and whether or not they are bare.
:return: DataFrame
"""
ds = [[x._repo_name(), x.is_bare()] for x in self.repos]
df = | pd.DataFrame(ds, columns=['repository', 'is_bare']) | pandas.DataFrame |
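# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Point ProjectDirectory at a folder containing git clones; the path and ignore list
# below are placeholders.
def _example_project_directory():
    project = ProjectDirectory(working_dir='/path/to/repos', ignore=['scratch'], verbose=False)
    print(project._repo_name())   # one row per repository found by os.walk
    print(project.is_bare())      # repository name plus bare/non-bare flag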
""" test the scalar Timedelta """
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.util.testing as tm
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas import (Timedelta, TimedeltaIndex, timedelta_range, Series,
to_timedelta, compat, isnull)
from pandas._libs.tslib import iNaT, NaTType
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
pass
def test_construction(self):
expected = np.timedelta64(10, 'D').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10, unit='d').value, expected)
self.assertEqual(Timedelta(10.0, unit='d').value, expected)
self.assertEqual(Timedelta('10 days').value, expected)
self.assertEqual(Timedelta(days=10).value, expected)
self.assertEqual(Timedelta(days=10.0).value, expected)
expected += np.timedelta64(10, 's').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta('10 days 00:00:10').value, expected)
self.assertEqual(Timedelta(days=10, seconds=10).value, expected)
self.assertEqual(
Timedelta(days=10, milliseconds=10 * 1000).value, expected)
self.assertEqual(
Timedelta(days=10, microseconds=10 * 1000 * 1000).value, expected)
# test construction with np dtypes
# GH 8757
timedelta_kwargs = {'days': 'D',
'seconds': 's',
'microseconds': 'us',
'milliseconds': 'ms',
'minutes': 'm',
'hours': 'h',
'weeks': 'W'}
npdtypes = [np.int64, np.int32, np.int16, np.float64, np.float32,
np.float16]
for npdtype in npdtypes:
for pykwarg, npkwarg in timedelta_kwargs.items():
expected = np.timedelta64(1,
npkwarg).astype('m8[ns]').view('i8')
self.assertEqual(
Timedelta(**{pykwarg: npdtype(1)}).value, expected)
# rounding cases
self.assertEqual(Timedelta(82739999850000).value, 82739999850000)
self.assertTrue('0 days 22:58:59.999850' in str(Timedelta(
82739999850000)))
self.assertEqual(Timedelta(123072001000000).value, 123072001000000)
self.assertTrue('1 days 10:11:12.001' in str(Timedelta(
123072001000000)))
# string conversion with/without leading zero
# GH 9570
self.assertEqual(Timedelta('0:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('00:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('-1:00:00'), -timedelta(hours=1))
self.assertEqual(Timedelta('-01:00:00'), -timedelta(hours=1))
# more strings & abbrevs
# GH 8190
self.assertEqual(Timedelta('1 h'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hour'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hr'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hours'), timedelta(hours=1))
self.assertEqual(Timedelta('-1 hours'), -timedelta(hours=1))
self.assertEqual(Timedelta('1 m'), timedelta(minutes=1))
self.assertEqual(Timedelta('1.5 m'), timedelta(seconds=90))
self.assertEqual(Timedelta('1 minute'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 minutes'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 s'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 second'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 seconds'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 ms'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 milli'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 millisecond'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 us'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 micros'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 microsecond'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1.5 microsecond'),
Timedelta('00:00:00.000001500'))
self.assertEqual(Timedelta('1 ns'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nano'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nanosecond'),
Timedelta('00:00:00.000000001'))
# combos
self.assertEqual(Timedelta('10 days 1 hour'),
timedelta(days=10, hours=1))
self.assertEqual(Timedelta('10 days 1 h'), timedelta(days=10, hours=1))
self.assertEqual(Timedelta('10 days 1 h 1m 1s'), timedelta(
days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -
timedelta(days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -
timedelta(days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s 3us'), -
timedelta(days=10, hours=1, minutes=1,
seconds=1, microseconds=3))
self.assertEqual(Timedelta('-10 days 1 h 1.5m 1s 3us'), -
timedelta(days=10, hours=1, minutes=1,
seconds=31, microseconds=3))
# currently invalid as it has a - on the hhmmdd part (only allowed on
# the days)
self.assertRaises(ValueError,
lambda: Timedelta('-10 days -1 h 1.5m 1s 3us'))
# only leading neg signs are allowed
self.assertRaises(ValueError,
lambda: Timedelta('10 days -1 h 1.5m 1s 3us'))
# no units specified
self.assertRaises(ValueError, lambda: Timedelta('3.1415'))
# invalid construction
tm.assertRaisesRegexp(ValueError, "cannot construct a Timedelta",
lambda: Timedelta())
tm.assertRaisesRegexp(ValueError, "unit abbreviation w/o a number",
lambda: Timedelta('foo'))
tm.assertRaisesRegexp(ValueError,
"cannot construct a Timedelta from the passed "
"arguments, allowed keywords are ",
lambda: Timedelta(day=10))
# roundtripping both for string and value
for v in ['1s', '-1s', '1us', '-1us', '1 day', '-1 day',
'-23:59:59.999999', '-1 days +23:59:59.999999', '-1ns',
'1ns', '-23:59:59.999999999']:
td = Timedelta(v)
self.assertEqual(Timedelta(td.value), td)
# str does not normally display nanos
if not td.nanoseconds:
self.assertEqual(Timedelta(str(td)), td)
self.assertEqual(Timedelta(td._repr_base(format='all')), td)
# floats
expected = np.timedelta64(
10, 's').astype('m8[ns]').view('i8') + np.timedelta64(
500, 'ms').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10.5, unit='s').value, expected)
# nat
self.assertEqual(Timedelta('').value, iNaT)
self.assertEqual(Timedelta('nat').value, iNaT)
self.assertEqual(Timedelta('NAT').value, iNaT)
self.assertEqual(Timedelta(None).value, iNaT)
self.assertEqual(Timedelta(np.nan).value, iNaT)
self.assertTrue(isnull(Timedelta('nat')))
# offset
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Hour(2)),
Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Second(2)),
Timedelta('0 days, 00:00:02'))
# unicode
# GH 11995
expected = Timedelta('1H')
result = pd.Timedelta(u'1H')
self.assertEqual(result, expected)
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
Timedelta(u'0 days, 02:00:00'))
self.assertRaises(ValueError, lambda: Timedelta(u'foo bar'))
def test_overflow_on_construction(self):
# xref https://github.com/statsmodels/statsmodels/issues/3374
value = pd.Timedelta('1day').value * 20169940
self.assertRaises(OverflowError, pd.Timedelta, value)
def test_total_seconds_scalar(self):
# GH 10939
rng = Timedelta('1 days, 10:11:12.100123456')
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
self.assertTrue(np.isnan(rng.total_seconds()))
def test_repr(self):
self.assertEqual(repr(Timedelta(10, unit='d')),
"Timedelta('10 days 00:00:00')")
self.assertEqual(repr(Timedelta(10, unit='s')),
"Timedelta('0 days 00:00:10')")
self.assertEqual(repr(Timedelta(10, unit='ms')),
"Timedelta('0 days 00:00:00.010000')")
self.assertEqual(repr(Timedelta(-10, unit='ms')),
"Timedelta('-1 days +23:59:59.990000')")
def test_conversion(self):
for td in [Timedelta(10, unit='d'),
Timedelta('1 days, 10:11:12.012345')]:
pydt = td.to_pytimedelta()
self.assertTrue(td == Timedelta(pydt))
self.assertEqual(td, pydt)
self.assertTrue(isinstance(pydt, timedelta) and not isinstance(
pydt, Timedelta))
self.assertEqual(td, np.timedelta64(td.value, 'ns'))
td64 = td.to_timedelta64()
self.assertEqual(td64, np.timedelta64(td.value, 'ns'))
self.assertEqual(td, td64)
self.assertTrue(isinstance(td64, np.timedelta64))
# this is NOT equal and cannot be round-tripped (because of the nanos)
td = Timedelta('1 days, 10:11:12.012345678')
self.assertTrue(td != td.to_pytimedelta())
def test_freq_conversion(self):
td = Timedelta('1 days 2 hours 3 ns')
result = td / np.timedelta64(1, 'D')
self.assertEqual(result, td.value / float(86400 * 1e9))
result = td / np.timedelta64(1, 's')
self.assertEqual(result, td.value / float(1e9))
result = td / np.timedelta64(1, 'ns')
self.assertEqual(result, td.value)
def test_fields(self):
def check(value):
# that we are int/long like
self.assertTrue(isinstance(value, (int, compat.long)))
# compat to datetime.timedelta
rng = to_timedelta('1 days, 10:11:12')
self.assertEqual(rng.days, 1)
self.assertEqual(rng.seconds, 10 * 3600 + 11 * 60 + 12)
self.assertEqual(rng.microseconds, 0)
self.assertEqual(rng.nanoseconds, 0)
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta('-1 days, 10:11:12')
self.assertEqual(abs(td), Timedelta('13:48:48'))
self.assertTrue(str(td) == "-1 days +10:11:12")
self.assertEqual(-td, Timedelta('0 days 13:48:48'))
self.assertEqual(-Timedelta('-1 days, 10:11:12').value, 49728000000000)
self.assertEqual(Timedelta('-1 days, 10:11:12').value, -49728000000000)
rng = to_timedelta('-1 days, 10:11:12.100123456')
self.assertEqual(rng.days, -1)
self.assertEqual(rng.seconds, 10 * 3600 + 11 * 60 + 12)
self.assertEqual(rng.microseconds, 100 * 1000 + 123)
self.assertEqual(rng.nanoseconds, 456)
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# components
tup = pd.to_timedelta(-1, 'us').components
self.assertEqual(tup.days, -1)
self.assertEqual(tup.hours, 23)
self.assertEqual(tup.minutes, 59)
self.assertEqual(tup.seconds, 59)
self.assertEqual(tup.milliseconds, 999)
self.assertEqual(tup.microseconds, 999)
self.assertEqual(tup.nanoseconds, 0)
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta('-1 days 1 us').components
self.assertEqual(tup.days, -2)
self.assertEqual(tup.hours, 23)
self.assertEqual(tup.minutes, 59)
self.assertEqual(tup.seconds, 59)
self.assertEqual(tup.milliseconds, 999)
self.assertEqual(tup.microseconds, 999)
self.assertEqual(tup.nanoseconds, 0)
def test_nat_converters(self):
self.assertEqual(to_timedelta(
'nat', box=False).astype('int64'), iNaT)
self.assertEqual(to_timedelta(
'nan', box=False).astype('int64'), iNaT)
def testit(unit, transform):
# array
result = to_timedelta(np.arange(5), unit=unit)
expected = TimedeltaIndex([np.timedelta64(i, transform(unit))
for i in np.arange(5).tolist()])
tm.assert_index_equal(result, expected)
# scalar
result = to_timedelta(2, unit=unit)
expected = Timedelta(np.timedelta64(2, transform(unit)).astype(
'timedelta64[ns]'))
self.assertEqual(result, expected)
# validate all units
# GH 6855
for unit in ['Y', 'M', 'W', 'D', 'y', 'w', 'd']:
testit(unit, lambda x: x.upper())
for unit in ['days', 'day', 'Day', 'Days']:
testit(unit, lambda x: 'D')
for unit in ['h', 'm', 's', 'ms', 'us', 'ns', 'H', 'S', 'MS', 'US',
'NS']:
testit(unit, lambda x: x.lower())
# offsets
# m
testit('T', lambda x: 'm')
# ms
testit('L', lambda x: 'ms')
def test_numeric_conversions(self):
self.assertEqual(ct(0), np.timedelta64(0, 'ns'))
self.assertEqual(ct(10), np.timedelta64(10, 'ns'))
self.assertEqual(ct(10, unit='ns'), np.timedelta64(
10, 'ns').astype('m8[ns]'))
self.assertEqual(ct(10, unit='us'), np.timedelta64(
10, 'us').astype('m8[ns]'))
self.assertEqual(ct(10, unit='ms'), np.timedelta64(
10, 'ms').astype('m8[ns]'))
self.assertEqual( | ct(10, unit='s') | pandas.tseries.timedeltas._coerce_scalar_to_timedelta_type |
import re
import warnings
import numpy as np
import pandas as pd
from Amplo.Utils import clean_keys
class DataProcesser:
def __init__(self,
target: str = None,
float_cols: list = None,
int_cols: list = None,
date_cols: list = None,
cat_cols: list = None,
include_output: bool = False,
missing_values: str = 'interpolate',
outlier_removal: str = 'clip',
z_score_threshold: int = 4,
version: int = 1,
verbosity: int = 0,
):
"""
Preprocessing Class. Cleans a dataset into a workable format.
Deals with Outliers, Missing Values, duplicate rows, data types (floats, categorical and
dates), Not a Numbers, Infinities.
Parameters
----------
target str: Column name of target variable
float_cols list: Float columns, parsed to floats
int_cols list: Integer columns, parsed to integers
date_cols list: Date columns, all parsed to pd.datetime format
cat_cols list: Categorical columns. Currently all one-hot encoded.
include_output bool: If true, the target column is kept in the cleaned data
missing_values str: How to deal with missing values ('remove_rows', 'remove_cols', 'interpolate', 'mean' or 'zero')
outlier_removal str: How to deal with outliers ('clip', 'quantiles', 'z-score' or 'none')
z_score_threshold int: If outlier_removal='z-score', the threshold is adaptable, default=4.
version int: Versioning the output files
verbosity int: Level of terminal logging (0 = silent)
"""
# Tests
mis_values_algo = ['remove_rows', 'remove_cols', 'interpolate', 'mean', 'zero']
assert missing_values in mis_values_algo, \
'Missing values algorithm not implemented, pick from {}'.format(', '.join(mis_values_algo))
out_rem_algo = ['quantiles', 'z-score', 'clip', 'none']
assert outlier_removal in out_rem_algo, \
'Outlier Removal algorithm not implemented, pick from {}'.format(', '.join(out_rem_algo))
# Arguments
self.version = version
self.includeOutput = include_output
self.target = target if target is None else re.sub("[^a-z0-9]", '_', target.lower())
self.float_cols = [] if float_cols is None else [re.sub('[^a-z0-9]', '_', fc.lower()) for fc in float_cols]
self.int_cols = [] if int_cols is None else [re.sub('[^a-z0-9]', '_', ic.lower()) for ic in int_cols]
self.num_cols = self.float_cols + self.int_cols
self.cat_cols = [] if cat_cols is None else [re.sub('[^a-z0-9]', '_', cc.lower()) for cc in cat_cols]
self.date_cols = [] if date_cols is None else [re.sub('[^a-z0-9]', '_', dc.lower()) for dc in date_cols]
if self.target in self.num_cols:
self.num_cols.remove(self.target)
# Algorithms
self.missing_values = missing_values
self.outlier_removal = outlier_removal
self.z_score_threshold = z_score_threshold
# Fitted Settings
self.dummies = {}
self._q1 = None
self._q3 = None
self._means = None
self._stds = None
# Info for Documenting
self.is_fitted = False
self.verbosity = verbosity
self.removedDuplicateRows = 0
self.removedDuplicateColumns = 0
self.removedOutliers = 0
self.imputedMissingValues = 0
self.removedConstantColumns = 0
def fit_transform(self, data: pd.DataFrame) -> pd.DataFrame:
"""
Fits this data cleaning module and returns the transformed data.
Parameters
----------
data [pd.DataFrame]: Input data
Returns
-------
data [pd.DataFrame]: Cleaned input data
"""
if self.verbosity > 0:
print('[AutoML] Data Cleaning Started, ({} x {}) samples'.format(len(data), len(data.keys())))
# Clean Keys
data = clean_keys(data)
# Remove Duplicates
data = self.remove_duplicates(data)
# Infer data-types
self.infer_data_types(data)
# Convert data types
data = self.convert_data_types(data, fit_categorical=True)
# Remove outliers
data = self.remove_outliers(data, fit=True)
# Remove missing values
data = self.remove_missing_values(data)
# Remove Constants
data = self.remove_constants(data)
# Convert integer columns
data = self.convert_float_int(data)
# Clean target
data = self.clean_target(data)
# Finish
self.is_fitted = True
if self.verbosity > 0:
print('[AutoML] Processing completed, ({} x {}) samples returned'.format(len(data), len(data.keys())))
return data
def transform(self, data: pd.DataFrame) -> pd.DataFrame:
"""
Function that takes existing settings (including dummies), and transforms new data.
Parameters
----------
data [pd.DataFrame]: Input data
Returns
-------
data [pd.DataFrame]: Cleaned input data
"""
assert self.is_fitted, "Transform only available for fitted objects, run .fit_transform() first."
# Clean Keys
data = clean_keys(data)
# Impute columns
data = self._impute_columns(data)
# Remove duplicates
data = self.remove_duplicates(data, rows=False)
# Convert data types
data = self.convert_data_types(data, fit_categorical=False)
# Remove outliers
data = self.remove_outliers(data, fit=False)
# Remove missing values
data = self.remove_missing_values(data)
# Convert integer columns
data = self.convert_float_int(data)
return data
def get_settings(self) -> dict:
"""
Get settings to recreate fitted object.
"""
assert self.is_fitted, "Object not yet fitted."
return {
'num_cols': self.num_cols,
'float_cols': self.float_cols,
'int_cols': self.int_cols,
'date_cols': self.date_cols,
'cat_cols': self.cat_cols,
'missing_values': self.missing_values,
'outlier_removal': self.outlier_removal,
'z_score_threshold': self.z_score_threshold,
'_means': None if self._means is None else self._means.to_json(),
'_stds': None if self._stds is None else self._stds.to_json(),
'_q1': None if self._q1 is None else self._q1.to_json(),
'_q3': None if self._q3 is None else self._q3.to_json(),
'dummies': self.dummies,
'fit': {
'imputed_missing_values': self.imputedMissingValues,
'removed_outliers': self.removedOutliers,
'removed_constant_columns': self.removedConstantColumns,
'removed_duplicate_rows': self.removedDuplicateRows,
'removed_duplicate_columns': self.removedDuplicateColumns,
}
}
def load_settings(self, settings: dict) -> None:
"""
Loads settings from dictionary and recreates a fitted object
"""
self.num_cols = settings['num_cols'] if 'num_cols' in settings else []
self.float_cols = settings['float_cols'] if 'float_cols' in settings else []
self.int_cols = settings['int_cols'] if 'int_cols' in settings else []
self.cat_cols = settings['cat_cols'] if 'cat_cols' in settings else []
self.date_cols = settings['date_cols'] if 'date_cols' in settings else []
self.missing_values = settings['missing_values'] if 'missing_values' in settings else []
self.outlier_removal = settings['outlier_removal'] if 'outlier_removal' in settings else []
self.z_score_threshold = settings['z_score_threshold'] if 'z_score_threshold' in settings else []
self._means = None if settings['_means'] is None else pd.read_json(settings['_means'])
self._stds = None if settings['_stds'] is None else pd.read_json(settings['_stds'])
self._q1 = None if settings['_q1'] is None else pd.read_json(settings['_q1'])
self._q3 = None if settings['_q3'] is None else pd.read_json(settings['_q3'])
self.dummies = settings['dummies'] if 'dummies' in settings else {}
self.is_fitted = True
def infer_data_types(self, data: pd.DataFrame):
"""
In case no data types are provided, this function infers the most likely data types
"""
if len(self.cat_cols) == len(self.num_cols) == len(self.date_cols) == 0:
# First cleanup
data = data.infer_objects()
# Remove target from columns
if not self.includeOutput and self.target is not None and self.target in data:
data = data.drop(self.target, axis=1)
# Iterate through keys
for key in data.keys():
# Integer
if pd.api.types.is_integer_dtype(data[key]):
self.int_cols.append(key)
# Float
if pd.api.types.is_float_dtype(data[key]):
self.float_cols.append(key)
# Datetime
if | pd.api.types.is_datetime64_any_dtype(data[key]) | pandas.api.types.is_datetime64_any_dtype |
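# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A minimal, made-up frame showing the fit_transform / transform / get_settings round
# trip described in the docstrings above. Column names and values are arbitrary.
def _example_data_processer():
    import numpy as np
    import pandas as pd
    df = pd.DataFrame({
        'Temperature': [21.5, 22.1, np.nan, 250.0, 21.9],   # one missing value, one outlier
        'Cycles': [1, 2, 3, 4, 5],
        'Label': [0, 1, 0, 1, 0],
    })
    dp = DataProcesser(target='Label', missing_values='interpolate', outlier_removal='clip')
    clean = dp.fit_transform(df)
    settings = dp.get_settings()      # serialisable dict for re-creating the fitted object
    dp2 = DataProcesser()
    dp2.load_settings(settings)
    print(dp2.transform(df).head())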