#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 20 10:24:34 2019
@author: labadmin
"""
"""
Created on Wed Jan 02 21:05:32 2019
@author: Hassan
"""
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import GridSearchCV
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import GradientBoostingClassifier as GBC
from imblearn.over_sampling import RandomOverSampler
from imblearn.over_sampling import BorderlineSMOTE
from imblearn.over_sampling import SMOTENC
data_ben1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset1.csv",skiprows=4)
data_ben2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset2.csv",skiprows=4)
data_ben3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset3.csv",skiprows=4)
data_ben4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset4.csv",skiprows=4)
data_ben5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset5.csv",skiprows=4)
data_ben6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset6.csv",skiprows=4)
data_ben7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset7.csv",skiprows=4)
frames_ben1 = [data_ben1,data_ben2,data_ben3,data_ben4,data_ben5,data_ben6,data_ben7]
result_ben1 = pd.concat(frames_ben1)
result_ben1.index=range(3360)
df_ben1 = pd.DataFrame({'label': 1},index=range(0,3360))  # scalar label broadcasts over the index
dat_ben1=pd.concat([result_ben1,df_ben1],axis=1)
#-------------------------------------------------------------------------------------------------
data__ben1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset1.csv",skiprows=4)
data__ben2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset2.csv",skiprows=4)
data__ben3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset3.csv",skiprows=4)
data__ben4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset4.csv",skiprows=4)
data__ben4=data__ben4['# Columns: time'].str.split(expand=True)
data__ben4.columns=['# Columns: time','avg_rss12','var_rss12','avg_rss13','var_rss13','avg_rss23','var_rss23']
data__ben5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset5.csv",skiprows=4)
data__ben6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset6.csv",skiprows=4)
frames_ben2 = [data__ben1,data__ben2,data__ben3,data__ben4,data__ben5,data__ben6]
result_ben2 = pd.concat(frames_ben2)
result_ben2.index=range(2880)
df_ben2 = pd.DataFrame({'label': 2},index=range(0,2880))
dat__ben2=pd.concat([result_ben2,df_ben2],axis=1)
#-----------------------------------------------------------------------------------------------------
data_cyc1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset1.csv",skiprows=4)
data_cyc2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset2.csv",skiprows=4)
data_cyc3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset3.csv",skiprows=4)
data_cyc4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset4.csv",skiprows=4)
data_cyc5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset5.csv",skiprows=4)
data_cyc6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset6.csv",skiprows=4)
data_cyc7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset7.csv",skiprows=4)
data_cyc8=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset8.csv",skiprows=4)
data_cyc9=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset99.csv",skiprows=4)
data_cyc10=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset10.csv",skiprows=4)
data_cyc11=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset11.csv",skiprows=4)
data_cyc12=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset12.csv",skiprows=4)
data_cyc13=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset13.csv",skiprows=4)
data_cyc14=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset144.csv",skiprows=4)
data_cyc15=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset15.csv",skiprows=4)
frames_cyc = [data_cyc1,data_cyc2,data_cyc3,data_cyc4,data_cyc5,data_cyc6,data_cyc7,data_cyc8,data_cyc9,data_cyc10,data_cyc11,data_cyc12,data_cyc13,data_cyc14,data_cyc15]
result_cyc = pd.concat(frames_cyc)
result_cyc.index=range(7200)
df_cyc = pd.DataFrame({'label': 3},index=range(0,7200))
data_cyc=pd.concat([result_cyc,df_cyc],axis=1)
#----------------------------------------------------------------------------------------------
data_ly1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset1.csv",skiprows=4)
data_ly2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset2.csv",skiprows=4)
data_ly3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset3.csv",skiprows=4)
data_ly4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset4.csv",skiprows=4)
data_ly5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset5.csv",skiprows=4)
data_ly6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset6.csv",skiprows=4)
data_ly7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset7.csv",skiprows=4)
data_ly8=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset8.csv",skiprows=4)
data_ly9=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset9.csv",skiprows=4)
data_ly10=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset10.csv",skiprows=4)
data_ly11=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset11.csv",skiprows=4)
data_ly12=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset12.csv",skiprows=4)
data_ly13=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset13.csv",skiprows=4)
data_ly14=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset14.csv",skiprows=4)
data_ly15=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset15.csv",skiprows=4)
frames_ly = [data_ly1,data_ly2,data_ly3,data_ly4,data_ly5,data_ly6,data_ly7,data_ly8,data_ly9,data_ly10,data_ly11,data_ly12,data_ly13,data_ly14,data_ly15]
result_ly = pd.concat(frames_ly)
result_ly.index=range(7200)
df_ly = pd.DataFrame({'label': 4},index=range(0,7200))
data_ly=pd.concat([result_ly,df_ly],axis=1)
#-------------------------------------------------------------------------------------------------
data_sit1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset1.csv",skiprows=4)
data_sit2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset2.csv",skiprows=4)
data_sit3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset3.csv",skiprows=4)
data_sit4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset4.csv",skiprows=4)
data_sit5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset5.csv",skiprows=4)
data_sit6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset6.csv",skiprows=4)
data_sit7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset7.csv",skiprows=4)
data_sit8=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset8.csv",skiprows=4)
data_sit9=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset9.csv",skiprows=4)
data_sit10=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset10.csv",skiprows=4)
data_sit11=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset11.csv",skiprows=4)
data_sit12=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset12.csv",skiprows=4)
data_sit13=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset13.csv",skiprows=4)
data_sit14=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset14.csv",skiprows=4)
data_sit15=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset15.csv",skiprows=4)
frames_sit= [data_sit1,data_sit2,data_sit3,data_sit4,data_sit5,data_sit6,data_sit7,data_sit8,data_sit9,data_sit10,data_sit11,data_sit12,data_sit13,data_sit14,data_sit15]
result_sit = pd.concat(frames_sit)
result_sit.index=range(7199)
df_sit= pd.DataFrame({'label': 5},index=range(0,7199))
data_sit=pd.concat([result_sit,df_sit],axis=1)
#----------------------------------------------------------------------------------------------------
data_sta1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset1.csv",skiprows=4)
data_sta2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset2.csv",skiprows=4)
data_sta3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset3.csv",skiprows=4)
data_sta4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset4.csv",skiprows=4)
data_sta5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset5.csv",skiprows=4)
data_sta6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset6.csv",skiprows=4)
data_sta7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset7.csv",skiprows=4)
data_sta8=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset8.csv",skiprows=4)
data_sta9=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset9.csv",skiprows=4)
data_sta10=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset10.csv",skiprows=4)
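#----------------------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the repeated read_csv blocks above could be
# factored into one helper. The folder layout and skiprows=4 are taken from the calls above; the
# helper name, the base-path argument and the assumption of sequentially numbered files (which the
# original deviates from, e.g. dataset99.csv) are mine.
def load_activity(folder, n_files, label, base="F:\\Projects\\Master\\Statistical learning\\project"):
    # read every per-recording CSV of one activity, stack them and attach a class label column
    frames = [pd.read_csv("%s\\%s\\dataset%d.csv" % (base, folder, i), skiprows=4)
              for i in range(1, n_files + 1)]
    result = pd.concat(frames, ignore_index=True)
    result['label'] = label
    return result
# e.g. data_cyc = load_activity("cycling", 15, label=3)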
import chess
import chess.pgn
import chess.svg
import chess.engine
import re
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from datetime import datetime
from cairosvg import svg2png
#---------------------
STOCKFISH_PATH = '/usr/local/Cellar/stockfish/14/bin/stockfish'
engine = chess.engine.SimpleEngine.popen_uci(STOCKFISH_PATH)
starting_positions = {
'default': 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1'
}
#---------------------
class chessGame():
def __init__(self, file_name, img_name='current_board.png', position='default', White='White', Black='Black', clock='10+0'):
self.img_name = img_name
self.file_name = file_name
self.board = chess.Board(fen=starting_positions[position])
self.game = chess.pgn.Game()
self.game.setup(self.board)
self.game.headers['White'] = White
self.game.headers['Black'] = Black
self.game.headers['Event'] = f'{White} v. {Black}'
self.game.headers['Date'] = datetime.today().strftime('%Y.%m.%d')
self.base_time, self.increment = [int(x) for x in clock.split('+')]
self.game.set_clock((self.base_time*60) + self.increment)  # initial clock, read back from the root node for both sides
self.node = self.game
self.drawBoard(lastmove=False)
def move(self, moveIn, timePassed) -> bool:
uciMove = chess.Move.from_uci(moveIn)
if self.board.is_legal(uciMove):
self.node = self.node.add_main_variation(uciMove)
updateTime = (self.node.parent.parent.clock() if self.node.parent.parent else self.node.parent.clock()) - timePassed
self.node.set_clock(updateTime)
self.board.push(uciMove)
info = engine.analyse(self.board, chess.engine.Limit(time=0.2))
self.node.set_eval(score=info['score'])
try:
self.node.comment += f" [%prob(n) {self.node.eval().white().wdl(model='lichess').expectation()}]"
self.node.comment += f" [%prob(c) {2*self.node.eval().white().wdl(model='lichess').expectation() - 1}]"
except AttributeError:
pass
self.drawBoard()
return True
else:
return False
def getProb_c(self, node) -> float:
prob_regex = re.compile(r'\[%prob\(c\) (.*?)]')
match = prob_regex.search(node.comment)
return float(match.group(1))
def getProb_n(self, node) -> float:
prob_regex = re.compile(r'\[%prob\(n\) (.*?)]')
match = prob_regex.search(node.comment)
return float(match.group(1))
# def undoMove()
def drawBoard(self, lastmove=True, size=640) -> None:
if lastmove:
svg_board = chess.svg.board(self.board, size=size, lastmove=self.node.move)
else:
svg_board = chess.svg.board(self.board, size=size)
svg2png(bytestring=svg_board, write_to='img/'+self.img_name)
def drawProbability(self, fresh=False) -> None:
if not fresh:
prob = self.getProb_n(self.node)
else:
prob = 0.50
df = pd.DataFrame(columns=['color','probability'], data=[['white', prob], ['black', 1 - prob]])
df = df.set_index('color').reindex(df.set_index('color').sum().sort_values().index, axis=1)
ax = df.T.plot(kind='bar', stacked=True, colormap=ListedColormap(sns.color_palette('Greys', 10)), figsize=(1.5,8.3), legend = None)
plt.xticks([])
plt.yticks([])
plt.box(False)
plt.margins(0,0)
plt.axis('off')
for i,c in enumerate(ax.containers):
labels = ['%.2f' % v.get_height() if v.get_height() > 0 else '' for v in c]
if i == 0:
labels[0] = '+'+labels[0]
ax.bar_label(c, labels=labels, label_type='center')
else:
labels[0] = '-'+labels[0]
ax.bar_label(c, labels=labels, label_type='center', color='w')
plt.savefig('img/'+'current_probability.png', bbox_inches='tight', pad_inches=0, transparent=True)
def saveGame(self, save_dir) -> None:
save_name = f"{self.game.headers['Event']} [{self.game.headers['Date']}]"
pgn_file = open(f'{save_dir}/{save_name}', 'w', encoding='utf-8')
self.game.accept(chess.pgn.FileExporter(pgn_file))
def postAnalysis(self):
probArr = [[i+1, self.getProb_c(node)] for i,node in enumerate(self.game.mainline()) if node.eval()]
df = pd.DataFrame(columns=['move','probability'], data=[[0,0.00]] + probArr)
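#---------------------
# Illustrative usage sketch (not part of the original module): how the class above is meant to be
# driven. The file names, player names and the existence of an 'img/' output directory are assumptions.
# game = chessGame(file_name='demo.pgn', White='Alice', Black='Bob', clock='5+3')
# game.move('e2e4', timePassed=4.2)    # returns True for a legal move (board, PGN, clock and eval updated)
# game.drawProbability()               # writes img/current_probability.png from the latest eval comment
# game.saveGame('saved_games')         # exports the annotated PGN to saved_games/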
import tensorflow as tf
import tensorflow_probability as tfp
# from tensorflow.core.protobuf import config_pb2
import numpy as np
# import os
# from fit_model import load_data
import matplotlib.pyplot as plt
import time
import numbers
import pandas as pd
import tf_keras_tfp_lbfgs as funfac
from dotenv import load_dotenv
import os
import requests
from datetime import datetime, timedelta
# for the file selection dialogue (see https://codereview.stackexchange.com/questions/162920/file-selection-button-for-jupyter-notebook)
import traitlets
from ipywidgets import widgets
from IPython.display import display
from tkinter import Tk, filedialog
class SelectFilesButton(widgets.Button):
"""A file widget that leverages tkinter.filedialog."""
# see https://codereview.stackexchange.com/questions/162920/file-selection-button-for-jupyter-notebook
def __init__(self, out, CallBack=None,Load=True):
super(SelectFilesButton, self).__init__()
# Add the selected_files trait
self.add_traits(files=traitlets.traitlets.List())
# Create the button.
if Load:
self.description = "Load"
else:
self.description = "Save"
self.isLoad=Load
self.icon = "square-o"
self.style.button_color = "orange"
# Set on click behavior.
self.on_click(self.select_files)
self.CallBack = CallBack
self.out = widgets.Output()
@staticmethod
def select_files(b):
"""Generate instance of tkinter.filedialog.
Parameters
----------
b : obj:
An instance of ipywidgets.widgets.Button
"""
with b.out:
try:
# Create Tk root
root = Tk()
# Hide the main window
root.withdraw()
# Raise the root to the top of all windows.
root.call('wm', 'attributes', '.', '-topmost', True)
# List of selected files will be set to b.value
if b.isLoad:
filename = filedialog.askopenfilename() # multiple=False
else:
filename = filedialog.asksaveasfilename()
# print('Load/Save Dialog finished')
#b.description = "Files Selected"
#b.icon = "check-square-o"
#b.style.button_color = "lightgreen"
if b.CallBack is not None:
#print('Invoking CallBack')
b.CallBack(filename)
#else:
#print('no CallBack')
except:
#print('Problem in Load/Save')
#print('File is'+b.files)
pass
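# Illustrative usage (not part of the original): wiring the button into a notebook cell.
# out = widgets.Output()
# btn = SelectFilesButton(out, CallBack=lambda fname: print('chosen: ' + fname), Load=True)
# display(btn, out)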
cumulPrefix = '_cumul_' # this is used as a keyword to identify whether this plot was already plotted
def getNumArgs(myFkt):
from inspect import signature
sig = signature(myFkt)
return len(sig.parameters)
class DataLoader(object):
def __init__(self):
load_dotenv()
def pull_data(self, uri='http://ec2-3-122-224-7.eu-central-1.compute.amazonaws.com:8080/daily_data'):
return requests.get(uri).json()
# return requests.get('http://ec2-3-122-224-7.eu-central-1.compute.amazonaws.com:8080/daily_data').json()
def get_new_data(self):
uri = "http://ec2-3-122-224-7.eu-central-1.compute.amazonaws.com:8080/data"
json_data = self.pull_data(uri)
table = np.array(json_data["rows"])
column_names = []
for x in json_data["fields"]:
column_names.append(x["name"])
df = pd.DataFrame(table, columns=column_names)
df["day"] = [datetime.fromtimestamp(x["$date"] / 1000) for x in df["day"].values]
df["id"] = df["latitude"].apply(lambda x: str(x)) + "_" + df["longitude"].apply(lambda x: str(x))
unique_ids = df["id"].unique()
regions = {}
for x in unique_ids:
regions[x] = {}
regions[x]["data_fit"] = df[df["id"] == x]
return regions, df
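# Illustrative usage (not part of the original): fetch the daily data and group it by location id.
# loader = DataLoader()
# regions, df = loader.get_new_data()   # 'regions' maps "lat_lon" ids to the corresponding per-region dataframe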
NumberTypes = (int, float, complex, np.ndarray, np.generic)
# The aim is to build a SEIR (Susceptible → Exposed → Infected → Removed)
# Model with a number of (fittable) parameters which may even vary from
# district to district
# The basic model is taken from the webpage
# https://gabgoh.github.io/COVID/index.html
# and the implementation is done in Tensorflow 1.3
# The temporal dimension is treated by unrolling the loop
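# For orientation, a minimal plain-Python sketch of the unrolled discrete-time SEIR update that the
# Tensorflow model below generalizes (all parameter names and values here are illustrative only):
# S, E, I, R = 1.0 - 1e-4, 0.0, 1e-4, 0.0      # population fractions
# beta, sigma, gamma = 0.3, 0.2, 0.1           # infection, incubation and recovery rates per day
# for t in range(100):                         # "unrolling the loop" over the time dimension
#     newE, newI, newR = beta * S * I, sigma * E, gamma * I
#     S, E, I, R = S - newE, E + newE - newI, I + newI - newR, R + newR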
CalcFloatStr = 'float32'
if False:
defaultLossDataType = "float64"
else:
defaultLossDataType = "float32"
defaultTFDataType = "float32"
defaultTFCpxDataType = "complex64"
def addDicts(dict1, dict2):
"""Merge dictionaries and keep values of common keys in list"""
dict3 = {**dict1, **dict2}
for key, value in dict3.items():
if key in dict1 and key in dict2:
val2 = dict1[key]
if equalShape(value.shape, val2.shape):
dict3[key] = value + val2
else:
print('Shape 1: ' + str(value.shape) + ", shape 2:" + str(val2.shape))
raise ValueError('Shapes of transfer values to add are not the same')
return dict3
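# e.g. (illustration): addDicts({'a': x}, {'a': y, 'b': z}) returns {'a': x + y, 'b': z},
# provided x and y have the same shape (up to prepended singleton dimensions).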
def Init(noCuda=False):
"""
initializes the tensorflow system
"""
if noCuda is True:
os.environ["CUDA_VISIBLE_DEVICES"] = '-1'
tf.compat.v1.reset_default_graph() # currently just to shield tensorflow from the main program
# Init()
### tf.compat.v1.disable_eager_execution()
# sess = tf.compat.v1.Session()
# tf.device("/gpu:0")
# Here some code from the inverse Modeling Toolbox (<NAME>)
def iterativeOptimizer(myTFOptimization, NIter, loss, verbose=False):
if NIter <= 0:
raise ValueError("NIter has to be positive")
for n in range(NIter):
myTFOptimization() # summary?
myloss = loss().numpy()
if np.isnan(myloss):
raise ValueError("Loss is NaN. Aborting iteration.")
if verbose:
print(str(n) + "/" + str(NIter) + ": " + str(myloss))
return myloss # , summary
def optimizer(loss, otype='L-BFGS', NIter=300, oparam={'gtol': 0, 'learning_rate': None}, var_list=None, verbose=False):
"""
defines an optimizer to be used with "Optimize"
This function combines various optimizers from tensorflow and SciPy (with tensorflow compatibility)
Parameters
----------
loss : the loss function, which is a tensor that has been initialized but contains variables
otype (default: L-BFGS) : The method of optimization to be used. The following options exist:
from Tensorflow:
sgrad
nesterov
adadelta
adam
proxgrad
and from SciPy all the optimizers in the package tf.contrib.opt.ScipyOptimizerInterface
NIter (default: 300) : Number of iterations to be used
oparam : a dictionary to be passed to the detailed optimizers containing optimization parameters (e.g. "learning-rate"). See the individual documentation
var_list (default: None meaning all) : list of tensorflow variables to be used during minimization
verbose (default: False) : prints the loss during iterations if True
Returns
-------
an optimizer function (or lambda function)
See also
-------
Example
-------
"""
if NIter < 0:
raise ValueError("NIter has to be positive or zero")
optimStep = 0
if (var_list is not None) and not np.iterable(var_list):
var_list = [var_list]
# these optimizer types work strictly stepwise
if otype == 'SGD':
learning_rate = oparam["learning_rate"]
if learning_rate == None:
learning_rate = 0.00003
print("setting up sgrad optimization with ", NIter, " iterations.")
optimStep = lambda loss: tf.keras.optimizers.SGD(learning_rate).minimize(loss, var_list=var_list) # 1.0
elif otype == 'nesterov':
learning_rate = oparam["learning_rate"]
if learning_rate == None:
learning_rate = 0.00002
print("setting up nesterov optimization with ", NIter, " iterations.")
optimStep = lambda loss: tf.keras.optimizers.SGD(learning_rate, nesterov=True, momentum=1e-4).minimize(loss, var_list=var_list) # 1.0
elif otype == 'adam':
learning_rate = oparam["learning_rate"]
if learning_rate == None:
learning_rate = 0.0013
print("setting up adam optimization with ", NIter, " iterations, learning_rate: ", learning_rate, ".")
optimStep = lambda loss: tf.keras.optimizers.Adam(learning_rate, 0.9, 0.999).minimize(loss, var_list=var_list) # 1.0
elif otype == 'adadelta':
learning_rate = oparam["learning_rate"]
if learning_rate == None:
learning_rate = 0.0005
print("setting up adadelta optimization with ", NIter, " iterations.")
optimStep = lambda loss: tf.keras.optimizers.Adadelta(learning_rate, 0.9, 0.999).minimize(loss, var_list=var_list) # 1.0
elif otype == 'adagrad':
learning_rate = oparam["learning_rate"]
if learning_rate == None:
learning_rate = 0.0012
print("setting up adagrad optimization with ", NIter, " iterations.")
optimStep = lambda loss: tf.keras.optimizers.Adagrad(learning_rate).minimize(loss, var_list=var_list) # 1.0
if optimStep != 0:
myoptim = lambda: optimStep(loss)
myOptimizer = lambda: iterativeOptimizer(myoptim, NIter, loss, verbose=verbose)
# these optimizers perform the whole iteration
elif otype == 'L-BFGS':
# normFac = None
# if "normFac" in oparam: # "max", "mean" or None
# normFac = oparam["normFac"]
func = funfac.function_factory(loss, var_list) # normFactors=normFac
# convert initial model parameters to a 1D tf.Tensor
init_params = func.initParams() # retrieve the (normalized) initialization parameters
# use the L-BFGS solver
myOptimizer = lambda: LBFGSWrapper(func, init_params, NIter)
# myOptimizer = lambda: tfp.optimizer.lbfgs_minimize(value_and_gradients_function=func,
# initial_position=init_params,
# tolerance=1e-8,
# max_iterations=NIter)
# # f_relative_tolerance = 1e-6,
else:
raise ValueError('Unknown optimizer: ' + otype)
return myOptimizer # either an iterative one or 'L-BFGS'
def LBFGSWrapper(func, init_params, NIter):
optim_results = tfp.optimizer.lbfgs_minimize(value_and_gradients_function=func,
initial_position=init_params,
tolerance=1e-7,
num_correction_pairs=5,
max_iterations=NIter)
# f_relative_tolerance = 1e-6
# converged, failed, num_objective_evaluations, final_loss, final_gradient, position_deltas, gradient_deltas
if not optim_results.converged:
tf.print("WARNING: optimization did not converge")
if optim_results.failed:
tf.print("WARNING: lines search failed during iterations")
res = optim_results.position
func.assign_new_model_parameters(res)
return optim_results.objective_value
def doNormalize(val, normalize, reference):
if normalize == "max":
val = val * tf.reduce_max(reference)
elif normalize == "mean":
val = val * tf.reduce_mean(reference)
return val
def invNormalize(val, normalize, reference):
if normalize == "max":
val = val / tf.reduce_max(reference)
elif normalize == "mean":
val = val / tf.reduce_mean(reference)
return val
@tf.custom_gradient
def monotonicPos(val, b2=1.0): # can also be called forcePositive
"""
applies a monotonic transform mapping the full real axis to the positive half space
This can be used to implicitely force the reconstruction results to be all-positive. The monotonic function is derived from a hyperboloid:
The function is continues and differentiable.
This function can also be used as an activation function for neural networks.
Parameters
----------
val : tensorflow array
The array to be transformed
Returns
-------
tensorflow array
The transformed array
Example
-------
"""
mysqrt = tf.sqrt(b2 + tf.square(val) / 4.0)
def grad(dy):
return dy * (0.5 + val / mysqrt / 4.0), None # no abs here!
# return mysqrt + val / 2.0, grad # This is the original simple equation, but it is numerically very unstable for small numbers!
# slightly better but not good:
# return val * (0.5 + tf.sign(val) * tf.sqrt(b2/tf.square(val)+0.25)), grad
taylor1 = b2 / (2.0 * mysqrt)
diff = val / 2.0 + mysqrt # for negative values this is a difference
# print('diff: ' + str(diff)+", val"+str(val)+" taylor:"+str(taylor1))
# if tf.abs(diff/val) < 2e-4: # this seems a good compromise between finite subtraction and taylor series
Order2N = val * tf.where(tf.abs(diff / val) < 2e-4, taylor1, diff)
p = taylor1 + (b2 + Order2N) / (2.0 * mysqrt), grad # this should be numerically more stable
return p
# This monotonic positive function is based on a hyperbola, modified so that one branch approaches zero and the other reaches a slope of one
def invMonotonicPos(invinput, b2=1.0, Eps=0.0):
# a constant value > 0.0 0 which regulates the shape of the hyperbola. The bigger the smoother it becomes.
tfinit = tf.clip_by_value(invinput, clip_value_min=tf.constant(Eps, dtype=CalcFloatStr),
clip_value_max=tf.constant(np.Inf, dtype=CalcFloatStr)) # assertion to obtain only positive input for the initialization
# return tf.cast(tfinit - (tf.constant(b2) / tfinit), dtype=CalcFloatStr) # the inverse of monotonicPos
return (tf.square(tfinit) - b2) / tfinit # the inverse of monotonicPos
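# Quick illustration (not from the original code) of the forward/inverse pair above: with the default
# b2=1, monotonicPos maps x to x/2 + sqrt(1 + x**2/4) > 0 and invMonotonicPos undoes the mapping, e.g.
#   x = tf.constant([-3.0, 0.0, 3.0], dtype=CalcFloatStr)
#   y = monotonicPos(x)            # all entries strictly positive
#   x_back = invMonotonicPos(y)    # approximately recovers [-3, 0, 3]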
# def piecewisePos(res):
# mask = res>=0
# mask2 = ~mask
# res2 = 1.0 / (1.0-res(mask2))
# res(mask2) = res2; # this hyperbola has a value of 1, a slope of 1 and a curvature of 2 at zero X
# res(mask) = abssqr(res(mask)+0.5)+0.75 # this parabola has a value of 1, a slope of 1 and a curvature of 2 at zero X
# def invPiecewisePos(invinput):
# mask=model >= 1.0
# mask2 = ~mask
# res2=model * 0.0
# res2(mask) = sqrt(model(mask) - 0.75)-0.5
# res2(mask2) = (model(mask2)-1.0) / model(mask2)
# res = afkt(res2) # the inverse of monotonicPos
# def forcePositive(self, State):
# for varN, var in State.items():
# State[varN] = self.monotonicPos(State[varN])
# return State
# def Reset():
# tf.compat.v1.reset_default_graph() # clear everything on the GPU
# def Optimize(Fwd,Loss,tfinit,myoptimizer=None,NumIter=40,PreFwd=None):
def Optimize(myoptimizer=None, loss=None, NumIter=40, TBSummary=False, TBSummaryDir="C:\\NoBackup\\TensorboardLogs\\", resVars=None, lossScale=1.0):
"""
performs the tensorflow optimization given a loss function and an optimizer
The optimizer currently also needs to know about the loss, which is a (not-yet evaluated) tensor
Parameters
----------
myoptimizer : an optimizer. See for example "optimizer" and its arguments
loss : the loss() function with no arguments
NumIter (default: 40) : Number of iterations to be used, in case that no optimizer is provided. Otherwise this argument is NOT used but the optimizer knows about the number of iterations.
TBSummary (default: False) : If True, the summary information for tensorboard is stored
TBSummaryDir (default: "C:\\NoBackup\\TensorboardLogs\\") : The directory whre the tensorboard information is stored.
Eager (default: False) : Use eager execution
resVars (default: None) : Which tensors to evaluate and return at the end.
Returns
-------
a tuple of tensors
See also
-------
Example
-------
"""
if myoptimizer is None:
myoptimizer = lambda loss: optimizer(loss, NIter=NumIter) # if none was provided, use the default optimizer
if loss != None:
mystartloss = loss().numpy() * lossScale # eval()
start_time = time.time()
if TBSummary:
summary = myoptimizer()
else:
myoptimizer()
duration = time.time() - start_time
# if TBSummary:
# tb_writer = tf.summary.FileWriter(TBSummaryDir + 'Optimize', session.graph)
# merged = tf.summary.merge_all()
# summary = session.run(merged)
# tb_writer.add_summary(summary, 0)
try:
optName = myoptimizer.optName
except:
optName = "unkown optimizer"
if loss != None:
myloss = loss().numpy() * lossScale
print(optName + ': Exec. time:{:.4}'.format(duration), '. Start L.:{:.4}'.format(mystartloss), ', Final L.:{:.4}'.format(myloss),
'. Relative L.:{:.4}'.format(myloss / mystartloss))
else:
print(optName + ': Exec. time:{:.4}'.format(duration))
if resVars == None and loss != None:
return myloss
else:
res = []
if isinstance(resVars, list) or isinstance(resVars, tuple):
for avar in resVars:
if not isinstance(avar, tf.Tensor) and not isinstance(avar, tf.Variable):
print("WARNING: Variable " + str(avar) + " is NOT a tensor.")
res.append(avar)
else:
try:
res.append(avar.eval())
except ValueError:
print("Warning. Could not evaluate result variable" + avar.name + ". Returning [] for this result.")
res.append([])
else:
res = resVars.eval()
return res
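# Illustrative only (not part of the original toolbox): a minimal sketch of how 'optimizer' and
# 'Optimize' are combined, using a made-up quadratic toy loss and made-up optimizer settings.
def _exampleOptimize():
    x = tf.Variable(0.0, dtype=CalcFloatStr)        # a single fit variable
    loss = lambda: tf.square(x - 3.0)               # argument-free loss, as expected by Optimize
    myopt = optimizer(loss, otype='adam', NIter=100, oparam={'learning_rate': 0.1}, var_list=[x])
    Optimize(myopt, loss=loss)                      # runs the iterations and prints start/final loss
    return x.numpy()                                # should end up close to 3.0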
# nip.view(toshow)
def datatype(tfin):
if istensor(tfin):
return tfin.dtype
else:
if isinstance(tfin, np.ndarray):
return tfin.dtype.name
return tfin # assuming this is already the type
def istensor(tfin):
return isinstance(tfin, tf.Tensor) or isinstance(tfin, tf.Variable)
def iscomplex(mytype):
mytype = str(datatype(mytype))
return (mytype == "complex64") or (mytype == "complex128") or (mytype == "complex64_ref") or (mytype == "complex128_ref") or (mytype == "<dtype: 'complex64'>") or (
mytype == "<dtype: 'complex128'>")
def isNumber(val):
return isinstance(val, numbers.Number)
def isList(val):
return isinstance(val, list)
def isTuple(val):
return isinstance(val, tuple)
def removeCallable(ten):
if callable(ten):
return ten()
else:
return ten
def totensor(img):
if istensor(img) or callable(img):
return img
if isList(img):
img = np.array(img, CalcFloatStr)
if not isNumber(img) and ((img.dtype == defaultTFDataType) or (img.dtype == defaultTFCpxDataType)):
img = tf.constant(img)
else:
if iscomplex(img):
img = tf.constant(img, defaultTFCpxDataType)
else:
img = tf.constant(img, defaultTFDataType)
return img
def doCheckScaling(fwd, meas):
sF = tf.reduce_mean(input_tensor=totensor(fwd)).numpy()
sM = tf.reduce_mean(input_tensor=totensor(meas)).numpy()
R = sM / sF
if abs(R) < 0.7 or abs(R) > 1.3:
print("Mean of measured data: " + str(sM) + ", Mean of forward model with initialization: " + str(sF) + " Ratio: " + str(R))
print(
"WARNING!! The forward projected sum is significantly different from the provided measured data. This may cause problems during optimization. To prevent this warning: set checkScaling=False for your loss function.")
return tf.debugging.check_numerics(fwd, "Detected NaN or Inf in loss function") # also checks for NaN values during runtime
def Loss_SimpleGaussian(fwd, meas, lossDataType=None, checkScaling=False):
if lossDataType is None:
lossDataType = defaultLossDataType
with tf.compat.v1.name_scope('Loss_SimpleGaussian'):
# return tf.reduce_sum(tf.square(fwd-meas)) # version without normalization
return tf.reduce_mean(
input_tensor=tf.cast(tf.square(fwd - meas), lossDataType)) # to make everything scale-invariant. The TF framework hopefully takes care of precomputing this
# %% this section defines a number of loss functions. Note that they often need fixed input arguments for measured data and sometimes more parameters
def Loss_FixedGaussian(fwd, meas, lossDataType=None, checkScaling=False):
if lossDataType is None:
lossDataType = defaultLossDataType
if checkScaling:
fwd = doCheckScaling(fwd, meas)
with tf.compat.v1.name_scope('Loss_FixedGaussian'):
# return tf.reduce_sum(tf.square(fwd-meas)) # version without normalization
if iscomplex(fwd.dtype.as_numpy_dtype):
mydiff = (fwd - meas)
return tf.reduce_mean(input_tensor=tf.cast(mydiff * tf.math.conj(mydiff), lossDataType)) / \
tf.reduce_mean(input_tensor=tf.cast(meas, lossDataType)) # to make everything scale-invariant. The TF framework hopefully takes care of precomputing this
else:
return tf.reduce_mean(input_tensor=tf.cast(tf.square(fwd - meas), lossDataType)) / tf.reduce_mean(
input_tensor=tf.cast(meas, lossDataType)) # to make everything scale-invariant. The TF framework hopefully takes care of precomputing this
def Loss_ScaledGaussianReadNoise(fwd, meas, RNV=1.0, lossDataType=None, checkScaling=False):
if lossDataType is None:
lossDataType = defaultLossDataType
if checkScaling:
fwd = doCheckScaling(fwd, meas)
offsetcorr = tf.cast(tf.reduce_mean(tf.math.log(tf.math.maximum(meas, tf.constant(0.0, dtype=CalcFloatStr)) + RNV)),
lossDataType) # this was added to have the ideal fit yield a loss equal to zero
# with tf.compat.v1.name_scope('Loss_ScaledGaussianReadNoise'):
XMinusMu = tf.cast(meas - fwd, lossDataType)
muPlusC = tf.cast(tf.math.maximum(fwd, 0.0) + RNV, lossDataType) # the clipping at zero was introduced to avoid division by zero
# if tf.reduce_any(RNV == tf.constant(0.0, CalcFloatStr)):
# print("RNV is: "+str(RNV))
# raise ValueError("RNV is zero!.")
# if tf.reduce_any(muPlusC == tf.constant(0.0, CalcFloatStr)):
# print("Problem: Division by zero encountered here")
# raise ValueError("Division by zero HERE!.")
Fwd = tf.math.log(muPlusC) + tf.square(XMinusMu) / muPlusC
# Grad=Grad.*(1.0-2.0*XMinusMu-XMinusMu.^2./muPlusC)./muPlusC;
Fwd = tf.reduce_mean(input_tensor=Fwd)
# if tf.math.is_nan(Fwd):
# if tf.reduce_any(muPlusC == tf.constant(0.0, CalcFloatStr)):
# print("Problem: Division by zero encountered")
# raise ValueError("Division by zero.")
# else:
# raise ValueError("Nan encountered.")
return Fwd # - offsetcorr # to make everything scale-invariant. The TF framework hopefully takes care of precomputing this
# @tf.custom_gradient
def Loss_Poisson(fwd, meas, Bg=0.05, checkPos=False, lossDataType=None, checkScaling=False):
if lossDataType is None:
lossDataType = defaultLossDataType
if checkScaling:
fwd = doCheckScaling(fwd, meas)
with tf.compat.v1.name_scope('Loss_Poisson'):
# meas[meas<0]=0
meanmeas = tf.reduce_mean(meas)
# NumEl=tf.size(meas)
if checkPos:
fwd = ((tf.sign(fwd) + 1) / 2) * fwd
FwdBg = tf.cast(fwd + Bg, lossDataType)
totalError = tf.reduce_mean(input_tensor=(FwdBg - meas) - meas * tf.math.log(
(FwdBg) / (meas + Bg))) / meanmeas # the modification in the log normalizes the error. For full normalization see PoissonErrorAndDerivNormed
# totalError = tf.reduce_mean((fwd-meas) - meas * tf.log(fwd)) / meanmeas # the modification in the log normalizes the error. For full normalization see PoissonErrorAndDerivNormed
# def grad(dy):
# return dy*(1.0 - meas/(fwd+Bg))/meanmeas
# return totalError,grad
return totalError
def Loss_Poisson2(fwd, meas, Bg=0.05, checkPos=False, lossDataType=None, checkScaling=False):
if lossDataType is None:
lossDataType = defaultLossDataType
if checkScaling:
fwd = doCheckScaling(fwd, meas)
# with tf.compat.v1.name_scope('Loss_Poisson2'):
# meas[meas<0]=0
meanmeas = tf.reduce_mean(meas)
meassize = np.prod(meas.shape)
# NumEl=tf.size(meas)
if checkPos:
fwd = ((tf.sign(fwd) + 1) / 2) * fwd # force positive
# totalError = tf.reduce_mean((fwd-meas) - meas * tf.log(fwd)) / meanmeas # the modification in the log normalizes the error. For full normalization see PoissonErrorAndDerivNormed
@tf.custom_gradient
def BarePoisson(myfwd):
def grad(dy):
mygrad = dy * (1.0 - meas / (myfwd + Bg)) / meassize # the size accounts for the mean operation (rather than sum)
# image_shaped_input = tf.reshape(mygrad, [-1, mygrad.shape[0], mygrad.shape[1], 1])
# tf.summary.image('mygrad', image_shaped_input, 10)
return mygrad
toavg = (myfwd + Bg - meas) - meas * tf.math.log((myfwd + Bg) / (meas + Bg))
toavg = tf.cast(toavg, lossDataType)
totalError = tf.reduce_mean(input_tensor=toavg) # the modification in the log normalizes the error. For full normalization see PoissonErrorAndDerivNormed
return totalError, grad
return BarePoisson(fwd) / meanmeas
# ---- End of code from the inverse Modelling Toolbox
def retrieveData():
import json_to_pandas
dl = json_to_pandas.DataLoader() # instantiate DataLoader #from_back_end=True
data_dict = dl.process_data() # loads and forms the data dictionary
rki_data = data_dict["RKI_Data"] # only RKI dataframe
print('Last Day loaded: ' + str(pd.to_datetime(np.max(rki_data.Meldedatum), unit='ms')))
return rki_data
def deltas(WhenHowMuch, SimTimes):
res = np.zeros(SimTimes)
for w, h in WhenHowMuch:
res[w] = h;
return res
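# e.g. (illustration): deltas([(5, 100.0), (20, 50.0)], 30) yields a length-30 array that is zero
# everywhere except for 100 at index 5 and 50 at index 20.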
def showResiduum(meas, fit):
res1 = np.mean(meas - fit, (1, 2))
print('Loss: ' + str(np.mean(abs(res1) ** 2)))
plt.plot(res1)
plt.xlabel('days')
plt.ylabel('mean difference / cases')
plt.title('residuum')
def plotAgeGroups(res1, res2):
plt.figure()
plt.title('Age Groups')
plt.plot(res1)
plt.gca().set_prop_cycle(None)
plt.plot(res2, '--')
plt.xlabel('days')
plt.ylabel('population')
class axisType:
const = 'const'
gaussian = 'gaussian'
sigmoid = 'sigmoid'
individual = 'individual'
uniform = 'uniform'
def prependOnes(s1, s2):
l1 = len(s1);
l2 = len(s2)
maxDim = max(l1, l2)
return np.array((maxDim - l1) * [1] + list(s1)), np.array((maxDim - l2) * [1] + list(s2))
def equalShape(s1, s2):
if isinstance(s1, tf.TensorShape):
s1 = s1.as_list()
if isinstance(s2, tf.TensorShape):
s2 = s2.as_list()
s1, s2 = prependOnes(s1, s2)
return np.linalg.norm(s1 - s2) == 0
class Axis:
def ramp(self):
x = self.shape
if isinstance(x, np.ndarray) or isNumber(x) or isTuple(x) or isList(x):
aramp = tf.constant(np.arange(np.max(x)), dtype=CalcFloatStr)
if isNumber(x):
x = [x]
x = tf.reshape(aramp, x) # if you get an error here, the size is not 1D!
else:
x = totensor(x)
return x
def __init__(self, name, numAxis, maxAxes, entries=1, queue=False, labels=None):
self.name = name
self.queue = queue
self.shape = np.ones(maxAxes, dtype=int)
self.shape[-numAxis] = entries
self.curAxis = numAxis
self.Labels = labels
# self.initFkt = self.initZeros()
def __str__(self):
return self.name + ", number:" + str(self.curAxis) + ", is queue:" + str(self.queue)
def __repr__(self):
return self.__str__()
# def initZeros(self):
# return tf.constant(0.0, dtype=CalcFloatStr, shape=self.shape)
#
# def initOnes(self):
# return tf.constant(1.0, dtype=CalcFloatStr, shape=self.shape)
def init(self, vals):
if isNumber(vals):
return tf.constant(vals, dtype=CalcFloatStr, shape=self.shape)
else:
if isinstance(vals, list) or isinstance(vals, np.ndarray):
if len(vals) != np.prod(self.shape):
raise ValueError('Number of initialization values ' + str(len(vals)) + ' of variable ' + self.name + ' does not match its shape ' + str(self.shape))
vals = np.reshape(np.array(vals, dtype=CalcFloatStr), self.shape)
# if callable(vals):
# vshape = vals().shape
# else:
# vshape = vals.shape
# if not equalShape(vshape, self.shape):
# raise ValueError('Initialization shape ' + str(vshape) + ' of variable ' + self.name + ' does not match its shape ' + str(self.shape))
return totensor(vals)
# def initIndividual(self, vals):
# return tf.variable(vals, dtype=CalcFloatStr)
def initGaussian(self, mu=0.0, sig=1.0):
x = self.ramp()
mu = totensor(mu)
sig = totensor(sig)
initVals = tf.exp(-(x - mu) ** 2. / (2 * (sig ** 2.)))
initVals = initVals / tf.reduce_sum(input_tensor=initVals) # normalize (numerical !, since the domain is not infinite)
return initVals
def initDelta(self, pos=0):
x = self.ramp()
initVals = tf.cast(x == pos, CalcFloatStr) # 1.0 *
return initVals
def initSigmoid(self, mu=0.0, sig=1.0, offset=0.0):
"""
models a sigmoidal function starting near 0,
reaching 0.5 at mu and extending to one at inf, the width being controlled by sigma
"""
x = self.ramp()
mu = totensor(mu);
sig = totensor(sig)
initVals = 1. / (1. + tf.exp(-(x - mu) / sig)) + offset
initVals = initVals / tf.reduce_sum(input_tensor=initVals) # normalize (numerical !, since the domain is not infinite)
return initVals
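# Illustrative only (not from the original code): how an Axis and its initializers are typically used.
# ax = Axis('time', 1, 4, entries=100)        # occupies the last of 4 dimensions, with 100 entries
# profile = ax.initSigmoid(mu=50.0, sig=5.0)  # normalized sigmoidal profile along that axis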
def NDim(var):
if istensor(var):
return var.shape.ndims
else:
return var.ndim
def subSlice(var, dim, sliceStart, sliceEnd): # extracts a subslice along a particular dimension
numdims = NDim(var)
idx = [slice(sliceStart, sliceEnd) if (d == dim or numdims + dim == d) else slice(0, None) for d in range(numdims)]
return var[idx]
def firstSlice(var, dim): # extracts the first subslice along a particular dimension
return subSlice(var, dim, 0, 1)
def lastSlice(var, dim): # extracts the last subslice along a particular dimension
return subSlice(var, dim, -1, None)
def reduceSumTo(State, dst):
# redsz = min(sz1, sz2)
if isinstance(dst, np.ndarray):
dstSize = np.array(dst.shape)
else:
dstSize = np.array(dst.shape.as_list(), dtype=int)
if len(dst.shape) == 0: # i.e. a scalar
dstSize = np.ones(State.ndim, dtype=int)
rs = np.array(State.shape.as_list(), dtype=int)
toReduce = np.nonzero((rs > dstSize) & (dstSize == 1))
toReduce = list(toReduce[0])
if toReduce is not None:
State = tf.reduce_sum(input_tensor=State, axis=toReduce, keepdims=True)
return State
# class State:
# def __init__(self, name='aState'):
# self.name = name
# self.Axes = {}
class Model:
def __init__(self, name='stateModel', maxAxes=4, lossWeight={}, rand_seed=1234567):
self.__version__ = 1.02
self.lossWeight = {}
for varN in lossWeight:
self.lossWeight[varN] = tf.Variable(lossWeight[varN], dtype=defaultTFDataType)
self.name = name
self.maxAxes = maxAxes
self.curAxis = 1
self.QueueStates = {} # stores the queue axis in every entry
self.Axes = {}
self.RegisteredAxes = [] # just to have a convenient way of indexing them
self.State = {} # dictionary of state variables
self.Var = {} # may be variables or lambdas
self.VarDisplayLog = {} # display this variable with a logarithmic slider
self.VarAltAxes = {} # alternative list of axes numbers to interpret the meaning of this multidimensional variable. This is needed for example for matrices connecting a dimension to itself
self.rawVar = {} # saves the raw variables
self.toRawVar = {} # stores the inverse functions to initialize the rawVar
self.toVar = {} # stores the function to get from the rawVar to the Var
self.Original = {} # here the values previous to a distortion are stored (for later comparison)
self.Distorted = {} # here the values previous to a distortion are stored (for later comparison)
self.Simulations = {}
self.Measurements = {}
self.FitResultVals = {} # resulting fit results (e.g. forward model or other curves)
self.FitResultVars = {} # resulting fit variables
self.Rates = [] # stores the rate equations
self.Loss = []
self.ResultCalculator = {} # remembers the variable names that define the results
self.ResultVals = {}
self.Progression = {} # dictionary storing the state and resultVal(s) progression (List per Key)
self.DataDict = {} # used for plotting with bokeh
self.WidgetDict = {} # used for plotting with bokeh
self.FitButton = None
self.FitLossWidget = None
self.FitLossChoices = ['Poisson', 'SimpleGaussian', 'Gaussian', 'ScaledGaussian']
self.FitLossChoiceWidget = None
self.FitOptimChoices = ['L-BFGS', 'SGD','nesterov', 'adam', 'adadelta', 'adagrad']
self.FitOptimChoiceWidget = None
self.FitOptimLambdaWidget = None
self.FitStartWidget = None
self.FitStopWidget = None
self.Regularizations = [] # list tuples of regularizers with type, weight and name of variable e.g. [('TV',0.1, 'R')]
self.plotCumul = False
self.plotMatplotlib = False
np.random.seed(rand_seed)
def timeAxis(self, entries, queue=False, labels=None):
name = 'time'
axis = Axis(name, self.maxAxes, self.maxAxes, entries, queue, labels)
if name not in self.RegisteredAxes:
self.RegisteredAxes.append(axis)
return axis
def addAxis(self, name, entries, queue=False, labels=None):
axis = Axis(name, self.curAxis, self.maxAxes, entries, queue, labels)
self.curAxis += 1
self.Axes[name] = axis
self.RegisteredAxes.append(axis)
def initGaussianT0(self, t0, t, sigma=2.0):
initVals = tf.exp(-(t - t0) ** 2. / (2 * (sigma ** 2.)))
return initVals
def initDeltaT0(self, t0, t, sig=2.0):
initVals = ((t - t0) == 0.0) * 1.0
return initVals
def initSigmoidDropT0(self, t0, t, sig, dropTo=0.0):
initVals = (1. - dropTo) / (1. + tf.exp((t - t0) / sig)) + dropTo
return initVals
def newState(self, name, axesInit=None, makeInitVar=False):
# state = State(name)
# self.States[name]=state
if name in self.ResultCalculator:
raise ValueError('Key ' + name + ' already exists in results.')
elif name in self.State:
raise ValueError('Key ' + name + ' already exists as a state.')
prodAx = None
if not isinstance(axesInit, dict):
if (not isNumber(axesInit)) and (np.prod(removeCallable(axesInit).shape) != 1):
raise ValueError("State " + name + " has a non-scalar initialization but no related axis. Please make it a dictionary with keys being axes names.")
else:
# no changes (like reshape to the original tensors are allowed since this "breaks" the chain of connections
axesInit = {'StartVal': totensor(axesInit)} # so that it can be appended to the time trace
if axesInit is not None:
res = []
hasQueue = False
for AxName, initVal in axesInit.items():
if AxName in self.Axes:
myAxis = self.Axes[AxName]
if (initVal is None):
continue
# initVal = myAxis.init(1.0/np.prod(myAxis.shape, dtype=CalcFloatStr))
if (not isinstance(initVal, Axis) and not callable(initVal)) or isNumber(initVal):
initVal = myAxis.init(initVal)
if myAxis.queue:
if hasQueue:
raise ValueError("Each State can only have one queue axis. This state " + name + " wants to have more than one.")
hasQueue = True
self.QueueStates[name] = myAxis
else:
initVal = totensor(initVal)
if res == []:
res = initVal
elif callable(res):
if callable(initVal):
res = res() * initVal()
else:
res = res() * initVal
else:
if callable(initVal):
res = res * initVal()
else:
res = res * initVal
if makeInitVar: # make the initialization value a variable
prodAx = self.newVariables({name: res}) # make the accumulated initial value a fittable variable
elif not callable(res):
prodAx = lambda: res
else:
prodAx = res
self.State[name] = prodAx
def newVariables(self, VarList=None, forcePos=True, normalize='max', b2=1.0, overwrite=True, displayLog=True, AltAxes=None):
if VarList is not None:
for name, initVal in VarList.items():
if name in self.Var:
if not overwrite:
raise ValueError("Variable " + name + " was previously defined.")
else:
self.assignNewVar(name, initVal)
print('assigned new value to variable: ' + name)
continue
if name in self.State:
raise ValueError("Variable " + name + " is already defined as a State.")
if name in self.ResultVals:
raise ValueError("Variable " + name + " is already defined as a Result.")
toVarFkt = lambda avar: totensor(avar)
toRawFkt = lambda avar: totensor(avar)
if normalize is not None:
toRawFkt2 = lambda avar: invNormalize(toRawFkt(avar), normalize, initVal);
toVarFkt2 = lambda avar: toVarFkt(doNormalize(avar, normalize, initVal))
else:
toRawFkt2 = toRawFkt
toVarFkt2 = toVarFkt
if forcePos:
toRawFkt3 = lambda avar: invMonotonicPos(toRawFkt2(avar), b2);
toVarFkt3 = lambda avar: toVarFkt2(monotonicPos(avar, b2))
else:
toRawFkt3 = toRawFkt2
toVarFkt3 = toVarFkt2
rawvar = tf.Variable(toRawFkt3(initVal), name=name, dtype=CalcFloatStr)
self.toRawVar[name] = toRawFkt3
self.rawVar[name] = rawvar # this is needed for optimization
self.toVar[name] = toVarFkt3
self.Var[name] = lambda: toVarFkt3(rawvar)
self.VarDisplayLog[name] = displayLog
self.Original[name] = rawvar.numpy() # store the original
self.VarAltAxes[name] = AltAxes
return self.Var[name] # return the last variable for convenience
def restoreOriginal(self, dummy=None):
for varN, rawval in self.Original.items():
self.rawVar[varN].assign(rawval)
self.updateAllWidgets()
def assignWidgetVar(self, newval, varname=None, relval=None, idx=None, showResults=None):
"""
is called when a value has been changed. The coordinates this change refers to are determined by the
drop-down widget list accessible via idx
"""
# print('assignWidgetVar: '+varname+", val:" + str(newval))
mywidget = self.WidgetDict[varname]
if isinstance(mywidget, tuple) or isinstance(mywidget, list):
mywidget = mywidget[0]
self.adjustMinMax(mywidget, newval.new)
if idx is None:
newval = np.reshape(newval.new, self.Var[varname]().shape)
else:
newval = newval.new
idx = self.idxFromDropList(self.Var[varname]().shape, idx)
# print('assigning '+str(newval)+' to index: '+str(idx))
res = self.assignNewVar(varname, newval, relval, idx)
if showResults is not None:
# self.simulate('measured')
showResults()
return res
def assignNewVar(self, varname, newval=None, relval=None, idx=None):
if newval is not None:
newval = self.toRawVar[varname](newval)
else:
newval = self.toRawVar[varname](self.Var[varname]() * relval)
if idx is not None:
# print('Assign Idx: '+str(idx)+", val:" + str(newval))
oldval = self.rawVar[varname].numpy()
oldval[idx] = newval # .flat
newval = oldval
self.rawVar[varname].assign(newval)
return self.rawVar[varname]
def addRate(self, fromState, toState, rate, queueSrc=None, queueDst=None, name=None, hasTime=False, hoSumDims=None, resultTransfer=None, resultScale=None): # S ==> I[0]
if queueSrc is not None:
ax = self.QueueStates[fromState]
if queueSrc != ax.name and queueSrc != "total":
raise ValueError('The source state ' + fromState + ' does not have an axis named ' + queueSrc + ', but it was given as queueSrc.')
if queueDst is not None:
ax = self.QueueStates[toState]
if queueDst != ax.name:
raise ValueError('The destination state ' + toState + ' does not have an axis named ' + queueDst + ', but it was given as queueDst.')
if hoSumDims is not None:
hoSumDims = [- self.Axes[d].curAxis for d in hoSumDims]
self.Rates.append([fromState, toState, rate, queueSrc, queueDst, name, hasTime, hoSumDims, resultTransfer, resultScale])
def findString(self, name, State=None):
if State is None:
State = self.State
if name in self.Var:
return self.Var[name]
elif name in self.Axes:
return self.Axes[name]
elif name in State:
return State[name]
elif name in self.ResultVals:
return self.ResultVals[name]
else:
raise ValueError('findString: Value ' + name + ' not found in Vars, States or Results')
def reduceToResultByName(self, transferred, resultTransfer, resultScale=None):
Results = {}
if isinstance(resultTransfer, list) or isinstance(resultTransfer, tuple):
resultTransferName = resultTransfer[0]
resultT = tf.reduce_sum(transferred, self.findAxesDims(resultTransfer[1:]), keepdims=True)
else:
resultTransferName = resultTransfer
resultT = transferred
if resultScale is not None:
resultT = resultT * resultScale
if resultTransferName in Results:
if resultT.shape == Results[resultTransferName].shape:
Results[resultTransferName] = Results[resultTransferName] + resultT
else:
raise ValueError('Shape not the same in resultTransfer ' + resultTransferName)
else:
Results[resultTransferName] = resultT
return Results
def applyRates(self, State, time):
toQueue = {} # stores the items to enter into the destination object
Results = {}
# insert here the result variables
OrigStates = State.copy() # copies the dictionary but NOT the variables in it
for fromName, toName, rate, queueSrc, queueDst, name, hasTime, hoSumDims, resultTransfer, resultScale in self.Rates:
if isinstance(rate, str):
rate = self.findString(rate)
higherOrder = None
if isinstance(fromName, list) or isinstance(fromName, tuple): # higher order rate
higherOrder = fromName[1:]
fromName = fromName[0]
fromState = OrigStates[fromName]
if queueSrc is not None:
if queueSrc in self.Axes:
axnum = self.Axes[queueSrc].curAxis
fromState = lastSlice(fromState, -axnum)
elif queueSrc == "total":
pass
else:
raise ValueError("Unknown queue source: " + str(queueSrc) + ". Please select an axis or \"total\".")
if hasTime:
if callable(rate):
if getNumArgs(rate) > 1:
transferred = rate(time,fromState) # calculate the transfer for this rate equation
else:
rate = rate(time) # calculate the transfer for this rate equation
transferred = fromState * rate # calculate the transfer for this rate equation
else:
tf.print("WARNING: hasTime is True, but the rate is not callable!")
transferred = fromState * rate # calculate the transfer for this rate equation
else:
if callable(rate):
if getNumArgs(rate) > 0:
transferred = rate(fromState) # calculate the transfer for this rate equation
else:
rate = rate() # calculate the transfer for this rate equation
transferred = fromState * rate # calculate the transfer for this rate equation
else:
transferred = fromState * rate # calculate the transfer for this rate equation
if higherOrder is not None:
for hState in higherOrder:
if hoSumDims is None:
hoSum = OrigStates[hState]
else:
hoSum = tf.reduce_sum(OrigStates[hState], hoSumDims, keepdims=True)
transferred = transferred * hoSum # apply higher order rates
if resultTransfer is not None:
if isinstance(resultTransfer, list) or isinstance(resultTransfer, tuple):
if isinstance(resultTransfer[0], list) or isinstance(resultTransfer[0], tuple):
for rT in resultTransfer:
Res = self.reduceToResultByName(transferred, rT, resultScale=resultScale)
Results = addDicts(Results, Res)
# State = addDicts(State, Res)
else:
Res = self.reduceToResultByName(transferred, resultTransfer, resultScale=resultScale)
Results = addDicts(Results, Res)
# State = addDicts(State, Res)
else:
Results = addDicts(Results, {resultTransfer: transferred})
# State = addDicts(State, {resultTransfer: transferred})
try:
toState = OrigStates[toName]
except KeyError:
raise ValueError('Error in Rate equation: state "' + str(toName) + '" was not declared. Please use Model.newState() first.')
if queueDst is not None: # handle the queuing
axnum = self.Axes[queueDst].curAxis
if toName in toQueue:
toS, lastAx = toQueue[toName]
else:
toS = firstSlice(toState, -axnum) * 0.0
if queueSrc == 'total':
scalarRate = tf.reduce_sum(transferred, keepdims=True)
toS = toS + reduceSumTo(scalarRate, toS)
else:
toS = toS + reduceSumTo(transferred, toS)
toQueue[toName] = (toS, axnum)
else: # just apply to the destination state
myTransfer = reduceSumTo(transferred, OrigStates[toName])
myTransfer = self.ReduceByShape(OrigStates[toName], myTransfer)
State[toName] = State[toName] + myTransfer
if queueSrc is None or queueSrc == "total":
myTransfer = reduceSumTo(transferred, State[fromName])
transferred = self.ReduceByShape(State[fromName], myTransfer)
State[fromName] = State[fromName] - transferred # the original needs to be individually subtracted!
else:
pass # this dequeing is automatically removed
self.advanceQueues(State, toQueue)
return State, Results
def ReduceByShape(self, State, Transfer):
factor = np.prod(np.array(Transfer.shape) / np.array(State.shape))
if factor != 1.0:
Transfer = Transfer * factor
return Transfer
def advanceQueues(self, State, toQueue):
for queueN in self.QueueStates:
dstState = State[queueN]
if queueN in toQueue: # is this queued state a target of a rate equation?
(dst, axnum) = toQueue[queueN] # unpack the information
myAx = self.QueueStates[queueN]
if axnum != myAx.curAxis:
raise ValueError("The axis " + myAx.name + " of the destination state " + queueN + " of a rate equation does not agree to the axis definition direction.")
else: # advance the state nonetheless, but fill zeros into the entry point
myAx = self.QueueStates[queueN]
axnum = myAx.curAxis
dstShape = dstState.shape.as_list()
dstShape[-axnum] = 1
dst = tf.zeros(dstShape)
# the line below advances the queue
State[queueN] = tf.concat((dst, subSlice(dstState, -axnum, None, -1)), axis=-axnum)
def removeDims(self, val, ndims):
extraDims = len(val.shape) - ndims
if extraDims > 0:
dimsToSqueeze = tuple(np.arange(extraDims))
val = tf.squeeze(val, axis=dimsToSqueeze)
return val
def recordResults(self, State, Results):
# record all States
NumAx = len(self.RegisteredAxes)
for vName, val in State.items():
# if vName in self.State:
val = self.removeDims(val, NumAx)
if vName not in self.Progression:
self.Progression[vName] = [val]
else:
self.Progression[vName].append(val)
# else: # this is a Result item, which may or may not be used in the calculations below
# pass
# raise ValueError('detected a State, which is not in States.')
for resName, res in Results.items():
res = self.removeDims(res, NumAx)
if resName not in self.ResultVals:
self.ResultVals[resName] = [res]
else:
self.ResultVals[resName].append(res)
# now record all calculated result values
for resName, calc in self.ResultCalculator.items():
res = calc(State)
res = self.removeDims(res, NumAx)
if resName not in self.ResultVals:
self.ResultVals[resName] = [res]
else:
self.ResultVals[resName].append(res)
def cleanupResults(self):
for sName, predicted in self.Progression.items():
predicted = tf.stack(predicted)
self.Progression[sName] = predicted
for predictionName, predicted in self.ResultVals.items():
predicted = tf.stack(predicted)
self.ResultVals[predictionName] = predicted
def checkDims(self, State):
for varN, var in State.items():
missingdims = self.maxAxes - len(var.shape)
if missingdims > 0:
newShape = [1] * missingdims + var.shape.as_list()
State[varN] = tf.reshape(var, newShape)
return State
def evalLambdas(self, State):
for varN, var in State.items():
if callable(var):
State[varN] = var()
return State
def traceModel(self, Tmax, verbose=False):
# print("tracing traceModel")
# tf.print("running traceModel")
State = self.State.copy() # to ensure that self is not overwritten
State = self.evalLambdas(State)
State = self.checkDims(State)
self.ResultVals = {}
self.Progression = {}
for t in range(Tmax):
if verbose:
print('tracing time step ' + str(t), end='\r')
tf.print('tracing time step ' + str(t), end='\r')
newState, Results = self.applyRates(State, t)
self.recordResults(State, Results)
State = newState
print()
self.cleanupResults()
# print(" .. done")
return State
def addResult(self, name, anEquation):
if name in self.ResultCalculator:
raise ValueError('Key ' + name + ' already exists in results.')
elif name in self.State:
raise ValueError('Key ' + name + ' already exists as a state.')
else:
self.ResultCalculator[name] = anEquation
# @tf.function
# def quadratic_loss_and_gradient(self, x): # x is a list of fit variables
# return tfp.math.value_and_gradient(
# lambda x: tf.reduce_sum(tf.math.squared_difference(x, self.predicted)), x)
@tf.function
def doBuildModel(self, dictToFit, Tmax, FitStart=0, FitEnd=1e10, oparam={"noiseModel": "Gaussian"}):
print("tracing doBuildModel")
# tf.print("running doBuildModel")
timeStart = time.time()
finalState = self.traceModel(Tmax)
Loss = None
for predictionName, measured in dictToFit.items():
predicted = self.ResultVals[predictionName]
try:
predicted = reduceSumTo(predicted, measured)
if predicted.shape != measured.shape:
raise ValueError('Shapes of simulated data and measured data have to agree. For Variable: ' + predictionName)
# predicted = reduceSumTo(tf.squeeze(predicted), tf.squeeze(measured))
except ValueError:
print('Predicted: ' + predictionName)
print('Predicted shape: ' + str(np.array(predicted.shape)))
print('Measured shape: ' + str(np.array(measured.shape)))
raise ValueError('Predicted and measured data have different shape. Try introducing np.newaxis into measured data.')
self.ResultVals[predictionName] = predicted # .numpy()
myFitEnd = min(measured.shape[0], predicted.shape[0], FitEnd)
if "noiseModel" not in oparam:
noiseModel = "Gaussian"
else:
noiseModel = oparam["noiseModel"]
if self.FitLossChoiceWidget is not None:
noiseModel = self.FitLossChoiceWidget.options[self.FitLossChoiceWidget.value][0]
# print('Noise model: '+noiseModel)
# if predictionName in self.lossWeight:
# fwd = tf.squeeze(self.lossWeight[predictionName] * predicted[FitStart:myFitEnd])
# meas = tf.squeeze(self.lossWeight[predictionName] * measured[FitStart:myFitEnd])
# else:
fwd = tf.squeeze(predicted[FitStart:myFitEnd])
meas = tf.squeeze(measured[FitStart:myFitEnd])
if fwd.shape != meas.shape:
raise ValueError('Shapes of simulated data and measured data have to agree.')
if noiseModel == "SimpleGaussian":
# resid = (fwd - meas)
# thisLoss = tf.reduce_mean(tf.square(resid))
thisLoss = Loss_SimpleGaussian(fwd, meas)
elif noiseModel == "Gaussian":
thisLoss = Loss_FixedGaussian(fwd, meas)
elif noiseModel == "ScaledGaussian":
thisLoss = Loss_ScaledGaussianReadNoise(fwd, meas)
elif noiseModel == "Poisson":
thisLoss = Loss_Poisson2(fwd, meas)
else:
ValueError("Unknown noise model: " + noiseModel)
if predictionName in self.lossWeight:
thisLoss = thisLoss * self.lossWeight[predictionName]
if Loss is None:
Loss = thisLoss
else:
Loss = Loss + thisLoss
timeEnd = time.time()
print('Model build finished: '+str(timeEnd-timeStart)+'s')
return Loss, self.ResultVals, self.Progression
def buildModel(self, dictToFit, Tmax, FitStart=0, FitEnd=1e10):
Loss = lambda: self.doBuildModel(dictToFit, Tmax, FitStart, FitEnd)
return Loss
def simulate(self, resname, varDict={}, Tmax=100, applyPoisson=False, applyGaussian=None):
finalState = self.traceModel(Tmax)
measured = {}
simulated = {}
for name in varDict:
varDict[name] = self.findString(name) # .numpy() # ev()
simulated[name] = varDict[name].numpy()
measured[name] = simulated[name]
if applyPoisson:
mm = np.min(measured[name])
if (mm < 0.0):
raise ValueError('Poisson noise generator discovered a negative number ' + str(mm) + ' in ' + name)
measured[name] = self.applyPoissonNoise(measured[name])
if applyGaussian is not None:
measured[name] = self.applyGaussianNoise(measured[name], sigma=applyGaussian)
self.Simulations[resname] = simulated
self.Measurements[resname] = measured
if applyPoisson or applyGaussian is not None:
toReturn = self.Measurements
else:
toReturn = self.Simulations
if len(toReturn.keys()) == 1:
dict = next(iter(toReturn.values()))
if len(dict.keys()) == 1:
return next(iter(dict.values()))
def applyPoissonNoise(self, data, maxPhotons=None):
if maxPhotons is not None:
if maxPhotons > 0:
return np.random.poisson(maxPhotons * data / np.max(data)).astype(CalcFloatStr)
else:
return data
else:
return np.random.poisson(data).astype(CalcFloatStr)
def applyGaussianNoise(self, data, sigma=1.0):
return np.random.normal(data, scale=sigma).astype(CalcFloatStr)
def toFit(self, listOfVars):
self.FitVars = listOfVars
def appendToFit(self, listOfVars):
self.FitVars = self.FitVars + listOfVars
# def loss_Fn(self):
# return self.Loss
def relDistort(self, var_list):
for name, relDist in var_list.items():
var = self.Var[name]
if callable(var):
self.rawVar[name].assign(self.toRawVar[name](var() * tf.constant(relDist)))
self.Distorted[name] = var().numpy()
else:
self.Var[name].assign(self.Var[name] * relDist)
self.Distorted[name] = var.numpy()
def regTV(self, weight, var, lossFn):
return lambda: lossFn() + weight() * tf.reduce_sum(tf.abs(var()[1:]-var()[:-1]))
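# Hedged illustration of regTV (comment only, values hypothetical): for a fitted variable
# whose current value is the vector [1.0, 1.5, 1.2], the added penalty is
# weight() * (|1.5 - 1.0| + |1.2 - 1.5|) = weight() * 0.8, i.e. a standard
# total-variation term that favours piecewise-constant time courses.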
def fit(self, data_dict, Tmax, NIter=50, otype='L-BFGS', oparam={"learning_rate": None},
verbose=False, lossScale=None, FitStart=0, FitEnd=1e10, regularizations=None):
# if "normFac" not in oparam:
# oparam["normFac"] = "max"
if self.Loss is None or self.Loss==[]:
needsRebuild = True
else:
needsRebuild = False
if regularizations is None:
regularizations = self.Regularizations
if self.FitButton is not None:
self.FitButton.style.button_color = 'red'
for avar in self.FitVars:
if avar not in self.Var:
raise ValueError('Variable to fit: ' + avar + ' was not found in defined variables in this model.')
for aweight in self.lossWeight:
if aweight not in data_dict:
print('WARNING: ' + aweight + ' was defined as a weight, but no dataset with this name exists! Ignoring entry.')
if "learning_rate" not in oparam:
oparam["learning_rate"] = None
if "noiseModel" not in oparam:
oparam["noiseModel"] = 'Gaussian'
if self.FitOptimLambdaWidget is not None: # overwrite call choice for method
oparam["learning_rate"] = self.FitOptimLambdaWidget.value
if self.FitStartWidget is not None:
FitStart = self.FitStartWidget.value
if self.FitStopWidget is not None:
FitEnd = self.FitStopWidget.value
self.Measurements['measured'] = {}
for predictionName, measured in data_dict.items():
data_dict[predictionName] = tf.constant(measured, CalcFloatStr)
self.Measurements['measured'][predictionName] = data_dict[predictionName] # save as measurement for plot
if self.FitOptimChoiceWidget is not None: # overwrite call choice for method
otype = self.FitOptimChoiceWidget.options[self.FitOptimChoiceWidget.value][0]
if self.FitLossChoiceWidget is not None:
mylossFkt = self.FitLossChoiceWidget.options[self.FitLossChoiceWidget.value][0]
if mylossFkt!=oparam['noiseModel']:
oparam['noiseModel'] = mylossFkt
needsRebuild=True
else:
print('same noise model; reusing compiled model: ' + oparam['noiseModel'])
# loss_fn = lambda: self.doBuildModel(data_dict, Tmax, oparam=oparam)
FitVars = [self.rawVar[varN] for varN in self.FitVars]
if needsRebuild:
print('rebuilt model with noise Model: '+oparam['noiseModel'])
loss_fn = lambda: self.doBuildModel(data_dict, Tmax, oparam=oparam, FitStart=FitStart, FitEnd=FitEnd)
# if lossScale == "max":
# lossScale = np.max(data_dict)
if lossScale is not None:
loss_fnOnly = lambda: loss_fn()[0] / lossScale
else:
loss_fnOnly = lambda: loss_fn()[0]
lossScale = 1.0
result_dict = lambda: loss_fn()[1]
progression_dict = lambda: loss_fn()[2]
for reg in regularizations:
regN = reg[0]
weight = self.Var[reg[1]]
var = self.Var[reg[2]]
if regN == "TV":
loss_fnOnly = self.regTV(weight, var, loss_fnOnly)
self.Loss = loss_fnOnly
self.ResultDict = result_dict
self.ProgressDict = progression_dict
else:
loss_fnOnly = self.Loss
result_dict = self.ResultDict
progression_dict = self.ProgressDict
# opt = self.opt
opt = optimizer(loss_fnOnly, otype=otype, oparam=oparam, NIter=NIter, var_list=FitVars, verbose=verbose)
opt.optName = otype # just to store this
self.opt = opt
if NIter > 0:
if self.FitLossWidget is not None:
with self.FitLossWidget: # redirect the output
self.FitLossWidget.clear_output()
res = Optimize(opt, loss=loss_fnOnly, lossScale=lossScale) # self.ResultVals.items()
else:
res = Optimize(opt, loss=loss_fnOnly, lossScale=lossScale) # self.ResultVals.items()
else:
res = loss_fnOnly()
print("Loss is: " + str(res.numpy()))
if np.isnan(res.numpy()):
print('Aborting')
return None,None
if self.FitLossWidget is not None:
self.FitLossWidget.clear_output()
with self.FitLossWidget:
print(str(res.numpy()))
self.ResultVals = result_dict # stores how to calculate results
ResultVals = result_dict() # calculates the results
self.Progression = progression_dict
# Progression = progression_dict()
self.FitResultVars = {'Loss': res}
for varN in self.FitVars:
var = self.Var[varN]
if callable(var):
var = var()
self.FitResultVars[varN] = var.numpy() # res[n]
# for varN in Progression:
# self.Progression[varN] = Progression[varN] # res[n]
for varN in ResultVals:
self.FitResultVals[varN] = ResultVals[varN] # .numpy() # res[n]
if self.FitButton is not None:
self.FitButton.style.button_color = 'green'
return self.FitResultVars, self.FitResultVals
def findAxis(self, d):
"""
d can be an axis label or an axis number
"""
if isinstance(d, str):
return self.Axes[d]
else:
# if d == 0:
# raise ValueError("The axes numbers start with one or be negative!")
if d < 0:
for axN, ax in self.Axes.items():
if ax.curAxis == -d:
return ax
else:
for axN, ax in self.Axes.items():
if ax.curAxis == len(self.Axes) - d:
return ax
# d = -d
raise ValueError("Axis not found.")
def findAxesDims(self, listOfAxesNames):
"""
finds a list of dimension positions from a list of Axis names
"""
listOfAxes = list(listOfAxesNames)
return [- self.findAxis(d).curAxis for d in listOfAxesNames]
def selectDims(self, toPlot, dims=None, includeZero=False):
"""
selects the dimensions to plot and returns a list of labels
The result is summed over all the other dimensions
"""
labels = []
if dims is None:
toPlot = np.squeeze(toPlot)
if toPlot.ndim > 1:
toPlot = np.sum(toPlot, tuple(range(1, toPlot.ndim)))
else:
if not isinstance(dims, list) and not isinstance(dims, tuple):
dims = list([dims])
if includeZero:
rd = list(range(1, toPlot.ndim)) # already exclude the zero axis from being deleted here.
else:
rd = list(range(toPlot.ndim)) # choose all dimensions
for d in dims:
if d == "time" or d == 0:
d = 0
labels.append("time")
else:
ax = self.findAxis(d)
d = - ax.curAxis
labels.append(ax.Labels)
if d < 0:
d = toPlot.ndim + d
rd.remove(d)
toPlot = np.sum(toPlot, tuple(rd))
return toPlot, labels
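# Hedged usage note for selectDims (comment only, the axis name "Age" is hypothetical):
# with dims=None a (T, A, B) array is summed over everything except axis 0 and comes back
# as a single curve; with dims=["time", "Age"] the time axis and the axis registered under
# "Age" are kept, all remaining axes are summed away, and the matching axis labels are
# returned alongside the reduced array (one curve per age group).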
def showDates(self, Dates, offsetDay=0): # being sunday
plt.xticks(range(offsetDay, len(Dates), 7), [date for date in Dates[offsetDay:-1:7]], rotation=70)
# plt.xlim(45, len(Dates))
plt.tight_layout()
def setPlotCumul(self, val):
"""
toggles the plot mode between cumulative (points) and non-cumulative (bars) plots.
Both plot types share the same underlying data; switching only changes which renderers are visible.
"""
from bokeh.plotting import Figure, ColumnDataSource
from bokeh.io.notebook import CommsHandle
self.plotCumul = val['new']
for fn, f in self.DataDict.items():
# print('looking for figures: ')
# print(f)
if isinstance(f, Figure):
for r in f.renderers:
if r.name.startswith(cumulPrefix):
r.visible = self.plotCumul # shows cumul plots according to settings
else:
r.visible = not self.plotCumul
# print('cleared renderer of figure '+fn+' named: '+f.name)
#if isinstance(f, ColumnDataSource):
# if fn.startswith(cumulPrefix):
# if not self.plotCumul:
# f.data['y'] = None
# else:
# if self.plotCumul:
# f.data['y'] = None
def plotB(self, Figure, x, toPlot, name, color=None, line_dash=None, withDots=False, useBars=True, allowCumul=True):
# create a column data source for the plots to share
from bokeh.models import ColumnDataSource
myPrefix = '_'
if self.plotCumul and allowCumul is True:
toPlot = np.cumsum(toPlot, 0)
useBars = False
myPrefix = cumulPrefix
mylegend = name + "_cumul"
else:
mylegend = name
# print('Cumul: set useBars to False')
if isinstance(x, pd.core.indexes.datetimes.DatetimeIndex):
msPerDay = 0.6 * 1000.0 * 60 * 60 * 24
else:
msPerDay = 0.6
if self.plotMatplotlib:
plt.plot(x,toPlot, label=mylegend)
plt.legend()
return
if myPrefix + name not in self.DataDict:
# print('replotting: '+name)
if name not in self.DataDict:
source = ColumnDataSource(data=dict(x=x, y=toPlot))
self.DataDict[name] = source
else:
# print('Updating y-data of: ' + name)
self.DataDict[name].data['y'] = toPlot
source = self.DataDict[name]
if useBars:
if withDots:
r = Figure.circle('x', 'y', line_width=1.5, alpha=0.9, color=color, source=source, name=myPrefix + name)
r.visible = True
r = Figure.vbar('x', top='y', width=msPerDay, alpha=0.6, color=color, source=source, legend_label=mylegend, name=myPrefix + name)
r.visible = True
else:
r = Figure.line('x', 'y', line_width=1.5, alpha=0.8, color=color, line_dash=line_dash, legend_label=mylegend, source=source, name=myPrefix + name)
r.visible = True
else:
if withDots:
r = Figure.circle('x', 'y', line_width=1.5, alpha=0.8, color=color, source=source, name=myPrefix + name)
r.visible = True
r = Figure.line('x', 'y', line_width=1.5, alpha=0.8, color=color, line_dash=line_dash, legend_label=mylegend, source=source, name=myPrefix + name)
r.visible = True
#print('First plot of: '+name)
else:
#print('Updating y-data of: '+name)
self.DataDict[name].data['y'] = toPlot
self.DataDict[myPrefix + name] = self.DataDict[name]
Figure.legend.click_policy = "hide"
def getDates(self, Dates, toPlot):
if Dates is None:
return np.arange(toPlot.shape[0])
if Dates is not None and len(Dates) < toPlot.shape[0]:
Dates = pd.date_range(start=Dates[0], periods=toPlot.shape[0]).map(lambda x: x.strftime('%d.%m.%Y'))
return pd.to_datetime(Dates, dayfirst=True)
def showResultsBokeh(self, title='Results', xlabel='time step', Scale=False, xlim=None, ylim=None,
ylabel='probability', dims=None, legendPlacement='upper left', Dates=None, offsetDay=0, logY=True,
styles=['dashed', 'solid', 'dotted', 'dotdash', 'dashdot'], figsize=None, subPlot=None,
dictToPlot=None, initMinus=None, allowCumul=True):
from bokeh.plotting import figure # output_file,
from bokeh.palettes import Dark2_5 as palette
from bokeh.io import push_notebook, show
import itertools
plotMatplotlib = self.plotMatplotlib
TOOLS = "pan,wheel_zoom,box_zoom,reset,save,box_select"
# x = np.linspace(0, 2 * np.pi, 2000)
# y = np.sin(x)
# r = p.line(x, y, color="#8888cc", line_width=1.5, alpha=0.8)
# return p
# if figsize is None:
# figsize = (600, 300)
# p = figure(title=title, plot_height=300, plot_width=600, y_range=(-5, 5),
# background_fill_color='#efefef', tools=TOOLS)
# x = np.linspace(0, 2 * np.pi, 2000)
# y = np.sin(x)
# r = p.line(x, y, color="#8888cc", line_width=1.5, alpha=0.8)
# return p
n = 0
if subPlot is None:
FigureIdx = '_figure'
FigureTitle = title
else:
FigureIdx = '_figure_' + subPlot
FigureTitle = title + '_' + subPlot
if Scale is False:
Scale = None
if dictToPlot is None:
dictMeas = self.Measurements
dictFits = self.FitResultVals
else:
dictMeas = {}
if callable(dictToPlot):
dictFits = dictToPlot()
else:
dictFits = dictToPlot
if initMinus is not None:
if isinstance(initMinus, str):
initMinus = [initMinus]
else:
initMinus = []
self.DataDict['_title'] = FigureTitle
if plotMatplotlib:
self.DataDict[FigureIdx] = plt.figure()
plt.title(self.DataDict['_title'])
plt.xlabel('time')
plt.ylabel(ylabel)
newFigure = True
else:
if FigureIdx not in self.DataDict:
#print('New Figure: ' + FigureIdx+'\n')
if Dates is not None:
self.DataDict[FigureIdx] = figure(title=self.DataDict['_title'], plot_height=400, plot_width=900,
background_fill_color='#efefef', tools=TOOLS, x_axis_type='datetime', name=FigureIdx)
self.DataDict[FigureIdx].xaxis.major_label_orientation = np.pi / 4
else:
self.DataDict[FigureIdx] = figure(title=self.DataDict['_title'], plot_height=400, plot_width=900,
background_fill_color='#efefef', tools=TOOLS, name=FigureIdx)
self.DataDict[FigureIdx].xaxis.axis_label = 'time'
self.DataDict[FigureIdx].yaxis.axis_label = ylabel
newFigure = True
else:
newFigure = False
# show(self.DataDict['_figure'], notebook_handle=True)
# if ylabel is not None:
# self.resultFigure.yaxis.axis_label = ylabel
colors = itertools.cycle(palette)
for resN, dict in dictMeas.items():
style = styles[n % len(styles)]
for dictN, toPlot in dict.items():
if (subPlot is not None) and (dictN not in subPlot):
continue
toPlot, labels = self.selectDims(toPlot, dims=dims, includeZero=True)
toPlot = np.squeeze(toPlot)
if Scale is not None:
toPlot = toPlot * Scale
# r.data_source.data['y'] = toPlot # styles[n]
if toPlot.ndim > 1:
colors = itertools.cycle(palette)
mydim = 0
for d in range(len(labels)):
if len(labels[d]) > 1:
mydim = d
for d, color in zip(range(toPlot.shape[1]), colors):
x = self.getDates(Dates, toPlot)
alabel = labels[mydim][d]
self.plotB(self.DataDict[FigureIdx], x, toPlot[:, d], name=resN + "_" + dictN + "_" + alabel, withDots=True, color=color, line_dash=style, allowCumul=allowCumul)
# labels[0][d]
else:
color = next(colors)
x = self.getDates(Dates, toPlot)
if labels == [] or labels[0].shape == (0,): # labels == [[]]:
labels = [[dictN]]
self.plotB(self.DataDict[FigureIdx], x, toPlot, name=resN + "_" + dictN + "_" + labels[0][0], withDots=True, color=color, line_dash=style, allowCumul=allowCumul)
n += 1
for dictN, toPlot in dictFits.items():
if (subPlot is not None) and (dictN not in subPlot):
continue
if callable(toPlot): # the Var dictionary contains callable variables
toPlot = toPlot()
style = styles[n % len(styles)]
if dictN in initMinus:
V0 = 1.0
if dictN in self.State:
V0 = self.State[dictN]().numpy()
#print('Showing '+dictN+' V0 is '+str(V0))
toPlot = V0 - toPlot
dictN = '('+dictN+'_0-' + dictN+')'
toPlot, labels = self.selectDims(toPlot, dims=dims, includeZero=True)
toPlot = np.squeeze(toPlot)
if Scale is not None:
toPlot = toPlot * Scale
for d in range(len(labels)):
if len(labels[d]) > 1:
mydim = d
if toPlot.ndim > 1:
colors = itertools.cycle(palette)
for d, color in zip(range(toPlot.shape[1]), colors):
x = self.getDates(Dates, toPlot)
alabel = labels[mydim][d]
self.plotB(self.DataDict[FigureIdx], x, toPlot[:, d], name="Fit_" + dictN + "_" + alabel, color=color, line_dash=style, allowCumul=allowCumul)
else:
color = next(colors)
x = self.getDates(Dates, toPlot)
self.plotB(self.DataDict[FigureIdx], x, toPlot, name="Fit_" + dictN, color=color, line_dash=style, allowCumul=allowCumul)
# if xlim is not None:
# plt.xlim(xlim[0],xlim[1])
# if ylim is not None:
# plt.ylim(ylim[0],ylim[1])
# push_notebook()
if newFigure and not plotMatplotlib:
#print('showing figure: '+FigureIdx)
try:
self.DataDict[FigureIdx + '_notebook_handle'] = show(self.DataDict[FigureIdx], notebook_handle=True)
except Exception:
print('Warning: figures are not showing, probably because this was called from a console')
else:
#print('pushing notebook')
if plotMatplotlib:
return
else:
push_notebook(handle=self.DataDict[FigureIdx + '_notebook_handle'])
def showResults(self, title='Results', xlabel='time step', Scale=False, xlim=None, ylim=None, ylabel='probability', dims=None, legendPlacement='upper left', Dates=None,
offsetDay=0, logY=True, styles=['.', '-', ':', '--', '-.', '*'], figsize=None):
if logY:
plot = plt.semilogy
else:
plot = plt.plot
# Plot results
if figsize is not None:
plt.figure(title, figsize=figsize)
else:
plt.figure(title)
plt.title(title)
legend = []
n = 0
# for resN, dict in self.Simulations.items():
# style = styles[n]
# n+=1
# for dictN, toPlot in dict.items():
# plt.plot(toPlot, style)
# legend.append(resN + "_" + dictN)
# n=0
for resN, dict in self.Measurements.items():
for dictN, toPlot in dict.items():
toPlot, labels = self.selectDims(toPlot, dims=dims, includeZero=True)
toPlot = np.squeeze(toPlot)
if Scale is not None and Scale is not False:
toPlot *= Scale
plot(toPlot, styles[n % len(styles)])
if toPlot.ndim > 1:
for d in range(toPlot.shape[1]):
legend.append(resN + "_" + dictN + "_" + labels[0][d])
else:
legend.append(resN + "_" + dictN)
plt.gca().set_prop_cycle(None)
n += 1
for dictN, toPlot in self.FitResultVals.items():
toPlot, labels = self.selectDims(toPlot, dims=dims, includeZero=True)
toPlot = np.squeeze(toPlot)
if Scale is not None and Scale is not False:
toPlot *= Scale
plot(toPlot, styles[n % len(styles)])
if toPlot.ndim > 1:
for d in range(toPlot.shape[1]):
legend.append("Fit_" + dictN + "_" + labels[0][d])
else:
legend.append("Fit_" + "_" + dictN)
plt.legend(legend, loc=legendPlacement)
if Dates is not None:
self.showDates(Dates, offsetDay)
elif xlabel is not None:
plt.xlabel(xlabel)
if ylabel is not None:
plt.ylabel(ylabel)
if xlim is not None:
plt.xlim(xlim[0], xlim[1])
if ylim is not None:
plt.ylim(ylim[0], ylim[1])
def sumOfStates(self, Progression, sumcoords=None):
sumStates = 0
if sumcoords is None:
sumcoords = tuple(np.arange(self.maxAxes + 1)[1:])
for name, state in Progression.items():
sumStates = sumStates + np.sum(state.numpy(), axis=sumcoords)
return sumStates
def showStates(self, title='States', exclude={}, xlabel='time step', ylabel='probability', dims=None, dims2d=[0, -1], MinusOne=[], legendPlacement='upper left', Dates=None,
offsetDay=0, logY=False, xlim=None, ylim=None, figsize=None):
if logY:
plot = plt.semilogy
else:
plot = plt.plot
if figsize is not None:
plt.figure(title, figsize=figsize)
else:
plt.figure(title)
plt.title(title)
# Plot the state population
legend = []
if callable(self.Progression):
Progression = self.Progression()
else:
Progression = self.Progression
sumStates = np.squeeze(self.sumOfStates(Progression, (1, 2, 3)))
initState = sumStates[0]
meanStates = np.mean(sumStates)
maxDiff = np.max(abs(sumStates - initState))
print("Sum of states deviates by: " + str(maxDiff) + ", from the starting state:" + str(initState) + ". relative: " + str(maxDiff / initState))
N = 1
for varN in Progression:
if varN not in exclude:
sh = np.array(Progression[varN].shape, dtype=int)
pdims = np.nonzero(sh > 1)
toPlot = Progression[varN] # np.squeeze(
myLegend = varN
if np.squeeze(toPlot).ndim > 1:
if dims2d is not None and len(self.Axes) > 1:
plt.figure(10 + N)
plt.ylabel(xlabel)
N += 1
plt.title("State " + varN)
toPlot2, labels = self.selectDims(toPlot, dims=dims2d)
toPlot2 = np.squeeze(toPlot2)
if toPlot2.ndim > 1:
plt.imshow(toPlot2, aspect="auto")
# plt.xlabel(self.RegisteredAxes[self.maxAxes - pdims[0][1]].name)
plt.xlabel(dims2d[1])
plt.xticks(range(toPlot2.shape[1]), labels[1], rotation=70)
plt.colorbar()
toPlot, labels = self.selectDims(toPlot, dims=dims)
myLegend = myLegend + " (summed)"
if varN in MinusOne:
toPlot = toPlot - 1.0
myLegend = myLegend + "-1"
# plt.figure(10)
plot(np.squeeze(toPlot))
legend.append(myLegend)
plt.legend(legend, loc=legendPlacement)
if Dates is not None:
self.showDates(Dates, offsetDay)
elif xlabel is not None:
plt.xlabel(xlabel)
if ylabel is not None:
plt.ylabel(ylabel)
if xlim is not None:
plt.xlim(xlim[0], xlim[1])
if ylim is not None:
plt.ylim(ylim[0], ylim[1])
def compareFit(self, maxPrintSize=10, dims=None, fittedVars=None, legendPlacement='upper left', Dates=None, offsetDay=0):
for varN, orig in self.Original.items():
fit = np.squeeze(totensor(removeCallable(self.Var[varN])).numpy())
orig = np.squeeze(self.toVar[varN](orig).numpy()) # convert from rawVar to Var
if varN not in self.Distorted:
dist = orig
else:
dist = self.Distorted[varN]
if isNumber(fit) or np.prod(fit.shape) < maxPrintSize:
if fittedVars is not None:
if varN in fittedVars:
print("\033[1;32;49m")
else:
print("\033[1;31;49m")
print("Comparison " + varN + ", Distorted:" + str(dist) + ", Original: " + str(orig) + ", fit: " + str(fit) + ", rel. differenz:" + str(
np.max((fit - orig) / orig)))
print("\033[0;37;49m")
else:
plt.figure("Comparison " + varN)
dist, labelsD = self.selectDims(dist, dims=dims)
plt.plot(dist)
orig, labelsO = self.selectDims(orig, dims=dims)
plt.plot(orig)
fit, labelsF = self.selectDims(fit, dims=dims)
plt.plot(fit)
plt.legend(["distorted", "original", "fit"], loc=legendPlacement)
if Dates is not None:
self.showDates(Dates, offsetDay)
def toggleInFit(self, toggle, name):
if toggle['new']:
# print('added '+name)
self.FitVars.append(name)
else:
# print('removed '+name)
self.FitVars.remove(name)
def idxFromDropList(self, varshape, dropWidgets):
varshape = list(varshape)
myindex = len(varshape) * [0, ]  # zero-filled index list, one entry per dimension
idxnum = 0
for d, s in zip(range(len(varshape)), varshape):
if s > 1:
myindex[d] = dropWidgets[idxnum].value
idxnum += 1
idx = tuple(myindex)
return idx
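# Hedged worked example for idxFromDropList (comment only, values hypothetical): for a
# variable of shape (1, 5, 1, 3) and two drop-down widgets currently set to 2 and 1, the
# singleton axes stay at index 0 and the result is the tuple (0, 2, 0, 1), which picks
# one scalar element out of the variable.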
def assignToWidget(self, idx, allDrop=None, varN=None, widget=None):
"""
This function assigns a new value to the value widget, taking the index from the drop widgets.
"""
if varN in self.Var:
myvar = self.Var[varN]()
else:
myvar = self.lossWeight[varN]()
myidx = self.idxFromDropList(myvar.shape, allDrop)
val = myvar[myidx] # idx['new']
self.adjustMinMax(widget, val)
widget.value = val
#print('assignToWidget, varN: '+varN+', idx='+str(idx['new'])+', val:'+str(val)+', widget: '+widget.description)
def adjustMinMax(self, widget, val):
# print('AdjustMinMax: '+str(widget.min)+', val. '+ str(val) +', max:'+str(widget.max))
from ipywidgets import widgets
if isinstance(widget, widgets.FloatLogSlider):
lval = np.log10(val)
else:
lval = val
if isinstance(widget, widgets.FloatLogSlider) or isinstance(widget, widgets.FloatSlider):
if lval <= widget.min + (widget.max - widget.min) / 10.0:
widget.min = lval - 1.0
if lval >= widget.max - (widget.max - widget.min) / 10.0:
widget.max = lval + 1.0
# print('post AdjustMinMax: '+str(widget.min)+', val. '+ str(val) +', max:'+str(widget.max))
def updateAllWidgets(self, dummy=None):
# print('updateAllWidgets')
for varN, w in self.WidgetDict.items():
newval = self.Var[varN]()
if isinstance(w, tuple):
idx = self.idxFromDropList(newval.shape, w[1]) # w[1].value
for n in range(np.squeeze(newval).shape[0]):
val = np.squeeze(newval)[n]
self.adjustMinMax(w[0], val)
w[0].value = newval[idx] # np.squeeze(
else:
val = newval
w.value = val
def getValueWidget(self, myval, varN):
from ipywidgets import widgets, Layout
item_layout = Layout(display='flex', flex_flow='row', justify_content='space-between', width='50%')
valueWidget = widgets.FloatText(value=myval, layout = item_layout)
# if self.VarDisplayLog[varN]:
# mymin = np.round(np.log10(myval)) - 1
# mymax = np.round(np.log10(myval)) + 1
# valueWidget = widgets.FloatLogSlider(value=myval, base=10, min=mymin, max=mymax)
# else:
# mymin = 0.0
# mymax = myval * 3.0
# valueWidget = widgets.FloatSlider(value=myval, min=mymin, max=mymax)
return valueWidget
def updateWidgetFromDropDict(self, idx, dict, dropWidget, valWidget):
dictKey = dropWidget.options[dropWidget.value][0]
valWidget.value = dict[dictKey].numpy()
def assignToDictVal(self, newval, dict, dropWidget):
dictKey = dropWidget.options[dropWidget.value][0]
dict[dictKey].assign(newval.new)
def dictWidget(self, dict, description):
from ipywidgets import widgets, Layout
import functools
options = [(d, n) for d, n in zip(dict.keys(), range(len(dict.keys())))]
dropWidget = widgets.Dropdown(options=options, indent=False, description=description) # value=0,
item_layout = Layout(display='flex', flex_flow='row', justify_content='space-between', width='100%')
box_layout = Layout(display='flex', flex_flow='column', border='solid 2px', align_items='stretch', width='40%')
valueWidget = widgets.FloatText(value=dict[options[0][0]].numpy(), layout=item_layout)
# valueWidget = widgets.HBox((inFitWidget,valueWidget))
dropWidget.observe(functools.partial(self.updateWidgetFromDropDict, dict=dict, dropWidget=dropWidget, valWidget=valueWidget), names='value')
# showResults= showResults
valueWidget.observe(functools.partial(self.assignToDictVal, dict=dict, dropWidget=dropWidget), names='value')
# widget = widgets.HBox((dropWidget, valueWidget))
widget = widgets.Box((dropWidget, valueWidget), layout=box_layout)
# self.WidgetDict[varN] = (valueWidget, dropWidget)
return widget
def getVarValueDict(self):
vals={}
for vname,v in self.rawVar.items():
vals[vname]=v.numpy()
return vals
def setVarByValueDict(self, vals):
for vname,v in self.rawVar.items():
try:
val = vals[vname]
v.assign(val)
except:
print('Could not find an entry for variable '+vname)
def SaveVars(self, filename):
print('Saving file: '+filename)
vals = self.getVarValueDict()
np.save(filename, vals)
return
def LoadVars(self, filename):
print('Loading file: '+filename)
vals = np.load(filename, allow_pickle=True).item()
self.setVarByValueDict(vals)
return
def getGUI(self, fitVars=None, nx=3, showResults=None, doFit=None, Dates=None):
from ipywidgets import widgets, Layout
from IPython.display import display
import functools
item_layout = Layout(display='flex', flex_flow='row', justify_content='space-between')
small_item_layout = Layout(display='flex', flex_flow='row', justify_content='space-between', width='15%')
box_layout = Layout(display='flex', flex_flow='column', border='solid 2px', align_items='stretch', width='40%')
box2_layout = Layout(display='flex', flex_flow='row', justify_content='space-between', border='solid 2px', align_items='stretch', width='40%')
output_layout = Layout(display='flex', flex_flow='row', justify_content='space-between', border='solid 2px', align_items='stretch', width='300px')
tickLayout = Layout(display='flex', width='30%')
if fitVars is None:
fitVars = self.FitVars
allWidgets = {}
horizontalList = []
px = 0
for varN in fitVars: # this loop builds the controllers for each variable that can be used to fit
var = self.Var[varN]().numpy()
if var.ndim > 0 and np.prod(np.array(var.shape)) > 1:
# mydim = var.ndim - np.nonzero(np.array(var.shape) - 1)[0][0]
allDrop = []
altAxes = self.VarAltAxes[varN]
for mydim in range(len(var.shape)):
if var.shape[mydim]>1:
if mydim == 0:
regdim = -1
else:
regdim = var.ndim-mydim - 1
if altAxes is not None:
regdim = altAxes[mydim]
if isinstance(regdim,str):
try:
regdim = self.Axes[regdim].curAxis-1 # convert name to axis dimension
except:
raise ValueError('Could not find axis: ' + regdim)
ax = self.RegisteredAxes[regdim]
if ax.Labels is None:
options = [(str(d), d) for d in range(np.prod(ax.shape))]
else:
options = [(ax.Labels[d], d) for d in range(len(ax.Labels))]
drop = widgets.Dropdown(options=options, indent=False, value=0, description=ax.name)
allDrop.append(drop)
# dropWidget = widgets.Box(allDrop, display='flex', layout=box_layout)
inFitWidget = widgets.Checkbox(value=(varN in self.FitVars), indent=False, layout=tickLayout, description=varN)
inFitWidget.observe(functools.partial(self.toggleInFit, name=varN), names='value')
valueWidget = self.getValueWidget(np.squeeze(var.flat)[0], varN)
valueWidgetBox = widgets.HBox((inFitWidget, valueWidget), layout=item_layout) # widgets.Label(varN),
# valueWidget = widgets.HBox((inFitWidget,valueWidget))
widget = widgets.Box(allDrop + [valueWidgetBox], layout=box_layout)
# do NOT use lambda below, as it does not seem to work in a for loop here!
# valueWidget.observe(lambda val: self.assignNewVar(varN, val.new, idx=drop.value), names='value')
for drop in allDrop:
drop.observe(functools.partial(self.assignToWidget, allDrop=allDrop, varN=varN, widget=valueWidget), names='value')
# showResults= showResults
valueWidget.observe(functools.partial(self.assignWidgetVar, varname=varN, idx=allDrop), names='value')
self.WidgetDict[varN] = (valueWidget, allDrop)
px += 1
else:
inFitWidget = widgets.Checkbox(value=(varN in self.FitVars), indent=False, layout=tickLayout, description=varN)
inFitWidget.observe(functools.partial(self.toggleInFit, name=varN), names='value')
valueWidget = self.getValueWidget(np.squeeze(var), varN)
widget = widgets.HBox((inFitWidget, valueWidget), display='flex', layout=box2_layout)
# showResults=showResults
valueWidget.observe(functools.partial(self.assignWidgetVar, varname=varN), names='value')
self.WidgetDict[varN] = valueWidget
px += 1
# widget.manual_name = varN
allWidgets[varN] = widget
horizontalList.append(widget)
if px >= nx:
widget = widgets.HBox(horizontalList)
horizontalList = []
px = 0
display(widget)
widget = widgets.HBox(horizontalList)
horizontalList = []
px = 0
display(widget)
lastRow = []
if showResults is not None:
radioCumul = widgets.Checkbox(value=self.plotCumul, indent=False, layout=tickLayout, description='cumul.')
radioCumul.observe(self.setPlotCumul, names='value')
# drop.observe(functools.partial(self.assignToWidget, varN=varN, widget=valueWidget), names='value')
PlotWidget = widgets.Button(description='Plot')
PlotWidget.on_click(showResults)
lastRow.append(PlotWidget)
lastRow.append(radioCumul)
out = widgets.Output()
LoadWidget = SelectFilesButton(out,CallBack=self.LoadVars, Load=True)
widgets.VBox([LoadWidget, out])
# LoadWidget = widgets.Button(description='Load')
# LoadWidget.on_click(self.LoadVars)
lastRow.append(LoadWidget)
SaveWidget = SelectFilesButton(out,CallBack=self.SaveVars,Load=False) # description='Save'
# SaveWidget.on_click(self.SaveVars)
widgets.VBox([SaveWidget, out])
lastRow.append(SaveWidget)
ResetWidget = widgets.Button(description='Reset')
ResetWidget.on_click(self.restoreOriginal)
ResetWidget.observe(self.updateAllWidgets)
lastRow.append(ResetWidget)
if doFit is not None:
doFitWidget = widgets.Button(description='Fit')
self.FitButton = doFitWidget
lastRow.append(doFitWidget)
options = [(self.FitLossChoices[d], d) for d in range(len(self.FitLossChoices))]
drop = widgets.Dropdown(options = options, indent=False, value=0)
self.FitLossChoiceWidget = drop
lastRow.append(drop)
widget = widgets.HBox(lastRow)
display(widget)
lastRow = []
options = [(self.FitOptimChoices[d], d) for d in range(len(self.FitOptimChoices))]
self.FitOptimChoiceWidget = widgets.Dropdown(options = options, indent=False, value=0)
self.FitOptimLambdaWidget = widgets.FloatText(value=1.0, layout=item_layout, indent=False, description='LearningRate')
widget = widgets.Box((self.FitOptimChoiceWidget, self.FitOptimLambdaWidget), layout=box_layout)
lastRow.append(widget)
nIterWidget = widgets.IntText(value=100, description='NIter:', indent=False, layout=small_item_layout)
# else:
# self.FitOptimLambdaWidget = widgets.IntText(value=0, indent=False, description='StartDay')
# lastRow.append(drop)
# self.FitOptimLambdaWidget = widgets.IntText(value=-1, indent=False, description='StopDay')
# lastRow.append(drop)
doFitWidget.on_click(lambda b: self.updateAllWidgets(doFit(NIter=nIterWidget.value)))
lastRow.append(nIterWidget)
weightWidget = self.dictWidget(self.lossWeight, description='Weights:')
lastRow.append(weightWidget)
lossWidget = widgets.Output(description='Loss:', layout=output_layout)
if Dates is not None:
options = [(Dates[d], d) for d in range(len(Dates))]
self.FitStartWidget = widgets.Dropdown(options=options, indent=False, description='StartDate', value=0)
self.FitStopWidget = widgets.Dropdown(options=options, indent=False, description='StopDate', value=len(Dates) - 4)
widget = widgets.Box((self.FitStartWidget,self.FitStopWidget), layout=box_layout)
lastRow.append(widget)
self.FitLossWidget = lossWidget
lastRow.append(lossWidget)
else:
self.FitButton = None
self.FitLossWidget = None
self.FitLossChoiceWidget = None
widget = widgets.HBox(lastRow)
display(widget)
if showResults is not None:
showResults()
return allWidgets
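# Hedged usage sketch (comments only; "m" is a hypothetical instance of the model class
# defined above, and the variable/measurement names are invented for illustration):
#   m.toFit(['rate_infect', 'initial_pop'])          # choose which Var entries are optimised
#   measured = m.simulate('sim', {'detected': None}, Tmax=100, applyPoisson=True)
#   fitVars, fitVals = m.fit({'detected': measured}, Tmax=100, NIter=100, otype='L-BFGS')
#   m.showResults(title='Fit')                        # matplotlib summary plot
#   m.showStates(MinusOne=['S'])                      # state progression, sum check printed
# The GUI variant wires the same calls to ipywidgets via getGUI(showResults=..., doFit=...).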
# --------- Stuff concerning loading data
def getMeasured(params={}):
param = {'oldFormat': True, 'IndividualLK': True, 'TwoLK': False, 'IndividualAge': True, 'UseGender': False}
param = {**param, **params} # overwrite the defaults without destroying them
results = {}
if param['oldFormat']:
dat = retrieveData() # loads the data from the server
else:
pass
# dat = data_update_handlers.fetch_data.DataFetcher.fetch_german_data()
PopTotalLK = dat.groupby(by='IdLandkreis').first()["Bev Insgesamt"] # .to_dict() # population of each district
TPop = np.sum(PopTotalLK)
# dat = dat[dat.IdLandkreis == 9181];
if not param['IndividualLK']:
# dat['AnzahlFall'] = dat.groupby(by='IdLandkreis').sum()['AnzahlFall']
dat['IdLandkreis'] = 1
dat['Landkreis'] = 'BRD'
dat['Bev Insgesamt'] = TPop
if param['TwoLK']:
dat2 = retrieveData()
dat2['IdLandkreis'] = 2
dat2['Landkreis'] = 'DDR'
dat['Bev Insgesamt'] = TPop
dat2['Bev Insgesamt'] = TPop
dat = pd.concat([dat, dat2], axis=0)
# EIA_CBECS_Land.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
"""
2012 Commercial Buildings Energy Consumption Survey (CBECS)
https://www.eia.gov/consumption/commercial/reports/2012/energyusage/index.php
Last updated: Monday, August 17, 2020
"""
import io
import pandas as pd
import numpy as np
from flowsa.location import US_FIPS, get_region_and_division_codes
from flowsa.common import WITHDRAWN_KEYWORD, \
clean_str_and_capitalize, fba_mapped_default_grouping_fields
from flowsa.settings import vLogDetailed
from flowsa.flowbyfunctions import assign_fips_location_system, aggregator
from flowsa.literature_values import \
get_commercial_and_manufacturing_floorspace_to_land_area_ratio
from flowsa.validation import calculate_flowamount_diff_between_dfs
def eia_cbecs_land_URL_helper(*, build_url, config, **_):
"""
This helper function uses the "build_url" input from flowbyactivity.py,
which is a base url for data imports that requires parts of the url text
string to be replaced with info specific to the data year. This function
does not parse the data, only modifies the urls from which data is
obtained.
:param build_url: string, base url
:param config: dictionary, items in FBA method yaml
:return: list, urls to call, concat, parse, format into
Flow-By-Activity format
"""
# initiate url list for coa cropland data
urls = []
# replace "__xlsx_name__" in build_url to create three urls
for x in config['xlsx']:
url = build_url
url = url.replace("__xlsx__", x)
urls.append(url)
return urls
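# Hedged illustration (the URL fragment and config below are placeholders, not the real
# FBA method yaml): the helper only substitutes "__xlsx__" once per configured workbook
# name and returns one URL per workbook. Not called anywhere.
def _example_eia_cbecs_land_urls():
    demo_build_url = "https://www.eia.gov/consumption/commercial/data/2012/__xlsx__"
    demo_config = {'xlsx': ['b5.xlsx', 'b6.xlsx']}
    # expected: the same fragment expanded to .../b5.xlsx and .../b6.xlsx
    return eia_cbecs_land_URL_helper(build_url=demo_build_url, config=demo_config)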
def eia_cbecs_land_call(*, resp, url, **_):
"""
Convert response for calling url to pandas dataframe, begin
parsing df into FBA format
:param resp: df, response from url call
:param url: string, url
:return: pandas dataframe of original source data
"""
# Convert response to dataframe
df_raw_data = pd.read_excel(io.BytesIO(resp.content),
sheet_name='data')
df_raw_rse = pd.read_excel(io.BytesIO(resp.content),
sheet_name='rse')
if "b5.xlsx" in url:
# skip rows and remove extra rows at end of dataframe
df_data = pd.DataFrame(df_raw_data.loc[15:32])
import sys
sys.path.append(r'simulation_tool/') # multi_modal_simulation is found here
import ast
import muse_sc as muse
from multi_modal_simulation import multi_modal_simulator
import pandas as pd
import phenograph
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
import matplotlib.pyplot as plt
import tensorflow as tf
#import umap
tf.get_logger().setLevel('ERROR')
np.random.seed(0)
latent_dim = 100
num_cluster = 10
sample_size = 1000
latent_code_dim = 30
observed_data_dim = 500
sigma_1 = 0.1
sigma_2 = 0.1
decay_coef_1 = 0.5
decay_coef_2 = 0.1
merge_prob = 0.7
dataset_a = pd.read_csv('/exports/reum/tdmaarseveen/RA_Clustering/data/6_clustering/INDIVIDUAL_mannequin_categorical_ohe.csv', sep=',')
dataset_b = pd.read_csv('/exports/reum/tdmaarseveen/RA_Clustering/data/6_clustering/df_tfidf.csv', sep=',')
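# Hedged sanity check added for illustration (not part of the original analysis): both
# modalities are expected to describe the same cells, so their row counts should match
# before any joint embedding is attempted. The row/column convention is an assumption
# about these specific CSV exports.
print('modality A:', dataset_a.shape, 'modality B:', dataset_b.shape)
if dataset_a.shape[0] != dataset_b.shape[0]:
    print('Warning: the two modalities have a different number of rows/cells')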
from __future__ import annotations
from pandas._typing import (
FilePath,
ReadBuffer,
)
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.inference import is_integer
from pandas.core.frame import DataFrame
from pandas.io.common import get_handle
from pandas.io.parsers.base_parser import ParserBase
class ArrowParserWrapper(ParserBase):
"""
Wrapper for the pyarrow engine for read_csv()
"""
def __init__(self, src: FilePath | ReadBuffer[bytes], **kwds):
self.kwds = kwds
self.src = src
ParserBase.__init__(self, kwds)
self._parse_kwds()
def _parse_kwds(self):
"""
Validates keywords before passing to pyarrow.
"""
encoding: str | None = self.kwds.get("encoding")
self.encoding = "utf-8" if encoding is None else encoding
self.usecols, self.usecols_dtype = self._validate_usecols_arg(
self.kwds["usecols"]
)
na_values = self.kwds["na_values"]
if isinstance(na_values, dict):
raise ValueError(
"The pyarrow engine doesn't support passing a dict for na_values"
)
self.na_values = list(self.kwds["na_values"])
def _get_pyarrow_options(self):
"""
Rename some arguments to pass to pyarrow
"""
mapping = {
"usecols": "include_columns",
"na_values": "null_values",
"escapechar": "escape_char",
"skip_blank_lines": "ignore_empty_lines",
}
for pandas_name, pyarrow_name in mapping.items():
if pandas_name in self.kwds and self.kwds.get(pandas_name) is not None:
self.kwds[pyarrow_name] = self.kwds.pop(pandas_name)
self.parse_options = {
option_name: option_value
for option_name, option_value in self.kwds.items()
if option_value is not None
and option_name
in ("delimiter", "quote_char", "escape_char", "ignore_empty_lines")
}
self.convert_options = {
option_name: option_value
for option_name, option_value in self.kwds.items()
if option_value is not None
and option_name
in ("include_columns", "null_values", "true_values", "false_values")
}
self.read_options = {
"autogenerate_column_names": self.header is None,
"skip_rows": self.header
if self.header is not None
else self.kwds["skiprows"],
}
def _finalize_output(self, frame: DataFrame) -> DataFrame:
"""
Processes data read in based on kwargs.
Parameters
----------
frame: DataFrame
The DataFrame to process.
Returns
-------
DataFrame
The processed DataFrame.
"""
num_cols = len(frame.columns)
multi_index_named = True
if self.header is None:
if self.names is None:
if self.prefix is not None:
self.names = [f"{self.prefix}{i}" for i in range(num_cols)]
elif self.header is None:
self.names = range(num_cols)
if len(self.names) != num_cols:
# usecols is passed through to pyarrow, we only handle index col here
# The only way self.names is not the same length as number of cols is
# if we have int index_col. We should just pad the names(they will get
# removed anyways) to expected length then.
self.names = list(range(num_cols - len(self.names))) + self.names
multi_index_named = False
frame.columns = self.names
# we only need the frame not the names
# error: Incompatible types in assignment (expression has type
# "Union[List[Union[Union[str, int, float, bool], Union[Period, Timestamp,
# Timedelta, Any]]], Index]", variable has type "Index") [assignment]
frame.columns, frame = self._do_date_conversions( # type: ignore[assignment]
frame.columns, frame
)
if self.index_col is not None:
for i, item in enumerate(self.index_col):
if is_integer(item):
self.index_col[i] = frame.columns[item]
else:
# String case
if item not in frame.columns:
raise ValueError(f"Index {item} invalid")
frame.set_index(self.index_col, drop=True, inplace=True)
# Clear names if headerless and no name given
if self.header is None and not multi_index_named:
frame.index.names = [None] * len(frame.index.names)
if self.kwds.get("dtype") is not None:
frame = frame.astype(self.kwds.get("dtype"))
return frame
def read(self) -> DataFrame:
"""
Reads the contents of a CSV file into a DataFrame and
processes it according to the kwargs passed in the
constructor.
Returns
-------
DataFrame
The DataFrame created from the CSV file.
"""
pyarrow_csv = import_optional_dependency("pyarrow.csv")
from nltk import ngrams
import collections
import string
import tika
tika.initVM()
import re
from tika import parser
import pandas as pd
import PyPDF2
import os
import shutil
import ast
import numpy as np
import jellyfish
from fuzzywuzzy import fuzz
import dill
import click
from report_pattern_analysis import rec_separate
# ========= Data structures, initializations and hyperparameters
global PREP, PUNC, WORD, DIGI, UNIT
global prepos, punc, units
global threshold, current_document, counter
global learned_patterns, all_patterns, current_patterns, interesting_patterns, fuzzy_patterns
PREP='Prep~'
PUNC='Punc~'
WORD='Word~'
DIGI='Digi~'
UNIT='Unit~'
# ========== utility functions
def remove_files(file_paths):
for file_path in file_paths:
if os.path.exists(file_path):
os.remove(file_path)
def savemodel(model,outfile):
with open(outfile, 'wb') as output:
dill.dump(model, output)
return ''
def loadmodel(infile):
model=''
with open(infile, 'rb') as inp:
model = dill.load(inp)
return model
def ispunc(string):
if re.match('[^a-zA-Z\d]',string):
return True
return False
def break_natural_boundaries(string):
stringbreak=[]
if len(string.split(' ')) > 1:
stringbreak = string.split(' ')
else:
# spl = '[\.\,|\%|\$|\^|\*|\@|\!|\_|\-|\(|\)|\:|\;|\'|\"|\{|\}|\[|\]|]'
alpha = '[A-z]'
num = '\d'
spl='[^A-z\d]'
matchindex = set()
matchindex.update(set(m.start() for m in re.finditer(num + alpha, string)))
matchindex.update(set(m.start() for m in re.finditer(alpha + num, string)))
matchindex.update(set(m.start() for m in re.finditer(spl + alpha, string)))
matchindex.update(set(m.start() for m in re.finditer(alpha + spl, string)))
matchindex.update(set(m.start() for m in re.finditer(spl + num, string)))
matchindex.update(set(m.start() for m in re.finditer(num + spl, string)))
matchindex.update(set(m.start() for m in re.finditer(spl + spl, string)))
matchindex.add(len(string)-1)
matchindex = sorted(matchindex)
start = 0
for i in matchindex:
end = i
stringbreak.append(string[start:end + 1])
start = i+1
return stringbreak
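# Hedged usage sketch (not called anywhere; the input strings are invented): the splitter
# breaks on natural type boundaries, so a value glued to its unit such as "8643psi"
# should come back as separate tokens.
def _example_break_natural_boundaries():
    tokens = break_natural_boundaries('8643psi')    # expected: ['8643', 'psi']
    more = break_natural_boundaries('depth:8643')   # expected: ['depth', ':', '8643']
    return tokens, more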
def break_and_split(arr):
new_arr=[]
for token in arr:
new_arr.extend(break_natural_boundaries(token))
return new_arr
def split_pdf_pages(input_pdf_path, target_dir, fname_fmt=u"{num_page:04d}.pdf"):
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if 'doc' in input_pdf_path:
shutil.copyfile(input_pdf_path, (target_dir + "/delete"))
return
with open(input_pdf_path, "rb") as input_stream:
input_pdf = PyPDF2.PdfFileReader(input_stream)
if input_pdf.flattenedPages is None:
# flatten the file using getNumPages()
input_pdf.getNumPages() # or call input_pdf._flatten()
for num_page, page in enumerate(input_pdf.flattenedPages):
output = PyPDF2.PdfFileWriter()
output.addPage(page)
file_name = os.path.join(target_dir, fname_fmt.format(num_page=num_page))
with open(file_name, "wb") as output_stream:
output.write(output_stream)
def levenshtein_similarity(s, t):
""" Levenshtein Similarity """
Ns = len(s);
Nt = len(t);
lev_sim = 1.0 - (jellyfish.levenshtein_distance(s, t)) / float(max(Ns, Nt))
return lev_sim
def word_similarity(s,t, type=''):
if type=='leven':
return levenshtein_similarity(s, t)
else:
return float(fuzz.ratio(s.upper(), t.upper()))/100
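# Hedged usage sketch (not called anywhere; names invented): find_close_patterns() below
# treats entity names with word_similarity > 0.5 as a match, so a small typo should still
# count as the same entity while unrelated names should score noticeably lower.
def _example_word_similarity():
    close = word_similarity('total depth', 'total depht')             # expected close to 1.0
    leven = word_similarity('total depth', 'total depht', type='leven')
    far = word_similarity('total depth', 'mud weight')                # expected much lower
    return close, leven, far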
# ========== state changing functions
def find_entites(hpattern, mask=[]):
'''
aggregate the tokens that are next to each other as an entity. Finds multiple entities in a single pattern.
Uses the mask to discount the masked tokens.
:param hpattern:
:param mask:
:return:
'''
if len(mask) == 0:
mask = list(np.full(len(hpattern), True))
entities=[]
entity=''
dummied_hpatteren=list(hpattern)
dummied_hpatteren.append(('~', '~', '~'))
dummied_hpatteren=tuple(dummied_hpatteren)
mask.append(True)
for token, select in zip(dummied_hpatteren, mask):
if not select:
continue
if token[2]==WORD:
entity+=' '+token[0]
else:
if entity!='':
entities.append(entity)
entity = ''
return entities
def find_units(hpattern, mask=[]):
'''
find the units in the pattern
:param hpattern:
:param mask:
:return:
'''
if len(mask) == 0:
mask = list(np.full(len(hpattern), True))
units=[]
for token, select in zip(hpattern,mask):
if not select:
continue
if len(token)>=4 and token[3]==UNIT:
units.append(token[0])
return units
def find_values(instance, hpattern, mask=[]):
'''
find the values in the pattern
:param instance:
:param hpattern:
:param mask:
:return:
'''
values=[]
if len(mask)==0:
mask=list(np.full(len(hpattern),True))
for token_inst,token_patt,select in zip(instance, hpattern, mask):
if not select:
continue
if token_patt[2]==DIGI:
values.append(token_inst)
return values
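# Hedged illustration (hand-built hpattern/instance pair, not taken from a real document):
# how the three helpers above decompose one matched n-gram. Expected outputs are given as
# comments rather than asserted. Not called anywhere.
def _example_extract_from_hpattern():
    hpattern = (('total', 'total', WORD), ('depth', 'depth', WORD),
                (':', ':', PUNC), ('8643', DIGI, DIGI), ('ft', 'ft', WORD, UNIT))
    instance = ('total', 'depth', ':', '8643', 'ft')
    entities = find_entites(hpattern)             # roughly [' total depth', ' ft']
    units = find_units(hpattern)                  # ['ft']
    values = find_values(instance, hpattern)      # ['8643']
    return entities, units, values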
def find_exact_patterns(hpattern):
'''
finds the hpatterns that are exact to the given hpattern
look by base patterns, as they don't have the variable/value
:param hpattern:
:return:
'''
global current_patterns
exact_pattern_ids=[]
base_pattern=str(get_base_pattern(ast.literal_eval(hpattern)))
if base_pattern in list(current_patterns['base_pattern']):
exact_pattern_ids.append(list(current_patterns[current_patterns['base_pattern']==base_pattern]['pattern_id'])[0])
return exact_pattern_ids
def find_close_patterns(hpattern):
'''
finds the hpatterns that are closest to the given hpattern
:param hpattern:
:return:
'''
global current_patterns
close_pattern_ids=[]
hpattern=ast.literal_eval(hpattern)
entities=find_entites(hpattern)
units=find_units(hpattern)
close_patterns=[]
for _, row in current_patterns.iterrows():
confidence_flag_entity = 0
confidence_flag_unit = 0
confidence=0 # todo: give the best score here; will help decide the rank
hpattern_iter=ast.literal_eval(str(row['hpattern']))
mask = str(row['mask'])
if mask == '':
mask = []
else:
mask = ast.literal_eval(str(row['mask']))
entities_iter=find_entites(hpattern_iter,mask)
units_iter=find_units(hpattern_iter,mask)
for entity_iter in entities_iter:
for entity in entities:
if word_similarity(entity,entity_iter)>0.5:
confidence_flag_entity=1
for unit_iter in units_iter:
for unit in units:
if unit.lower()==unit_iter.lower():
confidence_flag_unit=1
if confidence_flag_entity==1 or confidence_flag_unit==1:
close_patterns.append((row['pattern_id'],confidence_flag_entity,confidence_flag_unit))
# todo: here rank the patterns according to confidence and return the top n
for conf in close_patterns:
close_pattern_ids.append(conf[0])
return close_pattern_ids
def find_far_patterns(entity_name, aliases=[]):
'''
finds the patterns that have similar entity names
:param entity_name:
:return:
'''
global current_patterns
far_pattern_ids=[]
aliases.append(entity_name)
for _, row in current_patterns.iterrows():
mask = str(row['mask'])
if mask == '':
mask = []
else:
mask=ast.literal_eval(str(row['mask']))
hpattern_iter = ast.literal_eval(str(row['hpattern']))
entities_iter = find_entites(hpattern_iter, mask)
for entity_iter in entities_iter:
for alias in aliases:
if word_similarity(alias, entity_iter) > 0.5:
far_pattern_ids.append(row['pattern_id'])
return far_pattern_ids
def matcher_bo_entity(entity_name,seed_aliases):
'''
if the entity name is already present in the learned_patterns, it gets the exact pattern. Then checks if it is present in the current_patterns.
if present then just returns the exact pattern. If not, then finds the closest pattern in current_pattern.
:param entity_name:
:return:
'''
global learned_patterns
global all_patterns
pre_learned_patterns=[]
pre_learned_masks=[]
exact_pattern_ids=[]
exact_masks = {}
close_pattern_ids=[]
far_pattern_ids=[]
# check if the any patterns for the entity have already been identified
if entity_name in list(learned_patterns['entity_name']):
# seed_aliases=str(list(learned_patterns[learned_patterns['entity_name'] == entity_name]['seed_aliases'])[0])
# seed_aliases=seed_aliases.split(',')
pattern_ids=str(list(learned_patterns[learned_patterns['entity_name'] == entity_name]['pattern_ids'])[0])
if pattern_ids!='':
pattern_ids=ast.literal_eval(pattern_ids)
for pattern_id in pattern_ids:
# get the pattern using the id
pre_learned_patterns.append(str(list(all_patterns[all_patterns['pattern_id']==pattern_id]['hpattern'])[0]))
pre_learned_mask=str(list(all_patterns[all_patterns['pattern_id'] == pattern_id]['mask'])[0])
if pre_learned_mask!='':
pre_learned_masks.append(ast.literal_eval(pre_learned_mask))
else:
pre_learned_masks.append([])
# find suitable current patterns
if len(pre_learned_patterns)!=0:
print('We have seen this entity before! Let us find if the exact patterns work...')
for hpattern, mask in zip(pre_learned_patterns, pre_learned_masks):
# check if the exact pattern is present in the current patterns
exact_hpatterns_found=find_exact_patterns(hpattern)
exact_pattern_ids.extend(exact_hpatterns_found)
for pattern_id in exact_hpatterns_found:
exact_masks[pattern_id]=mask
if len(exact_pattern_ids)>0:
print('looks like the entity is present in the same form! Great!')
else:
print('finding patterns closer to learned patterns ...')
for hpattern in pre_learned_patterns:
# find the closest patterns
close_pattern_ids.extend(find_close_patterns(hpattern))
else:
# find the patterns that have similar entity name
print('looks like nothing close enough is there! Let us just find the closest-seeming entity by name!')
far_pattern_ids.extend(find_far_patterns(entity_name,aliases=seed_aliases))
return exact_pattern_ids, close_pattern_ids, far_pattern_ids, exact_masks
def matcher_bo_value(entity_value):
'''
searches for all the patterns in current_pattern that have the particular value associated with them
:param entity_value:
:return:
'''
global current_patterns
exact_pattern_ids=[]
instance_samples=[] # one instance per pattern
for _, row in current_patterns.iterrows():
instances=ast.literal_eval(str(row['instances']))
for instance in instances:
if entity_value in instance:
exact_pattern_ids.append(row['pattern_id'])
instance_samples.append(instance)
break
return exact_pattern_ids, instance_samples
def parse_document(file_path):
parsed_text=[]
# create a dir for dumping split pdfs
if os.path.exists('./temp'):
shutil.rmtree('./temp/')
else:
os.mkdir('./temp')
split_pdf_pages(file_path, 'temp')
for pdf_page in os.listdir('temp'):
# print('processing page: ',pdf_page)
parsed = parser.from_file(os.path.join('temp', pdf_page))
try:
pdftext = parsed['content']
except Exception:
print("Could not read file.")
pdftext=''
parsed_text.append(pdftext)
return parsed_text
def filter1(row):
'''
Returns True if the pattern satisfies a certain criteria, else False
:param row:
:return:
'''
global threshold
# if the pattern occurs in the document less than the threshold then return false
if int(row['num_instances'])>threshold:
return True
return False
def filter2(row):
'''
Returns True if the pattern satisfies a certain criteria, else False
:param row:
:return:
'''
pattern=ast.literal_eval(str(row['hpattern']))
# if the first token is preposition/pronoun or punctuation then return false
if pattern[0][2] ==PREP or pattern[0][2] ==PUNC:
return False
return True
def filter3(row):
'''
Returns True if the pattern satisfies a certain criteria, else False
:param row:
:return:
'''
pattern=ast.literal_eval(str(row['hpattern']))
for token in pattern:
# if atleast one entity/unit found, it is okay
if token[2] == WORD:
return True
return False
def filter4(row):
'''
Returns True if the pattern satisfies a certain criteria, else False
:param row:
:return:
'''
pattern=ast.literal_eval(str(row['hpattern']))
for token in pattern:
# if atleast one number found, it is okay
if token[2] == DIGI:
return True
return False
def apply_filters(fltr):
'''
Apply filters to remove 'irrelevant' current patterns: see filter1 impl
:param fltr: a filter function, e.g. filter1
:return:
'''
global current_patterns
current_patterns=current_patterns[current_patterns.apply(lambda x: fltr(x), axis=1)]
print('FILTERED! now number of patterns: ', len(current_patterns))
def getID():
global counter
counter+=1
return counter
def get_base_pattern(hpattern):
'''
takes the second level of an hpattern (non variable tokens)
:param hpattern:
:return:
'''
base_pattern=[]
for patt in hpattern:
base_pattern.append(patt[1])
return tuple(base_pattern)
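# Hedged illustration (hand-built hpattern; in the real pipeline hpatterns come from
# create_hpattern below): the base pattern keeps only the middle element of every token,
# so literal words and punctuation survive while concrete numbers collapse into the
# generic DIGI class. Not called anywhere.
def _example_get_base_pattern():
    hpattern = (('casing', 'casing', WORD), ('@', '@', PUNC), ('250', DIGI, DIGI))
    return get_base_pattern(hpattern)   # expected: ('casing', '@', 'Digi~')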
def create_hpattern(instance):
'''
creates a hierarchy of 'denominations/classes' for each base pattern
:param instance:
:return: base_pattern, h_pattern
'''
global punc
global prepos
global units
signature = []
for token in instance:
if token in prepos:
signature.append((token, token, PREP))
elif token.isnumeric():
signature.append((token, DIGI, DIGI))
elif token.isalpha():
sign=[token, token, WORD]
if token.lower() in units:
sign.append(UNIT)
signature.append(tuple(sign))
elif ispunc(token):
signature.append((token, token, PUNC))
else:
            signature.append((token, token, token))  # fallback: keep the raw token at every level
return tuple(signature)
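# Sketch of the hierarchy produced, assuming init() has already populated
# prepos/units/punc and the PREP/DIGI/WORD/UNIT constants (tokens are made up):
#   create_hpattern(['depth', 'of', '9000', 'ft'])
#   -> (('depth', 'depth', WORD), ('of', 'of', PREP),
#       ('9000', DIGI, DIGI), ('ft', 'ft', WORD, UNIT))
# get_base_pattern() then keeps only the middle level: ('depth', 'of', DIGI, 'ft')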
def create_patterns_per_doc(parsed_text):
'''
:param parsed_text: it should be a list of texts. One item/text for every page in the document.
:return:
'''
global current_patterns
global current_document
instance_order_temp=0
all_hpatterns=[]
all_base_patterns=[]
all_instances = []
all_instances_orders = []
for page in parsed_text:
page_hpatterns=[]
page_base_patterns=[]
page_instances = []
for line in page.split('\n'): # pattern analysis is done based on each line
# create chunks by dividing on commas+space, period+space (and multi-space??) so that patterns don't span beyond them
# chunks=re.split(', |\. |\s{2,}',line)
chunks = re.split(', |\. |;', line.lower())
# print(line, chunks)
# remove commas from numbers (8,643), give valid spacing around #, = and @
# tokenize everything based on spaces/tabs
# creates a list(chunk) of lists(tokens): [[token,token,token],[token,token]]
chunks = [
chunk.replace(",", "").replace("=", " = ").replace("@", " @ ").replace("#", " # ").replace("$", " $ ").
replace("°", " ° ").replace("%", " % ").replace("\"", " \" ").replace("'", " ' ").replace(":",
" : ").split()
for chunk in chunks]
            # separate the tokens further using the natural separation boundaries
chunks = [break_and_split(chunk) for chunk in chunks]
chunks_base_patterns=[]
chunks_hpatterns=[]
for chunk in chunks:
# convert each chunk to base pattern and hpattern
hpattern=create_hpattern(chunk)
base_pattern=get_base_pattern(hpattern)
chunks_base_patterns.append(base_pattern)
chunks_hpatterns.append(hpattern)
# create n-grams
n_gram_range = (3, 4, 5, 6, 7)
for n in n_gram_range:
all_grams_base_patterns = list(map(lambda x: list(ngrams(x, n)), chunks_base_patterns))
all_grams_hpatterns = list(map(lambda x: list(ngrams(x, n)), chunks_hpatterns))
all_grams = list(map(lambda x: list(ngrams(x, n)), chunks))
# flatten the nested list
all_grams_base_patterns = [item for sublist in all_grams_base_patterns for item in sublist]
all_grams_hpatterns = [item for sublist in all_grams_hpatterns for item in sublist]
all_grams = [item for sublist in all_grams for item in sublist]
page_base_patterns.extend(all_grams_base_patterns)
page_hpatterns.extend(all_grams_hpatterns)
page_instances.extend(all_grams)
all_base_patterns.append(page_base_patterns)
all_hpatterns.append(page_hpatterns)
all_instances.append(page_instances)
all_instances_orders.append(list(range(instance_order_temp, instance_order_temp + len(page_instances))))
instance_order_temp+=len(page_instances)
all_page_numbers=[]
for indx, _ in enumerate(all_instances):
all_page_numbers.append(list(np.full(len(_),indx+1)))
all_base_patterns_flattened=[item for sublist in all_base_patterns for item in sublist]
all_hpatterns_flattened = [item for sublist in all_hpatterns for item in sublist]
all_instances_flattened = [item for sublist in all_instances for item in sublist]
all_page_numbers_flattened=[item for sublist in all_page_numbers for item in sublist]
all_instances_orders_flattened=[item for sublist in all_instances_orders for item in sublist]
counted_patterns = collections.Counter(all_base_patterns_flattened)
# ======= get the longest pattern with the same support (keeps only the superset, based on minsup criteria)
# todo: check if works correctly
filtered_patterns = {}
for pattern in counted_patterns.keys():
# create the ngrams/subsets of a set and check if they are already present, if so check minsup and delete
len_pattern = len(pattern)
filtered_patterns[pattern] = counted_patterns[pattern]
for i in range(1, len_pattern):
# create all size sub patterns/n-grams
subpatterns = list(ngrams(pattern, i))
for subpattern in subpatterns:
if subpattern in filtered_patterns.keys() and filtered_patterns[subpattern] == counted_patterns[pattern]:
# delete subpattern
# print('deleting',subpattern,', because: ', pattern, filtered_pattens[subpattern], counted[pattern])
filtered_patterns.pop(subpattern)
# ========== create data frame
# aggregate the instances based on base patterns
# create a mapping from base pattern to hpattern
aggregated_pattern_instance_mapping={}
aggregated_pattern_pagenumber_mapping={}
aggregated_pattern_order_mapping = {}
base_pattern_to_hpattern={}
for pattern, hpattern, instance, page_number, instance_order in zip(all_base_patterns_flattened, all_hpatterns_flattened, all_instances_flattened,all_page_numbers_flattened, all_instances_orders_flattened):
# aggregate
if pattern not in aggregated_pattern_instance_mapping.keys():
aggregated_pattern_instance_mapping[pattern]=[]
aggregated_pattern_pagenumber_mapping[pattern]=[]
aggregated_pattern_order_mapping[pattern]=[]
aggregated_pattern_instance_mapping[pattern].append(instance)
aggregated_pattern_pagenumber_mapping[pattern].append(page_number)
aggregated_pattern_order_mapping[pattern].append(instance_order)
# mapping
if pattern not in base_pattern_to_hpattern.keys():
base_pattern_to_hpattern[pattern]=hpattern
for pattern in aggregated_pattern_instance_mapping.keys():
if pattern in filtered_patterns:
pattern_id=getID()
current_patterns=current_patterns.append({'pattern_id':pattern_id,'base_pattern':str(pattern),'instances':str(aggregated_pattern_instance_mapping[pattern]),
'page_numbers':str(aggregated_pattern_pagenumber_mapping[pattern]),'instances_orders':str(aggregated_pattern_order_mapping[pattern]),'hpattern':str(base_pattern_to_hpattern[pattern]),'document_name':current_document,'num_instances':str(counted_patterns[pattern])}, ignore_index=True)
# ============= apply filters
# filter the patterns that have the number of instances below a certain threshold
apply_filters(filter1)
# remove the ones that start with a punctuation or preposition
apply_filters(filter2)
# remove the patterns that have only punctuations, prepositions and numbers
apply_filters(filter3)
# remove the ones that have no numbers
apply_filters(filter4)
current_patterns = current_patterns.replace(np.nan, '', regex=True)
current_patterns.to_csv('current_patterns.csv')
def find_interesting_patterns():
'''
using the list of other patterns, find the matching patterns from the current document
:param patterns:
:return:
'''
global interesting_patterns
def init(file_path, fresh=False):
'''
initialize and load all the relevant dataframes and datastructures
:param file_path
:param fresh : if True then initialize everything anew
:return:
'''
global prepos, punc, units
global threshold, current_document_path, counter
global learned_patterns, all_patterns, current_patterns, other_patterns, other_pattern_instances
prepos = ['aboard', 'about', 'above', 'across', 'after', 'against', 'along', 'amid', 'among', 'anti', 'around',
'as',
'at', 'before', 'behind', 'below', 'beneath', 'beside', 'besides', 'between', 'beyond', 'but', 'by',
'concerning', 'considering', 'despite', 'down', 'during', 'except', 'excepting', 'excluding', 'following',
'for', 'from', 'in', 'inside', 'into', 'like', 'minus', 'near', 'of', 'off', 'on', 'onto', 'opposite',
'outside',
'over', 'past', 'per', 'plus', 'regarding', 'round', 'save', 'since', 'than', 'through', 'to', 'toward',
'towards',
'under', 'underneath', 'unlike', 'until', 'up', 'upon', 'versus', 'via', 'with', 'within', 'without',
'and', 'or']
units = ['ft', 'gal', 'ppa', 'psi', 'lbs', 'lb', 'bpm', 'bbls', 'bbl', '\'', "\"", "'", "°", "$", 'hrs']
punc = set(string.punctuation)
if_seen_document_before=False
threshold = 6
# save state across documents
if os.path.exists('counter'):
counter=loadmodel('counter')
else:
counter = 0
print('counter',counter)
current_document_path = ''
global current_document
current_document = file_path.split('/')[-1]
# entity matchings for all the documents processed so far
if os.path.exists('learned_patterns.csv'):
learned_patterns = pd.read_csv('learned_patterns.csv', index_col=0)
learned_patterns = learned_patterns.replace(np.nan, '', regex=True)
else:
learned_patterns = pd.DataFrame(columns=['entity_name', 'seed_aliases', 'pattern_ids'])
# pattern information about all the patterns seen so far from all the documents processed
if os.path.exists('all_patterns.csv'):
all_patterns = pd.read_csv('all_patterns.csv', index_col=0)
all_patterns = all_patterns.replace(np.nan, '', regex=True)
current_document = file_path.split('/')[-1]
if len(all_patterns[all_patterns['document_name']==current_document])!=0:
if_seen_document_before=True
else:
all_patterns = pd.DataFrame(
columns=['pattern_id', 'base_pattern', 'instances', 'hpattern', 'document_name', 'num_instances', 'mask','page_numbers'])
if if_seen_document_before:
print('Seen the document before. Loading patterns.: ' + current_document)
current_patterns = all_patterns[all_patterns['document_name']==current_document]
        current_patterns = current_patterns.reset_index(drop=True)
else:
current_patterns = pd.DataFrame(
columns=['pattern_id', 'base_pattern', 'instances', 'hpattern', 'document_name', 'num_instances', 'mask',
'page_numbers'])
other_patterns = pd.DataFrame(columns=['hpattern', 'instances', 'document_name'])
parsed_text = parse_document(file_path)
print('Creating patterns for the document: ' + current_document)
create_patterns_per_doc(parsed_text)
# todo: remove this, just for testing, uncomment above
# current_patterns= pd.read_csv('current_patterns.csv',index_col=0)
# current_patterns = current_patterns.replace(np.nan, '', regex=True)
all_patterns = pd.concat([all_patterns, current_patterns])
def close():
'''
:return:
'''
global all_patterns
global current_patterns
global counter
# ============ add to the list of all patterns seen so far
# all_patterns=pd.concat([all_patterns, current_patterns]) # done in init() now
# all_patterns.to_csv('all_patterns.csv')
savemodel(counter,'counter')
# build_report_pagewise()
build_report_w_smart_align()
# todo: add the finding of 'interesting' patterns
# ==================================== report building
def build_report_pagewise():
'''
when the training is done, build the report based on the learned structures
:return:
'''
global all_patterns
global learned_patterns
report=pd.DataFrame(columns=['page number','document name'])
for index, row in learned_patterns.iterrows():
entity_name=row['entity_name']
pattern_ids=ast.literal_eval(str(row['pattern_ids']))
for pattern_id in pattern_ids:
report_temp=pd.DataFrame(columns=[entity_name,'page number','document name'])
row=all_patterns[all_patterns['pattern_id']==pattern_id]
instances=ast.literal_eval(str(list(row['instances'])[0]))
hpattern=ast.literal_eval(str(list(row['hpattern'])[0]))
            mask_raw = str(list(row['mask'])[0])
            mask = ast.literal_eval(mask_raw) if mask_raw else []
page_numbers = ast.literal_eval(str(list(row['page_numbers'])[0]))
document_name=list(row['document_name'])[0]
document_names=[document_name] * len(page_numbers)
values_in_instances=[]
for instance in instances:
values=find_values(instance, hpattern, mask)
values_in_instances.append(' '.join(values))
report_temp[entity_name]=values_in_instances
report_temp['page number']=page_numbers
report_temp['document name']=document_names
# aggregate by page number
def agg_for_doc(series):
return list(series)[0]
def agg_for_entity_values(series):
return '\n'.join(series)
report_temp=report_temp.groupby(['page number'],as_index=False).agg({'document name': agg_for_doc,
entity_name: agg_for_entity_values,
})
report=pd.merge(report, report_temp, how='outer', on=['page number', 'document name'])
new_names = {}
for col_name in list(report.columns):
        if col_name.endswith('_x') or col_name.endswith('_y'):
            new_names[col_name] = col_name[:-2]
report = report.rename(index=str, columns=new_names)
def sjoin(x):
return ';'.join(x[x.notnull()].astype(str))
report = report.groupby(level=0, axis=1).apply(lambda x: x.apply(sjoin, axis=1))
# aggregate by page number and document name one last time
agg_dict = {}
agg_func = lambda x: ' '.join(x)
for column in report:
if column != 'page number' and column != 'document name':
agg_dict[column] = agg_func
report =report.groupby(['page number', 'document name'],as_index=False).agg(agg_dict)
report=report.sort_values(by=['document name','page number'])
report.to_csv('report_pagewise.csv')
def build_report_w_smart_align():
stage_name = "stage"
global all_patterns
global learned_patterns
# initialize by random entity names
all_patterns['entity_name'] = pd.Series(np.random.randn(len(all_patterns)), index=all_patterns.index)
# print(all_patterns['entity_name'])
# adding the learned entity_names to the respective rows/patterns in all_patterns
all_pattern_ids = []
print(learned_patterns)
for index, row in learned_patterns.iterrows():
entity_name = row['entity_name']
pattern_ids = ast.literal_eval(str(row['pattern_ids']))
all_pattern_ids.extend(pattern_ids)
for id in pattern_ids:
all_patterns.loc[all_patterns['pattern_id'] == id, 'entity_name'] = entity_name
all_patterns = all_patterns[all_patterns['pattern_id'].isin(all_pattern_ids)]
all_patterns = all_patterns.reset_index(drop=True)
# do for each document:
# create a data frame of instances as rows to work on for record separation
final_record=pd.DataFrame()
for document_name in all_patterns['document_name'].unique():
all_patterns_doc=all_patterns[all_patterns['document_name']==document_name]
entities = set(all_patterns_doc['entity_name'])
series = pd.DataFrame()
for entity in entities:
instances_orders = ast.literal_eval(str(list(all_patterns_doc[all_patterns_doc['entity_name'] == entity]['instances_orders'])[0]))
instances = ast.literal_eval(str(list(all_patterns_doc[all_patterns_doc['entity_name'] == entity]['instances'])[0]))
hpattern=ast.literal_eval(str(list(all_patterns_doc[all_patterns_doc['entity_name'] == entity]['hpattern'])[0]))
mask = ast.literal_eval(
str(list(all_patterns_doc[all_patterns_doc['entity_name'] == entity]['mask'])[0]))
entity_names = [entity] * len(instances)
hpattern=[hpattern] * len(instances)
mask = [mask] * len(instances)
df = pd.DataFrame(
data={"instances_orders": instances_orders, "instances": instances, "entity_name": entity_names,"hpattern":hpattern, "mask":mask})
            series = pd.concat([series, df])
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import glob
def ecdf(data):
"""
Computes the empirical cumulative distribution function for a collection of provided data.
Parameters
----------
data : 1d-array, Pandas Series, or list
One-dimensional collection of data for which the ECDF will
be computed
Returns
-------
x, y : 1d-arrays
The sorted x data and the computed ECDF
"""
return np.sort(data), np.arange(0, len(data)) / len(data)
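# Minimal usage sketch (the sample data is synthetic); the returned arrays can
# be passed straight to a step or scatter plot.
def _example_ecdf():
    samples = np.random.exponential(scale=1.0, size=500)
    x, y = ecdf(samples)
    return x, y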
def compute_statistics(df, varnames=None, logprob_name='logp'):
R"""
Computes the mode, hpd_min, and hpd_max from a pandas DataFrame. The value
of the log posterior must be included in the DataFrame.
"""
# Get the vars we care about.
if varnames is None:
        varnames = [v for v in df.keys() if v != logprob_name]
# Find the max of the log posterior.
ind = np.argmax(df[logprob_name].values)
# Instantiate the dataframe for the parameters.
stat_df = pd.DataFrame([], columns=['parameter', 'mean', 'median', 'mode', 'hpd_min',
'hpd_max'])
for v in varnames:
mode = df.iloc[ind][v]
median = df[v].median()
mean = df[v].mean()
hpd_min, hpd_max = compute_hpd(df[v].values, mass_frac=0.95)
stat_dict = dict(parameter=v, median=median, mean=mean, mode=mode, hpd_min=hpd_min,
hpd_max=hpd_max)
stat_df = stat_df.append(stat_dict, ignore_index=True)
return stat_df
def compute_hpd(trace, mass_frac):
R"""
Returns highest probability density region given by
a set of samples.
Parameters
----------
trace : array
1D array of MCMC samples for a single variable
mass_frac : float with 0 < mass_frac <= 1
The fraction of the probability to be included in
        the HPD. For example, `mass_frac` = 0.95 gives a
95% HPD.
Returns
-------
output : array, shape (2,)
The bounds of the HPD
Notes
-----
We thank <NAME> (BBE, Caltech) for developing this function.
http://bebi103.caltech.edu/2015/tutorials/l06_credible_regions.html
"""
# Get sorted list
d = np.sort(np.copy(trace))
# Number of total samples taken
n = len(trace)
# Get number of samples that should be included in HPD
n_samples = np.floor(mass_frac * n).astype(int)
# Get width (in units of data) of all intervals with n_samples samples
int_width = d[n_samples:] - d[:n - n_samples]
# Pick out minimal interval
min_int = np.argmin(int_width)
# Return interval
return np.array([d[min_int], d[min_int + n_samples]])
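# Usage sketch on a synthetic trace; for 10,000 draws from a standard normal
# the 95% HPD should roughly bracket (-2, 2).
def _example_compute_hpd():
    trace = np.random.normal(loc=0.0, scale=1.0, size=10000)
    hpd_min, hpd_max = compute_hpd(trace, mass_frac=0.95)
    return hpd_min, hpd_max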
def compute_mean_sem(df):
"""
Computes the mean and standard error of the fold-change given a
grouped pandas Series.
"""
# Compute the properties
mean_fc = df['fold_change'].mean()
sem_fc = df['fold_change'].std() / np.sqrt(len(df))
# Assemble the new pandas series and return.
samp_dict = {'mean': mean_fc, 'sem': sem_fc}
    return pd.Series(samp_dict)
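# Typical call pattern, assuming a tidy DataFrame with a 'fold_change' column
# and some grouping column (the column name 'IPTG_uM' here is hypothetical):
#   summary = measurements.groupby('IPTG_uM').apply(compute_mean_sem)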
import os
import h5py
import numpy as np
import pandas as pd
from config import DATA_PATH
class Scaler:
def __init__(self, data):
self.mean = np.mean(data)
self.std = np.std(data)
def transform(self, data):
return (data - self.mean) / self.std
def inverse_transform(self, data):
return data * self.std + self.mean
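# Usage sketch: fit the statistics on training data once, then reuse them for
# normalisation and de-normalisation (the array name is arbitrary).
#   scaler = Scaler(train_array)
#   normed = scaler.transform(train_array)
#   restored = scaler.inverse_transform(normed)   # ~= train_array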
def load_h5(filename, keywords):
f = h5py.File(filename, 'r')
data = []
for name in keywords:
data.append(np.array(f[name]))
f.close()
if len(data) == 1:
return data[0]
return data
def get_distance_matrix(loc):
n = loc.shape[0]
loc_1 = np.tile(np.reshape(loc, (n,1,2)), (1,n,1)) * np.pi / 180.0
loc_2 = np.tile(np.reshape(loc, (1,n,2)), (n,1,1)) * np.pi / 180.0
loc_diff = loc_1 - loc_2
dist = 2.0 * np.arcsin(
np.sqrt(np.sin(loc_diff[:,:,0] / 2) ** 2 + np.cos(loc_1[:,:,0]) * np.cos(loc_2[:,:,0]) * np.sin(loc_diff[:,:,1] / 2) ** 2)
)
dist = dist * 6378.137 * 10000000 / 10000
return dist
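# Usage sketch: pairwise distances for three hypothetical stations given as
# (latitude, longitude) rows; the diagonal is zero by construction.
def _example_get_distance_matrix():
    loc = np.array([[39.9, 116.4], [31.2, 121.5], [23.1, 113.3]])
    return get_distance_matrix(loc)   # shape (3, 3)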
def build_graph(station_map, station_loc, n_neighbors):
dist = get_distance_matrix(station_loc)
n = station_map.shape[0]
src, dst = [], []
for i in range(n):
src += list(np.argsort(dist[:, i])[:n_neighbors + 1])
dst += [i] * (n_neighbors + 1)
mask = np.zeros((n, n))
mask[src, dst] = 1
dist[mask == 0] = np.inf
values = dist.flatten()
values = values[values != np.inf]
dist_mean = np.mean(values)
dist_std = np.std(values)
dist = np.exp(-(dist - dist_mean) / dist_std)
return dist, src, dst
def fill_missing(data):
T, N, D = data.shape
data = np.reshape(data, (T, N * D))
    df = pd.DataFrame(data)
import streamlit as st
import pandas as pd
import altair as alt
from ml_model import *
#### Title ####
st.title("How To Get Away With Murder: Data Edition")
st.write("If Batman were to study data visualization, it might look something like this.")
st.markdown("<p>Data taken from the <a href='https://data.cityofchicago.org/Public-Safety/Crimes-2001-to-present-Dashboard/5cd6-ry5g'>Chicago Crimes Dataset.</a></p>", unsafe_allow_html = True)
@st.cache # add caching so we load the data only once
def load_data(url):
# Load the crime data from https://data.cityofchicago.org/Public-Safety/Crimes-2001-to-present-Dashboard/5cd6-ry5g
df = pd.read_json(url)
df.columns = ['year', 'primary_type', 'num_crimes', 'num_arrests']
df['arrest_rate'] = df['num_arrests']/df['num_crimes']
df['crime_rate'] = df['num_crimes']
return df
crime_url = "https://data.cityofchicago.org/resource/ijzp-q8t2.json?$select=year,primary_type,count(primary_type),sum(case(arrest='1',1,true,0))&$group=primary_type,year"
df = load_data(crime_url)
#### Analysis Intro ####
st.markdown("<h2>Overall Crime Frequencies</h2>", unsafe_allow_html=True)
st.write("What is the current trend with the number of crimes in Chicago? Is this trend similarly reflected within each type of crime as well?")
st.write("Click on multiple types of crimes to see the changes in frequency over time for each type.")
selection = alt.selection_multi(fields=['primary_type'])
chart3 = alt.Chart(df).mark_area().encode(
alt.X("year:O"),
alt.Y("num_crimes:Q", stack='center', axis=None),
alt.Color("primary_type:N", scale=alt.Scale(scheme='category20b'), legend=None),
opacity=alt.condition(selection, alt.value(1), alt.value(0.2)),
tooltip='primary_type'
).add_selection(
selection
)
background = alt.Chart(df).mark_bar().encode(
alt.X('year:O'),
alt.Y('sum(num_crimes):Q'),
color=alt.value('#ddd')
)
hists = chart3.mark_bar(opacity=0.5, thickness=100).encode(
alt.X('year:O'),
alt.Y('num_crimes:Q'),
color=alt.Color('primary_type:N', scale=alt.Scale(scheme='category20b'), legend=None)
).transform_filter(
selection
)
#highlight = hist_base.transform_filter(selection)
st.write(chart3 | background + hists)
# chart = alt.Chart(df).mark_area().encode(
# alt.X("Year:T", axis=alt.Axis(domain=False, format='%Y', tickSize=0)),
# alt.Y("count(Year):Q"),
# alt.Color("Primary Type:N", scale=alt.Scale(scheme='category20b')),
# opacity=alt.condition(selection, alt.value(1), alt.value(0.2))
# ).add_selection(
# selection
# )
#### Number of Crimes vs. Arrest Rates ####
st.markdown("<h2>Arrest Rates Over Time</h2>", unsafe_allow_html=True)
st.write("Now that we've explored changes in types of crime over time, what is the trend for frequency of crimes and arrest rates?")
st.write("Hover over each line to see change in frequency and arrest rate for that particular type of crime.")
#crime_url = "https://data.cityofchicago.org/resource/ijzp-q8t2.json?$select=district,count(district),sum(case(arrest='1',1,true,0))&$group=district"
#return pd.read_json(crime_url)
highlight = alt.selection(type='single', on='mouseover', fields=['primary_type'], nearest=True)
crimes_base = alt.Chart(df).encode(
alt.X('year:O'),
alt.Y('num_crimes:Q'),
alt.Color('primary_type:N',legend=None),
tooltip='primary_type:N'
)
crimes_points = crimes_base.mark_circle().encode(
opacity=alt.value(0)
).add_selection(
highlight
)
crimes_lines = crimes_base.mark_line(interpolate='basis').encode(
size=alt.condition(~highlight, alt.value(1), alt.value(3)),
opacity=alt.condition(highlight, alt.value(1), alt.value(0.3))
)
arrests_base = alt.Chart(df).encode(
alt.X('year:O'),
alt.Y('arrest_rate:Q'),
alt.Color('primary_type:N', legend=None),
tooltip='primary_type:N'
)
arrests_points = arrests_base.mark_circle().encode(
opacity=alt.value(0)
).add_selection(
highlight
)
arrests_lines = arrests_base.mark_line(interpolate='basis').encode(
size=alt.condition(~highlight, alt.value(1), alt.value(3)),
opacity=alt.condition(highlight, alt.value(1), alt.value(0.3))
)
st.write(crimes_points+crimes_lines | arrests_points+arrests_lines)
#### Mapping Crimes ####
st.markdown("<h2>Location of Arrests</h2>", unsafe_allow_html=True)
st.write("What is the distribution of crimes over the city?")
st.write("Pan over the map to select an area to view and adjust the range of year.")
#chart.encode(y='num_crimes:Q') | chart.encode(y="arrest_rate:Q")
#st.write(chart)
#categorical_arrests = load_data('https://data.cityofchicago.org/resource/ijzp-q8t2.json?$select=year,primary_type,sum(case(arrest=%271%27,1,true,0)),count(arrest)&$group=year,primary_type')
#categorical_arrests.columns = ['year', 'primary_type', 'arrests', 'number of crimes']
#st.write(categorical_arrests)
nearest = alt.selection(type='single', nearest=True, on='mouseover',
fields=['year'], empty='none')
line = alt.Chart(df, height=600, width=800).mark_line(interpolate='basis').encode(
alt.X("year:O"),
alt.Y('arrest_rate:Q'),
alt.Color("primary_type:N", scale=alt.Scale(scheme='category20b'))
)
selectors = alt.Chart(df).mark_point().encode(
x='year:O',
opacity=alt.value(0),
).add_selection(
nearest
)
# Draw points on the line, and highlight based on selection
points = line.mark_point().encode(
opacity=alt.condition(nearest, alt.value(1), alt.value(0))
)
# Draw text labels near the points, and highlight based on selection
text = line.mark_text(align='left', dx=5, dy=-5).encode(
text=alt.condition(nearest, 'arrest_rate:Q', alt.value(' '))
)
# Draw a rule at the location of the selection
rules = alt.Chart(df).mark_rule(color='gray').encode(
x='year:O',
).transform_filter(
nearest
)
chart2 = alt.layer(
line, selectors, points, rules, text
).properties(
width=600, height=300
)
#st.write(chart2)
@st.cache
def load_coordinate_data(url):
df = pd.read_json(url)
df.columns = ['year', 'x_coordinate','freq', 'y_coordinate', 'y_freq']
df = df.drop(columns=['y_freq'])
df = df.dropna(axis=0)
df = df[df['x_coordinate'] > 0]
return df
location_url = 'https://data.cityofchicago.org/resource/ijzp-q8t2.json?$select=year,x_coordinate,count(x_coordinate),y_coordinate,count(y_coordinate)&$group=year,x_coordinate,y_coordinate'
coordinate_df = load_coordinate_data(location_url)
#st.write(coordinate_df)
brush = alt.selection(type='interval')
start_year, end_year = st.slider("Years", 2001, 2020, (2001, 2020))
location_chart = alt.Chart(coordinate_df).mark_point().transform_filter(
(start_year <= alt.datum['year']) & (end_year >= alt.datum['year'])).encode(
alt.X('x_coordinate:Q', scale=alt.Scale(domain=(1100000,1205000))),
alt.Y('y_coordinate:Q', scale=alt.Scale(domain=(1810000,1960000)))
).add_selection(brush)
count_chart = alt.Chart(coordinate_df).mark_point().transform_filter(
(start_year <= alt.datum['year']) & (end_year >= alt.datum['year'])).encode(
alt.X('x_coordinate:Q', scale=alt.Scale(domain=(1100000,1205000))),
alt.Y('y_coordinate:Q', scale=alt.Scale(domain=(1810000,1960000))),
size='sum(freq):Q'
).transform_filter(brush)
st.write(location_chart | count_chart)
@st.cache
def load_district_data(url):
df = pd.read_json(url)
df.columns = ['year','district','freq','arrests']
df['arrest_rate'] = df['arrests']/df['freq']
return df
#district_data = load_district_data("https://data.cityofchicago.org/resource/ijzp-q8t2.json?$select=year,district,count(year),sum(case(arrest='1',1,true,0))&$group=year,district")
#st.write(district_data)
#### District Breakdown ####
st.markdown("<h2>Frequency of Crime and Arrest Rate per District</h2>", unsafe_allow_html=True)
st.write("How do districts differ in terms of frequency of crime? Do all districts have similar arrest rates?")
st.write("Click on a district or a line representing a district to highlight it. The average across all districts is marked in red.")
@st.cache # add caching so we load the data only once
def load_district_arrests():
# url for district arrests
crime_url = "https://data.cityofchicago.org/resource/ijzp-q8t2.json?$select=year,district,count(district),sum(case(arrest='1',1,true,0))&$group=year,district"
#crime_url = "https://data.cityofchicago.org/resource/ijzp-q8t2.json?$select=year,district,count(district)&$group=year,district"
    df_district_count = pd.read_json(crime_url)
import pandas as pd
import numpy as np
from financePy import scraper as scr
from financePy import plotter
from scipy.optimize import minimize
from financePy import general_tools as gt
from financePy.estimators import finance_estimates as fe
"""
traili_ret_freq:
d,m,
dividens :
splits:
ownership:
opt1 = ['OwnershipData','ConcentratedOwners','Buyers','Sellers']
opt2 = ['mutualfund','institution']
executives:
company_profile:
realt_time_info :
complete_valuation
current_valuation
forward_valuation
history_valuation
financials
key_ratio
get_yield
"""
class Portfolio:
def __init__(self,symbols , start_date = [2000,1,1], end_date=False, by = 'm', desider_data = 'all',country = 'united states',fundamentals = True, tecnicals = True, folder = False):
if by.lower() in ['quandl','q']:
self.by = 'q'
self.tecnicals = scr.Q_data(symbols, start_date = start_date, end_date=end_date, folder = folder)
self.symbols = list(self.tecnicals.keys())
elif by.lower() in ['yahoo','y']:
self.by = 'y'
self.symbols = set()
if tecnicals:
temp = scr.Y_data(symbols, start_date = start_date, end_date=end_date, folder = folder)
for key in list(temp.keys()):
temp[key] = temp[key]['historical']
self.tecnicals = temp
self.symbols.union( list(self.tecnicals.keys()))
if fundamentals:
self.fundamentals = scr.Y_data(symbols,desider_data, start_date = start_date, end_date=end_date, folder = folder)
self.symbols.union( list(self.fundamentals.keys()))
elif by.lower() in ['morningstar','m']:
self.by = 'm'
self.symbols = set()
if fundamentals:
print('Morningstar fundamentals')
self.fundamentals = scr.MS_data(symbols,desider_data, folder = folder)
self.symbols = self.symbols.union(list(self.fundamentals.keys()))
if tecnicals:
print('\nMorningstar tecnicals')
temp = scr.MS_data(symbols,['historical'],start_date, end_date, folder = folder)
for key in list(temp.keys()):
temp[key] = temp[key]['historical']
self.tecnicals = temp
self.symbols = self.symbols.union(list(self.tecnicals.keys()))
else:
raise ValueError('I think you entered an invalid argument for by :')
if fundamentals and self.by != 'q':
self.big_tree = {}
for tick in self.fundamentals.keys():
single = []
for index in self.fundamentals[tick]:
temp = []
if type(self.fundamentals[tick][index]) == type({}):
for k in self.fundamentals[tick][index].keys():
for i in self.fundamentals[tick][index][k].index:
temp += ['%s~%s~%s' % (index,k,i)]
elif type(self.fundamentals[tick][index]) == type(pd.DataFrame()):
for i in self.fundamentals[tick][index].index:
temp += ['%s~%s' % (index,i)]
single += temp
self.big_tree[tick] = single
# def real_time(self, )
def remove(self,symbol):
self.symbols.remove(symbol)
self.tecnicals.pop(symbol)
try:
self.fundamentals.pop(symbol)
except:
pass
def screener(self,screeners, inplace = False):
self.screened = {}
for k,i in screeners.items():
screen = k
threshold = i[0]
up = i[1]
if screen in ['Volume','Close','Open','High','Low']:
# tecnicals
self.screened['historical'] = {}
if up:
percentile = np.percentile(np.array([self.tecnicals[x][screen].sum() for x in self.symbols]),100-threshold*100)
for x in list(self.symbols):
if self.tecnicals[x][screen].sum() < percentile and inplace:
self.remove(x)
elif self.tecnicals[x][screen].sum() > percentile and not inplace:
self.screened['historical'][x] = self.tecnicals[x]
else:
percentile = np.percentile(np.array([self.tecnicals[x][screen].sum() for x in self.symbols]),threshold*100)
for x in list(self.symbols):
                        if self.tecnicals[x][screen].sum() > percentile and inplace:
self.remove(x)
elif self.tecnicals[x][screen].sum() < percentile and not inplace:
self.screened['historical'][x] = self.tecnicals[x]
else:
# fundamentals
pass
def bollinger_bands(self, cat = 'Close', window = 20, dropna = True, notifier = False, output = False, plot = False):
bollinger_bands = {}
for symbol in self.tecnicals.keys():
bollinger_bands[symbol] = fe.bollinger_bands(self.tecnicals[symbol], cat = cat, window = window, dropna = dropna, notifier = notifier)
self.boll_bands = bollinger_bands
if plot:
plotter.boll_bands_plot(self.boll_bands, notifier)
if output:
return bollinger_bands
def EMA(self, windows_list = [35,70], cat = 'Close', plot = False, dropna = True, output = False, notifier = False):
ema = {}
for symbol in self.tecnicals.keys():
ema[symbol] = fe.exp_mov_avg(self.tecnicals[symbol], windows_list = windows_list, cat = cat, dropna = dropna, notifier = notifier)
self.ema = ema
if plot:
plotter.mov_avg_plot(ema,notifier)
if output:
return ema
def markowits_allocation(self, minimun = 0.01, plot = False):
symbols_list = list(self.tecnicals.keys())
prices = pd.concat([self.tecnicals[ticker].Close for ticker in symbols_list],1)
prices.fillna(method='ffill', inplace = True)
prices.columns = symbols_list
log_ret = np.log(prices/prices.shift(1))
constraints = ({'type' : 'eq', 'fun' : lambda x: np.sum(x)-1})
bounds = [(0,1) for i in range(len(symbols_list))]
init_guess = [ 1/len(symbols_list) for i in range(len(symbols_list))]
neg_sharpe = lambda x,y : -1 * gt.get_ret_vol_SR(x,y)[2]
opt_result = minimize(neg_sharpe, init_guess, args=(log_ret), method = 'SLSQP', bounds = bounds, constraints = constraints)
self.weights = opt_result.x[opt_result.x>minimun]/sum(opt_result.x)
self.stocks = list(np.array(symbols_list)[opt_result.x>minimun])
        mark_result = pd.concat([pd.Series(self.stocks), pd.Series(self.weights)], axis=1)  # pair each kept ticker with its weight
import numpy as np
import pandas as pd
from pandas import (
get_dummies,
)
from numpy.linalg import lstsq
import warnings
# before version 0.0.3, still use epsilon when demean
def demean_dataframe(df, consist_var, category_col, epsilon=1e-8, max_iter=1e6):
"""
:param df: Dataframe
:param consist_var: List of columns need centering on fixed effects
:param category_col: List of fixed effects
:param epsilon: Tolerance
:param max_iter: Maximum iterations
:return: Demeaned dataframe
"""
n = df.shape[0]
df_copy = df.copy()
is_unbalance = False
# if there's only one category variable, doesn't matter if balance or not.
## is_unbalance option is only used when there're two category variables
if len(category_col)>1:
n_cat = 1
for cat in category_col:
n_cat = n_cat * df[cat].nunique()
if n_cat > df.shape[0]:
warnings.warn('panel is unbalanced')
is_unbalance = True
#2020/12/23 when demean only once, no need to converge
if len(category_col) == 1:
cat = category_col[0]
for consist in consist_var:
df_copy[consist] = df[consist] - df.groupby(cat)[consist].transform('mean')
elif len(category_col) == 2:
df_copy = demean_dataframe_two_cat(df_copy, consist_var, category_col, is_unbalance)
else:
for consist in consist_var:
mse = 10
iter_count = 0
demeans_cache = np.zeros(n, np.float64)
while mse > epsilon:
for cat in category_col:
if iter_count == 0:
df_copy[consist] = df[consist] - df.groupby(cat)[consist].transform('mean')
else:
df_copy[consist] = df_copy[consist] - df_copy.groupby(cat)[consist].transform('mean')
iter_count += 1
mse = np.linalg.norm(df_copy[consist].values - demeans_cache)
demeans_cache = df_copy[consist].copy().values
if iter_count > max_iter:
raise RuntimeWarning('Exceeds the maximum iteration counts, please recheck dataset')
break
return df_copy
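# Usage sketch with three fixed effects so the iterative branch above is
# exercised (column names and values are made up; a warning about
# unbalancedness may be emitted for this toy panel).
def _example_demean_dataframe():
    rng = np.random.default_rng(0)
    df = pd.DataFrame({
        'y': rng.normal(size=12),
        'x': rng.normal(size=12),
        'firm': list('aabbccddeeff'),
        'year': [1, 2] * 6,
        'industry': [1, 1, 2, 2, 3, 3] * 2,
    })
    return demean_dataframe(df, ['y', 'x'], ['firm', 'year', 'industry'])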
## 2021/12/16: to avoid convergence issue when panel is too unbalanced
# demean when category len equals 2
def demean_dataframe_two_cat(df_copy, consist_var, category_col, is_unbalance):
"""
reference: Baltagi http://library.wbi.ac.id/repository/27.pdf page 176, equation (9.30)
:param df_copy: Dataframe
:param consist_var: List of columns need centering on fixed effects
:param category_col: List of fixed effects
:return: Demeaned dataframe
"""
if is_unbalance:
# first determine which is uid or the category that has the most items
max_ncat = df_copy[category_col[0]].nunique()
max_cat = category_col[0]
for cat in category_col:
if df_copy[cat].nunique() >= max_ncat:
max_ncat = df_copy[cat].nunique()
max_cat = cat
min_cat = category_col.copy()
min_cat.remove(max_cat)
min_cat = min_cat[0]
df_copy.sort_values(by=[max_cat, min_cat], inplace=True)
# demean on the first category variable, max_cat
for consist in consist_var:
df_copy[consist] = df_copy[consist] - df_copy.groupby(max_cat)[consist].transform('mean')
        dummies = get_dummies(df_copy[min_cat])
"""Pandas/Numpy common recipes."""
import os
import scipy
import numpy as np
import pandas as pd
def rename_duplicates(series, delim="-"):
"""Rename duplicate values to be unique. ['a', 'a'] will become ['a', 'a-1'], for example.
:param series: series with values to rename
:type series: pandas.Series
:param delim: delimeter before duplicate-number index, defaults to "-"
:type delim: str, optional
:return: series where original duplicates have been renamed to -1, -2, etc.
:rtype: pandas.Series
"""
duplicate_suffix = (
series.groupby(series).cumcount().astype(str).replace("0", "")
) # a number for all but first occurence
extra_strs = delim + duplicate_suffix
# remove entries that are just the delim
extra_strs = extra_strs.replace(delim, "")
# add to values
out = series.astype(str) + extra_strs
# confirm unique (may fail if a-1 happened to match another element that preexisted!)
assert out.nunique() == out.shape[0]
return out
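# Example: pd.Series(['a', 'a', 'b']) becomes ['a', 'a-1', 'b'] with the
# default delimiter.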
def merge_into_left(left, right, **kwargs):
"""Defensively merge [right] series or dataframe into [left] by index, preserving [left]'s index exactly. [right] data will be reordered to match [left] index.
:param left: left data whose index will be preserved
:type left: pandas.DataFrame or pandas.Series
:param right: right data which will be reordered based on left index.
:type right: pandas.DataFrame or pandas.Series
:param \**kwargs: passed to pandas.merge
:return: left-merged DataFrame with [left]'s index
:rtype: pandas.DataFrame
"""
# defensively cast to dataframe
df1 = pd.DataFrame(left)
df2 = pd.DataFrame(right)
df = pd.merge(
df1,
df2,
how="left",
left_index=True,
right_index=True,
sort=False,
validate="1:1",
**kwargs
)
# TODO: asserts are stripped away when code is optimized; replace with if not, raise ValueError('message')
assert df.shape[0] == df1.shape[0]
assert df.shape[1] == df1.shape[1] + df2.shape[1]
df.index = df1.index
return df
def horizontal_concat(df_left, df_right):
"""Concatenate df_right horizontally to df_left, with no checks for whether the indexes match, but confirming final shape.
:param df_left: Left data
:type df_left: pandas.DataFrame or pandas.Series
:param df_right: Right data
:type df_right: pandas.DataFrame or pandas.Series
:return: Copied dataframe with df_right's columns glued onto the right side of df_left's columns
:rtype: pandas.DataFrame
"""
# defensively cast to DataFrame
df1 = pd.DataFrame(df_left)
df2 = pd.DataFrame(df_right)
df = pd.concat([df1, df2], axis=1)
assert df.shape[0] == df1.shape[0] == df2.shape[0]
assert df.shape[1] == df1.shape[1] + df2.shape[1]
return df
def vertical_concat(df_top, df_bottom, reset_index=False):
"""Concatenate df_bottom vertically to df_top, with no checks for whether the columns match, but confirming final shape.
:param df_top: Top data
:type df_top: pandas.DataFrame
:param df_bottom: Bottom data
:type df_bottom: pandas.DataFrame
:param reset_index: Reset index values after concat, defaults to False
:type reset_index: bool, optional
:return: Copied dataframe with df_bottom's rows glued onto the bottom of df_top's rows
:rtype: pandas.DataFrame
"""
# defensively cast to DataFrame
df1 = pd.DataFrame(df_top)
df2 = pd.DataFrame(df_bottom)
    df = pd.concat([df1, df2], axis=0)
import datetime
import numpy as np
import pandas as pd
import pandas.testing as pdt
from cape_privacy.pandas import dtypes
from cape_privacy.pandas.transformations import DateTruncation
from cape_privacy.pandas.transformations import NumericRounding
def _make_apply_numeric_rounding(input, expected_output, ctype, dtype):
transform = NumericRounding(dtype=ctype, precision=1)
df = pd.DataFrame({"amount": input}).astype(dtype)
expected = pd.DataFrame({"amount": expected_output}).astype(dtype)
df["amount"] = transform(df.amount)
return df, expected
def _make_apply_datetruncation(frequency, input_date, expected_date):
transform = DateTruncation(frequency=frequency)
df = pd.DataFrame({"date": [input_date]})
expected = pd.DataFrame({"date": [expected_date]})
df["date"] = transform(df.date)
return df, expected
def test_rounding_float32():
input = [10.8834, 4.21221]
expected_output = [10.9, 4.2]
df, expected = _make_apply_numeric_rounding(
input, expected_output, dtypes.Float, np.float32
)
pdt.assert_frame_equal(df, expected)
def test_rounding_float64():
input = [10.8834, 4.21221]
expected_output = [10.9, 4.2]
df, expected = _make_apply_numeric_rounding(
input, expected_output, dtypes.Double, np.float64
)
pdt.assert_frame_equal(df, expected)
def test_truncate_date_year():
input_date = datetime.date(year=2018, month=10, day=3)
expected_date = datetime.date(year=2018, month=1, day=1)
df, expected = _make_apply_datetruncation("YEAR", input_date, expected_date)
pdt.assert_frame_equal(df, expected)
def test_truncate_datetime_year():
input_date = pd.Timestamp(year=2018, month=10, day=3)
expected_date = pd.Timestamp(year=2018, month=1, day=1)
df, expected = _make_apply_datetruncation("YEAR", input_date, expected_date)
pdt.assert_frame_equal(df, expected)
def test_truncate_datetime_month():
    input_date = pd.Timestamp(year=2018, month=10, day=3, hour=9, minute=20, second=25)
import re
import datetime
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
# ---------------------------------------------------
# Person data methods
# ---------------------------------------------------
class TransformGenderGetFromName:
    """Gets clients' genders from their Russian second names.
Parameters:
column_name (str): Column name in InsolverDataFrame containing clients' names, column type is string.
column_gender (str): Column name in InsolverDataFrame for clients' genders.
gender_male (str): Return value for male gender in InsolverDataFrame, 'male' by default.
gender_female (str): Return value for female gender in InsolverDataFrame, 'female' by default.
"""
def __init__(self, column_name, column_gender, gender_male='male', gender_female='female'):
self.priority = 0
self.column_name = column_name
self.column_gender = column_gender
self.gender_male = gender_male
self.gender_female = gender_female
@staticmethod
def _gender(client_name, gender_male, gender_female):
if pd.isnull(client_name):
gender = None
elif len(client_name) < 2:
gender = None
elif client_name.upper().endswith(('ИЧ', 'ОГЛЫ')):
gender = gender_male
elif client_name.upper().endswith(('НА', 'КЫЗЫ')):
gender = gender_female
else:
gender = None
return gender
def __call__(self, df):
df[self.column_gender] = df[self.column_name].apply(self._gender, args=(self.gender_male, self.gender_female,))
return df
class TransformAgeGetFromBirthday:
    """Gets clients' ages in years from their birth dates and policies' start dates.
Parameters:
column_date_birth (str): Column name in InsolverDataFrame containing clients' birth dates, column type is date.
column_date_start (str): Column name in InsolverDataFrame containing policies' start dates, column type is date.
column_age (str): Column name in InsolverDataFrame for clients' ages in years, column type is int.
"""
def __init__(self, column_date_birth, column_date_start, column_age):
self.priority = 0
self.column_date_birth = column_date_birth
self.column_date_start = column_date_start
self.column_age = column_age
@staticmethod
def _age_get(datebirth_datestart):
date_birth = datebirth_datestart[0]
date_start = datebirth_datestart[1]
if pd.isnull(date_birth):
age = None
elif pd.isnull(date_start):
age = None
elif date_birth > datetime.datetime.now():
age = None
elif date_birth.year < datetime.datetime.now().year - 120:
age = None
elif date_birth > date_start:
age = None
else:
age = int((date_start - date_birth).days // 365.25)
return age
def __call__(self, df):
df[self.column_age] = df[[self.column_date_birth, self.column_date_start]].apply(self._age_get, axis=1)
return df
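# Usage sketch (column names are hypothetical):
#   step = TransformAgeGetFromBirthday('birth_date', 'policy_start', 'age')
#   df = step(df)   # adds an integer 'age' column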
class TransformAge:
"""Transforms values of drivers' minimum ages in years.
Values under 'age_min' are invalid. Values over 'age_max' will be grouped.
Parameters:
column_driver_minage (str): Column name in InsolverDataFrame containing drivers' minimum ages in years,
column type is integer.
age_min (int): Minimum value of drivers' age in years, lower values are invalid, 18 by default.
age_max (int): Maximum value of drivers' age in years, bigger values will be grouped, 70 by default.
"""
def __init__(self, column_driver_minage, age_min=18, age_max=70):
self.priority = 1
self.column_driver_minage = column_driver_minage
self.age_min = age_min
self.age_max = age_max
@staticmethod
def _age(age, age_min, age_max):
        if pd.isnull(age):
            age = None
import logging
import pandas as pd
import os
import sys
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import MinMaxScaler
from sklearn.decomposition import PCA
from sklearn.compose import ColumnTransformer
def preproc_data(descriptors, metadata, labels):
logging.info("Scaling data, creating training and test sets.")
if descriptors.shape[0] != metadata.shape[0]:
logging.error("Mismatch in size of descriptors and metadata: %d versus %d, but both should be for the same number of observations/subjects." % (descriptors.shape[0], metadata.shape[0]))
if descriptors.shape[0] != labels.shape[0]:
logging.error("Mismatch in size of descriptors and labels: %d versus %d, but both should be for the same number of observations/subjects." % (descriptors.shape[0], labels.shape[0]))
numeric_features = list(descriptors.columns) # set to list of all column names from current dataframe
numeric_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='median')),
('scaler', MinMaxScaler())])
categorical_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='most_frequent')),
('onehot', OneHotEncoder())])
## Add covariates to descriptors. Some are numerical (which is fine), but some are categorical and need special encoding.
## Add numerical covariates to descriptors:
numerical_covariates = ["AGE_AT_SCAN"]
for cov in numerical_covariates:
descriptors[cov] = metadata[cov]
## Add categorial covariates
categorical_covariates = ["SEX", "SITE_ID"]
for cov in categorical_covariates:
descriptors[cov] = metadata[cov]
categorical_features = categorical_covariates # The only categorial features in the dataframe are the covariates we just added.
features_to_be_removed = [] # No need to drop stuff so far. (Most important: the label is not part of the descriptors, as it comes from the metadata. So no need to remove the label.)
preprocessor = ColumnTransformer(
remainder = 'passthrough',
transformers=[
('numeric', numeric_transformer, numeric_features),
('categorical', categorical_transformer, categorical_features),
('remove', 'drop', features_to_be_removed)
])
# prepare data for classification task:
X_train, X_test, y_train, y_test = train_test_split(descriptors, labels, test_size=.4, random_state=42)
logging.debug("Received training data: descriptor shape is %s, and %d labels for it." % (str(X_train.shape), y_train.shape[0]))
logging.debug("Received test data: descriptor shape is %s, and %d labels for it." % (str(X_test.shape), y_test.shape[0]))
X_train = preprocessor.fit_transform(X_train)
X_test = preprocessor.fit_transform(X_test)
logging.debug("After pre-proc: Training data shape is %s, with %d labels for it." % (str(X_train.shape), y_train.shape[0]))
logging.debug("After pre-proc: Test data shape is %s, with %d labels for it." % (str(X_test.shape), y_test.shape[0]))
logging.info("Running dimensionality reduction (PCA).")
pca = PCA()
X_train = pca.fit_transform(X_train)
X_test = pca.transform(X_test)
for pc in range(10):
logging.info(" PCA principal component #%d explained variance: %f" % (pc, pca.explained_variance_ratio_[pc]))
logging.debug("After PCA: Training data shape is %s, with %d labels for it." % (str(X_train.shape), y_train.shape[0]))
logging.debug("After PCA: Test data shape is %s, with %d labels for it." % (str(X_test.shape), y_test.shape[0]))
return X_train, X_test, y_train, y_test
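# Typical call chain (the file paths and label column are placeholders):
#   descriptors, metadata = load_data('descriptors.csv', 'subjects.txt',
#                                     'Phenotypic_V1_0b_preprocessed1.csv')
#   data = preproc_data(descriptors, metadata, labels)
#   check_data(data)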
def check_data(data):
"""
Check whether the number of descriptors in the training and test data is equal. If not, the one-hot-encoding of categorial features may have caused issues (i.e., some values occur only in the training data or only in the test data).
"""
X_train, X_test, y_train, y_test = data
num_features_train = X_train.shape[1]
num_features_test = X_test.shape[1]
if num_features_train != num_features_test:
logging.error("Mismatch between descriptor count in training and test data: %d versus %d. There may be categorical columns which lack column values in one of the two sets." % (num_features_train, num_features_test))
def load_data(descriptors_file, subjects_file, metadata_file):
"""
Load data and merge it.
Parameters
----------
descriptors_file: str
Path to a file containing brain descriptor values in CSV format. Each line should contain data on a single subject, and can have an arbitrary number of columns (descriptor values). All lines must have identical length, though. Must have a header line.
    subjects_file: str
Path to subjects text file, each line contains a single subject ID, no header.
metadata_file: str
Path to metadata CSV file from ABIDE data. The required file is named 'Phenotypic_V1_0b_preprocessed1.csv' when downloaded from ABIDE.
Returns
-------
descriptors: dataframe
Dataframe containing descriptor data, one subject per row.
metadata: dataframe
Dataframe containing metadata, one subject per row.
"""
logging.info("Reading brain descriptor data from file '%s', subject order from file '%s'." % (descriptors_file, subjects_file))
descriptors = pd.read_csv(descriptors_file, header=0)
logging.debug("Descriptor data shape: %s" % (str(descriptors.shape)))
subjects = pd.read_csv(subjects_file, header=None, names=["subject_id"])
logging.debug("Subject data shape: %s" % (str(subjects.shape)))
#logging.debug("Descriptors:")
#logging.debug(descriptors.head())
#descriptors["subject_id"] = subjects["subject_id"] # add subject IDs to descriptors dataframe
logging.debug("Merged descriptor data shape (with subject ID field): %s" % (str(descriptors.shape)))
logging.debug("Reading ABIDE metadata on subjects from file '%s'." % (metadata_file))
    metadata = pd.read_csv(metadata_file, header=0)
import pandas as pd
import numpy as np
def construct_freq_df(df_copy):
'''
    Construct a dataframe such that indices are separated by a delta of 1 min from the Market Data
and put it in a format that markov matrices can be obtained by the pd.crosstab() method
'''
#This is here in case user passes the actual dataframe, we do not want to modify the actual dataframe
df = df_copy.copy()
#Blank dataframe placeholder
frames = pd.DataFrame()
#Set the index to timestamp and convert it to pd timestamp
#The datatype of the timestamp column should be string
df.set_index('timestamp', inplace=True)
df.index = pd.to_datetime(df.index)
    #We need to get customer behaviour from entry to checkout for each unique customer
for customer in df['customer_no'].unique():
#get customer
temp_df = df[df['customer_no'] == customer]
        #expand timestamp index such that delta T is 1 min, and forward fill aisles
        temp_df = temp_df.asfreq('T',method='ffill')
        #insert 'entry' 1 min before the first aisle
#re sort index so that times make sense
#(WE MIGHT NEED TO SKIP THIS NOT SURE IF ENTRY STATE IS REQUIRED)
temp_df.loc[temp_df.index[0] - pd.to_timedelta('1min')] = [customer,'entry']
temp_df.sort_index(inplace=True)
#after is simply a shift(-1) of current location
#checkout location does not have an after, so drop the NA's here
temp_df['after'] = temp_df['location'].shift(-1)
temp_df.dropna(inplace=True)
#join the frequency table for each customer
frames = pd.concat([frames, temp_df], axis=0)
#return the frequency frame
return frames
def generate_markov_matrix(df_copy):
'''
Generate the Markov Matrix for a Market Data dataframe, structured by constuct_freq_df() function
NOTE: Columns indicate current state, rows indicate after state, probabilities are read current -> after probability
sum of columns should add to 1. Since Checkout state is a sink, all after probabilities are 0, not calculated.
'''
df = df_copy.copy()
return pd.crosstab(df['after'], df['location'], normalize=1)
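# End-to-end sketch, assuming a raw market DataFrame with 'timestamp',
# 'customer_no' and 'location' columns as described above:
#   freq = construct_freq_df(raw_df)
#   transition_probs = generate_markov_matrix(freq)   # each column sums to 1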
class Customer:
def __init__(self, idn, state, transition_mat):
self.id = idn
self.state = state
self.transition_mat = transition_mat
self.tr_array_dict = {
'dairy' : self.transition_mat[0,:],
'drinks' : self.transition_mat[1,:],
'entry' : self.transition_mat[2,:],
'fruit' : self.transition_mat[3,:],
'spices' : self.transition_mat[4,:]
}
def __repr__(self):
"""
Returns a csv string for that customer.
"""
return f'{self.id};{self.state}'
def is_active(self):
"""
Returns True if the customer has not reached the checkout
for the second time yet, False otherwise.
"""
if self.state != 'checkout':
return True
if self.state == 'checkout':
return False
def next_state(self):
"""
Propagates the customer to the next state
using a weighted random choice from the transition probabilities
conditional on the current state.
Returns nothing.
"""
self.state = np.random.choice(['checkout', 'dairy', 'drinks', 'fruit', 'spices'], p=self.tr_array_dict[f'{self.state}'])
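# Usage sketch: a uniform 5x5 transition matrix (rows follow the order used in
# tr_array_dict; the probabilities here are made up, not estimated from data).
def _example_customer_step():
    tm = np.full((5, 5), 0.2)
    customer = Customer(1, 'entry', tm)
    customer.next_state()
    return customer.state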
class SuperMarket:
"""manages multiple Customer instances that are currently in the market.
"""
def __init__(self,transition_matrix):
#List contains the customer objects
self.customers = []
#Timing stuff set to some defults, open and close time get their values from the simulate() method when called
self.open_time = pd.to_datetime('08:00',format='%H:%M')
self.close_time = pd.to_datetime('17:00',format='%H:%M')
        self.current_time = pd.to_datetime('08:00',format='%H:%M')
"""
SPDX-FileCopyrightText: 2019 oemof developer group <<EMAIL>>
SPDX-License-Identifier: MIT
"""
import pytest
import pandas as pd
import numpy as np
from pandas.util.testing import assert_series_equal
import windpowerlib.wind_farm as wf
import windpowerlib.wind_turbine as wt
import windpowerlib.wind_turbine_cluster as wtc
import windpowerlib.turbine_cluster_modelchain as tc_mc
class TestTurbineClusterModelChain:
@classmethod
def setup_class(self):
temperature_2m = np.array([[267], [268]])
temperature_10m = np.array([[267], [266]])
pressure_0m = np.array([[101125], [101000]])
wind_speed_8m = np.array([[4.0], [5.0]])
wind_speed_10m = np.array([[5.0], [6.5]])
roughness_length = np.array([[0.15], [0.15]])
self.weather_df = pd.DataFrame(
np.hstack(
(
temperature_2m,
temperature_10m,
pressure_0m,
wind_speed_8m,
wind_speed_10m,
roughness_length,
)
),
index=[0, 1],
columns=[
np.array(
[
"temperature",
"temperature",
"pressure",
"wind_speed",
"wind_speed",
"roughness_length",
]
),
np.array([2, 10, 0, 8, 10, 0]),
],
)
self.test_turbine = {
"hub_height": 100,
"rotor_diameter": 80,
"turbine_type": "E-126/4200",
}
self.test_turbine_2 = {
"hub_height": 90,
"rotor_diameter": 60,
"turbine_type": "V90/2000",
"nominal_power": 2000000.0,
}
self.test_farm = {
"wind_turbine_fleet": [
{
"wind_turbine": wt.WindTurbine(**self.test_turbine),
"number_of_turbines": 3,
}
]
}
self.test_farm_2 = {
"name": "test farm",
"wind_turbine_fleet": [
{
"wind_turbine": wt.WindTurbine(**self.test_turbine),
"number_of_turbines": 3,
},
{
"wind_turbine": wt.WindTurbine(**self.test_turbine_2),
"number_of_turbines": 3,
},
],
}
self.test_cluster = {
"name": "example_cluster",
"wind_farms": [
wf.WindFarm(**self.test_farm),
wf.WindFarm(**self.test_farm_2),
],
}
def test_run_model(self):
parameters = {
"wake_losses_model": "dena_mean",
"smoothing": False,
"standard_deviation_method": "turbulence_intensity",
"smoothing_order": "wind_farm_power_curves",
}
# Test modelchain with default values
power_output_exp = pd.Series(
data=[4198361.4830405945, 8697966.121234536],
name="feedin_power_plant",
)
test_tc_mc = tc_mc.TurbineClusterModelChain(
power_plant=wf.WindFarm(**self.test_farm), **parameters
)
test_tc_mc.run_model(self.weather_df)
        assert_series_equal(test_tc_mc.power_output, power_output_exp)
### gcode_reader in code folder
### instructions in SETUP.txt
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
##################################
# University of Wisconsin-Madison
# Author: <NAME>
##################################
"""
Gcode reader for both FDM (regular and Stratasys) and LPBF.
It supports the following functionalities
1. plot a layer in 2D, plot layers in 3D
2. list important information of path
3. animate the printing of a layer in 2D, animate the printing of layers in 3D
4. mesh the path, plot mesh, list important informations about the mesh
## below two features are under construction
5. compute closest left element and right element
6. shrink and convert FDM process plan to PBF S-Code
"""
# standard library
import argparse
import collections
from enum import Enum
import math
import os.path
import pprint
import statistics
import sys
import PRNTR
import Image_maker
# third party library
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as manimation
from mpl_toolkits.mplot3d import Axes3D
import pandas as pd
import seaborn as sns
path1 = PRNTR.location
imagename2 = Image_maker.namer
def GCR():
# sns.set() # use seaborn style
# maximum element length in meshing
MAX_ELEMENT_LENGTH = 1 # FDM regular
# MAX_ELEMENT_LENGTH = 5 # FDM Stratasys
# MAX_ELEMENT_LENGTH = 10 # four-spirals scode
# MAX_ELEMENT_LENGTH = 50e-6 # LPBF
# MAX_ELEMENT_LENGTH = 100e-6 # LPBF (for plot mesh example)
# set true to keep support path
PLOT_SUPPORT = True
# set true to use one color for plot
# set false to use random color for plot
SINGLE_COLOR = False
# set true to plot scans with positive power in different color
# this is for powder bed fusion
PLOT_POWER = True
POWER_ZERO = 1
IGNORE_ZERO_POWER = True
# Element namedtuple
Element = collections.namedtuple('Element', ['x0', 'y0', 'x1', 'y1', 'z'])
# set true to add axis-label and title
FIG_INFO = False
# MARGIN RATIO
MARGIN_RATIO = 0.2
# zero tolerance for is_left check
ZERO_TOLERANCE = 1e-12
# global variables
pp = pprint.PrettyPrinter(indent=4)
### under construction
# plot polygon
HALF_WIDTH = 0.6 # FDM regular
# HALF_WIDTH = 1.5 # FDM stratasys
# HALF_WIDTH = 50e-6
## This is for research...
# FDM regular: current 0.5 mm = 500 mu, target 50 mu
# FDM stratasys: current 1.4 mm = 1400 mu, target 50 mu
# HORIZONTAL_SHRINK_RATIO = 0.0001 # tweety and octo
# HORIZONTAL_SHRINK_RATIO = (1 / 1000) * (1 / (1400 / 50)) # mobius arm
# HORIZONTAL_SHRINK_RATIO = (1 / 1000) * (1 / (1500 / 50)) # bunny
# HORIZONTAL_SHRINK_RATIO = (1 / 1000) * (1 / (600 / 25)) # bunny
HORIZONTAL_SHRINK_RATIO = (1 / 1000) * (1 / (600 / 25)) # wrench
DELTA_Z = 2e-5
LASER_POWER = 195
LASER_SPEED = 0.8
TRAVEL_SPEED = 0.8
def axisEqual3D(ax):
"""set 3d axis equal."""
extents = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:,1] - extents[:,0]
centers = np.mean(extents, axis=1)
maxsize = max(abs(sz))
r = maxsize/4
for ctr, dim in zip(centers, 'xyz'):
getattr(ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
def save_figure(fig, filename, dpi):
"""
#save figure to a file
def Args():
fig: figure
filename: outfilename
#dpi: dpi of the figure
"""
_, ext = filename.rsplit('.', 1)
fig.savefig(filename, format=ext, dpi=dpi, bbox_inches='tight')
print('saving to {:s} with {:d} DPI'.format(filename, dpi))
def create_axis(figsize=(8, 8), projection='2d'):
"""
create axis based on figure size and projection
returns fig, ax
Args:
figsize: size of the figure
projection: dimension of figure
Returns:
fig, ax
"""
projection = projection.lower()
if projection not in ['2d', '3d']:
raise ValueError
if projection == '2d':
fig, ax = plt.subplots(figsize=figsize)
else: # '3d'
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111, projection='3d')
return fig, ax
def create_movie_writer(title='Movie Writer', fps=15):
"""
create ffmpeg writer
Args:
title: title of the movie writer
fps: frames per second
Returns:
movie writer
"""
FFMpegWriter = manimation.writers['ffmpeg']
metadata = dict(title=title, artist='Matplotlib',
comment='Movie Support')
        writer = FFMpegWriter(fps=fps, metadata=metadata)
return writer
def add_margin_to_axis_limits(min_v, max_v, margin_ratio=MARGIN_RATIO):
"""
compute new min_v and max_v based on margin
Args:
min_v: minimum value
max_v: maximum value
margin_ratio:
Returns:
new_min_v, new_max_v
"""
dv = (max_v - min_v) * margin_ratio
return (min_v - dv, max_v + dv)
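        # Worked example (values assumed for illustration): with min_v=0, max_v=10
        # and the default MARGIN_RATIO of 0.2, dv = (10 - 0) * 0.2 = 2, so the
        # padded limits returned are (-2.0, 12.0).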
class LayerError(Exception):
""" layer number error """
pass
class GcodeType(Enum):
""" enum of GcodeType """
FDM_REGULAR = 1
FDM_STRATASYS = 2
LPBF_REGULAR = 3
LPBF_SCODE = 4
@classmethod
def has_value(cls, value):
return any(value == item.value for item in cls)
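            # Illustrative check: GcodeType.has_value(2) is True (FDM_STRATASYS),
            # while GcodeType.has_value(9) is False.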
class GcodeReader:
""" Gcode reader class """
def __init__(self, filename, filetype=GcodeType.FDM_REGULAR):
if not os.path.exists(filename):
print("{} does not exist!".format(filename))
sys.exit(1)
self.filename = filename
self.filetype = filetype
# print(self.filetype)
self.n_segs = 0 # number of line segments
self.segs = None # list of line segments [(x0, y0, x1, y1, z)]
self.n_layers = 0 # number of layers
# seg_index_bars and subpath_index_bars have the same format
# e.g. ith layer has segment indexes [seg_index_bars[i-1],
# seg_index_bars[i])
self.seg_index_bars = []
self.subpath_index_bars = []
self.summary = None
self.lengths = None
self.subpaths = None
self.xyzlimits = None
self.elements = None
self.elements_index_bars = []
# read file to populate variables
self._read()
def mesh(self, max_length):
""" mesh segments according to max_length """
self.elements = []
self.elements_index_bars = []
bar = 0
n_eles = 0
if not hasattr(self, 'powers'):
self.powers = [POWER_ZERO + 10] * len(self.segs)
for i, (x0, y0, x1, y1, z) in enumerate(self.segs):
if i == self.seg_index_bars[bar]:
bar += 1
self.elements_index_bars.append(n_eles)
power = self.powers[i]
if power < POWER_ZERO:
continue
length = np.hypot(x0 - x1, y0 - y1)
n_slices = int(np.ceil(length / max_length))
n_eles += n_slices
dx = (x1 - x0) / n_slices
dy = (y1 - y0) / n_slices
for _ in range(n_slices - 1):
# self.elements.append((x0, y0, x0 + dx, y0 + dy, z))
self.elements.append(Element(x0, y0, x0 + dx, y0 + dy, z))
x0, y0 = x0 + dx, y0 + dy
# self.elements.append((x0, y0, x1, y1, z))
self.elements.append(Element(x0, y0, x1, y1, z))
self.elements_index_bars.append(n_eles)
# print(self.elements_index_bars)
print("Meshing finished, {:d} elements generated".
format(len(self.elements)))
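            # Meshing arithmetic sketch (numbers assumed for illustration): a segment
            # of length 2.5 with max_length=1 gives n_slices = ceil(2.5 / 1) = 3, i.e.
            # two interior sub-elements plus the closing element that ends at (x1, y1).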
def plot_mesh_layer(self, layernum, ax=None):
""" plot mesh in one layer """
if not self.elements:
self.mesh(max_length=MAX_ELEMENT_LENGTH)
fig, ax = self.plot_layer(layer=layernum)
# if not ax:
# fig, ax = create_axis(projection='2d')
left, right = self.elements_index_bars[layernum - 1:layernum + 1]
print(left, right)
for x0, y0, x1, y1, _ in self.elements[left:right]:
# ax.plot([x0, x1], [y0, y1], 'b-')
# ax.scatter(0.5 * (x0 + x1), 0.5 * (y0 + y1), s=4, color='r')
ax.plot([0.5 * (x0 + x1)], [0.5 * (y0 + y1)], 'ro', markersize=4)
return fig, ax
def convert_to_scode(self):
""" convert path to scode file. """
name, _ = self.filename.rsplit('.', 1)
outpath = "{}.scode".format(name)
old_z = -np.inf
z = -DELTA_Z
old_x0 = old_y0 = old_x1 = old_y1 = -np.inf
with open(outpath, 'w') as out_f:
out_f.write('# x1 y1 x2 y2 z power speed \n')
for x0, y0, x1, y1, cur_z in self.segs:
x0 *= HORIZONTAL_SHRINK_RATIO
y0 *= HORIZONTAL_SHRINK_RATIO
x1 *= HORIZONTAL_SHRINK_RATIO
y1 *= HORIZONTAL_SHRINK_RATIO
if old_x0 != -np.inf and (old_x1 != x0 or old_y1 != y0 or cur_z != old_z):
out_f.write("{:.8f} {:.8f} {:.8f} {:.8f} {:.8f} {:d} {:.4f}\n".format(old_x1, old_y1, x0, y0, z, 0, TRAVEL_SPEED))
if cur_z > old_z:
z += DELTA_Z
old_z = cur_z
old_x0 = x0
old_y0 = y0
old_x1 = x1
old_y1 = y1
# check if two segs are connected
out_f.write("{:.8f} {:.8f} {:.8f} {:.8f} {:.8f} {:d} {:.4f}\n".format(x0, y0, x1, y1, z, LASER_POWER, LASER_SPEED))
print('Save path to s-code file {}'.format(outpath))
def plot_mesh(self, ax=None):
""" plot mesh """
if not self.elements:
                self.mesh(max_length=MAX_ELEMENT_LENGTH)
if not ax:
fig, ax = create_axis(projection='3d')
for x0, y0, x1, y1, z in self.elements:
ax.plot([x0, x1], [y0, y1], [z, z], 'b-')
ax.scatter(0.5 * (x0 + x1), 0.5 * (y0 + y1), z, 'r', s=4,
color='r')
return fig, ax
def _read(self):
"""
read the file and populate self.segs, self.n_segs and
self.seg_index_bars
"""
if self.filetype == GcodeType.FDM_REGULAR:
self._read_fdm_regular()
elif self.filetype == GcodeType.FDM_STRATASYS:
self._read_fdm_stratasys()
elif self.filetype == GcodeType.LPBF_REGULAR:
self._read_lpbf_regular()
elif self.filetype == GcodeType.LPBF_SCODE:
self._read_lpbf_scode()
else:
print("file type is not supported")
sys.exit(1)
self.xyzlimits = self._compute_xyzlimits(self.segs)
def _compute_xyzlimits(self, seg_list):
""" compute axis limits of a segments list """
xmin, xmax = float('inf'), -float('inf')
ymin, ymax = float('inf'), -float('inf')
zmin, zmax = float('inf'), -float('inf')
for x0, y0, x1, y1, z in seg_list:
xmin = min(x0, x1) if min(x0, x1) < xmin else xmin
ymin = min(y0, y1) if min(y0, y1) < ymin else ymin
zmin = z if z < zmin else zmin
xmax = max(x0, x1) if max(x0, x1) > xmax else xmax
ymax = max(y0, y1) if max(y0, y1) > ymax else ymax
zmax = z if z > zmax else zmax
return (xmin, xmax, ymin, ymax, zmin, zmax)
def _read_lpbf_regular(self):
""" read regular LPBF gcode """
with open(self.filename, 'r') as infile:
# read nonempty lines
lines = (line.strip() for line in infile.readlines()
if line.strip())
# only keep line that starts with 'N'
lines = (line for line in lines if line.startswith('N'))
# pp.pprint(lines) # for debug
self.segs = []
self.powers = []
temp = -float('inf')
ngxyzfl = [temp, temp, temp, temp, temp, temp, temp]
d = dict(zip(['N', 'G', 'X', 'Y', 'Z', 'F', 'L'], range(7)))
seg_count = 0
for line in lines:
old_ngxyzfl = ngxyzfl[:]
tokens = line.split()
for token in tokens:
ngxyzfl[d[token[0]]] = float(token[1:])
if ngxyzfl[d['Z']] > old_ngxyzfl[d['Z']]:
self.n_layers += 1
self.seg_index_bars.append(seg_count)
if (ngxyzfl[1] == 1 and ngxyzfl[2:4] != old_ngxyzfl[2:4]
and ngxyzfl[4] == old_ngxyzfl[4]
and ngxyzfl[5] > 0):
x0, y0, z = old_ngxyzfl[2:5]
x1, y1 = ngxyzfl[2:4]
self.segs.append((x0, y0, x1, y1, z))
self.powers.append(ngxyzfl[-1])
seg_count += 1
self.n_segs = len(self.segs)
self.segs = np.array(self.segs)
self.seg_index_bars.append(self.n_segs)
# print(self.n_layers)
# print(self.powers)
assert(len(self.seg_index_bars) - self.n_layers == 1)
def _read_lpbf_scode(self):
""" read LPBF scode """
with open(self.filename, 'r') as infile:
# read nonempty lines
lines = (line.strip() for line in infile.readlines()
if line.strip())
            # only keep lines that do not start with '#'
lines = (line for line in lines if not line.startswith('#'))
# pp.pprint(lines) # for debug
self.segs = []
self.powers = []
seg_count = 0
old_z = -np.inf
for line in lines:
x0, y0, x1, y1, z, power, speed = map(float, line.split())
if z > old_z:
self.n_layers += 1
self.seg_index_bars.append(seg_count)
old_z = z
self.segs.append((x0, y0, x1, y1, z))
self.powers.append(power)
seg_count += 1
self.n_segs = len(self.segs)
self.segs = np.array(self.segs)
# print(self.segs)
self.seg_index_bars.append(self.n_segs)
assert(len(self.seg_index_bars) - self.n_layers == 1)
def _read_fdm_regular(self):
""" read fDM regular gcode type """
with open(self.filename, 'r') as infile:
# read nonempty lines
lines = (line.strip() for line in infile.readlines()
if line.strip())
# only keep line that starts with 'G'
# lines = (line for line in lines if line.startswith('G'))
new_lines = []
for line in lines:
if line.startswith('G'):
idx = line.find(';')
if idx != -1:
line = line[:idx]
new_lines.append(line)
lines = new_lines
# pp.pprint(lines) # for debug
self.segs = []
temp = -float('inf')
gxyzef = [temp, temp, temp, temp, temp, temp]
d = dict(zip(['G', 'X', 'Y', 'Z', 'E', 'F'], range(6)))
seg_count = 0
mx_z = -math.inf
for line in lines:
old_gxyzef = gxyzef[:]
for token in line.split():
gxyzef[d[token[0]]] = float(token[1:])
"""
# if gxyzef[3] > old_gxyzef[3]: # z value
# it may lift z in the beginning or during the printing process
if gxyzef[4] > old_gxyzef[4] and gxyzef[3] > mx_z:
mx_z = gxyzef[3]
# print(gxyzef[3], old_gxyzef[3])
self.n_layers += 1
self.seg_index_bars.append(seg_count)
"""
if (gxyzef[0] == 1 and gxyzef[1:3] != old_gxyzef[1:3]
and gxyzef[3] == old_gxyzef[3]
and gxyzef[4] > old_gxyzef[4]):
# update layer here
# print(gxyzef[3], mx_z)
if gxyzef[3] > mx_z:
mx_z = gxyzef[3]
self.n_layers += 1
self.seg_index_bars.append(seg_count)
x0, y0, z = old_gxyzef[1:4]
x1, y1 = gxyzef[1:3]
self.segs.append((x0, y0, x1, y1, z))
seg_count += 1
self.n_segs = len(self.segs)
self.segs = np.array(self.segs)
self.seg_index_bars.append(self.n_segs)
assert(len(self.seg_index_bars) - self.n_layers == 1)
def _read_fdm_stratasys(self):
""" read stratasys fdm G-code file """
self.areas = []
self.is_supports = []
self.styles = []
self.deltTs = []
self.segs = []
temp = -float('inf')
# x, y, z, area, deltaT, is_support, style
xyzATPS = [temp, temp, temp, temp, temp, False, '']
seg_count = 0
with open(self.filename, 'r') as in_file:
lines = in_file.readlines()
            # 'start' is True when the position denoted by the line is the start of a subpath
            start = True
for line in lines:
# filter out supports path
if not PLOT_SUPPORT and 'True' in line:
continue
if line.startswith('#'):
continue
if not line.strip(): # skip empty line
start = True
continue
old_xyzATPS = xyzATPS[:]
tokens = line.split()
# print(tokens)
xyzATPS[:5] = [float(token) for token in tokens[:5]]
xyzATPS[5] = bool(tokens[5])
xyzATPS[6] = tokens[6]
if xyzATPS[2] != old_xyzATPS[2]: # z value
self.seg_index_bars.append(seg_count)
self.n_layers += 1
elif not start:
# make sure is_support and style do not change
assert(xyzATPS[5:] == old_xyzATPS[5:])
x0, y0 = old_xyzATPS[:2]
x1, y1, z = xyzATPS[:3]
self.segs.append((x0, y0, x1, y1, z))
seg_count += 1
self.areas.append(xyzATPS[3])
self.deltTs.append(xyzATPS[4])
self.is_supports.append(xyzATPS[5])
self.styles.append(xyzATPS[6])
start = False
self.n_segs = len(self.segs)
self.segs = np.array(self.segs)
self.seg_index_bars.append(self.n_segs)
# print(self.seg_index_bars)
def _compute_subpaths(self):
""" compute subpaths
a subpath is represented by (xs, ys, zs)
subpath makes it easier to plot
"""
if not self.subpaths:
self.subpaths = []
self.subpath_index_bars = [0]
x0, y0, x1, y1, z = self.segs[0, :]
xs, ys, zs = [x0, x1], [y0, y1], [z, z]
mx_z = zs[-1]
for x0, y0, x1, y1, z in self.segs[1:, :]:
if x0 != xs[-1] or y0 != ys[-1] or z != zs[-1]:
self.subpaths.append((xs, ys, zs))
# if z != zs[-1]:
if z > mx_z:
mx_z = z
self.subpath_index_bars.append(len(self.subpaths))
xs, ys, zs = [x0, x1], [y0, y1], [z, z]
else:
xs.append(x1)
ys.append(y1)
zs.append(z)
if len(xs) != 0:
self.subpaths.append((xs, ys, zs))
self.subpath_index_bars.append(len(self.subpaths))
# print(self.subpath_index_bars)
# print(self.segs)
def _compute_center_distance(self, i, j):
"""compute center distance between element i and j."""
n = len(self.elements)
assert(i < n and j < n)
elements = self.elements
ax = 0.5 * (elements[i].x0 + elements[i].x1)
ay = 0.5 * (elements[i].y0 + elements[i].y1)
bx = 0.5 * (elements[j].x0 + elements[j].x1)
by = 0.5 * (elements[j].y0 + elements[j].y1)
return math.sqrt((ax - bx) ** 2 + (ay - by) ** 2)
def _compute_parallel_distance(self, i, j):
"""compute the parallel distance between element i and j."""
n = len(self.elements)
assert(i < n and j < n)
elements = self.elements
x = 0.5 * (elements[i].x0 + elements[i].x1)
y = 0.5 * (elements[i].y0 + elements[i].y1)
ax, ay, bx, by, _ = elements[j]
dx = ax - bx
dy = ay - by
deno = math.sqrt(dx * dx + dy * dy)
nume = abs((by - ay) * x - (bx - ax) * y + bx * ay - by * ax)
return nume / deno
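            # This is the standard point-to-line distance: for the line through
            # (ax, ay) and (bx, by), d = |(by-ay)*x - (bx-ax)*y + bx*ay - by*ax| / |AB|.
            # Worked example (assumed values): the point (0, 1) against the segment
            # (0, 0)-(1, 0) gives d = |-1| / 1 = 1.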
def _is_element_nearly_parallel(self, i, j, threshold):
"""check if element i and element j are nearly parallel."""
n = len(self.elements)
assert(i < n and j < n)
elements = self.elements
ax, ay, bx, by, _ = elements[i]
cx, cy, dx, dy, _ = elements[j]
dx1 = bx - ax
dy1 = by - ay
dx2 = dx - cx
dy2 = dy - cy
cos_theta = abs((dx1 * dx2 + dy1 * dy2) / (math.sqrt((dx1 * dx1 + dy1 * dy1) * (dx2 * dx2 + dy2 * dy2))))
            return 1 - cos_theta < threshold
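            # Sketch of the check (assumed values): for direction vectors (1, 0) and
            # (2, 0.002), cos_theta is about 0.9999995, so 1 - cos_theta (about 5e-7)
            # is below a threshold of 0.0001 and the two elements count as nearly parallel.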
def _is_element_left(self, i, j):
"""check if element j is on the left of element i."""
n = len(self.elements)
assert(i < n and j < n)
assert(self.elements[i].z == self.elements[j].z)
elements = self.elements
ax, ay, bx, by, _ = elements[i]
cx = 0.5 * (elements[j].x0 + elements[j].x1)
cy = 0.5 * (elements[j].y0 + elements[j].y1)
cross_product = (bx - ax) * (cy - ay) - (cx - ax) * (by - ay)
if abs(cross_product) < ZERO_TOLERANCE:
return 0
else:
return 1 if cross_product > 0 else -1
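            # Sign convention sketch (assumed coordinates): with element i running from
            # (0, 0) to (1, 0), an element j centred at (0.5, 1) yields a positive cross
            # product (returns 1, i.e. left), a centre at (0.5, -1) returns -1 (right),
            # and a collinear centre returns 0.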
def compute_nearest_neighbors(self, layer=0):
"""compute nearest neighbors for each element."""
if not self.elements:
self.mesh(max_length=MAX_ELEMENT_LENGTH)
start_idx, end_idx = self.elements_index_bars[layer - 1:layer + 1]
INF = math.inf
left_neis = []
right_neis = []
print(start_idx, end_idx)
for i in range(start_idx, end_idx):
left_mn = INF
right_mn = INF
# left_is_left = 0
# right_is_left = 0
left_idx = -1
right_idx = -1
for j in range(start_idx, end_idx):
if j == i:
continue
if (self._is_element_nearly_parallel(i, j, 0.0001) and
self._compute_center_distance(i, j) < 2.0 * HALF_WIDTH * 2):
is_left = self._is_element_left(i, j)
distance = self._compute_parallel_distance(i, j)
if distance < 0.4 * HALF_WIDTH * 2:
continue
# print(distance, is_left)
if is_left == 1:
if distance < left_mn:
left_idx = j
left_mn = distance
elif is_left == -1:
if distance < right_mn:
right_idx = j
right_mn = distance
# print("{:d} {:f} {:f}".format(i, left_mn, right_mn))
# if left_mn > 5:
left_neis.append((left_idx, left_mn))
right_neis.append((right_idx, right_mn))
print("Finished computing left and right neighbors.")
return left_neis, right_neis
def plot_neighbors_layer(self, layer=0):
"""plot neighbors in a layer."""
left_neis, right_neis = self.compute_nearest_neighbors(layer)
#"""
fig, ax = self.plot_mesh_layer(layer)
left, right = self.elements_index_bars[layer - 1:layer + 1]
print(left, right)
es = self.elements
for idx, (x0, y0, x1, y1, _) in enumerate(self.elements[left:right]):
xc = 0.5 * (x0 + x1)
yc = 0.5 * (y0 + y1)
# ax.plot([0.5 * (x0 + x1)], [0.5 * (y0 + y1)], 'ro', markersize=1.5)
left_idx, left_mn = left_neis[idx]
if left_idx != -1:
lx = 0.5 * (es[left_idx].x0 + es[left_idx].x1)
ly = 0.5 * (es[left_idx].y0 + es[left_idx].y1)
# print(left_mn, math.sqrt((lx - xc) ** 2 + (ly - yc) ** 2),self._compute_parallel_distance(idx, left_idx))
ax.plot([xc, lx], [yc, ly], 'r-')
right_idx, right_mn = right_neis[idx]
if right_idx != -1:
rx = 0.5 * (es[right_idx].x0 + es[right_idx].x1)
ry = 0.5 * (es[right_idx].y0 + es[right_idx].y1)
# print(left_mn, math.sqrt((lx - xc) ** 2 + (ly - yc) ** 2),self._compute_parallel_distance(idx, left_idx))
ax.plot([xc, rx], [yc, ry], 'r-')
#"""
# plot histogram
left_mns = [mn for idx, mn in left_neis if idx != -1]
print("left median = {}".format(statistics.median(left_mns)))
print("left mean = {}".format(statistics.mean(left_mns)))
print("left min = {}".format(min(left_mns)))
print("left max = {}".format(max(left_mns)))
right_mns = [mn for idx, mn in right_neis if idx != -1]
print("right median = {}".format(statistics.median(right_mns)))
print("right mean = {}".format(statistics.mean(right_mns)))
print("right min = {}".format(min(right_mns)))
print("right max = {}".format(max(right_mns)))
fig2, ax2 = plt.subplots(figsize=(8, 8))
ax2.boxplot(left_mns)
# return fig, ax
return fig2, ax2
def plot_polygon_layer(self, layer):
"""plot element polygons in one layer. """
left_neis, right_neis = self.compute_nearest_neighbors(layer)
fig, ax = self.plot_mesh_layer(layer)
left, right = self.elements_index_bars[layer - 1:layer + 1]
# print(left, right)
es = self.elements
for idx, (sx, sy, ex, ey, _) in enumerate(self.elements[left:right]):
reverse = False
if sx > ex or ey < sy:
sx, sy, ex, ey = ex, ey, sx, sy
reverse = True
dx = ex - sx
dy = ey - sy
theta = np.arctan2(dy, dx)
beta = theta + np.pi / 2.0
lw = HALF_WIDTH
left_idx, left_mn = left_neis[idx]
if left_mn / 2 < lw:
lw = left_mn / 2
rw = HALF_WIDTH
right_idx, right_mn = right_neis[idx]
if right_mn / 2 < rw:
rw = right_mn / 2
if reverse:
lw, rw = rw, lw
x1 = sx - rw * np.cos(beta)
y1 = sy - rw * np.sin(beta)
x2 = ex - rw * np.cos(beta)
y2 = ey - rw * np.sin(beta)
x3 = ex + lw * np.cos(beta)
y3 = ey + lw * np.sin(beta)
x4 = sx + lw * np.cos(beta)
y4 = sy + lw * np.sin(beta)
ax.plot([x1, x2, x3, x4, x1], [y1, y2, y3, y4, y1], 'r-')
return fig, ax
def plot(self, color='blue', ax=None):
""" plot the whole part in 3D """
if not ax:
fig, ax = create_axis(projection='3d')
assert(self.n_segs > 0)
self._compute_subpaths()
for xs, ys, zs in self.subpaths:
if SINGLE_COLOR:
ax.plot(xs, ys, zs, color=color)
else:
ax.plot(xs, ys, zs)
xmin, xmax, ymin, ymax, _, _ = self.xyzlimits
# ax.set_xlim([xmin, xmax])
# ax.set_ylim([ymin, ymax])
ax.set_xlim(add_margin_to_axis_limits(xmin, xmax))
ax.set_ylim(add_margin_to_axis_limits(ymin, ymax))
return fig, ax
def plot_layers(self, min_layer, max_layer, ax=None):
""" plot the layers in [min_layer, max_layer) in 3D """
if (min_layer >= max_layer or min_layer < 1 or max_layer >
self.n_layers + 1):
raise LayerError("Layer number is invalid!")
self._compute_subpaths()
if not ax:
fig, ax = create_axis(projection='3d')
left, right = (self.subpath_index_bars[min_layer - 1],
self.subpath_index_bars[max_layer - 1])
for xs, ys, zs in self.subpaths[left: right]:
ax.plot(xs, ys, zs)
return fig, ax
def plot_layer(self, layer=1, ax=None):
""" plot a specific layer in 2D """
# make sure layer is in [1, self.n_layers]
# layer = max(layer, 1)
# layer = min(self.n_layers, layer)
if layer < 1 or layer > self.n_layers:
raise LayerError("Layer number is invalid!")
self._compute_subpaths()
if not hasattr(self, 'powers'):
self.powers = [POWER_ZERO + 10] * len(self.segs)
if not ax:
fig, ax = create_axis(projection='2d')
if not PLOT_POWER:
left, right = (self.subpath_index_bars[layer - 1],
self.subpath_index_bars[layer])
for xs, ys, _ in self.subpaths[left: right]:
ax.plot(xs, ys)
"""
if SINGLE_COLOR:
if (IGNORE_ZERO_POWER and power > POWER_ZERO) or (not IGNORE_ZERO_POWER):
ax.plot(xs, ys, color='blue')
else:
if (IGNORE_ZERO_POWER and power > POWER_ZERO) or (not IGNORE_ZERO_POWER):
ax.plot(xs, ys)
"""
else:
left, right = (self.seg_index_bars[layer - 1],
self.seg_index_bars[layer])
for (x1, y1, x2, y2, z), power in zip(self.segs, self.powers):
if power > POWER_ZERO:
ax.plot([x1, x2], [y1, y2], 'r-')
else:
if not IGNORE_ZERO_POWER:
ax.plot([x1, x2], [y1, y2], 'b-')
ax.axis('equal')
return fig, ax
def describe_mesh(self, max_length):
"""print basic information of meshing"""
if not self.elements:
self.mesh(max_length)
self.mesh_lengths = [np.hypot(x1 - x0, y1 - y0) for x0, y0, x1, y1, _
in self.elements]
series = | pd.Series(self.mesh_lengths) | pandas.Series |
from numpy import dtype
def estado_civil_dummy():
dic_estado={"Separado(a) o divorciado(a)":0,
"Soltero(a)":0,"Casado":1,"En unión libre":1,
"Viudo(a)":0,1.0:1,2.0:1,3.0:0,4.0:0,5.0:0}
return dic_estado
def dic_etnia():
import numpy as np
dic_etnia={"Mestizo":1,'Ninguno de los anteriores':0,"Blanco":1,"Indígena":0,"Negro, mulato (afro descendiente)":1,
"Palenquero":1,np.NaN:0,1.0:1,2.0:1,3.0:1,4.0:1,5.0:1,6.0:1,7.0:1,8.0:0}
return dic_etnia
def cols_names():
names_cols={"actividad_ppal":"employment","sexo":"sex","edad":"age","estado_civil":"couple",
"hijos":"sons","etnia":"ethnicity","Discapacidad":"Disability","educ_años":"educ_years",
"embarazo_hoy":"w_pregnant","lee_escribe":"read_write","estudia":"student",
"n_internet":"internet","Urbano":"Urban"}
return names_cols
def creador_id(data):
try:
data.insert(0,"id",data["DIRECTORIO"]+data["SECUENCIA_P"]+data["ORDEN"]+data["HOGAR"])
data.insert(1,"id_hogar",data["DIRECTORIO"]+data["SECUENCIA_P"])
except:
data.insert(0,"id_hogar",data["DIRECTORIO"]+data["SECUENCIA_P"])
def dic_dtypes():
dtype={"DIRECTORIO":"str",
"SECUENCIA_P":"str",
"ORDEN":"str",
"HOGAR":"str"}
return dtype
def variables_modelo():
variables=["id","id_hogar","ocupado","desocupado","P6020","P6040","ESC","P6080","P6070","P6170","P4030S1A1","P5210S16","P5210S3","P6081","P6083","DPTO_x"]
return variables
def procces_data_month(mes,variables):
import pandas as pd
dtype=dic_dtypes()
Ac=pd.read_csv(f"sets_model/{mes}/Acaracteristicas.csv",sep=";",dtype=dtype)
Ao=pd.read_csv(f"sets_model/{mes}/Aocupados.csv",sep=";",dtype=dtype)
Ad=pd.read_csv(f"sets_model/{mes}/Adesocupados.csv",sep=";",dtype=dtype)
Av=pd.read_csv(f"sets_model/{mes}/Avivienda.csv",sep=";",dtype=dtype)
Cc=pd.read_csv(f"sets_model/{mes}/Ccaracteristicas.csv",sep=";",dtype=dtype)
Co=pd.read_csv(f"sets_model/{mes}/Cocupados.csv",sep=";",dtype=dtype)
Cd=pd.read_csv(f"sets_model/{mes}/Cdesocupados.csv",sep=";",dtype=dtype)
Cv=pd.read_csv(f"sets_model/{mes}/Cvivienda.csv",sep=";",dtype=dtype)
Rc=pd.read_csv(f"sets_model/{mes}/Rcaracteristicas.csv",sep=";",dtype=dtype)
Ro=pd.read_csv(f"sets_model/{mes}/Rocupados.csv",sep=";",dtype=dtype)
Rd=pd.read_csv(f"sets_model/{mes}/Rdesocupados.csv",sep=";",dtype=dtype)
Rv=pd.read_csv(f"sets_model/{mes}/Rvivienda.csv",sep=";",dtype=dtype)
for k in [Ao,Co,Ro]:
k.insert(0,"ocupado",1)
for g in [Ad,Cd,Rd]:
g.insert(0,"desocupado",1)
A=[Ac,Ao,Ad,Av]
C=[Cc,Co,Cd,Cv]
R=[Rc,Ro,Rd,Rv]
for j in A:
creador_id(j)
for j in C:
creador_id(j)
for j in R:
creador_id(j)
datos_a=pd.merge(Ac,Ao,on="id",how="outer",suffixes=("","_x"))
datos_a=pd.merge(datos_a,Ad,on="id",how="outer",suffixes=("","_x"))
    datos_a=pd.merge(datos_a,Av,on="id_hogar",how="outer")
import sys
import numpy as np
import pandas as pd
from ar6_ch6_rcmipfigs.constants import BASE_DIR
from pathlib import Path
path_FaIR_header_general_info = Path(BASE_DIR) / 'misc/badc_header_FaIR_model.csv'
path_FaIR_warming_header_general_info = Path(BASE_DIR) / 'misc/badc_header_FaIR_model_warming.csv'
path_FaIR_hist_header_general_info = Path(BASE_DIR) / 'misc/badc_header_FaIR_model_hist.csv'
# %%
fp_example = 'ar6_ch6_rcmipfigs/data_in/SSPs_badc-csv/ERF_ssp119_1750-2500.csv'
fp_example_test = 'ar6_ch6_rcmipfigs/data_in/SSPs_badc-csv/ERF_ssp119_1750-2500_test.csv'
fp_orig_example = 'ar6_ch6_rcmipfigs/data_in/SSPs/ERF_ssp119_1750-2500.csv'
# %%
def get_header_length(fp):
"""
Finds the header length in a BADC csv file
:param fp: file path
:return:
"""
cnt_data = 0
with open(fp) as f:
line = f.readline()
cnt = 1
while line:
l_sp = line.split(',')
if l_sp[0].strip() == 'data':
cnt_data = cnt
break
line = f.readline()
cnt += 1
return cnt_data
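# Usage sketch (hypothetical file): if the 18th line of a BADC-csv file is the single
# token 'data', get_header_length(fp) returns 18; read_csv_badc() below passes that
# value to pandas.read_csv as the header row.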
# %%
def read_csv_badc(fp, **kwargs):
# %%
    if not kwargs:
kwargs = {'index_col': 0}
length_header = get_header_length(fp)
if 'header' in kwargs.keys():
hd = kwargs['header']
if type(hd) is list:
hd: list
length_header = list(np.array(hd) + length_header - hd[0])
del kwargs['header']
df = pd.read_csv(fp, skipfooter=1, header=length_header, **kwargs, engine='python')
if df.index[-1] == 'end_data':
df = df.drop('end_data', axis=0)
# %%
return df
# %%
def get_global_FaIR(fp=path_FaIR_header_general_info):
df = pd.read_csv(fp, header=None)
df_glob = df[df.iloc[:, 1] == 'G']
return df_glob
# %%
def get_variable_FaIR(fp=path_FaIR_header_general_info):
    df = pd.read_csv(fp, header=None)
#!/usr/bin/env python
import argparse
import pandas as pd
import os
import re
def get_immediate_subdirectories(a_dir):
return [name for name in os.listdir(a_dir)
if os.path.isdir(os.path.join(a_dir, name))]
def get_files_with_prefix(dir, prefix):
return [name for name in os.listdir(dir)
if os.path.isfile(os.path.join(dir, name)) and name.startswith(prefix)]
def load_results_data_for_experiment(path):
results = []
eval_files = get_files_with_prefix(path, "evaluation")
for file in eval_files:
eval_path = os.path.join(path, file)
eval_name = re.match("evaluation_(.*)\.", file)[1]
        eval_data = pd.read_json(eval_path, typ="series")
import base64
import io
import textwrap
import dash
import dash_core_components as dcc
import dash_html_components as html
import gunicorn
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
import flask
import pandas as pd
import urllib.parse
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
import numpy as np
import math
import scipy.stats
import dash_table
from dash_table.Format import Format, Scheme
from colour import Color
import dash_bootstrap_components as dbc
# from waitress import serve
external_stylesheets = [dbc.themes.BOOTSTRAP, 'https://codepen.io/chriddyp/pen/bWLwgP.css',
"https://codepen.io/sutharson/pen/dyYzEGZ.css",
"https://fonts.googleapis.com/css2?family=Raleway&display=swap",
"https://codepen.io/chriddyp/pen/brPBPO.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
# "external_url": "https://codepen.io/chriddyp/pen/brPBPO.css"
# https://raw.githubusercontent.com/aaml-analytics/pca-explorer/master/LoadingStatusStyleSheet.css
styles = {
'pre': {
'border': 'thin lightgrey solid',
'overflowX': 'scroll'
}
}
tabs_styles = {'height': '40px', 'font-family': 'Raleway', 'fontSize': 14}
tab_style = {
'borderBottom': '1px solid #d6d6d6',
'padding': '6px',
'Weight': 'bold'
}
tab_selected_style = {
'borderTop': '3px solid #333333',
'borderBottom': '1px solid #d6d6d6 ',
'backgroundColor': '#f6f6f6',
'color': '#333333',
# 'fontColor': '#004a4a',
'fontWeight': 'bold',
'padding': '6px'
}
# APP ABOUT DESCRIPTION
MOF_tool_about = textwrap.wrap(' These tools aim to provide a reproducible and consistent data visualisation platform '
'where experimental and computational researchers can use big data and statistical '
'analysis to find the best materials for specific applications. Principal Component '
'Analysis (PCA) is a dimension reduction technique that can be used to reduce a large '
'set of observable variables to a smaller set of latent variables that still contain '
'most of the information in the large set (feature extraction). This is done by '
'transforming a number of (possibly) correlated variables into some number of orthogonal '
'(uncorrelated) variables called principal components to find the directions of maximal '
'variance. PCA can be used to ease data visualisation by having fewer dimensions to plot '
'or be used as a pre-processing step before using another Machine Learning (ML)'
' algorithm for regression '
'and classification tasks. PCA can be used to improve an ML algorithm performance, '
'reduce overfitting and reduce noise in data.',
width=50)
Scree_plot_about = textwrap.wrap(' The Principal Component Analysis Visualisation Tools runs PCA for the user and '
'populates a Scree plot. This plot allows the user to determine if PCA is suitable '
'for '
                                 'their dataset and if they can accept an X% drop in explained variance to '
'have fewer dimensions.', width=50)
Feature_correlation_filter = textwrap.wrap("Feature correlation heatmaps provide users with feature analysis and "
"feature principal component analysis. This tool will allow users to see the"
" correlation between variables and the"
" covariances/correlations between original variables and the "
"principal components (loadings)."
, width=50)
plots_analysis = textwrap.wrap('Users can keep all variables as features or drop certain variables to produce a '
'Biplot, cos2 plot and contribution plot. The score plot is used to look for clusters, '
'trends, and outliers in the first two principal components. The loading plot is used to'
' visually interpret the first two principal components. The biplot overlays the score '
'plot and the loading plot on the same graph. The squared cosine (cos2) plot shows '
'the importance of a component for a given observation i.e. measures '
'how much a variable is represented in a component. The contribution plot contains the '
'contributions (%) of the variables to the principal components', width=50, )
data_table_download = textwrap.wrap("The user's inputs from the 'Plots' tab will provide the output of the data tables."
" The user can download the scores, eigenvalues, explained variance, "
"cumulative explained variance, loadings, "
"cos2 and contributions from the populated data tables. "
"Note: Wait for user inputs to be"
" computed (faded tab app will return to the original colour) before downloading the"
" data tables. ", width=50)
MOF_GH = textwrap.wrap(" to explore AAML's sample data and read more on"
" AAML's Principal Component Analysis Visualisation Tool Manual, FAQ's & Troubleshooting"
" on GitHub... ", width=50)
####################
# APP LAYOUT #
####################
fig = go.Figure()
fig1 = go.Figure()
app.layout = html.Div([
html.Div([
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/UOC.png',
height='35', width='140', style={'display': 'inline-block', 'padding-left': '1%'}),
html.Img(src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/A2ML-logo.png',
height='50', width='125', style={'float': 'right', 'display': 'inline-block', 'padding-right': '2%'}),
html.H1("Principal Component Analysis Visualisation Tools",
style={'display': 'inline-block', 'padding-left': '11%', 'text-align': 'center', 'fontSize': 36,
'color': 'white', 'font-family': 'Raleway'}),
html.H1("...", style={'fontColor': '#3c3c3c', 'fontSize': 6})
], style={'backgroundColor': '#333333'}),
html.Div([html.A('Refresh', href='/')], style={}),
html.Div([
html.H2("Upload Data", style={'fontSize': 24, 'font-family': 'Raleway', 'color': '#333333'}, ),
html.H3("Upload .txt, .csv or .xls files to starting exploring data...", style={'fontSize': 16,
'font-family': 'Raleway'}),
dcc.Store(id='csv-data', storage_type='session', data=None),
html.Div([dcc.Upload(
id='data-table-upload',
children=html.Div([html.Button('Upload File')],
style={'height': "60px", 'borderWidth': '1px',
'borderRadius': '5px',
'textAlign': 'center',
}),
multiple=False
),
html.Div(id='output-data-upload'),
]), ], style={'display': 'inline-block', 'padding-left': '1%', }),
html.Div([dcc.Tabs([
dcc.Tab(label='About', style=tab_style, selected_style=tab_selected_style,
children=[html.Div([html.H2(" What are AAML's Principal Component Analysis Visualisation Tools?",
style={'fontSize': 18, 'font-family': 'Raleway', 'font-weight': 'bold'
}),
html.Div([' '.join(MOF_tool_about)]
, style={'font-family': 'Raleway'}),
html.H2(["Scree Plot"],
style={'fontSize': 18,
'font-family': 'Raleway', 'font-weight': 'bold'}),
html.Div([' '.join(Scree_plot_about)], style={'font-family': 'Raleway'}),
html.H2(["Feature Correlation"], style={'fontSize': 18,
'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(Feature_correlation_filter)], style={'font-family': 'Raleway', }),
html.H2(["Plots"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(plots_analysis)], style={'font-family': 'Raleway'}),
html.H2(["Data tables"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(data_table_download)], style={'font-family': 'Raleway'}),
# ADD LINK
html.Div([html.Plaintext(
[' Click ', html.A('here ',
href='https://github.com/aaml-analytics/pca-explorer')],
style={'display': 'inline-block',
'fontSize': 14, 'font-family': 'Raleway'}),
html.Div([' '.join(MOF_GH)], style={'display': 'inline-block',
'fontSize': 14,
'font-family': 'Raleway'}),
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof'
'-explorer/master/github.png',
height='40', width='40',
style={'display': 'inline-block', 'float': "right"
})
]
, style={'display': 'inline-block'})
], style={'backgroundColor': '#ffffff', 'padding-left': '1%'}
)]),
dcc.Tab(label='Scree Plot', style=tab_style, selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='PC-Eigen-plot')
],
style={'display': 'inline-block',
'width': '49%'}),
html.Div([dcc.Graph(id='PC-Var-plot')
], style={'display': 'inline-block', 'float': 'right',
'width': '49%'}),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:", dcc.RadioItems(
id='outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '49%', 'padding-left': '1%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-scree',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Label(["You should attempt to use at least..."
, html.Div(id='var-output-container-filter')])
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["As a rule of thumb for the Scree Plot"
" Eigenvalues, the point where the slope of the curve "
"is clearly "
"leveling off (the elbow), indicates the number of "
"components that "
"should be retained as significant."])
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Feature correlation', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([html.Div([dcc.Graph(id='PC-feature-heatmap')
], style={'width': '47%',
'display': 'inline-block',
'float': 'right'}),
html.Div([dcc.Graph(id='feature-heatmap')
], style={'width': '51%',
'display': 'inline-block',
'float': 'left'}),
html.Div([html.Label(["Loading colour bar range:"
, html.Div(
id='color-range-container')])
], style={
'fontSize': 12,
'float': 'right',
'width': '100%',
'padding-left': '85%'}
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='PC-feature-outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label(
["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-heatmap',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([html.Label(["Select color scale:",
dcc.RadioItems(
id='colorscale',
options=[{'label': i, 'value': i}
for i in
['Viridis', 'Plasma']],
value='Plasma'
)]),
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("There are usually two ways multicollinearity, "
"which is when there are a number of variables "
"that are highly correlated, is dealt with:"),
html.P("1) Use PCA to obtain a set of orthogonal ("
"not correlated) variables to analyse."),
html.P("2) Use correlation of determination (R²) to "
"determine which variables are highly "
"correlated and use only 1 in analysis. "
"Cut off for highly correlated variables "
"is ~0.7."),
html.P(
"In any case, it depends on the machine learning algorithm you may apply later. For correlation robust algorithms,"
" such as Random Forest, correlation of features will not be a concern. For non-correlation robust algorithms such as Linear Discriminant Analysis, "
"all high correlation variables should be removed.")
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["Note: Data has been standardised (scale)"])
], style={'padding-left': '1%'})
])
]),
dcc.Tab(label='Plots', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([html.P("Selecting Features")], style={'padding-left': '1%',
'font-weight': 'bold'}),
html.Div([
html.P("Input here affects all plots, datatables and downloadable data output"),
html.Label([
"Would you like to analyse all variables or choose custom variables to "
"analyse:",
dcc.RadioItems(
id='all-custom-choice',
options=[{'label': 'All',
'value': 'All'},
{'label': 'Custom',
'value': 'Custom'}],
value='All'
)])
], style={'padding-left': '1%'}),
html.Div([
html.P("For custom variables input variables you would not like as features in your PCA:"),
html.Label(
[
"Note: Only input numerical variables (non-numerical variables have already "
"been removed from your dataframe)",
dcc.Dropdown(id='feature-input',
multi=True,
)])
], style={'padding': 10, 'padding-left': '1%'}),
]), dcc.Tabs(id='sub-tabs1', style=tabs_styles,
children=[
dcc.Tab(label='Biplot (Scores + loadings)', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='biplot', figure=fig)
], style={'height': '100%', 'width': '75%',
'padding-left': '20%'},
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-biplot',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-biplot',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([
html.Label([
"Graph Update to show either loadings (Loading Plot) or "
"scores and loadings (Biplot):",
dcc.RadioItems(
id='customvar-graph-update',
options=[{'label': 'Biplot',
'value': 'Biplot'},
{'label': 'Loadings',
'value': 'Loadings'}],
value='Biplot')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix. PCA is an unsupervised machine learning technique - it only "
"looks at the input features and does not take "
"into account the output or the target"
" (response) variable.")],
style={'padding-left': '1%'}),
html.Div([
html.P("For variables you have dropped..."),
html.Label([
"Would you like to introduce a first target variable"
" into your data visualisation?"
" (Graph type must be Biplot): "
"",
dcc.RadioItems(
id='radio-target-item',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select first target variable for color scale of scores: ",
dcc.Dropdown(
id='color-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Would you like to introduce a second target variable"
" into your data visualisation??"
" (Graph type must be Biplot):",
dcc.RadioItems(
id='radio-target-item-second',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select second target variable for size scale of scores:",
dcc.Dropdown(
id='size-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([html.Label(["Size range:"
, html.Div(
id='size-second-target-container')])
], style={'display': 'inline-block',
'float': 'right',
'padding-right': '5%'}
),
html.Div([
html.Br(),
html.P(
"A loading plot shows how "
"strongly each characteristic (variable)"
" influences a principal component. The angles between the vectors"
" tell us how characteristics correlate with one another: "),
html.P("1) When two vectors are close, forming a small angle, the two "
"variables they represent are positively correlated. "),
html.P(
"2) If they meet each other at 90°, they are not likely to be correlated. "),
html.P(
"3) When they diverge and form a large angle (close to 180°), they are negative correlated."),
html.P(
"The Score Plot involves the projection of the data onto the PCs in two dimensions."
"The plot contains the original data but in the rotated (PC) coordinate system"),
html.P(
"A biplot merges a score plot and loading plot together.")
], style={'padding-left': '1%'}
),
]),
dcc.Tab(label='Cos2', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='cos2-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-cos2',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'padding-left': '1%',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-cos2',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The squared cosine shows the importance of a "
"component for a given observation i.e. "
"measures "
" how much a variable is represented in a "
"component")
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Contribution', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='contrib-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-contrib',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
], style={'padding-left': '1%'})
], style={'display': 'inline-block',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-contrib',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The contribution plot contains the "
"contributions (in percentage) of the "
"variables to the principal components")
], style={'padding-left': '1%'}),
])
])
]),
dcc.Tab(label='Data tables', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([
html.Label(
["Note: Input in 'Plots' tab will provide output of data tables and the"
" downloadable PCA data"])
], style={'font-weight': 'bold', 'padding-left': '1%'}),
html.Div([html.A(
'Download PCA Data (scores for each principal component)',
id='download-link',
href="",
target="_blank"
)], style={'padding-left': '1%'}),
html.Div([html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(id="eigenA-outlier",
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])], style={'padding-left': '1%',
'display': 'inline-block', 'width': '49%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-data-table',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Div([
html.Label(["Correlation between Features"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-correlation',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-correlation-container'),
]),
html.Div([html.A(
'Download Feature Correlation data',
id='download-link-correlation',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Eigen Analysis of the correlation matrix"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-eigenA',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-eigenA-container'),
]),
html.Div([html.A(
'Download Eigen Analysis data',
id='download-link-eigenA',
href="",
download='Eigen_Analysis_data.csv',
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Loadings (Feature and PC correlation) from PCA"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-loadings',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-loadings-container'),
]),
html.Div([html.A(
'Download Loadings data',
id='download-link-loadings',
download='Loadings_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Cos2 from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-cos2',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-cos2-container'),
]),
html.Div([html.A(
'Download Cos2 data',
id='download-link-cos2',
download='Cos2_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Contributions from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-contrib',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-contrib-container'),
]),
html.Div([html.A(
'Download Contributions data',
id='download-link-contrib',
download='Contributions_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
])])
])
], style={'font-family': 'Raleway'})])
# READ FILE
def parse_contents(contents, filename):
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
try:
if 'csv' in filename:
# Assume that the user uploaded a CSV file
df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
            df = df.fillna(0)
elif 'xls' in filename:
# Assume that the user uploaded an excel file
df = pd.read_excel(io.BytesIO(decoded))
            df = df.fillna(0)
        elif 'txt' in filename or 'tsv' in filename:
            df = pd.read_csv(io.StringIO(decoded.decode('utf-8')), delimiter=r'\s+')
            df = df.fillna(0)
except Exception as e:
print(e)
return html.Div([
'There was an error processing this file.'
])
return df
@app.callback(Output('csv-data', 'data'),
[Input('data-table-upload', 'contents')],
[State('data-table-upload', 'filename')])
def parse_uploaded_file(contents, filename):
if not filename:
return dash.no_update
df = parse_contents(contents, filename)
    df = df.fillna(0)
return df.to_json(date_format='iso', orient='split')
@app.callback(Output('PC-Var-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_graph_stat(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
data = Var_dff
elif outlier == 'Yes' and matrix_type == 'Correlation':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
data = Var_dff_outlier
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
data = Var_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
data = Var_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Cumulative Proportion of Explained Variance'],
mode='lines', line=dict(color='Red')))
return {'data': traces,
'layout': go.Layout(title='<b>Scree Plot: Cumulative Proportion of Explained Variance</b>',
titlefont=dict(family='Helvetica', size=16),
xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True
}, yaxis={'title': 'Cumulative Explained Variance',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True,
'range': [0, 100]},
hovermode='closest', font=dict(family="Helvetica"), template="simple_white")
}
@app.callback(
Output('var-output-container-filter', 'children'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_output(outlier, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Standardizing the features to mean 0 and variance 1
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
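# np.interp linearly interpolates the (fractional) component index at which the cumulative
# explained variance first reaches 70%; math.ceil then rounds up to a whole number of
# components to report back to the user.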
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int)
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_covar)
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
# combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
@app.callback(Output('PC-Eigen-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')]
)
def update_graph_stat(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == "Correlation":
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to mean 0 and variance 1
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
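# pca.explained_variance_ holds the component eigenvalues (for standardized data these are
# essentially the eigenvalues of the correlation matrix). A common rule of thumb, the Kaiser
# criterion, keeps components with eigenvalue > 1, which this scree plot lets the user judge.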
eigenvalues = pca.explained_variance_
Eigen_df = pd.DataFrame(data=eigenvalues, columns=['Eigenvalues'])
Eigen_dff = pd.concat([PC_df, Eigen_df], axis=1)
data = Eigen_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier = pca_outlier.explained_variance_
Eigen_df_outlier = pd.DataFrame(data=eigenvalues_outlier, columns=['Eigenvalues'])
Eigen_dff_outlier = pd.concat([PC_df_outlier, Eigen_df_outlier], axis=1)
data = Eigen_dff_outlier
elif outlier == 'No' and matrix_type == "Covariance":
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
eigenvalues_covar = pca_covar.explained_variance_
Eigen_df_covar = pd.DataFrame(data=eigenvalues_covar, columns=['Eigenvalues'])
Eigen_dff_covar = pd.concat([PC_df_covar, Eigen_df_covar], axis=1)
data = Eigen_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
# combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier_covar = pca_outlier_covar.explained_variance_
Eigen_df_outlier_covar = pd.DataFrame(data=eigenvalues_outlier_covar, columns=['Eigenvalues'])
Eigen_dff_outlier_covar = pd.concat([PC_df_outlier_covar, Eigen_df_outlier_covar], axis=1)
data = Eigen_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Eigenvalues'], mode='lines'))
return {'data': traces,
'layout': go.Layout(title='<b>Scree Plot Eigenvalues</b>', xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True},
titlefont=dict(family='Helvetica', size=16),
yaxis={'title': 'Eigenvalues', 'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True}, hovermode='closest',
font=dict(family="Helvetica"), template="simple_white", )
}
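# Helpers that round a value up/down to a fixed number of decimals, e.g. round_up(0.123, 2)
# returns 0.13 and round_down(0.678, 2) returns 0.67; they are used below to derive a tidy
# color-bar range for the loadings heatmap.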
def round_up(n, decimals=0):
multiplier = 10 ** decimals
return math.ceil(n * multiplier) / multiplier
def round_down(n, decimals=0):
multiplier = 10 ** decimals
return math.floor(n * multiplier) / multiplier
@app.callback([Output('PC-feature-heatmap', 'figure'),
Output('color-range-container', 'children')],
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input("matrix-type-heatmap", "value"),
Input('csv-data', 'data')]
)
def update_graph_stat(outlier, colorscale, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
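# This callback pre-computes the loading matrices for all four settings
# (with/without outliers x correlation/covariance) and selects the matching one further down.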
# INCLUDING OUTLIERS
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to mean 0 and variance 1
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
# explained variance of the principal components
# print(pca.explained_variance_ratio_)
# Explained variance tells us how much information (variance) can be attributed to each principal component
# loading of each feature on the principal components
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
# OUTLIERS REMOVED
z_scores_hm = scipy.stats.zscore(dff)
abs_z_scores_hm = np.abs(z_scores_hm)
filtered_entries_hm = (abs_z_scores_hm < 3).all(axis=1)
outlier_dff_hm = dff[filtered_entries_hm]
features1_outlier_hm = outlier_dff_hm.columns
features_outlier2 = list(features1_outlier_hm)
outlier_names1_hm = df[filtered_entries_hm]
outlier_names_hm = outlier_names1_hm.iloc[:, 0]
x_outlier_hm = outlier_dff_hm.loc[:, features_outlier2].values
# Separating out the target (if any)
# Standardizing the features
x_outlier_hm = StandardScaler().fit_transform(x_outlier_hm)
pca_outlier_hm = PCA(n_components=len(features_outlier2))
principalComponents_outlier_hm = pca_outlier_hm.fit_transform(x_outlier_hm)
principalDf_outlier_hm = pd.DataFrame(data=principalComponents_outlier_hm
, columns=['PC' + str(i + 1) for i in range(len(features_outlier2))])
# combining principal components and target
finalDf_outlier_hm = pd.concat([outlier_names_hm, principalDf_outlier_hm], axis=1)
dfff_outlier_hm = finalDf_outlier_hm
# calculating loading
loading_outlier_hm = pca_outlier_hm.components_.T * np.sqrt(pca_outlier_hm.explained_variance_)
loading_df_outlier_hm = pd.DataFrame(data=loading_outlier_hm[0:, 0:], index=features_outlier2,
columns=['PC' + str(i + 1) for i in range(loading_outlier_hm.shape[1])])
loading_dff_outlier_hm = loading_df_outlier_hm.T
# COVAR MATRIX
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
loading_dff_covar = loading_df_covar.T
# COVAR MATRIX OUTLIERS REMOVED
if outlier == 'No' and matrix_type == "Correlation":
data = loading_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_dff_outlier_hm
elif outlier == 'No' and matrix_type == "Covariance":
data = loading_dff_covar
elif outlier == "Yes" and matrix_type == "Covariance":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
# combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
loading_dff_outlier_covar = loading_df_outlier_covar.T
data = loading_dff_outlier_covar
size_range = [round_up(data.values.min(), 2), round_down(data.values.max(), 2)]
traces.append(go.Heatmap(
z=data, x=data.columns, y=data.index,
colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
# each cell is the loading of a feature on a principal component (the feature-PC correlation for correlation-matrix PCA)
colorbar={"title": "Loading",
# 'tickvals': [round_up(data.values.min(), 2),
# round_up((data.values.min() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down((data.values.max() + data.values.min())/2,2),
# round_down((data.values.max() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down(data.values.max(),2), ]
}
))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>PC and Feature Correlation Analysis</b>'),
xaxis=dict(title_text='Features', title_standoff=50),
titlefont=dict(family='Helvetica', size=16),
hovermode='closest', margin={'b': 110, 't': 50, 'l': 75},
font=dict(family="Helvetica", size=11),
annotations=[
dict(x=-0.16, y=0.5, showarrow=False, text="Principal Components",
xref='paper', yref='paper', textangle=-90,
font=dict(size=12))]
),
}, '{}'.format(size_range)
@app.callback(Output('feature-heatmap', 'figure'),
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input('csv-data', 'data')])
def update_graph_stat(outlier, colorscale, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
if outlier == 'No':
features1 = dff.columns
features = list(features1)
# correlation coefficient and coefficient of determination
correlation_dff = dff.corr(method='pearson', )
r2_dff = correlation_dff * correlation_dff
data = r2_dff
feat = features
elif outlier == 'Yes':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
# correlation coefficient and coefficient of determination
correlation_dff_outlier = outlier_dff.corr(method='pearson', )
r2_dff_outlier = correlation_dff_outlier * correlation_dff_outlier
data = r2_dff_outlier
feat = features_outlier
traces.append(go.Heatmap(
z=data, x=feat, y=feat, colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
# each cell is R², the squared Pearson correlation between a pair of features
colorbar={"title": "R²", 'tickvals': [0, 0.2, 0.4, 0.6, 0.8, 1]}))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>Feature Correlation Analysis</b>', y=0.97, x=0.6),
xaxis={},
titlefont=dict(family='Helvetica', size=16),
yaxis={},
hovermode='closest', margin={'b': 110, 't': 50, 'l': 180, 'r': 50},
font=dict(family="Helvetica", size=11)),
}
@app.callback(Output('feature-input', 'options'),
[Input('all-custom-choice', 'value'),
Input('csv-data', 'data')])
def activate_input(all_custom, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
options = []
elif all_custom == 'Custom':
options = [{'label': i, 'value': i} for i in dff.columns]
return options
@app.callback(Output('color-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')], )
def populate_color_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
@app.callback(Output('size-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item-second', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')])
def populate_color_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
@app.callback(Output('biplot', 'figure'),
[
Input('outlier-value-biplot', 'value'),
Input('feature-input', 'value'),
Input('customvar-graph-update', 'value'),
Input('color-scale-scores', 'value'),
Input('radio-target-item', 'value'),
Input('size-scale-scores', 'value'),
Input('radio-target-item-second', 'value'),
Input('all-custom-choice', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_graph_custom(outlier, input, graph_update, color, target, size, target2, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
features1 = dff.columns
features = list(features1)
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
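# After standardization the covariance matrix of x_scale equals (up to the n vs n-1 factor)
# the correlation matrix of the raw features, so this branch is correlation-matrix PCA.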
# x_scale = rescale(x_scale, new_min=0, new_max=1)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
dfff_scale = finalDf_scale.fillna(0)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
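# A matching block of (0, 0) rows is concatenated below so that, once the traces are grouped
# by 'line_group', each feature is drawn as a segment from the origin to its (PC1, PC2)
# loading, i.e. the biplot loading vectors.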
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_dff = pd.concat([zero_scale_df, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# x_outlier_scale = MinMaxScaler().fit_transform(x_outlier_scale)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_outlier_scale = rescale(x_outlier_scale, new_min=0, new_max=1)
# uses covariance matrix
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
dfff_outlier_scale = finalDf_outlier_scale.fillna(0)
# calculating loading
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_df, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
dfff_scale_covar = finalDf_scale_covar.fillna(0)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_df_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, line_group_scale_df_covar], axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
# COVARIANCE MATRIX OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
dfff_outlier_scale_covar = finalDf_outlier_scale_covar.fillna(0)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_df_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_dff_covar = pd.concat([zero_outlier_scale_df_covar, line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_outlier_scale_covar
trace2_all = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, showscale=False, size=12,
line=dict(width=0.5, color='DarkSlateGrey'),
),
)
####################################################################################################
# select the pre-computed loadings/variance and build the loading-vector line traces
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_covar
variance = Var_outlier_scale_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], line=dict(color="#4f4f4f"),
name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text',
textposition='bottom right', textfont=dict(size=12)
)
lists[counter] = trace1_all
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2_all)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == 'Custom':
# Drop the user-selected feature(s) from the PCA inputs; they are kept aside in dff_target for coloring/sizing
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# x_scale_input = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input = rescale(x_scale_input, new_min=0, new_max=1)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_df, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_dff = pd.concat([zero_scale_input_df, line_group_scale_input_df], axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# x_scale_input_outlier = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input_outlier)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input_outlier = rescale(x_scale_input_outlier, new_min=0, new_max=1)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat([loading_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_df_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, line_group_scale_input_df_covar], axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
# COVARIANCE MATRIX OUTLIERS
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat([loading_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff_covar = pd.concat([zero_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale_input
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_scale_input_outlier
variance = Var_scale_input_outlier
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_input_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_scale_input_outlier_covar
variance = Var_scale_input_outlier_covar
trace2 = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
marker_color=dat[color] if target == 'Yes' else None,
marker_size=dat[size] if target2 == 'Yes' else 12,
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, colorscale='Plasma',
sizeref=max(dat[size]) / (15 ** 2) if target2 == 'Yes' else None,
sizemode='area',
showscale=True if target == 'Yes' else False,
line=dict(width=0.5, color='DarkSlateGrey'),
colorbar=dict(title=dict(text=color if target == 'Yes' else None,
font=dict(family='Helvetica'),
side='right'), ypad=0),
),
)
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_scale_input_outlier_line_graph
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_scale_input_outlier_line_graph_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'],
line=dict(color="#666666" if target == 'Yes' else '#4f4f4f'), name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
)
lists[counter] = trace1
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
@app.callback(
Output('size-second-target-container', 'children'),
[Input('size-scale-scores', 'value'),
Input('outlier-value-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_output(size, outlier, data):
if not data:
return dash.no_update
if size is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
z_scores_dff_size = scipy.stats.zscore(dff)
abs_z_scores_dff_size = np.abs(z_scores_dff_size)
filtered_entries_dff_size = (abs_z_scores_dff_size < 3).all(axis=1)
dff_target_outlier_size = dff[filtered_entries_dff_size]
if outlier == 'Yes':
size_range = [round(dff_target_outlier_size[size].min(), 2), round(dff_target_outlier_size[size].max(), 2)]
elif outlier == 'No':
size_range = [round(dff[size].min(), 2), round(dff[size].max(), 2)]
return '{}'.format(size_range)
@app.callback(Output('cos2-plot', 'figure'),
[
Input('outlier-value-cos2', 'value'),
Input('feature-input', 'value'),
Input('all-custom-choice', 'value'),
Input("matrix-type-cos2", "value"),
Input('csv-data', 'data')
])
def update_cos2_plot(outlier, input, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
features1 = dff.columns
features = list(features1)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df['cos2'] = (loading_scale_df["PC1"] ** 2) + (loading_scale_df["PC2"] ** 2)
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_df_color = pd.DataFrame(data=loading_scale_df.iloc[:, 2], columns=['cos2'])
zero_scale_dff = pd.concat([zero_scale_df, zero_scale_df_color, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# x_outlier_scale = MinMaxScaler().fit_transform(x_outlier_scale)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
#
# x_outlier_scale = rescale(x_outlier_scale, new_min=0, new_max=1)
# uses covariance matrix
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
# calculating loading
# calculating loading vector plot
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df["cos2"] = (loading_outlier_scale_df["PC1"] ** 2) + (
loading_outlier_scale_df["PC2"] ** 2)
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_df, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_df_color = pd.DataFrame(data=loading_outlier_scale_df.iloc[:, 2], columns=['cos2'])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, zero_outlier_scale_df_color, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
loading_scale_line_graph_sort = loading_scale_line_graph.sort_values(by='cos2')
loading_outlier_scale_line_graph_sort = loading_outlier_scale_line_graph.sort_values(by='cos2')
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df_covar['cos2'] = (loading_scale_df_covar["PC1"] ** 2) + (loading_scale_df_covar["PC2"] ** 2)
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_df_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_df_color_covar = pd.DataFrame(data=loading_scale_df_covar.iloc[:, 2], columns=['cos2'])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, zero_scale_df_color_covar, line_group_scale_df_covar],
axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
loading_scale_line_graph_sort_covar = loading_scale_line_graph_covar.sort_values(by='cos2')
# COVARIANCE MATRIX WITH OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
# combining principle components and target
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
    # calculating loading vector plot
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df_covar["cos2"] = (loading_outlier_scale_df_covar["PC1"] ** 2) + (
loading_outlier_scale_df_covar["PC2"] ** 2)
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_df_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_df_color_covar = pd.DataFrame(data=loading_outlier_scale_df_covar.iloc[:, 2],
columns=['cos2'])
zero_outlier_scale_dff_covar = pd.concat([zero_outlier_scale_df_covar, zero_outlier_scale_df_color_covar,
line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
loading_outlier_scale_line_graph_sort_covar = loading_outlier_scale_line_graph_covar.sort_values(by='cos2')
# scaling data
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph_sort
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph_sort
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_sort_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_sort_covar
variance = Var_outlier_scale_covar
N = len(data['cos2'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter = 0
counter_color = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], mode='lines+text',
name=i, line=dict(color=colorscale[counter_color]),
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
textposition='bottom right', textfont=dict(size=12)
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers',
hoverinfo='skip',
marker=dict(showscale=True, opacity=0,
color=[data["cos2"].min(), data["cos2"].max()],
colorscale=colorscale,
colorbar=dict(title=dict(text="Cos2",
side='right'), ypad=0)
), )
lists[counter] = trace1_all
counter = counter + 1
counter_color = counter_color + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)), mirror=True,
ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)), mirror=True,
ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == "Custom":
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# # x_scale_input = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
#
# x_scale_input = rescale(x_scale_input, new_min=0, new_max=1)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df["cos2"] = (loading_scale_input_df["PC1"] ** 2) + (loading_scale_input_df["PC2"] ** 2)
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_df, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_df_color = pd.DataFrame(data=loading_scale_input_df.iloc[:, 2], columns=['cos2'])
zero_scale_input_dff = pd.concat([zero_scale_input_df, zero_scale_input_df_color, line_group_scale_input_df],
axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# # x_scale_input_outlier = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input_outlier)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input_outlier = rescale(x_scale_input_outlier, new_min=0, new_max=1)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df["cos2"] = (loading_scale_input_outlier_df["PC1"] ** 2) + \
(loading_scale_input_outlier_df["PC2"] ** 2)
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat([loading_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color = pd.DataFrame(data=loading_scale_input_outlier_df.iloc[:, 2],
columns=['cos2'])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, zero_scale_input_outlier_df_color,
line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
loading_scale_input_line_graph_sort = loading_scale_input_line_graph.sort_values(by='cos2')
loading_scale_input_outlier_line_graph_sort = loading_scale_input_outlier_line_graph.sort_values(by='cos2')
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df_covar["cos2"] = (loading_scale_input_df_covar["PC1"] ** 2) + (
loading_scale_input_df_covar["PC2"] ** 2)
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_df_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
        zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])  # api: pandas.DataFrame
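# A minimal, self-contained sketch of the loading / cos2 computation that the
# blocks above repeat for every matrix-type and outlier combination (toy data,
# not the app's uploaded frame):
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

_X = np.random.RandomState(0).normal(size=(100, 4))
_pca = PCA(n_components=4).fit(StandardScaler().fit_transform(_X))
_loadings = _pca.components_.T * np.sqrt(_pca.explained_variance_)    # variable loadings
_loading_df = pd.DataFrame(_loadings[:, :2], columns=["PC1", "PC2"])
_loading_df["cos2"] = _loading_df["PC1"] ** 2 + _loading_df["PC2"] ** 2    # quality of representation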
import finterstellar as fs
import pandas as pd
import numpy as np
import datetime as dt
class LoadData:
def read_investing_price(self, path, cd):
file_name = path + cd + ' Historical Data.csv'
df = pd.read_csv(file_name, index_col='Date')
return (df)
def create_portfolio_df(self, path, p_name, p_cd):
new_df = self.make_historical_price_df(path, p_cd)
prices_df = self.create_master_file(path, p_name, new_df)
prices_df = self.update_master_file(path, p_name, new_df)
return (prices_df)
def make_historical_price_df(self, path, s_cd):
cds = fs.str_list(s_cd)
dates = pd.Series()
for c in cds:
prices_df = self.read_investing_price(path, c)
prices_df = self.date_formatting(prices_df)
c = prices_df['Price']
dates_new = pd.Series(prices_df.index)
dates = dates.append(dates_new)
dates = dates.drop_duplicates().sort_values().reset_index()
dates = dates.drop(['index'], axis=1)
universe_df = pd.DataFrame(index=dates[0])
universe_df.index.name = 'Date'
for c in cds:
prices_df = self.read_investing_price(path, c)
prices_df = self.date_formatting(prices_df)
prices_df = self.price_df_trimming(prices_df, c)
universe_df[c] = prices_df[c]
universe_df
universe_df = universe_df.fillna(method='ffill')
return (universe_df)
def create_master_file(self, path, f_name, df):
file_name = path + 'fs ' + f_name + '.csv'
try:
f = open(file_name)
print('Updating master file')
f.close()
except IOError as e:
df.index = pd.to_datetime(df.index)
df.index.name = 'Date'
#df = df.fillna(method='ffill')
#today_date = pd.Timestamp.today().date().strftime('%y%m%d')
df.to_csv(file_name)
return (df)
def update_master_file(self, path, n, new_df):
try:
file_name = 'fs ' + n + '.csv'
master_df = self.read_master_file(path, n)
universe_df = new_df.combine_first(master_df)
universe_df.index.name = 'Date'
#universe_df = universe_df.fillna(method='ffill')
universe_df.to_csv(path + file_name)
except IOError as e:
print('Creating master file')
self.create_master_file(path, n, new_df)
universe_df = new_df
return (universe_df)
def read_master_file(self, path, n):
file_name = path + 'fs ' + n + '.csv'
prices_df = pd.read_csv(file_name, index_col='Date')
dates = []
for i in prices_df.index:
d = pd.to_datetime(i)
dates.append(d)
        prices_df['Date'] = dates  # replace the Date values
prices_df = prices_df.set_index('Date')
return (prices_df)
def get_codes(self, prices_df):
codes = prices_df.columns.values
return (codes)
def read_raw_csv(self, path, n):
file_name = path + n + '.csv'
df = pd.read_csv(file_name, index_col='Date')
dates = []
for i in df.index:
#d = dt.datetime.strptime(i, '%Y-%m-%d')
d = pd.to_datetime(i)
dates.append(d)
        df['Date'] = dates  # replace the Date values
df = df.set_index('Date')
df.sort_index(axis=0, inplace=True)
return (df)
def read_raw_excel(self, path, n, sheet=None):
file_name = path + n
df = pd.read_excel(file_name, index_col=0)
dates = []
for i in df.index:
d = pd.to_datetime(i)
dates.append(d)
        df['Date'] = dates  # replace the Date values
df = df.set_index('Date')
df.sort_index(axis=0, inplace=True)
return (df)
def date_formatting(self, df):
dates = []
for i in df.index:
#d = dt.datetime.strptime(df.iloc[i,0], '%b %d, %Y')
#d = pd.to_datetime(df.iloc[i,0])
            d = pd.to_datetime(i)  # api: pandas.to_datetime
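# A small sketch of the index clean-up that date_formatting / read_master_file
# perform above, on a hypothetical two-row price frame (not Investing.com data):
import pandas as pd

_px = pd.DataFrame({'Price': [1.0, 2.0]}, index=['Jan 02, 2020', 'Jan 03, 2020'])
_px.index = pd.to_datetime(_px.index)   # parse the string dates
_px.index.name = 'Date'
_px = _px.sort_index()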
from interface import *
from steps import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from copy import copy
class ADSAApp():
"""The class managing the interface for the project.
:param App app: The curses app wrapper where we will draw the interface.
"""
def __init__(self, app=None):
if app is None:
app = App()
self.app = app
height, width = self.app.stdscr.getmaxyx()
self.widgets = {}
# Main Menu
texts_main_menu = ["Choose a step:", "Step 1", "Step 2", "Step 3", "Step 4", "Exit"]
main_menu = Menu(self._get_coord_centered(height, width, texts_main_menu), texts_main_menu, True, True)
main_menu.bind(lambda x : self.main_menu_function(x))
self.widgets["main_menu"] = main_menu
# Step1 Menu
texts_step1 = [ "Wich datastructure do you want to use ?", "AVL Tree", "Array", "Return"]
step1_menu = Menu(self._get_coord_centered(height, width, texts_step1), texts_step1, True, True)
step1_menu.bind(lambda x : self.step1_menu_function(x))
self.widgets["step1_menu"] = step1_menu
def main_menu_function(self, index):
self.app.stdscr.clear()
if index == 1:
self.widgets["step1_menu"].start(self.app)
elif index == 2:
self._find_impostors("data/adjacency_matrix.txt")
elif index == 3:
self._get_distance("data/graph_crewmates.txt", "data/graph_impostors.txt")
elif index == 4:
#self.display_step4()
self.step4()
elif index == 5:
return False
return True
def step1_menu_function(self, index):
self.app.stdscr.clear()
game = None
if index == 1:
self._play_game("AVLTree")
elif index == 2:
self._play_game("Array")
return False
def _play_game(self, datastructure):
height, width = self.app.stdscr.getmaxyx()
screen_game = FakeScreen([5, 5], [height - 10, width - 10])
game = Game(datastructure)
screen_game.insert_line(f"Game created with {datastructure} to store the players.")
for i in range(3):
screen_game.insert_line(f"Playing round {game.round}.")
screen_game.insert_line(f" ↪Remaining players {game.get_nb_players()}.")
game.simulate_game()
game.sort_players()
screen_game.insert_line(f"END POOL !")
while game.get_nb_players() > 10:
screen_game.insert_line(f"Playing round {game.round}")
screen_game.insert_line(f" ↪Remaining players {game.get_nb_players()}.")
game.simulate_game(True)
game.sort_players()
game.delete_last_player()
screen_game.insert_line(f"FINALS:")
for i in range(5):
screen_game.insert_line(f"Playing round {game.round}")
screen_game.insert_line(f" ↪Remaining players {game.get_nb_players()}.")
game.simulate_game(True)
game.sort_players()
last_players = game.players.__str__().split('\n')
if datastructure == "AVLTree":
last_players = last_players[::-1]
for i in range(len(last_players)):
screen_game.insert_line(f"{i + 1}. {last_players[i]}")
screen_game.start(self.app)
def _find_impostors(self, filepath):
height, width = self.app.stdscr.getmaxyx()
screen_game = FakeScreen([5, 5], [height - 10, width - 10])
adjacency_matrix = np.genfromtxt(filepath, delimiter=",")
suspects = get_suspects(adjacency_matrix, [0])
screen_game.insert_line("Suspects:")
for key, val in suspects.items():
screen_game.insert_line(f" {key} is a suspect. He met {val} dead player.")
suspects_pair = get_suspects_pairs(suspects, adjacency_matrix, [0])
screen_game.insert_line("")
screen_game.insert_line("Suspects pair:")
for pair in suspects_pair:
screen_game.insert_line(f" {pair[0]} and {pair[1]}")
screen_game.insert_line("")
screen_game.insert_line("Press the escape key to continue...")
screen_game.start(self.app)
def _get_distance(self, filepath_crewmates, filepath_impostors, position=None):
height, width = self.app.stdscr.getmaxyx()
screen_game = FakeScreen([5, 5], [height - 10, width - 10])
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
        pd.set_option('display.expand_frame_repr', False)  # api: pandas.set_option
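# The three pd.set_option calls above mutate global display state; a scoped
# alternative (a sketch, not something the app requires) is option_context:
import pandas as pd

with pd.option_context('display.max_rows', 500,
                       'display.max_columns', 500,
                       'display.expand_frame_repr', False):
    print(pd.DataFrame({'a': range(3)}))   # wide/long frames print un-truncated here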
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from pmaf.biome.essentials._metakit import (
EssentialFeatureMetabase,
EssentialSampleMetabase,
)
from pmaf.biome.essentials._base import EssentialBackboneBase
from collections import defaultdict
from os import path
import pandas as pd
import numpy as np
import biom
from typing import Union, Sequence, Tuple, Callable, Any, Optional
from pmaf.internal._typing import AnyGenericIdentifier, Mapper
class FrequencyTable(
EssentialBackboneBase, EssentialFeatureMetabase, EssentialSampleMetabase
):
"""An essential class for handling frequency data."""
def __init__(
self,
frequency: Union[pd.DataFrame, str],
skipcols: Union[Sequence[Union[str, int]], str, int] = None,
allow_nan: bool = False,
**kwargs
):
"""Constructor for :class:`.FrequencyTable`
Parameters
----------
frequency
Data containing frequency data.
skipcols
Columns to skip when processing data.
allow_nan
Allow NA/NaN values or raise an error.
kwargs
Remaining parameters passed to :func:`~pandas.read_csv` or :mod:`biom` loader
"""
self.__internal_frequency = None
tmp_skipcols = np.asarray([])
tmp_metadata = kwargs.pop("metadata", {})
if skipcols is not None:
if isinstance(skipcols, (str, int)):
tmp_skipcols = np.asarray([skipcols])
elif isinstance(skipcols, (list, tuple)):
if not isinstance(skipcols[0], (str, int)):
tmp_skipcols = np.asarray(skipcols)
else:
raise TypeError(
"`skipcols` can be int/str or list-like of int/str."
)
else:
raise TypeError("`skipcols` can be int/str or list-like of int/str.")
if isinstance(frequency, pd.DataFrame):
if all(frequency.shape):
tmp_frequency = frequency
else:
raise ValueError("Provided `frequency` Datafame is invalid.")
elif isinstance(frequency, str):
if not path.isfile(frequency):
raise FileNotFoundError("Provided `frequency` file path is invalid.")
file_extension = path.splitext(frequency)[-1].lower()
if file_extension in [".csv", ".tsv"]:
tmp_frequency = pd.read_csv(frequency, **kwargs)
elif file_extension in [".biom", ".biome"]:
tmp_frequency, new_metadata = self.__load_biom(frequency, **kwargs)
tmp_metadata.update({"biom": new_metadata})
else:
raise NotImplementedError("File type is not supported.")
else:
raise TypeError("Provided `frequency` has invalid type.")
if skipcols is not None:
if np.issubdtype(tmp_skipcols.dtype, np.number):
if tmp_frequency.columns.isin(tmp_skipcols).any():
tmp_frequency.drop(columns=tmp_skipcols, inplace=True)
else:
tmp_frequency.drop(
columns=tmp_frequency.columns[tmp_skipcols], inplace=True
)
else:
tmp_frequency.drop(columns=tmp_skipcols, inplace=True)
tmp_dtypes = list(set(tmp_frequency.dtypes.values))
        if len(tmp_dtypes) == 1 and pd.api.types.is_numeric_dtype(tmp_dtypes[0]):  # api: pandas.api.types.is_numeric_dtype
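# A compact sketch of the dtype validation the constructor performs above:
# every column of the parsed frequency table must share one numeric dtype.
import pandas as pd

_freq = pd.DataFrame({'sample1': [3, 0], 'sample2': [1, 2]})
_dtypes = list(set(_freq.dtypes.values))
assert len(_dtypes) == 1 and pd.api.types.is_numeric_dtype(_dtypes[0])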
#-*-coding:utf-8-*-
import numpy as np
import pandas as pd
import time
from bayes_smoothing import *
from sklearn.preprocessing import LabelEncoder
import copy
def roll_browse_fetch(df, column_list):
print("==========================roll_browse_fetch ing==============================")
df = df.sort('context_timestamp')
df['tmp_count'] = df['status']
for c in column_list:
if isinstance(c, (list, tuple)):
pair = [cc for cc in c]
c = '_'.join(c)
else:
pair = [c]
df['%s_browse' %c] = df.groupby(pair)['tmp_count'].cumsum()
del df['tmp_count']
return df
def roll_click_fetch(df, column_list):
print("==========================roll_click_fetch ing==============================")
for c in column_list:
if isinstance(c, (list, tuple)):
pair = [cc for cc in c]
c = '_'.join(c)
else:
pair = [c]
df['%s_click' %c] = df.groupby(pair)['is_trade'].cumsum()
df['%s_click' %c] = df['%s_click' %c]-df['is_trade']
return df
def roll_rate_fetch(df, column_list):
df = roll_browse_fetch(df,column_list)
df = roll_click_fetch(df,column_list)
print("==========================roll_rate_fetch ing==============================\n")
for c in column_list:
if isinstance(c, (list, tuple)):
c = '_'.join(c)
df['%s_rate' %c] = bs_utilize(df['%s_browse' %c], df['%s_click' %c])
# del df['%s_browse' %c]
return df
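# A compact sketch of the leak-free cumulative-rate idea behind the roll_* helpers:
# per key, count the impressions/clicks seen so far and exclude the current row's
# label (bs_utilize is assumed to be a Beta-prior smoother; a plain ratio is used here).
import pandas as pd

_log = pd.DataFrame({'item_id': [1, 1, 1, 2], 'is_trade': [0, 1, 0, 1]})
_log['item_browse'] = _log.groupby('item_id').cumcount()                               # prior impressions
_log['item_click'] = _log.groupby('item_id')['is_trade'].cumsum() - _log['is_trade']   # prior clicks
_log['item_rate'] = _log['item_click'] / _log['item_browse'].clip(lower=1)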
#=================================== per-day conversion rate ==============================
def roll_browse_day(df, column_list):
df = df.sort('context_timestamp')
df['tmp_count'] = df['status']
df_data_temp =df.copy()
for c in column_list:
if isinstance(c, (list, tuple)):
pair = [cc for cc in c]
c = '_'.join(c)
else:
pair = [c]
pair.append('day')
df_temp = df.groupby(pair)['tmp_count'].agg({"browse_temp":np.sum}).reset_index()
pair_temp =copy.copy(pair)
pair_temp.remove('day')
df_temp["{}_day_browse".format(c)] = df_temp.groupby(pair_temp)["browse_temp"].cumsum()
df_temp["{}_day_browse".format(c)] = df_temp["{}_day_browse".format(c)] - df_temp['browse_temp']
del df_temp['browse_temp']
df_data_temp = pd.merge(df_data_temp,df_temp,how = "left",on = pair )
del df['tmp_count']
return df_data_temp
def roll_click_day(df, column_list):
df = df.sort('context_timestamp')
df_data_temp =df.copy()
for c in column_list:
if isinstance(c, (list, tuple)):
pair = [cc for cc in c]
c = '_'.join(c)
else:
pair = [c]
pair.append('day')
df_temp = df.groupby(pair)['is_trade'].agg({"click_temp":np.sum}).reset_index()
pair_temp = copy.copy(pair)
pair_temp.remove('day')
df_temp["{}_day_click".format(c)] = df_temp.groupby(pair_temp)["click_temp"].cumsum()
df_temp["{}_day_click".format(c)] = df_temp["{}_day_click".format(c)] - df_temp['click_temp']
del df_temp['click_temp']
df_data_temp = pd.merge(df_data_temp,df_temp,how = "left",on = pair)
return df_data_temp
def roll_rate_day(df,column_list):
print("==========================roll_rate_day ing==============================")
df = roll_browse_day(df,column_list)
df =roll_click_day(df,column_list)
for c in column_list:
if isinstance(c, (list, tuple)):
c = '_'.join(c)
df['%s_day_rate' %c] = bs_utilize(df['%s_day_browse' %c], df['%s_day_click' %c])
# del df['%s_day_browse'%c]
# del df['%s_day_click'%c]
return df
#=================================== per-day-and-hour conversion rate ==============================
def roll_browse_day_hour(df, column_list):
df = df.sort('context_timestamp')
df['tmp_count'] = df['status']
df_data_temp =df.copy()
for c in column_list:
if isinstance(c, (list, tuple)):
pair = [cc for cc in c]
c = '_'.join(c)
else:
pair = [c]
pair.append('day')
pair.append('hour')
df_temp = df.groupby(pair)['tmp_count'].agg({"browse_temp":np.sum}).reset_index()
pair_temp =copy.copy(pair)
pair_temp.remove('day')
pair_temp.remove('hour')
df_temp["{}_day_hour_browse".format(c)] = df_temp.groupby(pair_temp)["browse_temp"].cumsum()
df_temp["{}_day_hour_browse".format(c)] = df_temp["{}_day_hour_browse".format(c)] - df_temp['browse_temp']
del df_temp['browse_temp']
df_data_temp = pd.merge(df_data_temp,df_temp,how = "left",on = pair )
del df['tmp_count']
return df_data_temp
def roll_click_day_hour(df,column_list):
df = df.sort('context_timestamp')
df_data_temp =df.copy()
for c in column_list:
if isinstance(c, (list, tuple)):
pair = [cc for cc in c]
c = '_'.join(c)
else:
pair = [c]
pair.append('day')
pair.append('hour')
df_temp = df.groupby(pair)['is_trade'].agg({"click_temp":np.sum}).reset_index()
pair_temp = copy.copy(pair)
pair_temp.remove('day')
pair_temp.remove('hour')
df_temp["{}_day_hour_click".format(c)] = df_temp.groupby(pair_temp)["click_temp"].cumsum()
df_temp["{}_day_hour_click".format(c)] = df_temp["{}_day_hour_click".format(c)] - df_temp['click_temp']
del df_temp['click_temp']
df_data_temp = pd.merge(df_data_temp,df_temp,how = "left",on = pair)
return df_data_temp
def roll_rate_day_hour(df,column_list):
print("==========================roll_rate_day ing==============================")
df = roll_browse_day_hour(df,column_list)
df =roll_click_day_hour(df,column_list)
for c in column_list:
if isinstance(c, (list, tuple)):
c = '_'.join(c)
df['%s_day_hour_rate' %c] = bs_utilize(df['%s_day_hour_browse' %c], df['%s_day_hour_click' %c])
# del df['%s_day_browse'%c]
# del df['%s_day_click'%c]
return df
#=================================== per-hour conversion rate ==============================
def roll_browse_hour(df, column_list):
df = df.sort('context_timestamp')
df['tmp_count'] = df['status']
df_data_temp =df.copy()
for c in column_list:
if isinstance(c, (list, tuple)):
pair = [cc for cc in c]
c = '_'.join(c)
else:
pair = [c]
pair.append('hour')
df_temp = df.groupby(pair)['tmp_count'].agg({"browse_temp":np.sum}).reset_index()
pair_temp =copy.copy(pair)
pair_temp.remove('hour')
df_temp["{}_hour_browse".format(c)] = df_temp.groupby(pair_temp)["browse_temp"].cumsum()
df_temp["{}_hour_browse".format(c)] = df_temp["{}_hour_browse".format(c)] - df_temp['browse_temp']
del df_temp['browse_temp']
df_data_temp = pd.merge(df_data_temp,df_temp,how = "left",on = pair )
del df['tmp_count']
return df_data_temp
def roll_click_hour(df,column_list):
df = df.sort('context_timestamp')
df_data_temp =df.copy()
for c in column_list:
if isinstance(c, (list, tuple)):
pair = [cc for cc in c]
c = '_'.join(c)
else:
pair = [c]
pair.append('hour')
df_temp = df.groupby(pair)['is_trade'].agg({"click_temp":np.sum}).reset_index()
pair_temp = copy.copy(pair)
pair_temp.remove('hour')
df_temp["{}_hour_click".format(c)] = df_temp.groupby(pair_temp)["click_temp"].cumsum()
df_temp["{}_hour_click".format(c)] = df_temp["{}_hour_click".format(c)] - df_temp['click_temp']
del df_temp['click_temp']
df_data_temp = pd.merge(df_data_temp,df_temp,how = "left",on = pair)
return df_data_temp
def roll_rate_hour(df,column_list):
print("==========================roll_rate_hour ing==============================")
df = roll_browse_hour(df,column_list)
df =roll_click_hour(df,column_list)
for c in column_list:
if isinstance(c, (list, tuple)):
c = '_'.join(c)
df['%s_hour_rate' %c] = bs_utilize(df['%s_hour_browse' %c], df['%s_hour_click' %c])
return df
def label_encoding(df, columns):
for c in columns:
le = LabelEncoder()
df[c] = le.fit_transform(df[c])
return df
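# Usage sketch for label_encoding above (hypothetical frame and columns; relies on
# the pandas and LabelEncoder imports at the top of this module):
_cat_df = pd.DataFrame({'shop_id': ['a', 'b', 'a'], 'city': ['x', 'x', 'y']})
_cat_df = label_encoding(_cat_df, ['shop_id', 'city'])   # each column becomes integer codes 0..n-1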
# # #----------------统计特征-----------------
# def get_last_diff_statistic(data,col_list, n_last_diff):
# print("=======get_last_diff============\n")
# data_temp = data
# col_id = col_list[0],col_list[1]
# data = data.sort_values([col_id, 'timestamp'])
# data['next_id'] = data[col_id].shift(-1)
# data['next_actionTime'] = data.timestamp.shift(-1)
# data = data.loc[data.next_id == data[col_id]].copy()
# data['action_diff'] = data['next_actionTime'] - data['timestamp']
# if n_last_diff is not None:
# df_n_last_diff = data.groupby(col_id, as_index=False).tail(n_last_diff).copy()
# df_last_diff_statistic = df_n_last_diff.groupby(col_id, as_index=False).action_diff.agg({
# '{}_last_{}_action_diff_mean'.format(col_id,n_last_diff): np.mean,
# '{}_last_{}_action_diff_std'.format(col_id,n_last_diff): np.std,
# '{}_last_{}_action_diff_max'.format(col_id,n_last_diff): np.max,
# '{}_last_{}_action_diff_min'.format(col_id,n_last_diff): np.min
# })
# else:
# grouped_user = data.groupby(col_id, as_index=False)
# n_last_diff = 'all'
# df_last_diff_statistic = grouped_user.action_diff.agg({
# '{}_last_{}_action_diff_mean'.format(col_id,n_last_diff): np.mean,
# '{}_last_{}_action_diff_std'.format(col_id,n_last_diff): np.std,
# '{}_last_{}_action_diff_max'.format(col_id,n_last_diff): np.max,
# '{}_last_{}_action_diff_min'.format(col_id,n_last_diff): np.min
# })
# res_data = pd.merge(data_temp,df_last_diff_statistic,how="left",on = col_id)
# return res_data
# #-----------------------时间特征-----------------------
# # #--时间间隔特征、
# def chafen(df):
# return pd.DataFrame(np.diff(df,axis = 0))
# def get_last_diff(data, col_list,n_last_diff):
# """获取最后 n_last_diff 个动作之间的时间间隔"""
# print("=======get_last_diff============\n")
# for col in col_list:
# col_sort = col.copy()
# col_sort.append('timestamp')
# data = data.sort_values(col_sort,ascending = False)
# data_temp = data.groupby(col)['timestamp'].apply(chafen).reset_index()
# data_temp.columns = [col[0],col[1],'level','time_gap']
# data_temp = data_temp.loc[data_temp.level<n_last_diff]
# data_temp['time_gap'] = -1*data_temp['time_gap']
# data_temp['level'] = str(col[0])+"_"+str(col[1])+"_last_time_gap"+ data_temp['level'].astype('str')
# data_temp = pd.pivot_table(data_temp,index=[col[0],col[1]],values='time_gap',columns='level').reset_index()
# res_data = pd.merge(data,data_temp,how="left",on = [col[0],col[1]])
# return res_data
#--时间间隔特征
def time_diff_feat(data,col_list):
print("get tiem diff...")
for col in col_list:
col_sort = copy.copy(col)
col_sort.append('timestamp')
data_temp = data.sort(col_sort,ascending = True)
data_temp['{}_{}_time_diff'.format(col[0],col[1])] = data_temp.groupby(col)['timestamp'].apply(lambda x:x.diff())
data['{}_{}_time_diff'.format(col[0],col[1])] = data_temp['{}_{}_time_diff'.format(col[0],col[1])].fillna(0)
return data
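# The same per-key gap feature written with current pandas idioms (a sketch that
# mirrors time_diff_feat above on a toy click log; 'timestamp' is in seconds):
import pandas as pd

_clicks = pd.DataFrame({'user_id': [1, 1, 2], 'item_id': [5, 5, 6], 'timestamp': [10, 40, 70]})
_clicks = _clicks.sort_values(['user_id', 'item_id', 'timestamp'])
_clicks['user_id_item_id_time_diff'] = (_clicks.groupby(['user_id', 'item_id'])['timestamp']
                                        .diff().fillna(0))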
def CombinationFeature(data):
print("==============convert_data===============")
data['tm_hour'] = data['hour'] + data['min']/60
data['tm_hour_sin'] = data['tm_hour'].map(lambda x:np.sin((x-12)/24*2*np.pi))
data['tm_hour_cos'] = data['tm_hour'].map(lambda x:np.cos((x-12)/24*2*np.pi))
data_time=data[['user_id','day','hour','min']]
user_query_day = data.groupby(['user_id', 'day']).size().reset_index().rename(columns={0: 'user_query_day'})
user_query_day_hour = data.groupby(['user_id', 'day', 'hour']).size().reset_index().rename(columns={0: 'user_query_day_hour'})
user_query_day_hour_min = data.groupby(['user_id', 'day', 'hour','min']).size().reset_index().rename(columns={0: 'user_query_day_hour_min'})
user_query_day_hour_min_sec = data.groupby(['user_id', 'day', 'hour','min','sec']).size().reset_index().rename(columns={0: 'user_query_day_hour_min_sec'})
user_day_hourmin_mean= data_time.groupby(['user_id', 'day']).mean().reset_index().rename(columns={'hour': 'mean_hour','min':'mean_minuite'})
user_day_hourmin_std= data_time.groupby(['user_id', 'day']).std().reset_index().rename(columns={'hour': 'std_hour','min':'std_minuite'})
user_day_hourmin_max= data_time.groupby(['user_id', 'day']).max().reset_index().rename(columns={'hour': 'max_hour','min':'max_minuite'})
user_day_hourmin_min= data_time.groupby(['user_id', 'day']).min().reset_index().rename(columns={'hour': 'min_hour','min':'min_minuite'})
#-------merge-----
data = pd.merge(data, user_query_day, 'left', on=['user_id', 'day'])
data = pd.merge(data, user_query_day_hour, 'left',on=['user_id', 'day', 'hour'])
data = pd.merge(data, user_query_day_hour_min, 'left',on=['user_id', 'day', 'hour','min'])
data = pd.merge(data, user_query_day_hour_min_sec, 'left',on=['user_id', 'day', 'hour','min','sec'])
data = pd.merge(data, user_day_hourmin_mean, 'left',on=['user_id','day'])
data = pd.merge(data, user_day_hourmin_std, 'left',on=['user_id','day'])
data = pd.merge(data, user_day_hourmin_max, 'left',on=['user_id','day'])
    data = pd.merge(data, user_day_hourmin_min, 'left', on=['user_id', 'day'])  # api: pandas.merge
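# The merge pattern above in miniature: per-key counts from groupby().size(),
# renamed and joined back onto the click log (toy frame, hypothetical columns):
import pandas as pd

_log = pd.DataFrame({'user_id': [1, 1, 2], 'day': [7, 7, 7], 'hour': [10, 11, 10]})
_cnt = (_log.groupby(['user_id', 'day']).size()
        .reset_index().rename(columns={0: 'user_query_day'}))
_log = pd.merge(_log, _cnt, 'left', on=['user_id', 'day'])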
from flowsa.common import WITHDRAWN_KEYWORD
from flowsa.flowbyfunctions import assign_fips_location_system
from flowsa.location import US_FIPS
import math
import pandas as pd
import io
from flowsa.settings import log
from string import digits
YEARS_COVERED = {
"asbestos": "2014-2018",
"barite": "2014-2018",
"bauxite": "2013-2017",
"beryllium": "2014-2018",
"boron": "2014-2018",
"chromium": "2014-2018",
"clay": "2015-2016",
"cobalt": "2013-2017",
"copper": "2011-2015",
"diatomite": "2014-2018",
"feldspar": "2013-2017",
"fluorspar": "2013-2017",
"fluorspar_inports": ["2016", "2017"],
"gallium": "2014-2018",
"garnet": "2014-2018",
"gold": "2013-2017",
"graphite": "2013-2017",
"gypsum": "2014-2018",
"iodine": "2014-2018",
"ironore": "2014-2018",
"kyanite": "2014-2018",
"lead": "2012-2018",
"lime": "2014-2018",
"lithium": "2013-2017",
"magnesium": "2013-2017",
"manganese": "2012-2016",
"manufacturedabrasive": "2017-2018",
"mica": "2014-2018",
"molybdenum": "2014-2018",
"nickel": "2012-2016",
"niobium": "2014-2018",
"peat": "2014-2018",
"perlite": "2013-2017",
"phosphate": "2014-2018",
"platinum": "2014-2018",
"potash": "2014-2018",
"pumice": "2014-2018",
"rhenium": "2014-2018",
"salt": "2013-2017",
"sandgravelconstruction": "2013-2017",
"sandgravelindustrial": "2014-2018",
"silver": "2012-2016",
"sodaash": "2010-2017",
"sodaash_t4": ["2016", "2017"],
"stonecrushed": "2013-2017",
"stonedimension": "2013-2017",
"strontium": "2014-2018",
"talc": "2013-2017",
"titanium": "2013-2017",
"tungsten": "2013-2017",
"vermiculite": "2014-2018",
"zeolites": "2014-2018",
"zinc": "2013-2017",
"zirconium": "2013-2017",
}
def usgs_myb_year(years, current_year_str):
"""
Sets the column for the string based on the year. Checks that the year
you picked is in the last file.
    :param years: string, year range joined with a hyphen (e.g. "2014-2018")
:param current_year_str: string, year of interest
:return: string, year
"""
years_array = years.split("-")
lower_year = int(years_array[0])
upper_year = int(years_array[1])
current_year = int(current_year_str)
if lower_year <= current_year <= upper_year:
column_val = current_year - lower_year + 1
return "year_" + str(column_val)
else:
log.info("Your year is out of scope. Pick a year between %s and %s",
lower_year, upper_year)
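# Worked example of the year-to-column mapping above: within the span
# "2014-2018", 2014 resolves to the first data column and 2016 to the third.
assert usgs_myb_year("2014-2018", "2014") == "year_1"
assert usgs_myb_year("2014-2018", "2016") == "year_3"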
def usgs_myb_name(USGS_Source):
"""
Takes the USGS source name and parses it so it can be used in other parts
of Flow by activity.
:param USGS_Source: string, usgs source name
:return:
"""
source_split = USGS_Source.split("_")
name_cc = str(source_split[2])
name = ""
for char in name_cc:
if char.isupper():
name = name + " " + char
else:
name = name + char
name = name.lower()
name = name.strip()
return name
def usgs_myb_static_variables():
"""
Populates the data values for Flow by activity that are the same
for all of USGS_MYB Files
:return:
"""
data = {}
data["Class"] = "Geological"
data['FlowType'] = "ELEMENTARY_FLOWS"
data["Location"] = US_FIPS
data["Compartment"] = "ground"
data["Context"] = None
data["ActivityConsumedBy"] = None
return data
def usgs_myb_remove_digits(value_string):
"""
Eliminates numbers in a string
:param value_string:
:return:
"""
remove_digits = str.maketrans('', '', digits)
return_string = value_string.translate(remove_digits)
return return_string
def usgs_myb_url_helper(*, build_url, **_):
"""
This helper function uses the "build_url" input from flowbyactivity.py,
which is a base url for data imports that requires parts of the url text
string to be replaced with info specific to the data year. This function
does not parse the data, only modifies the urls from which data is
obtained.
:param build_url: string, base url
:param config: dictionary, items in FBA method yaml
    :param args: dictionary, arguments specified when running
        flowbyactivity.py ('year' and 'source')
:return: list, urls to call, concat, parse, format into Flow-By-Activity
format
"""
return [build_url]
def usgs_asbestos_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[4:11]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 12:
for x in range(12, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
if len(df_data. columns) == 12:
df_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['asbestos'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_asbestos_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
product = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['asbestos'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
product = "imports"
elif df.iloc[index]["Production"].strip() == \
"Exports and reexports:":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['asbestos'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "nan":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(dataframe,
str(year))
return dataframe
def usgs_barite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param url: string, url
:param resp: df, response from url call
:param args: dictionary, arguments specified when running
flowbyactivity.py ('year' and 'source')
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(
io.BytesIO(resp.content), sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[7:14]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data. columns) == 11:
df_data.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['barite'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_barite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param args: dictionary, used to run flowbyactivity.py
('year' and 'source')
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['barite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:3":
product = "imports"
elif df.iloc[index]["Production"].strip() == \
"Crude, sold or used by producers:":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports:2":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['barite'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_bauxite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[6:14]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if len(df_data_one. columns) == 11:
df_data_one.columns = ["Production", "space_2", "year_1", "space_3",
"year_2", "space_4", "year_3", "space_5",
"year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['bauxite'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_bauxite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param args: dictionary, used to run flowbyactivity.py
('year' and 'source')
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production", "Total"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['bauxite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Production":
prod = "production"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption, as shipped:":
prod = "import"
elif df.iloc[index]["Production"].strip() == \
"Exports, as shipped:":
prod = "export"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
flow_amount = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = flow_amount
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_beryllium_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param url: string, url
:param resp: df, response from url call
:param args: dictionary, arguments specified when running
flowbyactivity.py ('year' and 'source')
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T4')
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_1 = pd.DataFrame(df_raw_data_two.loc[6:9]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_data_2 = pd.DataFrame(df_raw_data.loc[12:12]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
if len(df_data_2.columns) > 11:
for x in range(11, len(df_data_2.columns)):
col_name = "Unnamed: " + str(x)
del df_data_2[col_name]
if len(df_data_1. columns) == 11:
df_data_1.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
if len(df_data_2. columns) == 11:
df_data_2.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['beryllium'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
for col in df_data_2.columns:
if col not in col_to_use:
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_beryllium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param args: dictionary, used to run flowbyactivity.py
('year' and 'source')
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["United States6", "Mine shipments1",
"Imports for consumption, beryl2"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['beryllium'], year)
for df in df_list:
for index, row in df.iterrows():
prod = "production"
if df.iloc[index]["Production"].strip() == \
"Imports for consumption, beryl2":
prod = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
data["Description"] = name
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_boron_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param url: string, url
:param resp: df, response from url call
:param args: dictionary, arguments specified when running
flowbyactivity.py ('year' and 'source')
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data.loc[8:8]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
    df_data_two = pd.DataFrame(df_raw_data.loc[21:22])  # api: pandas.DataFrame
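# The *_call functions above share one pattern: load a named sheet from the USGS
# workbook, keep a fixed block of rows and re-number the index. A sketch of the
# row-block step on a toy frame (the real code gets df_raw from pd.io.excel.read_excel):
import pandas as pd

_raw = pd.DataFrame({"Production": ["header", "Quantity", "Value"], "year_1": [None, 10, 20]})
_block = pd.DataFrame(_raw.loc[1:2]).reset_index(drop=True)   # keep rows 1-2 with a fresh 0-based index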
import numpy as np
import pandas as pd
import pytest
from zentables.zentables import _do_suppression
@pytest.fixture(scope="function")
def random() -> np.random.Generator:
return np.random.default_rng(123456)
def test_negative_numbers():
"""
Suppression should work on the _absolute value_ of the numbers, not the
signed value
"""
# In this case, -5 and 8 will get suppressed because there's only one value
# in their column suppressed
input_array = np.array([[1, 4, 5], [-5, 8, 9]])
expected_array = np.array([[True, True, False], [True, True, False]])
df = pd.DataFrame(input_array)
mask = _do_suppression(df, low=1, high=5)
assert (mask.values == expected_array).all()
def test_multiple_rows():
"""
If there are several choices, then we should suppress the second lowest value
"""
# In this case, -5 and 8 will get suppressed because there's only one value
# in their column suppressed and they are the smallest numbers
input_array = np.array([[1, 4, 5], [20, 40, 9], [-5, 8, 9]])
expected_array = np.array(
[[True, True, False], [False, False, False], [True, True, False]]
)
    df = pd.DataFrame(input_array)  # api: pandas.DataFrame
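# A further sketch in the same spirit, assuming the _do_suppression semantics the
# two tests above rely on (cells whose absolute value lies in [low, high] are
# primary-suppressed): a table with no small cells should come back unmasked.
def test_no_small_cells_means_no_suppression():
    input_array = np.array([[10, 20, 30], [40, 50, 60]])
    df = pd.DataFrame(input_array)
    mask = _do_suppression(df, low=1, high=5)
    assert not mask.values.any()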
from datetime import (
datetime,
timedelta,
timezone,
)
import numpy as np
import pytest
import pytz
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
class TestSeriesFillNA:
def test_fillna_nat(self):
series = Series([0, 1, 2, NaT.value], dtype="M8[ns]")
filled = series.fillna(method="pad")
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="pad")
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
series = Series([NaT.value, 0, 1, 2], dtype="M8[ns]")
filled = series.fillna(method="bfill")
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="bfill")
filled2 = df.fillna(value=series[1])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
def test_fillna_value_or_method(self, datetime_series):
msg = "Cannot specify both 'value' and 'method'"
with pytest.raises(ValueError, match=msg):
datetime_series.fillna(value=0, method="ffill")
def test_fillna(self):
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method="ffill"))
ts[2] = np.NaN
exp = Series([0.0, 1.0, 1.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="ffill"), exp)
exp = Series([0.0, 1.0, 3.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="backfill"), exp)
exp = Series([0.0, 1.0, 5.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
msg = "Must specify a fill 'value' or 'method'"
with pytest.raises(ValueError, match=msg):
ts.fillna()
def test_fillna_nonscalar(self):
# GH#5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.0])
tm.assert_series_equal(result, expected)
result = s1.fillna({})
tm.assert_series_equal(result, s1)
result = s1.fillna(Series((), dtype=object))
tm.assert_series_equal(result, s1)
result = s2.fillna(s1)
tm.assert_series_equal(result, s2)
result = s1.fillna({0: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna({1: 1})
tm.assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
tm.assert_series_equal(result, s1)
def test_fillna_aligns(self):
s1 = Series([0, 1, 2], list("abc"))
s2 = Series([0, np.nan, 2], list("bac"))
result = s2.fillna(s1)
expected = Series([0, 0, 2.0], list("bac"))
tm.assert_series_equal(result, expected)
def test_fillna_limit(self):
ser = Series(np.nan, index=[0, 1, 2])
result = ser.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
result = ser.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
def test_fillna_dont_cast_strings(self):
# GH#9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ["0", "1.5", "-0.3"]
for val in vals:
ser = Series([0, 1, np.nan, np.nan, 4], dtype="float64")
result = ser.fillna(val)
expected = Series([0, 1, val, val, 4], dtype="object")
tm.assert_series_equal(result, expected)
def test_fillna_consistency(self):
# GH#16402
# fillna with a tz aware to a tz-naive, should result in object
ser = Series([Timestamp("20130101"), NaT])
result = ser.fillna(Timestamp("20130101", tz="US/Eastern"))
expected = Series(
[Timestamp("20130101"), Timestamp("2013-01-01", tz="US/Eastern")],
dtype="object",
)
tm.assert_series_equal(result, expected)
msg = "The 'errors' keyword in "
with tm.assert_produces_warning(FutureWarning, match=msg):
# where (we ignore the errors=)
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
# with a non-datetime
result = ser.fillna("foo")
expected = Series([Timestamp("20130101"), "foo"])
tm.assert_series_equal(result, expected)
# assignment
ser2 = ser.copy()
ser2[1] = "foo"
tm.assert_series_equal(ser2, expected)
def test_fillna_downcast(self):
# GH#15277
# infer int64 from float64
ser = Series([1.0, np.nan])
result = ser.fillna(0, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
ser = Series([1.0, np.nan])
result = ser.fillna({1: 0}, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
def test_timedelta_fillna(self, frame_or_series):
# GH#3371
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
td = ser.diff()
obj = frame_or_series(td)
# reg fillna
result = obj.fillna(Timedelta(seconds=0))
expected = Series(
[
timedelta(0),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# interpreted as seconds, no longer supported
msg = "value should be a 'Timedelta', 'NaT', or array of those. Got 'int'"
with pytest.raises(TypeError, match=msg):
obj.fillna(1)
result = obj.fillna(Timedelta(seconds=1))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(timedelta(days=1, seconds=1))
expected = Series(
[
timedelta(days=1, seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(np.timedelta64(10 ** 9))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(NaT)
expected = Series(
[
NaT,
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
],
dtype="m8[ns]",
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# ffill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.ffill()
expected = td.fillna(Timedelta(seconds=0))
expected[0] = np.nan
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# bfill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.bfill()
expected = td.fillna(Timedelta(seconds=0))
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
def test_datetime64_fillna(self):
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
ser[2] = np.nan
# ffill
result = ser.ffill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
# bfill
result = ser.bfill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
def test_datetime64_fillna_backfill(self):
# GH#6587
# make sure that we are treating as integer when filling
msg = "containing strings is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
# this also tests inference of a datetime-like with NaT's
ser = Series([NaT, NaT, "2013-08-05 15:30:00.000001"])
expected = Series(
[
"2013-08-05 15:30:00.000001",
"2013-08-05 15:30:00.000001",
"2013-08-05 15:30:00.000001",
],
dtype="M8[ns]",
)
result = ser.fillna(method="backfill")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", ["US/Eastern", "Asia/Tokyo"])
def test_datetime64_tz_fillna(self, tz):
# DatetimeLikeBlock
ser = Series(
[
Timestamp("2011-01-01 10:00"),
NaT,
Timestamp("2011-01-03 10:00"),
NaT,
]
)
null_loc = Series([False, True, False, True])
result = ser.fillna(Timestamp("2011-01-02 10:00"))
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-02 10:00"),
]
)
tm.assert_series_equal(expected, result)
# check s is not changed
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz))
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna("AAA")
expected = Series(
[
Timestamp("2011-01-01 10:00"),
"AAA",
Timestamp("2011-01-03 10:00"),
"AAA",
],
dtype=object,
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{
1: Timestamp("2011-01-02 10:00", tz=tz),
3: Timestamp("2011-01-04 10:00"),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{1: Timestamp("2011-01-02 10:00"), 3: Timestamp("2011-01-04 10:00")}
)
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
# DatetimeTZBlock
idx = DatetimeIndex(["2011-01-01 10:00", NaT, "2011-01-03 10:00", NaT], tz=tz)
ser = Series(idx)
assert ser.dtype == f"datetime64[ns, {tz}]"
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-02 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz))
idx = DatetimeIndex(
[
"2011-01-01 10:00",
"2011-01-02 10:00",
"2011-01-03 10:00",
"2011-01-02 10:00",
],
tz=tz,
)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz).to_pydatetime())
idx = DatetimeIndex(
[
"2011-01-01 10:00",
"2011-01-02 10:00",
"2011-01-03 10:00",
"2011-01-02 10:00",
],
tz=tz,
)
expected = | Series(idx) | pandas.Series |
import numpy as np
import pdb
import gzip
import matplotlib
import matplotlib.pyplot as plt
import cPickle as pkl
import operator
import scipy.io as sio
import os.path
import pandas as pd
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
np.random.seed(23254)
def parse(path):
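# Each line of the gzipped file appears to hold a Python-literal record; eval() parses one record per line.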
g = gzip.open(path, 'r')
for l in g:
yield eval(l)
def getuserCache(df):
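# Build a {user id: sorted list of item ids} lookup from the interaction dataframe.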
userCache = {}
for uid in sorted(df.uid.unique().tolist()):
items = sorted(df.loc[df.uid == uid]['iid'].values.tolist())
userCache[uid] = items
return userCache
def getitemCache(df):
itemCache = {}
for iid in sorted(df.iid.unique().tolist()):
users = sorted(df.loc[df.iid == iid]['uid'].values.tolist())
itemCache[iid] = users
return itemCache
def readData(dataset):
totalFile = pd.read_csv('data/'+dataset+'/ratings.dat',sep="\t",usecols=[0,1],names=['uid','iid'],header=0)
total_uids = sorted(totalFile.uid.unique())
total_iids = sorted(totalFile.iid.unique())
trainFile = pd.read_csv('data/'+dataset+'/LOOTrain.dat',sep="\t",usecols=[0,1],names=['uid','iid'],header=0)
train_uids = sorted(trainFile.uid.unique())
train_iids = sorted(trainFile.iid.unique())
userCache = getuserCache(trainFile)
itemCache = getitemCache(trainFile)
root = "data/"+dataset
# Read data
df_data = | pd.read_csv(root+'/u.data',sep="\t",names=['uid','iid','rating']) | pandas.read_csv |
"""unit test for loanpy.loanfinder.py (2.0 BETA) for pytest 7.1.1"""
from inspect import ismethod
from os import remove
from pathlib import Path
from unittest.mock import patch, call
from pandas import DataFrame, RangeIndex, Series, read_csv
from pandas.testing import (assert_frame_equal, assert_index_equal,
assert_series_equal)
from pytest import raises
from loanpy.loanfinder import Search, gen, read_data, NoPhonMatch
from loanpy import loanfinder as lf
def test_read_data():
"""test if data is being read correctly"""
# setup expected outcome, path, input-dataframe, mock pandas.read_csv
srsexp = | Series(["a", "b", "c"], name="col1", index=[0, 1, 1]) | pandas.Series |
import numpy as np
from numpy import where
from pandas import DataFrame
from src.support import get_samples, display_cross_tab
from src.model import fit_predict, preprocessing_pipeline
from src.plots import create_model_plots, plot_smd
from src.propensity import create_matched_df, calc_smd
class PropensityScorer:
def __init__(self):
self.model_dispute = None
self.model_propensity=None
self.df = DataFrame()
self.model_input = DataFrame()
self.df_balanced = DataFrame()
self.smd_scores = | DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""Main formatting source code to format modelling results for plotting.
This code was written to process PLEXOS HDF5 outputs to get them ready for plotting.
Once the data is processed it is outputted as an intermediary HDF5 file format so that
it can be read into the marmot_plot_main.py file
@author: <NAME>
"""
# ===============================================================================
# Import Python Libraries
# ===============================================================================
import os
import sys
import pathlib
FILE_DIR = pathlib.Path(__file__).parent.absolute() # Location of this module
if __name__ == '__main__': # Add Marmot directory to sys path if running from __main__
if os.path.dirname(os.path.dirname(__file__)) not in sys.path:
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
os.chdir(pathlib.Path(__file__).parent.absolute().parent.absolute())
import time
import re
import logging
import logging.config
import pandas as pd
import h5py
import yaml
from typing import Union
try:
from marmot.meta_data import MetaData
except ModuleNotFoundError:
print("Attempted import of Marmot as a module from a Git directory. ", end='')
print("Import of Marmot will not function in this way. ", end='')
print("To import Marmot as a module use the preferred method of pip installing Marmot, ", end='')
print("or add the Marmot directory to the system path, see ReadME for details.\n")
print("System will now exit")
sys.exit()
import marmot.config.mconfig as mconfig
# Import as Submodule
try:
from h5plexos.query import PLEXOSSolution
except ModuleNotFoundError:
from marmot.h5plexos.h5plexos.query import PLEXOSSolution
# A bug in pandas requires this to be included,
# otherwise df.to_string truncates long strings. Fix available in Pandas 1.0
# but leaving here in case user version not up to date
pd.set_option("display.max_colwidth", 1000)
# Conversion units dict; each value is a tuple of (new unit name, conversion multiplier)
UNITS_CONVERSION = {
'kW': ('MW', 1e-3),
'MW': ('MW', 1),
'GW': ('MW', 1e3),
'TW': ('MW', 1e6),
'kWh': ('MWh', 1e-3),
'MWh': ('MWh', 1),
'GWh': ('MWh', 1e3),
'TWh': ('MWh', 1e6),
'lb': ('kg', 0.453592),
'ton': ('kg', 907.18474),
'kg': ('kg', 1),
'tonne': ('kg', 1000),
'$': ('$', 1),
'$000': ('$', 1000),
'h': ('h', 1),
'MMBTU': ('MMBTU', 1),
'GBTU': ('MMBTU', 1000),
'GJ"': ('MMBTU', 0.947817),
'TJ': ('MMBTU', 947.817120),
'$/MW': ('$/MW', 1),
'lb/MWh' : ('kg/MWh', 0.453592),
'Kg/MWh': ('Kg/MWh', 1)
}
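# Illustrative helper (an assumption, not used elsewhere in this module): how the
# (new unit, multiplier) tuples above would typically be applied to a raw value.
def _convert_units_example(value: float, unit: str):
    """Return (converted value, new unit name), falling back to the input unit if unknown."""
    new_unit, multiplier = UNITS_CONVERSION.get(unit, (unit, 1))
    return value * multiplier, new_unit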
class SetupLogger():
"""Sets up the python logger.
This class handles the following.
1. Configures logger from marmot_logging_config.yml file.
2. Handles rollover of log file on each instantiation.
3. Sets log_directory.
4. Append optional suffix to the end of the log file name
Optional suffix is useful when running multiple processes in parallel to
allow logging to separate files.
"""
def __init__(self, log_directory: str = 'logs',
log_suffix: str = None):
"""
Args:
log_directory (str, optional): log directory to save logs.
Defaults to 'logs'.
log_suffix (str, optional): Optional suffix to add to end of log file.
Defaults to None.
"""
if log_suffix is None:
self.log_suffix = ''
else:
self.log_suffix = f'_{log_suffix}'
current_dir = os.getcwd()
os.chdir(FILE_DIR)
try:
os.makedirs(log_directory)
except FileExistsError:
# log directory already exists
pass
with open('config/marmot_logging_config.yml', 'rt') as f:
conf = yaml.safe_load(f.read())
conf['handlers']['warning_handler']['filename'] = \
(conf['handlers']['warning_handler']['filename']
.format(log_directory, 'formatter', self.log_suffix))
conf['handlers']['info_handler']['filename'] = \
(conf['handlers']['info_handler']['filename']
.format(log_directory, 'formatter', self.log_suffix))
logging.config.dictConfig(conf)
self.logger = logging.getLogger('marmot_format')
# Creates a new log file for next run
self.logger.handlers[1].doRollover()
self.logger.handlers[2].doRollover()
os.chdir(current_dir)
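# Illustrative usage sketch (assumption, not called anywhere in this module):
# each parallel formatting process can write to its own log file via a suffix, e.g.
#   logger = SetupLogger(log_directory='logs', log_suffix='partition_1').logger
#   logger.info("Formatting started")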
class Process(SetupLogger):
"""Process PLEXOS class specific data from h5plexos database.
All methods are PLEXOS Class specific e.g generator, region, zone, line etc.
"""
def __init__(self, df: pd.DataFrame, metadata: MetaData,
model: str, Region_Mapping: pd.DataFrame,
emit_names: pd.DataFrame, logger: logging.Logger):
"""
Args:
df (pd.DataFrame): Unprocessed h5plexos dataframe containing
class and property specifc data.
metadata (MetaData): Instantiation of MetaData for specific
h5plexos file.
model (str): Name of specific PLEXOS model partition
Region_Mapping (pd.DataFrame): DataFrame to map custom
regions/zones to create custom aggregations.
emit_names (pd.DataFrame): DataFrame with 2 columns to rename
emission names.
logger (logging.Logger): logger object from SetupLogger.
"""
# certain methods require information from metadata. metadata is now
# passed in as an instance of MetaData class for the appropriate model
self.df = df
self.metadata = metadata
self.model = model
self.Region_Mapping = Region_Mapping
self.emit_names = emit_names
self.logger = logger
if not self.emit_names.empty:
self.emit_names_dict = (self.emit_names[['Original', 'New']]
.set_index("Original").to_dict()["New"])
def df_process_generator(self) -> pd.DataFrame:
"""Format PLEXOS Generator Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property"])
df.index.rename(['tech', 'gen_name'], level=['category', 'name'], inplace=True)
if self.metadata.region_generator_category(self.model).empty is False:
region_gen_idx = pd.CategoricalIndex(self.metadata.region_generator_category(self.model)
.index.get_level_values(0))
region_gen_idx = region_gen_idx.repeat(len(df.index.get_level_values('timestamp').unique()))
idx_region = pd.MultiIndex(levels=df.index.levels + [region_gen_idx.categories],
codes=df.index.codes + [region_gen_idx.codes],
names=df.index.names + region_gen_idx.names)
else:
idx_region = df.index
if self.metadata.zone_generator_category(self.model).empty is False:
zone_gen_idx = pd.CategoricalIndex(self.metadata.zone_generator_category(self.model)
.index.get_level_values(0))
zone_gen_idx = zone_gen_idx.repeat(len(df.index.get_level_values('timestamp').unique()))
idx_zone = pd.MultiIndex(levels=idx_region.levels + [zone_gen_idx.categories],
codes=idx_region.codes + [zone_gen_idx.codes],
names=idx_region.names + zone_gen_idx.names)
else:
idx_zone = idx_region
if not self.Region_Mapping.empty:
region_gen_mapping_idx = pd.MultiIndex.from_frame(self.metadata.region_generator_category(self.model)
.merge(self.Region_Mapping,
how="left",
on='region')
.sort_values(by=['tech', 'gen_name'])
.drop(['region', 'tech', 'gen_name'], axis=1)
)
region_gen_mapping_idx = region_gen_mapping_idx.repeat(len(df.index.get_level_values('timestamp').unique()))
idx_map = pd.MultiIndex(levels=idx_zone.levels + region_gen_mapping_idx.levels,
codes=idx_zone.codes + region_gen_mapping_idx.codes,
names=idx_zone.names + region_gen_mapping_idx.names)
else:
idx_map = idx_zone
df = pd.DataFrame(data=df.values.reshape(-1), index=idx_map)
df_col = list(df.index.names) # Gets names of all columns in df and places in list
df_col.insert(0, df_col.pop(df_col.index("timestamp"))) # move timestamp to start of df
df = df.reorder_levels(df_col, axis=0)
df[0] = pd.to_numeric(df[0], downcast='float')
return df
def df_process_region(self) -> pd.DataFrame:
"""Format PLEXOS Region Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property", "category"])
df.index.rename('region', level='name', inplace=True)
# checks if Region_Mapping contains data to merge, skips if empty
if not self.Region_Mapping.empty:
mapping_idx = pd.MultiIndex.from_frame(self.metadata.regions(self.model)
.merge(self.Region_Mapping,
how="left",
on='region')
.drop(['region', 'category'], axis=1)
)
mapping_idx = mapping_idx.repeat(len(df.index.get_level_values('timestamp').unique()))
idx = pd.MultiIndex(levels=df.index.levels + mapping_idx.levels,
codes=df.index.codes + mapping_idx.codes,
names=df.index.names + mapping_idx.names)
else:
idx = df.index
df = pd.DataFrame(data=df.values.reshape(-1), index=idx)
df_col = list(df.index.names) # Gets names of all columns in df and places in list
df_col.insert(0, df_col.pop(df_col.index("timestamp"))) # Move timestamp to start of df
df = df.reorder_levels(df_col, axis=0)
df[0] = pd.to_numeric(df[0], downcast='float')
return df
def df_process_zone(self) -> pd.DataFrame:
"""Format PLEXOS Zone Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property", "category"])
df.index.rename('zone', level='name', inplace=True)
df = pd.DataFrame(data=df.values.reshape(-1), index=df.index)
df_col = list(df.index.names) # Gets names of all columns in df and places in list
df_col.insert(0, df_col.pop(df_col.index("timestamp"))) # move timestamp to start of df
df = df.reorder_levels(df_col, axis=0)
df[0] = | pd.to_numeric(df[0], downcast='float') | pandas.to_numeric |
from .statistic import StatisticHistogram
import singlecellmultiomics.pyutils as pyutils
import collections
import pandas as pd
import matplotlib.pyplot as plt
class MappingQualityHistogram(StatisticHistogram):
def __init__(self, args):
StatisticHistogram.__init__(self, args)
self.histogram = collections.Counter()
def processRead(self, read):
self.histogram[read.mapping_quality] += 1
def __repr__(self):
return f'The average mapping quality is {pyutils.meanOfCounter(self.histogram)}, SD:{pyutils.varianceOfCounter(self.histogram)}'
def get_df(self):
return | pd.DataFrame.from_dict({'mq': self.histogram}) | pandas.DataFrame.from_dict |
import os
import pandas as pd
from datetime import datetime
from maldives.technical_analysis import TA
from maldives.bot.models.dealer import Dealer
from pandas import DataFrame
class Wallet:
cache_file: str = '../data/transactions.csv'
data: DataFrame
assets: dict
def __init__(self):
self.data = DataFrame(columns=['date', 'symbol', 'type', 'amount', 'price'])
self.data['date'] = | pd.to_datetime(self.data['date']) | pandas.to_datetime |
from transformers import RobertaTokenizer, RobertaForSequenceClassification, AdamW
import torch
import json
from sklearn import metrics
from tqdm import tqdm
import numpy as np
from time import time
from datetime import timedelta
import pandas as pd
from sklearn.model_selection import train_test_split
import argparse
import torch.nn as nn
import random
def get_loader(dataset, tokenizer, batchsize=16, padsize=256, want_cate="Risk Ignorance"):
batch_inputs, batch_labels = [], []
inputs1, inputs2, categories, labels_ = [d['context'] for d in dataset], [d['response'] for d in dataset], [d['category'] for d in dataset], [d['label'] for d in dataset]
labels = []
for category, label in zip(categories, labels_):
if category==want_cate:
labels.append(int(label=='Unsafe'))
else:
labels.append(2)
for start in tqdm(range(0, len(inputs1), batchsize)):
tmp_batch = tokenizer(text=inputs1[start:min(start + batchsize, len(inputs1))],
text_pair=inputs2[start:min(start + batchsize, len(inputs1))],
return_tensors="pt", truncation=True, padding='max_length', max_length=padsize)
batch_inputs.append(tmp_batch)
tmp_label = torch.LongTensor(labels[start:min(start + batchsize, len(inputs1))])
batch_labels.append(tmp_label)
return batch_inputs, batch_labels
def get_loader_resp(dataset, tokenizer, batchsize=16, padsize=256, want_cate="Risk Ignorance"):
batch_inputs, batch_labels = [], []
inputs1, inputs2, categories, labels_ = [d['context'] for d in dataset], [d['response'] for d in dataset], [d['category'] for d in dataset], [d['label'] for d in dataset]
labels = []
for category, label in zip(categories, labels_):
if category==want_cate:
labels.append(int(label=='Unsafe'))
else:
labels.append(2)
for start in tqdm(range(0, len(inputs2), batchsize)):
tmp_batch = tokenizer(text=inputs2[start:min(start + batchsize, len(inputs2))],
return_tensors="pt", truncation=True, padding='max_length', max_length=padsize)
batch_inputs.append(tmp_batch)
tmp_label = torch.LongTensor(labels[start:min(start + batchsize, len(inputs2))])
batch_labels.append(tmp_label)
return batch_inputs, batch_labels
def predict(model, batch_inputs):
model.eval()
probs_all = np.zeros((0,3))
with torch.no_grad():
for inputs in tqdm(batch_inputs):
inputs = inputs.to(device)
outputs = model(**inputs)
logits = outputs.logits
prob = torch.softmax(logits, dim=1).cpu().numpy()
probs_all = np.concatenate((probs_all, prob),axis=0)
return probs_all
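# Illustrative helper (my own sketch, not from the original script): turn the
# batched class probabilities returned by predict() into hard labels and a report.
def evaluate_probs(probs_all, gold_labels):
    # probs_all: (N, 3) array of class probabilities; gold_labels: length-N list of class ids
    preds = np.argmax(probs_all, axis=1)
    return metrics.classification_report(gold_labels, preds)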
with open('../DiaSafety_dataset/test.json', 'r') as f:
test = json.load(f)
import pandas as pd
df = | pd.DataFrame.from_dict(test) | pandas.DataFrame.from_dict |
import utils
import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit
import requests
import pandas as pd
import os
BASE_DATA_DIR = "/p/adversarialml/as9rw/datasets/census"
SUPPORTED_PROPERTIES = ["sex", "race", "none"]
PROPERTY_FOCUS = {"sex": "Female", "race": "White"}
# US Income dataset
class CensusIncome:
def __init__(self, path=BASE_DATA_DIR):
self.urls = [
"http://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data",
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.names",
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test"
]
self.columns = [
"age", "workClass", "fnlwgt", "education", "education-num",
"marital-status", "occupation", "relationship",
"race", "sex", "capital-gain", "capital-loss",
"hours-per-week", "native-country", "income"
]
self.dropped_cols = ["education", "native-country"]
self.path = path
self.download_dataset()
# self.load_data(test_ratio=0.4)
self.load_data(test_ratio=0.5)
# Download dataset, if not present
def download_dataset(self):
if not os.path.exists(self.path):
print("==> Downloading US Census Income dataset")
os.mkdir(self.path)
print("==> Please modify test file to remove stray dot characters")
for url in self.urls:
data = requests.get(url).content
filename = os.path.join(self.path, os.path.basename(url))
with open(filename, "wb") as file:
file.write(data)
# Process, handle one-hot conversion of data etc
def process_df(self, df):
df['income'] = df['income'].apply(lambda x: 1 if '>50K' in x else 0)
def oneHotCatVars(x, colname):
df_1 = x.drop(columns=colname, axis=1)
df_2 = pd.get_dummies(x[colname], prefix=colname, prefix_sep=':')
return ( | pd.concat([df_1, df_2], axis=1, join='inner') | pandas.concat |
from sqlalchemy import true
import FinsterTab.W2020.DataForecast
import datetime as dt
from FinsterTab.W2020.dbEngine import DBEngine
import pandas as pd
import sqlalchemy as sal
import numpy
from datetime import datetime, timedelta, date
import pandas_datareader.data as dr
def get_past_data(self):
"""
Get raw data from Yahoo! Finance for SPY during Great Recession
Store data in MySQL database
:param sources: provides ticker symbols of instruments being tracked
"""
# Assume that the current date is 2009-01-01 for this historical backtest
now = dt.date(2009, 1, 1) # Date Variables
start = now - timedelta(days=1500) # get date value from 5 years ago
end = now
# data will be a 2D Pandas Dataframe
data = dr.DataReader('SPY', 'yahoo', start, end)
symbol = [3] * len(data) # add column to identify instrument id number
data['instrumentid'] = symbol
data = data.reset_index() # no designated index - easier to work with mysql database
# Yahoo! Finance columns to match column names in MySQL database.
# Column names are kept same to avoid any ambiguity.
# Column names are not case-sensitive.
data.rename(columns={'Date': 'date', 'High': 'high', 'Low': 'low', 'Open': 'open', 'Close': 'close',
'Adj Close': 'adj close', 'Volume': 'volume'}, inplace=True)
data.sort_values(by=['date']) # make sure data is ordered by trade date
# send data to database
# replace data each time program is run
data.to_sql('dbo_paststatistics', self.engine, if_exists=('replace'),
index=False,
dtype={'date': sal.Date, 'open': sal.FLOAT, 'high': sal.FLOAT, 'low': sal.FLOAT,
'close': sal.FLOAT, 'adj close': sal.FLOAT, 'volume': sal.FLOAT})
# Tests the accuracy of the old functions
def accuracy(self):
query = 'SELECT * FROM dbo_algorithmmaster'
algorithm_df = pd.read_sql_query(query, self.engine)
query = 'SELECT * FROM dbo_instrumentmaster'
instrument_master_df = pd.read_sql_query(query, self.engine)
# Changes algorithm code
for code in range(len(algorithm_df)):
# Dynamic range for changing instrument ID starting at 1
for ID in range(1, len(instrument_master_df) + 1):
query = 'SELECT * FROM dbo_algorithmforecast AS a, dbo_instrumentstatistics AS b WHERE a.forecastdate = b.date AND' \
' a.instrumentid = %d AND b.instrumentid = %d AND a.algorithmcode = "%s"' % (
ID, ID, algorithm_df['algorithmcode'][code])
df = pd.read_sql_query(query, self.engine)
count = 0
# Calculates accuracy
for x in range((len(df) - 1)):
# Check if upward or downward trend
if (df['close'][x + 1] > df['close'][x] and df['forecastcloseprice'][x + 1] > df['forecastcloseprice'][
x]) or \
(df['close'][x + 1] < df['close'][x] and df['forecastcloseprice'][x + 1] <
df['forecastcloseprice'][
x]):
count += 1
# Populates absolute_percent_error with the calculated percent error for a specific data point
absolute_percent_error = []
for i in range(len(df)):
absolute_percent_error.append(
abs((df['close'].loc[i] - df['forecastcloseprice'].loc[i]) / df['close'].loc[i]))
# Calculate sum of percent error and find average
average_percent_error = 0
for i in absolute_percent_error:
average_percent_error = average_percent_error + i
average_percent_error = average_percent_error / len(df)
# return the average percent error calculated above
print("Average percent error for instrument: %d and algorithm: %s " % (ID, algorithm_df['algorithmcode'][code]), average_percent_error)
#print('Algorithm:', algorithm_df['algorithmcode'][code])
#print('instrumentid: %d' % ID, instrument_master_df['instrumentname'][ID - 1])
#print('length of data is:', len(df))
#print('number correct: ', count)
d = len(df)
b = (count / d) * 100
#print('The accuracy is: %.2f%%\n' % b)
# Isolated tests for ARIMA, as we were trying to determine why it was so accurate
def arima_accuracy(self):
query = 'SELECT * FROM dbo_algorithmforecast AS a, dbo_instrumentstatistics AS b WHERE a.forecastdate = b.date AND' \
' a.instrumentid = 1 AND b.instrumentid = 1 AND a.algorithmcode = "ARIMA"'
df = pd.read_sql_query(query, self.engine)
df = df.tail(10)
df = df.reset_index(drop=true)
#print(df)
arima_count = 0
for x in range((len(df) - 1)):
# Check if upward or downward trend
if df['close'][x + 1] > df['close'][x] and df['forecastcloseprice'][x + 1] > df['forecastcloseprice'][x] \
or (df['close'][x + 1] < df['close'][x] and df['forecastcloseprice'][x + 1] < df['forecastcloseprice'][x]):
arima_count += 1
#print(df['close'], df['forecastcloseprice'])
#print(arima_count)
#print(arima_count/len(df))
# Accuracy test for the new function MSF1
def MSF1_accuracy(self):
# Queries the database to grab all of the Macro Economic Variable codes
query = "SELECT macroeconcode FROM dbo_macroeconmaster WHERE activecode = 'A'"
id = pd.read_sql_query(query, self.engine)
id = id.reset_index(drop=True)
# Queries the database to grab all of the instrument IDs
query = 'SELECT instrumentid FROM dbo_instrumentmaster'
id2 = pd.read_sql_query(query, self.engine)
id2 = id2.reset_index(drop=True)
# These are the date ranges we are working with
# start_date represents the starting date for the forecasts and the end of the training dates
start_date = "'2018-01-01'"
# end_date represents the date for which the forecasting ends
end_date = "'2020-01-01'"
# train_date represents the date we start collecting the instrument statistics used to forecast prices
train_date = "'2016-01-01'"
# Bool to determine whether we append to dbo_tempvisualize or replace the values
to_append = False
# Create a for loop to iterate through all of the instrument ids
for v in id2['instrumentid']:
# Initializes a list in which we will eventually store all data to be added to the macroeconalgorithm database table
data = []
# Data1 will be used to store the forecastdate, instrumentid, forecastprice, and algorithm code
# It will be used to graph our backtested forecast against the actual instrument prices
data1 = []
# Getting Dates for Future Forecast as well as actual close prices for instrumentID#
# We chose 2018 - 2020, to alter this date range simply change the dates in the 3rd line of the query for the dates you want to test on
# Make sure they are valid dates, as some instruments only have statistics that go back so far; check the instrument statistics table to figure out how far back each instrument goes
query = "SELECT date, close FROM ( SELECT date, close, instrumentID, ROW_NUMBER() OVER " \
"(PARTITION BY YEAR(date), MONTH(date) ORDER BY DAY(date) DESC) AS rowNum FROM " \
"dbo_instrumentstatistics WHERE instrumentid = {} AND date BETWEEN {} AND {} ) z " \
"WHERE rowNum = 1 AND ( MONTH(z.date) = 3 OR MONTH(z.date) = 6 OR MONTH(z.date) = 9 OR " \
"MONTH(z.date) = 12)".format(v, start_date, end_date)
# instrument_stats will hold the closing prices and the dates for the dates we are forecasting for
instrument_stats = pd.read_sql_query(query, self.engine)
# We isolate the dates and closing prices into individual arrays to make them easier to work with
date = []
close = []
for i in instrument_stats['date']:
date.append(i)
for i in instrument_stats['close']:
close.append(i)
# n will always correspond to the number of dates, since the number of dates is the number of data points being compared
n = len(date)
# Median_forecast will be a dictionary where the key is the date and the value is a list of forecasted prices
median_forecast = {}
# This dictionary will be used to easily combine all of the forecasts for different dates to determine the median forecast value
for i in date:
temp = {i: []}
median_forecast.update(temp)
# This query will grab quarterly instrument prices from between 2014 and the current date to be used in the forecasting
query = "SELECT date, close, instrumentid FROM ( SELECT date, close, instrumentid, ROW_NUMBER() OVER " \
"(PARTITION BY YEAR(date), MONTH(date) ORDER BY DAY(date) DESC) AS rowNum FROM " \
"dbo_instrumentstatistics WHERE instrumentid = {} AND date BETWEEN {} AND {} ) z " \
"WHERE rowNum = 1 AND ( MONTH(z.date) = 3 OR MONTH(z.date) = 6 OR MONTH(z.date) = 9 OR " \
"MONTH(z.date) = 12)".format(v, train_date, start_date)
# Executes the query and stores the result in a dataframe variable
df2 = | pd.read_sql_query(query, self.engine) | pandas.read_sql_query |
import pandas as pd
import os
import matplotlib.pyplot as plt
import random
import numpy as np
def countChannelsInBarcodeList(path_to_decoded_genes: str):
'''
This function focuses on all stats that are purely based on how many times a certain channel was called, in what round.
This can be useful in debugging certain weird decoding behaviour, like finding whether a channel is overexpressed.
'''
df = pd.read_csv(path_to_decoded_genes)
barcodes = list(df['Barcode'])
# resulting df will have the following columns:
# 'Channel' 'Round' 'total channel count'
total_channel_counts= {}
channel_dict = {} # takes tuples of round/channel as key, and number of times encountered as value
for element in barcodes:
element_list = [int(digit) for digit in str(element)]
for i,channel_nr in enumerate(element_list):
round_nr = i+1 # Because enumerating starts with 0
# increment the tuple combination fo round/channel with one
channel_dict[(round_nr,channel_nr)] = channel_dict.get((round_nr,channel_nr), 0) + 1
total_channel_counts[channel_nr] = total_channel_counts.get(channel_nr, 0) + 1
rows_list = []
col_names = ['round_nr', 'channel_nr', 'count']
#grouped_by_channel_dict = {}
# Create the rows in the dataframe by representing them as dataframes
for k,count in channel_dict.items():
temp_dict = {}
round_nr, channel_nr = k
row_values = [round_nr, channel_nr, count]
temp_dict = {col_names[i]: row_values[i] for i in range(0,len(col_names)) }
rows_list.append(temp_dict)
count_df = pd.DataFrame(rows_list)
wide_df = count_df.pivot_table(index=["channel_nr"], columns='round_nr', values='count', margins=True, aggfunc='sum')
wide_df.to_html("channels_called.html")
def evaluateRandomCalling(path_to_decoded_genes: str, path_to_codebook: str, num_rounds: int, num_channels: int, ratio_recognized_barcodes: int = 0, simulate=False):
codebook_df = pd.read_csv(path_to_codebook)
decoded_df = pd.read_csv(path_to_decoded_genes)
# Attribute dicts is going to collect all columns of the only row the analytics df will have, key=column name, value = column value
attribute_dict={}
n_genes_to_find=len(codebook_df)
attribute_dict['# genes in codebook'] = n_genes_to_find
n_spots= len(decoded_df)
attribute_dict['# spots detected']= n_spots
# Create the counted column
decoded_df['Counted'] = decoded_df.groupby('Barcode')['Gene'].transform('size') # count every barcode-gene combination and make a new column out of it
unique_df = decoded_df[['Barcode', 'Counted']].drop_duplicates()
unique_df = unique_df.sort_values(by=['Counted'], ascending=False)
non_recognized_barcodes = [barcode for barcode in list(unique_df['Barcode']) if barcode not in list(codebook_df['Barcode'])]
non_recognized_df= unique_df[unique_df.Barcode.isin(non_recognized_barcodes)]
unique_df.to_html("unique_barcodes_called_counted.html")
color_list = ['green' if barcode in list(codebook_df['Barcode']) else 'red' for barcode in decoded_df['Barcode']]
fig= plt.figure(figsize=(13,9))
plt.scatter(decoded_df['Barcode'], decoded_df['Counted'], c = color_list)
plt.title("Barcodes counted")
plt.xlabel("Barcode combination")
plt.ylabel("Number of times recognized")
plt.savefig("barcodes_counted.svg")
# Evaluate randomness
possible_barcode_combinations = int(num_channels) ** int(num_rounds)
n_unique_barcodes_called = len(unique_df)
n_random_calls_expected_per_barcode = n_spots/possible_barcode_combinations
ratio_recognized_barcodes_random_calling_would_create = round(((n_random_calls_expected_per_barcode * n_genes_to_find) / n_spots), 3) *100
# Add to the row entry
attribute_dict['# possible combination'] = possible_barcode_combinations
attribute_dict['# unique barcodes called'] = n_unique_barcodes_called
attribute_dict['# calls per barcode combination expected if random calling'] = n_random_calls_expected_per_barcode
attribute_dict['Random recognized ratio'] = ratio_recognized_barcodes_random_calling_would_create
rows_list=[]
rows_list.append(attribute_dict)
analytics_df = pd.DataFrame(rows_list)
analytics_df.to_html("decoded_stat.html")
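# Worked example of the expectation above (numbers are hypothetical): with 4
# channels and 5 rounds there are 4**5 = 1024 possible barcodes, so 10,000
# detected spots decoded purely at random would hit each barcode about 9.8
# times on average; a recognized-barcode ratio well above that baseline argues
# against random calling.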
def countRecognizedBarcodeStats(path_to_decoded_genes: str):
df = | pd.read_csv(path_to_decoded_genes) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# In this notebook I have implemented scratch implementations of Logistic Regression using the Gradient Descent algorithm, as well as Regularized Logistic Regression. The main motives for writing these from scratch instead of using scikit-learn were to:
#
# <ul>
# <li> Understand how gradient descent minimizes the cost function to give optimal solution </li>
# <li> Visualise how change in learning rate affects the training time </li>
# <li> Get a better intuition of bias-variance tradeoff by changing the regularization parameter and the cutoff threshold </li>
# </ul>
#
#
#
# <b> Dataset </b>
# We have the customer data for a telecom company which offers many services like phone, internet, TV streaming and movie streaming. The goal is to predict whether or not a particular customer is likely to keep their services, which is represented by the Churn column in the dataset: Churn=Yes means the customer leaves the company, whereas Churn=No means the customer is retained.
#
#
# In[ ]:
import pandas as pd
from sklearn.cross_validation import train_test_split
from sklearn import metrics
import matplotlib.pyplot as plt
import numpy as np
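# The cell below is an illustrative sketch (my own, simplified) of the
# gradient-descent update that a from-scratch logistic regression relies on;
# function and variable names here are assumptions, not the notebook's own.

# In[ ]:

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def gradient_descent_logistic(X, y, lr=0.01, n_iter=1000, reg_lambda=0.0):
    # X: (m, n) feature matrix, y: (m,) vector of 0/1 labels
    m, n = X.shape
    theta = np.zeros(n)
    for _ in range(n_iter):
        h = sigmoid(X @ theta)            # predicted probabilities
        grad = (X.T @ (h - y)) / m        # gradient of the average log-loss
        grad += (reg_lambda / m) * theta  # optional L2 penalty (applied to all weights here for simplicity)
        theta -= lr * grad
    return theta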
# In[ ]:
churndata = pd.read_csv("../../../input/blastchar_telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv")
# In[ ]:
churndata.head()
churndata.columns
# In[ ]:
#Converting the Yes/No Churn column to a binary column
churndata.Churn = churndata.Churn.map(dict(Yes=1,No=0))
# Since it's a convention to do some exploratory data analysis before modeling, let's do some graph plotting
# In[ ]:
gender_tab = pd.crosstab(churndata.gender,churndata.Churn,normalize=True)
print(gender_tab)
gender_tab.plot(kind="bar",stacked=True)
#This graph shows that gender has little impact on churn, since an equivalent share of customers churn for each category
# In[ ]:
senior_citizen_tab = pd.crosstab(churndata.SeniorCitizen,churndata.Churn,normalize=True)
print(senior_citizen_tab)
senior_citizen_tab.plot(kind="bar",stacked=True)
#This graph shows that a higher proportion of customers churn among senior citizens
# In[ ]:
fig, axes = plt.subplots(nrows=2, ncols=3)
fig.set_figheight(15)
fig.set_figwidth(15)
#Similarly we can make Graph for Other Categorical Variables as well
partner_tab = pd.crosstab(churndata.Partner,churndata.Churn,normalize=True)
partner_tab.plot(kind="bar",stacked=True,layout=(2,3),ax=axes[0,0])
phoneservice_tab = pd.crosstab(churndata.PhoneService,churndata.Churn,normalize=True)
phoneservice_tab.plot(kind="bar",stacked=True,layout=(2,3),ax=axes[0,1])
multiplelines_tab = pd.crosstab(churndata.MultipleLines,churndata.Churn,normalize=True)
multiplelines_tab.plot(kind="bar",stacked=True,layout=(2,3),ax=axes[0,2])
internetservice_tab = pd.crosstab(churndata.InternetService,churndata.Churn,normalize=True)
internetservice_tab.plot(kind="bar",stacked=True,layout=(2,3),ax=axes[1,0])
OnlineSecurity_tab = pd.crosstab(churndata.OnlineSecurity,churndata.Churn,normalize=True)
OnlineSecurity_tab.plot(kind="bar",stacked=True,layout=(2,3),ax=axes[1,1])
OnlineBackup_tab = pd.crosstab(churndata.OnlineBackup,churndata.Churn,normalize=True)
OnlineBackup_tab.plot(kind="bar",stacked=True,ax=axes[1,2])
# In[ ]:
fig, axes = plt.subplots(nrows=2, ncols=3)
fig.set_figheight(15)
fig.set_figwidth(15)
DeviceProtection_tab = pd.crosstab(churndata.DeviceProtection,churndata.Churn,normalize=True)
DeviceProtection_tab.plot(kind="bar",stacked=True,ax=axes[0,0])
TechSupport_tab = pd.crosstab(churndata.TechSupport,churndata.Churn,normalize=True)
TechSupport_tab.plot(kind="bar",stacked=True,ax=axes[0,1])
StreamingTV_tab = pd.crosstab(churndata.StreamingTV,churndata.Churn,normalize=True)
StreamingTV_tab.plot(kind="bar",stacked=True,ax=axes[0,2])
StreamingMovies_tab = pd.crosstab(churndata.StreamingMovies,churndata.Churn,normalize=True)
StreamingMovies_tab.plot(kind="bar",stacked=True,ax=axes[1,0])
Contract_tab = pd.crosstab(churndata.Contract,churndata.Churn,normalize=True)
Contract_tab.plot(kind="bar",stacked=True,ax=axes[1,1])
PaperlessBilling_tab = pd.crosstab(churndata.PaperlessBilling,churndata.Churn,normalize=True)
PaperlessBilling_tab.plot(kind="bar",stacked=True,ax=axes[1,2])
PM_tab = pd.crosstab(churndata.PaymentMethod,churndata.Churn,normalize=True)
PM_tab.plot(kind="bar",stacked=True)
# Based on the information we can say that gender is not a significant variable for churn and is also correlated with others, so we can drop it.
# In[ ]:
#Since we want to retrieve dummy variables from the categorical columns, first inspect their factorized levels
pd.factorize(churndata['SeniorCitizen'])
pd.factorize(churndata['Dependents'])
pd.factorize(churndata['PhoneService'])
pd.factorize(churndata['MultipleLines'])
pd.factorize(churndata['InternetService'])
pd.factorize(churndata['OnlineSecurity'])
pd.factorize(churndata['OnlineBackup'])
pd.factorize(churndata['DeviceProtection'])
pd.factorize(churndata['TechSupport'])
pd.factorize(churndata['StreamingTV'])
pd.factorize(churndata['StreamingMovies'])
pd.factorize(churndata['Contract'])
pd.factorize(churndata['PaperlessBilling'])
pd.factorize(churndata['PaymentMethod'])
# In[ ]:
# Next we take all the categorical variables and convert them to dummy variables
cat_vars = ['SeniorCitizen', 'Partner', 'Dependents','PhoneService', 'MultipleLines', 'InternetService','OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport','StreamingTV', 'StreamingMovies', 'Contract', 'PaperlessBilling']
for var in cat_vars:
cat_list='var'+'_'+var
cat_list = | pd.get_dummies(churndata[var], prefix=var,drop_first=True) | pandas.get_dummies |
"""Test OMMBV.satellite functions"""
import datetime as dt
import numpy as np
import pandas as pds
import pysat
import OMMBV
class TestSatellite(object):
def setup(self):
"""Setup test environment before each function."""
self.inst = pysat.Instrument('pysat', 'testing', num_samples=32)
return
def teardown(self):
"""Clean up test environment after each function."""
del self.inst
return
def test_application_add_unit_vectors(self):
"""Check application of unit_vectors to satellite data"""
self.inst.load(2010, 365)
self.inst['altitude'] = 550.
OMMBV.satellite.add_mag_drift_unit_vectors_ecef(self.inst)
items = ['unit_zon_ecef_x', 'unit_zon_ecef_y', 'unit_zon_ecef_z',
'unit_fa_ecef_x', 'unit_fa_ecef_y', 'unit_fa_ecef_z',
'unit_mer_ecef_x', 'unit_mer_ecef_y', 'unit_mer_ecef_z']
for item in items:
assert item in self.inst.data
return
def test_application_add_mag_drifts(self):
"""Check application of unit vectors to drift measurements"""
self.inst.load(2010, 365)
self.inst['altitude'] = 550.
# Create false orientation signal
self.inst['sc_xhat_x'] = 1.
self.inst['sc_xhat_y'] = 0.
self.inst['sc_xhat_z'] = 0.
self.inst['sc_yhat_x'] = 0.
self.inst['sc_yhat_y'] = 1.
self.inst['sc_yhat_z'] = 0.
self.inst['sc_zhat_x'] = 0.
self.inst['sc_zhat_y'] = 0.
self.inst['sc_zhat_z'] = 1.
# Add vectors and test that vectors were added.
OMMBV.satellite.add_mag_drift_unit_vectors(self.inst)
items = ['unit_zon_x', 'unit_zon_y', 'unit_zon_z',
'unit_fa_x', 'unit_fa_y', 'unit_fa_z',
'unit_mer_x', 'unit_mer_y', 'unit_mer_z']
for item in items:
assert item in self.inst.data
# Check adding drifts now.
self.inst['iv_x'] = 150.
self.inst['iv_y'] = 50.
self.inst['iv_z'] = -50.
OMMBV.satellite.add_mag_drifts(self.inst)
items = ['iv_zon', 'iv_fa', 'iv_mer']
for item in items:
assert item in self.inst.data
# Check scaling to footpoints and equator
self.inst['equ_mer_drifts_scalar'] = 1.
self.inst['equ_zon_drifts_scalar'] = 1.
self.inst['north_footpoint_mer_drifts_scalar'] = 1.
self.inst['north_footpoint_zon_drifts_scalar'] = 1.
self.inst['south_footpoint_mer_drifts_scalar'] = 1.
self.inst['south_footpoint_zon_drifts_scalar'] = 1.
OMMBV.satellite.add_footpoint_and_equatorial_drifts(self.inst)
items = ['equ_mer_drifts_scalar', 'equ_zon_drifts_scalar',
'north_footpoint_mer_drifts_scalar',
'north_footpoint_zon_drifts_scalar',
'south_footpoint_mer_drifts_scalar',
'south_footpoint_zon_drifts_scalar']
for item in items:
assert item in self.inst.data
return
def test_unit_vector_properties(self):
"""Test basic vector properties along field lines."""
# Start with a set of locations
p_long = np.arange(0., 360., 12.)
p_alt = 0 * p_long + 550.
p_lats = [5., 10., 15., 20., 25., 30.]
truthiness = []
for i, p_lat in enumerate(p_lats):
date = dt.datetime(2000, 1, 1)
ecef_x, ecef_y, ecef_z = OMMBV.trans.geocentric_to_ecef(p_lat,
p_long,
p_alt)
for j, (x, y, z) in enumerate(zip(ecef_x, ecef_y, ecef_z)):
# Perform field line traces
trace_n = OMMBV.trace.field_line_trace(np.array([x, y, z]), date, 1.,
0., step_size=.5,
max_steps=1.E6)
trace_s = OMMBV.trace.field_line_trace(np.array([x, y, z]), date, -1.,
0., step_size=.5,
max_steps=1.E6)
# Combine together, S/C position is first for both
# Reverse first array and join so plotting makes sense
trace = np.vstack((trace_n[::-1], trace_s))
trace = | pds.DataFrame(trace, columns=['x', 'y', 'z']) | pandas.DataFrame |
import os
import pandas as pd
from IPython.core.display import display, HTML
from recordsearch_tools.client import RSSeriesClient
import plotly.offline as py
import plotly.graph_objs as go
from textblob import TextBlob
import nltk
stopwords = nltk.corpus.stopwords.words('english')
py.init_notebook_mode()
def make_summary(series, df, include_titles=True):
# We're going to assemble some summary data about the series in a 'summary' dictionary
# Let's create the dictionary
summary = {'series': series}
if include_titles:
s = RSSeriesClient()
series_data = s.get_summary(series)
summary['title'] = series_data['title']
summary['total_items'] = df.shape[0]
# Get the frequency of the different access status categories
summary['access_counts'] = df['access_status'].value_counts().to_dict()
# Get the number of files that have been digitised
summary['digitised_files'] = len(df.loc[df['digitised_status'] == True])
# Get the number of individual pages that have been digitised
summary['digitised_pages'] = df['digitised_pages'].sum()
# Get the earliest start date
start = df['start_date'].min()
try:
summary['date_from'] = start.year
except AttributeError:
summary['date_from'] = None
# Get the latest end date
end = df['end_date'].max()
try:
summary['date_to'] = end.year
except AttributeError:
summary['date_to'] = None
return summary
def display_summary(series, df):
summary = make_summary(series, df)
display(HTML('<h1>National Archives of Australia: Series {}</h1>'.format(series)))
display(HTML('<h3>{}</h3>'.format(summary['title'])))
table = '<table class="table" style="text-align: left">'
table += '<tr><th style="text-align: left">Total items</th><td style="text-align: left">{:,}</td></tr>'.format(summary['total_items'])
table += '<tr><th style="text-align: left">Access status</th><td></td></tr>'
for status, number in summary['access_counts'].items():
table += '<tr><td style="text-align: left">{}</td><td style="text-align: left">{:,} ({:.2%})</td></tr>'.format(status, number, number/summary['total_items'])
table += '<tr><th style="text-align: left">Number of items digitised</th><td style="text-align: left">{:,} ({:.2%})</td></tr>'.format(summary['digitised_files'], summary['digitised_files']/summary['total_items'])
table += '<tr><th style="text-align: left">Number of pages digitised</th><td style="text-align: left">{:,}</td></tr>'.format(summary['digitised_pages'])
table += '<tr><th style="text-align: left">Date of earliest content</th><td style="text-align: left">{}</td></tr>'.format(summary['date_from'])
table += '<tr><th style="text-align: left">Date of latest content</th><td style="text-align: left">{}</td></tr>'.format(summary['date_to'])
table += '</table>'
table += '<ul><li><b><a href="https://github.com/wragge/ozglam-workbench/blob/master/data/RecordSearch/{}.csv">Download item data (CSV format)</a></b></li>'.format(series.replace('/', '-'))
table += '<li><b><a href="http://www.naa.gov.au/cgi-bin/Search?O=S&Number={}">View details on RecordSearch</a></b></li></ul>'.format(series)
display(HTML(table))
def make_df_all(series_list):
# Create a list to store the summaries
summaries = []
# Loop through the list of series in this repo
for series in series_list:
# Open the CSV of each series harvest as a data frame
df = pd.read_csv('../data/RecordSearch/{}.csv'.format(series.replace('/', '-')), parse_dates=['start_date', 'end_date'])
# Extract a summary of each series and add it to the list of summaries
summaries.append(make_summary(series, df, include_titles=False))
# Convert the list of summaries into a DataFrame for easy manipulation
df = pd.DataFrame(summaries)
# Flatten the access count dictionaries and fill blanks with zero
df = pd.concat([df, pd.DataFrame((d for idx, d in df['access_counts'].iteritems()))], axis=1).fillna(0)
# Change access counts from floats to integers
df[['Closed', 'Not yet examined', 'Open with exception', 'Open']] = df[['Closed', 'Not yet examined', 'Open with exception', 'Open']].astype(int)
# Delete the old 'access_counts' column
del df['access_counts']
# For convenience acronymise 'Not yet examined' and 'Open with exception'
df.rename({'Not yet examined': 'NYE', 'Open with exception': 'OWE'}, axis=1, inplace=True)
return df
def make_summary_all(df):
summary = {}
summary['total_items'] = df['total_items'].sum()
summary['date_from'] = int(df['date_from'].min())
summary['date_to'] = int(df['date_to'].max())
access_status = {}
for status in ['Open', 'OWE', 'NYE', 'Closed']:
status_total = df[status].sum()
access_status[status] = status_total
summary['access_counts'] = access_status
summary['digitised_files'] = df['digitised_files'].sum()
summary['digitised_pages'] = df['digitised_pages'].sum()
return summary
def display_summary_all(df):
summary = make_summary_all(df)
display(HTML('<h2>Aggregated totals</h2>'))
table = '<table class="table" style="text-align: left">'
table += '<tr><th style="text-align: left">Total items</th><td style="text-align: left">{:,}</td></tr>'.format(summary['total_items'])
table += '<tr><th style="text-align: left">Access status</th><td></td></tr>'
for status, number in summary['access_counts'].items():
table += '<tr><td style="text-align: left">{}</td><td style="text-align: left">{:,} ({:.2%})</td></tr>'.format(status, number, number/summary['total_items'])
table += '<tr><th style="text-align: left">Number of items digitised</th><td style="text-align: left">{:,} ({:.2%})</td></tr>'.format(summary['digitised_files'], summary['digitised_files']/summary['total_items'])
table += '<tr><th style="text-align: left">Number of pages digitised</th><td style="text-align: left">{:,}</td></tr>'.format(summary['digitised_pages'])
table += '<tr><th style="text-align: left">Date of earliest content</th><td style="text-align: left">{}</td></tr>'.format(summary['date_from'])
table += '<tr><th style="text-align: left">Date of latest content</th><td style="text-align: left">{}</td></tr>'.format(summary['date_to'])
table += '</table>'
display(HTML(table))
def display_series_all(df):
# Get the columns into the order we want
df = df[['series', 'total_items', 'date_from', 'date_to', 'Open', 'OWE', 'NYE', 'Closed', 'digitised_files', 'digitised_pages']].copy()
# Calculate and add a percentage open column
df['% open'] = df['Open'] / df['total_items']
# Calculate and add a percentage digitised column
df['% digitised'] = df['digitised_files'] / df['total_items']
# Add a link to the series name
df['series'] = df['series'].apply(lambda x: '<a href="{}-summary.ipynb">{}</a>'.format(x.replace('/', '-'), x))
# Style the output
table = (df.style
.set_properties(**{'font-size': '120%'})
.set_properties(subset=['series'], **{'text-align': 'left', 'font-weight': 'bold'})
.format('{:,}', ['total_items', 'Open', 'OWE', 'NYE', 'Closed', 'digitised_files', 'digitised_pages'])
.format('{:.2%}', ['% open', '% digitised'])
# Hide the index
.set_table_styles([dict(selector="th", props=[("font-size", "120%"), ("text-align", "center")]),
dict(selector='.row_heading, .blank', props=[('display', 'none')])])
.background_gradient(cmap='Greens', subset=['% open', '% digitised'], high=0.5)
)
return table
def make_year_trace(df, digitised=True):
all_years = []
for row in df.loc[df['digitised_status'] == digitised].itertuples(index=False):
try:
years = pd.date_range(start=row.start_date, end=row.end_date, freq='AS').year.to_series()
except ValueError:
# No start date
pass
else:
all_years.append(years)
year_counts = | pd.concat(all_years) | pandas.concat |
from pathlib import Path
import pandas as pd
import numpy as np
DATA_DIR = Path(__file__).parents[1] / 'data'
def load_so_cgm():
data_path = str(DATA_DIR / 'private' / 'dexcom_cgm')
dfs = []
for p in Path(data_path).iterdir():
if str(p).endswith('.csv'):
df = | pd.read_csv(p) | pandas.read_csv |
###############
#
# Transform R to Python Copyright (c) 2016 <NAME> Released under the MIT license
#
###############
import os
import numpy as np
import pystan
import pandas
import pickle
import seaborn as sns
import matplotlib.pyplot as plt
import arviz as az
file_beer_sales_3 = pandas.read_csv('3-6-1-beer-sales-3.csv')
print(file_beer_sales_3.head())
# violin plot
sns.violinplot(x='weather', y='sales', data=file_beer_sales_3)
plt.show()
file_beer_sales_3_dm = pandas.get_dummies(file_beer_sales_3)
print(file_beer_sales_3_dm.head())
sample_num = len(file_beer_sales_3_dm['sales'])
sales = file_beer_sales_3_dm['sales']
weather_cloudy = file_beer_sales_3_dm['weather_cloudy']
weather_rainy = file_beer_sales_3_dm['weather_rainy']
weather_sunny = file_beer_sales_3_dm['weather_sunny']
print(sample_num)
print(sales)
print(len(weather_sunny))
weather_rainy_pred = [0, 1, 0]
weather_sunny_pred = [0, 0, 1]
stan_data = {
'N': sample_num,
'sales': sales,
'weather_rainy': weather_rainy,
'weather_sunny': weather_sunny,
'N_pred': 3,
'weather_rainy_pred': weather_rainy_pred,
'weather_sunny_pred': weather_sunny_pred
}
if os.path.exists('3-6-1-cat-lm.pkl'):
sm = pickle.load(open('3-6-1-cat-lm.pkl', 'rb'))
# sm = pystan.StanModel(file='3-6-1-cat-lm.stan')
else:
# a model using prior for mu and sigma.
sm = pystan.StanModel(file='3-6-1-cat-lm.stan')
mcmc_result = sm.sampling(
data=stan_data,
seed=1,
chains=4,
iter=2000,
warmup=1000,
thin=1
)
print(mcmc_result)
mcmc_result.plot()
plt.show()
# extracting predicted sales
mcmc_sample = mcmc_result.extract()
# box plot
df = | pandas.DataFrame(mcmc_sample['sales_pred']) | pandas.DataFrame |
import json
from definitions import *
from cdrkm_model import CDRKM
from kernels import kernel_factory
import argparse
from pathlib import Path
import pandas
import torch
from utils import save_altairplot, load_dataset, merge_two_dicts
import numpy as np
def eval_training(filepath: Path):
sd_mdl = torch.load(filepath, map_location=torch.device('cpu'))
args = sd_mdl['args']
_, xtrain, _ = load_dataset(args.dataset, args.Nd, [], seed=args.seed)
kernels = [kernel_factory(*x) for x in zip(args.kernel, args.kernelparam)]
model = CDRKM(kernels, args.s, xtrain.shape[0], gamma=args.gamma, layerwisein=args.layerwisein, xtrain=xtrain,
ortoin=(args.train_algo == 2)).to(device)
model.load_state_dict(sd_mdl['cdrkm_state_dict'])
train_table_datatypes = sd_mdl['train_table_datatypes']
train_table = pandas.read_json(sd_mdl['train_table'], orient='records', dtype=json.loads(train_table_datatypes))
final_i = train_table['i'].iat[-1] # total number of iterations
final_j = train_table['j'].iat[-1] # final objective value
final_orto = train_table['orto'].iat[-1] # final feasibility
final_outer_i = train_table['outer_i'].iat[-1] #final number of outer iterations
final_X = np.array(train_table['X'].iat[-1]) if 'X' in train_table else model.h # final H
if 'X' in train_table:
model.h[:] = torch.tensor(final_X)
model.h.requires_grad_(True)
h = model.h
loss = model(xtrain)[0]
loss.backward()
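# Interpretation note: u @ v.T is the polar factor of (h - grad), i.e. the
# closest matrix with orthonormal columns, so the norm below measures how far
# a plain gradient step would push the iterate off the orthogonality constraint.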
u, s, v = torch.svd(h.cpu() - h.grad.cpu())
out = torch.norm(h - torch.mm(u.to(device), v.to(device).t()))
final_XUVT = float(out) # final ||X-U*V’||
elapsed_seconds = sd_mdl.get('elapsed_time', -1)
if type(elapsed_seconds) != int:
elapsed_seconds = elapsed_seconds.seconds
eval_dict = {'final_j': final_j,
'final_orto': final_orto,
'final_X': final_X,
'final_XUVT': final_XUVT,
'final_i': final_i,
'final_outer_i': final_outer_i,
'elapsed_seconds': elapsed_seconds}
return eval_dict
def distance_matrix(x, y):
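# Pairwise distances between two lists (absolute difference for floats,
# Euclidean norm for tensors), returned as a (len(x), len(y)) numpy array.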
if type(x[0]) == float:
diffs = [abs(a-b) for a in x for b in y]
else:
diffs = [torch.pow(torch.norm(a - b), 1) for a in x for b in y]
diffs = torch.tensor(diffs).view(len(x), len(y)).numpy()
return diffs
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--labels', type=str, nargs="+")
args = parser.parse_args()
filenames = [OUT_DIR.joinpath('%s/model.pt' % filename) for filename in args.labels]
sd_mdls = [torch.load(str(filename), map_location=torch.device('cpu')) for filename in filenames]
# Evaluate each model
df = []
for sd_mdl, filename in zip(sd_mdls, filenames):
label = sd_mdl.get('label', filename.parent.stem)
print(f"Evaluating {label}")
print(sd_mdl['args'])
eval_dict = eval_training(filename)  # eval_training() takes only the file path
df.append(merge_two_dicts(eval_dict, vars(sd_mdl['args'])))
if eval_dict.get('plot') is not None:
save_altairplot(eval_dict['plot'], filename.parent.joinpath("train_table_plot_%s.pdf" % label))
print("\n".join("{}\t{}".format(k, str(v)) for k, v in eval_dict.items() if k not in ["final_X", "plot"]))
print("-------------------------")
df = pandas.DataFrame(df)
# Compare solutions
algos = df['train_algo'].unique()
algos_names = {2: 'Cayley ADAM', 3: 'Projected Gradient', 5: 'Augmented Lagrangian'}
algos_names = [algos_names[algo] for algo in algos]
hs_by_algo = {algo: [torch.from_numpy(h[0]) if len(h[0].shape) == 2 else torch.from_numpy(h) for h in df[(df['train_algo'] == algo) & (df['seed'] == 0)]['final_X'].to_list()]
for algo in algos}
cost_by_algo = {algo: [cost for cost in df[(df['train_algo'] == algo) & (df['seed'] == 0)]['final_j'].to_list()]
for algo in algos}
hs_diffs = [distance_matrix(hs_by_algo[algo1], hs_by_algo[algo2]) for algo1 in algos for algo2 in algos]
cost_diffs = [distance_matrix(cost_by_algo[algo1], cost_by_algo[algo2]) for algo1 in algos for algo2 in algos]
mean_hs_diffs = np.array([np.mean(d) for d in hs_diffs]).reshape((len(algos), len(algos)))
mean_cost_diffs = np.array([np.mean(d) for d in cost_diffs]).reshape((len(algos), len(algos)))
std_hs_diffs = np.array([np.std(d) for d in hs_diffs]).reshape((len(algos), len(algos)))
std_cost_diffs = np.array([np.std(d) for d in cost_diffs]).reshape((len(algos), len(algos)))
mean_hs_crosstable = pandas.DataFrame(mean_hs_diffs, columns=algos_names, index=algos_names)
mean_cost_crosstable = pandas.DataFrame(mean_cost_diffs, columns=algos_names, index=algos_names)
std_hs_crosstable = | pandas.DataFrame(std_hs_diffs, columns=algos_names, index=algos_names) | pandas.DataFrame |
# Copyright 2020 AI2Business. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test-Environment for trends_collector."""
import pandas as pd
from ai2business.kpi_collector import trends_collector as tdc
trends = tdc.TrendsCollector()
builder = tdc.DesignerTrendsCollector(["AI", "Business", "AI2Business"])
trends.builder = builder
def test_interest_over_time() -> None:
trends.find_interest_over_time()
assert type(builder.trends.return_product["get_interest_over_time"]) == type(
pd.DataFrame()
)
def test_interest_by_region() -> None:
trends.find_interest_by_region()
assert type(builder.trends.return_product["get_interest_by_region"]) == type(
pd.DataFrame()
)
def test_trending_searches() -> None:
trends.find_trending_searches("japan")
assert type(builder.trends.return_product["get_trending_searches"]) == type(
| pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report,confusion_matrix
import matplotlib.pyplot as plt
import numpy as np
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
df = | pd.read_csv("Classified Data",index_col=0) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# # PCA (Principal Components Analysis)
# ## wine.cvs
# In[16]:
#importing necessary libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sklearn.decomposition as sk
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
# In[17]:
#imporing the dataset
wine = pd.read_csv("C:\\Users\\yhari\\OneDrive\\Documents\\1. Data Analyst Training\\CSV files\\wine.csv")
# In[18]:
#describe the dataset
wine.describe()
# In[19]:
#view top 5 records
wine.head()
# In[20]:
#Consider only the numerical data
wine.data = wine.iloc[:,1:]
# In[21]:
wine.data.head()
# In[22]:
#Standardizing;Normalizing the numerical data
wine_norm = scale(wine.data)
# In[23]:
#look for the type of data, array or dataframe
type(wine_norm)
# In[24]:
#convert ndarray into a dataframe
wine1 = pd.DataFrame(wine_norm)
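# A minimal sketch of the PCA step implied by the notebook title (an assumption,
# not part of the original extract):
# pca = PCA()
# scores = pca.fit_transform(wine1)               # principal component scores
# var_explained = pca.explained_variance_ratio_   # variance captured by each component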
from __future__ import division
import copy
import bt
from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy
from bt.core import FixedIncomeStrategy, HedgeSecurity, FixedIncomeSecurity
from bt.core import CouponPayingSecurity, CouponPayingHedgeSecurity
from bt.core import is_zero
import pandas as pd
import numpy as np
from nose.tools import assert_almost_equal as aae
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
def test_node_tree1():
# Create a regular strategy
c1 = Node('c1')
c2 = Node('c2')
p = Node('p', children=[c1, c2, 'c3', 'c4'])
assert 'c1' in p.children
assert 'c2' in p.children
assert p['c1'] != c1
assert p['c1'] != c2
c1 = p['c1']
c2 = p['c2']
assert len(p.children) == 2
assert p == c1.parent
assert p == c2.parent
assert p == c1.root
assert p == c2.root
# Create a new parent strategy with a child sub-strategy
m = Node('m', children=[p, c1])
p = m['p']
mc1 = m['c1']
c1 = p['c1']
c2 = p['c2']
assert len(m.children) == 2
assert 'p' in m.children
assert 'c1' in m.children
assert mc1 != c1
assert p.parent == m
assert len(p.children) == 2
assert 'c1' in p.children
assert 'c2' in p.children
assert p == c1.parent
assert p == c2.parent
assert m == p.root
assert m == c1.root
assert m == c2.root
# Add a new node into the strategy
c0 = Node('c0', parent=p)
c0 = p['c0']
assert 'c0' in p.children
assert p == c0.parent
assert m == c0.root
assert len(p.children) == 3
# Add a new sub-strategy into the parent strategy
p2 = Node( 'p2', children = [c0, c1], parent=m )
p2 = m['p2']
c0 = p2['c0']
c1 = p2['c1']
assert 'p2' in m.children
assert p2.parent == m
assert len(p2.children) == 2
assert 'c0' in p2.children
assert 'c1' in p2.children
assert c0 != p['c0']
assert c1 != p['c1']
assert p2 == c0.parent
assert p2 == c1.parent
assert m == p2.root
assert m == c0.root
assert m == c1.root
def test_node_tree2():
# Just like test_node_tree1, but using the dictionary constructor
c = Node('template')
p = Node('p', children={'c1':c, 'c2':c, 'c3':'', 'c4':''})
assert 'c1' in p.children
assert 'c2' in p.children
assert p['c1'] != c
assert p['c1'] != c
c1 = p['c1']
c2 = p['c2']
assert len(p.children) == 2
assert c1.name == 'c1'
assert c2.name == 'c2'
assert p == c1.parent
assert p == c2.parent
assert p == c1.root
assert p == c2.root
def test_node_tree3():
c1 = Node('c1')
c2 = Node('c1') # Same name!
raised = False
try:
p = Node('p', children=[c1, c2, 'c3', 'c4'])
except ValueError:
raised = True
assert raised
raised = False
try:
p = Node('p', children=['c1', 'c1'])
except ValueError:
raised = True
assert raised
c1 = Node('c1')
c2 = Node('c2')
p = Node('p', children=[c1, c2, 'c3', 'c4'])
raised = False
try:
Node('c1', parent = p )
except ValueError:
raised = True
assert raised
# This does not raise, as it's just providing an implementation of 'c3',
# which had been declared earlier
c3 = Node('c3', parent = p )
assert 'c3' in p.children
def test_integer_positions():
c1 = Node('c1')
c2 = Node('c2')
c1.integer_positions = False
p = Node('p', children=[c1, c2])
c1 = p['c1']
c2 = p['c2']
assert p.integer_positions
assert c1.integer_positions
assert c2.integer_positions
p.use_integer_positions(False)
assert not p.integer_positions
assert not c1.integer_positions
assert not c2.integer_positions
c3 = Node('c3', parent=p)
c3 = p['c3']
assert not c3.integer_positions
p2 = Node( 'p2', children = [p] )
p = p2['p']
c1 = p['c1']
c2 = p['c2']
assert p2.integer_positions
assert p.integer_positions
assert c1.integer_positions
assert c2.integer_positions
def test_strategybase_tree():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
s1 = s['s1']
s2 = s['s2']
assert len(s.children) == 2
assert 's1' in s.children
assert 's2' in s.children
assert s == s1.parent
assert s == s2.parent
def test_node_members():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
s1 = s['s1']
s2 = s['s2']
actual = s.members
assert len(actual) == 3
assert s1 in actual
assert s2 in actual
assert s in actual
actual = s1.members
assert len(actual) == 1
assert s1 in actual
actual = s2.members
assert len(actual) == 1
assert s2 in actual
def test_node_full_name():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
# we cannot access s1 and s2 directly since they are copied
# we must therefore access through s
assert s.full_name == 'p'
assert s['s1'].full_name == 'p>s1'
assert s['s2'].full_name == 'p>s2'
def test_security_setup_prices():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
# now with setup
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
def test_strategybase_tree_setup():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
assert len(s.data) == 3
assert len(c1.data) == 3
assert len(c2.data) == 3
assert len(s._prices) == 3
assert len(c1._prices) == 3
assert len(c2._prices) == 3
assert len(s._values) == 3
assert len(c1._values) == 3
assert len(c2._values) == 3
def test_strategybase_tree_adjust():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
assert s.capital == 1000
assert s.value == 1000
assert c1.value == 0
assert c2.value == 0
assert c1.weight == 0
assert c2.weight == 0
s.update(dts[0])
assert s.flows[ dts[0] ] == 1000
def test_strategybase_tree_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 100
assert c2.price == 100
i = 1
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert c2.price == 95
i = 2
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 100
assert c2.price == 100
def test_update_fails_if_price_is_nan_and_position_open():
c1 = SecurityBase('c1')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1'], data=100)
data['c1'][dts[1]] = np.nan
c1.setup(data)
i = 0
# mock in position
c1._position = 100
c1.update(dts[i], data.loc[dts[i]])
# test normal case - position & non-nan price
assert c1._value == 100 * 100
i = 1
# this should fail, because we have non-zero position, and price is nan, so
# bt has no way of updating the _value
try:
c1.update(dts[i], data.loc[dts[i]])
assert False
except Exception as e:
assert str(e).startswith('Position is open')
# on the other hand, if position was 0, this should be fine, and update
# value to 0
c1._position = 0
c1.update(dts[i], data.loc[dts[i]])
assert c1._value == 0
def test_strategybase_tree_allocate():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_allocate_child_from_strategy():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate to c1
s.allocate(500, 'c1')
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_allocate_level2():
c1 = SecurityBase('c1')
c12 = copy.deepcopy(c1)
c2 = SecurityBase('c2')
c22 = copy.deepcopy(c2)
s1 = StrategyBase('s1', [c1, c2])
s2 = StrategyBase('s2', [c12, c22])
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
c1 = s1['c1']
c2 = s1['c2']
c12 = s2['c1']
c22 = s2['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
m.setup(data)
i = 0
m.update(dts[i], data.loc[dts[i]])
m.adjust(1000)
# since children have w == 0 this should stay in s
m.allocate(1000)
assert m.value == 1000
assert m.capital == 1000
assert s1.value == 0
assert s2.value == 0
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
s1.allocate(500)
assert s1.value == 500
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
# now allocate directly to child of child
c1.allocate(200)
assert s1.value == 500
assert s1.capital == 500 - 200
assert c1.value == 200
assert c1.weight == 200.0 / 500
assert c1.position == 2
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
assert c12.value == 0
def test_strategybase_tree_allocate_long_short():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert c1.weight == 500.0 / 1000
assert s.capital == 1000 - 500
assert s.value == 1000
c1.allocate(-200)
assert c1.position == 3
assert c1.value == 300
assert c1.weight == 300.0 / 1000
assert s.capital == 1000 - 500 + 200
assert s.value == 1000
c1.allocate(-400)
assert c1.position == -1
assert c1.value == -100
assert c1.weight == -100.0 / 1000
assert s.capital == 1000 - 500 + 200 + 400
assert s.value == 1000
# close up
c1.allocate(-c1.value)
assert c1.position == 0
assert c1.value == 0
assert c1.weight == 0
assert s.capital == 1000 - 500 + 200 + 400 - 100
assert s.value == 1000
def test_strategybase_tree_allocate_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert s.price == 100
s.adjust(1000)
assert s.price == 100
assert s.value == 1000
assert s._value == 1000
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert c1.weight == 500.0 / 1000
assert s.capital == 1000 - 500
assert s.value == 1000
assert s.price == 100
i = 1
s.update(dts[i], data.loc[dts[i]])
assert c1.position == 5
assert c1.value == 525
assert c1.weight == 525.0 / 1025
assert s.capital == 1000 - 500
assert s.value == 1025
assert np.allclose(s.price, 102.5)
def test_strategybase_universe():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
assert len(s.universe) == 1
assert 'c1' in s.universe
assert 'c2' in s.universe
assert s.universe['c1'][dts[i]] == 105
assert s.universe['c2'][dts[i]] == 95
# should not have children unless allocated
assert len(s.children) == 0
def test_strategybase_allocate():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 100
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
assert c1.position == 1
assert c1.value == 100
assert s.value == 1000
def test_strategybase_lazy():
# A mix of test_strategybase_universe and test_strategybase_allocate
# to make sure that assets with lazy_add work correctly.
c1 = SecurityBase('c1', multiplier=2, lazy_add=True, )
c2 = FixedIncomeSecurity('c2', lazy_add=True)
s = StrategyBase('s', [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
assert len(s.universe) == 1
assert 'c1' in s.universe
assert 'c2' in s.universe
assert s.universe['c1'][dts[i]] == 105
assert s.universe['c2'][dts[i]] == 95
# should not have children unless allocated
assert len(s.children) == 0
s.adjust(1000)
s.allocate(100, 'c1')
s.allocate(100, 'c2')
c1 = s['c1']
c2 = s['c2']
assert c1.multiplier == 2
assert isinstance( c2, FixedIncomeSecurity)
def test_strategybase_close():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
assert c1.position == 1
assert c1.value == 100
assert s.value == 1000
s.close('c1')
assert c1.position == 0
assert c1.value == 0
assert s.value == 1000
def test_strategybase_flatten():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
s.allocate(100, 'c2')
c2 = s['c2']
assert c1.position == 1
assert c1.value == 100
assert c2.position == 1
assert c2.value == 100
assert s.value == 1000
s.flatten()
assert c1.position == 0
assert c1.value == 0
assert s.value == 1000
def test_strategybase_multiple_calls():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.loc[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 0
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 1
assert s.value == 1000
assert s.capital == 50
c2 = s['c2']
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update out t0
s.update(dts[i])
c2 = s['c2']
assert len(s.children) == 1
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1050
assert s.capital == 50
assert len(s.children) == 1
assert 'c2' in s.children
c2 = s['c2']
assert c2.value == 1000
assert c2.weight == 1000.0 / 1050.0
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
c1 = s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update out t1
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
assert c1 == s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 5
assert c1.value == 1100
assert c1.weight == 1100.0 / 1105
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
# accessing price should refresh - this child has been idle for a while -
# must make sure we can still have a fresh prices
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
def test_strategybase_multiple_calls_preset_secs():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('s', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.loc[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 2
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update out t0
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1050
assert s.capital == 50
assert len(s.children) == 2
assert c2.value == 1000
assert c2.weight == 1000.0 / 1050.
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
# update out t1
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 5
assert c1.value == 1100
assert c1.weight == 1100.0 / 1105
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
# accessing price should refresh - this child has been idle for a while -
# must make sure we can still have a fresh prices
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
def test_strategybase_multiple_calls_no_post_update():
s = StrategyBase('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.loc[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 0
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 1
assert s.value == 999
assert s.capital == 49
c2 = s['c2']
assert c2.value == 950
assert c2.weight == 950.0 / 999
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1049
assert s.capital == 49
assert len(s.children) == 1
assert 'c2' in s.children
c2 = s['c2']
assert c2.value == 1000
assert c2.weight == 1000.0 / 1049.0
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert len(s.children) == 2
assert s.value == 1047
assert s.capital == 2
c1 = s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1047
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1102
assert s.capital == 2
assert c1.value == 1100
assert c1.weight == 1100.0 / 1102
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1100
assert s.capital == 55
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1100
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1100
assert s.capital == 55
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1100
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1098
assert s.capital == 53
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1098
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1098
assert s.capital == 53
assert c1.value == 0
assert c1.weight == 0
# accessing price should refresh - this child has been idle for a while -
# must make sure we can still have a fresh prices
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1098
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1096
assert s.capital == 51
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1096
assert c2.price == 95
def test_strategybase_prices():
dts = pd.date_range('2010-01-01', periods=21)
rawd = [13.555, 13.75, 14.16, 13.915, 13.655,
13.765, 14.02, 13.465, 13.32, 14.65,
14.59, 14.175, 13.865, 13.865, 13.89,
13.85, 13.565, 13.47, 13.225, 13.385,
12.89]
data = pd.DataFrame(index=dts, data=rawd, columns=['a'])
s = StrategyBase('s')
s.set_commissions(lambda q, p: 1)
s.setup(data)
# buy 100 shares on day 1 - hold until end
# just enough to buy 100 shares + 1$ commission
s.adjust(1356.50)
s.update(dts[0])
# allocate all capital to child a
# a should be dynamically created and should have
# 100 shares allocated. s.capital should be 0
s.allocate(s.value, 'a')
assert s.capital == 0
assert s.value == 1355.50
assert len(s.children) == 1
aae(s.price, 99.92628, 5)
a = s['a']
assert a.position == 100
assert a.value == 1355.50
assert a.weight == 1
assert a.price == 13.555
assert len(a.prices) == 1
# update through all dates and make sure price is ok
s.update(dts[1])
aae(s.price, 101.3638, 4)
s.update(dts[2])
aae(s.price, 104.3863, 4)
s.update(dts[3])
aae(s.price, 102.5802, 4)
# finish updates and make sure ok at end
for i in range(4, 21):
s.update(dts[i])
assert len(s.prices) == 21
aae(s.prices[-1], 95.02396, 5)
aae(s.prices[-2], 98.67306, 5)
def test_fail_if_root_value_negative():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 100
data['c2'][dts[0]] = 95
s.setup(data)
s.adjust(-100)
# trigger update
s.update(dts[0])
assert s.bankrupt
# make sure only triggered if root negative
c1 = StrategyBase('c1')
s = StrategyBase('s', children=[c1])
c1 = s['c1']
s.setup(data)
s.adjust(1000)
c1.adjust(-100)
s.update(dts[0])
# now make it trigger
c1.adjust(-1000)
# trigger update
s.update(dts[0])
assert s.bankrupt
def test_fail_if_0_base_in_return_calc():
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 100
data['c2'][dts[0]] = 95
# must setup tree because if not negative root error pops up first
c1 = StrategyBase('c1')
s = StrategyBase('s', children=[c1])
c1 = s['c1']
s.setup(data)
s.adjust(1000)
c1.adjust(100)
s.update(dts[0])
c1.adjust(-100)
s.update(dts[1])
try:
c1.adjust(-100)
s.update(dts[1])
assert False
except ZeroDivisionError as e:
if 'Could not update' not in str(e):
assert False
def test_strategybase_tree_rebalance():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
s.set_commissions(lambda q, p: 1)
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now rebalance c1
s.rebalance(0.5, 'c1', update=True)
assert s.root.stale == True
assert c1.position == 4
assert c1.value == 400
assert s.capital == 1000 - 401
assert s.value == 999
assert c1.weight == 400.0 / 999
assert c2.weight == 0
# Check that rebalance with update=False
# does not mark the node as stale
s.rebalance(0.6, 'c1', update=False)
assert s.root.stale == False
def test_strategybase_tree_decimal_position_rebalance():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
s.use_integer_positions(False)
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000.2)
s.rebalance(0.42, 'c1')
s.rebalance(0.58, 'c2')
aae(c1.value, 420.084)
aae(c2.value, 580.116)
aae(c1.value + c2.value, 1000.2)
def test_rebalance_child_not_in_tree():
s = StrategyBase('p')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
# rebalance to 0 w/ child that is not present - should ignore
s.rebalance(0, 'c2')
assert s.value == 1000
assert s.capital == 1000
assert len(s.children) == 0
def test_strategybase_tree_rebalance_to_0():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now rebalance c1
s.rebalance(0.5, 'c1')
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
# now rebalance c1
s.rebalance(0, 'c1')
assert c1.position == 0
assert c1.value == 0
assert s.capital == 1000
assert s.value == 1000
assert c1.weight == 0
assert c2.weight == 0
def test_strategybase_tree_rebalance_level2():
c1 = SecurityBase('c1')
c12 = copy.deepcopy(c1)
c2 = SecurityBase('c2')
c22 = copy.deepcopy(c2)
s1 = StrategyBase('s1', [c1, c2])
s2 = StrategyBase('s2', [c12, c22])
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
c1 = s1['c1']
c2 = s1['c2']
c12 = s2['c1']
c22 = s2['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
m.setup(data)
i = 0
m.update(dts[i], data.loc[dts[i]])
m.adjust(1000)
assert m.value == 1000
assert m.capital == 1000
assert s1.value == 0
assert s2.value == 0
assert c1.value == 0
assert c2.value == 0
# now rebalance child s1 - since its children are 0, no waterfall alloc
m.rebalance(0.5, 's1')
assert s1.value == 500
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
# now allocate directly to child of child
s1.rebalance(0.4, 'c1')
assert s1.value == 500
assert s1.capital == 500 - 200
assert c1.value == 200
assert c1.weight == 200.0 / 500
assert c1.position == 2
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
assert c12.value == 0
# now rebalance child s1 again and make sure c1 also gets proportional
# increase
m.rebalance(0.8, 's1')
assert s1.value == 800
aae(m.capital, 200, 1)
assert m.value == 1000
assert s1.weight == 800 / 1000
assert s2.weight == 0
assert c1.value == 300.0
assert c1.weight == 300.0 / 800
assert c1.position == 3
# now rebalance child s1 to 0 - should close out s1 and c1 as well
m.rebalance(0, 's1')
assert s1.value == 0
assert m.capital == 1000
assert m.value == 1000
assert s1.weight == 0
assert s2.weight == 0
assert c1.weight == 0
def test_strategybase_tree_rebalance_base():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
s.set_commissions(lambda q, p: 1)
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# check that 2 rebalances of equal weight lead to two different allocs
# since value changes after first call
s.rebalance(0.5, 'c1')
assert c1.position == 4
assert c1.value == 400
assert s.capital == 1000 - 401
assert s.value == 999
assert c1.weight == 400.0 / 999
assert c2.weight == 0
s.rebalance(0.5, 'c2')
assert c2.position == 4
assert c2.value == 400
assert s.capital == 1000 - 401 - 401
assert s.value == 998
assert c2.weight == 400.0 / 998
assert c1.weight == 400.0 / 998
# close out everything
s.flatten()
# adjust to get back to 1000
s.adjust(4)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now rebalance but set fixed base
base = s.value
s.rebalance(0.5, 'c1', base=base)
assert c1.position == 4
assert c1.value == 400
assert s.capital == 1000 - 401
assert s.value == 999
assert c1.weight == 400.0 / 999
assert c2.weight == 0
s.rebalance(0.5, 'c2', base=base)
assert c2.position == 4
assert c2.value == 400
assert s.capital == 1000 - 401 - 401
assert s.value == 998
assert c2.weight == 400.0 / 998
assert c1.weight == 400.0 / 998
def test_algo_stack():
a1 = mock.MagicMock(return_value=True)
a2 = mock.MagicMock(return_value=False)
a3 = mock.MagicMock(return_value=True)
# no run_always for now
del a1.run_always
del a2.run_always
del a3.run_always
stack = AlgoStack(a1, a2, a3)
target = mock.MagicMock()
assert not stack(target)
assert a1.called
assert a2.called
assert not a3.called
# now test that run_always marked are run
a1 = mock.MagicMock(return_value=True)
a2 = mock.MagicMock(return_value=False)
a3 = mock.MagicMock(return_value=True)
# a3 will have run_always
del a1.run_always
del a2.run_always
stack = AlgoStack(a1, a2, a3)
target = mock.MagicMock()
assert not stack(target)
assert a1.called
assert a2.called
assert a3.called
def test_set_commissions():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.set_commissions(lambda x, y: 1.0)
s.setup(data)
s.update(dts[0])
s.adjust(1000)
s.allocate(500, 'c1')
assert s.capital == 599
s.set_commissions(lambda x, y: 0.0)
s.allocate(-400, 'c1')
assert s.capital == 999
def test_strategy_tree_proper_return_calcs():
s1 = StrategyBase('s1')
s2 = StrategyBase('s2')
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
    data.loc[dts[1], 'c1'] = 105
    data.loc[dts[1], 'c2'] = 95
m.setup(data)
i = 0
m.update(dts[i], data.loc[dts[i]])
m.adjust(1000)
# since children have w == 0 this should stay in s
m.allocate(1000)
assert m.value == 1000
assert m.capital == 1000
assert m.price == 100
assert s1.value == 0
assert s2.value == 0
# now allocate directly to child
s1.allocate(500)
assert m.capital == 500
assert m.value == 1000
assert m.price == 100
assert s1.value == 500
assert s1.weight == 500.0 / 1000
assert s1.price == 100
assert s2.weight == 0
# allocate to child2 via parent method
m.allocate(500, 's2')
assert m.capital == 0
assert m.value == 1000
assert m.price == 100
assert s1.value == 500
assert s1.weight == 500.0 / 1000
assert s1.price == 100
assert s2.value == 500
assert s2.weight == 500.0 / 1000
assert s2.price == 100
# now allocate and incur commission fee
s1.allocate(500, 'c1')
assert m.capital == 0
assert m.value == 1000
assert m.price == 100
assert s1.value == 500
assert s1.weight == 500.0 / 1000
assert s1.price == 100
assert s2.value == 500
assert s2.weight == 500.0 / 1000.0
assert s2.price == 100
def test_strategy_tree_proper_universes():
def do_nothing(x):
return True
child1 = Strategy('c1', [do_nothing], ['b', 'c'])
parent = Strategy('m', [do_nothing], [child1, 'a'])
child1 = parent['c1']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(
        {'a': pd.Series(data=1, index=dts, name='a')})
import warnings
warnings.filterwarnings("ignore")
import os
import json
import argparse
import time
import datetime
import pickle
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import tensorflow as tf
from scipy.stats import spearmanr, mannwhitneyu
import scipy.cluster.hierarchy as shc
from skbio.stats.composition import clr
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold
from scipy.cluster.hierarchy import cut_tree
from src.models.MiMeNet import MiMeNet, tune_MiMeNet
###################################################
# Read in command line arguments
###################################################
parser = argparse.ArgumentParser(description='Perform MiMeNet')
parser.add_argument('-micro', '--micro', help='Comma delimited file representing matrix of samples by microbial features', required=True)
parser.add_argument('-metab', '--metab', help= 'Comma delimited file representing matrix of samples by metabolomic features', required=True)
parser.add_argument('-external_micro', '--external_micro', help='Comma delimited file representing matrix of samples by microbial features')
parser.add_argument('-external_metab', '--external_metab', help= 'Comma delimited file representing matrix of samples by metabolomic features')
parser.add_argument('-annotation', '--annotation', help='Comma delimited file annotating subset of metabolite features')
parser.add_argument('-labels', '--labels', help="Comma delimited file for sample labels to associate clusters with")
parser.add_argument('-output', '--output', help='Output directory', required=True)
parser.add_argument('-net_params', '--net_params', help='JSON file of network hyperparameters', default=None)
parser.add_argument('-background', '--background', help='Directory with previously generated background', default=None)
parser.add_argument('-num_background', '--num_background', help='Number of background CV Iterations', default=100, type=int)
parser.add_argument('-micro_norm', '--micro_norm', help='Microbiome normalization (RA, CLR, or None)', default='CLR')
parser.add_argument('-metab_norm', '--metab_norm', help='Metabolome normalization (RA, CLR, or None)', default='CLR')
parser.add_argument('-threshold', '--threshold', help='Define significant correlation threshold', default=None)
parser.add_argument('-num_run_cv', '--num_run_cv', help='Number of iterations for cross-validation', default=1, type=int)
parser.add_argument('-num_cv', '--num_cv', help='Number of cross-validated folds', default=10, type=int)
parser.add_argument('-num_run', '--num_run', help='Number of iterations for training full model', type=int, default=10)
args = parser.parse_args()
micro = args.micro
metab = args.metab
external_micro = args.external_micro
external_metab = args.external_metab
annotation = args.annotation
out = args.output
net_params = args.net_params
threshold = args.threshold
micro_norm = args.micro_norm
metab_norm = args.metab_norm
num_run_cv = args.num_run_cv
num_cv = args.num_cv
num_run = args.num_run
background_dir = args.background
labels = args.labels
num_bg = args.num_background
tuned = False
gen_background = True
if background_dir != None:
gen_background = False
start_time = time.time()
if external_metab != None and external_micro == None:
print("Warning: External metabolites found with no external microbiome...ignoring external set!")
external_metab = None
if net_params != None:
print("Loading network parameters...")
try:
with open(net_params, "r") as infile:
params = json.load(infile)
num_layer = params["num_layer"]
layer_nodes = params["layer_nodes"]
l1 = params["l1"]
l2 = params["l2"]
dropout = params["dropout"]
learning_rate = params["lr"]
tuned = True
print("Loaded network parameters...")
except:
print("Warning: Could not load network parameter file!")
###################################################
# Load Data
###################################################
metab_df = pd.read_csv(metab, index_col=0)
micro_df = pd.read_csv(micro, index_col=0)
if external_metab != None:
external_metab_df = pd.read_csv(external_metab, index_col=0)
if external_micro != None:
external_micro_df = pd.read_csv(external_micro, index_col=0)
###################################################
# Filter only paired samples
###################################################
samples = np.intersect1d(metab_df.columns.values, micro_df.columns.values)
num_samples = len(samples)
metab_df = metab_df[samples]
micro_df = micro_df[samples]
for c in micro_df.columns:
micro_df[c] = pd.to_numeric(micro_df[c])
for c in metab_df.columns:
metab_df[c] = pd.to_numeric(metab_df[c])
if external_metab != None and external_micro != None:
external_samples = np.intersect1d(external_metab_df.columns.values, external_micro_df.columns.values)
external_metab_df = external_metab_df[external_samples]
external_micro_df = external_micro_df[external_samples]
for c in external_micro_df.columns:
external_micro_df[c] = pd.to_numeric(external_micro_df[c])
for c in external_metab_df.columns:
external_metab_df[c] = pd.to_numeric(external_metab_df[c])
num_external_samples = len(external_samples)
elif external_micro != None:
external_samples = external_micro_df.columns.values
external_micro_df = external_micro_df[external_samples]
for c in external_micro_df.columns:
external_micro_df[c] = pd.to_numeric(external_micro_df[c])
num_external_samples = len(external_samples)
###################################################
# Create output directory
###################################################
dirName = 'results'
try:
os.mkdir(dirName)
print("Directory " , dirName , " Created ")
except FileExistsError:
print("Directory " , dirName , " already exists")
dirName = 'results/' + out
try:
os.mkdir(dirName)
print("Directory " , dirName , " Created ")
except FileExistsError:
print("Directory " , dirName , " already exists")
dirName = 'results/' + out + "/Images"
try:
os.mkdir(dirName)
print("Directory " , dirName , " Created ")
except FileExistsError:
print("Directory " , dirName , " already exists")
###################################################
# Filter lowly abundant samples
###################################################
to_drop = []
for microbe in micro_df.index.values:
present_in = sum(micro_df.loc[microbe] > 0.0000)
if present_in <= 0.1 * num_samples:
to_drop.append(microbe)
micro_df = micro_df.drop(to_drop, axis=0)
to_drop = []
for metabolite in metab_df.index.values:
present_in = sum(metab_df.loc[metabolite] > 0.0000)
if present_in <= 0.1 * num_samples:
to_drop.append(metabolite)
metab_df = metab_df.drop(to_drop, axis=0)
if external_micro != None:
common_features = np.intersect1d(micro_df.index.values, external_micro_df.index.values)
micro_df = micro_df.loc[common_features]
external_micro_df = external_micro_df.loc[common_features]
if external_metab != None:
common_features = np.intersect1d(metab_df.index.values, external_metab_df.index.values)
metab_df = metab_df.loc[common_features]
external_metab_df = external_metab_df.loc[common_features]
###################################################
# Transform data to Compositional Data
###################################################
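# CLR (centred log-ratio) maps each sample x to log(x_i / g(x)), where g(x) is the
# geometric mean of that sample; a pseudocount of 1 is added below so zero counts
# do not produce log(0).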
# Transform Microbiome Data
if micro_norm == "CLR":
micro_comp_df = pd.DataFrame(data=np.transpose(clr(micro_df.transpose() + 1)),
index=micro_df.index, columns=micro_df.columns)
if external_micro:
external_micro_comp_df = pd.DataFrame(data=np.transpose(clr(external_micro_df.transpose() + 1)),
index=external_micro_df.index, columns=external_micro_df.columns)
elif micro_norm == "RA":
col_sums = micro_df.sum(axis=0)
micro_comp_df = micro_df/col_sums
if external_micro:
col_sums = external_micro_df.sum(axis=0)
external_micro_comp_df = external_micro_df/col_sums
else:
micro_comp_df = micro_df
if external_micro:
external_micro_comp_df = external_micro_df
# Normalize Metabolome Data
if metab_norm == "CLR":
metab_comp_df = pd.DataFrame(data=np.transpose(clr(metab_df.transpose() + 1)),
index=metab_df.index, columns=metab_df.columns)
if external_metab:
external_metab_comp_df = pd.DataFrame(data=np.transpose(clr(external_metab_df.transpose() + 1)),
index=external_metab_df.index, columns=external_metab_df.columns)
elif metab_norm == "RA":
col_sums = metab_df.sum(axis=0)
metab_comp_df = metab_df/col_sums
if external_metab:
col_sums = external_metab_df.sum(axis=0)
external_metab_comp_df = external_metab_df/col_sums
else:
metab_comp_df = metab_df
if external_metab:
external_metab_comp_df = external_metab_df
micro_comp_df = micro_comp_df.transpose()
metab_comp_df = metab_comp_df.transpose()
if external_micro:
external_micro_comp_df = external_micro_comp_df.transpose()
if external_metab:
external_metab_comp_df = external_metab_comp_df.transpose()
###################################################
# Run Cross-Validation on Dataset
###################################################
score_matrices = []
print("Performing %d runs of %d-fold cross-validation" % (num_run_cv, num_cv))
cv_start_time = time.time()
tune_run_time = 0
micro = micro_comp_df.values
metab = metab_comp_df.values
dirName = 'results/' + out + '/CV'
try:
os.mkdir(dirName)
except FileExistsError:
pass
for run in range(0,num_run_cv):
# Set up output directory for CV runs
dirName = 'results/' + out + '/CV/' + str(run)
try:
os.mkdir(dirName)
except FileExistsError:
pass
# Set up CV partitions
kfold = KFold(n_splits=num_cv, shuffle=True)
cv = 0
for train_index, test_index in kfold.split(samples):
# Set up output directory for CV partition run
dirName = 'results/' + out + '/CV/' + str(run) + '/' + str(cv)
try:
os.mkdir(dirName)
except FileExistsError:
pass
# Partition data into training and test sets
train_micro, test_micro = micro[train_index], micro[test_index]
train_metab, test_metab = metab[train_index], metab[test_index]
train_samples, test_samples = samples[train_index], samples[test_index]
# Store training and test set partitioning
train_microbe_df = pd.DataFrame(data=train_micro, index=train_samples, columns=micro_comp_df.columns)
test_microbe_df = pd.DataFrame(data=test_micro, index=test_samples, columns=micro_comp_df.columns)
train_metab_df = pd.DataFrame(data=train_metab, index=train_samples, columns=metab_comp_df.columns)
test_metab_df = pd.DataFrame(data=test_metab, index=test_samples, columns=metab_comp_df.columns)
train_microbe_df.to_csv(dirName + "/train_microbes.csv")
test_microbe_df.to_csv(dirName + "/test_microbes.csv")
train_metab_df.to_csv(dirName + "/train_metabolites.csv")
test_metab_df.to_csv(dirName + "/test_metabolites.csv")
# Log transform data if RA
if micro_norm == "RA" or micro_norm == None:
train_micro = np.log(train_micro + 1)
test_micro = np.log(test_micro + 1)
if metab_norm == "RA" or metab_norm == None:
train_metab = np.log(train_metab + 1)
test_metab = np.log(test_metab + 1)
# Scale data before neural network training
micro_scaler = StandardScaler().fit(train_micro)
train_micro = micro_scaler.transform(train_micro)
test_micro = micro_scaler.transform(test_micro)
metab_scaler = StandardScaler().fit(train_metab)
train_metab = metab_scaler.transform(train_metab)
test_metab = metab_scaler.transform(test_metab)
# Aggregate paired microbiome and metabolomic data
train = (train_micro, train_metab)
test = (test_micro, test_metab)
# Tune hyperparameters if first partition
if tuned == False:
tune_start_time = time.time()
print("Tuning parameters...")
tuned = True
params = tune_MiMeNet(train)
l1 = params['l1']
l2 = params['l2']
num_layer=params['num_layer']
layer_nodes=params['layer_nodes']
dropout=params['dropout']
with open('results/' +out + '/network_parameters.txt', 'w') as outfile:
json.dump(params, outfile)
tune_run_time = time.time() - tune_start_time
print("Tuning run time: " + (str(datetime.timedelta(seconds=(tune_run_time)))))
print("Run: %02d\t\tFold: %02d" % (run + 1, cv + 1), end="\r")
# Construct Neural Network Model
model = MiMeNet(train_micro.shape[1], train_metab.shape[1], l1=l1, l2=l2,
num_layer=num_layer, layer_nodes=layer_nodes, dropout=dropout)
#Train Neural Network Model
model.train(train)
# Predict on test set
p = model.test(test)
inv_p = metab_scaler.inverse_transform(p)
if metab_norm == "RA" or metab_norm == None:
inv_p = np.exp(inv_p) - 1
inv_p = inv_p/np.sum(inv_p)
score_matrices.append(model.get_scores())
        prediction_df = pd.DataFrame(data=inv_p, index=test_samples, columns=metab_comp_df.columns)
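        # prediction_df holds the held-out fold's metabolite predictions mapped back to
        # the original scale (inverse standardisation, plus exp(x) - 1 and renormalisation
        # when relative-abundance normalisation was used).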
#encoding=utf-8
from nltk.corpus import stopwords
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.linear_model import Ridge
from scipy.sparse import hstack, csr_matrix
import pandas as pd
import numpy as np
import lightgbm as lgb
import matplotlib.pyplot as plt
import gc, re
from sklearn.utils import shuffle
from contextlib import contextmanager
from sklearn.externals import joblib
import time
start_time=time.time()
print("Starting job at time:",time.time())
debug = False
print("loading data ...")
used_cols = ["item_id", "user_id"]
if debug == False:
train_df = pd.read_csv("../input/train.csv", parse_dates = ["activation_date"])
y = train_df["deal_probability"]
test_df = pd.read_csv("../input/test.csv", parse_dates = ["activation_date"])
# suppl
train_active = pd.read_csv("../input/train_active.csv", usecols=used_cols)
test_active = pd.read_csv("../input/test_active.csv", usecols=used_cols)
train_periods = pd.read_csv("../input/periods_train.csv", parse_dates=["date_from", "date_to"])
test_periods = pd.read_csv("../input/periods_test.csv", parse_dates=["date_from", "date_to"])
else:
train_df = pd.read_csv("../input/train.csv", parse_dates=["activation_date"])
nrows = 10000 * 1
train_df = shuffle(train_df, random_state=1234);
train_df = train_df.iloc[:nrows]
y = train_df["deal_probability"]
test_df = pd.read_csv("../input/test.csv", nrows=nrows, parse_dates=["activation_date"])
# suppl
train_active = pd.read_csv("../input/train_active.csv", nrows=nrows, usecols=used_cols)
    test_active = pd.read_csv("../input/test_active.csv", nrows=nrows, usecols=used_cols)
# -*- coding: utf-8 -*-
"""Functions for importing data"""
import io, re, datetime, warnings
import xml.etree.ElementTree
import numpy as np
import pandas as pd
import xarray as xr
class MultipleScansException(Exception):
def __init__(self, value):
self.parameter = value
def __str__(self):
return repr(self.parameter)
class NoScansException(Exception):
def __init__(self, value):
self.parameter = value
def __str__(self):
return repr(self.parameter)
class ScanNotFoundException(Exception):
def __init__(self, value):
self.parameter = value
def __str__(self):
return repr(self.parameter)
def lidar_from_csv(rws, sequences=None, scans=None, scan_id=None, wind=None, attrs=None):
"""create a lidar object from Nathan's csv files"""
# start with the scan info
if scans is not None:
scan_file_xml = xml.etree.ElementTree.parse(scans).getroot()
if len(scan_file_xml) == 0:
raise NoScansException('no scans listed in the scan.xml file')
elif scan_id is not None:
# get the scan ID's
scan_ids = list(map(lambda x: int(x.get('id')), scan_file_xml.findall('lidar_scan')))
# get the corresponding scan info
try:
scan_index = scan_ids.index(scan_id)
except ValueError:
raise ScanNotFoundException('scan not found in scan.xml file')
scan_xml = scan_file_xml[scan_index]
else:
if len(scan_file_xml) > 1:
raise MultipleScansException('must provide a scan_id if file contains multiple scanning modes')
scan_xml = scan_file_xml[0]
scan_info = scan_xml[1][2][0].attrib
# add prefix 'scan' to all scan keys
scan_info = { 'scan_' + key: value for (key, value) in scan_info.items() }
# add scan info to the lidar attributes
if attrs is None:
attrs = scan_info
else:
attrs.update(scan_info)
else:
scan = None
# do this differently depending on the software version
try:
dtypes = {'Timestamp': str, 'Configuration ID': int,
'Scan ID': int, 'LOS ID': int, 'Azimuth [°]': float,
'Elevation [°]': float, 'Range [m]': float, 'RWS [m/s]': float,
'DRWS [m/s]': float, 'CNR [db]': float, 'Confidence Index [%]': float,
'Mean Error': float, 'Status': bool}
csv = pd.read_csv(rws, parse_dates=['Timestamp'], dtype=dtypes)
name_dict = {'Timestamp': 'Time', 'RWS [m/s]': 'RWS', 'DRWS [m/s]': 'DRWS', 'CNR [db]': 'CNR',
'Range [m]': 'Range', 'Configuration ID': 'Configuration', 'LOS ID': 'LOS',
'Azimuth [°]': 'Azimuth', 'Elevation [°]': 'Elevation'}
if 'Confidence Index [%]' in csv.columns:
name_dict['Confidence Index [%]'] = 'Confidence'
if 'Mean Error' in csv.columns:
name_dict['Mean Error'] = 'Error'
csv.rename(columns=name_dict, inplace=True)
profile_vars = ['LOS', 'Configuration', 'Azimuth', 'Elevation']
except ValueError:
# this happens if we're looking at a newer version of the data
# file using semicolons as separators
dtypes = {'Timestamp': str, 'Settings ID': int, 'Resolution ID': int,
'Scan ID': int, 'LOS ID': int, 'Sequence ID': int, 'Azimuth [°]': float,
'Elevation [°]': float, 'Range [m]': float, 'Radial Wind Speed [m/s]': float,
'Dispersion Radial Wind Speed [m/s]': float, 'CNR [dB]': float,
'Confidence Index [%]': float, 'Mean Error': float, 'Status': bool}
csv = pd.read_csv(rws, sep=';', parse_dates=['Timestamp'], dtype=dtypes)
name_dict = {'Timestamp': 'Time', 'Radial Wind Speed [m/s]': 'RWS',
'Dispersion Radial Wind Speed [m/s]': 'DRWS', 'CNR [dB]': 'CNR',
'Range [m]': 'Range', 'Configuration ID': 'Configuration', 'LOS ID': 'LOS',
'Azimuth [°]': 'Azimuth', 'Elevation [°]': 'Elevation',
'Settings ID': 'Settings', 'Resolution ID': 'Resolution',
'Confidence Index [%]': 'Confidence', 'Mean Error': 'Error',
'Sequence ID': 'Sequence'}
csv.rename(columns=name_dict, inplace=True)
profile_vars = ['LOS', 'Settings', 'Resolution', 'Azimuth', 'Elevation', 'Sequence']
if scan_id is not None:
csv = csv.loc[csv['Scan ID'] == scan_id]
# check that there's still data here:
# ...
# organize the data
data = csv.drop(profile_vars, 1).pivot(index='Time', columns='Range')
data.index = pd.to_datetime(data.index)
# these fields will be variables in the xarray object
# remove columns that don't exist in the csv file (for example, if not using the whole radial wind data)
measurement_vars = ['RWS', 'DRWS', 'CNR', 'Confidence', 'Error', 'Status']
measurement_vars = list(set(measurement_vars) & set(csv.columns))
# add sequences if we got a sequences.csv file
if sequences is not None:
seq_csv = | pd.read_csv(sequences, parse_dates=[3, 4]) | pandas.read_csv |
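# --- Illustrative sketch (not part of the original module) ---
# A minimal, self-contained example of the pivot step used above: long-format rows
# (one per Time/Range pair) are reshaped into a wide frame whose columns carry a
# (variable, Range) MultiIndex. The toy values below are made up.
import pandas as pd

toy = pd.DataFrame({
    'Time': pd.to_datetime(['2020-01-01 00:00', '2020-01-01 00:00',
                            '2020-01-01 00:01', '2020-01-01 00:01']),
    'Range': [50, 100, 50, 100],
    'RWS': [1.2, 1.5, 1.1, 1.6],
    'CNR': [-20.0, -22.0, -19.5, -21.0],
})
wide = toy.pivot(index='Time', columns='Range')
print(wide['RWS'][50])  # RWS time series at the 50 m range gate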
#!/usr/bin/env python
# coding: utf-8
# - Edge weight is inferred by GNNExplainer and node importance is given by five eBay annotators. Not every annotator has annotated each node.
# - Seed is the txn to explain.
# - id is the community id.
import os
import pickle
import math
from tqdm.auto import tqdm
import random
import pandas as pd
import numpy as np
import networkx as nx
import itertools
from collections import Counter
import scipy.stats
import sklearn
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from scipy.stats import ks_2samp
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--random-draw', action = 'store', dest = 'random_draw', type = int, default = 100, help = 'Random draws to break the tie in ranking topk edges.')
parser.add_option('--edge-agg', action = 'store', dest = 'edge_agg', default = 'avg', choices = ['avg', 'min', 'sum'], help = 'Aggregation method to compute edge importance score based on the node importance scores.')
parser.add_option('--type-centrality', action = 'store', dest = 'type_centrality', default = 'edge_betweenness_centrality', choices = ['edge_betweenness_centrality', 'edge_current_flow_betweenness_centrality', 'edge_load_centrality'], help = 'Edge centrality calculation method.')
(options, args) = parser.parse_args()
print ("Options:", options)
# Load in the annotation file, the data seed, the edge weights by explainer, the edges in the communities.
DataNodeImp = pd.read_csv('../05GNNExplainer-eval-hitrate/input/annotation_publish.csv')
DataSeed = pd.read_csv('../05GNNExplainer-eval-hitrate/input/data-seed.txt')
DataEdgeWeight = | pd.read_csv('../05GNNExplainer-eval-hitrate/input/data-edge-weight.txt') | pandas.read_csv |
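# --- Illustrative sketch (not part of the original script) ---
# How the --edge-agg option could map annotator node importances to an edge score:
# an edge (u, v) is scored by aggregating its two endpoint scores with 'avg', 'min'
# or 'sum'. This helper is hypothetical and only illustrates the three choices; it
# is not the code used to produce the published evaluation.
def edge_score(node_imp_u, node_imp_v, edge_agg='avg'):
    if edge_agg == 'avg':
        return (node_imp_u + node_imp_v) / 2.0
    if edge_agg == 'min':
        return min(node_imp_u, node_imp_v)
    if edge_agg == 'sum':
        return node_imp_u + node_imp_v
    raise ValueError('unknown edge_agg: %s' % edge_agg)

# e.g. edge_score(0.8, 0.4, options.edge_agg)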
# -*- coding: utf-8 -*-
"""
@author: hkaneko
"""
import numpy as np
import pandas as pd
import sample_functions
from sklearn import svm
ocsvm_nu = 0.003  # ν in OCSVM: lower bound on the fraction of support vectors relative to the number of training samples
ocsvm_gammas = 2 ** np.arange(-20, 11, dtype=float)  # candidate values of γ
dataset = | pd.read_csv('unique_m.csv', index_col=-1) | pandas.read_csv |
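# --- Illustrative sketch (not part of the original script) ---
# One typical way the candidate γ values above are used: fit a one-class SVM with
# the RBF kernel for a chosen γ and flag samples predicted as -1 as outliers.
# The matrix X below is a toy stand-in; the original script's criterion for picking
# γ from ocsvm_gammas is not reproduced here.
from sklearn import svm

X = np.random.RandomState(0).normal(size=(200, 5))  # toy standardized features
model = svm.OneClassSVM(kernel='rbf', gamma=float(ocsvm_gammas[15]), nu=ocsvm_nu)
model.fit(X)
outlier_flags = model.predict(X) == -1  # True where a sample is flagged as an outlier
print(int(outlier_flags.sum()), 'samples flagged')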
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 30 12:33:58 2018
@author: michaelek
"""
import os
import pandas as pd
from hilltoppy import web_service as ws
from hilltoppy.util import convert_site_names
from pyhydrotel import get_ts_data, get_sites_mtypes
from pdsql import mssql
from time import sleep
import yaml
import util
pd.set_option('display.max_columns', 10)
pd.set_option('display.max_rows', 30)
run_time_start = pd.Timestamp.today()
######################################
### Parameters
base_dir = os.path.realpath(os.path.dirname(__file__))
with open(os.path.join(base_dir, 'parameters.yml')) as param:
param = yaml.safe_load(param)
if run_time_start.hour < 12:
to_date = run_time_start.floor('D') - pd.DateOffset(hours=12)
else:
to_date = run_time_start.floor('D') + pd.DateOffset(hours=12)
from_date = (to_date - pd.DateOffset(days=3)).floor('D')
allo_csv = 'above_66401_allo_2020-08-12.csv'
#from_date = pd.Timestamp('2019-07-01 00:30:00')
#to_date = pd.Timestamp('2019-02-03')
try:
######################################
### Get detided data
tsdata = get_ts_data(param['Output']['hydrotel_server'], 'hydrotel', param['Input']['detided_mtype'], str(param['Input']['site']), str(from_date), str(to_date), None).droplevel([0, 1])
tsdata.name = 'detided'
to_date = tsdata.index.max()
try:
#####################################
### Determine the Wap usage ratios
up_takes1 = pd.read_csv(os.path.join(base_dir, allo_csv))
up_takes2 = up_takes1[up_takes1.AllocatedRate > 0].copy()
up_takes2['AllocatedRateSum'] = up_takes2.groupby('Wap')['AllocatedRate'].transform('sum')
up_takes2['AllocatedRateRatio'] = up_takes2['AllocatedRate']/up_takes2['AllocatedRateSum']
wap_ratios = up_takes2[up_takes2.HydroGroup == 'Surface Water'].groupby('Wap')['AllocatedRateRatio'].sum()
wap_ratios.index.name = 'ExtSiteID'
####################################
### Pull out the Hilltop usage data
## Determine the sites available in Hilltop
ht_sites = ws.site_list(param['Input']['hilltop_base_url'], param['Input']['hilltop_hts'])
ht_sites['Wap'] = convert_site_names(ht_sites.SiteName)
ht_sites1 = ht_sites[ht_sites['Wap'].isin(wap_ratios.index) & ~ht_sites['Wap'].isin(param['Input']['browns_rock_waps'])].copy()
ht_sites1.rename(columns={'SiteName': 'Site'}, inplace=True)
mtype_list = []
for site in ht_sites1.Site:
timer = 10
while timer > 0:
try:
m1 = ws.measurement_list(param['Input']['hilltop_base_url'], param['Input']['hilltop_hts'], site)
break
except Exception as err:
err1 = err
timer = timer - 1
if timer == 0:
raise ValueError(err1)
else:
print(err1)
sleep(3)
mtype_list.append(m1)
mtypes = pd.concat(mtype_list).reset_index()
mtypes1 = mtypes[mtypes.To >= from_date]
mtypes2 = mtypes1[~mtypes1.Measurement.str.contains('regularity', case=False)].sort_values('To').drop_duplicates('Site', keep='last')
## Pull out the usage data and process
tsdata_list = []
for i, row in mtypes2.iterrows():
timer = 10
while timer > 0:
try:
t1 = ws.get_data(param['Input']['hilltop_base_url'], param['Input']['hilltop_hts'], row['Site'], row['Measurement'], str(from_date), str(row['To']))
break
except Exception as err:
err1 = err
timer = timer - 1
if timer == 0:
raise ValueError(err1)
else:
print(err1)
sleep(3)
tsdata_list.append(t1)
tsdata1 = pd.concat(tsdata_list)
tsdata2 = util.proc_ht_use_data_ws(tsdata1)
## Apply WAP ratios
tsdata2a = pd.merge(tsdata2.reset_index(), wap_ratios.reset_index(), on='ExtSiteID')
tsdata2a['Rate'] = tsdata2a['AllocatedRateRatio'] * tsdata2a['Value']
tsdata2b = tsdata2a.set_index(['ExtSiteID', 'DateTime'])[['Rate']]
## Reformat and aggregate to sngle time series
tsdata3 = tsdata2b.unstack(0)[:to_date].droplevel(0, axis=1)
other_ts = tsdata3.ffill().sum(axis=1)/15/60
other_ts.name = 'other'
except Exception as err:
print('*Extraction of water usage data failed')
print(err)
alt_dates = pd.date_range(from_date, to_date, freq='15T')
other_ts = pd.Series(0, index=alt_dates, name='other')
other_ts.index.name = 'DateTime'
#############################################
### Browns Rock data
br_ts = get_ts_data(param['Input']['hydrotel_server'], 'hydrotel', sites=param['Input']['browns_rock_site'], mtypes=param['Input']['browns_rock_mtype'], from_date=str(from_date), to_date=str(to_date), resample_code=None).droplevel([0, 1])
br_ts.name = 'br'
#############################################
### Combine all datasets
combo1 = | pd.concat([tsdata, other_ts, br_ts], axis=1) | pandas.concat |
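# --- Illustrative sketch (not part of the original script) ---
# pd.concat(..., axis=1) aligns the series on their DateTime index and leaves NaN
# where one of them has no sample at a given timestamp. Toy example:
import pandas as pd

idx_a = pd.date_range('2020-01-01 00:00', periods=3, freq='15T')
idx_b = pd.date_range('2020-01-01 00:15', periods=3, freq='15T')
a = pd.Series([1.0, 2.0, 3.0], index=idx_a, name='detided')
b = pd.Series([10.0, 20.0, 30.0], index=idx_b, name='other')
print(pd.concat([a, b], axis=1))  # union of the two indexes, NaN where missing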
import pandas as pd
import numpy as np
from keras.models import load_model
from sklearn.metrics import roc_curve, roc_auc_score, auc, precision_recall_curve, average_precision_score
import os
import pickle
from scipy.special import softmax
from prg import prg
class MetricsGenerator(object):
def __init__(self, dataset_dir, model_dir, metrics_dir):
self._model_dir = model_dir
self._metrics_dir = metrics_dir
self._train_x = pd.read_csv(dataset_dir + "train_x.csv")
self._test_x = pd.read_csv(dataset_dir + "test_x.csv")
self._train_x = self._train_x.drop(self._train_x.columns[0], axis=1)
self._test_x = self._test_x.drop(self._test_x.columns[0], axis=1)
self._train_y = pd.read_csv(dataset_dir + "train_y.csv")
self._test_y = pd.read_csv(dataset_dir + "test_y.csv")
def generate_metrics_for_model(self, model):
error_df = self.get_error_df(model)
roc_df, roc_auc_df = self.get_roc_and_auc_df(error_df)
precision_recall_df, precision_recall_auc_df, average_precision_score_df = self.get_precision_recall_and_auc_df(error_df)
prg_df, prg_auc_df = self.get_prg_and_auc_df(error_df)
history_df = self.get_history_df(model)
self.create(self._metrics_dir + "model" + str(model))
self.store_df("error_df", model,error_df)
self.store_df("roc_df", model, roc_df)
self.store_df("roc_auc_df", model, roc_auc_df)
self.store_df("precision_recall_df", model, precision_recall_df)
self.store_df("precision_recall_auc_df", model, precision_recall_auc_df)
self.store_df("average_precision_score_df", model, average_precision_score_df)
self.store_df("prg_df", model, prg_df)
self.store_df("prg_auc_df", model, prg_auc_df)
self.store_df("history_df", model, history_df)
def get_error_df(self, model):
model = load_model(self._model_dir + "model" + str(model) + ".h5")
test_x_predicted = model.predict(self._test_x)
mse = np.mean(np.power(self._test_x - test_x_predicted, 2), axis = 1)
error_df = pd.DataFrame({'Reconstruction_error':mse, 'True_values': self._test_y['target']})
return error_df
def get_roc_and_auc_df(self, error_df):
false_pos_rate, true_pos_rate, thresholds = roc_curve(error_df.True_values, error_df.Reconstruction_error)
i = np.arange(len(true_pos_rate))
roc_df = pd.DataFrame({'FPR': pd.Series(false_pos_rate, index=i), 'TPR': pd.Series(true_pos_rate, index=i), 'Threshold': pd.Series(thresholds, index=i)})
roc_auc = roc_auc_score(error_df.True_values, error_df.Reconstruction_error)
i = np.arange(1)
roc_auc_df = pd.DataFrame({'AUC': pd.Series(roc_auc, index=i)})
return roc_df, roc_auc_df
def get_precision_recall_and_auc_df(self, error_df):
precision, recall, thresholds = precision_recall_curve(error_df.True_values, error_df['Reconstruction_error'])
precision = precision[:-1]
recall = recall[:-1]
i = np.arange(len(precision))
precision_recall_df = pd.DataFrame({'Precision': pd.Series(precision, index=i), 'Recall':pd.Series(recall, index=i), 'Threshold':pd.Series(thresholds, index=i)})
i = np.arange(1)
precision_recall_auc = auc(recall, precision)
precision_recall_auc_df = pd.DataFrame({'AUC': | pd.Series(precision_recall_auc, index=i) | pandas.Series |
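# --- Illustrative sketch (not part of the original class) ---
# The metrics above treat the autoencoder's reconstruction error as an anomaly
# score: a higher error should indicate a more likely positive. A minimal example
# of that scoring pattern with made-up labels and errors:
import numpy as np
from sklearn.metrics import roc_auc_score

true_values = np.array([0, 0, 1, 0, 1, 1])
reconstruction_error = np.array([0.1, 0.2, 0.9, 0.3, 0.7, 0.8])
print(roc_auc_score(true_values, reconstruction_error))  # 1.0 for this toy case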
#!/usr/bin/env python
import math
from Bio import SeqIO
import pandas as pd
import sys
import matplotlib.pyplot as plt
import logomaker as lm
###
dna = {'A': [1, 0, 0, 0, 0], 'C': [0, 1, 0, 0, 0], 'G': [0, 0, 1, 0, 0], 'T': [0, 0, 0, 1, 0], '-': [0, 0, 0, 0, 1], 'Y': [0, 0.5, 0, 0.5, 0], 'K': [0, 0, 0.5, 0.5, 0], 'S': [0, 0.5, 0.5, 0, 0], 'M': [0.5, 0.5, 0, 0, 0], 'R': [0.5, 0, 0.5, 0, 0], 'W': [0.5, 0, 0, 0.5, 0], 'V': [0.3333, 0.3333, 0.3333, 0, 0], 'H': [0.3333, 0.3333, 0, 0.3333, 0], 'D': [0.3333, 0, 0.3333, 0.3333, 0], 'B': [0, 0.3333, 0.3333, 0.3333, 0], 'N': [0.25, 0.25, 0.25, 0.25, 0]}
dna_df = pd.DataFrame(dna, index=['A','C','G','T','-'])
# print(dna_df)
file = sys.argv[1]
count_df = | pd.DataFrame({0: [0, 0, 0, 0, 0]}, index=['A','C','G','T','-']) | pandas.DataFrame |
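# --- Illustrative sketch (not part of the original script) ---
# One way the per-position count matrix is commonly accumulated: for each aligned
# sequence, add the dna_df column of the observed symbol to the counts at that
# position (ambiguity codes contribute fractional counts), then hand the transposed
# position-by-letter matrix to logomaker. The sequences below are made up; the
# original script reads its sequences from `file` with Bio.SeqIO.
toy_seqs = ['ACGT-', 'ACGTA', 'ACGTT']
counts = pd.DataFrame(0.0, index=['A', 'C', 'G', 'T', '-'],
                      columns=range(len(toy_seqs[0])))
for seq in toy_seqs:
    for pos, symbol in enumerate(seq):
        counts[pos] += dna_df[symbol]
logo = lm.Logo(counts.T.drop(columns='-'))  # positions as rows, letters as columns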
import warnings
import numpy as np
import pandas as pd
import scipy.ndimage
import skimage
import matplotlib._contour
from matplotlib.pyplot import get_cmap as mpl_get_cmap
import bokeh.models
import bokeh.palettes
import bokeh.plotting
import altair as alt
def _outliers(data):
bottom, middle, top = np.percentile(data, [25, 50, 75])
iqr = top - bottom
top_whisker = min(top + 1.5*iqr, data.max())
bottom_whisker = max(bottom - 1.5*iqr, data.min())
outliers = data[(data > top_whisker) | (data < bottom_whisker)]
return outliers
def _box_and_whisker(data):
middle = data.median()
bottom = data.quantile(0.25)
top = data.quantile(0.75)
iqr = top - bottom
top_whisker = min(top + 1.5*iqr, data.max())
bottom_whisker = max(bottom - 1.5*iqr, data.min())
return pd.Series({'middle': middle,
'bottom': bottom,
'top': top,
'top_whisker': top_whisker,
'bottom_whisker': bottom_whisker})
def _jitter(x, jitter_width=0.2):
"""Make x-coordinates for a jitter plot."""
return (pd.Categorical(x).codes
+ np.random.uniform(low=-jitter_width,
high=jitter_width,
size=len(x)))
def _convert_data(data, inf_ok=False, min_len=1):
"""
Convert inputted 1D data set into NumPy array of floats.
All nan's are dropped.
Parameters
----------
data : int, float, or array_like
Input data, to be converted.
inf_ok : bool, default False
If True, np.inf values are allowed in the arrays.
min_len : int, default 1
Minimum length of array.
Returns
-------
output : ndarray
`data` as a one-dimensional NumPy array, dtype float.
"""
# If it's scalar, convert to array
if np.isscalar(data):
        data = np.array([data], dtype=float)
    # Convert data to NumPy array
    data = np.array(data, dtype=float)
# Make sure it is 1D
if len(data.shape) != 1:
raise RuntimeError('Input must be a 1D array or Pandas series.')
# Remove NaNs
data = data[~np.isnan(data)]
# Check for infinite entries
if not inf_ok and np.isinf(data).any():
raise RuntimeError('All entries must be finite.')
# Check to minimal length
if len(data) < min_len:
raise RuntimeError('Array must have at least {0:d} non-NaN entries.'.format(min_len))
return data
def ecdf_vals(data, formal=False, x_min=None, x_max=None):
"""Get x, y, values of an ECDF for plotting.
Parameters
----------
data : ndarray
One dimensional Numpay array with data.
formal : bool, default False
If True, generate x and y values for formal ECDF (staircase). If
False, generate x and y values for ECDF as dots.
x_min : float, 'infer', or None
Minimum value of x to plot. If 'infer', use a 5% buffer. Ignored
if `formal` is False.
x_max : float, 'infer', or None
Maximum value of x to plot. If 'infer', use a 5% buffer. Ignored
if `formal` is False.
Returns
-------
x : ndarray
x-values for plot
y : ndarray
y-values for plot
"""
x = np.sort(data)
y = np.arange(1, len(data)+1) / len(data)
if formal:
# Set up output arrays
x_formal = np.empty(2*(len(x) + 1))
y_formal = np.empty(2*(len(x) + 1))
# y-values for steps
y_formal[:2] = 0
y_formal[2::2] = y
y_formal[3::2] = y
# x- values for steps
x_formal[0] = x[0]
x_formal[1] = x[0]
x_formal[2::2] = x
x_formal[3:-1:2] = x[1:]
x_formal[-1] = x[-1]
# Put lines at y=0
if x_min is not None:
if x_min == 'infer':
x_min = x.min() - (x.max() - x.min())*0.05
elif x_min > x.min():
raise RuntimeError('x_min > x.min().')
x_formal = np.concatenate(((x_min,), x_formal))
y_formal = np.concatenate(((0,), y_formal))
# Put lines at y=y.max()
if x_max is not None:
if x_max == 'infer':
x_max = x.max() + (x.max() - x.min())*0.05
elif x_max < x.max():
raise RuntimeError('x_max < x.max().')
x_formal = np.concatenate((x_formal, (x_max,)))
y_formal = np.concatenate((y_formal, (y.max(),)))
return x_formal, y_formal
else:
return x, y
def ecdf_y(data):
"""Give y-values of an ECDF for an unsorted column in a data frame.
Parameters
----------
data : Pandas Series
Series (or column of a DataFrame) from which to generate ECDF
values
Returns
-------
output : Pandas Series
Corresponding y-values for an ECDF when plotted with dots.
Notes
-----
.. This only works for plotting an ECDF with points, not for formal
ECDFs
"""
return data.rank(method='first') / len(data)
def ecdf_dataframe(data=None, x=None, color=None, formal=False):
"""Generate a DataFrame that can be used for plotting ECDFs.
Parameters
----------
data : Pandas DataFrame
A tidy data frame.
x : valid column name of Pandas DataFrame
Column of data frame containing values to use in ECDF plot.
color : valid column name of Pandas DataFrame or list of column
names
Column(s) of DataFrame to use for grouping the data. A unique
set of ECDF values is made for each. If None, no groupby
operations are performed and a single ECDF is generated.
formal : bool, default False
If True, generate x and y values for formal ECDF (staircase). If
False, generate x and y values for ECDF as dots.
Returns
-------
output : Pandas DataFrame
Pandas DataFrame with two or three columns.
x : Column named for inputted `x`, data values.
'ECDF': Values for y-values for plotting the ECDF
color : Keys for groups. Omitted if `color` is None.
"""
if data is None:
raise RuntimeError('`data` must be specified.')
if x is None:
raise RuntimeError('`x` must be specified.')
# Determine ranges of plots
if formal:
data_min = data[x].min()
data_max = data[x].max()
x_min = data_min - (data_max - data_min) * 0.05
x_max = data_max + (data_max - data_min) * 0.05
else:
x_min = None
x_max = None
if color is None:
x_ecdf, y_ecdf = ecdf_vals(data[x].values,
formal=formal,
x_min=x_min,
x_max=x_max)
return pd.DataFrame({x: x_ecdf, 'ECDF': y_ecdf})
else:
grouped = data.groupby(color)
df_list = []
for g in grouped:
if type(g[0]) == tuple:
cat = ', '.join([str(c) for c in g[0]])
else:
cat = g[0]
x_ecdf, y_ecdf = ecdf_vals(g[1][x],
formal=formal,
x_min=x_min,
x_max=x_max)
df_list.append(pd.DataFrame(data={color: [cat]*len(x_ecdf),
x: x_ecdf,
'ECDF': y_ecdf}))
return | pd.concat(df_list, ignore_index=True) | pandas.concat |
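# --- Illustrative usage sketch (not part of the original module) ---
# ecdf_dataframe() returns a tidy frame ready for a plotting library. Toy example
# with a made-up measurement column and two groups:
toy = pd.DataFrame({
    'strain': ['wt'] * 50 + ['mut'] * 50,
    'intensity': np.concatenate([np.random.RandomState(1).normal(1.0, 0.1, 50),
                                 np.random.RandomState(2).normal(1.3, 0.1, 50)]),
})
ecdf_df = ecdf_dataframe(data=toy, x='intensity', color='strain', formal=False)
print(ecdf_df.head())  # columns: strain, intensity, ECDF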
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from . import registry
from .. import runs, files
from logging import getLogger
log = getLogger(__name__)
def array(run, channel):
return registry.reader(run, channel).array()
def pandas(run, channel, field=None, rule='60s', **kwargs):
r = registry.reader(run, channel)
if not r.ready():
raise ValueError(f'Reader for "{run}" "{channel}" is not ready')
if rule is None:
df = r.pandas()
else:
df = r.resample(rule, **kwargs)
if field is not None:
df = df[field]
return df
def compare(rs, channel, *args, query='', **kwargs):
if not isinstance(channel, str):
        return pd.concat({c: compare(rs, c, *args, query=query, **kwargs) for c in channel}, axis=1)
rs = [rs] if isinstance(rs, str) else rs
ns = [n for r in rs for n in (runs.pandas(r).query(query) if query else runs.pandas(r)).index]
df = {}
for n in ns:
try:
df[n] = pandas(n, channel, *args, **kwargs)
except OSError:
log.info(f'Couldn\'t find data for "{n}"')
    return pd.concat(df, axis=1)
def plot(*args, ffill=False, skip=None, head=None, **kwargs):
df = compare(*args, **kwargs)
desc = runs.pandas().loc[df.columns, 'description'].fillna('')
df.columns = [f'{c}: {desc[c]}' for c in df.columns]
if ffill:
df = df.ffill().where(df.bfill().notnull())
ax = df.iloc[skip:head].plot()
ax.grid(True)
return ax
def periodic(*args, period=900, **kwargs):
from IPython import display
epoch = pd.Timestamp('2020-1-1')
last = 0
ax = None
while True:
now = pd.Timestamp.now()
new = int((now - epoch).total_seconds())//period
if new > last:
display.clear_output(wait=True)
if ax is not None:
plt.close(ax.figure)
ax = plot(*args, **kwargs)
ax.set_title(f'{now:%Y-%m-%d %H:%M:%S}')
display.display(ax.figure)
last = new
else:
time.sleep(1)
def purge(minlen=300, cutoff=900):
from tqdm.auto import tqdm
for r in tqdm(runs.runs()):
try:
start = pd.to_datetime(runs.info(r)['_created'])
end = start
for f in files.files(r):
mtime = pd.Timestamp(files.path(r, f).lstat().st_mtime, unit='s', tz='UTC')
end = max(end, mtime)
length = end - start
cut = | pd.Timestamp.now('UTC') | pandas.Timestamp.now |
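# --- Illustrative sketch (not part of the original module) ---
# The `rule` argument used by pandas()/compare() above (e.g. '60s') looks like a
# pandas resample frequency passed through to the reader; as a reference point,
# this is how plain pandas resampling with such a rule behaves. Toy example:
idx = pd.date_range('2020-01-01', periods=120, freq='1s')
raw = pd.Series(np.arange(120.0), index=idx)
print(raw.resample('60s').mean())  # two rows, one per minute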
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
concat,
date_range,
)
import pandas._testing as tm
class TestEmptyConcat:
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
{"A": range(10000)}, index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_empty_series(self):
# GH 11082
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = concat([s1, s2], axis=0)
# name will be reset
exp = Series([1, 2, 3])
tm.assert_series_equal(res, exp)
# empty Series with no name
s1 = Series([1, 2, 3], name="x")
s2 = Series(name=None, dtype="float64")
res = concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
columns=["x", 0],
index= | Index([0, 1, 2], dtype="O") | pandas.Index |
import pandas as pd
import datetime
def formatTopStocks(top):
top_data = {"code": [], "name": [], "increase": [], "price": [],
"totalCirculationValue": [], "volume": [], "mainNet": [],
"mainBuy": [], "mainSell": [], "concept": []}
for t in top:
top_data['code'].append(t[0])
top_data['name'].append(t[1])
top_data['increase'].append(t[3])
top_data['price'].append(t[2])
top_data['totalCirculationValue'].append(t[7])
top_data['volume'].append(t[4])
top_data['mainNet'].append(t[10])
top_data['mainBuy'].append(t[8])
top_data['mainSell'].append(t[9])
top_data['concept'].append(t[12])
df = | pd.DataFrame(top_data) | pandas.DataFrame |
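# --- Illustrative usage sketch (not part of the original module) ---
# Each element of `top` is expected to be an indexable record whose positions
# 0-12 carry the fields unpacked above; the single made-up row below shows the
# assumed mapping.
sample_top = [(
    '600000',      # t[0]  code
    'Example Co',  # t[1]  name
    10.5,          # t[2]  price
    2.3,           # t[3]  increase
    123456,        # t[4]  volume
    None, None,    # t[5], t[6]  not used by formatTopStocks
    9.8e9,         # t[7]  totalCirculationValue
    1.2e7,         # t[8]  mainBuy
    1.0e7,         # t[9]  mainSell
    2.0e6,         # t[10] mainNet
    None,          # t[11] not used by formatTopStocks
    'bank',        # t[12] concept
)]
formatTopStocks(sample_top)  # fills top_data and builds the DataFrame from it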
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %%capture
# Compile and import local pyrossgeo module
import os, sys
owd = os.getcwd()
os.chdir('../../')
sys.path.insert(0,'../../')
# !python setup.py build_ext --inplace
os.chdir(owd)
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pyrossgeo
import pandas as pd
import json
# -
# # Generate the configuration files
# ### Define model
# +
model = {
"settings" : {
"classes" : ["S", "E", "A", "I", "R"],
"stochastic_threshold_from_below" : [1000, 1000, 1000, 1000, 1000],
"stochastic_threshold_from_above" : [500, 500, 500, 500, 500],
"infection_scaling" : "powerlaw",
"infection_scaling_parameters" : [0, 0.004, 0.5] # a + b * rho^c
},
"S" : {
"linear" : [],
"infection" : [ ["I", "-betaI"], ["A", "-betaA"] ]
},
"E" : {
"linear" : [ ["E", "-gammaE"] ],
"infection" : [ ["I", "betaI"], ["A", "betaA"] ]
},
"A" : {
"linear" : [ ["E", "gammaE"], ["A", "-gammaA"] ],
"infection" : []
},
"I" : {
"linear" : [ ["A", "gammaA"], ["I", "-gammaI"] ],
"infection" : []
},
"R" : {
"linear" : [ ["I", "gammaI"] ],
"infection" : []
}
}
model_classes = model['settings']['classes']
model_dim = len(model_classes)
# -
# ### Configuration generation parameters
#
# Here we define some parameters with which all the configuration files will be generated. Edit these if you want to change the simulation.
# +
sim_config_path = 'london_simulation'
min_num_moving = 20 # Remove all commuting edges where less than `min_num_moving` are moving
# Decide which classes are allowed to commute
allow_class = [
('S', True),
('E', True),
('A', True),
('Ia1', True),
('Ia2', True),
('Ia3', True),
('Is1', True),
('Is2', False),
('Is3', False),
('R', True),
]
# Decide where to seed with infecteds
seed_pop = [
(0, 1, 'E', 100), # Home, age group, model class, seed quantity
(10, 2, 'E', 100),
(23, 0, 'E', 100),
(622, 4, 'E', 100),
(232, 4, 'E', 100)
]
# Node parameters
n_betaI = 0.02
n_betaA = 0.02
n_gammaE = 1/3.0
n_gammaA = 1/3.0
n_gammaI = 1/3.0
# Cnode parameters
cn_betaI = n_betaI
cn_betaA = n_betaA
cn_gammaE = n_gammaE
cn_gammaA = n_gammaA
cn_gammaI = n_gammaI
# Time steps
t_start = 0
t_end = 24*60*100
_, dts = pyrossgeo.utils.get_dt_schedule([
(0, 1*60),
(7*60, 2),
(10*60, 2*60),
(17*60, 2),
(19*60, 2*60)
], end_time=24*60)
# -
# ### Format the commuting network
# +
cn = pd.read_csv("%s/commuter_networks.csv" % sim_config_path)
#### Set which classes are allowed to commute
# Drop the current allow_O columns
cn = cn.iloc[:,:10]
# Set allow settings
for O, allow_O in allow_class:
cn[ "Allow %s" % O ] = 1 if allow_O else 0
# Allow people to return home
cn.loc[ cn['Home'] == cn['To'],"Allow %s" % allow_class[0][0]:] = 1
#### Remove commuting edges where fewer than `min_num_moving` people are commuting
delete_rows = []
for i, row in cn.loc[ cn['Home'] == cn['From'] ].iterrows():
if row['# to move'] < min_num_moving:
delete_rows.append(i)
delete_rows.append(i+1) # Delete the returning commuting edge as well
cn = cn.reset_index()
cn = cn.drop(delete_rows)
cn = cn.drop(columns='index')
cn.loc[cn['ct1'] == cn['ct2'], 'ct2'] += 0.1
cn.head()
# -
# ### Populate the network
# Our `node_populations.csv` currently only has the total population for each age group at each node. In order to use it for the simulation, we must populate it with the model classes, as well as seed some infections.
tot_pop = pd.read_csv("%s/node_populations.csv" % sim_config_path)
tot_pop.head()
# +
# Create all model classes, and set everyone to be susceptible
npop = pd.DataFrame()
npop['Home'] = tot_pop['Home']
npop['Location'] = tot_pop['Location']
for _cn, _cd in tot_pop.iloc[:,2:].iteritems():
for O in model['settings']['classes']:
npop["%s%s" % (O, _cn[1:])] = 0
npop["%s%s" % ("S", _cn[1:])] = _cd
# Seed with infecteds
for home, age, O, seed_quantity in seed_pop:
row_i = npop[npop['Home'] == home].index[0]
col_i = 2 + age*model_dim
S = npop.iloc[row_i,col_i]
npop.iloc[row_i, col_i + model_classes.index('E')] = seed_quantity
npop.iloc[row_i, col_i] -= seed_quantity
# -
# ### Setting the node and cnode parameters
# We need to add rows giving the model parameters in `node_parameters.csv` and `cnode_parameters.csv`, which currently only has the areas of each geographical node:
nparam = pd.read_csv('london_simulation/node_parameters.csv')
cnparam = | pd.read_csv('london_simulation/cnode_parameters.csv') | pandas.read_csv |
import math
import os
import time
from datetime import datetime
from math import inf
from heapq import heappop, heappush
import collections
import functools
from collections import defaultdict
import heapq
import random
import networkx as nx
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import gurobipy as gp
from gurobipy import *
from shapely.geometry import Point,LineString
import geopandas as gpd
import osmnx as ox
class World:
"""
一个类
"""
    Observation = collections.namedtuple('Observation', 'traveltime origin destination')  # one observed trip: travel time, origin and destination
def __init__(self, type=0, num=100, sigma=0, reg=0, time_limit=0.6):
"""
nodeUrl: 图对象的点的标识信息和位置信息
edgeUrl: 图对象的弧的标识信息、位置信息以及连接信息
type: 选择图对象的类型,0为small,1为normal
超参数num,sigma,reg
"""
self.type = type
self.num = num
self.sigma = sigma
self.reg = reg
self.time_limit = time_limit
def True_Graph(self):
"""
如果type=0时,加载small_model的真实图。如果type=1时,加载normal_model的真实图。如果其他情况,加载manhattan的真实图。
:return: 返回一个加载好的的图G对象
"""
if self.type == 0:
# <载入文件模块>
df_nodelist = pd.read_csv("../train_dataset/smallnodelist.csv")
df_edgelist = pd.read_csv("../train_dataset/smalledgelist.csv")
# 创建多重有向图,add_edge(1,2), add_edge(2,1)
T = nx.MultiDiGraph() # 初始化图并载入点和边模块
T.add_nodes_from(df_nodelist['node']) # 添加点auto
T.add_edges_from(zip(df_edgelist['node1'], df_edgelist['node2'])) # 添加边auto
# <设置人工网络arcTime和distance模块>
for u, v, d in T.edges(data=True):
T.edges[u, v, 0]['distance'] = 1
for u, v, d in T.edges(data=True): # 设置outside的行程时间
T.edges[u, v, 0]['arcTime'] = 1
T.edges[7, 8, 0]['arcTime'] = 4
T.edges[8, 7, 0]['arcTime'] = 4
T.edges[8, 9, 0]['arcTime'] = 4
T.edges[9, 8, 0]['arcTime'] = 4
T.edges[12, 13, 0]['arcTime'] = 4
T.edges[13, 12, 0]['arcTime'] = 4
T.edges[13, 14, 0]['arcTime'] = 4
T.edges[14, 13, 0]['arcTime'] = 4
T.edges[17, 18, 0]['arcTime'] = 4
T.edges[18, 17, 0]['arcTime'] = 4
T.edges[18, 19, 0]['arcTime'] = 4
T.edges[19, 18, 0]['arcTime'] = 4
T.edges[7, 12, 0]['arcTime'] = 4
T.edges[12, 7, 0]['arcTime'] = 4
T.edges[12, 17, 0]['arcTime'] = 4
T.edges[17, 12, 0]['arcTime'] = 4
T.edges[8, 13, 0]['arcTime'] = 4
T.edges[13, 8, 0]['arcTime'] = 4
T.edges[13, 18, 0]['arcTime'] = 4
T.edges[18, 13, 0]['arcTime'] = 4
T.edges[9, 14, 0]['arcTime'] = 4
T.edges[14, 9, 0]['arcTime'] = 4
T.edges[14, 19, 0]['arcTime'] = 4
T.edges[19, 14, 0]['arcTime'] = 4
return T
elif self.type == 1:
# <载入文件模块>
df_nodelist = pd.read_csv('../train_dataset/normalnodelist.csv')
df_edgelist = pd.read_csv('../train_dataset/normaledgelist.csv')
# 创建多重有向图,add_edge(1,2), add_edge(2,1)
T = nx.MultiDiGraph() # 初始化图并载入点和边模块
T.add_nodes_from(df_nodelist['node']) # 添加点auto
T.add_edges_from(zip(df_edgelist['node1'], df_edgelist['node2'])) # 添加边auto
# <设置人工网络arcTime和distance模块>
for u, v, d in T.edges(data=True):
T.edges[u, v, 0]['distance'] = 1
for u, v, d in T.edges(data=True): # 设置outside的行程时间
T.edges[u, v, 0]['arcTime'] = 1
T.edges[31, 32, 0]['arcTime'] = 4 # 设置upper-left的行程时间
T.edges[32, 31, 0]['arcTime'] = 4
T.edges[31, 51, 0]['arcTime'] = 4 # 设置第2row的weight
T.edges[51, 31, 0]['arcTime'] = 4
for i in range(32, 39):
T.edges[i, i - 1, 0]['arcTime'] = 4
T.edges[i - 1, i, 0]['arcTime'] = 4
T.edges[i, i + 1, 0]['arcTime'] = 4
T.edges[i + 1, i, 0]['arcTime'] = 4
T.edges[i, i + 20, 0]['arcTime'] = 4
T.edges[i + 20, i, 0]['arcTime'] = 4
T.edges[39, 38, 0]['arcTime'] = 4
T.edges[38, 39, 0]['arcTime'] = 4
T.edges[39, 59, 0]['arcTime'] = 4
T.edges[59, 39, 0]['arcTime'] = 4
for j in range(51, 191, 20): # 设置第3row到第9row的weight
T.edges[j, j + 1, 0]['arcTime'] = 4
T.edges[j + 1, j, 0]['arcTime'] = 4
T.edges[j, j - 20, 0]['arcTime'] = 4
T.edges[j - 20, j, 0]['arcTime'] = 4
T.edges[j, j + 20, 0]['arcTime'] = 4
T.edges[j + 20, j, 0]['arcTime'] = 4
for i in range(j + 1, j + 8):
T.edges[i, i - 1, 0]['arcTime'] = 4
T.edges[i - 1, i, 0]['arcTime'] = 4
T.edges[i, i + 1, 0]['arcTime'] = 4
T.edges[i + 1, i, 0]['arcTime'] = 4
T.edges[i, i - 20, 0]['arcTime'] = 4
                    T.edges[i - 20, i, 0]['arcTime'] = 4
T.edges[i, i + 20, 0]['arcTime'] = 4
T.edges[i + 20, i, 0]['arcTime'] = 4
T.edges[j + 8, j + 8 - 1, 0]['arcTime'] = 4
T.edges[j + 8 - 1, j + 8, 0]['arcTime'] = 4
T.edges[j + 8, j + 8 - 20, 0]['arcTime'] = 4
T.edges[j + 8 - 20, j + 8, 0]['arcTime'] = 4
T.edges[j + 8, j + 8 + 20, 0]['arcTime'] = 4
T.edges[j + 8 + 20, j + 8, 0]['arcTime'] = 4
T.edges[191, 192, 0]['arcTime'] = 4 # 设置第10row的weight
T.edges[192, 191, 0]['arcTime'] = 4
T.edges[191, 171, 0]['arcTime'] = 4
T.edges[171, 191, 0]['arcTime'] = 4
for i in range(192, 199):
T.edges[i, i - 1, 0]['arcTime'] = 4
T.edges[i - 1, i, 0]['arcTime'] = 4
T.edges[i, i + 1, 0]['arcTime'] = 4
T.edges[i + 1, i, 0]['arcTime'] = 4
T.edges[i, i - 20, 0]['arcTime'] = 4
T.edges[i - 20, i, 0]['arcTime'] = 4
T.edges[199, 198, 0]['arcTime'] = 4
T.edges[198, 199, 0]['arcTime'] = 4
T.edges[199, 179, 0]['arcTime'] = 4
T.edges[179, 199, 0]['arcTime'] = 4
T.edges[202, 203, 0]['arcTime'] = 2 # 设置lower-right的行程时间
T.edges[203, 202, 0]['arcTime'] = 2
T.edges[202, 222, 0]['arcTime'] = 2 # 设置第11row的weight
T.edges[222, 202, 0]['arcTime'] = 2
for i in range(203, 210):
T.edges[i, i - 1, 0]['arcTime'] = 2
T.edges[i - 1, i, 0]['arcTime'] = 2
T.edges[i, i + 1, 0]['arcTime'] = 2
T.edges[i + 1, i, 0]['arcTime'] = 2
T.edges[i, i + 20, 0]['arcTime'] = 2
T.edges[i + 20, i, 0]['arcTime'] = 2
T.edges[210, 209, 0]['arcTime'] = 2
T.edges[209, 210, 0]['arcTime'] = 2
T.edges[210, 230, 0]['arcTime'] = 2
T.edges[230, 210, 0]['arcTime'] = 2
for j in range(222, 362, 20): # 设置第12row到第18row的weight
T.edges[j, j + 1, 0]['arcTime'] = 2
T.edges[j + 1, j, 0]['arcTime'] = 2
T.edges[j, j - 20, 0]['arcTime'] = 2
T.edges[j - 20, j, 0]['arcTime'] = 2
T.edges[j, j + 20, 0]['arcTime'] = 2
T.edges[j + 20, j, 0]['arcTime'] = 2
for i in range(j + 1, j + 8):
T.edges[i, i - 1, 0]['arcTime'] = 2
T.edges[i - 1, i, 0]['arcTime'] = 2
T.edges[i, i + 1, 0]['arcTime'] = 2
T.edges[i + 1, i, 0]['arcTime'] = 2
T.edges[i, i - 20, 0]['arcTime'] = 2
T.edges[i - 20, i, 0]['arcTime'] = 2
T.edges[i, i + 20, 0]['arcTime'] = 2
                    T.edges[i + 20, i, 0]['arcTime'] = 2
                T.edges[j + 8, j + 8 - 1, 0]['arcTime'] = 2
                T.edges[j + 8 - 1, j + 8, 0]['arcTime'] = 2
T.edges[j + 8, j + 8 - 20, 0]['arcTime'] = 2
T.edges[j + 8 - 20, j + 8, 0]['arcTime'] = 2
T.edges[362, 363, 0]['arcTime'] = 2 # 设置第19row的weight
T.edges[363, 362, 0]['arcTime'] = 2
T.edges[362, 342, 0]['arcTime'] = 2
T.edges[342, 362, 0]['arcTime'] = 2
for i in range(363, 370):
T.edges[i, i - 1, 0]['arcTime'] = 2
T.edges[i - 1, i, 0]['arcTime'] = 2
T.edges[i, i + 1, 0]['arcTime'] = 2
T.edges[i + 1, i, 0]['arcTime'] = 2
T.edges[i, i - 20, 0]['arcTime'] = 2
T.edges[i - 20, i, 0]['arcTime'] = 2
T.edges[370, 369, 0]['arcTime'] = 2
T.edges[369, 370, 0]['arcTime'] = 2
T.edges[370, 350, 0]['arcTime'] = 2
T.edges[350, 370, 0]['arcTime'] = 2
return T
else:
# manhattan的图对象小弧数据未知
pass
def generate_distribution(self):
"""
对origin和destination进行均匀分布采样
:para num: 产生的观察样本的数量
:return: 返回origin和destination的均匀列表
"""
if self.type == 0:
# <随机分布模块>
origin_observations = [] # 产生均匀分布的origin
for i in range(self.num):
origin_observations.append(round(random.uniform(1, 25)))
destination_observations = [] # 产生均匀分布的destination
for i in range(self.num):
destination_observations.append(round(random.uniform(1, 25)))
origin_destination_observations = [] # 产生均匀分布的origin和destination
for i in range(self.num):
if origin_observations[i] != destination_observations[i]:
origin_destination_observations.append([origin_observations[i], destination_observations[i]])
return origin_destination_observations
elif self.type == 1:
# <随机分布模块>
origin_observations = [] # 产生均匀分布的origin
for i in range(self.num):
origin_observations.append(round(random.uniform(1, 400)))
destination_observations = [] # 产生均匀分布的destination
for i in range(self.num):
destination_observations.append(round(random.uniform(1, 400)))
origin_destination_observations = [] # 产生均匀分布的origin和destination
for i in range(self.num):
if origin_observations[i] != destination_observations[i]:
origin_destination_observations.append([origin_observations[i], destination_observations[i]])
return origin_destination_observations
else:
# 真实数据不需要生成仿真数据
pass
def lognormal_distribution(self, origin, destination):
T = self.True_Graph()
travelTime, path = self.modified_dijkstras(T, origin, destination)
mu = math.log(travelTime)
return random.lognormvariate(mu, self.sigma)
    def get_observations(self):  # get_observations is a generator
"""Return a generator that yields observation objects"""
origin_destination_observations = self.generate_distribution()
for i in range(len(origin_destination_observations)):
traveltime = self.lognormal_distribution(origin_destination_observations[i][0],
origin_destination_observations[i][1])
yield World.Observation(traveltime, origin_destination_observations[i][0],
origin_destination_observations[i][1])
def project(self, G, lng, lat):
"""
将某个点的坐标按照欧式距离映射到网络中最近的拓扑点上
:Param G: 拓扑图
:Param lng: 经度
:Param lat: 纬度
:Return: 返回最近的点的OSMid
"""
nearest_node = None
shortest_distance = inf
for n, d in G.nodes(data=True):
# d['x']是经度,d['y']是纬度
new_shortest_distance = ox.distance.euclidean_dist_vec(lng, lat, d['x'], d['y'])
if new_shortest_distance < shortest_distance:
nearest_node = n
shortest_distance = new_shortest_distance
return nearest_node, shortest_distance
def get_df_observations(self):
"""
将观察的样本数据存到同级文件夹data中的observed_data.csv文件中,并读取成dataframe格式
:return: 返回观察的样本数据的dataframe格式
"""
if self.type == 0:
os.makedirs(os.path.join('..', 'train_dataset'), exist_ok=True) # 创建一个人工数据集,并存储在csv(逗号分隔值)文件
data_file = os.path.join('..', 'train_dataset', 'small_synthetic_observed_data.csv')
with open(data_file, 'w') as f:
f.write('traveltime,origin,destination\n')
for item in self.get_observations():
if item[1] != item[2]:
f.write('{0},{1},{2}\n'.format(item[0], item[1], item[2]))
df_observed_data = pd.read_csv("../train_dataset/small_synthetic_observed_data.csv")
return df_observed_data
elif self.type == 1:
os.makedirs(os.path.join('..', 'train_dataset'), exist_ok=True) # 创建一个人工数据集,并存储在csv(逗号分隔值)文件
data_file = os.path.join('..', 'train_dataset', 'normal_synthetic_observed_data.csv')
with open(data_file, 'w') as f:
f.write('traveltime,origin,destination\n')
for item in self.get_observations():
if item[1] != item[2]:
f.write('{0},{1},{2}\n'.format(item[0], item[1], item[2]))
df_observed_data = pd.read_csv("../train_dataset/normal_synthetic_observed_data.csv")
return df_observed_data
else:
# 获取manhattan的networkx对象
G = ox.graph_from_place('Manhattan, New York City, New York, USA', network_type='drive')
# 将network对象转换成geodatafram对象
gdf_nodes, gdf_edges = ox.graph_to_gdfs(G)
# observe convert to get_nearest_node路网点,转换成路网点的观察数据dataframe
df_dataset = pd.read_csv("../train_dataset/dataset.csv")
df_dataset['dist'] = df_dataset.apply(
lambda row: self.project(G, row['pickup_longitude'], row['pickup_latitude'])[1] +
self.project(G, row['dropoff_longitude'], row['dropoff_latitude'])[1], axis=1)
df_dataset = df_dataset[df_dataset['dist'] <= 0.002]
df_dataset.to_csv("../train_dataset/processed_dataset.csv")
# observe convert to get_nearest_node路网点,转换成路网点的观察数据dataframe
df_dataset = pd.read_csv("../train_dataset/processed_dataset.csv")
# 注意axis=1的使用
df_dataset['pickup_osmid'] = df_dataset.apply(
lambda row: self.project(G, row['pickup_longitude'], row['pickup_latitude'])[0], axis=1)
df_dataset['dropoff_osmid'] = df_dataset.apply(
lambda row: self.project(G, row['dropoff_longitude'], row['dropoff_latitude'])[0], axis=1)
# d['x']是经度, d['y']是纬度
df_dataset['projected_pickup_longitude'] = df_dataset.apply(lambda row: G.nodes[row['pickup_osmid']]['x'],
axis=1)
df_dataset['projected_pickup_latitude'] = df_dataset.apply(lambda row: G.nodes[row['pickup_osmid']]['y'],
axis=1)
df_dataset['geometry'] = df_dataset.apply(
lambda row: Point(float(row['projected_pickup_longitude']), float(row['projected_pickup_latitude'])),
axis=1)
# 转换dataframe成goedataframe
df_dataset_geo = gpd.GeoDataFrame(df_dataset, crs=gdf_edges.crs, geometry=df_dataset.geometry)
os.makedirs(os.path.join('..', 'train_dataset'), exist_ok=True) # 创建一个人工数据集,并存储在csv(逗号分隔值)文件
data_file = os.path.join('..', 'train_dataset', 'real_observed_data.csv')
with open(data_file, 'w') as f:
f.write('traveltime,origin_osmid,destination_osmid\n')
for i in range(len(df_dataset_geo)):
if df_dataset_geo.iloc[i, 11] != df_dataset_geo.iloc[i, 12] and df_dataset_geo.iloc[
i, 11] / 60 >= 1 and df_dataset_geo.iloc[i, 11] / 60 <= 60:
f.write('{0},{1},{2}\n'.format(df_dataset_geo.iloc[i, 11] / 60, df_dataset_geo.iloc[i, 13],
df_dataset_geo.iloc[i, 14]))
df_observed_data = pd.read_csv("../train_dataset/real_observed_data.csv")
return df_observed_data
def get_train_dataset(self):
"""
将观察的样本数据存到同级文件夹data中的observed_data.csv文件中,并读取成dataframe格式
:return: 返回观察的样本数据的dataframe格式
"""
if self.type == 0:
os.makedirs(os.path.join('..', 'train_dataset'), exist_ok=True) # 创建一个人工数据集,并存储在csv(逗号分隔值)文件
data_file = os.path.join('..', 'train_dataset', 'small_train_data.csv')
with open(data_file, 'w') as f:
f.write('traveltime,origin,destination\n')
for item in self.get_observations():
if item[1] != item[2]:
f.write('{0},{1},{2}\n'.format(item[0], item[1], item[2]))
df_train_data = pd.read_csv("../train_dataset/small_train_data.csv")
return df_train_data
elif self.type == 1:
os.makedirs(os.path.join('..', 'train_dataset'), exist_ok=True) # 创建一个人工数据集,并存储在csv(逗号分隔值)文件
data_file = os.path.join('..', 'train_dataset', 'normal_train_data.csv')
with open(data_file, 'w') as f:
f.write('traveltime,origin,destination\n')
for item in self.get_observations():
if item[1] != item[2]:
f.write('{0},{1},{2}\n'.format(item[0], item[1], item[2]))
df_train_data = pd.read_csv("../train_dataset/normal_train_data.csv")
return df_train_data
else:
# 获取manhattan的networkx对象
G = ox.graph_from_place('Manhattan, New York City, New York, USA', network_type='drive')
# 将network对象转换成geodatafram对象
gdf_nodes, gdf_edges = ox.graph_to_gdfs(G)
# observe convert to get_nearest_node路网点,转换成路网点的观察数据dataframe
df_dataset = pd.read_csv("../train_dataset/train_dataset.csv")
df_dataset['dist'] = df_dataset.apply(
lambda row: self.project(G, row['pickup_longitude'], row['pickup_latitude'])[1] +
self.project(G, row['dropoff_longitude'], row['dropoff_latitude'])[1], axis=1)
df_dataset = df_dataset[df_dataset['dist'] <= 0.002]
df_dataset.to_csv("../train_dataset/processed_dataset.csv")
# observe convert to get_nearest_node路网点,转换成路网点的观察数据dataframe
df_dataset = pd.read_csv("../train_dataset/processed_dataset.csv")
# 注意axis=1的使用
df_dataset['pickup_osmid'] = df_dataset.apply(
lambda row: self.project(G, row['pickup_longitude'], row['pickup_latitude'])[0], axis=1)
df_dataset['dropoff_osmid'] = df_dataset.apply(
lambda row: self.project(G, row['dropoff_longitude'], row['dropoff_latitude'])[0], axis=1)
# d['x']是经度, d['y']是纬度
df_dataset['projected_pickup_longitude'] = df_dataset.apply(lambda row: G.nodes[row['pickup_osmid']]['x'],
axis=1)
df_dataset['projected_pickup_latitude'] = df_dataset.apply(lambda row: G.nodes[row['pickup_osmid']]['y'],
axis=1)
df_dataset['geometry'] = df_dataset.apply(
lambda row: Point(float(row['projected_pickup_longitude']), float(row['projected_pickup_latitude'])),
axis=1)
# 转换dataframe成goedataframe
df_dataset_geo = gpd.GeoDataFrame(df_dataset, crs=gdf_edges.crs, geometry=df_dataset.geometry)
os.makedirs(os.path.join('..', 'train_dataset'), exist_ok=True) # 创建一个人工数据集,并存储在csv(逗号分隔值)文件
data_file = os.path.join('..', 'train_dataset', 'real_train_data.csv')
with open(data_file, 'w') as f:
f.write('traveltime,origin_osmid,destination_osmid\n')
for i in range(len(df_dataset_geo)):
if df_dataset_geo.iloc[i, 11] != df_dataset_geo.iloc[i, 12] and df_dataset_geo.iloc[
i, 11] / 60 >= 1 and df_dataset_geo.iloc[i, 11] / 60 <= 60:
f.write('{0},{1},{2}\n'.format(df_dataset_geo.iloc[i, 11] / 60, df_dataset_geo.iloc[i, 13],
df_dataset_geo.iloc[i, 14]))
df_train_data = pd.read_csv("../train_dataset/real_train_data.csv")
return df_train_data
def modified_dijkstras(self, G, origin, destination):
"""
最短路算法
:return: 返回一个traveltime和path
"""
count = 0
paths_and_distances = {}
for node in G.nodes():
paths_and_distances[node] = [inf, [origin]]
paths_and_distances[origin][0] = 0
vertices_to_explore = [(0, origin)]
while vertices_to_explore:
current_distance, current_vertex = heappop(vertices_to_explore)
for neighbor in G.neighbors(current_vertex):
edge_weight = G.get_edge_data(current_vertex, neighbor, 0)['arcTime']
new_distance = current_distance + edge_weight
new_path = paths_and_distances[current_vertex][1] + [neighbor]
if new_distance < paths_and_distances[neighbor][0]:
paths_and_distances[neighbor][0] = new_distance
paths_and_distances[neighbor][1] = new_path
heappush(vertices_to_explore, (new_distance, neighbor))
count += 1
return paths_and_distances[destination]
def Graph(self):
"""
加载初始化人工网络
:return: 返回一个加载好的的图G对象
"""
if self.type == 0:
# <载入文件模块>
df_nodelist = pd.read_csv('../train_dataset/smallnodelist.csv')
df_edgelist = pd.read_csv('../train_dataset/smalledgelist.csv')
G = nx.MultiDiGraph() # 初始化图并载入点和边模块
G.add_nodes_from(df_nodelist['node']) # 添加点auto
G.add_edges_from(zip(df_edgelist['node1'], df_edgelist['node2'])) # 添加边auto
# <设置人工网络weight模块>
# 搜索nodes和edges一个是一个key,另一个是两个key
# 设置点对象的x和y坐标,方便自动生成geometry
for u, d in G.nodes(data=True):
u_lng = df_nodelist[df_nodelist.node == u].values.squeeze()[1]
u_lat = df_nodelist[df_nodelist.node == u].values.squeeze()[2]
d['y'] = u_lat
d['x'] = u_lng
# d['y'] = 0
# d['x'] = 0
# 双向车道,因此这是一个多重图
for u, v, d in G.edges(data=True): # 设置outside的行程时间
G.edges[u, v, 0]['arcTime'] = 1
for u, v, d in G.edges(data=True):
G.edges[u, v, 0]['distance'] = 1
# 设置图对象的crs
G.graph['crs'] = "epsg:4326"
return G
elif self.type == 1:
# <载入文件模块>
df_nodelist = pd.read_csv('../train_dataset/normalnodelist.csv')
df_edgelist = pd.read_csv('../train_dataset/normaledgelist.csv')
G = nx.MultiDiGraph() # 初始化图并载入点和边模块
G.add_nodes_from(df_nodelist['node']) # 添加点auto
G.add_edges_from(zip(df_edgelist['node1'], df_edgelist['node2'])) # 添加边auto
# <设置人工网络weight模块>
# 搜索nodes和edges一个是一个key,另一个是两个key
# 设置点对象的x和y坐标,方便自动生成geometry
for u, d in G.nodes(data=True):
u_lng = df_nodelist[df_nodelist.node == u].values.squeeze()[1]
u_lat = df_nodelist[df_nodelist.node == u].values.squeeze()[2]
d['y'] = u_lat
d['x'] = u_lng
# d['y'] = 0
# d['x'] = 0
# 双向车道,因此这是一个多重图
for u, v, d in G.edges(data=True): # 设置outside的行程时间
G.edges[u, v, 0]['arcTime'] = 1
for u, v, d in G.edges(data=True):
G.edges[u, v, 0]['distance'] = 1
# 设置图对象的crs
G.graph['crs'] = "epsg:4326"
return G
else:
# <载入文件模块>
# 获取manhattan的networkx对象
G = ox.graph_from_place('Manhattan, New York City, New York, USA', network_type='drive')
# <设置人工网络weight模块>
# 多重无向图与无向图添加权重的方式不同,d就是属性字典,无向图中G.edges[u,v]是字典而多重无向图G.edges[u,v]不是
for u, v, d in G.edges(data=True): # 设置outside的行程时间
G.edges[u, v, 0]['arcTime'] = 1
for u, v, d in G.edges(data=True):
G.edges[u, v, 0]['distance'] = 1
return G
def optimization_method(self, G, K):
"""
SOCP优化算法
:para G: 初始化得到的或上一次迭代计算得到的网络图
:para K: path set
:return: 更新过弧行程时间的网络图
"""
if self.type == 0:
# <读取数据>
df_observed_data = pd.read_csv('../train_dataset/small_synthetic_observed_data.csv')
W = df_observed_data # 有旅行时间数据的origin,destination集合:观察集合W
E = G.edges # 所有的小弧的集合:arc集合E
# <help函数>
def geometric_mean(data): # 计算几何平均数T_od
total = 1
for i in data:
total *= i # 等同于total=total*i
return pow(total, 1 / len(data))
# <定义模型>
m = Model("SOCP model")
# <定义参数>
time_limit = self.time_limit
reg = self.reg # 需要针对问题规模灵活选择
# <定义自变量>
names = locals()
# 变量1:t_ij
for node1, node2, temp in E: # 定义小弧的行程时间估计变量t_ij
names['arc_' + 'node1_' + str(node1) + '_node2_' + str(node2)] = m.addVar(vtype=GRB.CONTINUOUS,
name='arc_' + 'node1_' + str(
node1) + '_node2_' + str(
node2))
# 变量2:T_hat
for i in range(W.shape[0]): # 定义旅行的行程时间估计变量T^hat
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
names['trip_' + 'node1_' + str(node1) + '_node2_' + str(node2)] = m.addVar(vtype=GRB.CONTINUOUS,
name='trip_' + 'node1_' + str(
node1) + '_node2_' + str(
node2))
# 变量3:x_od
for i in range(W.shape[0]): # 定义行程时间估计的误差x_od
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
names['error_' + 'node1_' + str(node1) + '_node2_' + str(node2)] = m.addVar(vtype=GRB.CONTINUOUS,
name='error_' + 'node1_' + str(
node1) + '_node2_' + str(
node2))
for node1, node2, temp in E: # 定义绝对值线性化
names['abs_' + 'node1_' + str(node1) + '_node2_' + str(node2)] = m.addVar(vtype=GRB.CONTINUOUS,
name='abs_' + 'node1_' + str(
node1) + '_node2_' + str(
node2))
names['abs_' + 'node1_' + str(node2) + '_node2_' + str(node1)] = m.addVar(vtype=GRB.CONTINUOUS,
name='abs_' + 'node1_' + str(
node2) + '_node2_' + str(
node1))
# <定义数据结构>
# 数据结构1:P
P = defaultdict(list) # 使用上一次迭代产生的路段行程时间计算本次迭代优化模型的最短路向量
for i in range(W.shape[0]):
origin = int(W.iloc[i][1])
destination = int(W.iloc[i][2])
P['node1_' + str(origin) + '_node2_' + str(destination)] = \
self.modified_dijkstras(G, origin, destination)[1]
# 数据结构2:K
for key, val in P.items(): # W中观察点的路径集合
string = key.split('_')
origin = int(string[1])
destination = int(string[3])
K['node1_' + str(origin) + '_node2_' + str(destination)].append(val)
# 数据结构3:所有观察样本
O = defaultdict(list) # origin和destination的行程时间列表
for i in range(df_observed_data.shape[0]):
origin = int(df_observed_data.iloc[i][1])
destination = int(df_observed_data.iloc[i][2])
O['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)].append(
df_observed_data.iloc[i][0])
# 数据结构4:所有观察样本时间的几何平均
M = defaultdict(int) # origin和destination的行程时间几何平均值
for i in range(df_observed_data.shape[0]):
origin = int(df_observed_data.iloc[i][1])
destination = int(df_observed_data.iloc[i][2])
M['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)] = geometric_mean(
O['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)])
# <定义约束>
# 11b约束
for i in range(df_observed_data.shape[0]): # 添加最短路约束
origin = int(df_observed_data.iloc[i][1])
destination = int(df_observed_data.iloc[i][2])
traveltime, path = self.modified_dijkstras(G, origin, destination)
arcSum = 0
for i in range(len(path) - 1):
node1 = int(path[i])
node2 = int(path[i + 1])
arcSum += names['arc_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
m.addConstr(names['trip_' + 'node1_' + str(origin) + '_node2_' + str(
destination)] == arcSum) # 添加最短路径行程时间等于旅行的行程时间估计变量的线性约束
# 11c约束
if K:
for key, val in K.items():
string = key.split('_')
origin = int(string[1])
destination = int(string[3])
for path in val:
othertime = 0
for i in range(len(path) - 1):
node1 = path[i]
node2 = path[i + 1]
othertime += names['arc_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
m.addConstr(
othertime >= names['trip_' + 'node1_' + str(origin) + '_node2_' + str(destination)]) # 符号反了
# 11d约束
for i in range(W.shape[0]): # 添加误差最小的线性约束
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
m.addConstr(names['error_' + 'node1_' + str(node1) + '_node2_' + str(node2)] >= names[
'trip_' + 'node1_' + str(node1) + '_node2_' + str(node2)] / M[
'observe_' + 'node1_' + str(node1) + '_node2_' + str(node2)])
# 11e约束
for i in range(W.shape[0]): # # 添加误差最小的范数约束
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
qexpr1 = names['trip_' + 'node1_' + str(node1) + '_node2_' + str(node2)] - names[
'error_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
qexpr2 = 2 * np.sqrt(M['observe_' + 'node1_' + str(node1) + '_node2_' + str(node2)])
qexpr3 = names['error_' + 'node1_' + str(node1) + '_node2_' + str(node2)] + names[
'trip_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
m.addQConstr(qexpr1 * qexpr1 + qexpr2 * qexpr2 <= qexpr3 * qexpr3)
# 11f约束
for node1, node2, temp in E: # 加速度限制的线性约束
m.addConstr(names['arc_' + 'node1_' + str(node1) + '_node2_' + str(node2)] >= time_limit)
# <定义目标函数>
obj = 0
# 添加loss项
for i in range(W.shape[0]):
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
n_od = len(O['observe_' + 'node1_' + str(node1) + '_node2_' + str(node2)])
obj += names['error_' + 'node1_' + str(node1) + '_node2_' + str(node2)] * n_od
# 添加惩罚项
for node1, node2, temp in E:
for node3, node4, temp in E:
# 列表求交集,判断连续弧
arc1 = [node1, node2]
arc2 = [node3, node4]
intersection = list(set(arc1) & set(arc2))
if intersection:
arc1 = names['arc_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
arc2 = names['arc_' + 'node1_' + str(node3) + '_node2_' + str(node4)]
dis1 = G.edges[node1, node2, 0]['distance']
dis2 = G.edges[node3, node4, 0]['distance']
m.addConstr(
names['abs_' + 'node1_' + str(node1) + '_node2_' + str(node2)] >= arc1 / dis1 - arc2 / dis2)
m.addConstr(names['abs_' + 'node1_' + str(node1) + '_node2_' + str(node2)] >= -(
arc1 / dis1 - arc2 / dis2))
obj += reg * names['abs_' + 'node1_' + str(node1) + '_node2_' + str(node2)] * 2 / (dis1 + dis2)
# 添加目标函数
m.setObjective(obj)
# <求解模型>
m.optimize()
# print('最优值:',m.objVal)
# for v in m.getVars():
# print("参数", v.varName,'=',v.x)
# <更新结果>
for v in m.getVars():
string = v.varName.split('_')
node1 = int(string[2])
node2 = int(string[4])
if 'arc' in v.varName: # 将arc_node1_num_node2_num的weight更新
G.edges[node1, node2, 0]['arcTime'] = v.x
return G, K, P
elif self.type == 1:
# <读取数据>
df_observed_data = pd.read_csv('../train_dataset/normal_synthetic_observed_data.csv')
W = df_observed_data # 有旅行时间数据的origin,destination集合:观察集合W
E = G.edges # 所有的小弧的集合:arc集合E
# <help函数>
def geometric_mean(data): # 计算几何平均数T_od
total = 1
for i in data:
total *= i # 等同于total=total*i
return pow(total, 1 / len(data))
# <定义模型>
m = Model("SOCP model")
# <定义参数>
time_limit = self.time_limit
reg = self.reg # 需要针对问题规模灵活选择
# <定义自变量>
names = locals()
# 变量1:t_ij
for node1, node2, temp in E: # 定义小弧的行程时间估计变量t_ij
names['arc_' + 'node1_' + str(node1) + '_node2_' + str(node2)] = m.addVar(vtype=GRB.CONTINUOUS,
name='arc_' + 'node1_' + str(
node1) + '_node2_' + str(
node2))
# 变量2:T_hat
for i in range(W.shape[0]): # 定义旅行的行程时间估计变量T^hat
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
names['trip_' + 'node1_' + str(node1) + '_node2_' + str(node2)] = m.addVar(vtype=GRB.CONTINUOUS,
name='trip_' + 'node1_' + str(
node1) + '_node2_' + str(
node2))
# 变量3:x_od
for i in range(W.shape[0]): # 定义行程时间估计的误差x_od
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
names['error_' + 'node1_' + str(node1) + '_node2_' + str(node2)] = m.addVar(vtype=GRB.CONTINUOUS,
name='error_' + 'node1_' + str(
node1) + '_node2_' + str(
node2))
for node1, node2, temp in E: # 定义绝对值线性化
names['abs_' + 'node1_' + str(node1) + '_node2_' + str(node2)] = m.addVar(vtype=GRB.CONTINUOUS,
name='abs_' + 'node1_' + str(
node1) + '_node2_' + str(
node2))
# <定义数据结构>
# 数据结构1:P
P = defaultdict(list) # 使用上一次迭代产生的路段行程时间计算本次迭代优化模型的最短路向量
for i in range(W.shape[0]):
origin = int(W.iloc[i][1])
destination = int(W.iloc[i][2])
P['node1_' + str(origin) + '_node2_' + str(destination)] = \
self.modified_dijkstras(G, origin, destination)[1]
# 数据结构2:K
for i in range(W.shape[0]): # W中观察点的路径集合
origin = int(W.iloc[i][1])
destination = int(W.iloc[i][2])
K['node1_' + str(origin) + '_node2_' + str(destination)].append(
self.modified_dijkstras(G, origin, destination)[1])
# 数据结构3:所有观察样本
O = defaultdict(list) # origin和destination的行程时间列表
for i in range(df_observed_data.shape[0]):
origin = int(df_observed_data.iloc[i][1])
destination = int(df_observed_data.iloc[i][2])
O['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)].append(
df_observed_data.iloc[i][0])
# 数据结构4:所有观察样本时间的几何平均
M = defaultdict(int) # origin和destination的行程时间几何平均值
for i in range(df_observed_data.shape[0]):
origin = int(df_observed_data.iloc[i][1])
destination = int(df_observed_data.iloc[i][2])
M['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)] = geometric_mean(
O['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)])
# <定义约束>
# 11b约束
for i in range(df_observed_data.shape[0]): # 添加最短路约束
origin = int(df_observed_data.iloc[i][1])
destination = int(df_observed_data.iloc[i][2])
traveltime, path = self.modified_dijkstras(G, origin, destination)
arcSum = 0
for i in range(len(path) - 1):
node1 = int(path[i])
node2 = int(path[i + 1])
arcSum += names['arc_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
m.addConstr(names['trip_' + 'node1_' + str(origin) + '_node2_' + str(
destination)] == arcSum) # 添加最短路径行程时间等于旅行的行程时间估计变量的线性约束
# 11c约束
if K:
for key, val in K.items():
string = key.split('_')
origin = int(string[1])
destination = int(string[3])
for path in val:
othertime = 0
for i in range(len(path) - 1):
node1 = path[i]
node2 = path[i + 1]
othertime += names['arc_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
m.addConstr(
othertime >= names['trip_' + 'node1_' + str(origin) + '_node2_' + str(destination)]) # 符号反了
# 11d约束
for i in range(W.shape[0]): # 添加误差最小的线性约束
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
m.addConstr(names['error_' + 'node1_' + str(node1) + '_node2_' + str(node2)] >= names[
'trip_' + 'node1_' + str(node1) + '_node2_' + str(node2)] / M[
'observe_' + 'node1_' + str(node1) + '_node2_' + str(node2)])
# 11e约束
for i in range(W.shape[0]): # # 添加误差最小的范数约束
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
qexpr1 = names['trip_' + 'node1_' + str(node1) + '_node2_' + str(node2)] - names[
'error_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
qexpr2 = 2 * np.sqrt(M['observe_' + 'node1_' + str(node1) + '_node2_' + str(node2)])
qexpr3 = names['error_' + 'node1_' + str(node1) + '_node2_' + str(node2)] + names[
'trip_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
m.addQConstr(qexpr1 * qexpr1 + qexpr2 * qexpr2 <= qexpr3 * qexpr3)
# 11f约束
for node1, node2, temp in E: # 加速度限制的线性约束
m.addConstr(names['arc_' + 'node1_' + str(node1) + '_node2_' + str(node2)] >= time_limit)
# <定义目标函数>
obj = 0
# 添加loss项
for i in range(W.shape[0]):
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
n_od = len(O['observe_' + 'node1_' + str(node1) + '_node2_' + str(node2)])
obj += names['error_' + 'node1_' + str(node1) + '_node2_' + str(node2)] * n_od
# 添加惩罚项
for node1, node2, temp in E:
for node3, node4, temp in E:
# 列表求交集,判断连续弧
arc1 = [node1, node2]
arc2 = [node3, node4]
intersection = list(set(arc1) & set(arc2))
if intersection:
arc1 = names['arc_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
arc2 = names['arc_' + 'node1_' + str(node3) + '_node2_' + str(node4)]
dis1 = G.edges[node1, node2, 0]['distance']
dis2 = G.edges[node3, node4, 0]['distance']
obj += reg * names['abs_' + 'node1_' + str(node1) + '_node2_' + str(node2)] * 2 / (dis1 + dis2)
m.addConstr(
names['abs_' + 'node1_' + str(node1) + '_node2_' + str(node2)] >= arc1 / dis1 - arc2 / dis2)
m.addConstr(names['abs_' + 'node1_' + str(node1) + '_node2_' + str(node2)] >= -(
arc1 / dis1 - arc2 / dis2))
# 添加目标函数
            m.setObjective(obj, GRB.MINIMIZE)
# <求解模型>
m.optimize()
# print('最优值:',m.objVal)
# for v in m.getVars():
# print("参数", v.varName,'=',v.x)
# <更新结果>
for v in m.getVars():
string = v.varName.split('_')
node1 = int(string[2])
node2 = int(string[4])
if 'arc' in v.varName: # 将arc_node1_num_node2_num的weight更新
G.edges[node1, node2, 0]['arcTime'] = v.x
return G, K, P
else:
# <读取数据>
df_observed_data = pd.read_csv('../train_dataset/real_observed_data.csv')
W = df_observed_data # 有旅行时间数据的origin,destination集合:观察集合W
E = G.edges # 所有的小弧的集合:arc集合E
# <help函数>
def geometric_mean(data): # 计算几何平均数T_od
total = 1
for i in data:
total *= i # 等同于total=total*i
return pow(total, 1 / len(data))
# <定义模型>
m = Model("SOCP model")
# <定义参数>
time_limit = self.time_limit
reg = self.reg # 需要针对问题规模灵活选择
# <定义自变量>
names = locals()
# 变量1:t_ij
for node1, node2, temp in E: # 定义小弧的行程时间估计变量t_ij
if temp == 0:
names['arc_' + 'node1_' + str(node1) + '_node2_' + str(node2)] = m.addVar(vtype=GRB.CONTINUOUS,
name='arc_' + 'node1_' + str(
node1) + '_node2_' + str(
node2))
# 变量2:T_hat
for i in range(W.shape[0]): # 定义旅行的行程时间估计变量T^hat
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
names['trip_' + 'node1_' + str(node1) + '_node2_' + str(node2)] = m.addVar(vtype=GRB.CONTINUOUS,
name='trip_' + 'node1_' + str(
node1) + '_node2_' + str(
node2))
# 变量3:x_od
for i in range(W.shape[0]): # 定义行程时间估计的误差x_od
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
names['error_' + 'node1_' + str(node1) + '_node2_' + str(node2)] = m.addVar(vtype=GRB.CONTINUOUS,
name='error_' + 'node1_' + str(
node1) + '_node2_' + str(
node2))
for node1, node2, temp in E: # 定义绝对值线性化
names['abs_' + 'node1_' + str(node1) + '_node2_' + str(node2)] = m.addVar(vtype=GRB.CONTINUOUS,
name='abs_' + 'node1_' + str(
node1) + '_node2_' + str(
node2))
# <定义数据结构>
# 数据结构1:P
P = defaultdict(list) # 使用上一次迭代产生的路段行程时间计算本次迭代优化模型的最短路向量
for i in range(W.shape[0]):
origin = int(W.iloc[i][1])
destination = int(W.iloc[i][2])
P['node1_' + str(origin) + '_node2_' + str(destination)] = \
self.modified_dijkstras(G, origin, destination)[1]
# 数据结构2:K
for i in range(W.shape[0]): # W中观察点的路径集合
origin = int(W.iloc[i][1])
destination = int(W.iloc[i][2])
K['node1_' + str(origin) + '_node2_' + str(destination)].append(
self.modified_dijkstras(G, origin, destination)[1])
# 数据结构3:所有观察样本
O = defaultdict(list) # origin和destination的行程时间列表
for i in range(W.shape[0]):
origin = int(W.iloc[i][1])
destination = int(W.iloc[i][2])
O['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)].append(int(W.iloc[i][0]))
# 数据结构4:所有观察样本时间的几何平均
M = defaultdict(int) # origin和destination的行程时间几何平均值
for i in range(W.shape[0]):
origin = int(W.iloc[i][1])
destination = int(W.iloc[i][2])
M['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)] = geometric_mean(
O['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)])
# <定义约束>
# 11b约束
for i in range(W.shape[0]): # 添加最短路约束
origin = int(W.iloc[i][1])
destination = int(W.iloc[i][2])
traveltime, path = self.modified_dijkstras(G, origin, destination)
arcSum = 0
for i in range(len(path) - 1):
node1 = int(path[i])
node2 = int(path[i + 1])
arcSum += names['arc_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
m.addConstr(names['trip_' + 'node1_' + str(origin) + '_node2_' + str(
destination)] == arcSum) # 添加最短路径行程时间等于旅行的行程时间估计变量的线性约束
# 11c约束
if K:
for key, val in K.items():
string = key.split('_')
origin = int(string[1])
destination = int(string[3])
for path in val:
othertime = 0
for i in range(len(path) - 1):
node1 = path[i]
node2 = path[i + 1]
othertime += names['arc_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
m.addConstr(
othertime >= names['trip_' + 'node1_' + str(origin) + '_node2_' + str(destination)]) # 符号反了
# 11d约束
for i in range(W.shape[0]): # 添加误差最小的线性约束
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
m.addConstr(names['error_' + 'node1_' + str(node1) + '_node2_' + str(node2)] >= names[
'trip_' + 'node1_' + str(node1) + '_node2_' + str(node2)] / M[
'observe_' + 'node1_' + str(node1) + '_node2_' + str(node2)])
# 11e约束
for i in range(W.shape[0]): # # 添加误差最小的范数约束
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
qexpr1 = names['trip_' + 'node1_' + str(node1) + '_node2_' + str(node2)] - names[
'error_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
qexpr2 = 2 * np.sqrt(M['observe_' + 'node1_' + str(node1) + '_node2_' + str(node2)])
qexpr3 = names['error_' + 'node1_' + str(node1) + '_node2_' + str(node2)] + names[
'trip_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
m.addQConstr(qexpr1 * qexpr1 + qexpr2 * qexpr2 <= qexpr3 * qexpr3)
# # 11f约束
# for node1,node2,temp in E: # 加速度限制的线性约束,无解有可能是time_limit的问题
# m.addConstr(names['arc_'+ 'node1_'+str(node1) +'_node2_'+ str(node2)] >= time_limit)
# <定义目标函数>
obj = 0
# 添加loss项
for i in range(W.shape[0]):
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
n_od = len(O['observe_' + 'node1_' + str(node1) + '_node2_' + str(node2)])
obj += names['error_' + 'node1_' + str(node1) + '_node2_' + str(node2)] * n_od
# 添加惩罚项
for node1, node2, temp in E:
for node3, node4, temp in E:
# 列表求交集,判断连续弧
arc1 = [node1, node2]
arc2 = [node3, node4]
intersection = list(set(arc1) & set(arc2))
if intersection:
arc1 = names['arc_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
arc2 = names['arc_' + 'node1_' + str(node3) + '_node2_' + str(node4)]
dis1 = G.edges[node1, node2, 0]['distance']
dis2 = G.edges[node3, node4, 0]['distance']
obj += reg * names['abs_' + 'node1_' + str(node1) + '_node2_' + str(node2)] * 2 / (dis1 + dis2)
m.addConstr(
names['abs_' + 'node1_' + str(node1) + '_node2_' + str(node2)] >= arc1 / dis1 - arc2 / dis2)
m.addConstr(names['abs_' + 'node1_' + str(node1) + '_node2_' + str(node2)] >= -(
arc1 / dis1 - arc2 / dis2))
# 添加目标函数
m.setObjective(obj, gurobipy.GRB.MINIMIZE)
# <求解模型>
m.optimize()
# print('最优值:',m.objVal)
# for v in m.getVars():
# print("参数", v.varName,'=',v.x)
# <更新结果>
for v in m.getVars():
string = v.varName.split('_')
node1 = int(string[2])
node2 = int(string[4])
if 'arc' in v.varName: # 将arc_node1_num_node2_num的weight更新
G.edges[node1, node2, 0]['arcTime'] = v.x
return G, K, P
def diff(self, lastP, P):
count = 0
G = self.Graph()
arc_lastP = defaultdict(list)
for key, val in lastP.items(): # lastP {'node1_num_node2_num':[node1,node2]}
for i in range(len(val) - 1):
origin = val[i]
destination = val[i + 1]
arc_lastP[key].append(str(origin) + str(destination)) # {"node1_num_node2_num": [arc1,arc2]}
arc_P = defaultdict(list)
for key, val in P.items():
for i in range(len(val) - 1):
origin = val[i]
destination = val[i + 1]
arc_P[key].append(str(origin) + str(destination))
for key, val in arc_lastP.items(): # {'origin,destination':[arc1,arc2]}
for arc in val:
if arc not in arc_P[key]:
count += 1
for key, val in arc_P.items():
for arc in val:
if arc not in arc_lastP[key]:
count += 1
return count / len(lastP)
def RMLSB(self, G):
"""
定义一个评价函数,对比小弧之间的误差,仿真数据有真实弧数据,而真实数据中通过与其他算法对比获取gap
G: 训练好的图对象
test_dataset: 输入测试集,测试集的数据是没有经过训练过的
"""
RMLSB = 0
if self.type == 0:
train_dataset = "../train_dataset/small_synthetic_observed_data.csv"
elif self.type == 1:
train_dataset = "../train_dataset/normal_synthetic_observed_data.csv"
else:
train_dataset = "../train_dataset/real_observed_data"
# <help函数>
def geometric_mean(data): # 计算几何平均数T_od
total = 1
for i in data:
total *= i # 等同于total=total*i
return pow(total, 1 / len(data))
df_observed_data = pd.read_csv(train_dataset)
# 数据结构3:所有观察样本
O = defaultdict(list) # origin和destination的行程时间列表
for i in range(df_observed_data.shape[0]):
origin = int(df_observed_data.iloc[i][1])
destination = int(df_observed_data.iloc[i][2])
O['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)].append(df_observed_data.iloc[i][0])
# 数据结构4:所有观察样本时间的几何平均
M = defaultdict(int) # origin和destination的行程时间几何平均值
for i in range(df_observed_data.shape[0]):
origin = int(df_observed_data.iloc[i][1])
destination = int(df_observed_data.iloc[i][2])
M['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)] = geometric_mean(
O['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)])
for origin in G.nodes():
for destination in G.nodes():
if origin != destination and int(
M['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)]) != 0:
observe = M['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)]
trip = self.modified_dijkstras(G, origin, destination)[0]
print(observe, trip)
RMLSB += math.pow((math.log(trip) - math.log(observe)), 2)
return np.sqrt(RMLSB)
def geo(self, G):
if self.type == 0:
# 载入文件模块
df_nodelist = pd.read_csv('../train_dataset/smallnodelist.csv')
edgelist = []
for u, v, d in G.edges(data=True):
u_lng = df_nodelist[df_nodelist.node == u].values.squeeze()[1]
u_lat = df_nodelist[df_nodelist.node == u].values.squeeze()[2]
v_lng = df_nodelist[df_nodelist.node == v].values.squeeze()[1]
v_lat = df_nodelist[df_nodelist.node == v].values.squeeze()[2]
G.edges[u, v, 0]['geometry'] = LineString([(u_lng, u_lat), (v_lng, v_lat)])
edge_data = dict()
edge_data['node1'] = u
edge_data['node2'] = v
edge_data.update(d)
edgelist.append(edge_data)
df_edgelist = pd.DataFrame(edgelist)
edgelist_crs = {'init': 'epsg:4326'}
df_edgelist_geo = gpd.GeoDataFrame(df_edgelist, crs=edgelist_crs, geometry=df_edgelist.geometry)
return df_edgelist_geo
elif self.type == 1:
# 载入文件模块
df_nodelist = pd.read_csv('../train_dataset/normalnodelist.csv')
edgelist = []
for u, v, d in G.edges(data=True):
u_lng = df_nodelist[df_nodelist.node == u].values.squeeze()[1]
u_lat = df_nodelist[df_nodelist.node == u].values.squeeze()[2]
v_lng = df_nodelist[df_nodelist.node == v].values.squeeze()[1]
v_lat = df_nodelist[df_nodelist.node == v].values.squeeze()[2]
G.edges[u, v, 0]['geometry'] = LineString([(u_lng, u_lat), (v_lng, v_lat)])
edge_data = dict()
edge_data['node1'] = u
edge_data['node2'] = v
edge_data.update(d)
edgelist.append(edge_data)
df_edgelist = pd.DataFrame(edgelist)
edgelist_crs = {'init': 'epsg:4326'}
df_edgelist_geo = gpd.GeoDataFrame(df_edgelist, crs=edgelist_crs, geometry=df_edgelist.geometry)
return df_edgelist_geo
else:
# 绘图模块
gdf_nodes, gdf_edges = ox.graph_to_gdfs(G)
return gdf_edges
def train(self):
if self.type == 0:
start_time = time.time()
# 程序起始
            # a tractable algorithm
K = defaultdict(list)
self.get_df_observations()
difference = inf
G = self.Graph()
T = self.True_Graph()
count = 0
while difference >= 0.5:
self.geo(G).plot(column='arcTime', cmap='RdYlGn')
G, K, P = self.optimization_method(G, K)
if count % 2 == 0:
lastP1 = P
else:
lastP2 = P
if count != 0:
difference = self.diff(lastP1, lastP2)
count += 1
gdf_nodes, gdf_edges = ox.graph_to_gdfs(G)
gdf_nodes.to_file("../smalldata/gdf_nodes" + str(count) + ".geojson", driver="GeoJSON")
gdf_edges.to_file("../smalldata/gdf_edges" + str(count) + ".geojson", driver="GeoJSON")
print(f'正在进行第{count}次迭代,误差为{difference}.')
RMLSB = self.RMLSB(G)
print(f'优化模型当前的RMLSB为{RMLSB}')
# 程序结束
elapsed_time = time.time() - start_time
hour = elapsed_time // 3600
minute = (elapsed_time - hour * 3600) // 60
second = elapsed_time % 60
print(f'inference time cost: {hour} hours, {minute} minutes,{second} seconds')
elif self.type == 1:
start_time = time.time()
# 程序起始
            # a tractable algorithm
K = defaultdict(list)
self.get_df_observations()
difference = inf
G = self.Graph()
T = self.True_Graph()
count = 0
while difference >= 0.5:
self.geo(G).plot(column='arcTime', cmap='RdYlGn')
G, K, P = self.optimization_method(G, K)
if count % 2 == 0:
lastP1 = P
else:
lastP2 = P
if count != 0:
difference = self.diff(lastP1, lastP2)
count += 1
gdf_nodes, gdf_edges = ox.graph_to_gdfs(G)
gdf_nodes.to_file("../normaldata/gdf_nodes" + str(count) + ".geojson", driver="GeoJSON")
gdf_edges.to_file("../normaldata/gdf_edges" + str(count) + ".geojson", driver="GeoJSON")
print(f'正在进行第{count}次迭代,误差为{difference}.')
RMLSB = self.RMLSB(G)
print(f'优化模型当前的RMLSB为{RMLSB}')
# 程序结束
elapsed_time = time.time() - start_time
hour = elapsed_time // 3600
minute = (elapsed_time - hour * 3600) // 60
second = elapsed_time % 60
print(f'inference time cost: {hour} hours, {minute} minutes,{second} seconds')
else:
start_time = time.time()
# 程序起始
            # a tractable algorithm
K = defaultdict(list)
self.get_df_observations()
difference = inf
G = self.Graph()
count = 0
while difference >= 0.5:
# 第k次迭代
fig, ax = plt.subplots(figsize=(30, 30))
self.geo(G).plot(ax=ax, column='arcTime', cmap='Paired', categorical=True)
ax.set_axis_off()
plt.show()
G, K, P = self.optimization_method(G, K)
if count % 2 == 0:
lastP1 = P
else:
lastP2 = P
if count != 0:
difference = self.diff(lastP1, lastP2)
count += 1
gdf_nodes, gdf_edges = ox.graph_to_gdfs(G)
# 使用apply函数清洗不同的数据类型的列
gdf_edges['osmid'] = gdf_edges.apply(lambda row: 0 if type(row['osmid']) == list else row['osmid'],
axis=1)
gdf_edges = gdf_edges[gdf_edges['osmid'] > 0]
gdf_nodes.to_file("../realdata/gdf_nodes" + str(count) + ".geojson", driver="GeoJSON")
gdf_edges.to_file("../realdata/gdf_edges" + str(count) + ".geojson", driver="GeoJSON")
print(f'正在进行第{count}次迭代,误差为{difference}.')
# 程序结束
elapsed_time = time.time() - start_time
hour = elapsed_time // 3600
minute = (elapsed_time - hour * 3600) // 60
second = elapsed_time % 60
print(f'inference time cost: {hour} hours, {minute} minutes,{second} seconds')
def test(self, G):
"""
G: 输入训练好的图模型
test_dataset: 输入测试集,与训练集不同
"""
if self.type == 0:
test_dataset = "../test_dataset/small_train_data.csv"
elif self.type == 1:
test_dataset = "../test_dataset/normal_train_data.csv"
else:
test_dataset = "../test_dataset/real_train_data.csv"
RMLSB = 0
# <help函数>
def geometric_mean(data): # 计算几何平均数T_od
total = 1
for i in data:
total *= i # 等同于total=total*i
return pow(total, 1 / len(data))
df_observed_data = pd.read_csv(test_dataset)
# 数据结构3:所有观察样本
O = defaultdict(list) # origin和destination的行程时间列表
for i in range(df_observed_data.shape[0]):
origin = int(df_observed_data.iloc[i][1])
destination = int(df_observed_data.iloc[i][2])
O['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)].append(df_observed_data.iloc[i][0])
# 数据结构4:所有观察样本时间的几何平均
M = defaultdict(int) # origin和destination的行程时间几何平均值
for i in range(df_observed_data.shape[0]):
origin = int(df_observed_data.iloc[i][1])
destination = int(df_observed_data.iloc[i][2])
M['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)] = geometric_mean(
O['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)])
for origin in G.nodes():
for destination in G.nodes():
if origin != destination and int(
M['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)]) != 0:
observe = M['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)]
trip = self.modified_dijkstras(G, origin, destination)[0]
RMLSB += math.pow((math.log(trip) - math.log(observe)), 2)
return np.sqrt(RMLSB)
class Visualization:
def __init__(self, G, type=0, manual=True):
self.G = G
self.type = type
self.manual = manual
def Graph(self):
"""
加载初始化人工网络
:return: 返回一个加载好的的图G对象
"""
# <设置人工网络weight模块>
# 多重无向图与无向图添加权重的方式不同,d就是属性字典,无向图中G.edges[u,v]是字典而多重无向图G.edges[u,v]不是
for u, v, d in self.G.edges(data=True): # 设置outside的行程时间
self.G.edges[u, v, 0]['arcTime'] = 1
for u, v, d in self.G.edges(data=True):
self.G.edges[u, v, 0]['distance'] = 1
return self.G
def project(self, G, lng, lat):
"""
将某个点的坐标按照欧式距离映射到网络中最近的拓扑点上
:Param G: 拓扑图
:Param lng: 经度
:Param lat: 纬度
:Return: 返回最近的点的OSMid
"""
nearest_node = None
shortest_distance = inf
for n, d in G.nodes(data=True):
# d['x']是经度,d['y']是纬度
new_shortest_distance = ox.distance.euclidean_dist_vec(lng, lat, d['x'], d['y'])
if new_shortest_distance < shortest_distance:
nearest_node = n
shortest_distance = new_shortest_distance
return nearest_node, shortest_distance
def modified_dijkstras(self, origin, destination):
"""
最短路算法
:return: 返回一个traveltime和path
"""
count = 0
paths_and_distances = {}
for node in self.G.nodes():
paths_and_distances[node] = [inf, [origin]]
paths_and_distances[origin][0] = 0
vertices_to_explore = [(0, origin)]
while vertices_to_explore:
current_distance, current_vertex = heappop(vertices_to_explore)
for neighbor in self.G.neighbors(current_vertex):
# get_edge_data得到的是嵌套字典
edge_weight = self.G.get_edge_data(current_vertex, neighbor)[0]['arcTime']
new_distance = current_distance + edge_weight
new_path = paths_and_distances[current_vertex][1] + [neighbor]
if new_distance < paths_and_distances[neighbor][0]:
paths_and_distances[neighbor][0] = new_distance
paths_and_distances[neighbor][1] = new_path
heappush(vertices_to_explore, (new_distance, neighbor))
count += 1
return paths_and_distances[destination]
    def plot_path_evolution(self, G):
plt.show()
def plot_taxi_position(self, map=True, kind=0):
if map == False:
# 获取manhattan的networkx对象
G = ox.graph_from_place('Manhattan, New York City, New York, USA', network_type='drive')
gdf_nodes, gdf_edges = ox.graph_to_gdfs(G)
df_dataset = pd.read_csv("../train_dataset/dataset.csv")
df_dataset['geometry'] = df_dataset.apply(
lambda row: Point(float(row['pickup_longitude']), float(row['pickup_latitude'])), axis=1)
df_dataset_geo = gpd.GeoDataFrame(df_dataset, crs=gdf_edges.crs, geometry=df_dataset.geometry)
fig, ax = plt.subplots(figsize=(30, 30))
df_dataset_geo.plot(ax=ax, color='green', markersize=1)
gdf_edges.plot(ax=ax, cmap='Reds')
ax.set_axis_off()
plt.show()
else:
# 获取manhattan的networkx对象
G = ox.graph_from_place('Manhattan, New York City, New York, USA', network_type='drive')
# 将network对象转换成geodatafram对象
gdf_nodes, gdf_edges = ox.graph_to_gdfs(G)
df_dataset = | pd.read_csv("../train_dataset/dataset.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
'''Questionnaire data analysis toolkit
Created on Tue Nov 8 20:05:36 2016
@author: JSong
1. Implements and wraps many algorithms commonly used on wenjuanxing (问卷星) survey data
2. Exports results directly to PPTX via the report package
The toolkit supports the following features:
1. Encoding raw exports from wenjuanxing (问卷星), wenjuanwang (问卷网), etc.
2. Wrapped functions for descriptive statistics and cross-tabulation
3. Generating a complete report together with the underlying data
'''
import os
import re
import sys
import math
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from .. import report as rpt
from .. import associate
__all__=['read_code',
'save_code',
'spec_rcode',
'dataText_to_code',
'dataCode_to_text',
'var_combine',
'wenjuanwang',
'wenjuanxing',
'load_data',
'read_data',
'save_data',
'data_merge',
'clean_ftime',
'data_auto_code',
'qdata_flatten',
'sample_size_cal',
'confidence_interval',
'gof_test',
'chi2_test',
'fisher_exact',
'anova',
'mca',
'cluster',
'scatter',
'sankey',
'qtable',
'association_rules',
'contingency',
'cross_chart',
'summary_chart',
'onekey_gen',
'scorpion']
#=================================================================
#
#
# 【问卷数据处理】
#
#
#==================================================================
def read_code(filename):
'''读取code编码文件并输出为字典格式
1、支持json格式
2、支持本包规定的xlsx格式
see alse to_code
'''
file_type=os.path.splitext(filename)[1][1:]
if file_type == 'json':
import json
        with open(filename, 'r') as f:
            code = json.load(f)
return code
d=pd.read_excel(filename,header=None)
d=d[d.any(axis=1)]#去除空行
d.fillna('NULL',inplace=True)
    d = d.values  # .as_matrix() was removed in newer pandas
code={}
for i in range(len(d)):
tmp=d[i,0].strip()
if tmp == 'key':
# 识别题号
code[d[i,1]]={}
key=d[i,1]
elif tmp in ['qlist','code_order']:
# 识别字典值为列表的字段
ind=np.argwhere(d[i+1:,0]!='NULL')
if len(ind)>0:
j=i+1+ind[0][0]
else:
j=len(d)
tmp2=list(d[i:j,1])
# 列表中字符串的格式化,去除前后空格
for i in range(len(tmp2)):
if isinstance(tmp2[i],str):
tmp2[i]=tmp2[i].strip()
code[key][tmp]=tmp2
elif tmp in ['code','code_r']:
# 识别字典值为字典的字段
ind=np.argwhere(d[i+1:,0]!='NULL')
if len(ind)>0:
j=i+1+ind[0][0]
else:
j=len(d)
tmp1=list(d[i:j,1])
tmp2=list(d[i:j,2])
for i in range(len(tmp2)):
if isinstance(tmp2[i],str):
tmp2[i]=tmp2[i].strip()
#tmp2=[s.strip() for s in tmp2 if isinstance(s,str) else s]
code[key][tmp]=dict(zip(tmp1,tmp2))
# 识别其他的列表字段
elif (tmp!='NULL') and (d[i,2]=='NULL') and ((i==len(d)-1) or (d[i+1,0]=='NULL')):
ind=np.argwhere(d[i+1:,0]!='NULL')
if len(ind)>0:
j=i+1+ind[0][0]
else:
j=len(d)
if i==len(d)-1:
code[key][tmp]=d[i,1]
else:
tmp2=list(d[i:j,1])
for i in range(len(tmp2)):
if isinstance(tmp2[i],str):
tmp2[i]=tmp2[i].strip()
code[key][tmp]=tmp2
# 识别其他的字典字段
elif (tmp!='NULL') and (d[i,2]!='NULL') and ((i==len(d)-1) or (d[i+1,0]=='NULL')):
ind=np.argwhere(d[i+1:,0]!='NULL')
if len(ind)>0:
j=i+1+ind[0][0]
else:
j=len(d)
tmp1=list(d[i:j,1])
tmp2=list(d[i:j,2])
for i in range(len(tmp2)):
if isinstance(tmp2[i],str):
tmp2[i]=tmp2[i].strip()
#tmp2=[s.strip() for s in tmp2 if isinstance(s,str) else s]
code[key][tmp]=dict(zip(tmp1,tmp2))
elif tmp == 'NULL':
continue
else:
code[key][tmp]=d[i,1]
return code
def save_code(code,filename='code.xlsx'):
'''code本地输出
1、输出为json格式,根据文件名自动识别
2、输出为Excel格式
see also read_code
'''
save_type=os.path.splitext(filename)[1][1:]
if save_type == 'json':
code=pd.DataFrame(code)
code.to_json(filename,force_ascii=False)
return
tmp=pd.DataFrame(columns=['name','value1','value2'])
i=0
if all(['Q' in c[0] for c in code.keys()]):
key_qlist=sorted(code,key=lambda c:int(re.findall('\d+',c)[0]))
else:
key_qlist=code.keys()
for key in key_qlist:
code0=code[key]
tmp.loc[i]=['key',key,'']
i+=1
#print(key)
for key0 in code0:
tmp2=code0[key0]
if (type(tmp2) == list) and tmp2:
tmp.loc[i]=[key0,tmp2[0],'']
i+=1
for ll in tmp2[1:]:
tmp.loc[i]=['',ll,'']
i+=1
elif (type(tmp2) == dict) and tmp2:
try:
tmp2_key=sorted(tmp2,key=lambda c:float(re.findall('[\d\.]+','%s'%c)[-1]))
except:
tmp2_key=list(tmp2.keys())
j=0
for key1 in tmp2_key:
if j==0:
tmp.loc[i]=[key0,key1,tmp2[key1]]
else:
tmp.loc[i]=['',key1,tmp2[key1]]
i+=1
j+=1
else:
if tmp2:
tmp.loc[i]=[key0,tmp2,'']
i+=1
if sys.version>'3':
tmp.to_excel(filename,index=False,header=False)
else:
tmp.to_csv(filename,index=False,header=False,encoding='utf-8')
'''问卷数据导入和编码
对每一个题目的情形进行编码:题目默认按照Q1、Q2等给出
Qn.content: 题目内容
Qn.qtype: 题目类型,包含:单选题、多选题、填空题、排序题、矩阵单选题等
Qn.qlist: 题目列表,例如多选题对应着很多小题目
Qn.code: dict,题目选项编码
Qn.code_r: 题目对应的编码(矩阵题目专有)
Qn.code_order: 题目类别的顺序,用于PPT报告的生成[一般后期添加]
Qn.name: 特殊类型,包含:城市题、NPS题等
Qn.weight:dict,每个选项的权重
'''
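# A hypothetical example of a single entry in `code`, to illustrate the structure
# documented above (question number, content and options are made up):
#
#   code['Q1'] = {
#       'content': '您的性别',          # question text
#       'qtype': '单选题',              # single-choice question
#       'qlist': ['Q1'],                # columns in the data belonging to this question
#       'code': {1: '男', 2: '女'},     # mapping from stored value to option label
#   }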
def dataText_to_code(df,sep,qqlist=None):
'''编码文本数据
'''
if sep in [';','┋']:
qtype='多选题'
elif sep in ['-->','→']:
qtype='排序题'
if not qqlist:
qqlist=df.columns
# 处理多选题
code={}
for qq in qqlist:
tmp=df[qq].map(lambda x : x.split(sep) if isinstance(x,str) else [])
item_list=sorted(set(tmp.sum()))
if qtype == '多选题':
tmp=tmp.map(lambda x: [int(t in x) for t in item_list])
code_tmp={'code':{},'qtype':u'多选题','qlist':[],'content':qq}
elif qtype == '排序题':
tmp=tmp.map(lambda x:[x.index(t)+1 if t in x else np.nan for t in item_list])
code_tmp={'code':{},'qtype':u'排序题','qlist':[],'content':qq}
for i,t in enumerate(item_list):
column_name='{}_A{:.0f}'.format(qq,i+1)
df[column_name]=tmp.map(lambda x:x[i])
code_tmp['code'][column_name]=item_list[i]
code_tmp['qlist']=code_tmp['qlist']+[column_name]
code[qq]=code_tmp
df.drop(qq,axis=1,inplace=True)
return df,code
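# A minimal usage sketch (column name and answers are illustrative), assuming the
# free-text multiple-choice answers in a column are joined by the '┋' separator:
def _demo_dataText_to_code():
    df = pd.DataFrame({'Q1': ['apple┋banana', 'banana', 'apple┋orange']})
    df2, code = dataText_to_code(df.copy(), '┋', ['Q1'])
    print(df2)    # one 0/1 indicator column per option: Q1_A1, Q1_A2, ...
    print(code)   # the generated entry code['Q1'] with qtype '多选题'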
def dataCode_to_text(df,code=None):
'''将按序号数据转换成文本
'''
if df.max().max()>1:
sep='→'
else:
sep='┋'
if code:
df=df.rename(code)
qlist=list(df.columns)
df['text']=np.nan
if sep in ['┋']:
for i in df.index:
w=df.loc[i,:]==1
df.loc[i,'text']=sep.join(list(w.index[w]))
elif sep in ['→']:
for i in df.index:
w=df.loc[i,:]
w=w[w>=1].sort_values()
df.loc[i,'text']=sep.join(list(w.index))
df.drop(qlist,axis=1,inplace=True)
return df
def var_combine(data,code,qq1,qq2,sep=',',qnum_new=None,qname_new=None):
'''将两个变量组合成一个变量
例如:
Q1:'性别',Q2: 年龄
组合后生成:
1、男_16~19岁
2、男_20岁~40岁
3、女_16~19岁
4、女_20~40岁
'''
if qnum_new is None:
if 'Q'==qq2[0]:
qnum_new=qq1+'_'+qq2[1:]
else:
qnum_new=qq1+'_'+qq2
if qname_new is None:
qname_new=code[qq1]['content']+'_'+code[qq2]['content']
if code[qq1]['qtype']!='单选题' or code[qq2]['qtype']!='单选题':
print('只支持组合两个单选题,请检查.')
raise
d1=data[code[qq1]['qlist'][0]]
d2=data[code[qq2]['qlist'][0]]
sm=max(code[qq1]['code'].keys())# 进位制
sn=max(code[qq2]['code'].keys())# 进位制
if isinstance(sm,str) or isinstance(sn,str):
print('所选择的两个变量不符合函数要求.')
raise
data[qnum_new]=(d1-1)*sn+d2
code[qnum_new]={'qtype':'单选题','qlist':[qnum_new],'content':qname_new}
code_tmp={}
for c1 in code[qq1]['code']:
for c2 in code[qq2]['code']:
cc=(c1-1)*sn+c2
value='{}{}{}'.format(code[qq1]['code'][c1],sep,code[qq2]['code'][c2])
code_tmp[cc]=value
code[qnum_new]['code']=code_tmp
print('变量已合并,新变量题号为:{}'.format(qnum_new))
return data,code
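# Usage sketch (hypothetical question numbers): combine a gender question Q1 with
# an age question Q2 into a new single-choice variable Q1_2, e.g.
#   data, code = var_combine(data, code, 'Q1', 'Q2')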
def wenjuanwang(filepath='.\\data',encoding='gbk'):
'''问卷网数据导入和编码
输入:
filepath:
列表,[0]为按文本数据路径,[1]为按序号文本,[2]为编码文件
文件夹路径,函数会自动在文件夹下搜寻相关数据
输出:
(data,code):
data为按序号的数据,题目都替换成了Q_n
code为数据编码,可利用函数to_code()导出为json格式或者Excel格式数据
'''
if isinstance(filepath,list):
filename1=filepath[0]
filename2=filepath[1]
filename3=filepath[2]
elif os.path.isdir(filepath):
filename1=os.path.join(filepath,'All_Data_Readable.csv')
filename2=os.path.join(filepath,'All_Data_Original.csv')
filename3=os.path.join(filepath,'code.csv')
else:
print('can not dection the filepath!')
d1=pd.read_csv(filename1,encoding=encoding)
d1.drop([u'答题时长'],axis=1,inplace=True)
d2=pd.read_csv(filename2,encoding=encoding)
d3=pd.read_csv(filename3,encoding=encoding,header=None,na_filter=False)
    d3 = d3.values  # .as_matrix() was removed in newer pandas
# 遍历code.csv,获取粗略的编码,暂缺qlist,矩阵单选题的code_r
code={}
for i in range(len(d3)):
if d3[i,0]:
key=d3[i,0]
code[key]={}
code[key]['content']=d3[i,1]
code[key]['qtype']=d3[i,2]
code[key]['code']={}
code[key]['qlist']=[]
elif d3[i,2]:
tmp=d3[i,1]
if code[key]['qtype'] in [u'多选题',u'排序题']:
tmp=key+'_A'+'%s'%(tmp)
code[key]['code'][tmp]='%s'%(d3[i,2])
code[key]['qlist'].append(tmp)
elif code[key]['qtype'] in [u'单选题']:
try:
tmp=int(tmp)
except:
tmp='%s'%(tmp)
code[key]['code'][tmp]='%s'%(d3[i,2])
code[key]['qlist']=[key]
elif code[key]['qtype'] in [u'填空题']:
code[key]['qlist']=[key]
else:
try:
tmp=int(tmp)
except:
tmp='%s'%(tmp)
code[key]['code'][tmp]='%s'%(d3[i,2])
# 更新矩阵单选的code_r和qlist
qnames_Readable=list(d1.columns)
qnames=list(d2.columns)
for key in code.keys():
qlist=[]
for name in qnames:
if re.match(key+'_',name) or key==name:
qlist.append(name)
if ('qlist' not in code[key]) or (not code[key]['qlist']):
code[key]['qlist']=qlist
if code[key]['qtype'] in [u'矩阵单选题']:
tmp=[qnames_Readable[qnames.index(q)] for q in code[key]['qlist']]
code_r=[re.findall('_([^_]*?)$',t)[0] for t in tmp]
code[key]['code_r']=dict(zip(code[key]['qlist'],code_r))
# 处理时间格式
d2['start']=pd.to_datetime(d2['start'])
d2['finish']=pd.to_datetime(d2['finish'])
tmp=d2['finish']-d2['start']
tmp=tmp.astype(str).map(lambda x:60*int(re.findall(':(\d+):',x)[0])+int(re.findall(':(\d+)\.',x)[0]))
ind=np.where(d2.columns=='finish')[0][0]
d2.insert(int(ind)+1,u'答题时长(秒)',tmp)
return (d2,code)
def wenjuanxing(filepath='.\\data',headlen=6):
'''问卷星数据导入和编码
输入:
filepath:
列表, filepath[0]: (23_22_0.xls)为按文本数据路径,filepath[1]: (23_22_2.xls)为按序号文本
文件夹路径,函数会自动在文件夹下搜寻相关数据,优先为\d+_\d+_0.xls和\d+_\d+_2.xls
headlen: 问卷星数据基础信息的列数
输出:
(data,code):
data为按序号的数据,题目都替换成了Q_n
code为数据编码,可利用函数to_code()导出为json格式或者Excel格式数据
'''
#filepath='.\\data'
#headlen=6# 问卷从开始到第一道正式题的数目(一般包含序号,提交答卷时间的等等)
if isinstance(filepath,list):
filename1=filepath[0]
filename2=filepath[1]
elif os.path.isdir(filepath):
filelist=os.listdir(filepath)
n1=n2=0
for f in filelist:
s1=re.findall('\d+_\d+_0.xls',f)
s2=re.findall('\d+_\d+_2.xls',f)
if s1:
filename1=s1[0]
n1+=1
if s2:
filename2=s2[0]
n2+=1
if n1+n2==0:
print(u'在文件夹下没有找到问卷星按序号和按文本数据,请检查目录或者工作目录.')
return
elif n1+n2>2:
print(u'存在多组问卷星数据,请检查.')
return
filename1=os.path.join(filepath,filename1)
filename2=os.path.join(filepath,filename2)
else:
print('can not dection the filepath!')
d1=pd.read_excel(filename1)
d2=pd.read_excel(filename2)
d2.replace({-2:np.nan,-3:np.nan},inplace=True)
#d1.replace({u'(跳过)':np.nan},inplace=True)
code={}
'''
遍历一遍按文本数据,获取题号和每个题目的类型
'''
for name in d1.columns[headlen:]:
tmp=re.findall(u'^(\d{1,3})[、::]',name)
# 识别多选题、排序题
if tmp:
new_name='Q'+tmp[0]
current_name='Q'+tmp[0]
code[new_name]={}
content=re.findall(u'\d{1,3}[、::](.*)',name)
code[new_name]['content']=content[0]
d1.rename(columns={name:new_name},inplace=True)
code[new_name]['qlist']=[]
code[new_name]['code']={}
code[new_name]['qtype']=''
code[new_name]['name']=''
qcontent=str(list(d1[new_name]))
# 单选题和多选题每个选项都可能有开放题,得识别出来
if ('〖' in qcontent) and ('〗' in qcontent):
code[new_name]['qlist_open']=[]
if '┋' in qcontent:
code[new_name]['qtype']=u'多选题'
elif '→' in qcontent:
code[new_name]['qtype']=u'排序题'
# 识别矩阵单选题
else:
tmp2=re.findall(u'^第(\d{1,3})题\(.*?\)',name)
if tmp2:
new_name='Q'+tmp2[0]
else:
pass
if new_name not in code.keys():
j=1
current_name=new_name
new_name=new_name+'_R%s'%j
code[current_name]={}
code[current_name]['content']=current_name+'(问卷星数据中未找到题目具体内容)'
code[current_name]['qlist']=[]
code[current_name]['code']={}
code[current_name]['code_r']={}
code[current_name]['qtype']=u'矩阵单选题'
code[current_name]['name']=''
#code[current_name]['sample_len']=0
d1.rename(columns={name:new_name},inplace=True)
else:
j+=1
new_name=new_name+'_R%s'%j
d1.rename(columns={name:new_name},inplace=True)
#raise Exception(u"can not dection the NO. of question.")
#print('can not dection the NO. of question')
#print(name)
#pass
# 遍历按序号数据,完整编码
d2qlist=d2.columns[6:].tolist()
for name in d2qlist:
tmp1=re.findall(u'^(\d{1,3})[、::]',name)# 单选题和填空题
tmp2=re.findall(u'^第(.*?)题',name)# 多选题、排序题和矩阵单选题
if tmp1:
current_name='Q'+tmp1[0]# 当前题目的题号
d2.rename(columns={name:current_name},inplace=True)
code[current_name]['qlist'].append(current_name)
#code[current_name]['sample_len']=d2[current_name].count()
ind=d2[current_name].copy()
ind=ind.notnull()
c1=d1.loc[ind,current_name].unique()
c2=d2.loc[ind,current_name].unique()
#print('========= %s========'%current_name)
if (c2.dtype == object) or ((list(c1)==list(c2)) and len(c2)>=min(15,len(d2[ind]))) or (len(c2)>50):
code[current_name]['qtype']=u'填空题'
else:
code[current_name]['qtype']=u'单选题'
#code[current_name]['code']=dict(zip(c2,c1))
if 'qlist_open' in code[current_name].keys():
tmp=d1[current_name].map(lambda x: re.findall('〖(.*?)〗',x)[0] if re.findall('〖(.*?)〗',x) else '')
ind_open=np.argwhere(d2.columns.values==current_name).tolist()[0][0]
d2.insert(ind_open+1,current_name+'_open',tmp)
d1[current_name]=d1[current_name].map(lambda x: re.sub('〖.*?〗','',x))
#c1=d1.loc[ind,current_name].map(lambda x: re.sub('〖.*?〗','',x)).unique()
code[current_name]['qlist_open']=[current_name+'_open']
#c2_tmp=d2.loc[ind,current_name].map(lambda x: int(x) if (('%s'%x!='nan') and not(isinstance(x,str)) and (int(x)==x)) else x)
code[current_name]['code']=dict(zip(d2.loc[ind,current_name],d1.loc[ind,current_name]))
#code[current_name]['code']=dict(zip(c2,c1))
elif tmp2:
name0='Q'+tmp2[0]
# 新题第一个选项
if name0 != current_name:
j=1#记录多选题的小题号
current_name=name0
c2=list(d2[name].unique())
if code[current_name]['qtype'] == u'矩阵单选题':
name1='Q'+tmp2[0]+'_R%s'%j
c1=list(d1[name1].unique())
code[current_name]['code']=dict(zip(c2,c1))
#print(dict(zip(c2,c1)))
else:
name1='Q'+tmp2[0]+'_A%s'%j
#code[current_name]['sample_len']=d2[name].notnull().sum()
else:
j+=1#记录多选题的小题号
c2=list(d2[name].unique())
if code[current_name]['qtype'] == u'矩阵单选题':
name1='Q'+tmp2[0]+'_R%s'%j
c1=list(d1[name1].unique())
old_dict=code[current_name]['code'].copy()
new_dict=dict(zip(c2,c1))
old_dict.update(new_dict)
code[current_name]['code']=old_dict.copy()
else:
name1='Q'+tmp2[0]+'_A%s'%j
code[current_name]['qlist'].append(name1)
d2.rename(columns={name:name1},inplace=True)
tmp3=re.findall(u'第.*?题\((.*)\)',name)[0]
if code[current_name]['qtype'] == u'矩阵单选题':
code[current_name]['code_r'][name1]=tmp3
else:
code[current_name]['code'][name1]=tmp3
# 识别开放题
if (code[current_name]['qtype'] == u'多选题'):
openq=tmp3+'〖.*?〗'
openq=re.sub('\)','\)',openq)
openq=re.sub('\(','\(',openq)
openq=re.compile(openq)
qcontent=str(list(d1[current_name]))
if re.findall(openq,qcontent):
tmp=d1[current_name].map(lambda x: re.findall(openq,x)[0] if re.findall(openq,x) else '')
ind=np.argwhere(d2.columns.values==name1).tolist()[0][0]
d2.insert(ind+1,name1+'_open',tmp)
code[current_name]['qlist_open'].append(name1+'_open')
# 删除字典中的nan
keys=list(code[current_name]['code'].keys())
for key in keys:
if '%s'%key == 'nan':
del code[current_name]['code'][key]
# 处理一些特殊题目,给它们的选项固定顺序,例如年龄、收入等
for k in code.keys():
content=code[k]['content']
qtype=code[k]['qtype']
if ('code' in code[k]) and (code[k]['code']!={}):
tmp1=code[k]['code'].keys()
tmp2=code[k]['code'].values()
# 识别选项是否是有序变量
tmp3=[len(re.findall('\d+','%s'%v))>0 for v in tmp2]#是否有数字
tmp4=[len(re.findall('-|~','%s'%v))>0 for v in tmp2]#是否有"-"或者"~"
if (np.array(tmp3).sum()>=len(tmp2)-2) or (np.array(tmp4).sum()>=len(tmp2)*0.8-(1e-17)):
try:
tmp_key=sorted(code[k]['code'],key=lambda c:float(re.findall('[\d\.]+','%s'%c)[-1]))
except:
tmp_key=list(tmp1)
code_order=[code[k]['code'][v] for v in tmp_key]
code[k]['code_order']=code_order
# 识别矩阵量表题
if qtype=='矩阵单选题':
tmp3=[int(re.findall('\d+','%s'%v)[0]) for v in tmp2 if re.findall('\d+','%s'%v)]
if (set(tmp3)<=set([0,1,2,3,4,5,6,7,8,9,10])) and (len(tmp3)==len(tmp2)):
code[k]['weight']=dict(zip(tmp1,tmp3))
continue
# 识别特殊题型
if ('性别' in content) and ('男' in tmp2) and ('女' in tmp2):
code[k]['name']='性别'
if ('gender' in content.lower()) and ('Male' in tmp2) and ('Female' in tmp2):
code[k]['name']='性别'
if (('年龄' in content) or ('age' in content.lower())) and (np.array(tmp3).sum()>=len(tmp2)-1):
code[k]['name']='年龄'
if ('满意度' in content) and ('整体' in content):
tmp3=[int(re.findall('\d+','%s'%v)[0]) for v in tmp2 if re.findall('\d+','%s'%v)]
if set(tmp3)<=set([0,1,2,3,4,5,6,7,8,9,10]):
code[k]['name']='满意度'
if len(tmp3)==len(tmp2):
code[k]['weight']=dict(zip(tmp1,tmp3))
if ('意愿' in content) and ('推荐' in content):
tmp3=[int(re.findall('\d+','%s'%v)[0]) for v in tmp2 if re.findall('\d+','%s'%v)]
if set(tmp3)<=set([0,1,2,3,4,5,6,7,8,9,10]):
code[k]['name']='NPS'
if len(tmp3)==len(tmp2):
weight=pd.Series(dict(zip(tmp1,tmp3)))
weight=weight.replace(dict(zip([0,1,2,3,4,5,6,7,8,9,10],[-100,-100,-100,-100,-100,-100,-100,0,0,100,100])))
code[k]['weight']=weight.to_dict()
try:
d2[u'所用时间']=d2[u'所用时间'].map(lambda s: int(s[:-1]))
except:
pass
return (d2,code)
def load_data(method='filedialog',**kwargs):
'''导入问卷数据
# 暂时只支持已编码的和问卷星数据
1、支持路径搜寻
2、支持自由选择文件
method:
-filedialog: 打开文件窗口选择
-pathsearch:自带搜索路径,需提供filepath
'''
if method=='filedialog':
import tkinter as tk
from tkinter.filedialog import askopenfilenames
tk.Tk().withdraw();
#print(u'请选择编码所需要的数据文件(支持问卷星和已编码好的数据)')
if 'initialdir' in kwargs:
initialdir=kwargs['initialdir']
elif os.path.isdir('.\\data'):
initialdir = ".\\data"
else:
initialdir = "."
title =u"请选择编码所需要的数据文件(支持问卷星和已编码好的数据)"
filetypes = (("Excel files","*.xls;*.xlsx"),("CSV files","*.csv"),("all files","*.*"))
filenames=[]
while len(filenames)<1:
filenames=askopenfilenames(initialdir=initialdir,title=title,filetypes=filetypes)
if len(filenames)<1:
print('请至少选择一个文件.')
filenames=list(filenames)
elif method == 'pathsearch':
if 'filepath' in kwargs:
filepath=kwargs['filepath']
else :
filepath='.\\data\\'
if os.path.isdir(filepath):
filenames=os.listdir(filepath)
filenames=[os.path.join(filepath,s) for s in filenames]
else:
print('搜索路径错误')
raise
info=[]
for filename in filenames:
filename_nopath=os.path.split(filename)[1]
data=read_data(filename)
# 第一列包含的字段
field_c1=set(data.iloc[:,0].dropna().unique())
field_r1=set(data.columns)
# 列名是否包含Q
hqlen=[len(re.findall('^[qQ]\d+',c))>0 for c in field_r1]
hqrate=hqlen.count(True)/len(field_r1) if len(field_r1)>0 else 0
rowlens,collens=data.shape
# 数据中整数/浮点数的占比
rate_real=data.applymap(lambda x:isinstance(x,(int,float))).sum().sum()/rowlens/collens
tmp={'filename':filename_nopath,'filenametype':'','rowlens':rowlens,'collens':collens,\
'field_c1':field_c1,'field_r1':field_r1,'type':'','rate_real':rate_real}
if len(re.findall('^data.*\.xls',filename_nopath))>0:
tmp['filenametype']='data'
elif len(re.findall('^code.*\.xls',filename_nopath))>0:
tmp['filenametype']='code'
elif len(re.findall('\d+_\d+_\d.xls',filename_nopath))>0:
tmp['filenametype']='wenjuanxing'
if tmp['filenametype']=='code' or set(['key','code','qlist','qtype']) < field_c1:
tmp['type']='code'
if tmp['filenametype']=='wenjuanxing' or len(set(['序号','提交答卷时间','所用时间','来自IP','来源','来源详情','总分'])&field_r1)>=5:
tmp['type']='wenjuanxing'
if tmp['filenametype']=='data' or hqrate>=0.5:
tmp['type']='data'
info.append(tmp)
questype=[k['type'] for k in info]
# 这里有一个优先级存在,优先使用已编码好的数据,其次是问卷星数据
if questype.count('data')*questype.count('code')==1:
data=read_data(filenames[questype.index('data')])
code=read_code(filenames[questype.index('code')])
elif questype.count('wenjuanxing')>=2:
filenames=[(f,info[i]['rate_real']) for i,f in enumerate(filenames) if questype[i]=='wenjuanxing']
tmp=[]
for f,rate_real in filenames:
t2=0 if rate_real<0.5 else 2
d=pd.read_excel(f)
d=d.iloc[:,0]
tmp.append((t2,d))
#print('添加{}'.format(t2))
tmp_equal=0
for t,d0 in tmp[:-1]:
if len(d)==len(d0) and all(d==d0):
tmp_equal+=1
tmp[-1]=(t2+int(t/10)*10,tmp[-1][1])
max_quesnum=max([int(t/10) for t,d in tmp])
if tmp_equal==0:
tmp[-1]=(tmp[-1][0]+max_quesnum*10+10,tmp[-1][1])
#print('修改为{}'.format(tmp[-1][0]))
# 重新整理所有的问卷数据
questype=[t for t,d in tmp]
filenames=[f for f,r in filenames]
quesnums=max([int(t/10) for t in questype])#可能存在的数据组数
filename_wjx=[]
for i in range(1,quesnums+1):
if questype.count(i*10)==1 and questype.count(i*10+2)==1:
filename_wjx.append([filenames[questype.index(i*10)],filenames[questype.index(i*10+2)]])
if len(filename_wjx)==1:
data,code=wenjuanxing(filename_wjx[0])
elif len(filename_wjx)>1:
print('脚本识别出多组问卷星数据,请选择需要编码的数据:')
for i,f in enumerate(filename_wjx):
print('{}: {}'.format(i+1,'/'.join([os.path.split(f[0])[1],os.path.split(f[1])[1]])))
ii=input('您选择的数据是(数据前的编码,如:1):')
ii=re.sub('\s','',ii)
if ii.isnumeric():
data,code=wenjuanxing(filename_wjx[int(ii)-1])
else:
print('您输入正确的编码.')
else:
print('没有找到任何问卷数据..')
raise
else:
print('没有找到任何数据')
raise
return data,code
def spec_rcode(data,code):
city={'北京':0,'上海':0,'广州':0,'深圳':0,'成都':1,'杭州':1,'武汉':1,'天津':1,'南京':1,'重庆':1,'西安':1,'长沙':1,'青岛':1,'沈阳':1,'大连':1,'厦门':1,'苏州':1,'宁波':1,'无锡':1,\
'福州':2,'合肥':2,'郑州':2,'哈尔滨':2,'佛山':2,'济南':2,'东莞':2,'昆明':2,'太原':2,'南昌':2,'南宁':2,'温州':2,'石家庄':2,'长春':2,'泉州':2,'贵阳':2,'常州':2,'珠海':2,'金华':2,\
'烟台':2,'海口':2,'惠州':2,'乌鲁木齐':2,'徐州':2,'嘉兴':2,'潍坊':2,'洛阳':2,'南通':2,'扬州':2,'汕头':2,'兰州':3,'桂林':3,'三亚':3,'呼和浩特':3,'绍兴':3,'泰州':3,'银川':3,'中山':3,\
'保定':3,'西宁':3,'芜湖':3,'赣州':3,'绵阳':3,'漳州':3,'莆田':3,'威海':3,'邯郸':3,'临沂':3,'唐山':3,'台州':3,'宜昌':3,'湖州':3,'包头':3,'济宁':3,'盐城':3,'鞍山':3,'廊坊':3,'衡阳':3,\
'秦皇岛':3,'吉林':3,'大庆':3,'淮安':3,'丽江':3,'揭阳':3,'荆州':3,'连云港':3,'张家口':3,'遵义':3,'上饶':3,'龙岩':3,'衢州':3,'赤峰':3,'湛江':3,'运城':3,'鄂尔多斯':3,'岳阳':3,'安阳':3,\
'株洲':3,'镇江':3,'淄博':3,'郴州':3,'南平':3,'齐齐哈尔':3,'常德':3,'柳州':3,'咸阳':3,'南充':3,'泸州':3,'蚌埠':3,'邢台':3,'舟山':3,'宝鸡':3,'德阳':3,'抚顺':3,'宜宾':3,'宜春':3,'怀化':3,\
'榆林':3,'梅州':3,'呼伦贝尔':3,'临汾':4,'南阳':4,'新乡':4,'肇庆':4,'丹东':4,'德州':4,'菏泽':4,'九江':4,'江门市':4,'黄山':4,'渭南':4,'营口':4,'娄底':4,'永州市':4,'邵阳':4,'清远':4,\
'大同':4,'枣庄':4,'北海':4,'丽水':4,'孝感':4,'沧州':4,'马鞍山':4,'聊城':4,'三明':4,'开封':4,'锦州':4,'汉中':4,'商丘':4,'泰安':4,'通辽':4,'牡丹江':4,'曲靖':4,'东营':4,'韶关':4,'拉萨':4,\
'襄阳':4,'湘潭':4,'盘锦':4,'驻马店':4,'酒泉':4,'安庆':4,'宁德':4,'四平':4,'晋中':4,'滁州':4,'衡水':4,'佳木斯':4,'茂名':4,'十堰':4,'宿迁':4,'潮州':4,'承德':4,'葫芦岛':4,'黄冈':4,'本溪':4,\
'绥化':4,'萍乡':4,'许昌':4,'日照':4,'铁岭':4,'大理州':4,'淮南':4,'延边州':4,'咸宁':4,'信阳':4,'吕梁':4,'辽阳':4,'朝阳':4,'恩施州':4,'达州市':4,'益阳市':4,'平顶山':4,'六安':4,'延安':4,\
'梧州':4,'白山':4,'阜阳':4,'铜陵市':4,'河源':4,'玉溪市':4,'黄石':4,'通化':4,'百色':4,'乐山市':4,'抚州市':4,'钦州':4,'阳江':4,'池州市':4,'广元':4,'滨州':5,'阳泉':5,'周口市':5,'遂宁':5,\
'吉安':5,'长治':5,'铜仁':5,'鹤岗':5,'攀枝花':5,'昭通':5,'云浮':5,'伊犁州':5,'焦作':5,'凉山州':5,'黔西南州':5,'广安':5,'新余':5,'锡林郭勒':5,'宣城':5,'兴安盟':5,'红河州':5,'眉山':5,\
'巴彦淖尔':5,'双鸭山市':5,'景德镇市':5,'鸡西':5,'三门峡':5,'宿州':5,'汕尾':5,'阜新':5,'张掖':5,'玉林':5,'乌兰察布':5,'鹰潭':5,'黑河':5,'伊春':5,'贵港市':5,'漯河':5,'晋城':5,'克拉玛依':5,\
'随州':5,'保山':5,'濮阳':5,'文山州':5,'嘉峪关':5,'六盘水':5,'乌海':5,'自贡':5,'松原':5,'内江':5,'黔东南州':5,'鹤壁':5,'德宏州':5,'安顺':5,'资阳':5,'鄂州':5,'忻州':5,'荆门':5,'淮北':5,\
'毕节':5,'巴音郭楞':5,'防城港':5,'天水':5,'黔南州':5,'阿坝州':5,'石嘴山':5,'安康':5,'亳州市':5,'昌吉州':5,'普洱':5,'楚雄州':5,'白城':5,'贺州':5,'哈密':5,'来宾':5,'庆阳':5,'河池':5,\
'张家界 雅安':5,'辽源':5,'湘西州':5,'朔州':5,'临沧':5,'白银':5,'塔城地区':5,'莱芜':5,'迪庆州':5,'喀什地区':5,'甘孜州':5,'阿克苏':5,'武威':5,'巴中':5,'平凉':5,'商洛':5,'七台河':5,'金昌':5,\
'中卫':5,'阿勒泰':5,'铜川':5,'海西州':5,'吴忠':5,'固原':5,'吐鲁番':5,'阿拉善盟':5,'博尔塔拉州':5,'定西':5,'西双版纳':5,'陇南':5,'大兴安岭':5,'崇左':5,'日喀则':5,'临夏州':5,'林芝':5,\
'海东':5,'怒江州':5,'和田地区':5,'昌都':5,'儋州':5,'甘南州':5,'山南':5,'海南州':5,'海北州':5,'玉树州':5,'阿里地区':5,'那曲地区':5,'黄南州':5,'克孜勒苏州':5,'果洛州':5,'三沙':5}
code_keys=list(code.keys())
for qq in code_keys:
qlist=code[qq]['qlist']
#qtype=code[qq]['qtype']
content=code[qq]['content']
ind=list(data.columns).index(qlist[-1])
data1=data[qlist]
'''
识别问卷星中的城市题
'''
tf1=u'城市' in content
tf2=data1[data1.notnull()].applymap(lambda x:'-' in '%s'%x).all().all()
tf3=(qq+'a' not in data.columns) and (qq+'b' not in data.columns)
if tf1 and tf2 and tf3:
# 省份和城市
tmp1=data[qq].map(lambda x:x.split('-')[0])
tmp2=data[qq].map(lambda x:x.split('-')[1])
tmp2[tmp1==u'上海']=u'上海'
tmp2[tmp1==u'北京']=u'北京'
tmp2[tmp1==u'天津']=u'天津'
tmp2[tmp1==u'重庆']=u'重庆'
tmp2[tmp1==u'香港']=u'香港'
tmp2[tmp1==u'澳门']=u'澳门'
data.insert(ind+1,qq+'a',tmp1)
data.insert(ind+2,qq+'b',tmp2)
code[qq+'a']={'content':'省份','qtype':'填空题','qlist':[qq+'a']}
code[qq+'b']={'content':'城市','qtype':'填空题','qlist':[qq+'b']}
tmp3=data[qq+'b'].map(lambda x: city[x] if x in city.keys() else x)
tmp3=tmp3.map(lambda x: 6 if isinstance(x,str) else x)
data.insert(ind+3,qq+'c',tmp3)
code[qq+'c']={'content':'城市分级','qtype':'单选题','qlist':[qq+'c'],\
'code':{0:'北上广深',1:'新一线',2:'二线',3:'三线',4:'四线',5:'五线',6:'五线以下'}}
return data,code
def levenshtein(s, t):
''''' From Wikipedia article; Iterative with two matrix rows. '''
if s == t: return 0
elif len(s) == 0: return len(t)
elif len(t) == 0: return len(s)
v0 = [None] * (len(t) + 1)
v1 = [None] * (len(t) + 1)
for i in range(len(v0)):
v0[i] = i
for i in range(len(s)):
v1[0] = i + 1
for j in range(len(t)):
cost = 0 if s[i] == t[j] else 1
v1[j + 1] = min(v1[j] + 1, v0[j + 1] + 1, v0[j] + cost)
for j in range(len(v0)):
v0[j] = v1[j]
return v1[len(t)]
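# Worked example: levenshtein('kitten', 'sitting') == 3
# (substitute k->s, substitute e->i, insert g), the classic edit-distance illustration.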
def code_similar(code1,code2):
'''
题目内容相似度用最小编辑距离来度量
选项相似度分为几种
1、完全相同:1
2、单选题:暂时只考虑序号和值都相等的,且共同变量超过一半:2
2、多选题/排序题:不考虑序号,共同变量超过一半即可:3
3、矩阵单选题:code_r 暂时只考虑完全匹配
4、其他情况为0
'''
code_distance_min=pd.DataFrame(index=code1.keys(),columns=['qnum','similar_content','similar_code'])
for c1 in code1:
# 计算题目内容的相似度
disstance_str=pd.Series(index=code2.keys())
for c2 in code2:
if code1[c1]['qtype']==code2[c2]['qtype']:
disstance_str[c2]=levenshtein(code1[c1]['content'], code2[c2]['content'])
c2=disstance_str.idxmin()
if '%s'%c2 == 'nan':
continue
min_len=(len(code1[c1]['content'])+len(code2[c2]['content']))/2
similar_content=100-100*disstance_str[c2]/min_len if min_len>0 else 0
# 计算选项的相似度
qtype=code2[c2]['qtype']
if qtype == '单选题':
t1=code1[c1]['code']
t2=code2[c2]['code']
inner_key=list(set(t1.keys())&set(t2.keys()))
tmp=all([t1[c]==t2[c] for c in inner_key])
if t1==t2:
similar_code=1
elif len(inner_key)>=0.5*len(set(t1.keys())|set(t2.keys())) and tmp:
similar_code=2
else:
similar_code=0
elif qtype in ['多选题','排序题']:
t1=code1[c1]['code']
t2=code2[c2]['code']
t1=[t1[c] for c in code1[c1]['qlist']]
t2=[t2[c] for c in code2[c2]['qlist']]
inner_key=set(t1)&set(t2)
if t1==t2:
similar_code=1
elif len(set(t1)&set(t2))>=0.5*len(set(t1)|set(t2)):
similar_code=3
else:
similar_code=0
elif qtype in ['矩阵多选题']:
t1=code1[c1]['code_r']
t2=code2[c2]['code_r']
t1=[t1[c] for c in code1[c1]['qlist']]
t2=[t2[c] for c in code2[c2]['qlist']]
inner_key=set(t1)&set(t2)
if t1==t2:
similar_code=1
elif len(set(t1)&set(t2))>=0.5*len(set(t1)|set(t2)):
similar_code=3
else:
similar_code=0
elif qtype in ['填空题']:
similar_code=1
else:
similar_code=0
code_distance_min.loc[c1,'qnum']=c2
code_distance_min.loc[c1,'similar_content']=similar_content
code_distance_min.loc[c1,'similar_code']=similar_code
# 剔除qnum中重复的值
code_distance_min=code_distance_min.sort_values(['qnum','similar_content','similar_code'],ascending=[False,False,True])
code_distance_min.loc[code_distance_min.duplicated(['qnum']),:]=np.nan
code_distance_min=pd.DataFrame(code_distance_min,index=code1.keys())
return code_distance_min
def data_merge(ques1,ques2,qlist1=None,qlist2=None,name1='ques1',name2='ques2',\
mergeqnum='Q0',similar_threshold=70):
'''合并两份数据
ques1: 列表,[data1,code1]
ques2: 列表,[data2,code2]
'''
data1,code1=ques1
data2,code2=ques2
if (qlist1 is None) or (qlist2 is None):
qlist1=[]
qlist2=[]
qqlist1=[]
qqlist2=[]
code_distance_min=code_similar(code1,code2)
code1_key=sorted(code1,key=lambda x:int(re.findall('\d+',x)[0]))
for c1 in code1_key:
qtype1=code1[c1]['qtype']
#print('{}:{}'.format(c1,code1[c1]['content']))
rs_qq=code_distance_min.loc[c1,'qnum']
similar_content=code_distance_min.loc[c1,'similar_content']
similar_code=code_distance_min.loc[c1,'similar_code']
if (similar_content>=similar_threshold) and (similar_code in [1,2]):
#print('推荐合并第二份数据中的{}({}), 两个题目相似度为为{:.0f}%'.format(rs_qq,code2[rs_qq]['content'],similar))
print('将自动合并: {} 和 {}'.format(c1,rs_qq))
user_qq=rs_qq
qqlist1+=code1[c1]['qlist']
qqlist2+=code2[user_qq]['qlist']
qlist1.append(c1)
qlist2.append(rs_qq)
elif (similar_content>=similar_threshold) and (similar_code==3):
# 针对非单选题,此时要调整选项顺序
t1=code1[c1]['code_r'] if qtype1 =='矩阵单选题' else code1[c1]['code']
t1_qlist=code1[c1]['qlist']
t1_value=[t1[k] for k in t1_qlist]
t2=code2[rs_qq]['code_r'] if qtype1 =='矩阵单选题' else code2[rs_qq]['code']
t2_qlist=code2[rs_qq]['qlist']
t2_value=[t2[k] for k in t2_qlist]
# 保留相同的选项
t1_qlist_new=[q for q in t1_qlist if t1[q] in list(set(t1_value)&set(t2_value))]
t2_r=dict(zip([s[1] for s in t2.items()],[s[0] for s in t2.items()]))
t2_qlist_new=[t2_r[s] for s in [t1[q] for q in t1_qlist_new]]
code1[c1]['qlist']=t1_qlist_new
code1[c1]['code']={k:t1[k] for k in t1_qlist_new}
qqlist1+=t1_qlist_new
qqlist2+=t2_qlist_new
qlist1.append(c1)
qlist2.append(rs_qq)
print('将自动合并: {} 和 {} (只保留了相同的选项)'.format(c1,rs_qq))
elif similar_code in [1,2]:
print('-'*40)
print('为【 {}:{} 】自动匹配到: '.format(c1,code1[c1]['content']))
print(' 【 {}:{} 】,其相似度为{:.0f}%.'.format(rs_qq,code2[rs_qq]['content'],similar_content))
tmp=input('是否合并该组题目,请输入 yes/no (也可以输入第二份数据中其他您需要匹配的题目): ')
tmp=re.sub('\s','',tmp)
tmp=tmp.lower()
if tmp in ['yes','y']:
user_qq=rs_qq
elif tmp in ['no','n']:
user_qq=None
else:
tmp=re.sub('^q','Q',tmp)
if tmp not in code2:
user_qq=None
elif (tmp in code2) and (tmp!=rs_qq):
print('您输入的是{}:{}'.format(tmp,code2[tmp]['content']))
user_qq=tmp
if user_qq==rs_qq:
qqlist1+=code1[c1]['qlist']
qqlist2+=code2[user_qq]['qlist']
qlist1.append(c1)
qlist2.append(user_qq)
print('将自动合并: {} 和 {}'.format(c1,rs_qq))
elif user_qq is not None:
# 比对两道题目的code
if 'code' in code1[c1] and len(code1[c1]['code'])>0:
t1=code1[c1]['code_r'] if qtype1 =='矩阵单选题' else code1[c1]['code']
t2=code2[user_qq]['code_r'] if code2[user_qq]['qtype'] =='矩阵单选题' else code2[user_qq]['code']
if set(t1.values())==set(t2.values()):
qqlist1+=code1[c1]['qlist']
qqlist2+=code2[user_qq]['qlist']
qlist1.append(c1)
qlist2.append(user_qq)
print('将自动合并: {} 和 {}'.format(c1,user_qq))
else:
print('两个题目的选项不匹配,将自动跳过.')
else:
qqlist1+=[code1[c1]['qlist'][0]]
qqlist2+=[code2[user_qq]['qlist'][0]]
qlist1.append(c1)
qlist2.append(user_qq)
print('将自动合并: {} 和 {}'.format(c1,user_qq))
else:
print('将自动跳过: {}'.format(c1))
print('-'*40)
else:
print('将自动跳过: {}'.format(c1))
tmp=input('请问您需要的题目是否都已经合并? 请输入(yes / no): ')
tmp=re.sub('\s','',tmp)
tmp=tmp.lower()
if tmp in ['no','n']:
print('请确保接下来您要合并的题目类型和选项完全一样.')
while 1:
tmp=input('请输入您想合并的题目对,直接回车则终止输入(如: Q1,Q1 ): ')
tmp=re.sub('\s','',tmp)# 去掉空格
tmp=re.sub(',',',',tmp)# 修正可能错误的逗号
tmp=tmp.split(',')
tmp=[re.sub('^q','Q',qq) for qq in tmp]
if len(tmp)<2:
break
if tmp[0] in qlist1 or tmp[1] in qlist2:
print('该题已经被合并,请重新输入')
continue
if tmp[0] not in code1 or tmp[1] not in code2:
print('输入错误, 请重新输入')
continue
c1=tmp[0]
c2=tmp[1]
print('您输入的是:')
print('第一份数据中的【 {}:{} 】'.format(c1,code1[c1]['content']))
print('第二份数据中的【 {}:{} 】'.format(c2,code2[c2]['content']))
w=code_similar({c1:code1[c1]},{c2:code2[c2]})
similar_code=w.loc[c1,'similar_code']
if similar_code in [1,2] and len(code1[c1]['qlist'])==len(code2[c2]['qlist']):
qqlist1+=code1[c1]['qlist']
qqlist2+=code2[c2]['qlist']
qlist1.append(c1)
qlist2.append(c2)
print('将自动合并: {} 和 {}'.format(c1,c2))
else:
print('选项不匹配,请重新输入')
else:
qqlist1=[]
for qq in qlist1:
qqlist1=qqlist1+code1[qq]['qlist']
qqlist2=[]
for qq in qlist2:
qqlist2=qqlist2+code2[qq]['qlist']
# 将题号列表转化成data中的列名
if mergeqnum in qqlist1:
mergeqnum=mergeqnum+'merge'
data1=data1.loc[:,qqlist1]
data1.loc[:,mergeqnum]=1
data2=data2.loc[:,qqlist2]
data2.loc[:,mergeqnum]=2
if len(qqlist1)!=len(qqlist2):
print('两份数据选项不完全匹配,请检查....')
raise
data2=data2.rename(columns=dict(zip(qqlist2,qqlist1)))
    data12 = pd.concat([data1, data2], ignore_index=True)  # DataFrame.append was removed in pandas 2.x
code12={}
for i,cc in enumerate(qlist1):
code12[cc]=code1[cc]
if 'code' in code1[cc] and 'code' in code2[qlist2[i]]:
code12[cc]['code'].update(code2[qlist2[i]]['code'])
code12[mergeqnum]={'content':u'来源','code':{1:name1,2:name2},'qtype':u'单选题','qlist':[mergeqnum]}
return data12,code12
## ===========================================================
#
#
# 数据清洗 #
#
#
## ==========================================================
def clean_ftime(ftime,cut_percent=0.25):
'''
ftime 是完成问卷的秒数
思路:
1、只考虑截断问卷完成时间较小的样本
2、找到完成时间变化的拐点,即需要截断的时间点
返回:r
建议截断<r的样本
'''
t_min=int(ftime.min())
t_cut=int(ftime.quantile(cut_percent))
x=np.array(range(t_min,t_cut))
y=np.array([len(ftime[ftime<=i]) for i in range(t_min,t_cut)])
z1 = np.polyfit(x, y, 4) # 拟合得到的函数
z2=np.polyder(z1,2) #求二阶导数
r=np.roots(np.polyder(z2,1))
r=int(r[0])
return r
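# A minimal usage sketch on synthetic completion times (gamma-distributed seconds,
# for illustration only); respondents faster than the returned cut-off are
# candidates for removal as careless answers:
def _demo_clean_ftime():
    np.random.seed(0)
    ftime = pd.Series(np.random.gamma(shape=9, scale=30, size=2000).astype(int))
    print('suggested cut-off (seconds):', clean_ftime(ftime))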
## ===========================================================
#
#
# 数据分析和输出 #
#
#
## ==========================================================
def data_auto_code(data):
'''智能判断问卷数据
输入
data: 数据框,列名需要满足Qi或者Qi_
输出:
code: 自动编码
'''
data=pd.DataFrame(data)
columns=data.columns
columns=[c for c in columns if re.match('Q\d+',c)]
code={}
for cc in columns:
# 识别题目号
if '_' not in cc:
key=cc
else:
key=cc.split('_')[0]
# 新的题目则产生新的code
if key not in code:
code[key]={}
code[key]['qlist']=[]
code[key]['code']={}
code[key]['content']=key
code[key]['qtype']=''
# 处理各题目列表
if key == cc:
code[key]['qlist']=[key]
elif re.findall('^'+key+'_[a-zA-Z]{0,}\d+$',cc):
code[key]['qlist'].append(cc)
else:
if 'qlist_open' in code[key]:
code[key]['qlist_open'].append(cc)
else:
code[key]['qlist_open']=[cc]
for kk in code.keys():
dd=data[code[kk]['qlist']]
# 单选题和填空题
if len(dd.columns)==1:
tmp=dd[dd.notnull()].iloc[:,0].unique()
if dd.iloc[:,0].value_counts().mean() >=2:
code[kk]['qtype']=u'单选题'
code[kk]['code']=dict(zip(tmp,tmp))
else:
code[kk]['qtype']=u'填空题'
del code[kk]['code']
else:
            tmp=set(dd[dd.notnull()].values.flatten())
if set(tmp)==set([0,1]):
code[kk]['qtype']=u'多选题'
code[kk]['code']=dict(zip(code[kk]['qlist'],code[kk]['qlist']))
elif 'R' in code[kk]['qlist'][0]:
code[kk]['qtype']=u'矩阵单选题'
code[kk]['code_r']=dict(zip(code[kk]['qlist'],code[kk]['qlist']))
code[kk]['code']=dict(zip(list(tmp),list(tmp)))
else:
code[kk]['qtype']=u'排序题'
code[kk]['code']=dict(zip(code[kk]['qlist'],code[kk]['qlist']))
return code
def save_data(data,filename=u'data.xlsx',code=None):
'''保存问卷数据到本地
根据filename后缀选择相应的格式保存
如果有code,则保存按文本数据
'''
savetype=os.path.splitext(filename)[1][1:]
data1=data.copy()
if code:
for qq in code.keys():
qtype=code[qq]['qtype']
qlist=code[qq]['qlist']
if qtype == u'单选题':
# 将序号换成文本,题号加上具体内容
                data1[qlist[0]]=data1[qlist[0]].replace(code[qq]['code'])
data1.rename(columns={qq:'{}({})'.format(qq,code[qq]['content'])},inplace=True)
elif qtype == u'矩阵单选题':
# 同单选题
                data1[code[qq]['qlist']]=data1[code[qq]['qlist']].replace(code[qq]['code'])
tmp1=code[qq]['qlist']
tmp2=['{}({})'.format(q,code[qq]['code_r'][q]) for q in tmp1]
data1.rename(columns=dict(zip(tmp1,tmp2)),inplace=True)
elif qtype in [u'排序题']:
# 先变成一道题,插入表中,然后再把序号变成文本
tmp=data[qlist]
tmp=tmp.rename(columns=code[qq]['code'])
tmp=dataCode_to_text(tmp)
ind=list(data1.columns).index(qlist[0])
qqname='{}({})'.format(qq,code[qq]['content'])
data1.insert(ind,qqname,tmp)
tmp1=code[qq]['qlist']
tmp2=['{}_{}'.format(qq,code[qq]['code'][q]) for q in tmp1]
data1.rename(columns=dict(zip(tmp1,tmp2)),inplace=True)
elif qtype in [u'多选题']:
# 先变成一道题,插入表中,然后再把序号变成文本
tmp=data[qlist]
tmp=tmp.rename(columns=code[qq]['code'])
tmp=dataCode_to_text(tmp)
ind=list(data1.columns).index(qlist[0])
qqname='{}({})'.format(qq,code[qq]['content'])
data1.insert(ind,qqname,tmp)
for q in qlist:
                    data1[q]=data1[q].replace({0:'',1:code[qq]['code'][q]})
tmp2=['{}_{}'.format(qq,code[qq]['code'][q]) for q in qlist]
data1.rename(columns=dict(zip(qlist,tmp2)),inplace=True)
else:
data1.rename(columns={qq:'{}({})'.format(qq,code[qq]['content'])},inplace=True)
if (savetype == u'xlsx') or (savetype == u'xls'):
data1.to_excel(filename,index=False)
elif savetype == u'csv':
data1.to_csv(filename,index=False)
def read_data(filename):
savetype=os.path.splitext(filename)[1][1:]
if (savetype==u'xlsx') or (savetype==u'xls'):
data=pd.read_excel(filename)
elif savetype==u'csv':
data=pd.read_csv(filename)
else:
print('con not read file!')
return data
def sa_to_ma(data):
'''单选题数据转换成多选题数据
data是单选题数据, 要求非有效列别为nan
可以使用内置函数pd.get_dummies()代替
'''
if isinstance(data,pd.core.frame.DataFrame):
data=data[data.columns[0]]
#categorys=sorted(data[data.notnull()].unique())
categorys=data[data.notnull()].unique()
try:
categorys=sorted(categorys)
except:
pass
#print('sa_to_ma function::cannot sorted')
data_ma=pd.DataFrame(index=data.index,columns=categorys)
for c in categorys:
data_ma[c]=data.map(lambda x : int(x==c))
data_ma.loc[data.isnull(),:]=np.nan
return data_ma
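# A minimal usage sketch (illustrative data): similar in spirit to pd.get_dummies,
# but rows whose value is missing stay NaN instead of becoming all zeros.
def _demo_sa_to_ma():
    s = pd.Series(['A', 'B', np.nan, 'A'])
    print(sa_to_ma(s))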
def to_dummpy(data,code,qqlist=None,qtype_new='多选题',ignore_open=True):
'''转化成哑变量
将数据中所有的单选题全部转化成哑变量,另外剔除掉开放题和填空题
返回一个很大的只有0和1的数据
'''
if qqlist is None:
qqlist=sorted(code,key=lambda x:int(re.findall('\d+',x)[0]))
bdata=pd.DataFrame()
bcode={}
for qq in qqlist:
qtype=code[qq]['qtype']
data0=data[code[qq]['qlist']]
if qtype=='单选题':
data0=data0.iloc[:,0]
categorys=data0[data0.notnull()].unique()
try:
categorys=sorted(categorys)
except :
pass
categorys=[t for t in categorys if t in code[qq]['code']]
cname=[code[qq]['code'][k] for k in categorys]
columns_name=['{}_A{}'.format(qq,i+1) for i in range(len(categorys))]
tmp=pd.DataFrame(index=data0.index,columns=columns_name)
for i,c in enumerate(categorys):
tmp[columns_name[i]]=data0.map(lambda x : int(x==c))
#tmp.loc[data0.isnull(),:]=0
code_tmp={'content':code[qq]['content'],'qtype':qtype_new}
code_tmp['code']=dict(zip(columns_name,cname))
code_tmp['qlist']=columns_name
bcode.update({qq:code_tmp})
bdata=pd.concat([bdata,tmp],axis=1)
elif qtype in ['多选题','排序题','矩阵单选题']:
bdata=pd.concat([bdata,data0],axis=1)
bcode.update({qq:code[qq]})
bdata=bdata.fillna(0)
try:
        bdata = bdata.astype(np.int64)  # the surrounding try/except handles non-convertible columns
except :
pass
return bdata,bcode
def qdata_flatten(data,code,quesid=None,userid_begin=None):
'''将问卷数据展平,字段如下
userid: 用户ID
quesid: 问卷ID
qnum: 题号
qname: 题目内容
qtype: 题目类型
samplelen:题目的样本数
itemnum: 选项序号
itemname: 选项内容
code: 用户的选择
codename: 用户选择的具体值
count: 计数
percent(%): 计数占比(百分比)
'''
if not userid_begin:
userid_begin=1000000
data.index=[userid_begin+i+1 for i in range(len(data))]
if '提交答卷时间' in data.columns:
begin_date=pd.to_datetime(data['提交答卷时间']).min().strftime('%Y-%m-%d')
end_date=pd.to_datetime(data['提交答卷时间']).max().strftime('%Y-%m-%d')
else:
begin_date=''
end_date=''
data,code=to_dummpy(data,code,qtype_new='单选题')
code_item={}
for qq in code:
if code[qq]['qtype']=='矩阵单选题':
code_item.update(code[qq]['code_r'])
else :
code_item.update(code[qq]['code'])
qdata=data.stack().reset_index()
qdata.columns=['userid','qn_an','code']
qdata['qnum']=qdata['qn_an'].map(lambda x:x.split('_')[0])
qdata['itemnum']=qdata['qn_an'].map(lambda x:'_'.join(x.split('_')[1:]))
if quesid:
qdata['quesid']=quesid
qdata=qdata[['userid','quesid','qnum','itemnum','code']]
else:
qdata=qdata[['userid','qnum','itemnum','code']]
# 获取描述统计信息:
samplelen=qdata.groupby(['userid','qnum'])['code'].sum().map(lambda x:int(x>0)).unstack().sum()
quesinfo=qdata.groupby(['qnum','itemnum','code'])['code'].count()
quesinfo.name='count'
quesinfo=quesinfo.reset_index()
quesinfo=quesinfo[quesinfo['code']!=0]
#quesinfo=qdata.groupby(['quesid','qnum','itemnum'])['code'].sum()
quesinfo['samplelen']=quesinfo['qnum'].replace(samplelen.to_dict())
quesinfo['percent(%)']=0
quesinfo.loc[quesinfo['samplelen']>0,'percent(%)']=100*quesinfo.loc[quesinfo['samplelen']>0,'count']/quesinfo.loc[quesinfo['samplelen']>0,'samplelen']
quesinfo['qname']=quesinfo['qnum'].map(lambda x: code[x]['content'])
quesinfo['qtype']=quesinfo['qnum'].map(lambda x: code[x]['qtype'])
quesinfo['itemname']=quesinfo['qnum']+quesinfo['itemnum'].map(lambda x:'_%s'%x)
quesinfo['itemname']=quesinfo['itemname'].replace(code_item)
#quesinfo['itemname']=quesinfo['qn_an'].map(lambda x: code[x.split('_')[0]]['code_r'][x] if \
#code[x.split('_')[0]]['qtype']=='矩阵单选题' else code[x.split('_')[0]]['code'][x])
# 各个选项的含义
quesinfo['codename']=''
quesinfo.loc[quesinfo['code']==0,'codename']='否'
quesinfo.loc[quesinfo['code']==1,'codename']='是'
quesinfo['tmp']=quesinfo['qnum']+quesinfo['code'].map(lambda x:'_%s'%int(x))
quesinfo['codename'].update(quesinfo.loc[(quesinfo['code']>0)&(quesinfo['qtype']=='矩阵单选题'),'tmp']\
.map(lambda x: code[x.split('_')[0]]['code'][int(x.split('_')[1])]))
quesinfo['codename'].update(quesinfo.loc[(quesinfo['code']>0)&(quesinfo['qtype']=='排序题'),'tmp'].map(lambda x: 'Top{}'.format(x.split('_')[1])))
quesinfo['begin_date']=begin_date
quesinfo['end_date']=end_date
if quesid:
quesinfo['quesid']=quesid
quesinfo=quesinfo[['quesid','begin_date','end_date','qnum','qname','qtype','samplelen','itemnum','itemname','code','codename','count','percent(%)']]
else:
quesinfo=quesinfo[['qnum','qname','qtype','samplelen','itemnum','itemname','code','codename','count','percent(%)']]
# 排序
quesinfo['qnum']=quesinfo['qnum'].astype('category')
quesinfo['qnum'].cat.set_categories(sorted(list(quesinfo['qnum'].unique()),key=lambda x:int(re.findall('\d+',x)[0])), inplace=True)
quesinfo['itemnum']=quesinfo['itemnum'].astype('category')
    quesinfo['itemnum'].cat.set_categories(sorted(list(quesinfo['itemnum'].unique()),key=lambda x:int(re.findall(r'\d+',x)[0])), inplace=True)
quesinfo=quesinfo.sort_values(['qnum','itemnum','code'])
return qdata,quesinfo
def confidence_interval(p,n,alpha=0.05):
import scipy.stats as stats
t=stats.norm.ppf(1-alpha/2)
ci=t*math.sqrt(p*(1-p)/n)
#a=p-stats.norm.ppf(1-alpha/2)*math.sqrt(p*(1-p)/n)
#b=p+stats.norm.ppf(1-alpha/2)*math.sqrt(p*(1-p)/n)
return ci
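
# Illustrative usage sketch (not part of the original module): half-width of a
# 95% confidence interval for an observed proportion. The numbers are made up.
def _demo_confidence_interval():
    p, n = 0.35, 400
    half_width = confidence_interval(p, n, alpha=0.05)  # ~1.96*sqrt(p*(1-p)/n)
    print('{:.0%} +/- {:.1%} (n={})'.format(p, half_width, n))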
def sample_size_cal(interval,N,alpha=0.05):
'''调研样本量的计算
参考:https://www.surveysystem.com/sscalc.htm
sample_size_cal(interval,N,alpha=0.05)
输入:
interval: 误差范围,例如0.03
N: 总体的大小,一般1万以上就没啥差别啦
alpha:置信水平,默认95%
'''
import scipy.stats as stats
p=stats.norm.ppf(1-alpha/2)
if interval>1:
interval=interval/100
samplesize=p**2/4/interval**2
if N:
samplesize=samplesize*N/(samplesize+N)
samplesize=int(round(samplesize))
return samplesize
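
# Illustrative usage sketch: required sample size for a +/-3% margin of error at
# 95% confidence in a population of 100,000 (hypothetical numbers).
def _demo_sample_size_cal():
    n = sample_size_cal(0.03, 100000, alpha=0.05)
    print('required sample size: {}'.format(n))  # roughly 1,056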
def gof_test(fo,fe=None,alpha=0.05):
'''拟合优度检验
输入:
fo:观察频数
fe:期望频数,缺省为平均数
返回:
1: 样本与总体有差异
0:样本与总体无差异
例子:
gof_test(np.array([0.3,0.4,0.3])*222)
'''
import scipy.stats as stats
fo=np.array(fo).flatten()
C=len(fo)
    if fe is None:
N=fo.sum()
fe=np.array([N/C]*C)
else:
fe=np.array(fe).flatten()
chi_value=(fo-fe)**2/fe
chi_value=chi_value.sum()
chi_value_fit=stats.chi2.ppf(q=1-alpha,df=C-1)
#CV=np.sqrt((fo-fe)**2/fe**2/(C-1))*100
if chi_value>chi_value_fit:
result=1
else:
result=0
return result
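
# Illustrative usage sketch, reusing the example from the docstring above:
# goodness-of-fit test of observed counts against equal expected frequencies
# (returns 1 if the sample differs from the assumed distribution, else 0).
def _demo_gof_test():
    observed = np.array([0.3, 0.4, 0.3]) * 222
    print(gof_test(observed))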
def chi2_test(fo,alpha=0.05):
import scipy.stats as stats
fo=pd.DataFrame(fo)
chiStats = stats.chi2_contingency(observed=fo)
#critical_value = stats.chi2.ppf(q=1-alpha,df=chiStats[2])
#observed_chi_val = chiStats[0]
# p<alpha 等价于 observed_chi_val>critical_value
chi2_data=(chiStats[1] <= alpha,chiStats[1])
return chi2_data
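
# Illustrative usage sketch: chi-square independence test on a 2x2 contingency
# table; returns (significant?, p-value). The counts are hypothetical.
def _demo_chi2_test():
    fo = pd.DataFrame([[30, 10], [20, 40]])
    print(chi2_test(fo, alpha=0.05))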
def fisher_exact(fo,alpha=0.05):
'''fisher_exact 显著性检验函数
此处采用的是调用R的解决方案,需要安装包 pyper
python解决方案参见
https://mrnoutahi.com/2016/01/03/Fisher-exac-test-for-mxn-table/
但还有些问题,所以没用.
'''
import pyper as pr
r=pr.R(use_pandas=True,use_numpy=True)
r.assign('fo',fo)
r("b<-fisher.test(fo)")
pdata=r['b']
p_value=pdata['p.value']
if p_value<alpha:
result=1
else:
result=0
return (result,p_value)
def anova(data,formula):
'''方差分析
输入
--data: DataFrame格式,包含数值型变量和分类型变量
--formula:变量之间的关系,如:数值型变量~C(分类型变量1)[+C(分类型变量1)[+C(分类型变量1):(分类型变量1)]
返回[方差分析表]
[总体的方差来源于组内方差和组间方差,通过比较组间方差和组内方差的比来推断两者的差异]
--df:自由度
--sum_sq:误差平方和
--mean_sq:误差平方和/对应的自由度
--F:mean_sq之比
--PR(>F):p值,比如<0.05则代表有显著性差异
'''
import statsmodels.api as sm
from statsmodels.formula.api import ols
cw_lm=ols(formula, data=data).fit() #Specify C for Categorical
r=sm.stats.anova_lm(cw_lm)
return r
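
# Illustrative usage sketch for anova(): one-way ANOVA of a numeric column
# against a categorical column. The column names 'score' and 'group' are
# hypothetical, not part of any questionnaire schema.
def _demo_anova():
    df = pd.DataFrame({'score': [5, 6, 7, 3, 4, 5, 8, 9, 7],
                       'group': ['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C']})
    print(anova(df, 'score ~ C(group)'))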
def mca(X,N=2):
'''对应分析函数,暂时支持双因素
X:观察频数表
N:返回的维数,默认2维
可以通过scatter函数绘制:
fig=scatter([pr,pc])
fig.savefig('mca.png')
'''
from scipy.linalg import diagsvd
S = X.sum().sum()
Z = X / S # correspondence matrix
r = Z.sum(axis=1)
c = Z.sum()
D_r = np.diag(1/np.sqrt(r))
Z_c = Z - np.outer(r, c) # standardized residuals matrix
D_c = np.diag(1/np.sqrt(c))
# another option, not pursued here, is sklearn.decomposition.TruncatedSVD
P,s,Q = np.linalg.svd(np.dot(np.dot(D_r, Z_c),D_c))
#S=diagsvd(s[:2],P.shape[0],2)
pr=np.dot(np.dot(D_r,P),diagsvd(s[:N],P.shape[0],N))
pc=np.dot(np.dot(D_c,Q.T),diagsvd(s[:N],Q.shape[0],N))
inertia=np.cumsum(s**2)/np.sum(s**2)
inertia=inertia.tolist()
if isinstance(X,pd.DataFrame):
pr=pd.DataFrame(pr,index=X.index,columns=list('XYZUVW')[:N])
pc=pd.DataFrame(pc,index=X.columns,columns=list('XYZUVW')[:N])
return pr,pc,inertia
'''
w=pd.ExcelWriter(u'mca_.xlsx')
pr.to_excel(w,startrow=0,index_label=True)
pc.to_excel(w,startrow=len(pr)+2,index_label=True)
w.save()
'''
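
# Illustrative usage sketch for mca(): correspondence analysis of a small
# observed frequency table. The row/column labels and counts are hypothetical.
def _demo_mca():
    X = pd.DataFrame([[20, 5, 10], [8, 25, 7], [12, 9, 30]],
                     index=['R1', 'R2', 'R3'], columns=['C1', 'C2', 'C3'])
    pr, pc, inertia = mca(X, N=2)
    print(inertia)  # cumulative inertia explained by each dimension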
def cluster(data,code,cluster_qq,n_clusters='auto',max_clusters=7):
'''对态度题进行聚类
'''
from sklearn.cluster import KMeans
#from sklearn.decomposition import PCA
from sklearn import metrics
#import prince
    qq_max=sorted(code,key=lambda x:int(re.findall(r'\d+',x)[0]))[-1]
    new_cluster='Q{}'.format(int(re.findall(r'\d+',qq_max)[0])+1)
#new_cluster='Q32'
qlist=code[cluster_qq]['qlist']
X=data[qlist]
# 去除所有态度题选择的分数都一样的用户(含仅有两个不同)
std_t=min(1.41/np.sqrt(len(qlist)),0.40) if len(qlist)>=8 else 0.10
X=X[X.T.std()>std_t]
index_bk=X.index#备份,方便还原
X.fillna(0,inplace=True)
X1=X.T
X1=(X1-X1.mean())/X1.std()
    X1=X1.T.values
if n_clusters == 'auto':
#聚类个数的选取和评估
silhouette_score=[]# 轮廊系数
SSE_score=[]
klist=np.arange(2,15)
for k in klist:
est = KMeans(k) # 4 clusters
est.fit(X1)
tmp=np.sum((X1-est.cluster_centers_[est.labels_])**2)
SSE_score.append(tmp)
tmp=metrics.silhouette_score(X1, est.labels_)
silhouette_score.append(tmp)
'''
fig = plt.figure(1)
ax = fig.add_subplot(111)
fig = plt.figure(2)
ax.plot(klist,np.array(silhouette_score))
ax = fig.add_subplot(111)
ax.plot(klist,np.array(SSE_score))
'''
# 找轮廊系数的拐点
ss=np.array(silhouette_score)
t1=[False]+list(ss[1:]>ss[:-1])
t2=list(ss[:-1]>ss[1:])+[False]
k_log=[t1[i]&t2[i] for i in range(len(t1))]
if True in k_log:
k=k_log.index(True)
else:
k=1
k=k if k<=max_clusters-2 else max_clusters-2 # 限制最多分7类
k_best=klist[k]
else:
k_best=n_clusters
est = KMeans(k_best) # 4 clusters
est.fit(X1)
# 系数计算
SSE=np.sqrt(np.sum((X1-est.cluster_centers_[est.labels_])**2)/len(X1))
silhouette_score=metrics.silhouette_score(X1, est.labels_)
print('有效样本数:{},特征数:{},最佳分类个数:{} 类'.format(len(X1),len(qlist),k_best))
print('SSE(样本到所在类的质心的距离)为:{:.2f},轮廊系数为: {:.2f}'.format(SSE,silhouette_score))
# 绘制降维图
'''
X_PCA = PCA(2).fit_transform(X1)
kwargs = dict(cmap = plt.cm.get_cmap('rainbow', 10),
edgecolor='none', alpha=0.6)
labels=pd.Series(est.labels_)
plt.figure()
plt.scatter(X_PCA[:, 0], X_PCA[:, 1], c=labels, **kwargs)
'''
'''
# 三维立体图
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X_PCA[:, 0], X_PCA[:, 1],X_PCA[:, 2], c=labels, **kwargs)
'''
# 导出到原数据
parameters={'methods':'kmeans','inertia':est.inertia_,'SSE':SSE,'silhouette':silhouette_score,\
'n_clusters':k_best,'n_features':len(qlist),'n_samples':len(X1),'qnum':new_cluster,\
'data':X1,'labels':est.labels_}
data[new_cluster]=pd.Series(est.labels_,index=index_bk)
code[new_cluster]={'content':'态度题聚类结果','qtype':'单选题','qlist':[new_cluster],
'code':dict(zip(range(k_best),['cluster{}'.format(i+1) for i in range(k_best)]))}
print('结果已经存进数据, 题号为:{}'.format(new_cluster))
return data,code,parameters
'''
# 对应分析
t=data.groupby([new_cluster])[code[cluster_qq]['qlist']].mean()
t.columns=['R{}'.format(i+1) for i in range(len(code[cluster_qq]['qlist']))]
t=t.rename(index=code[new_cluster]['code'])
ca=prince.CA(t)
ca.plot_rows_columns(show_row_labels=True,show_column_labels=True)
'''
def scatter(data,legend=False,title=None,font_ch=None,find_path=None):
'''
绘制带数据标签的散点图
'''
import matplotlib.font_manager as fm
if font_ch is None:
fontlist=['calibri.ttf','simfang.ttf','simkai.ttf','simhei.ttf','simsun.ttc','msyh.ttf','msyh.ttc']
myfont=''
if not find_path:
find_paths=['C:\\Windows\\Fonts','']
# fontlist 越靠后越优先,findpath越靠后越优先
for find_path in find_paths:
for f in fontlist:
if os.path.exists(os.path.join(find_path,f)):
myfont=os.path.join(find_path,f)
if len(myfont)==0:
print('没有找到合适的中文字体绘图,请检查.')
myfont=None
else:
myfont = fm.FontProperties(fname=myfont)
else:
myfont=fm.FontProperties(fname=font_ch)
fig, ax = plt.subplots()
#ax.grid('on')
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
ax.axhline(y=0, linestyle='-', linewidth=1.2, alpha=0.6)
ax.axvline(x=0, linestyle='-', linewidth=1.2, alpha=0.6)
color=['blue','red','green','dark']
if not isinstance(data,list):
data=[data]
for i,dd in enumerate(data):
ax.scatter(dd.iloc[:,0], dd.iloc[:,1], c=color[i], s=50,
label=dd.columns[1])
for _, row in dd.iterrows():
ax.annotate(row.name, (row.iloc[0], row.iloc[1]), color=color[i],fontproperties=myfont,fontsize=10)
ax.axis('equal')
if legend:
ax.legend(loc='best')
if title:
ax.set_title(title,fontproperties=myfont)
return fig
def sankey(df,filename=None):
'''SanKey图绘制
df的列是左节点,行是右节点
注:暂时没找到好的Python方法,所以只生成R语言所需数据
返回links 和 nodes
# R code 参考
library(networkD3)
dd=read.csv('price_links.csv')
links<-data.frame(source=dd$from,target=dd$to,value=dd$value)
nodes=read.csv('price_nodes.csv',encoding = 'UTF-8')
nodes<-nodes['name']
Energy=c(links=links,nodes=nodes)
sankeyNetwork(Links = links, Nodes = nodes, Source = "source",
Target = "target", Value = "value", NodeID = "name",
units = "TWh",fontSize = 20,fontFamily='微软雅黑',nodeWidth=20)
'''
nodes=['Total']
nodes=nodes+list(df.columns)+list(df.index)
nodes=pd.DataFrame(nodes)
nodes['id']=range(len(nodes))
nodes.columns=['name','id']
R,C=df.shape
    t1=pd.DataFrame(df.values,columns=range(1,C+1),index=range(C+1,R+C+1))
t1.index.name='to'
t1.columns.name='from'
links=t1.unstack().reset_index(name='value')
links0=pd.DataFrame({'from':[0]*C,'to':range(1,C+1),'value':list(df.sum())})
links=links0.append(links)
if filename:
links.to_csv(filename+'_links.csv',index=False,encoding='utf-8')
nodes.to_csv(filename+'_nodes.csv',index=False,encoding='utf-8')
return (links,nodes)
def table(data,code,total=True):
'''
单个题目描述统计
code是data的编码,列数大于1
返回字典格式数据:
'fop':百分比, 对于单选题和为1,多选题分母为样本数
'fo': 观察频数表,其中添加了合计项
'fw': 加权频数表,可实现平均值、T2B等功能,仅当code中存在关键词'weight'时才有
'''
# 单选题
qtype=code['qtype']
index=code['qlist']
data=pd.DataFrame(data)
sample_len=data[code['qlist']].notnull().T.any().sum()
result={}
if qtype == u'单选题':
fo=data.iloc[:,0].value_counts()
if 'weight' in code:
w=pd.Series(code['weight'])
fo1=fo[w.index][fo[w.index].notnull()]
fw=(fo1*w).sum()/fo1.sum()
result['fw']=fw
fo.sort_values(ascending=False,inplace=True)
fop=fo.copy()
fop=fop/fop.sum()*1.0
fop[u'合计']=fop.sum()
fo[u'合计']=fo.sum()
if 'code' in code:
fop.rename(index=code['code'],inplace=True)
fo.rename(index=code['code'],inplace=True)
fop.name=u'占比'
fo.name=u'频数'
fop=pd.DataFrame(fop)
fo=pd.DataFrame(fo)
result['fo']=fo
result['fop']=fop
elif qtype == u'多选题':
fo=data.sum()
fo.sort_values(ascending=False,inplace=True)
fo[u'合计']=fo.sum()
if 'code' in code:
fo.rename(index=code['code'],inplace=True)
fop=fo.copy()
fop=fop/sample_len
fop.name=u'占比'
fo.name=u'频数'
fop=pd.DataFrame(fop)
fo=pd.DataFrame(fo)
result['fop']=fop
result['fo']=fo
elif qtype == u'矩阵单选题':
fo=pd.DataFrame(columns=code['qlist'],index=sorted(code['code']))
for i in fo.columns:
fo.loc[:,i]=data[i].value_counts()
if 'weight' not in code:
code['weight']=dict(zip(code['code'].keys(),code['code'].keys()))
fw=pd.DataFrame(columns=[u'加权'],index=code['qlist'])
w=pd.Series(code['weight'])
for c in fo.columns:
t=fo[c]
t=t[w.index][t[w.index].notnull()]
if t.sum()>1e-17:
fw.loc[c,u'加权']=(t*w).sum()/t.sum()
else:
fw.loc[c,u'加权']=0
fw.rename(index=code['code_r'],inplace=True)
result['fw']=fw
result['weight']=','.join(['{}:{}'.format(code['code'][c],code['weight'][c]) for c in code['code']])
fo.rename(columns=code['code_r'],index=code['code'],inplace=True)
fop=fo.copy()
fop=fop/sample_len
result['fop']=fop
result['fo']=fo
elif qtype == u'排序题':
#提供综合统计和TOP1值统计
# 其中综合的算法是当成单选题,给每个TOP分配和为1的权重
#topn=max([len(data[q][data[q].notnull()].unique()) for q in index])
#topn=len(index)
topn=data[index].fillna(0).max().max()
topn=int(topn)
qsort=dict(zip([i+1 for i in range(topn)],[(topn-i)*2.0/(topn+1)/topn for i in range(topn)]))
top1=data.applymap(lambda x:int(x==1))
data_weight=data.replace(qsort)
t1=pd.DataFrame()
t1['TOP1']=top1.sum()
t1[u'综合']=data_weight.sum()
t1.sort_values(by=u'综合',ascending=False,inplace=True)
t1.rename(index=code['code'],inplace=True)
t=t1.copy()
t=t/sample_len
result['fop']=t
result['fo']=t1
# 新增topn矩阵
t_topn=pd.DataFrame()
for i in range(topn):
t_topn['TOP%d'%(i+1)]=data.applymap(lambda x:int(x==i+1)).sum()
t_topn.sort_values(by=u'TOP1',ascending=False,inplace=True)
if 'code' in code:
t_topn.rename(index=code['code'],inplace=True)
result['TOPN_fo']=t_topn#频数
result['TOPN']=t_topn/sample_len
result['weight']='+'.join(['TOP{}*{:.2f}'.format(i+1,(topn-i)*2.0/(topn+1)/topn) for i in range(topn)])
else:
result['fop']=None
result['fo']=None
if (not total) and not(result['fo'] is None) and (u'合计' in result['fo'].index):
result['fo'].drop([u'合计'],axis=0,inplace=True)
result['fop'].drop([u'合计'],axis=0,inplace=True)
if not(result['fo'] is None) and ('code_order' in code):
code_order=[q for q in code['code_order'] if q in result['fo'].index]
if u'合计' in result['fo'].index:
code_order=code_order+[u'合计']
result['fo']=pd.DataFrame(result['fo'],index=code_order)
result['fop']=pd.DataFrame(result['fop'],index=code_order)
return result
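
# Illustrative usage sketch for table(): frequency statistics of one
# single-choice question. The answers and the code dict are hypothetical but
# follow the structure expected by this module.
def _demo_table():
    d = pd.DataFrame({'Q1': [1, 2, 2, 3, 1, 2]})
    c = {'content': 'example question', 'qtype': u'单选题', 'qlist': ['Q1'],
         'code': {1: 'A', 2: 'B', 3: 'C'}}
    r = table(d, c)
    print(r['fo'])
    print(r['fop'])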
def crosstab(data_index,data_column,code_index=None,code_column=None,qtype=None,total=True):
'''适用于问卷数据的交叉统计
输入参数:
data_index: 因变量,放在行中
data_column:自变量,放在列中
code_index: dict格式,指定data_index的编码等信息
code_column: dict格式,指定data_column的编码等信息
qtype: 给定两个数据的题目类型,若为字符串则给定data_index,若为列表,则给定两个的
返回字典格式数据
'fop':默认的百分比表,行是data_index,列是data_column
'fo':原始频数表,且添加了总体项
'fw': 加权平均值
简要说明:
因为要处理各类题型,这里将单选题处理为多选题
fo:观察频数表
nij是同时选择了Ri和Cj的频数
总体的频数是选择了Ri的频数,与所在行的总和无关
行变量\列变量 C1 |C2 | C3| C4|总体
R1| n11|n12|n13|n14|n1:
R2| n21|n22|n23|n23|n2:
R3| n31|n32|n33|n34|n3:
fop: 观察百分比表(列变量)
这里比较难处理,data_column各个类别的样本量和总体的样本量不一样,各类别的样本量为同时
选择了行变量和列类别的频数。而总体的样本量为选择了行变量的频数
fw: 加权平均值
如果data_index的编码code含有weight字段,则我们会输出分组的加权平均值
'''
# 将Series转为DataFrame格式
data_index=pd.DataFrame(data_index)
data_column=pd.DataFrame(data_column)
# 获取行/列变量的题目类型
# 默认值
if data_index.shape[1]==1:
qtype1=u'单选题'
else:
qtype1=u'多选题'
if data_column.shape[1]==1:
qtype2=u'单选题'
else:
qtype2=u'多选题'
# 根据参数修正
if code_index:
qtype1=code_index['qtype']
if qtype1 == u'单选题':
data_index.replace(code_index['code'],inplace=True)
elif qtype1 in [u'多选题',u'排序题']:
data_index.rename(columns=code_index['code'],inplace=True)
elif qtype1 == u'矩阵单选题':
data_index.rename(columns=code_index['code_r'],inplace=True)
if code_column:
qtype2=code_column['qtype']
if qtype2 == u'单选题':
data_column.replace(code_column['code'],inplace=True)
elif qtype2 in [u'多选题',u'排序题']:
data_column.rename(columns=code_column['code'],inplace=True)
elif qtype2 == u'矩阵单选题':
data_column.rename(columns=code_column['code_r'],inplace=True)
if qtype:
#qtype=list(qtype)
if isinstance(qtype,list) and len(qtype)==2:
qtype1=qtype[0]
qtype2=qtype[1]
elif isinstance(qtype,str):
qtype1=qtype
if qtype1 == u'单选题':
data_index=sa_to_ma(data_index)
qtype1=u'多选题'
# 将单选题变为多选题
if qtype2 == u'单选题':
#data_column=pd.get_dummies(data_column.iloc[:,0])
data_column=sa_to_ma(data_column)
qtype2=u'多选题'
# 准备工作
index_list=list(data_index.columns)
columns_list=list(data_column.columns)
# 频数表/data_column各个类别的样本量
column_freq=data_column.iloc[list(data_index.notnull().T.any()),:].sum()
#column_freq[u'总体']=column_freq.sum()
column_freq[u'总体']=data_index.notnull().T.any().sum()
R=len(index_list)
C=len(columns_list)
result={}
result['sample_size']=column_freq
if (qtype1 == u'多选题') and (qtype2 == u'多选题'):
data_index.fillna(0,inplace=True)
t=pd.DataFrame(np.dot(data_index.fillna(0).T,data_column.fillna(0)))
t.rename(index=dict(zip(range(R),index_list)),columns=dict(zip(range(C),columns_list)),inplace=True)
if code_index and ('weight' in code_index):
w=pd.Series(code_index['weight'])
w.rename(index=code_index['code'],inplace=True)
fw=pd.DataFrame(columns=[u'加权'],index=t.columns)
for c in t.columns:
tmp=t[c]
tmp=tmp[w.index][tmp[w.index].notnull()]
if abs(tmp.sum())>0:
fw.loc[c,u'加权']=(tmp*w).sum()/tmp.sum()
else:
fw.loc[c,u'加权']=0
fo1=data_index.sum()[w.index][data_index.sum()[w.index].notnull()]
if abs(fo1.sum())>0:
fw.loc[u'总体',u'加权']=(fo1*w).sum()/fo1.sum()
else:
fw.loc[u'总体',u'加权']=0
result['fw']=fw
t[u'总体']=data_index.sum()
t.sort_values([u'总体'],ascending=False,inplace=True)
t1=t.copy()
for i in t.columns:
if column_freq[i]!=0:
t.loc[:,i]=t.loc[:,i]/column_freq[i]
result['fop']=t
result['fo']=t1
elif (qtype1 == u'矩阵单选题') and (qtype2 == u'多选题'):
if code_index and ('weight' in code_index):
data_index.replace(code_index['weight'],inplace=True)
t=pd.DataFrame(np.dot(data_index.fillna(0).T,data_column.fillna(0)))
t=pd.DataFrame(np.dot(t,np.diag(1/data_column.sum())))
t.rename(index=dict(zip(range(R),index_list)),columns=dict(zip(range(C),columns_list)),inplace=True)
t[u'总体']=data_index.mean()
t.sort_values([u'总体'],ascending=False,inplace=True)
t1=t.copy()
result['fop']=t
result['fo']=t1
elif (qtype1 == u'排序题') and (qtype2 == u'多选题'):
topn=int(data_index.max().max())
#topn=max([len(data_index[q][data_index[q].notnull()].unique()) for q in index_list])
qsort=dict(zip([i+1 for i in range(topn)],[(topn-i)*2.0/(topn+1)/topn for i in range(topn)]))
data_index_zh=data_index.replace(qsort)
t=pd.DataFrame(np.dot(data_index_zh.fillna(0).T,data_column.fillna(0)))
t.rename(index=dict(zip(range(R),index_list)),columns=dict(zip(range(C),columns_list)),inplace=True)
t[u'总体']=data_index_zh.sum()
t.sort_values([u'总体'],ascending=False,inplace=True)
t1=t.copy()
for i in t.columns:
if column_freq[i]!=0:
t.loc[:,i]=t.loc[:,i]/column_freq[i]
result['fop']=t
result['fo']=t1
# 新增TOP1 数据
data_index_top1=data_index.applymap(lambda x:int(x==1))
top1=pd.DataFrame(np.dot(data_index_top1.fillna(0).T,data_column.fillna(0)))
top1.rename(index=dict(zip(range(R),index_list)),columns=dict(zip(range(C),columns_list)),inplace=True)
top1[u'总体']=data_index_top1.fillna(0).sum()
top1.sort_values([u'总体'],ascending=False,inplace=True)
for i in top1.columns:
if column_freq[i]!=0:
top1.loc[:,i]=top1.loc[:,i]/column_freq[i]
result['TOP1']=top1
else:
result['fop']=None
result['fo']=None
# 去除总体
if (not total) and not(result['fo'] is None) and ('总体' in result['fo'].columns):
result['fo'].drop(['总体'],axis=1,inplace=True)
result['fop'].drop(['总体'],axis=1,inplace=True)
# 顺序重排
if not(result['fo'] is None) and code_index and ('code_order' in code_index) and qtype1!='矩阵单选题':
code_order=code_index['code_order']
code_order=[q for q in code_order if q in result['fo'].index]
if u'总体' in result['fo'].index:
code_order=code_order+[u'总体']
result['fo']=pd.DataFrame(result['fo'],index=code_order)
result['fop']=pd.DataFrame(result['fop'],index=code_order)
if not(result['fo'] is None) and code_column and ('code_order' in code_column) and qtype2!='矩阵单选题':
code_order=code_column['code_order']
code_order=[q for q in code_order if q in result['fo'].columns]
if u'总体' in result['fo'].columns:
code_order=code_order+[u'总体']
result['fo']=pd.DataFrame(result['fo'],columns=code_order)
result['fop']=pd.DataFrame(result['fop'],columns=code_order)
return result
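
# Illustrative usage sketch for crosstab(): cross-tab of two single-choice
# variables (both are converted to multiple-choice internally via sa_to_ma,
# which is assumed to be defined earlier in this module). Data are hypothetical.
def _demo_crosstab():
    q1 = pd.Series(['A', 'A', 'B', 'B', 'A', 'B'], name='Q1')
    q2 = pd.Series(['M', 'F', 'M', 'F', 'F', 'M'], name='Q2')
    r = crosstab(q1, q2)
    print(r['fo'])   # counts, including the overall column
    print(r['fop'])  # column percentages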
def qtable(data,*args,**kwargs):
'''简易频数统计函数
输入
data:数据框,可以是所有的数据
code:数据编码
q1: 题目序号
q2: 题目序号
# 单个变量的频数统计
qtable(data,code,'Q1')
# 两个变量的交叉统计
qtable(data,code,'Q1','Q2')
'''
code=None
q1=None
q2=None
for a in args:
if (isinstance(a,str)) and (not q1):
q1=a
elif (isinstance(a,str)) and (q1):
q2=a
elif isinstance(a,dict):
code=a
if not code:
code=data_auto_code(data)
if not q1:
print('please input the q1,such as Q1.')
return
total=False
for key in kwargs:
if key == 'total':
total=kwargs['total']
if q2 is None:
result=table(data[code[q1]['qlist']],code[q1],total=total)
else:
result=crosstab(data[code[q1]['qlist']],data[code[q2]['qlist']],code[q1],code[q2],total=total)
return result
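
# Illustrative call sketch for qtable(), mirroring the docstring above; data and
# code are the questionnaire structures produced elsewhere in this module.
def _demo_qtable(data, code):
    freq = qtable(data, code, 'Q1')          # single-question frequency table
    cross = qtable(data, code, 'Q1', 'Q2')   # Q1 x Q2 cross-tab
    return freq, cross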
def association_rules(df,minSup=0.08,minConf=0.4,Y=None):
'''关联规则分析
df: DataFrame,bool 类型。是一个类似购物篮数据
'''
try:
df=df.astype(bool)
except:
print('df 必须为 bool 类型')
return (None,None,None)
columns = np.array(df.columns)
gen=associate.frequent_itemsets(np.array(df), minSup)
itemsets=dict(gen)
rules=associate.association_rules(itemsets,minConf)
rules=pd.DataFrame(list(rules))
if len(rules) == 0:
return (None,None,None)
# 依次是LHS、RHS、支持度、置信度
rules.columns=['antecedent','consequent','sup','conf']
rules['sup']=rules['sup']/len(df)
rules['antecedent']=rules['antecedent'].map(lambda x:[columns[i] for i in list(x)])
rules['consequent']=rules['consequent'].map(lambda x:[columns[i] for i in list(x)])
rules['rule']=rules['antecedent'].map(lambda x:','.join(['%s'%i for i in x]))\
+'-->'\
+rules['consequent'].map(lambda x:','.join(['%s'%i for i in x]))
result=';\n'.join(['{}: 支持度={:.1f}%, 置信度={:.1f}%'.format(\
rules.loc[ii,'rule'],100*rules.loc[ii,'sup'],100*rules.loc[ii,'conf']) for ii in rules.index[:4]])
return (result,rules,itemsets)
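
# Illustrative usage sketch for association_rules(): basket-style boolean data.
# The item names are hypothetical, the thresholds mirror the defaults above, and
# the module-level `associate` dependency is assumed to be installed.
def _demo_association_rules():
    basket = pd.DataFrame(np.random.rand(200, 4) > 0.6,
                          columns=['item_a', 'item_b', 'item_c', 'item_d'])
    summary, rules, itemsets = association_rules(basket, minSup=0.08, minConf=0.4)
    print(summary)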
def contingency(fo,alpha=0.05):
''' 列联表分析:(观察频数表分析)
# 预增加一个各类别之间的距离
1、生成TGI指数、TWI指数、CHI指数
2、独立性检验
3、当两个变量不显著时,考虑单个之间的显著性
返回字典格式
chi_test: 卡方检验结果,1:显著;0:不显著;-1:期望值不满足条件
coef: 包含chi2、p值、V相关系数
log: 记录一些异常情况
FO: 观察频数
FE: 期望频数
TGI:fo/fe
TWI:fo-fe
CHI:sqrt((fo-fe)(fo/fe-1))*sign(fo-fe)
significant:{
.'result': 显著性结果[1(显著),0(不显著),-1(fe小于5的过多)]
.'pvalue':
.'method': chi_test or fisher_test
.'vcoef':
.'threshold':
}
summary:{
.'summary': 结论提取
.'fit_test': 拟合优度检验
.'chi_std':
.'chi_mean':
'''
import scipy.stats as stats
cdata={}
if isinstance(fo,pd.core.series.Series):
fo=pd.DataFrame(fo)
if not isinstance(fo,pd.core.frame.DataFrame):
return cdata
R,C=fo.shape
# 去除所有的总体、合计、其他、其它
if u'总体' in fo.columns:
fo.drop([u'总体'],axis=1,inplace=True)
if any([(u'其他' in '%s'%s) or (u'其它' in '%s'%s) for s in fo.columns]):
tmp=[s for s in fo.columns if (u'其他' in s) or (u'其它' in s)]
for t in tmp:
fo.drop([t],axis=1,inplace=True)
if u'合计' in fo.index:
fo.drop([u'合计'],axis=0,inplace=True)
if any([(u'其他' in '%s'%s) or (u'其它' in '%s'%s) for s in fo.index]):
tmp=[s for s in fo.index if (u'其他' in s) or (u'其它' in s)]
for t in tmp:
fo.drop([t],axis=0,inplace=True)
fe=fo.copy()
N=fo.sum().sum()
if N==0:
#print('rpt.contingency:: fo的样本数为0,请检查数据')
return cdata
for i in fe.index:
for j in fe.columns:
fe.loc[i,j]=fe.loc[i,:].sum()*fe.loc[:,j].sum()/float(N)
TGI=fo/fe
TWI=fo-fe
CHI=np.sqrt((fo-fe)**2/fe)*(TWI.applymap(lambda x: int(x>0))*2-1)
PCHI=1/(1+np.exp(-1*CHI))
cdata['FO']=fo
cdata['FE']=fe
cdata['TGI']=TGI*100
cdata['TWI']=TWI
cdata['CHI']=CHI
cdata['PCHI']=PCHI
# 显著性检验(独立性检验)
significant={}
significant['threshold']=stats.chi2.ppf(q=1-alpha,df=C-1)
#threshold=math.ceil(R*C*0.2)# 期望频数和实际频数不得小于5
# 去除行、列变量中样本数和过低的变量
threshold=max(3,min(30,N*0.05))
ind1=fo.sum(axis=1)>=threshold
ind2=fo.sum()>=threshold
fo=fo.loc[ind1,ind2]
if (fo.shape[0]<=1) or (np.any(fo.sum()==0)) or (np.any(fo.sum(axis=1)==0)):
significant['result']=-2
significant['pvalue']=-2
significant['method']='fo not frequency'
#elif ((fo<=5).sum().sum()>=threshold):
#significant['result']=-1
#significant['method']='need fisher_exact'
'''fisher_exact运行所需时间极其的长,此处还是不作检验
fisher_r,fisher_p=fisher_exact(fo)
significant['pvalue']=fisher_p
significant['method']='fisher_exact'
significant['result']=fisher_r
'''
else:
try:
chiStats = stats.chi2_contingency(observed=fo)
except:
chiStats=(1,np.nan)
significant['pvalue']=chiStats[1]
significant['method']='chi-test'
#significant['vcoef']=math.sqrt(chiStats[0]/N/min(R-1,C-1))
if chiStats[1] <= alpha:
significant['result']=1
elif np.isnan(chiStats[1]):
significant['pvalue']=-2
significant['result']=-1
else:
significant['result']=0
cdata['significant']=significant
# 列联表分析summary
chi_sum=(CHI**2).sum(axis=1)
chi_value_fit=stats.chi2.ppf(q=1-alpha,df=C-1)#拟合优度检验
fit_test=chi_sum.map(lambda x : int(x>chi_value_fit))
summary={}
summary['fit_test']=fit_test
summary['chi_std']=CHI.unstack().std()
summary['chi_mean']=CHI.unstack().mean()
#print('the std of CHI is %.2f'%summary['chi_std'])
conclusion=''
fo_rank=fo.sum().rank(ascending=False)# 给列选项排名,只分析排名在前4选项的差异
for c in fo_rank[fo_rank<5].index:#CHI.columns:
#针对每一列,选出大于一倍方差的行选项,如果过多,则只保留前三个
tmp=list(CHI.loc[CHI[c]-summary['chi_mean']>summary['chi_std'],c].sort_values(ascending=False)[:3].index)
tmp=['%s'%s for s in tmp]# 把全部内容转化成字符串
if tmp:
tmp1=u'{col}:{s}'.format(col=c,s=' || '.join(tmp))
conclusion=conclusion+tmp1+'; \n'
if significant['result']==1:
if conclusion:
tmp='在95%置信水平下显著性检验(卡方检验)结果为*显著*, 且CHI指标在一个标准差外的(即相对有差异的)有:\n'
else:
tmp='在95%置信水平下显著性检验(卡方检验)结果为*显著*,但没有找到相对有差异的配对'
elif significant['result']==0:
if conclusion:
tmp='在95%置信水平下显著性检验(卡方检验)结果为*不显著*, 但CHI指标在一个标准差外的(即相对有差异的)有:\n'
else:
tmp='在95%置信水平下显著性检验(卡方检验)结果为*不显著*,且没有找到相对有差异的配对'
else:
if conclusion:
tmp='不满足显著性检验(卡方检验)条件, 但CHI指标在一个标准差外的(即相对有差异的)有:\n'
else:
tmp='不满足显著性检验(卡方检验)条件,且没有找到相对有差异的配对'
conclusion=tmp+conclusion
summary['summary']=conclusion
cdata['summary']=summary
return cdata
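
# Illustrative usage sketch for contingency(): TGI/TWI/CHI indices plus the
# chi-square significance test on an observed frequency table (hypothetical counts).
def _demo_contingency():
    fo = pd.DataFrame([[60, 40, 30], [20, 50, 35], [15, 25, 55]],
                      index=['R1', 'R2', 'R3'], columns=['C1', 'C2', 'C3'])
    cdata = contingency(fo, alpha=0.05)
    print(cdata['significant'])
    print(cdata['TGI'])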
def pre_cross_qlist(data,code):
'''自适应给出可以进行交叉分析的变量和相应选项
满足以下条件的将一键交叉分析:
1、单选题
2、如果选项是文本,则平均长度应小于10
...
返回:
cross_qlist: [[题目序号,变量选项],]
'''
cross_qlist=[]
for qq in code:
qtype=code[qq]['qtype']
qlist=code[qq]['qlist']
content=code[qq]['content']
sample_len_qq=data[code[qq]['qlist']].notnull().T.any().sum()
if qtype not in ['单选题']:
continue
if not(set(qlist) <= set(data.columns)):
continue
t=qtable(data,code,qq)['fo']
if 'code_order' in code[qq]:
code_order=code[qq]['code_order']
code_order=[q for q in code_order if q in t.index]
t=pd.DataFrame(t,index=code_order)
items=list(t.index)
code_values=list(code[qq]['code'].values())
if len(items)<=1:
continue
if all([isinstance(t,str) for t in code_values]):
if sum([len(t) for t in code_values])/len(code_values)>15:
continue
if ('code_order' in code[qq]) and (len(items)<10):
code_order=[q for q in code[qq]['code_order'] if q in t.index]
t=pd.DataFrame(t,index=code_order)
ind=np.where(t['频数']>=10)[0]
if len(ind)>0:
cross_order=list(t.index[range(ind[0],ind[-1]+1)])
cross_qlist.append([qq,cross_order])
continue
if re.findall('性别|年龄|gender|age',content.lower()):
cross_qlist.append([qq,items])
continue
if (len(items)<=sample_len_qq/30) and (len(items)<10):
cross_order=list(t.index[t['频数']>=10])
if cross_order:
cross_qlist.append([qq,cross_order])
continue
return cross_qlist
'''
import report as rpt
ppt=rpt.Report(template)
ppt.add_cover(filename)
ppt.add_slide(data=,title)
ppt.save()
ppt.plo
'''
def cross_chart(data,code,cross_class,filename=u'交叉分析报告', cross_qlist=None,\
delclass=None,plt_dstyle=None,cross_order=None,reverse_display=False,\
total_display=True,max_column_chart=20,save_dstyle=None,template=None):
'''使用帮助
data: 问卷数据,包含交叉变量和所有的因变量
code: 数据编码
cross_class: 交叉变量,单选题或者多选题,例如:Q1
filename:文件名,用于PPT和保存相关数据
cross_list: 需要进行交叉分析的变量,缺省为code中的所有变量
delclass: 交叉变量中需要删除的单个变量,缺省空
plt_dstyle: 绘制图表需要用的数据类型,默认为百分比表,可以选择['TGI'、'CHI'、'TWI']等
save_dstyle: 需要保存的数据类型,格式为列表。
cross_order: 交叉变量中各个类别的顺序,可以缺少
total_display: PPT绘制图表中是否显示总体情况
max_column_chart: 列联表的列数,小于则用柱状图,大于则用条形图
template: PPT模板信息,{'path': 'layouts':}缺省用自带的。
'''
# ===================参数预处理=======================
if plt_dstyle:
plt_dstyle=plt_dstyle.upper()
if not cross_qlist:
try:
            cross_qlist=list(sorted(code,key=lambda c: int(re.findall(r'\d+',c)[0])))
except:
cross_qlist=list(code.keys())
if cross_class in cross_qlist:
cross_qlist.remove(cross_class)
# =================基本数据获取==========================
#交叉分析的样本数统一为交叉变量的样本数
sample_len=data[code[cross_class]['qlist']].notnull().T.any().sum()
# 交叉变量中每个类别的频数分布.
if code[cross_class]['qtype'] == u'单选题':
#data[cross_class].replace(code[cross_class]['code'],inplace=True)
cross_class_freq=data[code[cross_class]['qlist'][0]].value_counts()
cross_class_freq[u'合计']=cross_class_freq.sum()
cross_class_freq.rename(index=code[cross_class]['code'],inplace=True)
#cross_columns_qlist=code[cross_class]['qlist']
elif code[cross_class]['qtype'] == u'多选题':
cross_class_freq=data[code[cross_class]['qlist']].sum()
cross_class_freq[u'合计']=cross_class_freq.sum()
cross_class_freq.rename(index=code[cross_class]['code'],inplace=True)
#data.rename(columns=code[cross_class]['code'],inplace=True)
#cross_columns_qlist=[code[cross_class]['code'][k] for k in code[cross_class]['qlist']]
elif code[cross_class]['qtype'] == u'排序题':
tmp=qtable(data,code,cross_class)
#tmp,tmp1=table(data[code[cross_class]['qlist']],code[cross_class])
cross_class_freq=tmp['fo'][u'综合']
cross_class_freq[u'合计']=cross_class_freq.sum()
# ================I/O接口=============================
# pptx 接口
prs=rpt.Report(template) if template else rpt.Report()
if not os.path.exists('.\\out'):
os.mkdir('.\\out')
# 生成数据接口(因为exec&eval)
Writer=pd.ExcelWriter('.\\out\\'+filename+u'.xlsx')
Writer_save={}
if save_dstyle:
for dstyle in save_dstyle:
Writer_save[u'Writer_'+dstyle]=pd.ExcelWriter('.\\out\\'+filename+u'_'+dstyle+'.xlsx')
    result={}#记录每道题的统计数据,用于函数的返回数据
    # 记录每道题目的样本数和显著性差异检验结果,用于最后的数据输出
cross_columns=list(cross_class_freq.index)
cross_columns=[r for r in cross_columns if r!=u'合计']
cross_columns=['内容','题型']+cross_columns+[u'总体',u'显著性检验']
conclusion=pd.DataFrame(index=cross_qlist,columns=cross_columns)
conclusion.to_excel(Writer,u'索引')
# ================封面页=============================
prs.add_cover(title=filename)
# ================背景页=============================
title=u'说明'
summary=u'交叉题目为'+cross_class+u': '+code[cross_class]['content']
summary=summary+'\n'+u'各类别样本量如下:'
prs.add_slide(data={'data':cross_class_freq,'slide_type':'table'},title=title,\
summary=summary)
data_column=data[code[cross_class]['qlist']]
for qq in cross_qlist:
# 遍历所有题目
#print(qq)
qtitle=code[qq]['content']
qlist=code[qq]['qlist']
qtype=code[qq]['qtype']
if not(set(qlist) <= set(data.columns)):
continue
data_index=data[qlist]
sample_len=data_column.iloc[list(data_index.notnull().T.any()),:].notnull().T.any().sum()
summary=None
if qtype not in [u'单选题',u'多选题',u'排序题',u'矩阵单选题']:
continue
# 交叉统计
try:
if reverse_display:
result_t=crosstab(data_column,data_index,code_index=code[cross_class],code_column=code[qq])
else:
result_t=crosstab(data_index,data_column,code_index=code[qq],code_column=code[cross_class])
except :
            print('脚本在处理 {} 时出了一点小问题.....'.format(qq))
continue
if ('fo' in result_t) and ('fop' in result_t):
t=result_t['fop']
t1=result_t['fo']
qsample=result_t['sample_size']
else:
continue
if t is None:
continue
# =======数据修正==============
if cross_order and (not reverse_display):
if u'总体' not in cross_order:
cross_order=cross_order+[u'总体']
cross_order=[q for q in cross_order if q in t.columns]
t=pd.DataFrame(t,columns=cross_order)
t1=pd.DataFrame(t1,columns=cross_order)
if cross_order and reverse_display:
cross_order=[q for q in cross_order if q in t.index]
t=pd.DataFrame(t,index=cross_order)
t1=pd.DataFrame(t1,index=cross_order)
'''在crosstab中已经重排了
if 'code_order' in code[qq] and qtype!='矩阵单选题':
code_order=code[qq]['code_order']
if reverse_display:
#code_order=[q for q in code_order if q in t.columns]
if u'总体' in t1.columns:
code_order=code_order+[u'总体']
t=pd.DataFrame(t,columns=code_order)
t1=pd.DataFrame(t1,columns=code_order)
else:
#code_order=[q for q in code_order if q in t.index]
t=pd.DataFrame(t,index=code_order)
t1=pd.DataFrame(t1,index=code_order)
'''
t.fillna(0,inplace=True)
t1.fillna(0,inplace=True)
# =======保存到Excel中========
t2=pd.concat([t,t1],axis=1)
t2.to_excel(Writer,qq,index_label=qq,float_format='%.3f')
Writer_rows=len(t2)# 记录当前Excel文件写入的行数
pd.DataFrame(qsample,columns=['样本数']).to_excel(Writer,qq,startrow=Writer_rows+2)
Writer_rows+=len(qsample)+2
#列联表分析
cdata=contingency(t1,alpha=0.05)# 修改容错率
result[qq]=cdata
if cdata:
summary=cdata['summary']['summary']
# 保存各个指标的数据
if save_dstyle:
for dstyle in save_dstyle:
cdata[dstyle].to_excel(Writer_save[u'Writer_'+dstyle],qq,index_label=qq,float_format='%.2f')
if qtype in [u'单选题',u'多选题',u'排序题']:
plt_data=t*100
else:
plt_data=t.copy()
if (abs(1-plt_data.sum())<=0.01+1e-17).all():
plt_data=plt_data*100
# ========================【特殊题型处理区】================================
if 'fw' in result_t:
plt_data=result_t['fw']
if cross_order and (not reverse_display):
if u'总体' not in cross_order:
cross_order=cross_order+[u'总体']
cross_order=[q for q in cross_order if q in plt_data.index]
plt_data=pd.DataFrame(plt_data,index=cross_order)
plt_data.to_excel(Writer,qq,startrow=Writer_rows+2)
Writer_rows+=len(plt_data)
if plt_dstyle and isinstance(cdata,dict) and (plt_dstyle in cdata):
plt_data=cdata[plt_dstyle]
# 绘制PPT
title=qq+'['+qtype+']: '+qtitle
if not summary:
summary=u'这里是结论区域.'
if 'significant' in cdata:
sing_result=cdata['significant']['result']
sing_pvalue=cdata['significant']['pvalue']
else:
sing_result=-2
sing_pvalue=-2
footnote=u'显著性检验的p值为{:.3f},数据来源于{},样本N={}'.format(sing_pvalue,qq,sample_len)
# 保存相关数据
conclusion.loc[qq,:]=qsample
conclusion.loc[qq,[u'内容',u'题型']]=pd.Series({u'内容':code[qq]['content'],u'题型':code[qq]['qtype']})
conclusion.loc[qq,u'显著性检验']=sing_result
if (not total_display) and (u'总体' in plt_data.columns):
plt_data.drop([u'总体'],axis=1,inplace=True)
if len(plt_data)>max_column_chart:
prs.add_slide(data={'data':plt_data[::-1],'slide_type':'chart','type':'BAR_CLUSTERED'},\
title=title,summary=summary,footnote=footnote)
else:
prs.add_slide(data={'data':plt_data,'slide_type':'chart','type':'COLUMN_CLUSTERED'},\
title=title,summary=summary,footnote=footnote)
# 排序题特殊处理
if (qtype == u'排序题') and ('TOP1' in result_t):
plt_data=result_t['TOP1']*100
# =======数据修正==============
if cross_order and (not reverse_display):
if u'总体' not in cross_order:
cross_order=cross_order+[u'总体']
cross_order=[q for q in cross_order if q in plt_data.columns]
plt_data=pd.DataFrame(plt_data,columns=cross_order)
if cross_order and reverse_display:
cross_order=[q for q in cross_order if q in plt_data.index]
plt_data=pd.DataFrame(plt_data,index=cross_order)
if 'code_order' in code[qq]:
code_order=code[qq]['code_order']
if reverse_display:
#code_order=[q for q in code_order if q in t.columns]
if u'总体' in t1.columns:
code_order=code_order+[u'总体']
plt_data=pd.DataFrame(plt_data,columns=code_order)
else:
#code_order=[q for q in code_order if q in t.index]
plt_data=pd.DataFrame(plt_data,index=code_order)
plt_data.fillna(0,inplace=True)
title='[TOP1]' + title
if len(plt_data)>max_column_chart:
prs.add_slide(data={'data':plt_data[::-1],'slide_type':'chart','type':'BAR_CLUSTERED'},\
title=title,summary=summary,footnote=footnote)
else:
prs.add_slide(data={'data':plt_data,'slide_type':'chart','type':'COLUMN_CLUSTERED'},\
title=title,summary=summary,footnote=footnote)
'''
# ==============小结页=====================
difference=pd.Series(difference,index=total_qlist_0)
'''
# ========================文件生成和导出======================
#difference.to_csv('.\\out\\'+filename+u'_显著性检验.csv',encoding='gbk')
if plt_dstyle:
filename=filename+'_'+plt_dstyle
try:
prs.save('.\\out\\'+filename+u'.pptx')
except:
prs.save('.\\out\\'+filename+u'_副本.pptx')
conclusion.to_excel(Writer,'索引')
Writer.save()
if save_dstyle:
for dstyle in save_dstyle:
Writer_save[u'Writer_'+dstyle].save()
return result
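
# Illustrative call sketch for cross_chart(): cross every question against Q1
# and write the PPT/Excel outputs under .\out. The file name is hypothetical and
# the report module `rpt` is assumed to be importable.
def _demo_cross_chart(data, code):
    return cross_chart(data, code, 'Q1', filename=u'交叉分析报告',
                       save_dstyle=['TGI', 'CHI'])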
def summary_chart(data,code,filename=u'整体统计报告', summary_qlist=None,\
max_column_chart=20,template=None):
# ===================参数预处理=======================
if not summary_qlist:
try:
            summary_qlist=list(sorted(code,key=lambda c: int(re.findall(r'\d+',c)[0])))
except:
summary_qlist=list(code.keys())
# =================基本数据获取==========================
    #统一的有效样本,各个题目可能有不同的样本数
sample_len=len(data)
# ================I/O接口=============================
# pptx 接口
prs=rpt.Report(template) if template else rpt.Report()
if not os.path.exists('.\\out'):
os.mkdir('.\\out')
Writer=pd.ExcelWriter('.\\out\\'+filename+'.xlsx')
result={}#记录每道题的过程数据
# 记录样本数等信息,用于输出
conclusion=pd.DataFrame(index=summary_qlist,columns=[u'内容',u'题型',u'样本数'])
conclusion.to_excel(Writer,u'索引')
# ================封面页=============================
prs.add_cover(title=filename)
# ================背景页=============================
title=u'说明'
qtype_count=[code[k]['qtype'] for k in code]
qtype_count=[[qtype,qtype_count.count(qtype)] for qtype in set(qtype_count)]
qtype_count=sorted(qtype_count,key=lambda x:x[1],reverse=True)
summary='该数据一共有{}个题目,其中有'.format(len(code))
summary+=','.join(['{} {} 道'.format(t[0],t[1]) for t in qtype_count])
summary+='.\n 经统计, 该数据有效样本数为 {} 份。下表是在该样本数下,各比例对应的置信区间(置信水平95%).'.format(sample_len)
w=pd.DataFrame(index=[(i+1)*0.05 for i in range(10)],columns=['比例','置信区间'])
w['比例']=w.index
w['置信区间']=w['比例'].map(lambda x:confidence_interval(x,sample_len))
w['置信区间']=w['置信区间'].map(lambda x:'±{:.1f}%'.format(x*100))
w['比例']=w['比例'].map(lambda x:'{:.0f}% / {:.0f}%'.format(x*100,100-100*x))
w=w.set_index('比例')
prs.add_slide(data={'data':w,'slide_type':'table'},title=title,summary=summary)
for qq in summary_qlist:
'''
特殊题型处理
整体满意度题:后期归为数值类题型
'''
#print(qq)
qtitle=code[qq]['content']
qlist=code[qq]['qlist']
qtype=code[qq]['qtype']
if not(set(qlist) <= set(data.columns)):
continue
sample_len_qq=data[code[qq]['qlist']].notnull().T.any().sum()
conclusion.loc[qq,u'内容']=qtitle
conclusion.loc[qq,u'题型']=qtype
conclusion.loc[qq,u'样本数']=sample_len_qq
# 填空题只统计数据,不绘图
if qtype == '填空题':
startcols=0
for qqlist in qlist:
tmp=pd.DataFrame(data[qqlist].value_counts()).reset_index()
tmp.to_excel(Writer,qq,startcol=startcols,index=False)
startcols+=3
continue
if qtype not in [u'单选题',u'多选题',u'排序题',u'矩阵单选题']:
continue
try:
result_t=table(data[qlist],code=code[qq])
except:
print(u'脚本处理 {} 时出了一点小问题.....'.format(qq))
continue
t=result_t['fop']
t1=result_t['fo']
# =======数据修正==============
if 'code_order' in code[qq]:
code_order=code[qq]['code_order']
code_order=[q for q in code_order if q in t.index]
if u'合计' in t.index:
code_order=code_order+[u'合计']
t=pd.DataFrame(t,index=code_order)
t1=pd.DataFrame(t1,index=code_order)
t.fillna(0,inplace=True)
t1.fillna(0,inplace=True)
# =======保存到Excel中========
Writer_rows=0
t2=pd.concat([t,t1],axis=1)
t2.to_excel(Writer,qq,startrow=Writer_rows,index_label=qq,float_format='%.3f')
Writer_rows+=len(t2)+2
# ==========根据个题型提取结论==================
summary=''
if qtype in ['单选题','多选题']:
try:
gof_result=gof_test(t1)
except :
gof_result=-2
if gof_result==1:
summary+='拟合优度检验*显著*'
elif gof_result==0:
summary+='拟合优度检验*不显著*'
else:
summary+='不满足拟合优度检验条件'
if qtype == '多选题':
tmp=data[qlist].rename(columns=code[qq]['code'])
tmp_t=len(tmp)*tmp.shape[1]*np.log(tmp.shape[1])
if tmp_t<20000:
minSup=0.08
minConf=0.40
elif tmp_t<50000:
minSup=0.15
minConf=0.60
else:
minSup=0.20
minConf=0.60
aso_result,rules,freq=association_rules(tmp,minSup=minSup,minConf=minConf)
numItem_mean=t1.sum().sum()/sample_len_qq
if u'合计' in t1.index:
numItem_mean=numItem_mean/2
if aso_result:
summary+=' || 平均每个样本选了{:.1f}个选项 || 找到的关联规则如下(只显示TOP4):\n{}'.format(numItem_mean,aso_result)
rules.to_excel(Writer,qq,startrow=Writer_rows,index=False,float_format='%.3f')
Writer_rows+=len(rules)+2
else:
summary+=' || 平均每个样本选了{:.1f}个选项 || 没有找到关联性较大的规则'.format(numItem_mean)
# 各种题型的结论和相关注释。
if (qtype in [u'单选题']) and 'fw' in result_t:
tmp=u'加权平均值'
if ('name' in code[qq]) and code[qq]['name']==u'满意度':
tmp=u'满意度平均值'
elif ('name' in code[qq]) and code[qq]['name']=='NPS':
tmp=u'NPS值'
summary+=' || {}为:{:.3f}'.format(tmp,result_t['fw'])
elif qtype =='排序题':
summary+=' 此处“综合”指标的计算方法为 :={}/总频数.'.format(result_t['weight'])
if len(summary)==0:
summary+=u'这里是结论区域'
# ===============数据再加工==========================
if qtype in [u'单选题',u'多选题',u'排序题']:
plt_data=t*100
else:
plt_data=t.copy()
if u'合计' in plt_data.index:
plt_data.drop([u'合计'],axis=0,inplace=True)
result[qq]=plt_data
title=qq+'['+qtype+']: '+qtitle
footnote=u'数据来源于%s,样本N=%d'%(qq,sample_len_qq)
# 绘制图表plt_data一般是Series,对于矩阵单选题,其是DataFrame
if len(t)>max_column_chart:
prs.add_slide(data={'data':plt_data[::-1],'slide_type':'chart','type':'BAR_CLUSTERED'},\
title=title,summary=summary,footnote=footnote)
elif (len(t)>3) or (len(plt_data.shape)>1 and plt_data.shape[1]>1):
prs.add_slide(data={'data':plt_data,'slide_type':'chart','type':'COLUMN_CLUSTERED'},\
title=title,summary=summary,footnote=footnote)
else:
prs.add_slide(data={'data':plt_data,'slide_type':'chart','type':'PIE'},\
title=title,summary=summary,footnote=footnote)
#==============特殊题型处理===============
# 矩阵单选题特殊处理
if (qtype == u'矩阵单选题') and ('fw' in result_t):
plt_data=result_t['fw']
plt_data.rename(columns={u'加权':u'平均值'},inplace=True)
plt_data.to_excel(Writer,qq,startrow=Writer_rows,float_format='%.3f')
            Writer_rows+=len(plt_data)+2
plt_data.fillna(0,inplace=True)
title='[平均值]'+title
summary=summary+' || 该平均分采用的权值是:\n'+result_t['weight']
if len(plt_data)>max_column_chart:
prs.add_slide(data={'data':plt_data[::-1],'slide_type':'chart','type':'BAR_STACKED'},\
title=title,summary=summary,footnote=footnote)
else:
prs.add_slide(data={'data':plt_data,'slide_type':'chart','type':'COLUMN_STACKED'},\
title=title,summary=summary,footnote=footnote)
# 排序题特殊处理
if (qtype == u'排序题') and ('TOPN' in result_t):
plt_data=result_t['TOPN']
# 将频数和频数百分表保存至本地
tmp=pd.concat([result_t['TOPN'],result_t['TOPN_fo']],axis=1)
tmp.to_excel(Writer,qq,startrow=Writer_rows,float_format='%.3f')
            Writer_rows+=len(plt_data)+2
plt_data=plt_data*100
# =======数据修正==============
if 'code_order' in code[qq]:
code_order=code[qq]['code_order']
#code_order=[q for q in code_order if q in t.index]
if u'合计' in plt_data.index:
code_order=code_order+[u'合计']
plt_data=pd.DataFrame(plt_data,index=code_order)
plt_data.fillna(0,inplace=True)
title='[TOPN]'+title
if len(plt_data)>max_column_chart:
prs.add_slide(data={'data':plt_data[::-1],'slide_type':'chart','type':'BAR_STACKED'},\
title=title,summary=summary,footnote=footnote)
else:
prs.add_slide(data={'data':plt_data,'slide_type':'chart','type':'COLUMN_STACKED'},\
title=title,summary=summary,footnote=footnote)
# ========================文件生成和导出======================
try:
prs.save('.\\out\\'+filename+u'.pptx')
except:
prs.save('.\\out\\'+filename+u'_副本.pptx')
conclusion.to_excel(Writer,'索引')
Writer.save()
return result
def onekey_gen(data,code,filename=u'reportgen 报告自动生成',template=None):
'''一键生成所有可能需要的报告
包括
描述统计报告
单选题的交叉分析报告
'''
try:
summary_chart(data,code,filename=filename,template=template);
except:
print('整体报告生成过程中出现错误,将跳过..')
pass
print('已生成 '+filename)
cross_qlist=pre_cross_qlist(data,code)
if len(cross_qlist)==0:
return None
for cross_qq in cross_qlist:
qq=cross_qq[0]
cross_order=cross_qq[1]
if ('name' in code[qq]) and (code[qq]['name']!=''):
filename='{}_差异分析'.format(code[qq]['name'])
else:
filename='{}_差异分析'.format(qq)
save_dstyle=None #['TGI','CHI']
try:
cross_chart(data,code,qq,filename=filename,cross_order=cross_order,\
save_dstyle=save_dstyle,template=template);
print('已生成 '+filename)
except:
print(filename+'生成过程中出现错误,将跳过...')
pass
return None
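
# Illustrative call sketch: one call generates the descriptive report plus the
# automatic cross-analysis reports (data/code as produced by this module's loaders).
def _demo_onekey_gen(data, code):
    onekey_gen(data, code)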
def scorpion(data,code,filename='scorpion'):
'''天蝎X计划
返回一个excel文件
1、索引
2、各个题目的频数表
3、所有可能的交叉分析
'''
if not os.path.exists('.\\out'):
os.mkdir('.\\out')
Writer=pd.ExcelWriter('.\\out\\'+filename+'.xlsx')
try:
        qqlist=list(sorted(code,key=lambda c: int(re.findall(r'\d+',c)[0])))
except:
qqlist=list(code.keys())
qIndex=pd.DataFrame(index=qqlist,columns=[u'content',u'qtype',u'SampleSize'])
qIndex.to_excel(Writer,u'索引')
# 生成索引表和频数表
Writer_rows=0
for qq in qqlist:
qtitle=code[qq]['content']
qlist=code[qq]['qlist']
qtype=code[qq]['qtype']
if not(set(qlist) <= set(data.columns)):
continue
sample_len_qq=data[code[qq]['qlist']].notnull().T.any().sum()
qIndex.loc[qq,u'content']=qtitle
qIndex.loc[qq,u'qtype']=qtype
qIndex.loc[qq,u'SampleSize']=sample_len_qq
if qtype not in [u'单选题',u'多选题',u'排序题',u'矩阵单选题']:
continue
try:
result_t=table(data[qlist],code=code[qq])
except:
print(u'脚本处理 {} 时出了一点小问题.....'.format(qq))
continue
fop=result_t['fop']
fo=result_t['fo']
if (qtype == u'排序题') and ('TOPN' in result_t):
            tmp=result_t['TOPN_fo']
            tmp[u'综合']=fo[u'综合']
            fo=tmp.copy()
            tmp=result_t['TOPN']
            tmp[u'综合']=fop[u'综合']
            fop=tmp.copy()
# =======保存到Excel中========
fo_fop=pd.concat([fo,fop],axis=1)
fo_fop.to_excel(Writer,u'频数表',startrow=Writer_rows,startcol=1,index_label=code[qq]['content'],float_format='%.3f')
        tmp=pd.DataFrame({'name':[qq]})
"""Tests the stored flow mappings to provide quality assurance."""
import unittest
import pandas as pd
import fedelemflowlist
def get_required_flowmapping_fields():
"""Gets required field names for Flow Mappingt:return:list of required fields."""
from fedelemflowlist.globals import flowmapping_fields
required_fields = []
for k, v in flowmapping_fields.items():
if v[1]['required']:
required_fields.append(k)
return required_fields
class TestFlowMappings(unittest.TestCase):
"""Add doctring."""
def setUp(self):
"""Get flowlist used for all tests."""
self.flowmappings = fedelemflowlist.get_flowmapping()
        self.flowlist = fedelemflowlist.get_flows()
def test_no_nas_in_required_fields(self):
"""Checks that no flows have na values in required fields."""
required_fields = get_required_flowmapping_fields()
flowmappings_w_required = self.flowmappings[required_fields]
flowmappings_w_required.reset_index(drop=True, inplace=True)
nas_in_required = flowmappings_w_required.dropna()
# To Identify mappings with missing fields
missing = flowmappings_w_required[~flowmappings_w_required.index.isin(nas_in_required.index)]
self.assertEqual(len(flowmappings_w_required), len(nas_in_required))
def test_targetflowinfo_matches_flows_in_list(self):
"""Checks that target flow information in the mapping files matches a flow in the flowlist."""
flowmapping_targetinfo = self.flowmappings[['SourceListName',
'TargetFlowName', 'TargetFlowUUID',
'TargetFlowContext']]
flowmapping_targetinfo.columns = ['SourceListName','Flowable', 'Flow UUID', 'Context']
        flowmappings_w_flowlist = pd.merge(flowmapping_targetinfo,self.flowlist)
# # Jupyter Notebook for Counting Building Occupancy from Polaris Traffic Simulation Data
#
# This notebook will load a Polaris SQLlite data file into a Pandas data frame using sqlite3 libraries and count the average number of people in each building in each hour of the simulation.
#
# For help with Jupyter notebooks
#
# For help on using sql with Pandas see
# http://www.pererikstrandberg.se/blog/index.cgi?page=PythonDataAnalysisWithSqliteAndPandas
#
# For help on data analysis with Pandas see
# http://nbviewer.jupyter.org/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/Index.ipynb
#
import sqlite3
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
fname = ".\data\detroit-Demand.sqlite"
outfolder = ".\\output\\"
print(f'Connecting to database {fname}')
# Create your connection. Assumes data is in a parallel subdirectory to this one
cnx = sqlite3.connect(fname)
print("getting location data from SQLite File")
# exract all the beginning locations of the building simulation
beginning_location = pd.read_sql_query("SELECT * FROM Beginning_Location_All", cnx)
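# Illustrative sketch of the per-building hourly occupancy count described above.
# The column names ('building_id', 'person', 'start_hour') are assumptions, not
# the actual Polaris schema; adjust them to the columns in Beginning_Location_All.
def demo_hourly_occupancy(df):
    occupancy = (df.groupby(['building_id', 'start_hour'])['person']
                   .nunique()
                   .rename('people')
                   .reset_index())
    return occupancy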
# -*- coding: utf-8 -*-
"""
Created on Tue May 14 12:52:08 2019
@author: ScmayorquinS
"""
# Necessary libraries
import requests
from bs4 import BeautifulSoup
import re
import itertools
import pandas as pd
import os
import urllib
import PyPDF2
import time
import glob
#------------------------------------------------
# Scraping to extract PND text since 1961 to 2018
#------------------------------------------------
# Request html
html = requests.get('https://www.dnp.gov.co/Plan-Nacional-de-Desarrollo/Paginas/Planes-de-Desarrollo-anteriores.aspx').text
# Html structure
soup = BeautifulSoup(html,'lxml')
# Easy reading html
pretty = soup.prettify()
# 'a' tag contains every document in web page
documents = soup.find_all('a')
# 'span' tag contains the name of every PND
dirty_names = soup.find_all('span')
# Use regex to extract the names of every PND
names = re.findall("FondoGris\">(.+?)<", str(dirty_names))
# Use regex to extract years from names
years = re.findall('\d+-\d+', str(names))
# Extract every link with regex
links = re.findall("(?P<url>https?://[^\s]+)", str(documents))
# Delete links that do not belong to chapters
del(links[91:113])
del(links[0:6])
del(links[0])
del(links[84])
# Extract name of every chapter
chapters = re.findall("textoRojo\">(.+?)<", str(documents))
# Insert missing chapter from Betancur
chapters.insert(40, 'Fundamentos Plan')
# Match both lists
del(chapters[1])
del(links[9])
# Remove other unnecessary pdfs
del(links[0])
del(links[2])
del(links[4])
del(links[4])
# Insert more missing chapters to match with 'links'
chapters.insert(0, 'Santos I Tomo II')
chapters.insert(0, 'Santos I Tomo I')
chapters.insert(0, 'Santos II Tomo II')
chapters.insert(0, 'Santos II Tomo I')
# Clean links: Remove " at the end of element in list
clean_links = [s.replace('"', '') for s in links]
clean_links = [s.replace('><span', '') for s in clean_links]
# Last PND not available in initial html
duque = 'https://colaboracion.dnp.gov.co/CDT/Prensa/BasesPND2018-2022n.pdf'
# Insert document in pdf list and fill the rest of the lists with its data
clean_links.insert(0, duque)
chapters.insert(0, 'Pacto por Colombia pacto por la equidad')
names.insert(0, 'Pacto por Colombia pacto por la equidad (2018-2022) - <NAME>')
years.insert(0, '2018-2022')
# Other PND not available in initial html
uribe2_tome2 = "https://colaboracion.dnp.gov.co/CDT/PND/PND_Tomo_2.pdf"
clean_links.insert(5, uribe2_tome2)
chapters.insert(5, 'Estado Comunitario_Tomo_2')
names.insert(3, 'Estado Comunitario (2006-2010) - <NAME>')
years.insert(3, '2006-2010')
uribe2_tome1 = "https://colaboracion.dnp.gov.co/CDT/PND/PND_Tomo_1.pdf"
clean_links.insert(6, uribe2_tome1)
chapters.insert(6, 'Estado Comunitario_Tomo_1')
# Match number of chapters with its repeating name; list of lists
rep_pnds = [names[0]] * 1, [names[1]] * 2, [names[2]] * 2, [names[3]] * 2, [names[4]] * 1, [names[5]] * 9, [names[6]] * 10, [names[7]] * 12, [names[8]] * 7, [names[9]] * 5, [names[10]] * 6, [names[11]] * 5, [names[12]] * 3, [names[13]] * 9, [names[14]] * 8
# Unlist previous object
lista_pnds = list(itertools.chain.from_iterable(rep_pnds))
#-------------------------------------------------------------------------
# Paste previous list to its respective links and chapters in a data frame
#-------------------------------------------------------------------------
# Dicctionary with the data frame columns
dic = {'Planes Nacionales de Desarrollo':lista_pnds, 'Capítulos o tomos':chapters, 'Link':clean_links}
# Convert dictionary to data frame
pnd_table = pd.DataFrame(dic, columns = ['Planes Nacionales de Desarrollo','Capítulos o tomos','Link'])
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 14 10:59:05 2021
@author: franc
"""
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path
import json
from collections import Counter, OrderedDict
import math
import torchtext
from torchtext.data import get_tokenizer
from googletrans import Translator
# from deep_translator import GoogleTranslator
# pip install googletrans==4.0.0rc1
import pickle
# pip install pickle-mixin
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet as wn
# python -m spacy download es_core_news_sm
import spacy
import fasttext.util
import contractions
import re # libreria de expresiones regulares
import string # libreria de cadena de caracteres
import itertools
import sys
sys.path.append("/tmp/TEST")
from treetagger import TreeTagger
import pathlib
from scipy.spatial import distance
from scipy.stats import kurtosis
from scipy.stats import skew
class NLPClass:
def __init__(self):
self.numero = 1
nltk.download('wordnet')
def translations_dictionary(self, df_translate=None, path=""):
'''
It appends to a dictionary different animals names in spanish and
english languages. It adds them so that english animals names appear
in WordNet synset.
Parameters
----------
df_translate : pandas.dataframe, optional.
If it's not None, the rows are appended. Otherwise it's
initialized and then the rows are appended.
The default is None.
path : string, optional
The path where to save the pickle file with the dictionary. Unless
path is empty.
The default is "".
Returns
-------
df_translate : pandas.dataframe.
Pandas.dataframe with the new rows appended.
'''
df_auxiliar = pd.DataFrame(columns=['spanish','english'])
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["yaguareté"], 'english': ["jaguar"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["llama"], 'english': ["llama"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["picaflor"], 'english': ["hummingbird"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["chita"], 'english': ["cheetah"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["torcaza"], 'english': ["dove"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["yacaré"], 'english': ["alligator"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["corvina"], 'english': ["croaker"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["vizcacha"], 'english': ["viscacha"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["orca"], 'english': ["killer_whale"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["barata"], 'english': ["german_cockroach"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["coipo"], 'english': ["coypu"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["cuncuna"], 'english': ["caterpillar"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["carpincho"], 'english': ["capybara"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["jote"], 'english': ["buzzard"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["zorzal"], 'english': ["fieldfare"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["guanaco"], 'english': ["guanaco"]}), ignore_index = True)
        df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["pejerrey"], 'english': ["silverside"]}), ignore_index = True)
import streamlit as st
import math
from scipy.stats import *
import pandas as pd
import numpy as np
from plotnine import *
def app():
# title of the app
st.subheader("Proportions")
st.sidebar.subheader("Proportion Settings")
prop_choice = st.sidebar.radio("",["One Proportion","Two Proportions"])
if prop_choice == "One Proportion":
c1,c2,c3 = st.columns(3)
with c1:
x = int(st.text_input("Hits",20))
n = int(st.text_input("Tries",25))
with c2:
nullp = float(st.text_input("Null:",.7))
alpha = float(st.text_input("Alpha",.05))
with c3:
st.markdown("Pick a test:")
tail_choice = st.radio("",["Left Tail","Two Tails","Right Tail"])
one = st.columns(1)
with one[0]:
p_hat = x/n
tsd = math.sqrt(nullp*(1-nullp)/n)
cise = math.sqrt(p_hat*(1-p_hat)/n)
z = (p_hat - nullp)/tsd
x = np.arange(-4,4,.1)
y = norm.pdf(x)
            ndf = pd.DataFrame({"x":x,"y":y})
# Copyright 2020 The Johns Hopkins University Applied Physics Laboratory LLC
# All rights reserved.
# Distributed under the terms of the MIT License.
import pandas as pd
def gen_state(demand, prof):
counties = (
pd.DataFrame(demand, index=["demand"])
.T.reset_index()
.rename(columns={"index": "county"})
)
counties["state"] = counties.county.apply(lambda x: x[:-3])
state_demand = counties.groupby("state")["demand"].sum()
state_prof = (
pd.DataFrame(prof).T.reset_index().rename(columns={"index": "state"})
)
state_demand = state_demand.to_frame().reset_index()
    counties = pd.merge(counties, state_demand, on="state", how="left")
import re
from datetime import datetime
import nose
import pytz
import platform
from time import sleep
import os
import logging
import numpy as np
from distutils.version import StrictVersion
from pandas import compat
from pandas import NaT
from pandas.compat import u, range
from pandas.core.frame import DataFrame
import pandas.io.gbq as gbq
import pandas.util.testing as tm
from pandas.compat.numpy import np_datetime64_compat
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
_IMPORTS = False
_GOOGLE_API_CLIENT_INSTALLED = False
_GOOGLE_API_CLIENT_VALID_VERSION = False
_HTTPLIB2_INSTALLED = False
_SETUPTOOLS_INSTALLED = False
def _skip_if_no_project_id():
if not _get_project_id():
raise nose.SkipTest(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
raise nose.SkipTest("Cannot run integration tests without a "
"private key json file path")
def _skip_if_no_private_key_contents():
if not _get_private_key_contents():
raise nose.SkipTest("Cannot run integration tests without a "
"private key json contents")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
else:
return PROJECT_ID
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
else:
return PRIVATE_KEY_JSON_PATH
def _get_private_key_contents():
if _in_travis_environment():
with open(os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])) as f:
return f.read()
else:
return PRIVATE_KEY_JSON_CONTENTS
def _test_imports():
global _GOOGLE_API_CLIENT_INSTALLED, _GOOGLE_API_CLIENT_VALID_VERSION, \
_HTTPLIB2_INSTALLED, _SETUPTOOLS_INSTALLED
try:
import pkg_resources
_SETUPTOOLS_INSTALLED = True
except ImportError:
_SETUPTOOLS_INSTALLED = False
if compat.PY3:
google_api_minimum_version = '1.4.1'
else:
google_api_minimum_version = '1.2.0'
if _SETUPTOOLS_INSTALLED:
try:
try:
from googleapiclient.discovery import build # noqa
from googleapiclient.errors import HttpError # noqa
except:
from apiclient.discovery import build # noqa
from apiclient.errors import HttpError # noqa
from oauth2client.client import OAuth2WebServerFlow # noqa
from oauth2client.client import AccessTokenRefreshError # noqa
from oauth2client.file import Storage # noqa
from oauth2client.tools import run_flow # noqa
_GOOGLE_API_CLIENT_INSTALLED = True
_GOOGLE_API_CLIENT_VERSION = pkg_resources.get_distribution(
'google-api-python-client').version
if (StrictVersion(_GOOGLE_API_CLIENT_VERSION) >=
StrictVersion(google_api_minimum_version)):
_GOOGLE_API_CLIENT_VALID_VERSION = True
except ImportError:
_GOOGLE_API_CLIENT_INSTALLED = False
try:
import httplib2 # noqa
_HTTPLIB2_INSTALLED = True
except ImportError:
_HTTPLIB2_INSTALLED = False
if not _SETUPTOOLS_INSTALLED:
raise ImportError('Could not import pkg_resources (setuptools).')
if not _GOOGLE_API_CLIENT_INSTALLED:
raise ImportError('Could not import Google API Client.')
if not _GOOGLE_API_CLIENT_VALID_VERSION:
raise ImportError("pandas requires google-api-python-client >= {0} "
"for Google BigQuery support, "
"current version {1}"
.format(google_api_minimum_version,
_GOOGLE_API_CLIENT_VERSION))
if not _HTTPLIB2_INSTALLED:
raise ImportError(
"pandas requires httplib2 for Google BigQuery support")
# Bug fix for https://github.com/pandas-dev/pandas/issues/12572
# We need to know that a supported version of oauth2client is installed
# Test that either of the following is installed:
# - SignedJwtAssertionCredentials from oauth2client.client
# - ServiceAccountCredentials from oauth2client.service_account
# SignedJwtAssertionCredentials is available in oauthclient < 2.0.0
# ServiceAccountCredentials is available in oauthclient >= 2.0.0
oauth2client_v1 = True
oauth2client_v2 = True
try:
from oauth2client.client import SignedJwtAssertionCredentials # noqa
except ImportError:
oauth2client_v1 = False
try:
from oauth2client.service_account import ServiceAccountCredentials # noqa
except ImportError:
oauth2client_v2 = False
if not oauth2client_v1 and not oauth2client_v2:
raise ImportError("Missing oauth2client required for BigQuery "
"service account support")
def _setup_common():
try:
_test_imports()
except (ImportError, NotImplementedError) as import_exception:
raise nose.SkipTest(import_exception)
if _in_travis_environment():
logging.getLogger('oauth2client').setLevel(logging.ERROR)
logging.getLogger('apiclient').setLevel(logging.ERROR)
def _check_if_can_get_correct_default_credentials():
# Checks if "Application Default Credentials" can be fetched
# from the environment the tests are running in.
# See Issue #13577
import httplib2
try:
from googleapiclient.discovery import build
except ImportError:
from apiclient.discovery import build
try:
from oauth2client.client import GoogleCredentials
credentials = GoogleCredentials.get_application_default()
http = httplib2.Http()
http = credentials.authorize(http)
bigquery_service = build('bigquery', 'v2', http=http)
jobs = bigquery_service.jobs()
job_data = {'configuration': {'query': {'query': 'SELECT 1'}}}
jobs.insert(projectId=_get_project_id(), body=job_data).execute()
return True
except:
return False
def clean_gbq_environment(private_key=None):
dataset = gbq._Dataset(_get_project_id(), private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index= | range(test_size) | pandas.compat.range |
import os
import pandas as pd
import matplotlib.pyplot as plt
from tqdm.auto import tqdm
import re
from ipywidgets import widgets, interact
# Deep Face
from deepface import DeepFace
from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace
from deepface.commons import functions
# https://github.com/serengil/deepface/blob/master/tests/face-recognition-how.py
def natural_sort(l):
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
return sorted(l, key=alphanum_key)
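# Illustration of the natural sort above (file names are made up):
# natural_sort(["frame10.jpg", "frame2.jpg", "frame1.jpg"])
# -> ["frame1.jpg", "frame2.jpg", "frame10.jpg"]  (numeric chunks compared as integers)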
# Custom imports
from .face_detection import FaceDetector
from .frame import Frame
class Video:
def __init__(self,frames = None,path = None):
if path is not None:
paths = natural_sort(os.listdir(path))
self.frames = [Frame(os.path.join(path,x)) for x in paths]
else:
self.frames = frames
def resize(self,*args,**kwargs):
for i in tqdm(range(len(self.frames))):
self.frames[i].resize(*args,**kwargs)
def extract_faces(self,detector = None,scale_factor = 1.3,min_neighbors = 5,face_size = (100,100),deepface_check = True,deepface_backend = "opencv"):
if detector is None:
detector = FaceDetector()
self.faces = []
self.faces_metadata = []
for i,frame in enumerate(tqdm(self.frames)):
faces = frame.extract_faces(detector,scale_factor = scale_factor,min_neighbors = min_neighbors)
for j,face in enumerate(faces):
face.resize(size = face_size)
if deepface_check:
try:
DeepFace.detectFace(face.array,enforce_detection = True)
except:
continue
self.faces.append(face)
self.faces_metadata.append({"frame_id":i,"face_id":j})
self.faces_metadata = | pd.DataFrame(self.faces_metadata) | pandas.DataFrame |
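# Hedged usage sketch for the pipeline above (the directory name is made up):
# video = Video(path="frames/")                 # frames loaded in natural order
# video.extract_faces(face_size=(100, 100))     # optional DeepFace sanity check per face
# video.faces_metadata                          # frame_id / face_id for each kept face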
import pandas as pd
from numpy.random import randint
data = | pd.read_csv('mubeena1.csv') | pandas.read_csv |
# data loading
__author__ = 'Guen'
import sys,os,glob,fnmatch,datetime,time
import configparser, logging
import numpy as np
import pandas as pd
import json
from .gutil import get_config
from PyQt4 import QtGui
import imp
config = get_config()
_DATA_FOLDER = config.get('Data','DataFolder')
if 'DATA_DIR' in os.environ.keys():
_DATA_FOLDER = os.environ['DATA_DIR']
_ANALYSIS_FOLDER = config.get('Analysis', 'AnalysisFolder')
sys.path.append(_ANALYSIS_FOLDER)
try:
_analysis_module = __import__(config.get('Analysis', 'AnalysisModule'))
except:
logging.warning("Analysis module error")
_analysis_module = None
def get_latest_stamp():
return max(glob.iglob('*.dat'), key=os.path.getctime)
def analyse(stamp, analysis_module = _analysis_module):
'''
Perform module_name.do_analyse function on d (pandas dataframe) and return result
'''
d = get(stamp)
m=imp.reload(_analysis_module)
return m.do_analyse(d)
def load():
'''
Open file dialog and load .dat or .csv
Return pandas dataframe with bonus attributes .filepath, .stamp and .meta (from meta/json file)
'''
if not QtGui.QApplication.instance():
QtGui.QApplication(sys.argv)
fileDialog = QtGui.QFileDialog()
filepath = fileDialog.getOpenFileName(directory = _DATA_FOLDER)
extension = filepath[-4:]
if '.dat' in filepath:
d = pd.read_csv(filepath,sep='\t')
elif '.csv' in filepath:
d = pd.read_csv(filepath)
else:
raise Warning("Can't load data. Please supply a .dat or .csv file.")
d = pd.DataFrame()
jsonfile = os.path.join(os.path.join(os.path.split(filepath)[0],'meta'),os.path.split(filepath)[1].replace(extension,'.json'))
d.meta = json.load(open(jsonfile)) if os.path.exists(jsonfile) else {}
d.meta['filepath'] = filepath
d.meta['stamp'] = os.path.split(filepath)[1][:15]
d.meta['name'] = os.path.split(filepath)[1][:-4]
return d
def load_multiple(filepaths=[]):
'''
Open file dialog and load .dat or .csv
Return pandas dataframe with bonus attributes .filepath, .stamp and .meta (from meta/json file)
'''
if not filepaths:
fileDialog = QtGui.QFileDialog()
filepaths = fileDialog.getOpenFileNames(directory = _DATA_FOLDER)
dlist = []
for filepath in filepaths:
filepath = str(filepath)
extension = filepath[-4:]
if '.dat' in filepath:
d = pd.read_csv(filepath,sep='\t')
elif '.csv' in filepath:
d = pd.read_csv(filepath)
else:
raise Warning("Can't load data. Please supply a .dat or .csv file.")
d = pd.DataFrame()
jsonfile = os.path.join(os.path.join(os.path.split(filepath)[0],'meta'),os.path.split(filepath)[1].replace(extension,'.json'))
d.meta = json.load(open(jsonfile)) if os.path.exists(jsonfile) else {}
d.meta['filepath'] = filepath
d.meta['stamp'] = os.path.split(filepath)[1][:15]
d.meta['name'] = os.path.split(filepath)[1][:-4]
dlist.append(d)
return dlist
def get(stamp):
'''
Get data with given stamp (str), format date_time, found in _DATA_FOLDER
Return pandas dataframe with bonus attributes .filepath, .stamp and .meta (from meta/json file)
'''
if type(stamp)==str:
filepath, jsonfile = find_datafiles(stamp)
if '.dat' in filepath:
d = | pd.read_csv(filepath, sep='\t') | pandas.read_csv |
class Deploy:
'''Functionality for deploying a model to a filename'''
def __init__(self, scan_object, model_name, metric, asc=False):
'''Deploy a model to be used later or in a different system.
NOTE: for a metric that is to be minimized, set asc=True or otherwise
you will end up with the model that has the highest loss.
Deploy() takes in the object from Scan() and creates a package locally
that can be later activated with Restore().
scan_object : object
The object that is returned from Scan() upon completion.
model_name : str
Name for the .zip file to be created.
metric : str
The metric to be used for picking the best model.
asc: bool
Make this True for metrics that are to be minimized (e.g. loss),
and False when the metric is to be maximized (e.g. acc).
'''
import os
self.scan_object = scan_object
os.mkdir(model_name)
self.path = model_name + '/' + model_name
self.model_name = model_name
self.metric = metric
self.asc = asc
self.data = scan_object.data
from ..utils.best_model import best_model, activate_model
self.best_model = best_model(scan_object, metric, asc)
self.model = activate_model(scan_object, self.best_model)
# runtime
self.save_model_as()
self.save_details()
self.save_data()
self.save_results()
self.save_params()
self.save_readme()
self.package()
def save_model_as(self):
'''Model Saver
WHAT: Saves a trained model so it can be loaded later
for predictions by predictor().
'''
model_json = self.model.to_json()
with open(self.path + "_model.json", "w") as json_file:
json_file.write(model_json)
self.model.save_weights(self.path + "_model.h5")
print("Deploy package" + " " + self.model_name + " " + "have been saved.")
def save_details(self):
self.scan_object.details.to_csv(self.path + '_details.txt')
def save_data(self):
import pandas as pd
# input data is <= 2d
try:
x = pd.DataFrame(self.scan_object.x[:100])
y = pd.DataFrame(self.scan_object.y[:100])
# input data is > 2d
except ValueError:
x = pd.DataFrame()
y = | pd.DataFrame() | pandas.DataFrame |
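# Hedged usage sketch for the Deploy class above (model names are made up):
# scan_object = Scan(...)                                        # the completed Scan() run
# Deploy(scan_object, "my_model", metric="val_acc")              # best = highest val_acc
# Deploy(scan_object, "my_model", metric="val_loss", asc=True)   # best = lowest val_loss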
from nose.tools import eq_
import pandas as pd
from pavooc.preprocessing.generate_pdb_bed import pdb_coordinates
def test_pdb_coordinates_forward_strand():
# SP_BEG is corrected already. In file SP_BEG would be 6
pdb = | pd.Series({'SP_BEG': 5, 'SP_END': 34, 'PDB': 'ABC'}) | pandas.Series |
import numpy as np
import pandas as pd
import pytest
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.feature_selection import (
f_regression,
SelectKBest,
SelectFromModel,
)
from sklearn.linear_model import Lasso
from sklearn.datasets import load_boston
from feature_engine.wrappers import SklearnTransformerWrapper
def test_sklearn_imputer_numeric_with_constant(df_na):
variables_to_impute = ["Age", "Marks"]
na_variables_left_after_imputation = [
col
for col in df_na.loc[:, df_na.isna().any()].columns
if col not in variables_to_impute
]
transformer = SklearnTransformerWrapper(
transformer=SimpleImputer(fill_value=-999, strategy="constant"),
variables=variables_to_impute,
)
# transformed dataframe
ref = df_na.copy()
ref[variables_to_impute] = ref[variables_to_impute].fillna(-999)
dataframe_na_transformed = transformer.fit_transform(df_na)
# init params
assert isinstance(transformer.transformer, SimpleImputer)
assert transformer.variables == variables_to_impute
# fit params
assert transformer.input_shape_ == (8, 6)
# transformed output
assert all(
dataframe_na_transformed[na_variables_left_after_imputation].isna().sum() != 0
)
assert all(dataframe_na_transformed[variables_to_impute].isna().sum() == 0)
pd.testing.assert_frame_equal(ref, dataframe_na_transformed)
def test_sklearn_imputer_object_with_constant(df_na):
variables_to_impute = ["Name", "City"]
na_variables_left_after_imputation = [
col
for col in df_na.loc[:, df_na.isna().any()].columns
if col not in variables_to_impute
]
transformer = SklearnTransformerWrapper(
transformer=SimpleImputer(fill_value="missing", strategy="constant"),
variables=variables_to_impute,
)
# transformed dataframe
ref = df_na.copy()
ref[variables_to_impute] = ref[variables_to_impute].fillna("missing")
dataframe_na_transformed = transformer.fit_transform(df_na)
# init params
assert isinstance(transformer.transformer, SimpleImputer)
assert transformer.variables == variables_to_impute
# fit params
assert transformer.input_shape_ == (8, 6)
# transformed output
assert all(
dataframe_na_transformed[na_variables_left_after_imputation].isna().sum() != 0
)
assert all(dataframe_na_transformed[variables_to_impute].isna().sum() == 0)
pd.testing.assert_frame_equal(ref, dataframe_na_transformed)
def test_sklearn_imputer_allfeatures_with_constant(df_na):
transformer = SklearnTransformerWrapper(
transformer=SimpleImputer(fill_value="missing", strategy="constant")
)
# transformed dataframe
ref = df_na.copy()
ref = ref.fillna("missing")
dataframe_na_transformed = transformer.fit_transform(df_na)
# init params
assert isinstance(transformer.transformer, SimpleImputer)
# fit params
assert transformer.input_shape_ == (8, 6)
# transformed output
assert all(dataframe_na_transformed.isna().sum() == 0)
pd.testing.assert_frame_equal(ref, dataframe_na_transformed)
def test_sklearn_standardscaler_numeric(df_vartypes):
variables_to_scale = ["Age", "Marks"]
transformer = SklearnTransformerWrapper(
transformer=StandardScaler(), variables=variables_to_scale
)
ref = df_vartypes.copy()
ref[variables_to_scale] = (
ref[variables_to_scale] - ref[variables_to_scale].mean()
) / ref[variables_to_scale].std(ddof=0)
transformed_df = transformer.fit_transform(df_vartypes)
# init params
assert isinstance(transformer.transformer, StandardScaler)
assert transformer.variables == variables_to_scale
# fit params
assert transformer.input_shape_ == (4, 5)
assert (transformer.transformer.mean_.round(6) == np.array([19.5, 0.75])).all()
assert all(transformer.transformer.scale_.round(6) == [1.118034, 0.111803])
pd.testing.assert_frame_equal(ref, transformed_df)
def test_sklearn_standardscaler_object(df_vartypes):
variables_to_scale = ["Name"]
transformer = SklearnTransformerWrapper(
transformer=StandardScaler(), variables=variables_to_scale
)
with pytest.raises(TypeError):
transformer.fit_transform(df_vartypes)
# init params
assert isinstance(transformer.transformer, StandardScaler)
assert transformer.variables == variables_to_scale
def test_sklearn_standardscaler_allfeatures(df_vartypes):
transformer = SklearnTransformerWrapper(transformer=StandardScaler())
ref = df_vartypes.copy()
variables_to_scale = list(ref.select_dtypes(include="number").columns)
ref[variables_to_scale] = (
ref[variables_to_scale] - ref[variables_to_scale].mean()
) / ref[variables_to_scale].std(ddof=0)
transformed_df = transformer.fit_transform(df_vartypes)
# init params
assert isinstance(transformer.transformer, StandardScaler)
assert transformer.variables == variables_to_scale
# fit params
assert transformer.input_shape_ == (4, 5)
assert (transformer.transformer.mean_.round(6) == np.array([19.5, 0.75])).all()
assert all(transformer.transformer.scale_.round(6) == [1.118034, 0.111803])
pd.testing.assert_frame_equal(ref, transformed_df)
def test_sklearn_ohe_object_one_feature(df_vartypes):
variables_to_encode = ["Name"]
transformer = SklearnTransformerWrapper(
transformer=OneHotEncoder(sparse=False, dtype=np.int64),
variables=variables_to_encode,
)
ref = pd.DataFrame(
{
"Name": ["tom", "nick", "krish", "jack"],
"Name_jack": [0, 0, 0, 1],
"Name_krish": [0, 0, 1, 0],
"Name_nick": [0, 1, 0, 0],
"Name_tom": [1, 0, 0, 0],
}
)
transformed_df = transformer.fit_transform(df_vartypes[variables_to_encode])
# init params
assert isinstance(transformer.transformer, OneHotEncoder)
assert transformer.variables == variables_to_encode
# fit params
assert transformer.input_shape_ == (4, 1)
pd.testing.assert_frame_equal(ref, transformed_df)
def test_sklearn_ohe_object_many_features(df_vartypes):
variables_to_encode = ["Name", "City"]
transformer = SklearnTransformerWrapper(
transformer=OneHotEncoder(sparse=False, dtype=np.int64),
variables=variables_to_encode,
)
ref = pd.DataFrame(
{
"Name": ["tom", "nick", "krish", "jack"],
"City": ["London", "Manchester", "Liverpool", "Bristol"],
"Name_jack": [0, 0, 0, 1],
"Name_krish": [0, 0, 1, 0],
"Name_nick": [0, 1, 0, 0],
"Name_tom": [1, 0, 0, 0],
"City_Bristol": [0, 0, 0, 1],
"City_Liverpool": [0, 0, 1, 0],
"City_London": [1, 0, 0, 0],
"City_Manchester": [0, 1, 0, 0],
}
)
transformed_df = transformer.fit_transform(df_vartypes[variables_to_encode])
# init params
assert isinstance(transformer.transformer, OneHotEncoder)
assert transformer.variables == variables_to_encode
# fit params
assert transformer.input_shape_ == (4, 2)
pd.testing.assert_frame_equal(ref, transformed_df)
def test_sklearn_ohe_numeric(df_vartypes):
variables_to_encode = ["Age"]
transformer = SklearnTransformerWrapper(
transformer=OneHotEncoder(sparse=False, dtype=np.int64),
variables=variables_to_encode,
)
ref = pd.DataFrame(
{
"Age": [20, 21, 19, 18],
"Age_18": [0, 0, 0, 1],
"Age_19": [0, 0, 1, 0],
"Age_20": [1, 0, 0, 0],
"Age_21": [0, 1, 0, 0],
}
)
transformed_df = transformer.fit_transform(df_vartypes[variables_to_encode])
# init params
assert isinstance(transformer.transformer, OneHotEncoder)
assert transformer.variables == variables_to_encode
# fit params
assert transformer.input_shape_ == (4, 1)
pd.testing.assert_frame_equal(ref, transformed_df)
def test_sklearn_ohe_all_features(df_vartypes):
transformer = SklearnTransformerWrapper(
transformer=OneHotEncoder(sparse=False, dtype=np.int64)
)
ref = pd.DataFrame(
{
"Name": ["tom", "nick", "krish", "jack"],
"City": ["London", "Manchester", "Liverpool", "Bristol"],
"Age": [20, 21, 19, 18],
"Marks": [0.9, 0.8, 0.7, 0.6],
"dob": pd.date_range("2020-02-24", periods=4, freq="T"),
"Name_jack": [0, 0, 0, 1],
"Name_krish": [0, 0, 1, 0],
"Name_nick": [0, 1, 0, 0],
"Name_tom": [1, 0, 0, 0],
"City_Bristol": [0, 0, 0, 1],
"City_Liverpool": [0, 0, 1, 0],
"City_London": [1, 0, 0, 0],
"City_Manchester": [0, 1, 0, 0],
"Age_18": [0, 0, 0, 1],
"Age_19": [0, 0, 1, 0],
"Age_20": [1, 0, 0, 0],
"Age_21": [0, 1, 0, 0],
"Marks_0.6": [0, 0, 0, 1],
"Marks_0.7": [0, 0, 1, 0],
"Marks_0.8": [0, 1, 0, 0],
"Marks_0.9": [1, 0, 0, 0],
"dob_2020-02-24T00:00:00.000000000": [1, 0, 0, 0],
"dob_2020-02-24T00:01:00.000000000": [0, 1, 0, 0],
"dob_2020-02-24T00:02:00.000000000": [0, 0, 1, 0],
"dob_2020-02-24T00:03:00.000000000": [0, 0, 0, 1],
}
)
transformed_df = transformer.fit_transform(df_vartypes)
# init params
assert isinstance(transformer.transformer, OneHotEncoder)
# fit params
assert transformer.input_shape_ == (4, 5)
pd.testing.assert_frame_equal(ref, transformed_df)
def test_sklearn_ohe_errors(df_vartypes):
with pytest.raises(AttributeError):
SklearnTransformerWrapper(transformer=OneHotEncoder(sparse=True))
def test_selectKBest_all_variables():
X, y = load_boston(return_X_y=True)
X = pd.DataFrame(X)
selector = SklearnTransformerWrapper(
transformer=SelectKBest(f_regression, k=5),
)
selector.fit(X, y)
X_train_t = selector.transform(X)
pd.testing.assert_frame_equal(X_train_t, X[[2, 5, 9, 10, 12]])
def test_selectFromModel_all_variables():
X, y = load_boston(return_X_y=True)
X = pd.DataFrame(X)
lasso = Lasso(alpha=10, random_state=0)
sfm = SelectFromModel(lasso, prefit=False)
selector = SklearnTransformerWrapper(transformer=sfm)
selector.fit(X, y)
X_train_t = selector.transform(X)
pd.testing.assert_frame_equal(X_train_t, X[[1, 9, 11, 12]])
def test_selectFromModel_selected_variables():
X, y = load_boston(return_X_y=True)
X = pd.DataFrame(X)
lasso = Lasso(alpha=10, random_state=0)
sfm = SelectFromModel(lasso, prefit=False)
selector = SklearnTransformerWrapper(
transformer=sfm, variables=[0, 1, 2, 3, 4, 5],
)
selector.fit(X, y)
X_train_t = selector.transform(X)
| pd.testing.assert_frame_equal(X_train_t, X[[0, 1, 2, 6, 7, 8, 9, 10, 11, 12]]) | pandas.testing.assert_frame_equal |
#%% [markdown]
# # Author : <NAME>
# ***
# ## Capstone Project for Qualifying IBM Data Science Professional Certification
# ***
#%% [markdown]
#
# # Import Packages
#
#%%
import numpy as np # library to handle data in a vectorized manner
import pandas as pd # library for data analsysis
pd.set_option('display.max_columns', None)
| pd.set_option('display.max_rows', None) | pandas.set_option |
import pandas as pd
import numpy as np
import re
from nltk import word_tokenize
import nltk
from others.logging_utils import init_logger
from itertools import chain
import geojson
import json
from geopy import distance
from tqdm import tqdm
import os
import gc
def free_space(del_list):
for name in del_list:
if not name.startswith('_'):
del globals()[name]
gc.collect()
def sd(col, max_loss_limit=0.001, avg_loss_limit=0.001, na_loss_limit=0, n_uniq_loss_limit=0, fillna=0):
"""
max_loss_limit - don't allow any float to lose precision more than this value. Any values are ok for GBT algorithms as long as you don't need exact unique values.
See https://en.wikipedia.org/wiki/Half-precision_floating-point_format#Precision_limitations_on_decimal_values_in_[0,_1]
avg_loss_limit - same but calculates avg throughout the series.
na_loss_limit - not really useful.
n_uniq_loss_limit - very important parameter. If you have a float field with very high cardinality you can set this value to something like n_records * 0.01 to allow the field to lose some unique values.
"""
is_float = str(col.dtypes)[:5] == 'float'
na_count = col.isna().sum()
n_uniq = col.nunique(dropna=False)
try_types = ['float16', 'float32']
if na_count <= na_loss_limit:
try_types = ['int8', 'int16', 'float16', 'int32', 'float32']
for type in try_types:
col_tmp = col
# float to int conversion => try to round to minimize casting error
if is_float and (str(type)[:3] == 'int'):
col_tmp = col_tmp.copy().fillna(fillna).round()
col_tmp = col_tmp.astype(type)
max_loss = (col_tmp - col).abs().max()
avg_loss = (col_tmp - col).abs().mean()
na_loss = np.abs(na_count - col_tmp.isna().sum())
n_uniq_loss = np.abs(n_uniq - col_tmp.nunique(dropna=False))
if max_loss <= max_loss_limit and avg_loss <= avg_loss_limit and na_loss <= na_loss_limit and n_uniq_loss <= n_uniq_loss_limit:
return col_tmp
# field can't be converted
return col
def reduce_mem_usage_sd(df, deep=True, verbose=False, obj_to_cat=False):
numerics = ['int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64', 'float16', 'float32', 'float64']
start_mem = df.memory_usage(deep=deep).sum() / 1024 ** 2
for col in tqdm(df.columns):
col_type = df[col].dtypes
# collect stats
na_count = df[col].isna().sum()
n_uniq = df[col].nunique(dropna=False)
# numerics
if col_type in numerics:
df[col] = sd(df[col])
# strings
if (col_type == 'object') and obj_to_cat:
df[col] = df[col].astype('category')
if verbose:
print(f'Column {col}: {col_type} -> {df[col].dtypes}, na_count={na_count}, n_uniq={n_uniq}')
new_na_count = df[col].isna().sum()
if (na_count != new_na_count):
print(f'Warning: column {col}, {col_type} -> {df[col].dtypes} lost na values. Before: {na_count}, after: {new_na_count}')
new_n_uniq = df[col].nunique(dropna=False)
if (n_uniq != new_n_uniq):
print(f'Warning: column {col}, {col_type} -> {df[col].dtypes} lost unique values. Before: {n_uniq}, after: {new_n_uniq}')
end_mem = df.memory_usage(deep=deep).sum() / 1024 ** 2
percent = 100 * (start_mem - end_mem) / start_mem
print('Mem. usage decreased from {:5.2f} Mb to {:5.2f} Mb ({:.1f}% reduction)'.format(start_mem, end_mem, percent))
return df
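# Minimal usage sketch for the downcasting helpers above (file name is hypothetical):
# df = pd.read_csv("listings.csv")
# df = reduce_mem_usage_sd(df, verbose=False, obj_to_cat=False)
# Integer-valued float columns typically come back as int8/int16, while columns that would
# exceed the loss limits in sd() keep their original dtype.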
def etl_1(data, url_):
#function which returns anno as a number, otherwise NaN
def Anno_cleaner(x):
try:
return(float(x))
except:
return(np.nan)
#check if price has da inside price and return --> "Asta" otherwise "no_asta"
def asta(x):
asta = 'no_asta'
try:
if 'da' in x:
asta = 'asta'
except:
return(asta)
return(asta)
#Clean price by removing 'da', the euro symbol and thousands separators
def clean_price(text):
try:
text = re.sub("da", "", text)
text = re.sub("€", "", text)
text = re.sub(r'\.', '', text)
except:
return(text)
return(text)
#Function which cleans sconto by removing parentheses, % and -
def clean_sconto(text):
try:
text = re.sub(r"\(", "", text)
text = re.sub(r"\)", "", text)
text = re.sub(r'%', '', text)
text = re.sub(r'-', '', text)
except:
return(text)
return(text)
#Function which cleans metri by removing the m2 suffix
def clean_metri(text):
try:
text = re.sub(r'm2','', text)
except:
return(text)
return(text)
#function which fill NA with mancante
# def missing_filler(data, char, label = 'mancante'):
# for col in char:
# data[col] = data[col].fillna('mancante')
# return(data)
#Remove every special character in special_list
def clean_special(x):
special_list = [r'\:', r'\.', r'\-', r'\_', r'\;', r'\,', r'\'']
for symbol in special_list:
x = re.sub(symbol, ' ', x)
return(x)
#find position from description
def position_cleaner(x):
def cl1(x):
x = re.sub(r'\,', '', x)
x = re.sub(r' +', ' ', x)
return(x)
x = re.sub(r'(\,) +\d+', lambda s: cl1(s.group()), x)
return(x)
#clean string
def formatter(x):
x = x.strip()
x = re.sub(r'\s+', ' ', x)
return(x)
#Clean error from short name
def error_cleaner(x):
x = re.sub(r'v\.le', 'viale', x)
return(x)
#
def address_exctractor(x):
termini_ = ['via privata', 'via', 'viale', 'piazzetta', 'foro', 'cavalcavia',
'giardino', 'vicolo', 'passaggio', 'sito', 'parco', 'sottopasso',
'piazza', 'piazzale', 'largo', 'corso', 'alzaia', 'strada', 'ripa',
'galleria', 'foro', 'bastioni']
x = x.lower()
#find position
x = position_cleaner(x)
#clean error
x = error_cleaner(x)
#find address after termini_
address = ''
for lab_ in termini_:
#search for match
temp = re.search(r'\b%s\b' %lab_, x)
#find address by matching
if (temp is not None):
temp = re.search(r'%s (.*?)\,' %lab_, x)
try:
address_regex = temp.group(0) #if the pattern did not match (no comma after the street name), fall through to except and keep the previous value
address = clean_special(address_regex)
except:
pass
#clean ending string
address = formatter(address)
return(address)
#take out number from address to get nome via
def nome_via(x):
return(formatter(re.sub(r'\d+', '', x)))
#take out text and keep number
def numero_via(x):
x = x.lower()
x = re.sub('via 8 ottobre 2001', '', x) #via 8 ottobre exception
digit = re.search(r'\d+', x)
try:
x = digit.group()
except:
return('')
return(re.sub(r'\s+', '', x))
# char = ['Stanze', 'Bagni', 'Piano', 'Garantito', 'stato', 'classe_energetica', 'piano']
data = data.reset_index(drop = True)
url_ = url_.reset_index(drop = True)
#Clean Anno
url_['Anno_Costruzione'] = url_['Anno_Costruzione'].apply(lambda x: Anno_cleaner(x))
url_['Anno_Costruzione'] = url_['Anno_Costruzione'].convert_dtypes()
data = pd.concat([data, url_], axis = 1)
#Clean Prezzo
data['asta'] = data['Prezzo'].apply(lambda s: asta(s))
data['Prezzo'] = data['Prezzo'].apply(lambda s: clean_price(s)).astype(float)
data['Prezzo_Vecchio'] = data['Prezzo_Vecchio'].apply(lambda s: clean_price(s)).astype(float)
data['Sconto'] = data['Sconto'].apply(lambda s: clean_sconto(s)).astype(float)
#Clean Metri
data['Metri'] = data['Metri'].apply(lambda s: clean_metri(s)).astype(float)
data['Prezzo_al_mq'] = data['Prezzo']/data['Metri']
#Clean Piano
data['Piano'] = data['Piano'].replace({'T': 'Terra', 'R': 'Piano Rialzato', 'S': 'Seminterrato', 'A': 'Ultimo'})
# data = missing_filler(data, char)
#extract Indirizzo, Nome Via and numero via
data['indirizzo'] = data['Posizione'].apply(lambda x: address_exctractor(x))
data['nome_via'] = data.indirizzo.apply(lambda s: nome_via(s))
data['numero_via'] = data.indirizzo.apply(lambda s: numero_via(s))
return(data)
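# Worked illustration of the address helpers above (the listing text is invented):
# address_exctractor('Appartamento in via Roma, 10, Milano')  -> 'via roma 10'
# nome_via('via roma 10')                                     -> 'via roma'
# numero_via('via roma 10')                                   -> '10'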
def etl_2(args, data):
#Function which calculate intersection score betweem
def scorer(segment_1, segment_2, missing_pos, indirizzo, original, logger):
vec = []
#cycle over each missing position
for m_1 in missing_pos:
vec_2 = np.zeros(indirizzo.shape[0])
#calculate intersection between segment_1, segment_1 to normalize
intersection_top = segment_1[m_1] & segment_1[m_1]
#calculate score of intersection to normalize
top_ = score_intersection(intersection_top)
#iterate over each indirizzo to calculate score of intersection
for m_2 in range(indirizzo.shape[0]):
#calculate intersection set
intersection_try = segment_1[m_1] & segment_2[m_2]
#calculate score
vec_2[m_2] = score_intersection(intersection_try)
#find max
max_ = np.max(vec_2)
#count how many are equal to max score
len_max = np.sum(vec_2 == max_)
#if the normalized score exceeds the threshold assign a new indirizzo
if max_/top_ > args.treshold:
if len_max>1:
#in case of ties take indirizzo with nearest number address
number_ = number_intersection(segment_1[m_1], segment_2[vec_2 == max_].values)
#find which address is selected
pos = (np.where(vec_2 == max_)[0])[number_]
#add indirizzo
vec += [indirizzo[pos]]
#print correction with score
logger.info('Error segment: {}; chosen street: {}; Match: {}'.format(original[m_1], indirizzo[pos], max_/top_))
else:
#assign indirizzo with max score
vec += [indirizzo[np.argmax(vec_2)]]
logger.info('Original street: {}; Chosen street: {}; Match: {}'.format(original[m_1],
indirizzo[np.argmax(vec_2)], max_/top_))
else:
vec += [np.nan]
logger.info('error: no match, score {} -- original street: {}; Matched: {}'.format(max_/top_, original[m_1],
indirizzo[np.argmax(vec_2)]))
#homes whose address could not be matched to any real address are dropped
logger.info('\n\n{} homes deleted because of errors in the address text\n\n'.format(np.sum([pd.isna(x) for x in vec])))
return(vec)
#replace special character with space
def special_delete(x, punctuations = '@#!?+&*[]-%.:/();$=><|{}^' + "'`"):
for p in punctuations:
x = x.replace(p, ' ')
x = x.replace('è', 'e')
x = x.replace('é', 'e')
x = x.replace('ù', 'u')
x = x.replace('à', 'a')
x = x.replace('ò', 'o')
x = x.replace('ì', 'i')
x = re.sub(r"([0-9]+(\.[0-9]+)?)",r" \1 ", x)
return(x)
#extract number
def exctract_number(x):
try:
return(re.search(r'\b\d+\b', x).group(0))
except:
return('')
#clean number of nil
def number_nil_clean(x):
x = re.sub(r'\\+\d+', '', x)
x = re.sub(r'\/+\d+', '', x)
x = re.sub(r'[a-zA-Z]+\d+', '', x)
x = re.sub(r'[a-zA-Z]+', '', x)
return(x)
#replace special punctuations and accented letters
def special_space(x, punctuations = '@#!?+&*[]-%.:/();$=><|{}^' + "'`"):
for p in punctuations:
x = x.replace(p, f' {p} ')
x = x.replace('è', 'e')
x = x.replace('é', 'e')
x = x.replace('ù', 'u')
x = x.replace('à', 'a')
x = x.replace('ò', 'o')
x = x.replace('ì', 'i')
x = re.sub(r"([0-9]+(\.[0-9]+)?)",r" \1 ", x)
return(x)
#little clean for f.lli
def abbreviazioni_replace(x):
x = x.replace('f.lli', 'fratelli')
return(x)
#aler cleaner --> to calculate intersection
def aler_formatter(x):
x = x.lower()
x = word_tokenize(x)
x = sorted(x)
x = ' '.join(x)
return(x)
#Function which gives 0.5 for each common digit token and 1 for each common word
def score_intersection(intersection):
number = 0
word = 0
for x in intersection:
if x.isdigit():
number += 1
else:
word += 1
return(number*.5 + word)
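# Worked example for score_intersection (tokens invented): the intersection
# {'via', 'roma', '12'} scores 1 + 1 + 0.5 = 2.5 (1 per shared word, 0.5 per shared digit token).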
#calculate number of intersection in case of ties for same indirizzo
def number_intersection(fake, possibilities):
number_list = []
#cycle over each possible indirizzo
for x in possibilities:
#keep only the number
try:
number_list += [float(re.search(r'\d+', ' '.join(x)).group())]
#if no number then np.inf
except:
number_list += [np.inf]
#keep only the number
try:
number_fake = float(re.search(r'\d+', ' '.join(fake)).group())
#if it has no number assign the median of each indirizzo in possibilities
except:
#calculate median over each number of address
mode = median_modded(number_list)
#find correct address
mask = [x == mode for x in number_list]
pos = np.where(mask)[0]
#take indirizzo text
if len(pos)>0:
return(pos[0].item())
else:
return(pos.item())
#take indirizzo nearest to the one provided in fake
result = [abs(x - number_fake) for x in number_list]
#calculate final indirizzo
final_indirizzo = np.argmin(result)
return(final_indirizzo)
#calculate median of number list
def median_modded(lst):
#sort
sortedLst = sorted(lst)
lstLen = len(lst)
#take median element
index = (lstLen - 1) // 2
return sortedLst[index]
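# Worked example for median_modded (numbers invented): median_modded([12, 3, 7, 10]) -> 7,
# i.e. the lower of the two middle elements is returned for even-length lists.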
logger_aler = init_logger(log_file = args.path_etl2_log_aler)
#filter out home without any indirizzo
data = data.loc[data.indirizzo != ''].reset_index(drop = True)
#read open dataset
#aler list
aler_home = pd.read_pickle(os.path.join(args.path_openMilano, 'data_case_popolari.pkl'))
#nil
nil = pd.read_csv(os.path.join(args.path_datasetMilano, 'ds634_civici_coordinategeografiche.csv'))
#extract number address
aler_home['number_address'] = aler_home['number_address'].apply(lambda x: exctract_number(x))
#build indirizzo by concatenating address and number address
aler_home['indirizzo'] = aler_home['address'] + ' ' + aler_home['number_address']
#special aler formatter --> for later to calculate score of intersection with address lists
aler_home['indirizzo'] = aler_home['indirizzo'].apply(lambda x: aler_formatter(x))
#interest columns name
interest_col = ['RESIDENZIALE', 'MUNICIPIO', 'ID_NIL', 'NIL', 'TIPO', 'NUMEROCOMPLETO', 'DENOMINAZIONE']
#convert each to string
#for col in interest_col:
# nil[col] = nil[col].copy().astype(str)
#calculate mean for long and lat because we have more long lat for each address
nil_mean = nil.loc[
:, interest_col + ['LONG_WGS84', 'LAT_WGS84']
].reset_index(drop = True).groupby(interest_col).mean()
nil = pd.DataFrame(nil_mean).reset_index()
#change numero completo to str
nil['NUMEROCOMPLETO'] = nil['NUMEROCOMPLETO'].astype(str)
#take out row with long null (lat will be null also)
nil = nil[~nil['LONG_WGS84'].isnull()].reset_index(drop = True)
#little clean of Tipo ( Via, piazza, ...)
nil['TIPO'] = nil['TIPO'].apply(lambda s: s.lower())
#little clean and extraction of numero civico
#nil['NUMERO CIVICO'] = nil['NUMERO CIVICO'].apply(lambda s: number_nil_clean(s))
#little clean of denominazione via
#nil['DENOMINAZIONE'] = nil['DENOMINAZIONE'].apply(lambda s: s.lower())
#Build indirizzo by concatenating Tipo, denominazione via and numero civico
nil['indirizzo'] = nil['TIPO'] + ' ' + nil['DENOMINAZIONE'] + ' ' + nil['NUMEROCOMPLETO']
nil['indirizzo'] = nil['indirizzo'].apply(lambda x: x.lower())
#drop each duplicates
nil = nil.drop_duplicates(['DENOMINAZIONE', 'indirizzo', 'NUMEROCOMPLETO']).reset_index(drop = True)
#apply special space to add space to each special character --> word_tokenize --> sort --> join with ' ' to create join key
data['join_key'] = data['indirizzo'].apply(lambda s: ' '.join(sorted(word_tokenize(special_space(s)))))
nil['join_key'] = nil['indirizzo'].apply(lambda s: ' '.join(sorted(word_tokenize(special_space(abbreviazioni_replace(s))))))
################# ALER CHECKER
#join with nil to add to aler LONG, LAT
temp = aler_home.merge(nil[['join_key', 'LONG_WGS84', 'LAT_WGS84']],
how = 'left', left_on = 'indirizzo', right_on = 'join_key')['LONG_WGS84']
#find which LONG is missing
missing_pos = np.where(temp.isnull())[0].tolist()
#special cleaning for aler which deletes special characters --> word_tokenize --> create set
segment_1 = aler_home['indirizzo'].apply(lambda s: set(word_tokenize(special_delete(s))))
segment_2 = nil['indirizzo'].apply(lambda s: set(word_tokenize(special_delete(abbreviazioni_replace(s)))))
#correct each missing indirizzo by picking the nil indirizzo with the highest word/number intersection score
logger_aler.info('*'*100 + '\n\nBeginning scorer for aler\n\n')
aler_home.loc[missing_pos, 'indirizzo'] = scorer(segment_1 = segment_1,
segment_2 = segment_2,
indirizzo = nil['indirizzo'],
original = aler_home['indirizzo'],
missing_pos = missing_pos,
logger = logger_aler)
#take out every row with missing address after correction
mask_aler = aler_home['indirizzo'].isnull()
aler_home = aler_home.loc[~mask_aler].reset_index(drop = True)
#little clean and join with nil after correction of address
aler_home['indirizzo'] = aler_home['indirizzo'].apply(lambda x: aler_formatter(x))
#join with nil
aler_home = aler_home.merge(nil[['LONG_WGS84', 'LAT_WGS84', 'join_key']], how = 'left',
left_on = ['indirizzo'],
right_on = ['join_key'])
#drop join key and save
aler_home = aler_home.drop('join_key', axis = 1)
######################### SINGLE ERROR CHECK
#calculate set over join_key for scraped dataset and nil
fakeword = set(word_tokenize(' '.join(data['join_key'])))
realword = set(word_tokenize(' '.join(nil['join_key'])))
#list of real word
realword_list = list(realword)
#check and correct misspellings in the scraped addresses
#find word which are inside fakeword but not in realword
mispell = fakeword ^ realword & fakeword
#treat it as a misspelling only if the word is longer than 3 characters
mispell = [x for x in mispell if len(x)>3]
#find which words to delete
to_del = []
logger_data = init_logger(log_file = args.path_etl2_log_data)
logger_data.info('*'*100 + '\n\nBeginning Mispell Correction\n\n')
#cycle over each mispel and calculate edit_distance with nltk
for mis in mispell:
#calculate edit_distance with each real_word
dist_list = [nltk.edit_distance(x, mis) for x in realword_list]
#take min correction--> in case of ties select the first one
correct = realword_list[np.argmin(dist_list)]
#if mispel has distance equal to 1 correct
if np.min(dist_list)==1:
#print Mispel and correction
logger_data.info('Mispell: {}, Correct: {}'.format(mis, correct))
#if corrected cycle over each word and replace mispel
for r in range(data.shape[0]):
#replace mispel with correction
data.loc[r, 'indirizzo'] = data.loc[r, 'indirizzo'].replace(f'{mis}', f'{correct}')
#add mispel corrected to list
to_del += [mis]
#take out row with uncorrected mispel
row_with_mispell = [x for x in mispell if x not in to_del]
data = data[[np.sum([y in x for y in row_with_mispell])==0 for x in data.indirizzo]].reset_index(drop = True)
#special cleaning to create join_key
data['join_key'] = data['indirizzo'].apply(lambda s: ' '.join(sorted(word_tokenize(special_space(s)))))
#check if there are new words which don't match the real word list
joined_set = set(word_tokenize(' '.join(data['join_key'])))
joined_error = (joined_set ^ realword) & joined_set
for x in joined_error:
if len(x)>4:
print(f'Problem: {x}')
###########################
#merge with nil to get NIL
temp = data.merge(nil[['join_key', 'NIL']],
how = 'left', left_on = 'join_key', right_on = 'join_key')['NIL']
#take out NIL position
missing_pos = np.where(temp.isnull())[0].tolist()
#calculate (after mispel correction) set to calculate score of intersection
segment_1 = data['indirizzo'].apply(lambda s: set(word_tokenize(special_delete(s))))
segment_2 = nil['indirizzo'].apply(lambda s: set(word_tokenize(special_delete(abbreviazioni_replace(s)))))
#calculate score of intersection
logger_data.info('*'*100 + '\n\nBeginning scorer for scraped dataset\n\n')
data.loc[missing_pos, 'indirizzo'] = scorer(segment_1 = segment_1,
segment_2 = segment_2,
indirizzo = nil['indirizzo'],
original = data['indirizzo'],
missing_pos = missing_pos,
logger = logger_data)
#take out null address
data = data[~data['indirizzo'].isnull()].reset_index(drop = True)
#create join_key
data['join_key'] = data['indirizzo'].apply(lambda s: ' '.join(sorted(word_tokenize(special_space(s)))))
#merge with nil
data = data.merge(nil[['join_key','RESIDENZIALE', 'MUNICIPIO', 'ID_NIL', 'NIL', 'LONG_WGS84', 'LAT_WGS84']],
how = 'left', left_on = 'join_key', right_on = 'join_key')
return(data, aler_home)
def etl_geo(args, data):
def lower_cleaner_na(x):
if pd.isna(x):
return x
else:
return x.lower()
#retain only corrected store... Esselunga abc --> Esselunga
def correct_store(store, supermercati):
if pd.isna(store):
return(store)
for sup in supermercati:
if sup in store:
return(sup)
return(store)
def seconda_linea(x):
if len(x) == 1:
return('')
else:
return(re.sub(r'.*\,', '', x))
#take first element
def take_first(x):
while True:
dim = np.array(x, dtype = object).shape
if len(dim) == 2:
return(x)
x = x[0]
#calculate lowest distance to given element
def distanza_home_element(casa, element, long_label, lat_label, index):
#calculate long, lat of home
long_casa, lat_casa = casa.LONG_WGS84, casa.LAT_WGS84
vec = []
#calculate each distance to every store of same categories
for _, row in element.iterrows():
long_element, lat_element = row[long_label], row[lat_label]
dist = distance.distance((lat_casa, long_casa), (lat_element, long_element)).kilometers
vec += [dist]
if index:
return((np.min(vec), np.argmin(vec)))
else:
return(np.min(vec))
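# Illustrative note on the geopy call above: distance.distance((lat1, lon1), (lat2, lon2)).kilometers
# returns the geodesic distance in km, e.g. roughly 3 km between (45.464, 9.190) and (45.486, 9.216)
# (coordinates and figure are approximate, for illustration only).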
#calculate nearest distance vector
def dist_df(data, element, filter_var = None, label_filter = None, long_label = 'LONG', lat_label = 'LAT', index = False):
#if index is True return both the nearest distance and its index, otherwise only the distance
if index:
vec_dist, vec_idx = [], []
else:
vec_dist = []
#keep only element after filter
if (filter_var is not None) & (label_filter is not None):
element = element.loc[element[filter_var] == label_filter].reset_index(drop = True)
if (filter_var is not None) ^ (label_filter is not None):
raise ValueError("filter or label filter missing")
for _, row in tqdm(data.iterrows()):
row_result = distanza_home_element(row, element, long_label, lat_label, index)
if index:
vec_dist += [row_result[0]]
vec_idx += [row_result[1]]
else:
vec_dist += [row_result]
if index:
return((vec_dist, vec_idx))
else:
return(vec_dist)
#count how many stores are inside the selected radius
def radius_df(data, element, radius, filter_var = None, filter_label = None, long_label = 'LONG', lat_label = 'LAT'):
vec = []
#keep only element after filter
if (filter_var is not None) & (filter_label is not None):
element = element.loc[element[filter_var] == filter_label].reset_index(drop = True)
if (filter_var is not None) ^ (filter_label is not None):
raise ValueError("filter or label filter missing")
for _, row in tqdm(data.iterrows()):
vec += [sum_inside_radius_df(row, element, radius, long_label, lat_label)]
return(vec)
#calculate how many supermercati are inside radius
def sum_inside_radius_df(casa, element, radius, long_label, lat_label):
long_casa, lat_casa = casa.LONG_WGS84, casa.LAT_WGS84
vec = []
#find distance of each home-store
for _, row in element.iterrows():
long_store, lat_store = row[long_label], row[lat_label]
vec += [distance.distance((lat_casa, long_casa), (lat_store, long_store)).kilometers]
#count how many stores fall within the radius
vec = [x <= radius for x in vec]
result = np.sum(vec)
return(result)
#count how many crimes (reati) of a selected category fall inside the selected radius
def radius_json(data, element, radius, filter_label = None):
vec = []
#keep only element after filter
if (filter_label is not None):
element = element[filter_label]
for _, row in tqdm(data.iterrows()):
vec += [sum_inside_radius_json(row, element, radius)]
return(vec)
#count how many crimes of the selected category happened within the radius of the selected home
def sum_inside_radius_json(casa, element, radius):
long_casa, lat_casa = casa.LONG_WGS84, casa.LAT_WGS84
vec = [distance.distance((lat_casa, long_casa), (lat_store, long_store)).kilometers < radius for lat_store, long_store in element]
result = np.sum(vec)
return(result)
#drop missing lat, long
mask = (data.LONG_WGS84.isnull()) | (data.LONG_WGS84.isnull())
data = data[~mask].reset_index(drop = True)
try:
missing_file = 'economia_media_grande_distribuzione_coord.csv'
negozi = pd.read_csv(os.path.join(args.path_datasetMilano, 'economia_media_grande_distribuzione_coord.csv'))
missing_file = 'ds634_civici_coordinategeografiche.csv'
nil_geo = pd.read_csv(os.path.join(args.path_datasetMilano, 'ds634_civici_coordinategeografiche.csv'))
missing_file = 'tpl_metrofermate.geojson'
with open(os.path.join(args.path_datasetMilano, 'tpl_metrofermate.geojson')) as f:
fermate_json = json.load(f)
missing_file = 'parchi.geojson'
with open(os.path.join(args.path_datasetMilano, 'parchi.geojson')) as f:
parchi_json = json.load(f)
missing_file = 'scuole_infanzia.geojson'
with open(os.path.join(args.path_datasetMilano, 'scuole_infanzia.geojson')) as f:
scuole_infanzia_json = json.load(f)
missing_file = 'scuole_primarie.geojson'
with open(os.path.join(args.path_datasetMilano, 'scuole_primarie.geojson')) as f:
scuole_primarie_json = json.load(f)
missing_file = 'scuole_secondarie_1grado.geojson'
with open(os.path.join(args.path_datasetMilano, 'scuole_secondarie_1grado.geojson')) as f:
scuole_secondarie_json = json.load(f)
missing_file = 'scuole_secondarie_secondogrado.geojson'
with open(os.path.join(args.path_datasetMilano, 'scuole_secondarie_secondogrado.geojson')) as f:
scuole_secondarie_2_json = json.load(f)
missing_file = 'criminality_info.pkl'
criminality = pd.read_pickle(os.path.join(args.path_openMilano, 'criminality_info.pkl'))
del missing_file
except:
print(f'Missing file: {missing_file}\n')
#create dictionary of news: gpslocation
criminality = {x: [y['gps'] for y in criminality[x] if y['gps'] is not None] for x in criminality.keys()}
#drop join_key
data = data.drop('join_key', axis = 1)
#NEGOZI
#lowe cleaning
negozi['settore_merceologico'] = negozi['settore_merceologico'].apply(lambda x: lower_cleaner_na(x))
negozi['insegna'] = negozi['insegna'].apply(lambda x: lower_cleaner_na(x))
negozi['DescrizioneVia'] = negozi['DescrizioneVia'].apply(lambda x: lower_cleaner_na(x))
#correct store depending on supermercati
negozi['insegna_corretta'] = negozi['insegna'].apply(lambda x: correct_store(x, args.supermercati))
#keep only supermercati inside supermercati list
negozi = negozi[[x in args.supermercati for x in negozi['insegna_corretta']]]
#cleaning of columns and create mean of lat, long by description --> have only one value
nil_geo['DESCRIZIONE'] = (nil_geo.TIPO + ' ' + nil_geo.DENOMINAZIONE).apply(lambda x: lower_cleaner_na(x))
nil_geo = nil_geo[['DESCRIZIONE', 'LONG_WGS84', 'LAT_WGS84']].groupby('DESCRIZIONE').mean().reset_index()
#select the rows with missing coordinates so they can be filled from nil_geo
temp = negozi[negozi.LAT_WGS84.isnull()].copy()
#merge negozi with nil_geo to take coordinate
new_value = temp.merge(nil_geo, how = 'left', left_on = 'DescrizioneVia', right_on = 'DESCRIZIONE').iloc[:,-2:].values
#assign new lat, long value
negozi.loc[negozi.LAT_WGS84.isnull(),'LONG_WGS84'] = new_value[:,0]
negozi.loc[negozi.LAT_WGS84.isnull(),'LAT_WGS84'] = new_value[:,1]
#filter null row
negozi = negozi[~negozi['LONG_WGS84'].isnull()]
#check distance to store
print('Beginning Supermercati\n')
for store in args.supermercati:
print(f'\nStore: {store}\n')
data[store+'_distanza'] = dist_df(data, negozi, filter_var = 'insegna', label_filter = store, long_label = "LONG_WGS84", lat_label = "LAT_WGS84")
#count how many stores are in radius of radius_list
print('\nBeginning Supermercati radius\n')
for kilometer in args.radius_list:
print(f'\nRadius: {kilometer}\n')
data['store_radius_' + str(kilometer) + "_km"] = radius_df(data, negozi, kilometer, long_label = "LONG_WGS84", lat_label = "LAT_WGS84")
#for each home, take the minimum distance across the per-store distance columns
mask_column = [x for x in data.columns if re.search('distanza', x)]
data['distanza_minima_supermercato'] = data[mask_column].min(axis = 1)
#find the nearest supermarket
mask_column = [x for x in data.columns if re.search('distanza', x)]
data['supermercato_vicino'] = data[mask_column].idxmin(axis = 1).apply(lambda x: re.sub('_distanza', '', x))
#clean fermate_json
fermate = | pd.json_normalize(fermate_json['features']) | pandas.json_normalize |
from collections import OrderedDict
from datetime import timedelta
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Series,
Timedelta,
Timestamp,
_np_version_under1p14,
concat,
date_range,
option_context,
)
from pandas.core.arrays import integer_array
import pandas.util.testing as tm
def _check_cast(df, v):
"""
Check if all dtypes of df are equal to v
"""
assert all(s.dtype.name == v for _, s in df.items())
class TestDataFrameDataTypes:
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
def test_empty_frame_dtypes_ftypes(self):
empty_df = pd.DataFrame()
tm.assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))
nocols_df = pd.DataFrame(index=[1, 2, 3])
tm.assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))
norows_df = pd.DataFrame(columns=list("abc"))
tm.assert_series_equal(
norows_df.dtypes, pd.Series(np.object, index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_df.ftypes, pd.Series("object:dense", index=list("abc"))
)
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
tm.assert_series_equal(
norows_int_df.dtypes, pd.Series(np.dtype("int32"), index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_int_df.ftypes, pd.Series("int32:dense", index=list("abc"))
)
odict = OrderedDict
df = pd.DataFrame(odict([("a", 1), ("b", True), ("c", 1.0)]), index=[1, 2, 3])
ex_dtypes = pd.Series(
odict([("a", np.int64), ("b", np.bool), ("c", np.float64)])
)
ex_ftypes = pd.Series(
odict([("a", "int64:dense"), ("b", "bool:dense"), ("c", "float64:dense")])
)
tm.assert_series_equal(df.dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(df.ftypes, ex_ftypes)
# same but for empty slice of df
tm.assert_series_equal(df[:0].dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(df[:0].ftypes, ex_ftypes)
def test_datetime_with_tz_dtypes(self):
tzframe = DataFrame(
{
"A": date_range("20130101", periods=3),
"B": date_range("20130101", periods=3, tz="US/Eastern"),
"C": date_range("20130101", periods=3, tz="CET"),
}
)
tzframe.iloc[1, 1] = pd.NaT
tzframe.iloc[1, 2] = pd.NaT
result = tzframe.dtypes.sort_index()
expected = Series(
[
np.dtype("datetime64[ns]"),
DatetimeTZDtype("ns", "US/Eastern"),
DatetimeTZDtype("ns", "CET"),
],
["A", "B", "C"],
)
tm.assert_series_equal(result, expected)
def test_dtypes_are_correct_after_column_slice(self):
# GH6525
df = pd.DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
odict = OrderedDict
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
tm.assert_series_equal(
df.iloc[:, 2:].dtypes, pd.Series(odict([("c", np.float_)]))
)
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
def test_select_dtypes_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=[np.number])
ei = df[["b", "c", "d", "k"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number], exclude=["timedelta"])
ei = df[["b", "c", "d"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, "category"], exclude=["timedelta"])
ei = df[["b", "c", "d", "f"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime64"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetimetz"])
ei = df[["h", "i"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(include=["period"])
def test_select_dtypes_exclude_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
}
)
re = df.select_dtypes(exclude=[np.number])
ee = df[["a", "e"]]
tm.assert_frame_equal(re, ee)
def test_select_dtypes_exclude_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
exclude = (np.datetime64,)
include = np.bool_, "integer"
r = df.select_dtypes(include=include, exclude=exclude)
e = df[["b", "c", "e"]]
tm.assert_frame_equal(r, e)
exclude = ("datetime",)
include = "bool", "int64", "int32"
r = df.select_dtypes(include=include, exclude=exclude)
e = df[["b", "e"]]
tm.assert_frame_equal(r, e)
def test_select_dtypes_include_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number)
ei = df[["b", "c", "d", "k"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="datetime")
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="datetime64")
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="category")
ei = df[["f"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(include="period")
def test_select_dtypes_exclude_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(exclude=np.number)
ei = df[["a", "e", "f", "g", "h", "i", "j"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(exclude="category")
ei = df[["a", "b", "c", "d", "e", "g", "h", "i", "j", "k"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(exclude="period")
def test_select_dtypes_include_exclude_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number, exclude="floating")
ei = df[["b", "c", "k"]]
tm.assert_frame_equal(ri, ei)
def test_select_dtypes_include_exclude_mixed_scalars_lists(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number, exclude=["floating", "timedelta"])
ei = df[["b", "c"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, "category"], exclude="floating")
ei = df[["b", "c", "f", "k"]]
tm.assert_frame_equal(ri, ei)
def test_select_dtypes_duplicate_columns(self):
# GH20839
odict = OrderedDict
df = DataFrame(
odict(
[
("a", list("abc")),
("b", list(range(1, 4))),
("c", np.arange(3, 6).astype("u1")),
("d", np.arange(4.0, 7.0, dtype="float64")),
("e", [True, False, True]),
("f", pd.date_range("now", periods=3).values),
]
)
)
df.columns = ["a", "a", "b", "b", "b", "c"]
expected = DataFrame(
{"a": list(range(1, 4)), "b": np.arange(3, 6).astype("u1")}
)
result = df.select_dtypes(include=[np.number], exclude=["floating"])
tm.assert_frame_equal(result, expected)
def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
df["g"] = df.f.diff()
assert not hasattr(np, "u8")
r = df.select_dtypes(include=["i8", "O"], exclude=["timedelta"])
e = df[["a", "b"]]
tm.assert_frame_equal(r, e)
r = df.select_dtypes(include=["i8", "O", "timedelta64[ns]"])
e = df[["a", "b", "g"]]
tm.assert_frame_equal(r, e)
def test_select_dtypes_empty(self):
df = DataFrame({"a": list("abc"), "b": list(range(1, 4))})
msg = "at least one of include or exclude must be nonempty"
with pytest.raises(ValueError, match=msg):
df.select_dtypes()
def test_select_dtypes_bad_datetime64(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
with pytest.raises(ValueError, match=".+ is too specific"):
df.select_dtypes(include=["datetime64[D]"])
with pytest.raises(ValueError, match=".+ is too specific"):
df.select_dtypes(exclude=["datetime64[as]"])
def test_select_dtypes_datetime_with_tz(self):
df2 = DataFrame(
dict(
A=Timestamp("20130102", tz="US/Eastern"),
B=Timestamp("20130603", tz="CET"),
),
index=range(5),
)
df3 = pd.concat([df2.A.to_frame(), df2.B.to_frame()], axis=1)
result = df3.select_dtypes(include=["datetime64[ns]"])
expected = df3.reindex(columns=[])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype", [str, "str", np.string_, "S1", "unicode", np.unicode_, "U1"]
)
@pytest.mark.parametrize("arg", ["include", "exclude"])
def test_select_dtypes_str_raises(self, dtype, arg):
df = DataFrame(
{
"a": list("abc"),
"g": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
msg = "string dtypes are not allowed"
kwargs = {arg: [dtype]}
with pytest.raises(TypeError, match=msg):
df.select_dtypes(**kwargs)
def test_select_dtypes_bad_arg_raises(self):
df = DataFrame(
{
"a": list("abc"),
"g": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
msg = "data type.*not understood"
with pytest.raises(TypeError, match=msg):
df.select_dtypes(["blargy, blarg, blarg"])
def test_select_dtypes_typecodes(self):
# GH 11990
df = tm.makeCustomDataframe(30, 3, data_gen_f=lambda x, y: np.random.random())
expected = df
FLOAT_TYPES = list(np.typecodes["AllFloat"])
tm.assert_frame_equal(df.select_dtypes(FLOAT_TYPES), expected)
def test_dtypes_gh8722(self, float_string_frame):
float_string_frame["bool"] = float_string_frame["A"] > 0
result = float_string_frame.dtypes
expected = Series(
{k: v.dtype for k, v in float_string_frame.items()}, index=result.index
)
tm.assert_series_equal(result, expected)
# compat, GH 8722
with option_context("use_inf_as_na", True):
df = DataFrame([[1]])
result = df.dtypes
tm.assert_series_equal(result, Series({0: np.dtype("int64")}))
def test_ftypes(self, mixed_float_frame):
frame = mixed_float_frame
expected = Series(
dict(
A="float32:dense",
B="float32:dense",
C="float16:dense",
D="float64:dense",
)
).sort_values()
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
result = frame.ftypes.sort_values()
tm.assert_series_equal(result, expected)
def test_astype_float(self, float_frame):
casted = float_frame.astype(int)
expected = DataFrame(
float_frame.values.astype(int),
index=float_frame.index,
columns=float_frame.columns,
)
tm.assert_frame_equal(casted, expected)
casted = float_frame.astype(np.int32)
expected = DataFrame(
float_frame.values.astype(np.int32),
index=float_frame.index,
columns=float_frame.columns,
)
tm.assert_frame_equal(casted, expected)
float_frame["foo"] = "5"
casted = float_frame.astype(int)
expected = DataFrame(
float_frame.values.astype(int),
index=float_frame.index,
columns=float_frame.columns,
)
tm.assert_frame_equal(casted, expected)
def test_astype_mixed_float(self, mixed_float_frame):
# mixed casting
casted = mixed_float_frame.reindex(columns=["A", "B"]).astype("float32")
_check_cast(casted, "float32")
casted = mixed_float_frame.reindex(columns=["A", "B"]).astype("float16")
_check_cast(casted, "float16")
def test_astype_mixed_type(self, mixed_type_frame):
# mixed casting
mn = mixed_type_frame._get_numeric_data().copy()
mn["little_float"] = np.array(12345.0, dtype="float16")
mn["big_float"] = np.array(123456789101112.0, dtype="float64")
casted = mn.astype("float64")
_check_cast(casted, "float64")
casted = mn.astype("int64")
_check_cast(casted, "int64")
casted = mn.reindex(columns=["little_float"]).astype("float16")
_check_cast(casted, "float16")
casted = mn.astype("float32")
_check_cast(casted, "float32")
casted = mn.astype("int32")
_check_cast(casted, "int32")
# to object
casted = mn.astype("O")
_check_cast(casted, "object")
def test_astype_with_exclude_string(self, float_frame):
df = float_frame.copy()
expected = float_frame.astype(int)
df["string"] = "foo"
casted = df.astype(int, errors="ignore")
expected["string"] = "foo"
tm.assert_frame_equal(casted, expected)
df = float_frame.copy()
expected = float_frame.astype(np.int32)
df["string"] = "foo"
casted = df.astype(np.int32, errors="ignore")
expected["string"] = "foo"
tm.assert_frame_equal(casted, expected)
def test_astype_with_view_float(self, float_frame):
# this is the only real reason to do it this way
tf = np.round(float_frame).astype(np.int32)
casted = tf.astype(np.float32, copy=False)
# TODO(wesm): verification?
tf = float_frame.astype(np.float64)
casted = tf.astype(np.int64, copy=False) # noqa
def test_astype_with_view_mixed_float(self, mixed_float_frame):
tf = mixed_float_frame.reindex(columns=["A", "B", "C"])
casted = tf.astype(np.int64)
casted = tf.astype(np.float32) # noqa
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
@pytest.mark.parametrize("val", [np.nan, np.inf])
def test_astype_cast_nan_inf_int(self, val, dtype):
# see gh-14265
#
# Check NaN and inf --> raise error when converting to int.
msg = "Cannot convert non-finite values \\(NA or inf\\) to integer"
df = DataFrame([val])
with pytest.raises(ValueError, match=msg):
df.astype(dtype)
def test_astype_str(self):
# see gh-9757
a = Series(date_range("2010-01-04", periods=5))
b = Series(date_range("3/6/2012 00:00", periods=5, tz="US/Eastern"))
c = Series([Timedelta(x, unit="d") for x in range(5)])
d = Series(range(5))
e = Series([0.0, 0.2, 0.4, 0.6, 0.8])
df = DataFrame({"a": a, "b": b, "c": c, "d": d, "e": e})
# Datetime-like
result = df.astype(str)
expected = DataFrame(
{
"a": list(map(str, map(lambda x: Timestamp(x)._date_repr, a._values))),
"b": list(map(str, map(Timestamp, b._values))),
"c": list(
map(
str,
map(lambda x: Timedelta(x)._repr_base(format="all"), c._values),
)
),
"d": list(map(str, d._values)),
"e": list(map(str, e._values)),
}
)
tm.assert_frame_equal(result, expected)
def test_astype_str_float(self):
# see gh-11302
result = DataFrame([np.NaN]).astype(str)
expected = DataFrame(["nan"])
tm.assert_frame_equal(result, expected)
result = DataFrame([1.12345678901234567890]).astype(str)
# < 1.14 truncates
# >= 1.14 preserves the full repr
val = "1.12345678901" if _np_version_under1p14 else "1.1234567890123457"
expected = DataFrame([val])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype_class", [dict, Series])
def test_astype_dict_like(self, dtype_class):
# GH7271 & GH16717
a = Series(date_range("2010-01-04", periods=5))
b = Series(range(5))
c = Series([0.0, 0.2, 0.4, 0.6, 0.8])
d = Series(["1.0", "2", "3.14", "4", "5.4"])
df = DataFrame({"a": a, "b": b, "c": c, "d": d})
original = df.copy(deep=True)
# change type of a subset of columns
dt1 = dtype_class({"b": "str", "d": "float32"})
result = df.astype(dt1)
expected = DataFrame(
{
"a": a,
"b": Series(["0", "1", "2", "3", "4"]),
"c": c,
"d": Series([1.0, 2.0, 3.14, 4.0, 5.4], dtype="float32"),
}
)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df, original)
dt2 = dtype_class({"b": np.float32, "c": "float32", "d": np.float64})
result = df.astype(dt2)
expected = DataFrame(
{
"a": a,
"b": Series([0.0, 1.0, 2.0, 3.0, 4.0], dtype="float32"),
"c": Series([0.0, 0.2, 0.4, 0.6, 0.8], dtype="float32"),
"d": Series([1.0, 2.0, 3.14, 4.0, 5.4], dtype="float64"),
}
)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df, original)
# change all columns
dt3 = dtype_class({"a": str, "b": str, "c": str, "d": str})
tm.assert_frame_equal(df.astype(dt3), df.astype(str))
tm.assert_frame_equal(df, original)
# error should be raised when using something other than column labels
# in the keys of the dtype dict
dt4 = dtype_class({"b": str, 2: str})
dt5 = dtype_class({"e": str})
msg = "Only a column name can be used for the key in a dtype mappings argument"
with pytest.raises(KeyError, match=msg):
df.astype(dt4)
with pytest.raises(KeyError, match=msg):
df.astype(dt5)
tm.assert_frame_equal(df, original)
# if the dtypes provided are the same as the original dtypes, the
# resulting DataFrame should be the same as the original DataFrame
dt6 = dtype_class({col: df[col].dtype for col in df.columns})
equiv = df.astype(dt6)
tm.assert_frame_equal(df, equiv)
tm.assert_frame_equal(df, original)
# GH 16717
# if dtypes provided is empty, the resulting DataFrame
# should be the same as the original DataFrame
dt7 = dtype_class({})
result = df.astype(dt7)
tm.assert_frame_equal(df, equiv)
tm.assert_frame_equal(df, original)
def test_astype_duplicate_col(self):
a1 = Series([1, 2, 3, 4, 5], name="a")
b = Series([0.1, 0.2, 0.4, 0.6, 0.8], name="b")
a2 = Series([0, 1, 2, 3, 4], name="a")
df = concat([a1, b, a2], axis=1)
result = df.astype(str)
a1_str = Series(["1", "2", "3", "4", "5"], dtype="str", name="a")
b_str = Series(["0.1", "0.2", "0.4", "0.6", "0.8"], dtype=str, name="b")
a2_str = Series(["0", "1", "2", "3", "4"], dtype="str", name="a")
expected = concat([a1_str, b_str, a2_str], axis=1)
tm.assert_frame_equal(result, expected)
result = df.astype({"a": "str"})
expected = concat([a1_str, b, a2_str], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype",
[
"category",
CategoricalDtype(),
CategoricalDtype(ordered=True),
CategoricalDtype(ordered=False),
CategoricalDtype(categories=list("abcdef")),
CategoricalDtype(categories=list("edba"), ordered=False),
CategoricalDtype(categories=list("edcb"), ordered=True),
],
ids=repr,
)
def test_astype_categorical(self, dtype):
# GH 18099
d = {"A": list("abbc"), "B": list("bccd"), "C": list("cdde")}
df = DataFrame(d)
result = df.astype(dtype)
expected = DataFrame({k: Categorical(d[k], dtype=dtype) for k in d})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"cls",
[
pd.api.types.CategoricalDtype,
pd.api.types.DatetimeTZDtype,
pd.api.types.IntervalDtype,
],
)
def test_astype_categoricaldtype_class_raises(self, cls):
df = DataFrame({"A": ["a", "a", "b", "c"]})
xpr = "Expected an instance of {}".format(cls.__name__)
with pytest.raises(TypeError, match=xpr):
df.astype({"A": cls})
with pytest.raises(TypeError, match=xpr):
df["A"].astype(cls)
@pytest.mark.parametrize("dtype", ["Int64", "Int32", "Int16"])
def test_astype_extension_dtypes(self, dtype):
# GH 22578
df = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], columns=["a", "b"])
expected1 = pd.DataFrame(
{
"a": integer_array([1, 3, 5], dtype=dtype),
"b": integer_array([2, 4, 6], dtype=dtype),
}
)
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1)
tm.assert_frame_equal(df.astype(dtype).astype("float64"), df)
df = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], columns=["a", "b"])
df["b"] = df["b"].astype(dtype)
expected2 = pd.DataFrame(
{"a": [1.0, 3.0, 5.0], "b": integer_array([2, 4, 6], dtype=dtype)}
)
tm.assert_frame_equal(df, expected2)
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1)
@pytest.mark.parametrize("dtype", ["Int64", "Int32", "Int16"])
def test_astype_extension_dtypes_1d(self, dtype):
# GH 22578
df = pd.DataFrame({"a": [1.0, 2.0, 3.0]})
expected1 = pd.DataFrame({"a": integer_array([1, 2, 3], dtype=dtype)})
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1)
df = pd.DataFrame({"a": [1.0, 2.0, 3.0]})
df["a"] = df["a"].astype(dtype)
expected2 = pd.DataFrame({"a": integer_array([1, 2, 3], dtype=dtype)})
tm.assert_frame_equal(df, expected2)
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1)
@pytest.mark.parametrize("dtype", ["category", "Int64"])
def test_astype_extension_dtypes_duplicate_col(self, dtype):
# GH 24704
a1 = Series([0, np.nan, 4], name="a")
a2 = Series([np.nan, 3, 5], name="a")
df = concat([a1, a2], axis=1)
result = df.astype(dtype)
expected = concat([a1.astype(dtype), a2.astype(dtype)], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype", [{100: "float64", 200: "uint64"}, "category", "float64"]
)
def test_astype_column_metadata(self, dtype):
# GH 19920
columns = pd.UInt64Index([100, 200, 300], name="foo")
df = DataFrame(np.arange(15).reshape(5, 3), columns=columns)
df = df.astype(dtype)
tm.assert_index_equal(df.columns, columns)
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_astype_from_datetimelike_to_object(self, dtype, unit):
# tests astype to object dtype
# gh-19223 / gh-12425
dtype = "{}[{}]".format(dtype, unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(object)
assert (result.dtypes == object).all()
if dtype.startswith("M8"):
assert result.iloc[0, 0] == pd.to_datetime(1, unit=unit)
else:
assert result.iloc[0, 0] == pd.to_timedelta(1, unit=unit)
@pytest.mark.parametrize("arr_dtype", [np.int64, np.float64])
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_astype_to_datetimelike_unit(self, arr_dtype, dtype, unit):
# tests all units from numeric origination
# gh-19223 / gh-12425
dtype = "{}[{}]".format(dtype, unit)
arr = np.array([[1, 2, 3]], dtype=arr_dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(arr.astype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_astype_to_datetime_unit(self, unit):
# tests all units from datetime origination
# gh-19223
dtype = "M8[{}]".format(unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(arr.astype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["ns"])
def test_astype_to_timedelta_unit_ns(self, unit):
# preserve the timedelta conversion
# gh-19223
dtype = "m8[{}]".format(unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(arr.astype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["us", "ms", "s", "h", "m", "D"])
def test_astype_to_timedelta_unit(self, unit):
# coerce to float
# gh-19223
dtype = "m8[{}]".format(unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(df.values.astype(dtype).astype(float))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_astype_to_incorrect_datetimelike(self, unit):
# trying to astype a m to a M, or vice-versa
# gh-19224
dtype = "M8[{}]".format(unit)
other = "m8[{}]".format(unit)
df = DataFrame(np.array([[1, 2, 3]], dtype=dtype))
msg = (
r"cannot astype a datetimelike from \[datetime64\[ns\]\] to"
r" \[timedelta64\[{}\]\]"
).format(unit)
with pytest.raises(TypeError, match=msg):
df.astype(other)
msg = (
r"cannot astype a timedelta from \[timedelta64\[ns\]\] to"
r" \[datetime64\[{}\]\]"
).format(unit)
df = DataFrame(np.array([[1, 2, 3]], dtype=other))
with pytest.raises(TypeError, match=msg):
df.astype(dtype)
def test_timedeltas(self):
df = DataFrame(
dict(
A=Series(date_range("2012-1-1", periods=3, freq="D")),
B=Series([timedelta(days=i) for i in range(3)]),
)
)
result = df.dtypes
expected = Series(
[np.dtype("datetime64[ns]"), np.dtype("timedelta64[ns]")], index=list("AB")
)
tm.assert_series_equal(result, expected)
df["C"] = df["A"] + df["B"]
result = df.dtypes
expected = Series(
[
np.dtype("datetime64[ns]"),
np.dtype("timedelta64[ns]"),
np.dtype("datetime64[ns]"),
],
index=list("ABC"),
)
tm.assert_series_equal(result, expected)
# mixed int types
df["D"] = 1
result = df.dtypes
expected = Series(
[
np.dtype("datetime64[ns]"),
np.dtype("timedelta64[ns]"),
np.dtype("datetime64[ns]"),
np.dtype("int64"),
],
index=list("ABCD"),
)
tm.assert_series_equal(result, expected)
def test_arg_for_errors_in_astype(self):
# issue #14878
df = DataFrame([1, 2, 3])
with pytest.raises(ValueError):
df.astype(np.float64, errors=True)
df.astype(np.int8, errors="ignore")
def test_arg_for_errors_in_astype_dictlist(self):
# GH-25905
df = pd.DataFrame(
[
{"a": "1", "b": "16.5%", "c": "test"},
{"a": "2.2", "b": "15.3", "c": "another_test"},
]
)
expected = pd.DataFrame(
[
{"a": 1.0, "b": "16.5%", "c": "test"},
{"a": 2.2, "b": "15.3", "c": "another_test"},
]
)
type_dict = {"a": "float64", "b": "float64", "c": "object"}
result = df.astype(dtype=type_dict, errors="ignore")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements are converted to strings when
# dtype is str, 'str', or 'U'
result = DataFrame({"A": input_vals}, dtype=string_dtype)
expected = DataFrame({"A": input_vals}).astype({"A": string_dtype})
tm.assert_frame_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = DataFrame({"A": [1.0, 2.0, None]}, dtype=string_dtype)
expected = DataFrame({"A": ["1.0", "2.0", None]}, dtype=object)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data, expected",
[
# empty
(DataFrame(), True),
# multi-same
(DataFrame({"A": [1, 2], "B": [1, 2]}), True),
# multi-object
(
DataFrame(
{
"A": np.array([1, 2], dtype=object),
"B": np.array(["a", "b"], dtype=object),
}
),
True,
),
# multi-extension
(
DataFrame(
{"A": pd.Categorical(["a", "b"]), "B": pd.Categorical(["a", "b"])}
),
True,
),
# differ types
(DataFrame({"A": [1, 2], "B": [1.0, 2.0]}), False),
# differ sizes
(
DataFrame(
{
"A": np.array([1, 2], dtype=np.int32),
"B": np.array([1, 2], dtype=np.int64),
}
),
False,
),
# multi-extension differ
(
DataFrame(
{"A": pd.Categorical(["a", "b"]), "B": pd.Categorical(["b", "c"])}
),
False,
),
],
)
def test_is_homogeneous_type(self, data, expected):
assert data._is_homogeneous_type is expected
def test_asarray_homogenous(self):
df = pd.DataFrame({"A": pd.Categorical([1, 2]), "B": pd.Categorical([1, 2])})
result = np.asarray(df)
# may change from object in the future
expected = np.array([[1, 1], [2, 2]], dtype="object")
tm.assert_numpy_array_equal(result, expected)
def test_str_to_small_float_conversion_type(self):
# GH 20388
np.random.seed(13)
col_data = [str(np.random.random() * 1e-12) for _ in range(5)]
result = pd.DataFrame(col_data, columns=["A"])
expected = pd.DataFrame(col_data, columns=["A"], dtype=object)
tm.assert_frame_equal(result, expected)
# change the dtype of the elements from object to float one by one
result.loc[result.index, "A"] = [float(x) for x in col_data]
expected = pd.DataFrame(col_data, columns=["A"], dtype=float)
tm.assert_frame_equal(result, expected)
class TestDataFrameDatetimeWithTZ:
def test_interleave(self, timezone_frame):
# interleave with object
result = timezone_frame.assign(D="foo").values
expected = np.array(
[
[
Timestamp("2013-01-01 00:00:00"),
Timestamp("2013-01-02 00:00:00"),
Timestamp("2013-01-03 00:00:00"),
],
[
Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern"),
pd.NaT,
Timestamp("2013-01-03 00:00:00-0500", tz="US/Eastern"),
],
[
Timestamp("2013-01-01 00:00:00+0100", tz="CET"),
pd.NaT,
Timestamp("2013-01-03 00:00:00+0100", tz="CET"),
],
["foo", "foo", "foo"],
],
dtype=object,
).T
tm.assert_numpy_array_equal(result, expected)
# interleave with only datetime64[ns]
result = timezone_frame.values
expected = np.array(
[
[
Timestamp("2013-01-01 00:00:00"),
Timestamp("2013-01-02 00:00:00"),
Timestamp("2013-01-03 00:00:00"),
],
[
Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern"),
pd.NaT,
Timestamp("2013-01-03 00:00:00-0500", tz="US/Eastern"),
],
[
Timestamp("2013-01-01 00:00:00+0100", tz="CET"),
pd.NaT,
Timestamp("2013-01-03 00:00:00+0100", tz="CET"),
],
],
dtype=object,
).T
tm.assert_numpy_array_equal(result, expected)
def test_astype(self, timezone_frame):
# astype
expected = np.array(
[
[
Timestamp("2013-01-01 00:00:00"),
Timestamp("2013-01-02 00:00:00"),
Timestamp("2013-01-03 00:00:00"),
],
[
Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern"),
pd.NaT,
Timestamp("2013-01-03 00:00:00-0500", tz="US/Eastern"),
"""
content level plays, timespent and ratings by week
"""
import json
import sys, time
import pdb
import os
import pandas as pd
from datetime import datetime, timedelta, date
from pathlib import Path
from string import Template
from azure.common import AzureMissingResourceHttpError
from cassandra.cluster import Cluster
from dataproducts.util.utils import create_json, get_tenant_info, get_data_from_blob, \
post_data_to_blob, get_content_model, get_content_plays, push_metric_event
class ContentConsumption:
def __init__(self, data_store_location, org_search, druid_hostname,
cassandra_host, keyspace_prefix, execution_date=date.today().strftime("%d/%m/%Y")):
self.data_store_location = data_store_location
self.org_search = org_search
self.druid_hostname = druid_hostname
self.cassandra_host = cassandra_host
self.keyspace_prefix = keyspace_prefix
self.execution_date = execution_date
self.config = {}
def mime_type(self, series):
"""
map the content format into preset buckets
:param series: a single mimeType value (this method is applied element-wise via Series.apply)
:return: str bucket label, or None for unrecognised mime types
"""
if series == 'video/x-youtube':
return 'YouTube Content'
elif series == 'application/vnd.ekstep.ecml-archive':
return 'Created on Diksha'
elif series == 'video/mp4' or series == 'video/webm':
return 'Uploaded Videos'
elif series == 'application/pdf' or series == 'application/epub':
return 'Text Content'
elif series == 'application/vnd.ekstep.html-archive' or series == 'application/vnd.ekstep.h5p-archive':
return 'Uploaded Interactive Content'
else:
return None
def define_keyspace(self, cassandra_, keyspace_, replication_factor_=1):
"""
given cassandra cluster, keyspace and replication factor, ensure the keyspace and table exist
:param cassandra_: IP address of the Cassandra cluster
:param keyspace_: Keyspace name for the cassandra cluster
:param replication_factor_: replication factor used in cassandra cluster
:return: None
"""
cluster = Cluster([cassandra_])
session = cluster.connect()
keyspace_query = Template("""CREATE KEYSPACE IF NOT EXISTS $keyspace WITH replication = {
'class': 'SimpleStrategy',
'replication_factor': '$replication_factor'
}""")
session.execute(keyspace_query.substitute(keyspace=keyspace_, replication_factor=replication_factor_))
table_query = Template("""CREATE TABLE IF NOT EXISTS $keyspace.content_aggregates (
content_id text,
period int,
pdata_id text,
metric map<text, double>,
PRIMARY KEY (content_id, period, pdata_id)
)""")
session.execute(table_query.substitute(keyspace=keyspace_))
def insert_data_to_cassandra(self, result_loc_, date_, cassandra_, keyspace_):
"""
Insert the content plays and timespent data into cassandra with primary key on content id, date and pdata_id
:param result_loc_: local path to store resultant csv
:param date_: datetime object to pass to file path
:param cassandra_: ip of the cassandra cluster
:param keyspace_: keyspace in which we are working
:return: None
"""
data = pd.read_csv(result_loc_.joinpath(date_.strftime('%Y-%m-%d'), 'content_plays.csv'))
cluster = Cluster([cassandra_])
session = cluster.connect()
insert_query = Template("""INSERT INTO $keyspace.content_aggregates(content_id, period, pdata_id, metric)
VALUES ('$content_id', $period, '$pdata_id', {'plays': $plays, 'timespent': $timespent})""")
for ind, row in data.iterrows():
content_id = row['object_id']
period = row['Date']
pdata_id = row['dimensions_pdata_id']
plays = row['Number of plays']
timespent = row['Total time spent']
session.execute(
insert_query.substitute(keyspace=keyspace_, content_id=content_id, period=period, pdata_id=pdata_id,
plays=plays, timespent=timespent))
session.shutdown()
cluster.shutdown()
def get_weekly_plays(self, result_loc_, date_, cassandra_, keyspace_):
"""
query cassandra table for 1 week of content play and timespent.
:param result_loc_: local path to store resultant csv
:param date_: datetime object to pass to file path
:param cassandra_: ip of the cassandra cluster
:param keyspace_: keyspace in which we are working
:return: None
"""
tenant_info = pd.read_csv(result_loc_.joinpath(date_.strftime('%Y-%m-%d'), 'tenant_info.csv'))[['id', 'slug']]
tenant_info['id'] = tenant_info['id'].astype(str)
tenant_info.set_index('id', inplace=True)
cluster = Cluster([cassandra_])
session = cluster.connect()
start_date = date_ - timedelta(days=7)
fetch_query = Template("""
SELECT content_id, period, pdata_id, metric FROM $keyspace.content_aggregates WHERE
period >= $start_date AND
period < $end_date
ALLOW FILTERING
""")
result = session.execute(fetch_query.substitute(keyspace=keyspace_, start_date=start_date.strftime('%Y%m%d'),
end_date=date_.strftime('%Y%m%d')))
df_dict = {}
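# Fold the daily Cassandra rows into one record per content id, keeping App and Portal
# plays/timespent in separate fields so the weekly roll-up below can report them side by side.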
for row in result:
if row.content_id in df_dict.keys():
pass
else:
df_dict[row.content_id] = {
'identifier': row.content_id,
'Number of Plays on App': 0,
'Number of Plays on Portal': 0,
'Timespent on App': 0,
'Timespent on Portal': 0
}
pdata_id = 'App' if row.pdata_id == self.config['context']['pdata']['id']['app'] else 'Portal' if \
row.pdata_id == self.config['context']['pdata']['id']['portal'] else 'error'
df_dict[row.content_id]['Number of Plays on ' + pdata_id] += row.metric['plays']
df_dict[row.content_id]['Timespent on ' + pdata_id] += row.metric['timespent']
temp = []
for k, v in df_dict.items():
temp.append(v)
df = pd.DataFrame(temp)
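# Derive the weekly roll-ups: total plays across both platforms and average play time in minutes
# (overall, App-only and Portal-only), which feed the per-tenant content_aggregates reports below.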
df['Total No of Plays (App and Portal)'] = df['Number of Plays on App'] + df['Number of Plays on Portal']
df['Average Play Time in mins on App'] = round(df['Timespent on App'] / (60 * df['Number of Plays on App']), 2)
df['Average Play Time in mins on Portal'] = round(
df['Timespent on Portal'] / (60 * df['Number of Plays on Portal']), 2)
df['Average Play Time in mins (On App and Portal)'] = round(
(df['Timespent on App'] + df['Timespent on Portal']) / (60 * df['Total No of Plays (App and Portal)']), 2)
df = df[['identifier', 'Total No of Plays (App and Portal)', 'Number of Plays on App', 'Number of Plays on Portal',
'Average Play Time in mins (On App and Portal)',
'Average Play Time in mins on App',
'Average Play Time in mins on Portal']]
content_model = pd.read_csv(result_loc_.joinpath(date_.strftime('%Y-%m-%d'), 'content_model_snapshot.csv'))[
['channel', 'board', 'medium', 'gradeLevel', 'subject', 'identifier', 'name', 'mimeType', 'createdOn', 'creator',
'lastPublishedOn', 'me_averageRating']]
content_model["creator"] = content_model["creator"].str.replace("null", "")
content_model['channel'] = content_model['channel'].astype(str)
content_model['mimeType'] = content_model['mimeType'].apply(self.mime_type)
content_model.columns = ['channel', 'Board', 'Medium', 'Grade', 'Subject', 'Content ID', 'Content Name',
'Mime Type', 'Created On', 'Creator (User Name)', 'Last Published On',
'Average Rating(out of 5)']
content_model['Content ID'] = content_model['Content ID'].str.replace(".img", "")
content_model['Created On'] = content_model['Created On'].fillna('T').apply(
lambda x: '-'.join(x.split('T')[0].split('-')[::-1]))
content_model['Last Published On'] = content_model['Last Published On'].fillna('T').apply(
lambda x: '-'.join(x.split('T')[0].split('-')[::-1]))
# content_model['Last Updated On'] = content_model['Last Updated On'].fillna('T').apply(
# lambda x: '-'.join(x.split('T')[0].split('-')[::-1]))
df = content_model.join(df.set_index('identifier'), on='Content ID', how='left')
df['Last Date of the week'] = (date_ - timedelta(days=1)).strftime('%d-%m-%Y')
df['Total No of Plays (App and Portal)'] = df['Total No of Plays (App and Portal)'].fillna(0)
df['Number of Plays on App'] = df['Number of Plays on App'].fillna(0)
df['Number of Plays on Portal'] = df['Number of Plays on Portal'].fillna(0)
df['Average Play Time in mins (On App and Portal)'] = df[
'Average Play Time in mins (On App and Portal)'].fillna(0)
df['Average Play Time in mins on App'] = df['Average Play Time in mins on App'].fillna(0)
df['Average Play Time in mins on Portal'] = df['Average Play Time in mins on Portal'].fillna(
0)
df = df.fillna('Unknown')
df.sort_values(inplace=True, ascending=[1, 1, 1, 1, 1, 0],
by=['channel', 'Board', 'Medium', 'Grade', 'Subject', 'Total No of Plays (App and Portal)'])
df.to_csv(result_loc_.joinpath(date_.strftime('%Y-%m-%d'), 'weekly_plays.csv'), index=False)
post_data_to_blob(result_loc_.joinpath(date_.strftime('%Y-%m-%d'), 'weekly_plays.csv'), backup=True)
for channel in df.channel.unique():
try:
slug = tenant_info.loc[channel]['slug']
print(slug)
except KeyError:
continue
content_aggregates = df[df['channel'] == channel]
content_aggregates.drop(['channel'], axis=1, inplace=True)
try:
get_data_from_blob(result_loc_.parent.joinpath('portal_dashboards', slug, 'content_aggregates.csv'))
blob_data = pd.read_csv(result_loc_.parent.joinpath('portal_dashboards', slug, 'content_aggregates.csv'))
except AzureMissingResourceHttpError:
blob_data = pd.DataFrame()
import pandas
import numpy
import similaritymeasures
def stats_between_series(
xaxis_1: pandas.Series,
values_1: pandas.Series,
xaxis_2: pandas.Series,
values_2: pandas.Series,
print_: bool = False,
) -> dict:
"""Dynamic time warping and discret frechet distance for measuring similarity between two temporal sequences
Args:
xaxis_1 (pandas.Series): index axis of the dataframe 1
values_1 (pandas.Series): value axis of the dataframe 1
xaxis_2 (pandas.Series): index axis of the dataframe 2
values_2 (pandas.Series): value axis of the dataframe 2
Returns:
dict: `{"dtw": float, "frechet_dist": float}`
"""
dataframe_1 = pandas.merge(xaxis_1, values_1, right_index=True, left_index=True)
dataframe_2 = pandas.merge(xaxis_2, values_2, right_index=True, left_index=True)
dataframe_1.rename(
columns={xaxis_1.name: "id", values_1.name: "values_1"}, inplace=True
)
dataframe_2.rename(
columns={xaxis_2.name: "id", values_2.name: "values_2"}, inplace=True
)
dataframe_1.set_index("id", inplace=True)
dataframe_2.set_index("id", inplace=True)
unified = pandas.concat([dataframe_1, dataframe_2], axis=1)
unified["values_1"] = (
pandas.to_numeric(unified["values_1"], errors="coerce", downcast="float")
.interpolate()
.fillna(method="bfill")
.fillna(method="ffill")
)
unified["values_2"] = (
| pandas.to_numeric(unified["values_2"], errors="coerce", downcast="float") | pandas.to_numeric |
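# The body of stats_between_series is truncated in this snippet before the actual similarity
# computation. The following is a hedged, illustrative sketch (not the original implementation) of
# how the aligned columns could be turned into point arrays and passed to the similaritymeasures
# package to produce the {"dtw", "frechet_dist"} dict promised by the docstring. The helper name
# _similarity_from_aligned is an assumption made for this example; it reuses the pandas, numpy and
# similaritymeasures imports at the top of this file.
def _similarity_from_aligned(unified: pandas.DataFrame) -> dict:
    # similaritymeasures expects N x 2 arrays of (x, y) points for curve comparison
    curve_1 = numpy.column_stack(
        (numpy.arange(len(unified)), unified["values_1"].to_numpy(dtype=float))
    )
    curve_2 = numpy.column_stack(
        (numpy.arange(len(unified)), unified["values_2"].to_numpy(dtype=float))
    )
    dtw_value, _ = similaritymeasures.dtw(curve_1, curve_2)  # dynamic time warping distance
    frechet = similaritymeasures.frechet_dist(curve_1, curve_2)  # discrete Frechet distance
    return {"dtw": dtw_value, "frechet_dist": frechet}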
from datetime import date, datetime, timedelta
from dateutil import tz
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, Timestamp, date_range
import pandas._testing as tm
class TestDatetimeIndex:
def test_setitem_with_datetime_tz(self):
# 16889
# support .loc with alignment and tz-aware DatetimeIndex
mask = np.array([True, False, True, False])
idx = date_range("20010101", periods=4, tz="UTC")
df = DataFrame({"a": np.arange(4)}, index=idx).astype("float64")
result = df.copy()
result.loc[mask, :] = df.loc[mask, :]
tm.assert_frame_equal(result, df)
result = df.copy()
result.loc[mask] = df.loc[mask]
tm.assert_frame_equal(result, df)
idx = date_range("20010101", periods=4)
df = DataFrame({"a": np.arange(4)}, index=idx).astype("float64")
result = df.copy()
result.loc[mask, :] = df.loc[mask, :]
tm.assert_frame_equal(result, df)
result = df.copy()
result.loc[mask] = df.loc[mask]
tm.assert_frame_equal(result, df)
def test_indexing_with_datetime_tz(self):
# GH#8260
# support datetime64 with tz
idx = Index(date_range("20130101", periods=3, tz="US/Eastern"), name="foo")
dr = date_range("20130110", periods=3)
df = DataFrame({"A": idx, "B": dr})
df["C"] = idx
df.iloc[1, 1] = pd.NaT
df.iloc[1, 2] = pd.NaT
# indexing
result = df.iloc[1]
expected = Series(
[Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), pd.NaT, pd.NaT],
index=list("ABC"),
dtype="object",
name=1,
)
tm.assert_series_equal(result, expected)
result = df.loc[1]
expected = Series(
[Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), pd.NaT, pd.NaT],
index=list("ABC"),
dtype="object",
name=1,
)
tm.assert_series_equal(result, expected)
# indexing - fast_xs
df = DataFrame({"a": date_range("2014-01-01", periods=10, tz="UTC")})
result = df.iloc[5]
expected = Series(
[Timestamp("2014-01-06 00:00:00+0000", tz="UTC")], index=["a"], name=5
)
tm.assert_series_equal(result, expected)
result = df.loc[5]
tm.assert_series_equal(result, expected)
# indexing - boolean
result = df[df.a > df.a[3]]
expected = df.iloc[4:]
tm.assert_frame_equal(result, expected)
# indexing - setting an element
df = DataFrame(
data=pd.to_datetime(["2015-03-30 20:12:32", "2015-03-12 00:11:11"]),
columns=["time"],
)
df["new_col"] = ["new", "old"]
df.time = df.set_index("time").index.tz_localize("UTC")
v = df[df.new_col == "new"].set_index("time").index.tz_convert("US/Pacific")
# trying to set a single element on a part of a different timezone
# this converts to object
df2 = df.copy()
df2.loc[df2.new_col == "new", "time"] = v
expected = Series([v[0], df.loc[1, "time"]], name="time")
tm.assert_series_equal(df2.time, expected)
v = df.loc[df.new_col == "new", "time"] + pd.Timedelta("1s")
df.loc[df.new_col == "new", "time"] = v
tm.assert_series_equal(df.loc[df.new_col == "new", "time"], v)
def test_consistency_with_tz_aware_scalar(self):
# xref gh-12938
# various ways of indexing the same tz-aware scalar
df = Series([Timestamp("2016-03-30 14:35:25", tz="Europe/Brussels")]).to_frame()
df = pd.concat([df, df]).reset_index(drop=True)
expected = Timestamp("2016-03-30 14:35:25+0200", tz="Europe/Brussels")
result = df[0][0]
assert result == expected
result = df.iloc[0, 0]
assert result == expected
result = df.loc[0, 0]
assert result == expected
result = df.iat[0, 0]
assert result == expected
result = df.at[0, 0]
assert result == expected
result = df[0].loc[0]
assert result == expected
result = df[0].at[0]
assert result == expected
def test_indexing_with_datetimeindex_tz(self):
# GH 12050
# indexing on a series with a datetimeindex with tz
index = date_range("2015-01-01", periods=2, tz="utc")
ser = Series(range(2), index=index, dtype="int64")
# list-like indexing
for sel in (index, list(index)):
# getitem
tm.assert_series_equal(ser[sel], ser)
# setitem
result = ser.copy()
result[sel] = 1
expected = Series(1, index=index)
tm.assert_series_equal(result, expected)
# .loc getitem
tm.assert_series_equal(ser.loc[sel], ser)
# .loc setitem
result = ser.copy()
result.loc[sel] = 1
expected = Series(1, index=index)
tm.assert_series_equal(result, expected)
# single element indexing
# getitem
assert ser[index[1]] == 1
# setitem
result = ser.copy()
result[index[1]] = 5
expected = Series([0, 5], index=index)
tm.assert_series_equal(result, expected)
# .loc getitem
assert ser.loc[index[1]] == 1
# .loc setitem
result = ser.copy()
result.loc[index[1]] = 5
expected = Series([0, 5], index=index)
tm.assert_series_equal(result, expected)
def test_partial_setting_with_datetimelike_dtype(self):
# GH9478
# a datetimeindex alignment issue with partial setting
df = DataFrame(
np.arange(6.0).reshape(3, 2),
columns=list("AB"),
index=date_range("1/1/2000", periods=3, freq="1H"),
)
expected = df.copy()
expected["C"] = [expected.index[0]] + [pd.NaT, pd.NaT]
mask = df.A < 1
df.loc[mask, "C"] = df.loc[mask].index
tm.assert_frame_equal(df, expected)
def test_loc_setitem_datetime(self):
# GH 9516
dt1 = Timestamp("20130101 09:00:00")
dt2 = Timestamp("20130101 10:00:00")
for conv in [
lambda x: x,
lambda x: x.to_datetime64(),
lambda x: x.to_pydatetime(),
lambda x: np.datetime64(x),
]:
df = DataFrame()
df.loc[conv(dt1), "one"] = 100
df.loc[conv(dt2), "one"] = 200
expected = DataFrame({"one": [100.0, 200.0]}, index=[dt1, dt2])
tm.assert_frame_equal(df, expected)
def test_series_partial_set_datetime(self):
# GH 11497
idx = date_range("2011-01-01", "2011-01-02", freq="D", name="idx")
ser = Series([0.1, 0.2], index=idx, name="s")
result = ser.loc[[Timestamp("2011-01-01"), Timestamp("2011-01-02")]]
exp = Series([0.1, 0.2], index=idx, name="s")
tm.assert_series_equal(result, exp, check_index_type=True)
keys = [
Timestamp("2011-01-02"),
Timestamp("2011-01-02"),
Timestamp("2011-01-01"),
]
exp = Series(
[0.2, 0.2, 0.1], index=pd.DatetimeIndex(keys, name="idx"), name="s"
)
tm.assert_series_equal(ser.loc[keys], exp, check_index_type=True)
keys = [
Timestamp("2011-01-03"),
Timestamp("2011-01-02"),
Timestamp("2011-01-03"),
]
with pytest.raises(KeyError, match="with any missing labels"):
ser.loc[keys]
def test_series_partial_set_period(self):
# GH 11497
idx = pd.period_range("2011-01-01", "2011-01-02", freq="D", name="idx")
ser = Series([0.1, 0.2], index=idx, name="s")
def report_classification(df_features,df_target,algorithms='default',test_size=0.3,scaling=None,
large_data=False,encode='dummy',average='binary',change_data_type = False,
threshold=8,random_state=None):
'''
df_features : Pandas DataFrame
df_target : Pandas Series
algorithms : List ,'default'=
[LogisticRegression(),
GaussianNB(),
DecisionTreeClassifier(),
RandomForestClassifier(),
GradientBoostingClassifier(),
AdaBoostClassifier(),
XGBClassifier()]
The above are the default algorithms. If specific algorithms are needed, import the
corresponding libraries and pass the algorithm instances as a list.
For example, to use only random forest and adaboost, pass
algorithms=[RandomForestClassifier(max_depth=8),AdaBoostClassifier()]
Note that these estimator classes must be imported before they are passed in the list.
test_size: If float, should be between 0.0 and 1.0 and represent the proportion of the
dataset to include in the test split.
scaling : {'standard-scalar', 'min-max'} or None , default=None
encode : {'dummy','onehot','label'} ,default='dummy'
change_data_type : bool, default=False
Some columns will be of numerical datatype though there are only 2-3 unique values in that column,
so these columns must be converted to object as it is more relevant.
By setting change_data_type= True , these columns will be converted into object datatype
threshold : int ,default=8
Maximum unique value a column can have
large_data : bool, default=False
If the dataset is large then the parameter large_data should be set to True;
make sure your system has enough memory before setting large_data=True
average : {'micro', 'macro', 'samples','weighted', 'binary'} or None, default='binary'
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
random_state : int, RandomState instance or None, default=None
'''
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder,StandardScaler,MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,confusion_matrix,roc_auc_score,roc_curve,accuracy_score,recall_score,precision_score,f1_score
from sklearn.ensemble import RandomForestClassifier,GradientBoostingClassifier,AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from xgboost import XGBClassifier
from warnings import filterwarnings
filterwarnings('ignore')
print("Shape of the data :",df_features.shape)
print("---------------------------------------")
#Check if there is any missing values
if df_features.isna().sum().sum()==0:
df_num=df_features.select_dtypes(exclude="object")
#Some columns will be of numerical datatype though there are only 2-3 unique values in that column
#Here the if-condition will check if the unique values are less than the specified threshold in each column
if change_data_type == True:
for i in df_num.columns:
if len(df_num[i].value_counts())<threshold:
#The datatype will be changed to object if the column has fewer unique values than the threshold
df_features[i] = df_features[i].astype('object')
print("Datatype of {} changed to 'object as there were less than {} unique values".format(i,threshold))
print("-----------------------------------------------------------------------------------------")
else:
pass
#Features such as movie-title, id, etc. that have a very large number of unique values must be dropped
#(alternatively, such features can be label encoded beforehand and then passed in)
df_cat=df_features.select_dtypes(include="object")
for i in df_cat:
if df_features[i].nunique()>threshold:
raise Exception("Recheck the datatype of {}, as there are more than {} unique values or change the datatype of {}".format(i,threshold))
df_num=df_features.select_dtypes(exclude="object")
#Encoding of categorical features
if df_cat.shape[1]!=0:
#Dummy-encoding
if encode == 'dummy':
print("Encoding : Dummy Encoding" )
print("---------------------------------------")
encoding=pd.get_dummies(df_cat,drop_first=True)
X=pd.concat([encoding,df_num],axis=1)
#Onehot encoding
elif encode == 'onehot':
print("Encoding : One-hot Encoding" )
print("---------------------------------------")
encoding=pd.get_dummies(df_cat)
X=pd.concat([encoding,df_num],axis=1)
#Label encoding
elif encode == 'label':
print("Encoding : Label Encoding" )
print("---------------------------------------")
encoding=df_cat.apply(LabelEncoder().fit_transform)
X=pd.concat([encoding,df_num],axis=1)
#If there are no categorical features
else:
X=df_features
#Encoding of target column
labelencoder = LabelEncoder()
y = labelencoder.fit_transform(df_target)
#Value count of target column
count=pd.Series(y).value_counts()
print("Value count of target variable :")
for i in range(len(count)):
print("Count of {}s is {} ".format(count.index[i],count.values[i]))
print("---------------------------------------")
#Scaling
#Standard scaling
if scaling=='standard-scalar':
print("Scaling : StandardScalar")
print("---------------------------------------")
ss=StandardScaler()
X=ss.fit_transform(X)
#MinmaxScalar
elif scaling=='min-max':
print("Scaling : MinmaxScalar")
print("---------------------------------------")
mm=MinMaxScaler()
X=mm.fit_transform(X)
else:
print("Scaling : None")
print("---------------------------------------")
#Condition to check how large the data after encoding
if (X.shape[0]*X.shape[1] < 1000000) or large_data==True:
print("Number of Datapoints :",X.shape[0]*X.shape[1])
print("---------------------------------------")
else:
raise Exception("Data too large to process, if you want to still execute, set parameter large_data=False")
#Train-test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state)
print("Test size for train test split :",test_size)
print("---------------------------------------")
#Algorithms
if algorithms == 'default':
algorithms=[LogisticRegression(),
GaussianNB(),
DecisionTreeClassifier(random_state=random_state),
RandomForestClassifier(random_state=random_state),
GradientBoostingClassifier(random_state=random_state),
AdaBoostClassifier(random_state=random_state),
XGBClassifier(random_state=random_state,verbosity=0)]
else:
algorithms=algorithms
#Binary Classification
if df_target.nunique()<3:
results=pd.DataFrame(columns=["Algorithm_name",'Train_accuracy','Test_accuracy',
"Test_Roc_Auc_score",'Test_recall','Test_precision'])
for i in algorithms:
print("Executing :",i)
i.fit(X_train, y_train)
train_pred_i=i.predict(X_train)
train_acc=accuracy_score(y_train,train_pred_i)
test_pred_i=i.predict(X_test)
test_acc=accuracy_score(y_test,test_pred_i)
recall=recall_score(y_test,test_pred_i,average=average)
precision=precision_score(y_test,test_pred_i,average=average)
roc_auc=roc_auc_score(y_test,test_pred_i)
row={"Algorithm_name":str(i)[:-2],'Train_accuracy':train_acc,"Test_accuracy":test_acc,
"Test_Roc_Auc_score":roc_auc,'Test_recall':recall,"Test_precision":precision}
results=results.append(row,ignore_index=True)
return results
#Multiclass Classification
else:
results=pd.DataFrame(columns=["Algorithm_name",'Train_accuracy','Test_accuracy',"f1_score"])
for i in algorithms:
print("Executing :",i)
i.fit(X_train, y_train)
train_pred_i=i.predict(X_train)
train_acc=accuracy_score(y_train,train_pred_i)
test_pred_i=i.predict(X_test)
test_acc=accuracy_score(y_test,test_pred_i)
f1=f1_score(y_test,test_pred_i,average=average)
row={"Algorithm_name":str(i)[:-2],'Train_accuracy':train_acc,"Test_accuracy":test_acc,"f1_score":f1}
results=results.append(row,ignore_index=True)
return results
else:
raise Exception("The data contains missing values, first handle missing values and then pass the data")
def report_regression(df_features,df_target,algorithms='default',test_size=0.3,
scaling=None,large_data=False,change_data_type=True,encode='dummy',
threshold=8,random_state=None):
'''
df_features : Pandas DataFrame
df_target : Pandas Series
algorithms : List ,'default'=
[LinearRegression(),
Lasso(),
Ridge(),
RandomForestRegressor(),
GradientBoostingRegressor(),
AdaBoostRegressor(),
XGBRegressor()]
The above are the default algorithms. If specific algorithms are needed, import the
corresponding libraries and pass the algorithm instances as a list.
For example, to use only random forest and adaboost, pass
algorithms=[RandomForestRegressor(max_depth=8),AdaBoostRegressor()]
Note that these estimator classes must be imported before they are passed in the list.
test_size: If float, should be between 0.0 and 1.0 and represent the proportion of the
dataset to include in the test split.
scaling : {'standard-scalar', 'min-max'} or None , default=None
encode : {'dummy','onehot','label'} ,default='dummy'
change_data_type : bool, default=False
Some columns will be of numerical datatype though there are only 2-3 unique values in that column,
so these columns must be converted to object as it is more relevant.
By setting change_data_type= True , these columns will be converted into object datatype
threshold : int ,default=8
Maximum unique value a column can have
large_data : bool, default=False
If the dataset is large then the parameter large_data should be set to True;
make sure your system has enough memory before setting large_data=True
random_state : int, RandomState instance or None, default=None
'''
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder,StandardScaler,MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor,GradientBoostingRegressor,AdaBoostRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LinearRegression,Lasso,Ridge
from xgboost import XGBRegressor
from warnings import filterwarnings
filterwarnings('ignore')
print("Shape of data :",df_features.shape)
print("---------------------------------------")
#Check if there is any missing values
if df_features.isna().sum().sum()==0:
df_num=df_features.select_dtypes(exclude="object")
#Some columns will be of numerical datatype though there are only 2-3 unique values in that column
#Here the if-condition will check if the unique values are less than the specified threshold in each column
if change_data_type == True:
for i in df_num.columns:
#The datatype will be changed to object if the column has fewer unique values than the threshold
if len(df_num[i].value_counts())<threshold:
df_features[i] = df_features[i].astype('object')
print("Datatype of {} changed to 'object as there were less than {} unique values".format(i,threshold))
print("-----------------------------------------------------------------------------------------")
else:
pass
#Features such as movie-title, id, etc. that have a very large number of unique values must be dropped
#(alternatively, such features can be label encoded beforehand and then passed in)
df_cat=df_features.select_dtypes(include="object")
for i in df_cat:
if df_features[i].nunique()>threshold:
raise Exception("Recheck the datatype of {}, as there are more than {} unique values or change the datatype of {}".format(i,threshold))
df_num=df_features.select_dtypes(exclude="object")
#Encoding of categorical features
if df_cat.shape[1]!=0:
#Dummy Encoding
if encode == 'dummy':
print("Encoding : Dummy Encoding" )
print("---------------------------------------")
encoding=pd.get_dummies(df_cat,drop_first=True)
X=pd.concat([encoding,df_num],axis=1)
#Onehot encoding
elif encode == 'onehot':
print("Encoding : One-hot Encoding" )
print("---------------------------------------")
encoding=pd.get_dummies(df_cat)
X=pd.concat([encoding,df_num],axis=1)
#Label encoding
elif encode == 'label':
print("Encoding : Label Encoding" )
print("---------------------------------------")
encoding=df_cat.apply(LabelEncoder().fit_transform)
X=pd.concat([encoding,df_num],axis=1)
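# The body of report_regression is truncated in this snippet at the encoding step above. As a hedged,
# illustrative sketch (not the original code), the remaining steps would plausibly mirror
# report_classification: split the data, fit each regressor and collect train/test RMSE via the
# mean_squared_error import above. Variable and column names in this commented sketch are assumptions.
#
#     X_train, X_test, y_train, y_test = train_test_split(X, df_target, test_size=test_size,
#                                                          random_state=random_state)
#     if algorithms == 'default':
#         algorithms = [LinearRegression(), Lasso(), Ridge(),
#                       RandomForestRegressor(random_state=random_state),
#                       GradientBoostingRegressor(random_state=random_state),
#                       AdaBoostRegressor(random_state=random_state),
#                       XGBRegressor(random_state=random_state, verbosity=0)]
#     results = pd.DataFrame(columns=["Algorithm_name", "Train_RMSE", "Test_RMSE"])
#     for model in algorithms:
#         model.fit(X_train, y_train)
#         train_rmse = mean_squared_error(y_train, model.predict(X_train)) ** 0.5
#         test_rmse = mean_squared_error(y_test, model.predict(X_test)) ** 0.5
#         results = results.append({"Algorithm_name": str(model)[:-2],
#                                   "Train_RMSE": train_rmse, "Test_RMSE": test_rmse},
#                                  ignore_index=True)
#     return results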
import inspect
import os
import warnings
from unittest.mock import MagicMock, patch
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from evalml.model_understanding.graphs import visualize_decision_tree
from evalml.pipelines.components import ComponentBase
from evalml.utils.gen_utils import (
SEED_BOUNDS,
_rename_column_names_to_numeric,
classproperty,
convert_to_seconds,
deprecate_arg,
drop_rows_with_nans,
get_importable_subclasses,
get_random_seed,
import_or_raise,
jupyter_check,
pad_with_nans,
save_plot
)
@patch('importlib.import_module')
def test_import_or_raise_errors(dummy_importlib):
def _mock_import_function(library_str):
if library_str == "_evalml":
raise ImportError("Mock ImportError executed!")
if library_str == "attr_error_lib":
raise Exception("Mock Exception executed!")
dummy_importlib.side_effect = _mock_import_function
with pytest.raises(ImportError, match="Missing optional dependency '_evalml'"):
import_or_raise("_evalml")
with pytest.raises(ImportError, match="Missing optional dependency '_evalml'. Please use pip to install _evalml. Additional error message"):
import_or_raise("_evalml", "Additional error message")
with pytest.raises(Exception, match="An exception occurred while trying to import `attr_error_lib`: Mock Exception executed!"):
import_or_raise("attr_error_lib")
def test_import_or_raise_imports():
math = import_or_raise("math", "error message")
assert math.ceil(0.1) == 1
def test_convert_to_seconds():
assert convert_to_seconds("10 s") == 10
assert convert_to_seconds("10 sec") == 10
assert convert_to_seconds("10 second") == 10
assert convert_to_seconds("10 seconds") == 10
assert convert_to_seconds("10 m") == 600
assert convert_to_seconds("10 min") == 600
assert convert_to_seconds("10 minute") == 600
assert convert_to_seconds("10 minutes") == 600
assert convert_to_seconds("10 h") == 36000
assert convert_to_seconds("10 hr") == 36000
assert convert_to_seconds("10 hour") == 36000
assert convert_to_seconds("10 hours") == 36000
with pytest.raises(AssertionError, match="Invalid unit."):
convert_to_seconds("10 years")
def test_get_random_seed_rng():
def make_mock_random_state(return_value):
class MockRandomState(np.random.RandomState):
def __init__(self):
self.min_bound = None
self.max_bound = None
super().__init__()
def randint(self, min_bound, max_bound):
self.min_bound = min_bound
self.max_bound = max_bound
return return_value
return MockRandomState()
rng = make_mock_random_state(42)
assert get_random_seed(rng) == 42
assert rng.min_bound == SEED_BOUNDS.min_bound
assert rng.max_bound == SEED_BOUNDS.max_bound
def test_get_random_seed_int():
# ensure the invariant "min_bound < max_bound" is enforced
with pytest.raises(ValueError):
get_random_seed(0, min_bound=0, max_bound=0)
with pytest.raises(ValueError):
get_random_seed(0, min_bound=0, max_bound=-1)
# test default boundaries to show the provided value should modulate within the default range
assert get_random_seed(SEED_BOUNDS.max_bound - 2) == SEED_BOUNDS.max_bound - 2
assert get_random_seed(SEED_BOUNDS.max_bound - 1) == SEED_BOUNDS.max_bound - 1
assert get_random_seed(SEED_BOUNDS.max_bound) == SEED_BOUNDS.min_bound
assert get_random_seed(SEED_BOUNDS.max_bound + 1) == SEED_BOUNDS.min_bound + 1
assert get_random_seed(SEED_BOUNDS.max_bound + 2) == SEED_BOUNDS.min_bound + 2
assert get_random_seed(SEED_BOUNDS.min_bound - 2) == SEED_BOUNDS.max_bound - 2
assert get_random_seed(SEED_BOUNDS.min_bound - 1) == SEED_BOUNDS.max_bound - 1
assert get_random_seed(SEED_BOUNDS.min_bound) == SEED_BOUNDS.min_bound
assert get_random_seed(SEED_BOUNDS.min_bound + 1) == SEED_BOUNDS.min_bound + 1
assert get_random_seed(SEED_BOUNDS.min_bound + 2) == SEED_BOUNDS.min_bound + 2
# vectorize get_random_seed via a wrapper for easy evaluation
default_min_bound = inspect.signature(get_random_seed).parameters['min_bound'].default
default_max_bound = inspect.signature(get_random_seed).parameters['max_bound'].default
assert default_min_bound == SEED_BOUNDS.min_bound
assert default_max_bound == SEED_BOUNDS.max_bound
def get_random_seed_vec(min_bound=None, max_bound=None): # passing None for either means no value is provided to get_random_seed
def get_random_seed_wrapper(random_seed):
return get_random_seed(random_seed,
min_bound=min_bound if min_bound is not None else default_min_bound,
max_bound=max_bound if max_bound is not None else default_max_bound)
return np.vectorize(get_random_seed_wrapper)
# ensure that regardless of the setting of min_bound and max_bound, the output of get_random_seed always stays
# between the min_bound (inclusive) and max_bound (exclusive), and wraps neatly around that range using modular arithmetic.
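# Worked example of the wrap-around: with min_bound=0 and max_bound=5, an input of 7 maps to
# (7 - 0) % 5 + 0 = 2, and an input of -1 maps to (-1 - 0) % 5 + 0 = 4 (Python's % is non-negative here).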
vals = np.arange(-100, 100)
def make_expected_values(vals, min_bound, max_bound):
return np.array([i if (min_bound <= i and i < max_bound) else ((i - min_bound) % (max_bound - min_bound)) + min_bound
for i in vals])
np.testing.assert_equal(get_random_seed_vec(min_bound=None, max_bound=None)(vals),
make_expected_values(vals, min_bound=SEED_BOUNDS.min_bound, max_bound=SEED_BOUNDS.max_bound))
np.testing.assert_equal(get_random_seed_vec(min_bound=None, max_bound=10)(vals),
make_expected_values(vals, min_bound=SEED_BOUNDS.min_bound, max_bound=10))
np.testing.assert_equal(get_random_seed_vec(min_bound=-10, max_bound=None)(vals),
make_expected_values(vals, min_bound=-10, max_bound=SEED_BOUNDS.max_bound))
np.testing.assert_equal(get_random_seed_vec(min_bound=0, max_bound=5)(vals),
make_expected_values(vals, min_bound=0, max_bound=5))
np.testing.assert_equal(get_random_seed_vec(min_bound=-5, max_bound=0)(vals),
make_expected_values(vals, min_bound=-5, max_bound=0))
np.testing.assert_equal(get_random_seed_vec(min_bound=-5, max_bound=5)(vals),
make_expected_values(vals, min_bound=-5, max_bound=5))
np.testing.assert_equal(get_random_seed_vec(min_bound=5, max_bound=10)(vals),
make_expected_values(vals, min_bound=5, max_bound=10))
np.testing.assert_equal(get_random_seed_vec(min_bound=-10, max_bound=-5)(vals),
make_expected_values(vals, min_bound=-10, max_bound=-5))
def test_class_property():
class MockClass:
name = "MockClass"
@classproperty
def caps_name(cls):
return cls.name.upper()
assert MockClass.caps_name == "MOCKCLASS"
def test_get_importable_subclasses_wont_get_custom_classes():
class ChildClass(ComponentBase):
pass
assert ChildClass not in get_importable_subclasses(ComponentBase)
@patch('importlib.import_module')
def test_import_or_warn_errors(dummy_importlib):
def _mock_import_function(library_str):
if library_str == "_evalml":
raise ImportError("Mock ImportError executed!")
if library_str == "attr_error_lib":
raise Exception("Mock Exception executed!")
dummy_importlib.side_effect = _mock_import_function
with pytest.warns(UserWarning, match="Missing optional dependency '_evalml'"):
import_or_raise("_evalml", warning=True)
with pytest.warns(UserWarning, match="Missing optional dependency '_evalml'. Please use pip to install _evalml. Additional error message"):
import_or_raise("_evalml", "Additional error message", warning=True)
with pytest.warns(UserWarning, match="An exception occurred while trying to import `attr_error_lib`: Mock Exception executed!"):
import_or_raise("attr_error_lib", warning=True)
@patch('evalml.utils.gen_utils.import_or_raise')
def test_jupyter_check_errors(mock_import_or_raise):
mock_import_or_raise.side_effect = ImportError
assert not jupyter_check()
mock_import_or_raise.side_effect = Exception
assert not jupyter_check()
@patch('evalml.utils.gen_utils.import_or_raise')
def test_jupyter_check(mock_import_or_raise):
mock_import_or_raise.return_value = MagicMock()
mock_import_or_raise().core.getipython.get_ipython.return_value = True
assert jupyter_check()
mock_import_or_raise().core.getipython.get_ipython.return_value = False
assert not jupyter_check()
mock_import_or_raise().core.getipython.get_ipython.return_value = None
assert not jupyter_check()
def _check_equality(data, expected, check_index_type=True):
if isinstance(data, pd.Series):
        pd.testing.assert_series_equal(data, expected, check_index_type=check_index_type)
    else:
        pd.testing.assert_frame_equal(data, expected, check_index_type=check_index_type)
@pytest.mark.parametrize("data,num_to_pad,expected",
[(pd.Series([1, 2, 3]), 1, pd.Series([np.nan, 1, 2, 3], dtype="Float64")),
(pd.Series([1, 2, 3]), 0, pd.Series([1, 2, 3])),
(pd.Series([1, 2, 3, 4], index=pd.date_range("2020-10-01", "2020-10-04")),
2, pd.Series([np.nan, np.nan, 1, 2, 3, 4], dtype="Float64")),
(pd.DataFrame({"a": [1., 2., 3.], "b": [4., 5., 6.]}), 0,
pd.DataFrame({"a": pd.Series([1., 2., 3.], dtype="Float64"), "b": pd.Series([4., 5., 6.], dtype="Float64")})),
(pd.DataFrame({"a": [4, 5, 6], "b": ["a", "b", "c"]}), 1,
pd.DataFrame({"a": pd.Series([np.nan, 4, 5, 6], dtype="Float64"), "b": [np.nan, "a", "b", "c"]})),
(pd.DataFrame({"a": [1, 0, 1]}), 2,
pd.DataFrame({"a": pd.Series([np.nan, np.nan, 1, 0, 1], dtype="Float64")}))])
def test_pad_with_nans(data, num_to_pad, expected):
padded = pad_with_nans(data, num_to_pad)
_check_equality(padded, expected)
def test_pad_with_nans_with_series_name():
name = "data to pad"
data = pd.Series([1, 2, 3], name=name)
padded = pad_with_nans(data, 1)
_check_equality(padded, pd.Series([np.nan, 1, 2, 3], name=name, dtype="Float64"))
@pytest.mark.parametrize("data, expected",
[([pd.Series([None, 1., 2., 3]), pd.DataFrame({"a": [1., 2., 3, None]})],
[pd.Series([1., 2.], index=pd.Int64Index([1, 2])),
pd.DataFrame({"a": [2., 3.]}, index=pd.Int64Index([1, 2]))]),
([pd.Series([None, 1., 2., 3]), pd.DataFrame({"a": [3., 4., None, None]})],
[pd.Series([1.], index=pd.Int64Index([1])),
                            pd.DataFrame({"a": [4.]}, index=pd.Int64Index([1]))
import pandas as pd
import numpy as np
from auto_causality.utils import featurize
def nhefs() -> pd.DataFrame:
"""loads the NHEFS dataset
    The dataset describes the impact of quitting smoking on weight gain over a period of 11 years.
    The data consists of the treatment (quit smoking: yes/no), the outcome (change in weight) and
    a series of covariates, of which we include a subset of 9 (see below).
    If used for academic purposes, please consider citing the authors:
<NAME>, <NAME> (2020). Causal Inference: What If. Boca Raton: Chapman & Hall/CRC.
Returns:
pd.DataFrame: dataset with cols "treatment", "y_factual" and covariates "x1" to "x9"
"""
df = pd.read_csv(
"https://cdn1.sph.harvard.edu/wp-content/uploads/sites/1268/1268/20/nhefs.csv"
)
covariates = [
"active",
"age",
"education",
"exercise",
"race",
"sex",
"smokeintensity",
"smokeyrs",
"wt71",
]
has_missing = ["wt82"]
missing = df[has_missing].isnull().any(axis="columns")
df = df.loc[~missing]
df = df[covariates + ["qsmk"] + ["wt82_71"]]
df.rename(columns={"qsmk": "treatment", "wt82_71": "y_factual"}, inplace=True)
df.rename(
columns={c: "x" + str(i + 1) for i, c in enumerate(covariates)}, inplace=True
)
return df
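# Illustrative usage sketch (added for clarity; not part of the original module). It assumes
# network access to the Harvard CDN URL used in nhefs(); the helper name is hypothetical.
def _demo_nhefs():
    df = nhefs()
    # columns: covariates "x1".."x9" plus "treatment" and "y_factual"
    print(df.shape)
    print(df[["treatment", "y_factual"]].describe())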
def lalonde_nsw() -> pd.DataFrame:
"""loads the Lalonde NSW dataset
    The dataset describes the impact of a job training programme on the real earnings
of individuals several years later.
The data consists of the treatment indicator (training yes no), covariates (age, race,
academic background, real earnings 1976, real earnings 1977) and the outcome (real earnings in 1978)
See also https://rdrr.io/cran/qte/man/lalonde.html#heading-0
If used for academic purposes, please consider citing the authors:
Lalonde, Robert: "Evaluating the Econometric Evaluations of Training Programs," American Economic Review,
Vol. 76, pp. 604-620
Returns:
pd.DataFrame: dataset with cols "treatment", "y_factual" and covariates "x1" to "x8"
"""
df_control = pd.read_csv(
"https://users.nber.org/~rdehejia/data/nswre74_control.txt", sep=" "
).dropna(axis=1)
df_control.columns = (
["treatment"] + ["x" + str(x) for x in range(1, 9)] + ["y_factual"]
)
df_treatment = pd.read_csv(
"https://users.nber.org/~rdehejia/data/nswre74_treated.txt", sep=" "
).dropna(axis=1)
df_treatment.columns = (
["treatment"] + ["x" + str(x) for x in range(1, 9)] + ["y_factual"]
)
df = (
pd.concat([df_control, df_treatment], axis=0, ignore_index=True)
.sample(frac=1)
.reset_index(drop=True)
)
return df
def amazon_reviews(rating="pos") -> pd.DataFrame:
"""loads amazon reviews dataset
The dataset describes the impact of positive (or negative) reviews for products on Amazon on sales.
The authors distinguish between items with more than three reviews (treated) and less than three
reviews (untreated). As the rating given by reviews might impact sales, they divide the dataset
into products with on average positive (more than 3 starts) or negative (less than three stars)
reviews.
The dataset consists of 305 covariates (doc2vec features of the review text), a binary treatment
variable (more than 3 reviews vs less than three reviews) and a continuous outcome (sales).
If used for academic purposes, please consider citing the authors:
@inproceedings{rakesh2018linked,
title={Linked Causal Variational Autoencoder for Inferring Paired Spillover Effects},
author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
booktitle={Proceedings of the 27th ACM International Conference on Information and Knowledge Management},
pages={1679--1682},
year={2018},
organization={ACM}
}
Args:
rating (str, optional): choose between positive ('pos') and negative ('neg') reviews. Defaults to 'pos'.
Returns:
pd.DataFrame: dataset with cols "treatment", "y_factual" and covariates "x1" to "x300"
"""
try:
assert rating in ["pos", "neg"]
except AssertionError:
print(
"you need to specify which rating dataset you'd like to load. The options are 'pos' or 'neg'"
)
return None
try:
import gdown
except ImportError:
gdown = None
if rating == "pos":
url = "https://drive.google.com/file/d/167CYEnYinePTNtKpVpsg0BVkoTwOwQfK/view?usp=sharing"
elif rating == "neg":
url = "https://drive.google.com/file/d/1b-MPNqxCyWSJE5uyn5-VJUwC8056HM8u/view?usp=sharing"
if gdown:
try:
df = pd.read_csv("amazon_" + rating + ".csv")
except FileNotFoundError:
gdown.download(url, "amazon_" + rating + ".csv", fuzzy=True)
df = pd.read_csv("amazon_" + rating + ".csv")
df.drop(df.columns[[2, 3, 4]], axis=1, inplace=True)
df.columns = ["treatment", "y_factual"] + ["x" + str(i) for i in range(1, 301)]
return df
else:
print(
f"""The Amazon dataset is hosted on google drive. As it's quite large, the gdown package is required to download
            the dataset automatically. The gdown package can be installed via 'pip install gdown'.
Alternatively, you can download it from the following link and store it in the datasets folder:
{url}"""
)
return None
def synth_ihdp() -> pd.DataFrame:
"""loads IHDP dataset
The Infant Health and Development Program (IHDP) dataset contains data on the impact of visits by specialists
on the cognitive development of children. The dataset consists of 25 covariates describing various features
of these children and their mothers, a binary treatment variable (visit/no visit) and a continuous outcome.
If used for academic purposes, consider citing the authors:
@article{hill2011,
title={Bayesian nonparametric modeling for causal inference.},
author={<NAME>},
journal={Journal of Computational and Graphical Statistics},
volume={20},
number={1},
pages={217--240},
year={2011}
}
Returns:
pd.DataFrame: dataset for causal inference with cols "treatment", "y_factual" and covariates "x1" to "x25"
"""
# load raw data
data = pd.read_csv(
"https://raw.githubusercontent.com/AMLab-Amsterdam/CEVAE/master/datasets/IHDP/csv/ihdp_npci_1.csv",
header=None,
)
col = [
"treatment",
"y_factual",
"y_cfactual",
"mu0",
"mu1",
]
for i in range(1, 26):
col.append("x" + str(i))
data.columns = col
# drop the columns we don't care about
ignore_patterns = ["y_cfactual", "mu"]
ignore_cols = [c for c in data.columns if any([s in c for s in ignore_patterns])]
data = data.drop(columns=ignore_cols)
return data
def synth_acic(condition=1) -> pd.DataFrame:
"""loads data from ACIC Causal Inference Challenge 2016
The dataset consists of 58 covariates, a binary treatment and a continuous response.
There are 10 simulated pairs of treatment and response, which can be selected
with the condition argument supplied to this function.
If used for academic purposes, consider citing the authors:
@article{dorie2019automated,
title={Automated versus do-it-yourself methods for causal inference: Lessons learned from a
data analysis competition},
author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
journal={Statistical Science},
volume={34},
number={1},
pages={43--68},
year={2019},
publisher={Institute of Mathematical Statistics}
}
Args:
condition (int): in [1,10], corresponds to 10 simulated treatment/response pairs. Defaults to 1.
Returns:
        pd.DataFrame: dataset for causal inference with columns "treatment", "y_factual" and covariates "x1" to "x58"
"""
try:
assert condition in range(1, 11)
except AssertionError:
print("'condition' needs to be in [1,10]")
return None
covariates = pd.read_csv(
"https://raw.githubusercontent.com/IBM/causallib/"
+ "master/causallib/datasets/data/acic_challenge_2016/x.csv"
)
cols = covariates.columns
covariates.rename(
columns={c: c.replace("_", "") for c in cols},
inplace=True,
)
url = (
"https://raw.githubusercontent.com/IBM/causallib/master/causallib/"
+ f"datasets/data/acic_challenge_2016/zymu_{condition}.csv"
)
    z_y_mu = pd.read_csv(url)
from ast import operator
import csv
from datetime import datetime
from operator import index, mod
import os
import sys
import math
import time
import warnings
import itertools
import numpy as np
import pandas as pd
# import scrapbook as sb
import matplotlib.pyplot as plt
from pmdarima.arima import auto_arima
pd.options.display.float_format = "{:,.2f}".format
np.set_printoptions(precision=2)
warnings.filterwarnings("ignore")
print("System version: {}".format(sys.version))
# Forecasting settings
N_SPLITS = 1
HORIZON = 5  # Forecast 5 days
GAP = 1
FIRST_WEEK = 40
LAST_WEEK = 138
# Parameters of ARIMA model
params = {
"seasonal": False,
"start_p": 0,
"start_q": 0,
"max_p": 5,
"max_q": 5,
"m": 52,
}
def readCSV():
    # Read the CSV into a dictionary
csvFile = open("BCHAIN-MKPRU.csv", "r")
reader = csv.reader(csvFile)
date = []
price = []
    # Create an empty dictionary
result = {}
for item in reader:
        # Skip the header row
if reader.line_num == 1:
continue
result[item[0]] = float(item[1])
csvFile.close()
for k, v in result.items():
result[k] = math.log(v)
date.append(datetime.strptime(k, '%m/%d/%y'))
price.append(result[k])
return (result, date, price)
def getDatePrice(result):
date = []
price = []
    for k, v in result.iterrows():
        # k is the date-string index; take the log of the value without mutating the frame
        date.append(datetime.strptime(k, '%m/%d/%y'))
        price.append(math.log(v['Value']))
return (date, price)
def createDF(date, price):
period = 20
date = date[-period:]
price = price[-period:]
mid = int(period * 0.7)
train_df = pd.DataFrame({'date': date[:mid], 'price': price[:mid]},
index=date[:mid], columns=['date', 'price'])
test_df = pd.DataFrame({'date': date[mid:], 'price': price[mid:]},
index=date[mid:], columns=['date', 'price'])
return (train_df, test_df)
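# Illustrative sketch tying the helpers above together (added for clarity; assumes
# "BCHAIN-MKPRU.csv" is present in the working directory, as readCSV() expects).
def _demo_split():
    result, date, price = readCSV()
    train_df, test_df = createDF(date, price)
    # createDF() keeps the last 20 observations and splits them 70/30
    print(len(train_df), len(test_df))  # 14 6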
def train(train_ts):
train_ts = np.array(train_ts.logmove)
model = auto_arima(
train_ts,
seasonal=params["seasonal"],
start_p=params["start_p"],
start_q=params["start_q"],
max_p=params["max_p"],
max_q=params["max_q"],
stepwise=True,
)
    model.fit(train_ts)
    return model
def MAPE(predictions, actuals):
"""
Implements Mean Absolute Percent Error (MAPE).
Args:
predictions (array like): a vector of predicted values.
actuals (array like): a vector of actual values.
Returns:
numpy.float: MAPE value
"""
if not (isinstance(actuals, pd.Series) and isinstance(predictions, pd.Series)):
        predictions, actuals = pd.Series(predictions), pd.Series(actuals)
    # mean absolute percentage error, returned as a fraction (multiply by 100 for percent)
    return ((predictions - actuals).abs() / actuals.abs()).mean()
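# Quick sanity check for MAPE (illustrative, added for clarity):
# errors of 10% and 10% should average to 0.1.
def _demo_mape():
    print(MAPE([110, 90], [100, 100]))  # expected: 0.1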
from __future__ import division
from functools import wraps
import pandas as pd
import numpy as np
import time
import csv, sys
import os.path
import logging
from .ted_functions import TedFunctions
from .ted_aggregate_methods import TedAggregateMethods
from base.uber_model import UberModel, ModelSharedInputs
class TedSpeciesProperties(object):
"""
Listing of species properties that will eventually be read in from a SQL db
"""
def __init__(self):
"""Class representing Species properties"""
super(TedSpeciesProperties, self).__init__()
self.sci_name = pd.Series([], dtype='object')
self.com_name = pd.Series([], dtype='object')
self.taxa = pd.Series([], dtype='object')
self.order = pd.Series([], dtype='object')
self.usfws_id = pd.Series([], dtype='object')
self.body_wgt = pd.Series([], dtype='object')
self.diet_item = pd.Series([], dtype='object')
self.h2o_cont = pd.Series([], dtype='float')
def read_species_properties(self):
# this is a temporary method to initiate the species/diet food items lists (this will be replaced with
# a method to access a SQL database containing the properties
#filename = './ted/tests/TEDSpeciesProperties.csv'
filename = os.path.join(os.path.dirname(__file__),'tests/TEDSpeciesProperties.csv')
try:
with open(filename,'rt') as csvfile:
                # pd.read_csv uses the first line in the file for column headings by default
                dr = pd.read_csv(csvfile)  # comma is the default delimiter
        except csv.Error as e:
            sys.exit('file: %s, %s' % (filename, e))
print(dr)
        self.sci_name = dr.loc[:, 'Scientific Name']
        self.com_name = dr.loc[:, 'Common Name']
        self.taxa = dr.loc[:, 'Taxa']
        self.order = dr.loc[:, 'Order']
        self.usfws_id = dr.loc[:, 'USFWS Species ID (ENTITY_ID)']
        self.body_wgt = dr.loc[:, 'BW (g)']
        self.diet_item = dr.loc[:, 'Food item']
        self.h2o_cont = dr.loc[:, 'Water content of diet']
class TedInputs(ModelSharedInputs):
"""
Required inputs class for Ted.
"""
def __init__(self):
"""Class representing the inputs for Ted"""
super(TedInputs, self).__init__()
# Inputs: Assign object attribute variables from the input Pandas DataFrame
self.chemical_name = pd.Series([], dtype="object", name="chemical_name")
# application parameters for min/max application scenarios
self.crop_min = pd.Series([], dtype="object", name="crop")
self.app_method_min = pd.Series([], dtype="object", name="app_method_min")
self.app_rate_min = pd.Series([], dtype="float", name="app_rate_min")
self.num_apps_min = pd.Series([], dtype="int", name="num_apps_min")
self.app_interval_min = pd.Series([], dtype="int", name="app_interval_min")
self.droplet_spec_min = pd.Series([], dtype="object", name="droplet_spec_min")
        self.boom_hgt_min = pd.Series([], dtype="object", name="boom_hgt_min")
        self.pest_incorp_depth_min = pd.Series([], dtype="object", name="pest_incorp_depth_min")
self.crop_max = pd.Series([], dtype="object", name="crop")
self.app_method_max = pd.Series([], dtype="object", name="app_method_max")
self.app_rate_max = pd.Series([], dtype="float", name="app_rate_max")
        self.num_apps_max = pd.Series([], dtype="int", name="num_apps_max")
self.app_interval_max = pd.Series([], dtype="int", name="app_interval_max")
self.droplet_spec_max = pd.Series([], dtype="object", name="droplet_spec_max")
        self.boom_hgt_max = pd.Series([], dtype="object", name="boom_hgt_max")
        self.pest_incorp_depth_max = pd.Series([], dtype="object", name="pest_incorp_depth_max")
# physical, chemical, and fate properties of pesticide
self.foliar_diss_hlife = pd.Series([], dtype="float", name="foliar_diss_hlife")
self.aerobic_soil_meta_hlife = pd.Series([], dtype="float", name="aerobic_soil_meta_hlife")
self.frac_retained_mamm = pd.Series([], dtype="float", name="frac_retained_mamm")
self.frac_retained_birds = pd.Series([], dtype="float", name="frac_retained_birds")
self.log_kow = pd.Series([], dtype="float", name="log_kow")
self.koc = pd.Series([], dtype="float", name="koc")
self.solubility = pd.Series([], dtype="float", name="solubility")
self.henry_law_const = pd.Series([], dtype="float", name="henry_law_const")
# bio concentration factors (ug active ing/kg-ww) / (ug active ing/liter)
self.aq_plant_algae_bcf_mean = pd.Series([], dtype="float", name="aq_plant_algae_bcf_mean")
self.aq_plant_algae_bcf_upper = pd.Series([], dtype="float", name="aq_plant_algae_bcf_upper")
self.inv_bcf_mean = pd.Series([], dtype="float", name="inv_bcf_mean")
self.inv_bcf_upper = pd.Series([], dtype="float", name="inv_bcf_upper")
self.fish_bcf_mean = pd.Series([], dtype="float", name="fish_bcf_mean")
self.fish_bcf_upper = pd.Series([], dtype="float", name="fish_bcf_upper")
# bounding water concentrations (ug active ing/liter)
self.water_conc_1 = pd.Series([], dtype="float", name="water_conc_1") # lower bound
self.water_conc_2 = pd.Series([], dtype="float", name="water_conc_2") # upper bound
# health value inputs
# naming convention (based on listing from OPP TED Excel spreadsheet 'inputs' worksheet):
# dbt: dose based toxicity
# cbt: concentration-based toxicity
# arbt: application rate-based toxicity
# 1inmill_mort: 1/million mortality (note initial character is numeral 1, not letter l)
# 1inten_mort: 10% mortality (note initial character is numeral 1, not letter l)
# others are self explanatory
# dose based toxicity(dbt): mammals (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort")
self.dbt_mamm_1inten_mort = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort")
self.dbt_mamm_low_ld50 = pd.Series([], dtype="float", name="dbt_mamm_low_ld50")
        self.dbt_mamm_rat_oral_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50")
self.dbt_mamm_rat_derm_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50")
self.dbt_mamm_rat_inhal_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50")
self.dbt_mamm_sub_direct = pd.Series([], dtype="float", name="dbt_mamm_sub_direct")
self.dbt_mamm_sub_indirect = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect")
self.dbt_mamm_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort_wgt")
self.dbt_mamm_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort_wgt")
self.dbt_mamm_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_low_ld50_wgt")
        self.dbt_mamm_rat_oral_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50_wgt")
self.dbt_mamm_rat_derm_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50_wgt")
self.dbt_mamm_rat_inhal_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50_wgt")
self.dbt_mamm_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_direct_wgt")
self.dbt_mamm_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect_wgt")
# dose based toxicity(dbt): birds (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_bird_1inmill_mort = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort")
self.dbt_bird_1inten_mort = pd.Series([], dtype="float", name="dbt_bird_1inten_mort")
self.dbt_bird_low_ld50 = pd.Series([], dtype="float", name="dbt_bird_low_ld50")
self.dbt_bird_hc05 = pd.Series([], dtype="float", name="dbt_bird_hc05")
self.dbt_bird_hc50 = pd.Series([], dtype="float", name="dbt_bird_hc50")
self.dbt_bird_hc95 = pd.Series([], dtype="float", name="dbt_bird_hc95")
self.dbt_bird_sub_direct = pd.Series([], dtype="float", name="dbt_bird_sub_direct")
self.dbt_bird_sub_indirect = pd.Series([], dtype="float", name="dbt_bird_sub_indirect")
self.mineau_sca_fact = pd.Series([], dtype="float", name="mineau_sca_fact")
self.dbt_bird_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort_wgt")
self.dbt_bird_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_bird_1inten_mort_wgt")
self.dbt_bird_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_bird_low_ld50_wgt")
self.dbt_bird_hc05_wgt = pd.Series([], dtype="float", name="dbt_bird_hc05_wgt")
self.dbt_bird_hc50_wgt = pd.Series([], dtype="float", name="dbt_bird_hc50_wgt")
self.dbt_bird_hc95_wgt = pd.Series([], dtype="float", name="dbt_bird_hc95_wgt")
self.dbt_bird_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_bird_sub_direct_wgt")
self.dbt_bird_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_bird_sub_indirect_wgt")
self.mineau_sca_fact_wgt = pd.Series([], dtype="float", name="mineau_sca_fact_wgt")
# dose based toxicity(dbt): reptiles, terrestrial-phase amphibians (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_reptile_1inmill_mort = pd.Series([], dtype="float", name="dbt_reptile_1inmill_mort")
self.dbt_reptile_1inten_mort = pd.Series([], dtype="float", name="dbt_reptile_1inten_mort")
self.dbt_reptile_low_ld50 = pd.Series([], dtype="float", name="dbt_reptile_low_ld50")
self.dbt_reptile_sub_direct = pd.Series([], dtype="float", name="dbt_reptile_sub_direct")
self.dbt_reptile_sub_indirect = pd.Series([], dtype="float", name="dbt_reptile_sub_indirect")
self.dbt_reptile_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_reptile_1inmill_mort_wgt")
self.dbt_reptile_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_reptile_1inten_mort_wgt")
self.dbt_reptile_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_reptile_low_ld50_wgt")
self.dbt_reptile_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_reptile_sub_direct_wgt")
self.dbt_reptile_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_reptile_sub_indirect_wgt")
# concentration-based toxicity (cbt) : mammals (mg-pest/kg-diet food)
self.cbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="cbt_mamm_1inmill_mort")
self.cbt_mamm_1inten_mort = pd.Series([], dtype="float", name="cbt_mamm_1inten_mort")
self.cbt_mamm_low_lc50 = pd.Series([], dtype="float", name="cbt_mamm_low_lc50")
self.cbt_mamm_sub_direct = pd.Series([], dtype="float", name="cbt_mamm_sub_direct")
self.cbt_mamm_grow_noec = pd.Series([], dtype="float", name="cbt_mamm_grow_noec")
self.cbt_mamm_grow_loec = pd.Series([], dtype="float", name="cbt_mamm_grow_loec")
self.cbt_mamm_repro_noec = pd.Series([], dtype="float", name="cbt_mamm_repro_noec")
self.cbt_mamm_repro_loec = pd.Series([], dtype="float", name="cbt_mamm_repro_loec")
self.cbt_mamm_behav_noec = pd.Series([], dtype="float", name="cbt_mamm_behav_noec")
self.cbt_mamm_behav_loec = pd.Series([], dtype="float", name="cbt_mamm_behav_loec")
self.cbt_mamm_sensory_noec = pd.Series([], dtype="float", name="cbt_mamm_sensory_noec")
self.cbt_mamm_sensory_loec = pd.Series([], dtype="float", name="cbt_mamm_sensory_loec")
self.cbt_mamm_sub_indirect = pd.Series([], dtype="float", name="cbt_mamm_sub_indirect")
# concentration-based toxicity (cbt) : birds (mg-pest/kg-diet food)
self.cbt_bird_1inmill_mort = pd.Series([], dtype="float", name="cbt_bird_1inmill_mort")
self.cbt_bird_1inten_mort = pd.Series([], dtype="float", name="cbt_bird_1inten_mort")
self.cbt_bird_low_lc50 = pd.Series([], dtype="float", name="cbt_bird_low_lc50")
self.cbt_bird_sub_direct = pd.Series([], dtype="float", name="cbt_bird_sub_direct")
self.cbt_bird_grow_noec = pd.Series([], dtype="float", name="cbt_bird_grow_noec")
self.cbt_bird_grow_loec = pd.Series([], dtype="float", name="cbt_bird_grow_loec")
self.cbt_bird_repro_noec = pd.Series([], dtype="float", name="cbt_bird_repro_noec")
self.cbt_bird_repro_loec = pd.Series([], dtype="float", name="cbt_bird_repro_loec")
self.cbt_bird_behav_noec = pd.Series([], dtype="float", name="cbt_bird_behav_noec")
self.cbt_bird_behav_loec = pd.Series([], dtype="float", name="cbt_bird_behav_loec")
self.cbt_bird_sensory_noec = pd.Series([], dtype="float", name="cbt_bird_sensory_noec")
self.cbt_bird_sensory_loec = pd.Series([], dtype="float", name="cbt_bird_sensory_loec")
self.cbt_bird_sub_indirect = pd.Series([], dtype="float", name="cbt_bird_sub_indirect")
# concentration-based toxicity (cbt) : reptiles, terrestrial-phase amphibians (mg-pest/kg-diet food)
self.cbt_reptile_1inmill_mort = pd.Series([], dtype="float", name="cbt_reptile_1inmill_mort")
self.cbt_reptile_1inten_mort = pd.Series([], dtype="float", name="cbt_reptile_1inten_mort")
self.cbt_reptile_low_lc50 = pd.Series([], dtype="float", name="cbt_reptile_low_lc50")
self.cbt_reptile_sub_direct = pd.Series([], dtype="float", name="cbt_reptile_sub_direct")
self.cbt_reptile_grow_noec = pd.Series([], dtype="float", name="cbt_reptile_grow_noec")
self.cbt_reptile_grow_loec = pd.Series([], dtype="float", name="cbt_reptile_grow_loec")
self.cbt_reptile_repro_noec = pd.Series([], dtype="float", name="cbt_reptile_repro_noec")
self.cbt_reptile_repro_loec = pd.Series([], dtype="float", name="cbt_reptile_repro_loec")
self.cbt_reptile_behav_noec = pd.Series([], dtype="float", name="cbt_reptile_behav_noec")
self.cbt_reptile_behav_loec = pd.Series([], dtype="float", name="cbt_reptile_behav_loec")
self.cbt_reptile_sensory_noec = pd.Series([], dtype="float", name="cbt_reptile_sensory_noec")
self.cbt_reptile_sensory_loec = pd.Series([], dtype="float", name="cbt_reptile_sensory_loec")
self.cbt_reptile_sub_indirect = pd.Series([], dtype="float", name="cbt_reptile_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates body weight (mg-pest/kg-bw(ww))
self.cbt_inv_bw_1inmill_mort = pd.Series([], dtype="float", name="cbt_inv_bw_1inmill_mort")
self.cbt_inv_bw_1inten_mort = pd.Series([], dtype="float", name="cbt_inv_bw_1inten_mort")
self.cbt_inv_bw_low_lc50 = pd.Series([], dtype="float", name="cbt_inv_bw_low_lc50")
self.cbt_inv_bw_sub_direct = pd.Series([], dtype="float", name="cbt_inv_bw_sub_direct")
self.cbt_inv_bw_grow_noec = pd.Series([], dtype="float", name="cbt_inv_bw_grow_noec")
        self.cbt_inv_bw_grow_loec = pd.Series([], dtype="float", name="cbt_inv_bw_grow_loec")
import os
from abc import ABC
import json
import numpy as np
import pandas as pd
from odin.classes import DatasetInterface, TaskType
from odin.utils import *
from odin.utils.utils import encode_segmentation, compute_aspect_ratio_of_segmentation
from pycocotools import mask
from pycocotools import coco
logger = get_root_logger()
class DatasetLocalization(DatasetInterface, ABC):
annotations = None
images = None
possible_analysis = set()
area_size_computed = False
aspect_ratio = False
common_properties = {'area', 'bbox', 'category_id', 'id', 'image_id', 'iscrowd', 'segmentation'}
supported_types = [TaskType.OBJECT_DETECTION, TaskType.INSTANCE_SEGMENTATION]
def __init__(self, dataset_gt_param, task_type, proposal_path=None, images_set_name='test',
images_abs_path=None, similar_classes=None, property_names=None, terminal_env=False,
properties_file=None, for_analysis=True, match_on_filename=False):
if task_type not in self.supported_types:
logger.error(f"Task not supported: {task_type}")
super().__init__(dataset_gt_param, proposal_path, images_set_name, images_abs_path, similar_classes,
property_names, terminal_env, properties_file, match_on_filename)
        self.objnames_TP_graphs = []  # reset; the true-positive classes that can be drawn
        self.possible_analysis = set()  # reset; updated according to the dataset provided
self.is_segmentation = task_type == TaskType.INSTANCE_SEGMENTATION
self.similar_classes = similar_classes
self.for_analysis = for_analysis
self.load()
def dataset_type_name(self):
return self.images_set_name
def get_annotations_from_class_list(self, classes_to_classify):
classes_id_filter = self.get_categories_id_from_names(classes_to_classify)
anns_filtered = self.annotations[self.annotations["category_id"].isin(classes_id_filter)]
return anns_filtered.to_dict("records")
def is_segmentation_ds(self):
return self.is_segmentation
def __load_proposals(self):
counter = 0
issues = 0
proposals = []
for i, cat in self.categories.iterrows():
c = cat["name"]
c_id = cat["id"]
proposal_path = os.path.join(self.proposal_path, c + ".txt")
with open(proposal_path, "r") as file:
for line in file:
if self.is_segmentation:
try:
arr = line.split(" ")
match_param, confidence = arr[0], float(arr[1])
if not self.match_on_filename:
match_param = int(match_param)
try:
segmentation = [float(v) for v in arr[2:]]
except:
segmentation = []
counter += 1
proposals.append(
{"confidence": confidence, "segmentation": segmentation, self.match_param_props: match_param,
"category_id": c_id, "id": counter})
except:
issues += 1
else:
try:
match_param, confidence, x1, y1, x2, y2 = line.split(" ")
if not self.match_on_filename:
match_param = int(match_param)
confidence = float(confidence)
x1, y1, x2, y2 = float(x1), float(y1), float(x2), float(y2)
counter += 1
proposals.append(
{"confidence": confidence, "bbox": [x1, y1, x2, y2], self.match_param_props: match_param,
"category_id": c_id, "id": counter})
except:
issues += 1
self.__proposals_length = counter
logger.info("Loaded {} proposals and failed with {}".format(counter, issues))
return pd.DataFrame(proposals)
def load(self, force_loading=False):
self.area_size_computed = False
self.aspect_ratio = False
try:
if force_loading or self.coco_lib is None or self.annotations is None:
self.coco_lib = coco.COCO(self.dataset_root_param)
data = json.load(open(self.dataset_root_param, "r"))
self.images = pd.DataFrame(data["images"])
                self.annotations = pd.DataFrame(data["annotations"])
import numpy as np
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import os
import re
import torch
import pandas as pd
import subprocess
import torch.nn.functional as F
def isEnglish(s):
try:
s.encode(encoding='utf-8').decode('ascii')
except UnicodeDecodeError:
return False
else:
return True
def camel_case_split(identifier):
matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
return [m.group(0) for m in matches]
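# Behaviour examples for camel_case_split (illustrative, added for clarity):
#   camel_case_split("SepalLength")  -> ["Sepal", "Length"]
#   camel_case_split("HTTPResponse") -> ["HTTP", "Response"]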
def preprocess_seq(input,wpt,stop_words):
#w = re.sub(r'[^a-zA-Z0-9@$%\s]', ' ', input, re.I | re.A)
w=input
w = w.strip()
# tokenize document
tokens = wpt.tokenize(w)
# filter stopwords out of document
filtered_tokens = [token for token in tokens if token not in stop_words]
camel_tokens=[]
for w in filtered_tokens:
inter = camel_case_split(w)
camel_tokens += inter
tokens=camel_tokens
# convert to lower case
tokens = ' '.join(tokens)
tokens = tokens.lower()
#tokens=tokens.split(' ')
return tokens
def preprocess(input,type):
if type=='attribute':
w = input.replace('-', " ").replace('_', ' ').replace('/', ' ').replace(',', ' ').replace('.', ' ').replace('|', ' ').replace(':', ' ')
#w = input.replace('_', ' ')
tokens = word_tokenize(w)
camel_tokens=[]
for w in tokens:
inter = camel_case_split(w)
camel_tokens += inter
tokens=camel_tokens
# convert to lower case
tokens = [w.lower() for w in tokens]
inter_words = []
for w in tokens:
inter = re.sub(r'\u2013+', ' ', w).split()
inter_words += inter
inter_words2 = []
for w in inter_words:
inter = re.sub(r'\u2014+', ' ', w).split()
inter_words2 += inter
# remove punctuation from each word
# table = str.maketrans('', '', string.punctuation)
# stripped = [w.translate(table) for w in tokens]
# remove remaining tokens that are not alphabetic
words = [word for word in inter_words2 if word.isalpha()]
# filter out stop words
stop_words = set(stopwords.words('english'))
words = [w for w in words if not w in stop_words]
# final_words = []
# for w in words:
# inter = re.sub('([a-z])([A-Z])', r'\1 \2', w).split()
# final_words += inter
final_words=words
final_words = [tok for tok in final_words if isEnglish(tok)]
elif type=='value':
#w = input.replace('_', ' ').replace(',', ' ')
w = input.replace('-', " ").replace('_', ' ').replace('/', ' ').replace(',', ' ').replace('.', ' ').replace('|', ' ').replace(':', ' ')
#w=input
tokens = word_tokenize(w)
camel_tokens = []
for w in tokens:
inter = camel_case_split(w)
camel_tokens += inter
tokens = camel_tokens
# convert to lower case
tokens = [w.lower() for w in tokens]
inter_words = []
for w in tokens:
inter = re.sub(r'\u2013+', ' ', w).split()
inter_words += inter
inter_words2 = []
for w in inter_words:
inter = re.sub(r'\u2014+', ' ', w).split()
inter_words2 += inter
# remove punctuation from each word
# table = str.maketrans('', '', string.punctuation)
# stripped = [w.translate(table) for w in tokens]
# remove remaining tokens that are not alphabetic
numerical_values=[]
string_values=[]
for word in inter_words2:
try:
float(word)
numerical_values.append(word)
except ValueError:
string_values.append(word)
string_values_final=[]
for w in string_values:
inter=re.split(r'(\d+)', w)
for word in inter:
if len(word)>0:
try:
float(word)
numerical_values.append(word)
except ValueError:
string_values_final.append(word)
#keep 0 digits
#numerical_values = [re.sub('\d', '#', s) for s in numerical_values]
#keep 1 digit
numerical_values_inter=[]
for s in numerical_values:
if s[0]=='-':
ss=s[2::]
ss=re.sub('\d', '#', ss)
ss=s[0:2]+ss
else:
ss = s[1::]
ss = re.sub('\d', '#', ss)
ss = s[0] + ss
numerical_values_inter += [ss]
#keep 2 digits
# for s in numerical_values:
# ss=s[2::]
# ss=re.sub('\d', '#', ss)
# ss=s[0:2]+ss
# numerical_values_inter+=[ss]
numerical_values=numerical_values_inter
inter_words2 = string_values_final
words = [word for word in inter_words2 if word.isalpha() or word in['$','@','%','£','€','°']]
# filter out stop words
stop_words = set(stopwords.words('english'))
stop_words.remove('d')
stop_words.remove('m')
stop_words.remove('s')
words = [w for w in words if not w in stop_words]
# final_words = []
# for w in words:
# inter = re.sub('([a-z])([A-Z])', r'\1 \2', w).split()
# final_words += inter
final_words=words
final_words = [tok for tok in final_words if isEnglish(tok) or tok in['$','@','%','£','€','°']]
final_words=final_words+numerical_values
elif type=='value2':
w = input.replace('-', " ").replace('_', ' ').replace('/', ' ').replace(',', ' ').replace('.', ' ').replace('|', ' ').replace(':', ' ')
tokens = word_tokenize(w)
# convert to lower case
tokens = [w.lower() for w in tokens]
inter_words = []
for w in tokens:
inter = re.sub(r'\u2013+', ' ', w).split()
inter_words += inter
inter_words2 = []
for w in inter_words:
inter = re.sub(r'\u2014+', ' ', w).split()
inter_words2 += inter
numerical_values=[]
string_values=[]
for word in inter_words2:
try:
float(word)
numerical_values.append(word)
except ValueError:
string_values.append(word)
string_values_final=[]
for w in string_values:
inter=re.split(r'(\d+)', w)
for word in inter:
if len(word)>0:
try:
float(word)
numerical_values.append(word)
except ValueError:
string_values_final.append(word)
inter_words2 = string_values_final
words = [word for word in inter_words2 if word.isalpha() or word in['$','@','%','£','€','°']]
# filter out stop words
stop_words = set(stopwords.words('english'))
stop_words.remove('d')
stop_words.remove('m')
stop_words.remove('s')
words = [w for w in words if not w in stop_words]
final_words = []
for w in words:
inter = re.sub('([a-z])([A-Z])', r'\1 \2', w).split()
final_words += inter
final_words = [tok for tok in final_words if isEnglish(tok) or tok in['$','@','%','£','€','°']]
final_words=final_words+numerical_values
elif type == 'description':
#w = input.replace('_', ' ').replace(',', ' ').replace('-', " ").replace('.', ' ')
w = input.replace('-', " ").replace('_', ' ').replace('/', ' ').replace(',', ' ').replace('.', ' ').replace('|', ' ').replace(':', ' ')
tokens = word_tokenize(w)
camel_tokens = []
for w in tokens:
inter = camel_case_split(w)
camel_tokens += inter
tokens = camel_tokens
# convert to lower case
tokens = [w.lower() for w in tokens]
inter_words = []
for w in tokens:
inter = re.sub(r'\u2013+', ' ', w).split()
inter_words += inter
inter_words2 = []
for w in inter_words:
inter = re.sub(r'\u2014+', ' ', w).split()
inter_words2 += inter
# remove punctuation from each word
#table = str.maketrans('', '', string.punctuation)
#stripped = [w.translate(table) for w in tokens]
# remove remaining tokens that are not alphabetic
words = [word for word in inter_words2 if word.isalpha()]
# filter out stop words
stop_words = set(stopwords.words('english'))
words = [w for w in words if not w in stop_words]
# final_words=[]
# for w in words:
# inter=re.sub('([a-z])([A-Z])', r'\1 \2', w).split()
# final_words+=inter
final_words=words
final_words = [tok for tok in final_words if isEnglish(tok)]
not_to_use=['com','u','comma','separated','values','csv','data','dataset','https','api','www','http','non','gov','rows','p','download','downloads','file','files','p']
final_words=[tok for tok in final_words if tok not in not_to_use]
return final_words
def kernal_mus(n_kernels):
l_mu = [1]
if n_kernels == 1:
return l_mu
bin_size = 2.0 / (n_kernels - 1) # score range from [-1, 1]
l_mu.append(1 - bin_size / 2) # mu: middle of the bin
for i in range(1, n_kernels - 1):
l_mu.append(l_mu[i] - bin_size)
return l_mu
def kernel_sigmas(n_kernels):
bin_size = 2.0 / (n_kernels - 1)
l_sigma = [0.001] # for exact match. small variance -> exact match
if n_kernels == 1:
return l_sigma
l_sigma += [0.1] * (n_kernels - 1)
return l_sigma
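# Illustrative sketch (added for clarity): how the kernel centres/widths above are typically
# consumed in a KNRM-style Gaussian kernel pooling of a similarity score. The function name,
# tensor shapes and the pooling itself are assumptions, not part of the original module.
def _demo_kernels(sim, n_kernels=11):
    mu = torch.tensor(kernal_mus(n_kernels))        # kernel centres in [-1, 1]
    sigma = torch.tensor(kernel_sigmas(n_kernels))  # first kernel ~ exact match
    # Gaussian kernel activation of a single similarity value
    return torch.exp(-0.5 * (sim - mu) ** 2 / sigma ** 2)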
def load_checkpoint(model, optimizer, losslogger, filename):
# Note: Input model & optimizer should be pre-defined. This routine only updates their states.
start_epoch = 0
if os.path.isfile(filename):
print("=> loading checkpoint '{}'".format(filename))
checkpoint = torch.load(filename)
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
losslogger = checkpoint['losslogger']
print("=> loaded checkpoint '{}' (epoch {})"
.format(filename, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(filename))
return model, optimizer, start_epoch, losslogger
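# Illustrative usage sketch for resuming training (added for clarity; the model, optimizer,
# loss logger and checkpoint filename below are placeholders, not part of the original module):
#     model, optimizer, start_epoch, losslogger = load_checkpoint(
#         model, optimizer, losslogger, "checkpoint.pth.tar")
#     for epoch in range(start_epoch, num_epochs):
#         ...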
def load_checkpoint_for_eval(model, filename):
# Note: Input model & optimizer should be pre-defined. This routine only updates their states.
start_epoch = 0
if os.path.isfile(filename):
print("=> loading checkpoint '{}'".format(filename))
checkpoint = torch.load(filename)
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(filename, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(filename))
return model
def read_file_for_nfcg(file):
text_file = open(file, "r")
lines = text_file.readlines()
queries_id = []
list_lines = []
for line in lines:
# print(line)
line = line[0:len(line) - 1]
aa = line.split('\t')
queries_id += [aa[0]]
list_lines.append(aa)
inter = np.array(list_lines)
return inter
def calculate_metrics(inter, output_file,all_outputs,ndcg_file):
inter2 = []
for jj, item in enumerate(inter):
item_inter = [i for i in item]
item_inter[4] = str(all_outputs[jj])
inter2.append(item_inter)
inter3 = np.array(inter2)
np.savetxt(output_file, inter3, fmt="%s")
#batcmd = "./trec_eval -m ndcg_cut.5 "+ndcg_file+" " + output_file
batcmd = "./trec_eval -m map " + ndcg_file + " " + output_file
result = subprocess.check_output(batcmd, shell=True, encoding='cp437')
res = result.split('\t')
map = float(res[2])
batcmd = "./trec_eval -m recip_rank " + ndcg_file + " " + output_file
result = subprocess.check_output(batcmd, shell=True, encoding='cp437')
res = result.split('\t')
mrr = float(res[2])
batcmd = "./trec_eval -m ndcg_cut.5 " + ndcg_file + " " + output_file
result = subprocess.check_output(batcmd, shell=True, encoding='cp437')
res = result.split('\t')
ndcg = float(res[2])
return ndcg,map,mrr
def calculate_ndcg(inter, output_file,all_outputs,ndcg_file):
inter2 = []
for jj, item in enumerate(inter):
item_inter = [i for i in item]
item_inter[4] = str(all_outputs[jj])
inter2.append(item_inter)
inter3 = np.array(inter2)
np.savetxt(output_file, inter3, fmt="%s")
batcmd = "./trec_eval -m ndcg_cut.5 "+ndcg_file+" " + output_file
result = subprocess.check_output(batcmd, shell=True, encoding='cp437')
res = result.split('\t')
ndcg = float(res[2])
return ndcg
def qrel_for_data(data,list_lines_qrels,output_file):
#list_lines_qrels=np.array(list_lines_qrels)
    df = pd.DataFrame(list_lines_qrels)
"""Plot data gathered for success and collision rates"""
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
def plot_success_rate():
# Define data, gathered from various scripts, in tidy data format
data = []
# Neural oCBF/oCLF data generated using eval_turtlebot_neural_cbf_mpc_success_rates
data += [
{
"Algorithm": "Observation-based CBF/CLF (ours)",
"Metric": "Goal-reaching rate",
"Value": 0.932,
},
{
"Algorithm": "Observation-based CBF/CLF (ours)",
"Metric": "Safety rate",
"Value": 1.0,
},
{
"Algorithm": "Observation-based CBF/CLF (ours)",
"Metric": "Avg. time to goal (s)",
"Value": 2.1838412017167395,
},
]
# State-based CBF data also generated using
# eval_turtlebot_neural_cbf_mpc_success_rates
data += [
{
"Algorithm": "State-based CBF/CLF",
"Metric": "Goal-reaching rate",
"Value": 0.546,
},
{"Algorithm": "State-based CBF/CLF", "Metric": "Safety rate", "Value": 0.626},
{
"Algorithm": "State-based CBF/CLF",
"Metric": "Avg. time to goal (s)",
"Value": 1.9382783882783883,
},
]
# MPC data also generated using eval_turtlebot_neural_cbf_mpc_success_rates
data += [
{
"Algorithm": "MPC",
"Metric": "Goal-reaching rate",
"Value": 0.904,
},
{"Algorithm": "MPC", "Metric": "Safety rate", "Value": 0.996},
{
"Algorithm": "MPC",
"Metric": "Avg. time to goal (s)",
"Value": 2.093,
},
]
# PPO data gathered by running
# python scripts/test_policy.py \
# data/2021-08-13_ppo_turtle2d/2021-08-13_15-23-36-ppo_turtle2d_s0 \
# --len 100 --episodes 100 --norender
# in the safety_starter_agents directory, with the turtle2d env.
# Steps are converted to time with timestep 0.1
data += [
{"Algorithm": "PPO", "Metric": "Goal-reaching rate", "Value": 256 / 500},
{"Algorithm": "PPO", "Metric": "Safety rate", "Value": 1 - 57 / 500},
{"Algorithm": "PPO", "Metric": "Avg. time to goal (s)", "Value": 0.1 * 38.07},
]
# Convert to dataframe
    df = pd.DataFrame(data)
# -*- coding: utf-8 -*-
from pandas.compat import range
import pandas.util.testing as tm
from pandas import read_csv
import os
import nose
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
import pandas.tools.rplot as rplot
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def between(a, b, x):
"""Check if x is in the somewhere between a and b.
Parameters:
-----------
a: float, interval start
b: float, interval end
x: float, value to test for
Returns:
--------
True if x is between a and b, False otherwise
"""
if a < b:
return x >= a and x <= b
else:
return x <= a and x >= b
@tm.mplskip
class TestUtilityFunctions(tm.TestCase):
"""
Tests for RPlot utility functions.
"""
def setUp(self):
path = os.path.join(curpath(), 'data/iris.csv')
self.data = read_csv(path, sep=',')
def test_make_aes1(self):
aes = rplot.make_aes()
self.assertTrue(aes['x'] is None)
self.assertTrue(aes['y'] is None)
self.assertTrue(aes['size'] is None)
self.assertTrue(aes['colour'] is None)
self.assertTrue(aes['shape'] is None)
self.assertTrue(aes['alpha'] is None)
self.assertTrue(isinstance(aes, dict))
def test_make_aes2(self):
self.assertRaises(ValueError, rplot.make_aes,
size=rplot.ScaleShape('test'))
self.assertRaises(ValueError, rplot.make_aes,
colour=rplot.ScaleShape('test'))
self.assertRaises(ValueError, rplot.make_aes,
shape=rplot.ScaleSize('test'))
self.assertRaises(ValueError, rplot.make_aes,
alpha=rplot.ScaleShape('test'))
def test_dictionary_union(self):
dict1 = {1 : 1, 2 : 2, 3 : 3}
dict2 = {1 : 1, 2 : 2, 4 : 4}
union = rplot.dictionary_union(dict1, dict2)
self.assertEqual(len(union), 4)
keys = list(union.keys())
self.assertTrue(1 in keys)
self.assertTrue(2 in keys)
self.assertTrue(3 in keys)
self.assertTrue(4 in keys)
self.assertEqual(rplot.dictionary_union(dict1, {}), dict1)
self.assertEqual(rplot.dictionary_union({}, dict1), dict1)
self.assertEqual(rplot.dictionary_union({}, {}), {})
def test_merge_aes(self):
layer1 = rplot.Layer(size=rplot.ScaleSize('test'))
layer2 = rplot.Layer(shape=rplot.ScaleShape('test'))
rplot.merge_aes(layer1, layer2)
self.assertTrue(isinstance(layer2.aes['size'], rplot.ScaleSize))
self.assertTrue(isinstance(layer2.aes['shape'], rplot.ScaleShape))
self.assertEqual(layer2.aes['size'], layer1.aes['size'])
for key in layer2.aes.keys():
if key != 'size' and key != 'shape':
self.assertTrue(layer2.aes[key] is None)
def test_sequence_layers(self):
layer1 = rplot.Layer(self.data)
layer2 = rplot.GeomPoint(x='SepalLength', y='SepalWidth',
size=rplot.ScaleSize('PetalLength'))
layer3 = rplot.GeomPolyFit(2)
result = rplot.sequence_layers([layer1, layer2, layer3])
self.assertEqual(len(result), 3)
last = result[-1]
self.assertEqual(last.aes['x'], 'SepalLength')
self.assertEqual(last.aes['y'], 'SepalWidth')
self.assertTrue(isinstance(last.aes['size'], rplot.ScaleSize))
self.assertTrue(self.data is last.data)
self.assertTrue(rplot.sequence_layers([layer1])[0] is layer1)
@tm.mplskip
class TestTrellis(tm.TestCase):
def setUp(self):
path = os.path.join(curpath(), 'data/tips.csv')
self.data = read_csv(path, sep=',')
layer1 = rplot.Layer(self.data)
layer2 = rplot.GeomPoint(x='total_bill', y='tip')
layer3 = rplot.GeomPolyFit(2)
self.layers = rplot.sequence_layers([layer1, layer2, layer3])
self.trellis1 = rplot.TrellisGrid(['sex', 'smoker'])
self.trellis2 = rplot.TrellisGrid(['sex', '.'])
self.trellis3 = rplot.TrellisGrid(['.', 'smoker'])
self.trellised1 = self.trellis1.trellis(self.layers)
self.trellised2 = self.trellis2.trellis(self.layers)
self.trellised3 = self.trellis3.trellis(self.layers)
def test_grid_sizes(self):
self.assertEqual(len(self.trellised1), 3)
self.assertEqual(len(self.trellised2), 3)
self.assertEqual(len(self.trellised3), 3)
self.assertEqual(len(self.trellised1[0]), 2)
self.assertEqual(len(self.trellised1[0][0]), 2)
self.assertEqual(len(self.trellised2[0]), 2)
self.assertEqual(len(self.trellised2[0][0]), 1)
self.assertEqual(len(self.trellised3[0]), 1)
self.assertEqual(len(self.trellised3[0][0]), 2)
self.assertEqual(len(self.trellised1[1]), 2)
self.assertEqual(len(self.trellised1[1][0]), 2)
self.assertEqual(len(self.trellised2[1]), 2)
self.assertEqual(len(self.trellised2[1][0]), 1)
self.assertEqual(len(self.trellised3[1]), 1)
self.assertEqual(len(self.trellised3[1][0]), 2)
self.assertEqual(len(self.trellised1[2]), 2)
self.assertEqual(len(self.trellised1[2][0]), 2)
self.assertEqual(len(self.trellised2[2]), 2)
self.assertEqual(len(self.trellised2[2][0]), 1)
self.assertEqual(len(self.trellised3[2]), 1)
self.assertEqual(len(self.trellised3[2][0]), 2)
def test_trellis_cols_rows(self):
self.assertEqual(self.trellis1.cols, 2)
self.assertEqual(self.trellis1.rows, 2)
self.assertEqual(self.trellis2.cols, 1)
self.assertEqual(self.trellis2.rows, 2)
self.assertEqual(self.trellis3.cols, 2)
self.assertEqual(self.trellis3.rows, 1)
@tm.mplskip
class TestScaleGradient(tm.TestCase):
def setUp(self):
path = os.path.join(curpath(), 'data/iris.csv')
self.data = read_csv(path, sep=',')
self.gradient = rplot.ScaleGradient("SepalLength", colour1=(0.2, 0.3,
0.4),
colour2=(0.8, 0.7, 0.6))
def test_gradient(self):
for index in range(len(self.data)):
row = self.data.iloc[index]
r, g, b = self.gradient(self.data, index)
r1, g1, b1 = self.gradient.colour1
r2, g2, b2 = self.gradient.colour2
self.assertTrue(between(r1, r2, r))
self.assertTrue(between(g1, g2, g))
self.assertTrue(between(b1, b2, b))
@tm.mplskip
class TestScaleGradient2(tm.TestCase):
def setUp(self):
path = os.path.join(curpath(), 'data/iris.csv')
self.data = read_csv(path, sep=',')
self.gradient = rplot.ScaleGradient2("SepalLength", colour1=(0.2, 0.3, 0.4), colour2=(0.8, 0.7, 0.6), colour3=(0.5, 0.5, 0.5))
def test_gradient2(self):
for index in range(len(self.data)):
row = self.data.iloc[index]
r, g, b = self.gradient(self.data, index)
r1, g1, b1 = self.gradient.colour1
r2, g2, b2 = self.gradient.colour2
r3, g3, b3 = self.gradient.colour3
value = row[self.gradient.column]
a_ = min(self.data[self.gradient.column])
b_ = max(self.data[self.gradient.column])
scaled = (value - a_) / (b_ - a_)
if scaled < 0.5:
self.assertTrue(between(r1, r2, r))
self.assertTrue(between(g1, g2, g))
self.assertTrue(between(b1, b2, b))
else:
self.assertTrue(between(r2, r3, r))
self.assertTrue(between(g2, g3, g))
self.assertTrue(between(b2, b3, b))
@tm.mplskip
class TestScaleRandomColour(tm.TestCase):
def setUp(self):
path = os.path.join(curpath(), 'data/iris.csv')
self.data = read_csv(path, sep=',')
self.colour = rplot.ScaleRandomColour('SepalLength')
def test_random_colour(self):
for index in range(len(self.data)):
colour = self.colour(self.data, index)
self.assertEqual(len(colour), 3)
r, g, b = colour
self.assertTrue(r >= 0.0)
self.assertTrue(g >= 0.0)
self.assertTrue(b >= 0.0)
self.assertTrue(r <= 1.0)
self.assertTrue(g <= 1.0)
self.assertTrue(b <= 1.0)
@tm.mplskip
class TestScaleConstant(tm.TestCase):
def test_scale_constant(self):
scale = rplot.ScaleConstant(1.0)
self.assertEqual(scale(None, None), 1.0)
scale = rplot.ScaleConstant("test")
self.assertEqual(scale(None, None), "test")
class TestScaleSize(tm.TestCase):
def setUp(self):
path = os.path.join(curpath(), 'data/iris.csv')
self.data = read_csv(path, sep=',')
self.scale1 = rplot.ScaleShape('Name')
self.scale2 = rplot.ScaleShape('PetalLength')
def test_scale_size(self):
for index in range(len(self.data)):
marker = self.scale1(self.data, index)
self.assertTrue(marker in ['o', '+', 's', '*', '^', '<', '>', 'v', '|', 'x'])
def test_scale_overflow(self):
def f():
for index in range(len(self.data)):
self.scale2(self.data, index)
self.assertRaises(ValueError, f)
@tm.mplskip
class TestRPlot(tm.TestCase):
def test_rplot1(self):
import matplotlib.pyplot as plt
path = os.path.join(curpath(), 'data/tips.csv')
plt.figure()
self.data = read_csv(path, sep=',')
self.plot = rplot.RPlot(self.data, x='tip', y='total_bill')
self.plot.add(rplot.TrellisGrid(['sex', 'smoker']))
self.plot.add(rplot.GeomPoint(colour=rplot.ScaleRandomColour('day'), shape=rplot.ScaleShape('size')))
self.fig = plt.gcf()
self.plot.render(self.fig)
def test_rplot2(self):
import matplotlib.pyplot as plt
path = os.path.join(curpath(), 'data/tips.csv')
plt.figure()
self.data = read_csv(path, sep=',')
self.plot = rplot.RPlot(self.data, x='tip', y='total_bill')
self.plot.add(rplot.TrellisGrid(['.', 'smoker']))
self.plot.add(rplot.GeomPoint(colour=rplot.ScaleRandomColour('day'), shape=rplot.ScaleShape('size')))
self.fig = plt.gcf()
self.plot.render(self.fig)
def test_rplot3(self):
import matplotlib.pyplot as plt
path = os.path.join(curpath(), 'data/tips.csv')
plt.figure()
self.data = read_csv(path, sep=',')
self.plot = rplot.RPlot(self.data, x='tip', y='total_bill')
        self.plot.add(rplot.TrellisGrid(['sex', '.']))
import argparse
import os
import pickle
from datetime import datetime
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.stats import wilcoxon
from statsmodels import robust
import config
import data_loader
import solve.helper
from data_loader import load_stats_log, load_predict_log
def figsize_column(scale, height_ratio=1.0):
fig_width_pt = 239 # Get this from LaTeX using \the\columnwidth
inches_per_pt = 1.0 / 72.27 # Convert pt to inch
golden_mean = (np.sqrt(5.0) - 1.0) / 2.0 # Aesthetic ratio (you could change this)
fig_width = fig_width_pt * inches_per_pt * scale # width in inches
fig_height = fig_width * golden_mean * height_ratio # height in inches
fig_size = [fig_width, fig_height]
return fig_size
def figsize_text(scale, height_ratio=1.0):
fig_width_pt = 505 # Get this from LaTeX using \the\textwidth
inches_per_pt = 1.0 / 72.27 # Convert pt to inch
golden_mean = (np.sqrt(5.0) - 1.0) / 2.0 # Aesthetic ratio (you could change this)
fig_width = fig_width_pt * inches_per_pt * scale # width in inches
fig_height = fig_width * golden_mean * height_ratio # height in inches
fig_size = [fig_width, fig_height]
return fig_size
pgf_with_latex = { # setup matplotlib to use latex for output
"pgf.texsystem": "pdflatex", # change this if using xetex or lautex
"text.usetex": True, # use LaTeX to write all text
"font.family": "serif",
"font.serif": [], # blank entries should cause plots to inherit fonts from the document
"font.sans-serif": [],
"font.monospace": [],
"axes.labelsize": 9,
"font.size": 9,
"legend.fontsize": 9,
"xtick.labelsize": 9,
"ytick.labelsize": 9,
"figure.figsize": figsize_column(1.0),
"text.latex.preamble": [
r"\usepackage[utf8x]{inputenc}", # use utf8 fonts because your computer can handle it :)
r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble
]
}
sns.set_style("whitegrid", pgf_with_latex)
sns.set_context("paper")
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
LABELS = {
'mrcpsp': 'MRCPSP',
'rcpsp': 'RCPSP',
'2DBinPacking': 'Bin Packing',
'2DLevelPacking': 'Bin Packing',
'prize-collecting': 'Prize Coll.',
'jobshop': 'Jobshop',
'vrp': 'VRP',
'tsp': 'TSP',
'open_stacks': 'Open Stacks',
'cutstock': 'Cutting Stock',
'2DLevelPacking.mzn': 'Bin Packing',
'cutstock.mzn': 'Cutting Stock',
'mrcpsp.mzn': 'MRCPSP',
'jobshop.mzn': 'Jobshop',
'open_stacks_01.mzn': 'Open Stacks',
'rcpsp.mzn': 'RCPSP',
'tsp.mzn': 'TSP',
'vrp.mzn': 'VRP',
'chuffed': 'Chuffed',
'choco': 'Choco',
'ortools': 'OR-Tools',
'gecode': 'Gecode',
'sunnycp': 'Sunny-CP',
'objective': 'O',
'objective_diff': 'O',
'time': 'T',
'time_diff': 'T',
'mzn': 'Model',
'solver': 'Solver',
'filters': 'Filters',
'table-layout': 'Table Layout',
'depot-placement': 'Depot Place.',
'carpet-cutting': 'Carpet',
'average': 'Avg.',
'extreme': 'Max.',
'network': 'NN$_s$',
'networka': 'NN$_a$',
'xgb': 'GTB$_s$',
'xgba': 'GTB$_a$',
'linear': 'LR',
'svm': 'SVM',
'pruned_domain': 'Domain Pruned (%)',
'pruned_ratio': 'Pruning (%)',
'estimator': 'Prediction Model',
'adjustment': 'Target Shift',
'problem': 'Problem',
'count': 'I',
'obj_diff': 'QOF',
'time_diff': 'TTF',
'hard': 'Boundary Constraints',
'soft': 'Bounds-Aware Search',
'est': 'EST',
}
def estimation_table_network(files, filename=None):
labels = {
'problem': 'Problem',
'dzn': 'Inst.',
'correct': 'Satisfiable (%)',
'error': 'Error (%)',
'loss_fn': 'Loss',
'ensemble_mode': 'Ensemble Mode'
}
active_problems = [p.name for p in config.PROBLEMS]
dfs = []
for i, f in enumerate(files):
dump = pickle.load(open(f, 'rb'))
if dump['loss_fn'] != 'shiftedmse':
continue
if dump['problem'].name not in active_problems:
continue
pred_df = pd.DataFrame(dump['prediction'], columns=['dzn', 'predicted', 'truth'])
pred_df['iteration'] = i
pred_df['problem'] = dump['problem'].name
pred_df['loss_fn'] = dump['loss_fn']
pred_df['ensemble_mode'] = dump['ensemble_mode']
if dump['problem'].minmax == 'min':
pred_df['correct'] = pred_df['predicted'] >= pred_df['truth']
else:
pred_df['correct'] = pred_df['predicted'] <= pred_df['truth']
corr_df = pred_df[pred_df['correct']]
pred_df['error'] = abs(corr_df['predicted'] - corr_df['truth']) / corr_df['truth']
dfs.append(pred_df)
df = pd.concat(dfs)
gdf = df.groupby(['problem', 'ensemble_mode', 'loss_fn'], as_index=False).agg({
'dzn': 'nunique',
'correct': lambda x: 100 * np.mean(x),
'error': lambda x: 100 * np.mean(x)
})
gdf = gdf.round(2)
gdf = gdf.rename(columns=labels)
gdf = gdf.replace(LABELS.keys(), LABELS.values())
out_df = gdf[[labels['problem'], labels['ensemble_mode'], labels['correct'], labels['error']]]
out_df = out_df.pivot(index=labels['problem'], columns=labels['ensemble_mode'])
print(out_df)
if filename:
output_string = out_df.to_latex(multicolumn=True, multicolumn_format='c')
open(filename, 'w').write(output_string)
def adjustment_table(logfile, outputdir=None):
df = load_stats_log(logfile)
df.loc[(df.estimator == 'network') & (df.loss == 'shiftedmse'), 'estimator'] = 'networka'
df[['pruned_domain', 'adjustment']] *= 100
out_df = df.pivot_table(index='adjustment', columns='estimator', values='pruned_domain')
out_df = out_df.round(1)
out_df.index = out_df.index.astype(int)
out_df.rename(columns=LABELS, inplace=True)
out_df.sort_index(level=0, axis=1, inplace=True)
if outputdir:
formatter = [(lambda cmax: (lambda x: max_formatter(x, cmax)))(cmax) for cmax in out_df.max(axis=0).tolist()]
output_string = out_df.to_latex(index_names=False, column_format='rrrrrrr', formatters=formatter, escape=False)
output_string = output_string.replace('toprule\n{} &', 'toprule\nAdj. &')
output_string = output_string.replace('\\midrule\n', '')
output_string = output_string.replace('\n0 ', '\n\\midrule\n0 ')
if 1 in df.outputs.unique():
filename = 'adjustment_o1.tex'
else:
filename = 'adjustment_o2.tex'
open(os.path.join(outputdir, filename), 'w').write(output_string)
print(out_df)
out_df.to_clipboard()
def adjustment_graph(logfile, outputdir=None, column='true_overest'):
if outputdir:
height_ratio = 0.4 if column in ('true_overest', 'true_pairs') else 0.65
_, ax = plt.subplots(figsize=figsize_column(1.0, height_ratio=height_ratio))
else:
_, ax = plt.subplots()
df = load_stats_log(logfile)
df.loc[(df.estimator == 'network') & (df.loss == 'shiftedmse'), 'estimator'] = 'networka'
df[[column]] *= 100
df = df[(df.estimator.isin(['network', 'networka', 'xgb', 'xgba', 'linear', 'svm'])) & (df.adjustment < 1.0)]
# df['adjustment'] = df['adjustment'].astype(int)
df = df.replace(LABELS.keys(), LABELS.values())
sns.pointplot(x='adjustment', y=column, hue='estimator', scale=0.5, estimator=np.median, join=True,
# hue_order=['GTB/a', 'GTB/s', 'NN/a', 'NN/s', 'SVM', 'Linear'],
markers=['d', '.', '+', 'x', '*', 'v'],
dodge=False, data=df, ax=ax) # ci='sd'
if column in ('true_overest', 'true_pairs'):
        ax.set_ylabel(r'Admissible (\%)')
ax.set_xlabel('')
ax.legend(ncol=3, loc='lower right', labelspacing=0.2, columnspacing=0.1)
# ax.set_xticklabels([])
else:
        ax.set_ylabel(r'Admissible (\%)')
        ax.set_xlabel(r'Adjustment Factor $\lambda$')
# ax.legend_.remove()
ax.legend(ncol=3, loc='lower right', labelspacing=0.2, columnspacing=0.1)
ax.set_ylim([0, 100])
sns.despine(ax=ax)
if outputdir:
if 1 in df.outputs.unique():
filename = 'adjustment_o1_{}.pgf'.format(column)
else:
filename = 'adjustment_o2_{}.pgf'.format(column)
# plt.tight_layout()
plt.savefig(os.path.join(outputdir, filename), dpi=500, bbox_inches='tight', pad_inches=0)
def estimation_table_o2(logfile, outputdir=None):
run_name = os.path.basename(os.path.dirname(logfile[0]))
df = load_stats_log(os.path.join(logfile[0], "*-stats.log"))
# Backwards compatibility; in new runs it is already named networka
df.loc[(df.estimator == 'network') & (df.loss == 'shiftedmse'), 'estimator'] = 'networka'
df = df[df.estimator.isin(['network', 'networka', 'xgb', 'xgba', 'linear', 'svm'])]
# print(df[['estimator', 'traintime']].groupby(['estimator'], as_index=False).median())
gdf = df[['estimator', 'adjustment', 'pruned_domain']].groupby(['estimator', 'adjustment'], as_index=False).median()
bestconfigs = gdf.ix[gdf.groupby('estimator', as_index=False)['pruned_domain'].idxmax()][
['estimator', 'adjustment']]
preddf = load_predict_log(os.path.join(logfile[0], "*-predict.log"))
preddf.loc[(preddf.estimator == 'network') & (preddf.loss == 'shiftedmse'), 'estimator'] = 'networka'
preddf = preddf[preddf.estimator.isin(['network', 'networka', 'xgb', 'xgba', 'linear', 'svm'])]
pdf = preddf.merge(bestconfigs, how='inner', suffixes=['', '_r'], on=['estimator', 'adjustment'])
# bestconfigs = gdf.ix[gdf.groupby('estimator', as_index=False)['pruned_domain'].idxmax()][
# ['estimator', 'adjustment']]
# pdf = df.merge(bestconfigs, how='inner', suffixes=['', '_r'], on=['estimator', 'adjustment'])
print(bestconfigs)
labels = {'true_pair': 'Feas.',
'size_red': 'Size',
'gap': 'Gap',
'pruned_domain': 'SP',
'pruned_ratio': 'Pruned',
'estimator': 'Model',
'problem': 'Problem',
'overest_error': 'OE',
'underest_error': 'UE'}
pdf['true_pair'] = (pdf['underest'] <= pdf['optimum']) & (pdf['optimum'] <= pdf['overest'])
pdf['size_red'] = 0
pdf.loc[pdf['true_pair'], 'size_red'] = 1 - (pdf['dom_size_new'] / pdf['dom_size'])
pdf['gap'] = 0
pdf.loc[pdf['true_pair'], 'gap'] = 1 - (
(pdf['dom_upper_new'] - pdf['optimum']).abs() / (pdf['dom_upper'] - pdf['optimum']).abs())
pdf[['size_red', 'gap']] *= 100
pdf[['size_red', 'gap']] # .astype(int, copy=False)
# pdf[['true_pairs', 'pruned_domain', 'pruned_ratio', 'overest_error', 'underest_error']] *= 100
# pdf['overest_error'] += 1
# pdf['underest_error'] += 1
pdf.rename(columns=labels, inplace=True)
pdf.replace(LABELS.keys(), LABELS.values(), inplace=True)
def cust_aggfunc_star(x):
m = np.median(x)
# m = np.mean(x)
std = np.ceil(robust.scale.mad(x)).astype(int)
# std = np.ceil(np.std(x)).astype(int)
if std <= 5:
appendix = ''
elif std <= 10:
appendix = '+'
elif std <= 20:
appendix = '*'
elif std <= 30:
appendix = '**'
elif std <= 40:
appendix = '***'
else:
appendix = '{:d}'.format(std)
# appendix = '*' * (min(std // 5, 5))
if x.name == labels['overest_error']:
return "{:.1f}\\textsuperscript{{{}}}".format(m, appendix)
else:
m = np.round(m).astype(int)
return "{:d}\\textsuperscript{{{}}}".format(m, appendix)
def cust_aggfunc_pm(x):
m = np.floor(np.median(x)).astype(int)
std = np.ceil(robust.scale.mad(x)).astype(int)
# std = np.ceil(np.std(x)).astype(int)
if std > 5:
return "{:d}\\textsuperscript{{$\\pm${:d}}}".format(m, std)
else:
return "{:d}\\textsuperscript{{}}".format(m)
def median_int(x):
m = np.median(x)
m = np.floor(m).astype(int)
return "{:d}".format(m)
out_df = pdf.pivot_table(index=labels['problem'], columns=labels['estimator'], margins=False, margins_name='All',
values=[labels['size_red'], labels['gap']], aggfunc=cust_aggfunc_star)
out_df.columns = out_df.columns.swaplevel(0, 1)
out_df.sort_index(level=0, axis=1, inplace=True)
if outputdir:
output_string = out_df.to_latex(multicolumn=True, multicolumn_format='c',
column_format='l' + "|".join(
['RR' for _ in range(pdf[labels['estimator']].nunique())]),
index_names=False, escape=False)
output_string = output_string.replace('\\begin{tabular}', '\\begin{tabularx}{0.97\\textwidth}')
output_string = output_string.replace('\\end{tabular}', '\\end{tabularx}')
output_string = output_string.replace('±', '\\(\\pm\\)')
filename = 'estimation_o2.tex'
open(os.path.join(outputdir, filename), 'w').write(output_string)
def estimation_table(logfile, outputdir=None):
run_name = os.path.basename(os.path.dirname(logfile[0]))
df = load_stats_log(logfile)
# Backwards compatibility; in new runs it is already named networka
df.loc[(df.estimator == 'network') & (df.loss == 'shiftedmse'), 'estimator'] = 'networka'
df = df[df.estimator.isin(['network', 'networka', 'xgb', 'xgba', 'linear', 'svm'])]
print(df[['estimator', 'traintime']].groupby(['estimator'], as_index=False).median())
gdf = df[['estimator', 'adjustment', 'pruned_domain']].groupby(['estimator', 'adjustment'], as_index=False).median()
# gdf['pruned_domain_both'] = gdf['true_pairs'] & gdf['pruned_lower_dom'] & gdf['pruned_upper_dom']
bestconfigs = gdf.ix[gdf.groupby('estimator', as_index=False)['pruned_domain'].idxmax()][
['estimator', 'adjustment']]
pdf = df.merge(bestconfigs, how='inner', suffixes=['', '_r'], on=['estimator', 'adjustment'])
print(bestconfigs)
if df.outputs.unique()[0] == 1:
labels = {'true_pairs': 'Feas.',
'pruned_domain': 'SP',
'pruned_ratio': 'Pruned',
'estimator': 'Model',
'problem': 'Problem',
'overest_error': 'Gap',
'underest_error': 'Gap'}
else:
labels = {'true_pairs': 'Feas.',
'pruned_domain': 'SP',
'pruned_ratio': 'Pruned',
'estimator': 'Model',
'problem': 'Problem',
'overest_error': 'OE',
'underest_error': 'UE'}
pdf[['true_pairs', 'pruned_domain', 'pruned_ratio', 'overest_error', 'underest_error']] *= 100
pdf['overest_error'] += 1
pdf['underest_error'] += 1
pdf.rename(columns=labels, inplace=True)
pdf = pdf.replace(LABELS.keys(), LABELS.values())
def cust_aggfunc_pm(x):
m = np.floor(np.median(x)).astype(int)
std = np.ceil(robust.scale.mad(x)).astype(int)
# std = np.ceil(np.std(x)).astype(int)
return "{:d}+-{:2d}".format(m, std)
def cust_aggfunc_star(x):
m = np.median(x)
# m = np.mean(x)
std = np.ceil(robust.scale.mad(x)).astype(int)
# std = np.ceil(np.std(x)).astype(int)
appendix = '*' * (min(std // 5, 5))
if x.name == labels['overest_error']:
return "{:.1f}\\textsuperscript{{{}}}".format(m, appendix)
else:
m = np.floor(m).astype(int)
return "{:d}\\textsuperscript{{{}}}".format(m, appendix)
# Full Table
out_df = pdf.pivot_table(index=labels['problem'], columns=labels['estimator'], margins=False, margins_name='All',
values=[labels['true_pairs'], labels['pruned_ratio'], labels['overest_error'],
labels['underest_error']],
aggfunc=cust_aggfunc_pm)
out_df.columns = out_df.columns.swaplevel(0, 1)
out_df.sort_index(level=0, axis=1, inplace=True)
# del out_df['All'] # No separate all columns
if outputdir:
output_string = out_df.to_latex(multicolumn=True, multicolumn_format='c',
column_format='l' + "|".join(
['rrrr' for _ in range(pdf[labels['estimator']].nunique())]),
index_names=False, escape=False)
output_string = output_string.replace('{} & Feas', 'Problem & Feas')
output_string = output_string.replace('\\midrule\n', '')
output_string = output_string.replace('Pruned \\\\\n', 'Pruned \\\\\n\\midrule\n')
output_string = output_string.replace('\\\n2DBP', '\\\n\\midrule\n2DBP')
output_string = output_string.replace('+-', '\\(\\pm\\)')
if 1 in df.outputs.unique():
filename = 'estimation_o1_full.tex'
else:
filename = 'estimation_o2_full.tex'
open(os.path.join(outputdir, filename), 'w').write(output_string)
out_df.to_csv(os.path.join(outputdir, filename + '.csv'))
print(out_df)
out_df.to_clipboard()
out_df.to_html(run_name + '_estimation.html')
out_df.to_csv(run_name + '_estimation.csv')
# Small table
out_df = pdf.pivot_table(index=labels['problem'], columns=labels['estimator'], margins=False, margins_name='All',
values=[labels['pruned_ratio'], labels['overest_error'], labels['underest_error']],
aggfunc=cust_aggfunc_star)
out_df.columns = out_df.columns.swaplevel(0, 1)
out_df.sort_index(level=0, axis=1, inplace=True)
if outputdir:
output_string = out_df.to_latex(multicolumn=True, multicolumn_format='c', column_format='l' + "|".join(
['rrr' for _ in range(pdf[labels['estimator']].nunique())]),
index_names=False, escape=False)
output_string = output_string.replace('{} & Feas', 'Problem & Feas')
output_string = output_string.replace('\\midrule\n', '')
output_string = output_string.replace('Pruned \\\\\n', 'Pruned \\\\\n\\midrule\n')
output_string = output_string.replace('\\\n2DBP', '\\\n\\midrule\n2DBP')
output_string = output_string.replace('±', '\\(\\pm\\)')
if 1 in df.outputs.unique():
filename = 'estimation_o1.tex'
else:
filename = 'estimation_o2.tex'
open(os.path.join(outputdir, filename), 'w').write(output_string)
def max_formatter(x, max_value):
if x == max_value:
return '\\textbf{%s}' % x
else:
return str(x)
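# Worked example (illustrative values): max_formatter is meant to be used as a pandas
# to_latex formatter so that the column maximum is printed in bold, e.g.
#   max_formatter(12.3, 12.3) -> '\\textbf{12.3}'
#   max_formatter(7.1, 12.3)  -> '7.1'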
def estimation_bars(logfile, outputdir=None):
if outputdir:
_, ax = plt.subplots(figsize=figsize_text(1.0, height_ratio=0.6))
else:
_, ax = plt.subplots()
df = load_stats_log(logfile)
df.loc[(df.estimator == 'network') & (df.loss == 'shiftedmse'), 'estimator'] = 'networka'
df[['true_pairs', 'pruned_ratio', 'pruned_domain']] *= 100
# gdf = df.groupby(['estimator', 'adjustment'], as_index=False).mean()
# gdf.sort_values(['true_pairs', 'pruned_ratio'], inplace=True)
# gdf.plot.bar(x=['estimator', 'adjustment'], y=['pruned_domain'], ax=ax)
# sns.boxplot('estimator', 'pruned_domain', hue='adjustment', data=df, ax=ax)
sns.barplot('estimator', 'pruned_domain', hue='adjustment', data=df, ax=ax)
ax.set_ylim([0, 100])
# ax.yaxis.set_ticks(np.arange(0, 101, 5))
ax.set_ylabel(LABELS['pruned_domain'])
ax.set_xlabel(LABELS['estimator'])
ax.legend(title=LABELS['adjustment'])
ax.set_title('Number of instances for which the domain was pruned by boundary estimation')
sns.despine(ax=ax)
if outputdir:
plt.savefig(os.path.join(outputdir, 'estimationbars.pgf'), dpi=500, bbox_inches='tight', pad_inches=0)
def boundaryeffects_table(files):
df = solve.helper.read_stats(files)
unbounded = (df['LowerBound'] == -1) & (df['UpperBound'] == -1)
only_lower = (df['LowerBound'] != -1) & (df['UpperBound'] == -1)
only_upper = (df['LowerBound'] == -1) & (df['UpperBound'] != -1)
both_bounds = (df['LowerBound'] != -1) & (df['UpperBound'] != -1)
df.loc[unbounded, 'Bounds'] = 'No'
df.loc[only_lower, 'Bounds'] = 'Lower'
df.loc[only_upper, 'Bounds'] = 'Upper'
df.loc[both_bounds, 'Bounds'] = 'Both'
    if 'search_time' in df.columns:
df.loc[df.Solver == 'chuffed', 'Runtime'] = df.loc[df.Solver == 'chuffed', 'search_time'] * 1000
del df['search_time']
assert (unbounded.sum() + only_lower.sum() + only_upper.sum() + both_bounds.sum() == len(df))
    x = df.groupby(['Problem', 'Instance', 'Solver']).apply(lambda g: g['UpperBound'] - g['LowerBound'])
print(x)
def instances_table(problems, outputdir=None):
fields = ['Problem', 'Instances']
rows = []
for p in problems:
rows.append((p.name, len(p.get_dzns())))
df = pd.DataFrame(rows, columns=fields)
df.sort_values(['Instances', 'Problem'], ascending=[False, True], inplace=True)
df = df.replace(LABELS.keys(), LABELS.values())
if outputdir:
df.to_latex(os.path.join(outputdir, 'instances.tex'), index=False)
print(df)
def agg_mult_results(x):
    if len(x.status.unique()) == 1:
        res = x.median()
        res['status'] = x['status'].iloc[0]
else:
res = x
return res
def solver_performance_graph(bounded_logs, unbounded_logs, outputdir, combined_log=None):
if combined_log:
df = pd.read_csv(combined_log)
df = join_on_common_tasks(df[df.bounds != 'No'], df[df.bounds == 'No'])
else:
df = join_on_common_tasks(pd.read_csv(bounded_logs), pd.read_csv(unbounded_logs))
num_solvers = df.solver.nunique()
if outputdir:
_, axes = plt.subplots(nrows=1, ncols=num_solvers, figsize=figsize_text(1.0, height_ratio=0.6))
else:
_, axes = plt.subplots(nrows=num_solvers, ncols=1, sharex=True)
df['mzn'] = df['mzn'].str.replace('_boundest.mzn', '.mzn')
aggdf = df.reset_index()
aggdf = aggdf[aggdf.status == 'COMPLETE'].groupby(['solver', 'mzn', 'bounds', 'time_solver']).count().groupby(
level=[0, 1, 2]).cumsum().reset_index()
legends = []
num_mzns = df.mzn.nunique()
for solver_ax, solver in zip(axes, sorted(df.solver.unique().tolist())):
solver_ax.set_title(LABELS[solver])
for c, p in zip(sns.color_palette("hls", num_mzns), df.mzn.unique().tolist()):
df_filter = (aggdf.solver == solver) & (aggdf.mzn == p)
print(p, solver, df_filter.sum())
bnd = 'Both'
if aggdf[df_filter & (aggdf.bounds == bnd)].shape[0] == 0 or \
aggdf[df_filter & (aggdf.bounds == 'No')].shape[0] == 0:
continue
l = aggdf[df_filter & (aggdf.bounds == bnd)].plot(x='time_solver', y='dzn', ax=solver_ax, linestyle='-',
c=c, label='{} ({})'.format(LABELS[p], bnd))
aggdf[df_filter & (aggdf.bounds == 'No')].plot(x='time_solver', y='dzn', ax=solver_ax, linestyle='--',
c=c, label='{} (None)'.format(LABELS[p]))
if solver == 'gecode':
legends.append(mpatches.Patch(color=c, label=LABELS[p]))
# solver_ax.legend_.remove()
solver_ax.set_xlabel('Time (in s)')
solver_ax.set_xlim([0, 1200])
if solver == 'chuffed':
solver_ax.set_ylabel('Completed Instances')
sns.despine()
# axes[1].legend()
if outputdir:
plt.tight_layout()
plt.savefig(os.path.join(outputdir, 'solver.pgf'), dpi=500, bbox_inches='tight', pad_inches=0)
else:
plt.show()
def solver_performance_table(bounded_logs, unbounded_logs, outputdir, combined_log=None):
# Per-solver and problem
# - No. Complete
# - Avg. runtime of complete
# - Avg. quality of incomplete
if combined_log:
df = pd.read_csv(combined_log)
df = join_on_common_tasks(df[df.bounds != 'No'], df[df.bounds == 'No'])
else:
df = join_on_common_tasks(pd.read_csv(bounded_logs), pd.read_csv(unbounded_logs))
cats = ['COMPLETE', 'SOLFOUND', 'UNSATISFIABLE', 'UNKNOWN', 'FAILED', 'INTERMEDIATE']
df['status'] = df['status'].astype("category") # .cat.reorder_categories(cats, ordered=True)
df['status'].cat.set_categories(cats, ordered=True)
df['status'] = df['status'].cat.as_ordered()
df = df[(df.status != 'INTERMEDIATE')]
completion_df = df.groupby(['bounds', 'mzn', 'status'])['status'].count().reset_index(name="count")
print(completion_df.pivot_table(values='count', columns='bounds', index=['mzn', 'status']))
# Qualitative difference
baseline = data_loader.get_best_results(dzn_filter=df.dzn.unique().tolist())
diff_keys = ['solver', 'mzn', 'dzn', 'bounds', 'status', 'objective', 'time_solver']
if 'backtracks' in df.columns:
diff_keys += ['backtracks']
if 'propagations' in df.columns:
diff_keys += ['propagations']
if 'normal_propagations' in df.columns:
diff_keys += ['normal_propagations']
diff_df = df[diff_keys]
# diff_df = diff_df[diff_df.solver != 'gecode']
diff_df = diff_df.groupby(['solver', 'mzn', 'dzn'], as_index=False)
diff_df = diff_df.apply(lambda g: any_improvement(g))
objective_df = diff_df.groupby(['solver', 'mzn', 'objective']).count().reset_index().rename(
columns={'objective': 'impact', 'time': 'objective'})
time_df = diff_df.groupby(['solver', 'mzn', 'time']).count().reset_index().rename(
columns={'time': 'impact', 'objective': 'time'})
xdf = objective_df.set_index(['solver', 'mzn', 'impact']).join(time_df.set_index(['solver', 'mzn', 'impact']),
how='outer')
# xdf = xdf.rename(columns={'objective': 'time', 'time': 'objective'})
xdf = xdf.groupby(['solver', 'mzn']).apply(lambda x: x / x.sum() * 100)
xdf = xdf.pivot_table(index='mzn', columns=['solver', 'impact'], values=['objective', 'time'])
xdf = xdf.swaplevel(0, 1, axis='columns').sort_index(axis='columns', level=0)
xdf = xdf.fillna(0)
xdf = xdf.astype(int)
# xdf.replace(LABELS.keys(), LABELS.values(), inplace=True)
print(xdf)
if outputdir:
xdf.rename(columns=LABELS, inplace=True)
column_format = 'lrrrrrr' + '|rrrrrr' * (len(df.solver.unique()) - 2)
output_string = xdf.to_latex(multicolumn=True, multicolumn_format='c', column_format=column_format,
index_names=False)
output_string = output_string.replace('\\midrule\n', '')
output_string = output_string.replace('OR-Tools \\\\\n', 'OR-Tools \\\\\n\\midrule\n')
output_string = output_string.replace('\\\nBin Packing', '\\\n\\midrule\nBin Packing')
output_string = output_string.replace('\\\nAll', '\\\n\\midrule\nAll')
if 'Both' in df.bounds.unique():
filename = 'solver_improvement_o2_num.tex'
else:
filename = 'solver_improvement_o1_num.tex'
open(os.path.join(outputdir, filename), 'w').write(output_string)
return
diff_df = df[diff_keys] # .groupby(['solver', 'mzn', 'dzn', 'bounds'], as_index=False).agg(agg_mult_results)
diff_df = diff_df.groupby(['solver', 'mzn', 'dzn'], as_index=False)
diff_df = diff_df.apply(lambda g: quantitative_difference(g, baseline))
    rows_with_impact = (diff_df['objective_diff'].notnull() & (diff_df['objective_diff'] > 0.0)) | (
        diff_df['time_diff'].notnull() & (diff_df['time_diff'] > 0.0))
imp_df = diff_df[rows_with_impact].reset_index()
all_df = imp_df[['mzn', 'solver', 'time_diff', 'objective_diff']].groupby(['solver', 'mzn']).mean()
all_df['count'] = imp_df[['mzn', 'solver', 'time_diff', 'objective_diff']].groupby(['solver', 'mzn']).count()[
'time_diff'] / diff_df.groupby(['solver', 'mzn']).count()['time_diff']
all_df[['count', 'time_diff', 'objective_diff']] *= 100
all_df.reset_index(inplace=True)
all_df.replace(LABELS.keys(), LABELS.values(), inplace=True)
all_df = all_df.pivot_table(index='mzn', columns=['solver'], margins=False, aggfunc='median',
values=['count', 'objective_diff', 'time_diff'])
all_df = all_df.swaplevel(0, 1, axis='columns').sort_index(axis='columns', level=0)
all_df.rename(columns=LABELS, inplace=True)
all_df.fillna(0, inplace=True)
all_df = all_df.round().astype(int)
if outputdir:
out_df = all_df # .reset_index()[['mzn', 'solver', 'anyofthem']]
# out_df.replace(LABELS.keys(), LABELS.values(), inplace=True)
# out_df = out_df.pivot_table(index='mzn', columns='solver', margins=True, values='anyofthem', margins_name='All')
# out_df.fillna(0, inplace=True)
out_df.rename(columns=LABELS, inplace=True)
column_format = 'lrrr' + '|rrr' * (len(df.solver.unique()) - 1)
output_string = out_df.to_latex(multicolumn=True, multicolumn_format='c', column_format=column_format,
index_names=False)
output_string = output_string.replace('\\midrule\n', '')
output_string = output_string.replace('OR-Tools \\\\\n', 'OR-Tools \\\\\n\\midrule\n')
output_string = output_string.replace('\\\nBin Packing', '\\\n\\midrule\nBin Packing')
output_string = output_string.replace('\\\nAll', '\\\n\\midrule\nAll')
if 'Both' in df.bounds.unique():
filename = 'solver_improvement_o2.tex'
else:
filename = 'solver_improvement_o1.tex'
open(os.path.join(outputdir, filename), 'w').write(output_string)
total_time_unbound = df[df['bounds'] == 'No']['time_solver'].sum()
total_time_bound = df[df['bounds'] == 'Upper']['time_solver'].sum()
print('Total time saved: {} - {} = {}'.format(total_time_unbound, total_time_bound,
total_time_unbound - total_time_bound))
print(all_df)
all_df.to_clipboard()
# More detailed descriptions
if any(diff_df['solver_diff'] != 0.0):
output_string = description_table(diff_df, 'solver_diff')
print('Solver Diff.')
print(output_string)
wilcox_df = diff_df.groupby(['mzn']).apply(lambda x: wilcoxon(x['solver_diff']))
print(wilcox_df)
if any(diff_df['objective_diff'] != 0.0):
output_string = description_table(diff_df, 'objective_diff')
print('Objective Diff.')
print(output_string)
wilcox_df = diff_df.groupby(['mzn']).apply(lambda x: wilcoxon(x['objective_diff']))
print(wilcox_df)
output_string = description_table(diff_df, 'time_diff')
print('Time Diff.')
print(output_string)
wilcox_df = diff_df.groupby(['mzn']).apply(lambda x: wilcoxon(x['time_diff']))
print(wilcox_df)
open(os.path.join(outputdir, 'solver_describe.tex'), 'w').write(output_string)
def description_table(diff_df, column):
describe_df = diff_df[diff_df[column] != 0.0].groupby(['mzn']).describe()[column] * 100
describe_df['count'] /= 100
describe_df.fillna(0, inplace=True)
describe_df = describe_df.round().astype(int)
describe_df['Mean'] = describe_df[['mean', 'std']].apply(lambda x: '±'.join([str(y) for y in x]), axis=1)
# del describe_df['count']
del describe_df['mean']
del describe_df['std']
describe_df = describe_df.reset_index()
describe_df = describe_df.replace(LABELS.keys(), LABELS.values())
describe_df.rename(columns=LABELS, inplace=True)
describe_df.columns = map(str.capitalize, describe_df.columns)
output_string = describe_df.to_latex(index=False)
output_string = output_string.replace('±', '\\(\\pm\\)')
return output_string
def join_on_common_tasks(bounded_df, unbounded_df):
common_task_ids = np.intersect1d(bounded_df.taskid.unique(), unbounded_df.taskid.unique())
bounded_df = bounded_df[bounded_df.taskid.isin(common_task_ids)]
unbounded_df = unbounded_df[unbounded_df.taskid.isin(common_task_ids)]
return pd.concat([bounded_df, unbounded_df])
def any_improvement(group):
unbounded = group[group.bounds == 'No']
bounded = group[group.bounds != 'No']
assert len(unbounded) == 1, "Wrong number of unbounded results"
base_complete = unbounded.iloc[0]['status'] == 'COMPLETE'
if base_complete and bounded.iloc[0]['status'] == 'COMPLETE':
time = np.sign(unbounded.iloc[0]['time_solver'] - bounded.iloc[0]['time_solver'])
elif not base_complete and any(bounded['status'] == 'COMPLETE'):
time = 1
elif base_complete and not any(bounded['status'] == 'COMPLETE'):
time = -1
else:
time = 0
if unbounded.iloc[0]['status'] == 'SOLFOUND' and bounded.iloc[0]['status'] == 'SOLFOUND':
objective = np.sign(unbounded.iloc[0]['objective'] - bounded.iloc[0]['objective'])
if np.isnan(objective):
if np.isnan(unbounded.iloc[0]['objective']) and np.isnan(bounded.iloc[0]['objective']):
objective = 0
elif np.isnan(unbounded.iloc[0]['objective']):
objective = 1
else:
objective = -1
elif unbounded.iloc[0]['status'] in ('SOLFOUND', 'COMPLETE') and bounded.iloc[0]['status'] not in (
'SOLFOUND', 'COMPLETE'):
objective = -1
else:
objective = 0
    return pd.Series([int(time), int(objective)], index=['time', 'objective'], dtype=int)
def quantitative_difference(group, baseline):
unbounded = group[group.bounds == 'No']
bounded = group[group.bounds != 'No']
if len(unbounded) > 1:
unbounded = group[group.bounds == 'No'].groupby(['solver', 'mzn', 'dzn'], as_index=False).median()
unbounded['status'] = group[group.bounds == 'No']['status'].min()
if len(bounded) > 1:
bounded = group[group.bounds != 'No'].groupby(['solver', 'mzn', 'dzn'], as_index=False).median()
bounded['status'] = group[group.bounds != 'No']['status'].min()
assert len(unbounded) == 1, "Wrong number of unbounded results ({}): {}".format(len(unbounded), bounded)
assert len(bounded) == 1, "Wrong number of bounded results ({}): {}".format(len(bounded), unbounded)
time_ratio = 0.0
gap_diff = 0.0
solver_diff = 0.0
if all(unbounded.status == 'COMPLETE') and all(bounded.status == 'COMPLETE'):
assert (all(unbounded['objective'].isnull()) and all(bounded['objective'].isnull())) or \
all(unbounded['objective'].values == bounded['objective'].values), 'Complete with different objectives'
unbounded_time = unbounded.iloc[0]['time_solver']
bounded_time = bounded.iloc[0]['time_solver']
if abs(unbounded_time - bounded_time) >= 5:
time_ratio = (unbounded_time - bounded_time) / unbounded_time
STAT_KEYS = {
'choco': 'backtracks',
'chuffed': 'propagations',
'ortools': 'normal_propagations'
}
if unbounded.solver.values[0] in STAT_KEYS:
stat_key = STAT_KEYS[unbounded.solver.values[0]]
unbounded_stat = unbounded.iloc[0][stat_key]
bounded_stat = bounded.iloc[0][stat_key]
solver_diff = (unbounded_stat - bounded_stat) / unbounded_stat
elif all(unbounded.status == 'SOLFOUND') and all(bounded['status'].isin(['SOLFOUND'])):
mzn_filter = baseline.mzn == group.mzn.values[0]
dzn_filter = baseline.dzn == group.dzn.values[0]
optimum = min([baseline[mzn_filter & dzn_filter]['objective'].min(),
unbounded.iloc[0]['objective'],
bounded.iloc[0]['objective']])
unbounded_gap = unbounded.iloc[0]['objective'] - optimum
bounded_gap = bounded.iloc[0]['objective'] - optimum
# assert unbounded_gap >= 0, "Unbounded better than prev. best: {}".format(group.dzn.values[0])
# assert bounded_gap >= 0, "Bounded better than prev. best: {}".format(group.dzn.values[0])
if unbounded_gap != 0:
gap_diff = (unbounded_gap - bounded_gap) / unbounded_gap
# if gap_diff < 0:
# print(group, optimum, gap_diff)
return pd.Series([time_ratio, gap_diff, solver_diff],
index=['time_diff', 'objective_diff', 'solver_diff'],
                     dtype=float)
def loss_functions(outputdir=None):
if outputdir:
_, ax = plt.subplots(figsize=figsize_column(1.0, height_ratio=0.7))
else:
_, ax = plt.subplots()
x = np.linspace(-2, 2, 300)
# MSE
ax.plot(x, x ** 2, color='k', linestyle='--', label='Symmetric Loss (Squared Error)')
# Shifted MSE
a = -0.8
y = x ** 2 * np.power(np.sign(x) + a, 2)
ax.plot(x, y, color='b', label='Asymmetric Loss (a = {:.1f})'.format(a))
ax.set_ylabel('Loss')
ax.set_yticklabels([])
ax.yaxis.labelpad = -2
ax.set_xlabel('Residual')
ax.set_xticklabels([])
ax.xaxis.labelpad = -2
ax.set_xlim([x.min(), x.max()])
ax.axvline(0, c='k', linestyle='--', linewidth=0.5, ymin=0, ymax=0.6)
ax.legend(loc=1, frameon=True)
ax.grid(False)
sns.despine(ax=ax)
if outputdir:
plt.tight_layout()
plt.savefig(os.path.join(outputdir, 'losses.pgf'), dpi=500, bbox_inches='tight', pad_inches=0)
else:
plt.show()
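# Minimal sketch (illustrative; the helper below is not the project's actual training
# code) of the asymmetric squared-error loss plotted above: with a < 0, residuals of
# one sign are penalised much more strongly than residuals of the other sign.
def shifted_squared_error(residuals, a=-0.8):
    residuals = np.asarray(residuals, dtype=float)
    return residuals ** 2 * (np.sign(residuals) + a) ** 2
# Example: shifted_squared_error([-1.0, 1.0]) -> array([3.24, 0.04]), i.e. a residual
# of -1 costs 81x more than a residual of +1 for a = -0.8.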
def get_row_idx(df, mzn, dzn, solver):
return (df.mzn == mzn) & (df.dzn == dzn) & (df.solver == solver)
def equivalent_solver_time(idx, df, soldf, mzn, dzn, solver):
first_objective = df.loc[idx, 'first_objective'].values[0]
first_sol_time = df.loc[idx, 'first_sol_time'].values[0]
sol_idx = get_row_idx(soldf, mzn, dzn, solver) & (soldf.objective <= first_objective)
unb_equal_time = soldf.loc[sol_idx, 'time'].dropna().min()
if abs(unb_equal_time - first_sol_time) >= 1:
est = 100 * (first_sol_time - unb_equal_time) / unb_equal_time
else:
est = 0
return est
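# Worked example (illustrative numbers): if the bounded run finds its first solution
# after 2 s and the unbounded run needs 8 s to reach an equally good objective, then
# est = 100 * (2 - 8) / 8 = -75, i.e. 75% less solver time to an equal-quality solution.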
def solver_performance_tables_split(logfile, logfile_fixed, outputdir):
df = pd.read_csv(logfile)
df['origin'] = 'est'
soldf = pd.read_csv(logfile.replace('.csv', '_solutions.csv'))
soldf = pd.DataFrame(soldf[soldf.bounds == 'No'])
dfno = pd.DataFrame(df[df.bounds == 'No'])
dfno['boundstype'] = 'no'
dfo1 = pd.DataFrame(df[(df.bounds == 'Upper') & (df.boundstype == 'hard')])
dfo2 = pd.DataFrame(df[(df.bounds == 'Both') & (df.boundstype == 'hard')])
dffixed = pd.read_csv(logfile_fixed)
dffixed['origin'] = 'fixed'
solfixed = pd.read_csv(logfile_fixed.replace('.csv', '_solutions.csv'))
for s in df.solver.unique():
res_base = get_result_columns(dffixed[dffixed.solver == s], dfno[dfno.solver == s], soldf[soldf.solver == s])
res_base['experiment'] = 'fixed'
res_o1 = get_result_columns(dfo1[dfo1.solver == s], dfno[dfno.solver == s], soldf[soldf.solver == s])
res_o1['experiment'] = 'o1'
res_o2 = get_result_columns(dfo2[dfo2.solver == s], dfno[dfno.solver == s], soldf[soldf.solver == s])
res_o2['experiment'] = 'o2'
result = pd.concat([res_base, res_o1, res_o2])
result = result.replace(LABELS.keys(), LABELS.values())
pdf = pd.pivot_table(result, index='mzn', values=['qof', 'est', 'ttc'], columns='experiment')
pdf.rename(columns={
'est': 'EST',
'qof': 'QOF',
'ttc': 'TTC',
'fixed': 'Fixed',
'o1': 'Upper',
'o2': 'Both'
}, inplace=True)
output_string = pdf.to_latex(multicolumn=True, multicolumn_format='c',
column_format='lRRR|RRR|RRR',
index_names=False, escape=False)
output_string = output_string.replace('\\begin{tabular}', '\\begin{tabularx}{0.97\\textwidth}')
output_string = output_string.replace('\\end{tabular}', '\\end{tabularx}')
print(pdf)
#open(os.path.join(outputdir, 'solver_effects_%s.tex' % s), 'w').write(output_string)
adf = df[['bounds', 'solver', 'mzn', 'first_objective']]
adf['is_complete'] = df['status'] == 'COMPLETE'
adf = adf.groupby(['bounds', 'solver', 'mzn']).agg({'first_objective': 'count', 'is_complete': 'sum'}) / 30 * 100
adf = adf.pivot_table(values=['first_objective', 'is_complete'], index='mzn', columns=['solver', 'bounds'])
adf = adf.round().astype(int)
out = adf.to_latex(multicolumn=True, multicolumn_format='c', index_names=False)
print(adf)
open(os.path.join(outputdir, 'solver_hassol.tex'), 'w').write(out)
def get_result_columns(df, dfno, soldfno):
df = pd.DataFrame(df)
df['est'] = 0
for mzn, dzn, solver in df.set_index(['mzn', 'dzn', 'solver']).index:
hard_idx = get_row_idx(df, mzn, dzn, solver)
df.loc[hard_idx, 'est'] = equivalent_solver_time(hard_idx, df, soldfno, mzn, dzn, solver)
df.set_index(['mzn', 'dzn', 'solver'], inplace=True)
dfno.set_index(['mzn', 'dzn', 'solver'], inplace=True)
df['qof'] = np.round(100 * (df['first_objective'] - dfno['first_objective']) / dfno['first_objective'], 0)
df['ttf'] = np.round(100 * (df['first_sol_time'] - dfno['first_sol_time']) / dfno['first_sol_time'], 0)
df['cmpl_diff'] = (df['status'] == 'COMPLETE').astype(int) - (dfno['status'] == 'COMPLETE').astype(int)
df_complete = df[df.status == 'COMPLETE'].join(dfno[dfno.status == 'COMPLETE'], rsuffix='_no')
df['ttc'] = np.round(100 * (df_complete['time_solver'] - df_complete['time_solver_no']) / df_complete['time_solver_no'], 0)
df.reset_index(inplace=True)
return df[['mzn', 'qof', 'ttf', 'est', 'ttc']].groupby(['mzn'], as_index=False).mean().round(1)
def solver_performance_table_o2(logfile, outputdir):
    df = pd.read_csv(logfile)
# -*- coding: utf-8 -*-
"""
.. module:: citationanalysis
:synopsis: Set of functions for typical bibliometric citation analysis
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from collections import defaultdict
import pandas as pd
import numpy as np
import scipy.sparse as spsparse
from sklearn.metrics import pairwise_distances
from sklearn.preprocessing import normalize
from pyscisci.utils import isin_sorted, zip2dict, check4columns
from pyscisci.network import dataframe2bipartite
def field_citation_distance(pub2ref_df, pub2field_df, pub2field_norm=True, temporal=True,citation_direction='references',
field_distance_metric='cosine', show_progress=False):
"""
Calculate the field distance matrix based on references or citations.
Parameters
----------
:param pub2ref_df : DataFrame
A DataFrame with the citation information for each Publication.
:param pub2field_df : DataFrame
A DataFrame with the field information for each Publication.
:param pub2field_norm : bool, default True
When a publication occurs in m > 1 fields, count the publication 1/m times in each field. Normalizes the membership
vector so it sums to 1 for each publication.
:param temporal : bool, default False
If True, compute the distance matrix using only publications for each year.
:param citation_direction : str, default `references`
`references` : the fields are defined by a publication's references.
`citations` : the fields are defined by a publication's citations.
:param field_distance_metric : str, default `cosine`
The interfield distance metric. Valid entries come from sklearn.metrics.pairwise_distances:
        'cosine', 'euclidean', 'l1', 'l2', etc.
:param show_progress : bool, default False
If True, show a progress bar tracking the calculation.
Returns
-------
Distance DataFrame
if temporal is True
DataFrame with 4 columns: iFieldId, jFieldId, Year, and FieldDistance
if temporal is False
DataFrame with 3 columns: iFieldId, jFieldId, FieldDistance
"""
# now we map citing and cited to the source and target depending on which diretion was specified by `citation_direction'
if citation_direction == 'references':
pub2ref_rename_dict = {'CitedPublicationId':'TargetId', 'CitingPublicationId':'SourceId'}
year_col = 'CitingYear'
elif citation_direction == 'citations':
pub2ref_rename_dict = {'CitedPublicationId':'SourceId', 'CitingPublicationId':'TargetId'}
year_col = 'CitedYear'
required_columns = ['CitedPublicationId', 'CitingPublicationId']
if temporal:
required_columns.append(year_col)
check4columns(pub2ref_df, required_columns)
pub2ref_df = pub2ref_df[required_columns].dropna().copy(deep=True)
check4columns(pub2field_df, ['PublicationId', 'FieldId'])
pub2field_df = pub2field_df.copy(deep=True)
# to leverage matrix operations we need to map fields to the rows/cols of the matrix
field2int = {fid:i for i, fid in enumerate(np.sort(pub2field_df['FieldId'].unique()))}
int2field = {i:fid for fid, i in field2int.items()}
pub2field_df['FieldId'] = [field2int[fid] for fid in pub2field_df['FieldId'].values]
Nfields = len(field2int)
pub2ref_df.rename(columns=pub2ref_rename_dict, inplace=True)
# the assignment of a publication to a field is 1/(number of fields) when normalized, and 1 otherwise
if pub2field_norm:
pub2nfields = pub2field_df.groupby('PublicationId')['FieldId'].nunique()
else:
pub2nfields = defaultdict(lambda:1)
pub2field_df['PubFieldContribution'] = [1.0/pub2nfields[pid] for pid in pub2field_df['PublicationId'].values]
distance_df = []
# differeniate between the temporal and the static RS
if temporal:
for y, ydf in pub2ref_df.groupby(year_col):
# merge the references to the fields for the source fields
ydf = ydf.merge(pub2field_df, how='left', left_on='SourceId', right_on='PublicationId').rename(
columns={'FieldId':'SourceFieldId', 'PubFieldContribution':'SourcePubFieldContribution'})
del ydf['PublicationId']
ydf = ydf.merge(pub2field_df, how='left', left_on='TargetId', right_on='PublicationId').rename(
columns={'FieldId':'TargetFieldId', 'PubFieldContribution':'TargetPubFieldContribution'})
del ydf['PublicationId']
# drop any citation relationships for which we dont have field information
ydf.dropna(inplace=True)
# we need to use integer ids to map to the matrix
ydf[['SourceFieldId', 'TargetFieldId']] = ydf[['SourceFieldId', 'TargetFieldId']].astype(int)
# in the field2field distance matrix, the weighted contribution from a source publication in multiple fields
# is the product of the source and target contributions
ydf['SourcePubFieldContribution'] = ydf['SourcePubFieldContribution'] * ydf['TargetPubFieldContribution']
# calculate the field representation vectors for this year only
yfield2field_mat = dataframe2bipartite(df=ydf, rowname='SourceFieldId', colname='TargetFieldId',
shape=(Nfields, Nfields), weightname='SourcePubFieldContribution')
# now compute the distance matrix for this year only
distance_matrix = pairwise_distances(yfield2field_mat, metric=field_distance_metric)
nnzrow, nnzcol = np.nonzero(distance_matrix)
for isource, itarget in zip(nnzrow, nnzcol):
if isource < itarget:
distance_df.append([int2field[isource], int2field[itarget], y, distance_matrix[isource, itarget]])
        distance_df = pd.DataFrame(distance_df, columns=['iFieldId', 'jFieldId', year_col, 'FieldDistance'])
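# Minimal usage sketch (illustrative only; the publication ids, field ids, and years
# below are made up). Per the docstring above, the call is expected to return a
# long-format table of pairwise field distances per citing year.
def _field_citation_distance_example():
    pub2ref = pd.DataFrame({
        'CitingPublicationId': [1, 1, 2],
        'CitedPublicationId': [3, 4, 4],
        'CitingYear': [2010, 2010, 2011],
    })
    pub2field = pd.DataFrame({
        'PublicationId': [1, 2, 3, 4],
        'FieldId': [10, 20, 10, 20],
    })
    return field_citation_distance(pub2ref, pub2field, temporal=True)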
"""
Pipeline Evaluation module
This module runs all the steps used and allows you to visualize them.
"""
import datetime
from typing import List, Tuple, Union
import pandas as pd
from sklearn.pipeline import Pipeline
from .evaluation import Evaluator
from .feature_reduction import FeatureReductor
from .labeling import Labeler
from .splitting import Splitter
from .utils import Picklable, visualize_data, visualize_labels
class PipelineEvaluator(Picklable):
"""
PipelineEvaluator contains all modules and triggers them.
"""
def __init__(
self,
labeler: Labeler = None,
splitter: Splitter = None,
pipeline: Pipeline = None,
feature_reductor: FeatureReductor = None,
model=None,
evaluator: Evaluator = None,
dropna: bool = True,
downprojector=None,
visualize: Union[bool, List[str]] = False,
verbose: bool = True,
):
self.labeler = labeler
self.splitter = splitter
self.pipeline = pipeline
self.feature_reductor = feature_reductor
self.model = model
self.evaluator = evaluator
self.dropna = dropna
self.downprojector = downprojector
self.visualize = visualize
self.verbose = verbose
if isinstance(self.visualize, bool):
if self.visualize:
self.visualize = [
"labeler",
"splitter",
"pipeline",
"feature_reductor",
"model",
"evaluator",
]
else:
self.visualize = []
def _log(self, text) -> None:
"""
        Print the current time and the provided text if verbose is True.
Parameters
----------
text: string
Comment added to printed time.
"""
if self.verbose:
print(datetime.datetime.now().time().strftime("%H:%M:%S.%f")[:-3], text)
def _drop_na(self, X: pd.DataFrame, y: pd.Series) -> Tuple[pd.DataFrame, pd.Series]:
"""
        Drop mostly-NaN columns and the leading rows that contain NaN values.
Returns
-------
X, y : tupple (pd.DataFrame, pd.Series)
X as data (with features) and y as labels.
"""
original_shape = X.shape
X.dropna(axis=1, thresh=int(X.shape[0] * 0.9), inplace=True)
cut_number = X.isna().sum().max()
X = X.iloc[cut_number:, :]
if X.isna().sum().sum() > 0:
X = X.dropna(axis=0)
y = y.loc[X.index]
self._log(
f"\tOriginal shape:\t\t{original_shape}; \n\t\tshape after removing NaNs: {X.shape}."
)
return X, y
def run(self, data=None):
"""
Run each module on provided data.
Parameters
----------
data : array-like
Data to evaluate the pipeline on.
Returns
-------
result : dict
Dict of calculated metric values labeled by their names.
"""
if self.labeler is not None:
self._log("Labeling data")
self.labels = self.labeler.transform(data)
if "labeler" in self.visualize:
self.labeler.visualize(labels=self.labels)
if self.splitter is not None:
self._log("Splitting data")
(
self.X_train,
self.X_test,
self.y_train,
self.y_test,
) = self.splitter.transform(X=data, y=self.labels)
if "splitter" in self.visualize:
self.splitter.visualize(X=[self.X_train, self.X_test])
if self.pipeline is not None:
self._log("Fitting pipeline")
self.X_train = self.pipeline.fit_transform(self.X_train, self.y_train)
self._log("Applying pipeline transformations")
self.X_test = self.pipeline.transform(self.X_test)
if self.dropna:
self.X_train, self.y_train = self._drop_na(X=self.X_train, y=self.y_train)
self.X_test, self.y_test = self._drop_na(X=self.X_test, y=self.y_test)
if "pipeline" in self.visualize:
visualize_data(
X=self.X_train,
y=self.y_train,
downprojector=self.downprojector,
title="Visualization of pipeline output",
)
if self.feature_reductor is not None:
self._log("Applying feature reduction")
self.feature_reductor.fit(self.X_train, self.y_train)
self.X_train = self.feature_reductor.transform(self.X_train)
self.X_test = self.feature_reductor.transform(self.X_test)
if "feature_reductor" in self.visualize:
self.feature_reductor.visualize(
X=self.X_train,
y=self.y_train,
downprojector=self.downprojector,
title="Visualization of FeatureReductor output",
)
if self.model is not None:
self._log("Fitting model")
self.model.fit(self.X_train, self.y_train)
if "model" in self.visualize:
self.y_pred = self.model.predict(self.X_train)
if len(self.y_pred.shape) == 1 or self.y_pred.shape[1] == 1:
self.y_pred = pd.Series(self.y_pred, index=self.X_train.index)
else:
                    self.y_pred = pd.DataFrame(self.y_pred, index=self.X_train.index)
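# Minimal wiring sketch (commented out; my_labeler, my_splitter and my_evaluator are
# placeholders for concrete Labeler/Splitter/Evaluator implementations, and the model
# can be any estimator with fit/predict). Per the run() docstring, run returns a dict
# of metric values.
#
#     from sklearn.pipeline import Pipeline
#     from sklearn.preprocessing import StandardScaler
#     from sklearn.ensemble import RandomForestClassifier
#
#     pe = PipelineEvaluator(
#         labeler=my_labeler,
#         splitter=my_splitter,
#         pipeline=Pipeline([("scale", StandardScaler())]),
#         model=RandomForestClassifier(),
#         evaluator=my_evaluator,
#     )
#     metrics = pe.run(data)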
import numpy as np
# 2013-10-31 Added MultiRate class, simplified fitting methods, removed full_output parameter
# 2014-12-18 Add loading of Frequency, Integration time and Iterations, calculate lower
# bound on errors from Poisson distribution
# 2015-01-28 Simplified fitting again. Needs more work
# 2015-03-02 Added functions for number density calculation
_verbosity = 2
def set_verbosity(level):
"""
0: serious/unrecoverable error
1: recoverable error
2: warning
3: information
"""
global _verbosity
_verbosity = level
def warn(message, level):
if level <= _verbosity:
print(message)
def fitter(p0, errfunc, args):
from lmfit import minimize
result = minimize(errfunc, p0, args=args, nan_policy="omit")
if not result.success:
msg = " Optimal parameters not found: " + result.message
raise RuntimeError(msg)
for i, name in enumerate(result.var_names):
if result.params[name].value == result.init_vals[i]:
warn("Warning: fitter: parameter \"%s\" was not changed, it is probably redundant"%name, 2)
from scipy.stats import chi2
chi = chi2.cdf(result.chisqr, result.nfree)
if chi > 0.5: pval = -(1-chi)*2
else: pval = chi*2
    pval = 1-chi  # note: this overrides the two-sided p-value computed just above
return result.params, pval, result
def dict2Params(dic):
from lmfit import Parameters
if isinstance(dic, Parameters): return dic.copy()
p = Parameters()
for key, val in dic.items():
p.add(key, value=val)
return p
P = dict2Params
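# Minimal sketch of how dict2Params and fitter fit together (the model, parameter
# names and data below are illustrative only): the residual function receives an
# lmfit Parameters object plus the extra args and returns the residual array.
def _fitter_usage_example():
    x = np.linspace(0.0, 1.0, 20)
    y = 60.0 * np.exp(-3.0 * x)  # synthetic decay data
    def residual(params, x, y):
        return y - params['N0'].value * np.exp(-params['k'].value * x)
    p0 = dict2Params({'N0': 50.0, 'k': 1.0})
    params, pval, result = fitter(p0, residual, args=(x, y))
    return params, pval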
class Rate:
def __init__(self, fname, full_data=False, skip_iter=[]):
import re
import datetime as dt
fr = open(fname)
state = -1
npoints = 0
nions = 0
pointno = 0
iterno = 0
ioniter = []
ionname = []
frequency = 0
integration = 0
poisson_error = True
# -1 header
# 0 init
# 1 read time
# 2 read data
for lineno, line in enumerate(fr):
# read header
if state == -1:
if lineno == 2:
T1 = line[:22].split()
T2 = line[22:].split()
self.starttime = dt.datetime.strptime(" ".join(T1), "%Y-%m-%d %H:%M:%S.%f")
self.stoptime = dt.datetime.strptime(" ".join(T2), "%Y-%m-%d %H:%M:%S.%f")
if lineno == 3:
state = 0
toks = line.split()
if len(toks) == 0:
continue
if state == 0:
if re.search("Period \(s\)=", line):
frequency = 1/float(re.search("Period \(s\)=([0-9.]+)", line).group(1))
if re.search("Frequency=", line):
frequency = float(re.search("Frequency=([0-9.]+)", line).group(1))
if re.search("Integration time \(s\)", line):
integration = float(re.search("Integration time \(s\)=([0-9.]+)", line).group(1))
if re.search("Number of Points=", line):
npoints = int(re.search("Number of Points=(\d+)", line).group(1))
if re.search("Number of Iterations=", line):
self.niter = int(re.search("Number of Iterations=(\d+)", line).group(1))
if toks[0] == "[Ion":
nions += 1
if re.search("^Iterations=", line) :
ioniter.append(int(re.search("Iterations=(\d+)", line).group(1)))
if re.search("^Name=", line) :
ionname.append(re.search("Name=(.+)$", line).group(1).strip('\"'))
if toks[0] == "Time":
if len(toks)-2 != nions:
print("Corrupt file", fname, "Wrong number of ions in the header. Trying to recover")
# Assume that the Time header is correct:
nions = len(toks)-2
ioniter = ioniter[:nions]
if len(ioniter) < nions:
warn("Corrupt file " + str(fname) + ": Iterations for all species not recorded, guessing...", 1)
while len(ioniter) < nions:
ioniter.append(ioniter[-1])
if len(ionname) < nions:
warn("Corrupt file " + str(fname) + ": Names for all species not recorded, making something up...", 2)
ionname += toks[len(ionname)+2:]
state = 1
time = []
data = np.zeros((nions, npoints, self.niter))
continue
if state == 1:
try:
newtime = float(toks[0])
except ValueError:
if pointno != npoints:
warn("Corrupt file " + fname + " trying to guess number of points", 2)
npoints = pointno
data.resize((nions, npoints, self.niter))
time = np.array(time)
state = 2
else:
time.append(newtime)
pointno += 1
if state == 2:
if toks[0] == "Iteration":
iterno = int(toks[1])-1
if iterno+1 > self.niter:
warn("Corrupt file " + fname + " trying to guess number of iterations", 2)
#msg = "Corrupt file: " + fname
#raise IOError(msg)
self.niter = iterno+1
data.resize((nions, npoints, self.niter))
pointno = 0
continue
try:
data[:, pointno, iterno] = [float(x) for x in toks][1:-1]
except ValueError:
warn("Error in file " + fname + " number of ions probably wrong")
pointno += 1
ioniter = np.array(ioniter)
# in case of multiple measurements per iteration
if iterno+1 != self.niter:
if self.niter % (iterno+1) != 0:
msg = "Corrupt file: " + fname
print(("Corrupt file " + fname + " trying to guess number of iterations:" + str(iterno+1)))
if iterno+1 < self.niter:
data = data[:,:,:iterno+1]
else:
newdata = np.zeros((nions, npoints, iterno+1))
newdata[:,:,:self.niter] = data
print(data, newdata)
data = newdata
#data.resize((nions, npoints, iterno+1))
self.niter = iterno+1
data = data[:,:,:iterno+1]
#print skip_iter, np.shape(skip_iter)
if len(skip_iter)!=0:
skip_iter = np.array(skip_iter)
indices = np.ones(self.niter, dtype=bool)
indices[skip_iter] = False
data = data[:,:,indices]
# XXX frequency is sometimes wrong in the files
# use some heuristics to estimate the frequency
# repetition time is usually set in multiples of 0.1s
measurement_time = np.ceil(time[-1]/0.1)*0.1
if frequency*measurement_time > 1.1 or frequency*measurement_time < 0.4:
warn("Recorded frequency in " + fname + " is probably wrong. Using estimate %f" % (1/measurement_time), 1)
frequency = 1/measurement_time
# this is later used to estimate Poisson error
self.total_iterations = ioniter[:,None]*integration*frequency*self.niter
self.nions = nions
self.ionname = ionname
self.time = time
self.data = data
self.fname = fname
self.average()
if not full_data:
self.data = None
self.mask = None
def average(self):
data_mean = np.mean(self.data, axis=2)
data_std = np.std(self.data, axis=2)/np.sqrt(self.niter)
#print(np.shape(self.data), np.shape(data_mean), np.shape(self.total_iterations))
data_counts = data_mean*self.total_iterations
# divide by sqrt(total_iterations) twice - once to get Poisson
# variance of measured data and once to the error of estimated mean
# this should be verified, but it is in agreement with errors obtained
# by treating data as normal variables for large numbers
data_poiss_err = np.sqrt(np.maximum(data_counts, 3))/self.total_iterations
# we assume that if 0 counts are observed, 3 counts is within confidence interval
# we use std error if it is larger than poisson error to capture other sources
# of error e.g. fluctuations
data_std = np.maximum(data_std, data_poiss_err)
self.data_mean = data_mean
self.data_std = data_std
def merge(self, rate2):
self.data_mean = np.concatenate((self.data_mean, rate2.data_mean), axis=1)
self.data_std = np.concatenate((self.data_std, rate2.data_std), axis=1)
self.time = np.concatenate((self.time, rate2.time), axis=0)
#print " ** merging ** "
#print self.data_mean, self.data_std, self.time
def poisson_test1(self):
shape = np.shape(self.data_mean)
#check only H- XXX
shape = (1, shape[1])
pval = np.zeros(shape)
for specno in range(shape[0]):
for pointno in range(shape[1]):
                if self.mask is not None:
dataline = self.data[specno, pointno, self.mask[specno, pointno, :]]
else:
dataline = self.data[specno, pointno, :]
mean = np.mean(dataline)
Q = np.sum((dataline-mean)**2)/mean
niter = len(dataline[~np.isnan(dataline)])
dof = niter-1
from scipy.stats import chi2
chi = chi2.cdf(Q, dof)
if chi > 0.5: pval[specno, pointno] = (1-chi)*2
else: pval[specno, pointno] = chi*2
print((chi, Q, pval[specno, pointno]))
return np.min(pval)
def cut3sigma(self, nsigma=3):
shape = np.shape(self.data)
self.mask = np.zeros(shape, dtype=bool)
for specno in range(shape[0]):
for pointno in range(shape[1]):
stddev = self.data_std[specno, pointno]*np.sqrt(self.niter)
low = self.data_mean[specno, pointno] - nsigma*stddev
high = self.data_mean[specno, pointno] + nsigma*stddev
dataline = self.data[specno, pointno, :]
mask = (dataline > low) & (dataline < high)
#self.data[specno, pointno, ~mask] = float("nan")
self.mask[specno, pointno, :] = mask
self.data_mean[specno, pointno] = np.mean(dataline[mask])
self.data_std[specno, pointno] = np.std(dataline[mask])/np.sqrt(self.niter)
#data_mean = np.mean(self.data[self.mask], axis=2)
#data_std = np.std(self.data, axis=2)/np.sqrt(self.niter)
#print self.data_mean, self.data_std
#self.data[self.data<120] = 130
def fit_ode_mpmath(self, p0=[60.0, .1], columns=[0]):
from mpmath import odefun
def fitfunc(p, x):
eqn = lambda x, y: -p[1]*y
y0 = p[0]
f = odefun(eqn, 0, y0)
g = np.vectorize(lambda x: float(f(x)))
return g(x)
return self._fit(fitfunc, p0, columns)
def fit_ode_scipy(self, p0=[60.0, .1], columns=[0]):
from scipy.integrate import odeint
def fitfunc(p, x):
eqn = lambda y, x: -p[1]*y
y0 = p[0]
t = np.r_[0., x]
y = odeint(eqn, y0, t)
return y[1:,0]
return self._fit(fitfunc, p0, columns)
def fit_inc(self, p0=[1.0, .01, 0.99], columns=[1]):
#fitfuncinc = lambda p, x: p[0]*(1-np.exp(-x/p[1]))+p[2]
fitfunc = lambda p, x: -abs(p[0])*np.exp(-x/abs(p[1]))+abs(p[2])
return self._fit(fitfunc, p0, columns)
def fit_equilib(self, p0=[70.0, .1, 1], columns=[0]):
fitfunc = lambda p, x: abs(p[0])*np.exp(-x/abs(p[1]))+abs(p[2])
return self._fit(fitfunc, p0, columns)
class MultiRate:
def __init__(self, fnames, directory=""):
if isinstance(fnames, str): fnames = [fnames]
self.rates = [Rate(directory+fname, full_data=True) for fname in fnames]
# if True, a normalization factor for each rate with respect to rates[0] is a free fitting param
self.normalized = True
self.norms = [1]*len(self.rates)
self.fitfunc = None
self.fitparam = None
self.fitresult = None
self.fitcolumns = None
self.fitmask = slice(None)
self.fnames = fnames
self.sigma_min = 0.01 # lower bound on the measurement accuracy
def plot_to_file(self, fname, comment=None, figsize=(6,8.5), logx=False, *args, **kwargs):
import matplotlib.pyplot as plt
from lmfit import fit_report
f = plt.figure(figsize=figsize)
ax = f.add_axes([.15, .5, .8, .45])
self.plot(ax=ax, show=False, *args, **kwargs)
ax.set_yscale("log")
if logx: ax.set_xscale("log")
ax.legend(loc="lower right", fontsize=5)
ax.set_title(comment, size=8)
if self.fitresult is not None:
f.text(0.1, 0.44, "p-value = %.2g\n"%self.fitpval
+ fit_report(self.fitresult, min_correl=0.5), size=6, va="top", family='monospace')
if ax.get_ylim()[0] < 1e-4: ax.set_ylim(bottom=1e-4)
ax.set_xlabel(r"$t (\rm s)$")
ax.set_ylabel(r"$N_{\rm i}$")
if self.fitresult is not None:
ax2 = f.add_axes([.55, .345, .40, .10])
if logx: ax2.set_xscale("log")
self.plot_residuals(ax=ax2, show=False, weighted=True)
ax2.tick_params(labelsize=7)
ax2.set_title("weighted residuals", size=7)
ax2.set_xlabel(r"$t (\rm s)$", size=7)
ax2.set_ylabel(r"$R/\sigma$", size=7)
f.savefig(fname, dpi=200)
plt.close(f)
def plot(self, ax=None, show=False, plot_fitfunc=True, symbols=["o", "s", "v", "^", "D", "h"], colors=["r", "g", "b", "m", "k", "orange"],\
opensymbols=False, fitfmt="-", fitcolor=None, hide_uncertain=False, plot_columns=None):
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
lines = {}
if plot_columns is None: plot_columns = range(self.rates[0].nions)
for i in plot_columns:
if opensymbols:
kwargs = {"markeredgewidth":1, "markerfacecolor":"w", "markeredgecolor": colors[i], "color":colors[i]}
else:
kwargs = {"markeredgewidth":0, "color":colors[i]}
l = None
for j, rate in enumerate(self.rates):
norm = 1/self.norms[j]
I = rate.data_std[i] < rate.data_mean[i] if hide_uncertain else slice(None)
if l==None:
l = ax.errorbar(rate.time[I], rate.data_mean[i][I]*norm, yerr=rate.data_std[i][I]*norm, label=rate.ionname[i],
fmt = symbols[i], **kwargs)
color = l.get_children()[0].get_color()
else:
l = ax.errorbar(rate.time[I], rate.data_mean[i][I]*norm, yerr=rate.data_std[i][I]*norm,
fmt = symbols[i], color=color, markeredgewidth=0)
lines[i] = l
# plot sum
for j, rate in enumerate(self.rates):
# calculate the sum over the plotted data only
S = np.sum(rate.data_mean[plot_columns], axis=0)
label = "sum" if j==0 else None
ax.plot(rate.time, S/self.norms[j], ".", c="0.5", label=label)
if self.fitfunc != None and self.fitparam != None:
mintime = np.min([np.min(r.time[self.fitmask]) for r in self.rates])
maxtime = np.max([np.max(r.time[self.fitmask]) for r in self.rates])
x = np.logspace(np.log10(mintime), np.log10(maxtime), 500)-self.fit_t0
x = x[x>=0.]
fit = self.fitfunc(self.fitparam, x)
for i, column in enumerate(self.fitcolumns):
if column not in plot_columns: continue
if fitcolor == None: c = lines[column].get_children()[0].get_color()
else: c = fitcolor
ax.plot(x+self.fit_t0, fit[i], fitfmt, c=c)
if len(self.fitcolumns) > 1:
ax.plot(x+self.fit_t0, np.sum(fit, axis=0), c="k")
if show == True:
ax.set_yscale("log")
ax.legend()
plt.show()
return ax
def plot_residuals(self, ax=None, show=False, weighted=False, symbols=["o", "s", "v", "^", "D", "h"], colors=["r", "g", "b", "m", "k", "orange"],\
opensymbols=False, plot_columns=None):
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
if plot_columns is None: plot_columns = range(self.rates[0].nions)
cdict = {col: i for i, col in enumerate(plot_columns)}
lines = {}
for j, rate in enumerate(self.rates):
t = rate.time[self.fitmask]
#print("\n"*3 + "*"*80)
#print(rate.fname)
fit = self.fitfunc(self.fitparam, t-self.fit_t0)
for i, column in enumerate(self.fitcolumns):
"""
print("\n"*2 + "*"*3 + " " + rate.ionname[column])
print(rate.time)
print(t - self.fit_t0)
print(rate.data_mean[column])
print(rate.data_std[column])
print(fit[i])
print((rate.data_mean[column][self.fitmask] - fit[i])/rate.data_std[column][self.fitmask])
"""
if column in plot_columns:
j = cdict[column]
if weighted:
ax.plot(t, (rate.data_mean[column][self.fitmask] - fit[i])/rate.data_std[column][self.fitmask],
symbols[j], color=colors[j], lw=0.5, ms=2)
else:
ax.errorbar(t, rate.data_mean[column][self.fitmask] - fit[i], yerr=rate.data_std[column][self.fitmask],
fmt=symbols[j], color=colors[j], lw=0.5, ms=2)
#ax.set_yscale("symlog", linthresh=10)
if show == True:
ax.set_yscale("log")
ax.legend()
plt.show()
return ax
def save_data(self, filename):
to_save = []
for j, rate in enumerate(self.rates):
norm = 1/self.norms[j]
to_save.append(np.hstack((rate.time[:,np.newaxis], rate.data_mean.T, rate.data_std.T)))
to_save = np.vstack(to_save)
np.savetxt(filename, to_save)
def save_fit(self, filename, time = None):
if time is None:
mintime = np.min([np.min(r.time[self.fitmask]) for r in self.rates])
maxtime = np.max([np.max(r.time[self.fitmask]) for r in self.rates])
time = np.logspace(np.log10(mintime), np.log10(maxtime), 500)
time = time[time-self.fit_t0 >= 0.]
fit = self.fitfunc(self.fitparam, time-self.fit_t0)
to_save = np.vstack((time, np.vstack(fit))).T
np.savetxt(filename, to_save)
def save_data_fit_excel(self, filename, time=None, normalize=False, metadata={}):
import pandas as pd
dfs = []
for j, rate in enumerate(self.rates):
df = pd.DataFrame(rate.time, columns=["tt"])
if self.normalized:
df["norm"] = 1/self.norms[j]
if normalize:
norm = 1/self.norms[j]
else:
norm = 1
for k, name in enumerate(rate.ionname):
df[name] = rate.data_mean[k]*norm
df[name+"_err"] = rate.data_std[k]*norm
df["rate"] = rate.fname
dfs.append(df)
        df = pd.concat(dfs, ignore_index=True)
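    # Minimal usage sketch (the file names below are placeholders): load several rate
    # measurement files, write a summary plot, and dump the averaged data.
    #
    #     mr = MultiRate(["rate_0001.dat", "rate_0002.dat"], directory="data/")
    #     mr.plot_to_file("rates.png", comment="example measurement", logx=True)
    #     mr.save_data("rates_averaged.txt")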
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
#test loc +/- neq (0, -1)
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
            # either, depending on numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-01-31')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
#self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_numpy_array_equal(result, expected)
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
self.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_shift(self):
shifted = self.dateIndex.shift(0, timedelta(1))
self.assertIs(shifted, self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
shifted = self.dateIndex.shift(1, 'B')
self.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
shifted.name = 'shifted'
self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(result2.equals(expected2))
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assertTrue(result3.equals(expected3))
self.assertEqual(result3.name, expected3.name)
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = Index(['A','B','A','C'])
idx2 = Index(['B','D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
self.assertTrue(result.equals(expected))
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# Corner cases
union = first.union(first)
self.assertIs(union, first)
union = first.union([])
self.assertIs(union, first)
union = Index([]).union(first)
self.assertIs(union, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.union, 0.5)
# preserve names
first.name = 'A'
second.name = 'A'
union = first.union(second)
self.assertEqual(union.name, 'A')
second.name = 'B'
union = first.union(second)
self.assertIsNone(union.name)
def test_add(self):
# - API change GH 8226
with tm.assert_produces_warning():
self.strIndex + self.strIndex
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
self.assertTrue(tm.equalContents(firstCat, appended))
self.assertTrue(tm.equalContents(secondCat, self.strIndex))
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(index))
# empty
result = index.append([])
self.assertTrue(result.equals(index))
def test_append_empty_preserve_name(self):
left = Index([], name='foo')
right = Index([1, 2, 3], name='foo')
result = left.append(right)
self.assertEqual(result.name, 'foo')
left = Index([], name='foo')
right = Index([1, 2, 3], name='bar')
result = left.append(right)
self.assertIsNone(result.name)
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
self.assertNotIn('a', index2)
self.assertIn('afoo', index2)
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
self.assertIn('a', index)
index += '_x'
self.assertIn('a_x', index)
def test_difference(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
result = first.difference(second)
self.assertTrue(tm.equalContents(result, answer))
self.assertEqual(result.name, None)
# same names
second.name = 'name'
result = first.difference(second)
self.assertEqual(result.name, 'name')
# with empty
result = first.difference([])
self.assertTrue(tm.equalContents(result, first))
self.assertEqual(result.name, first.name)
        # with everything
result = first.difference(first)
self.assertEqual(len(result), 0)
self.assertEqual(result.name, first.name)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.diff, 0.5)
def test_symmetric_diff(self):
# smoke
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = Index([2, 3, 4, 5])
result = idx1.sym_diff(idx2)
expected = Index([1, 5])
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# __xor__ syntax
expected = idx1 ^ idx2
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# multiIndex
idx1 = MultiIndex.from_tuples(self.tuples)
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.sym_diff(idx2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
self.assertTrue(tm.equalContents(result, expected))
# nans:
# GH #6444, sorting of nans. Make sure the number of nans is right
# and the correct non-nan values are there. punt on sorting.
idx1 = Index([1, 2, 3, np.nan])
idx2 = Index([0, 1, np.nan])
result = idx1.sym_diff(idx2)
# expected = Index([0.0, np.nan, 2.0, 3.0, np.nan])
nans = pd.isnull(result)
self.assertEqual(nans.sum(), 2)
self.assertEqual((~nans).sum(), 3)
[self.assertIn(x, result) for x in [0.0, 2.0, 3.0]]
# other not an Index:
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = idx1.sym_diff(idx2)
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'idx1')
result = idx1.sym_diff(idx2, result_name='new_name')
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'new_name')
# other isn't iterable
with tm.assertRaises(TypeError):
Index(idx1,dtype='object') - 1
def test_pickle(self):
self.verify_pickle(self.strIndex)
self.strIndex.name = 'foo'
self.verify_pickle(self.strIndex)
self.verify_pickle(self.dateIndex)
def test_is_numeric(self):
self.assertFalse(self.dateIndex.is_numeric())
self.assertFalse(self.strIndex.is_numeric())
self.assertTrue(self.intIndex.is_numeric())
self.assertTrue(self.floatIndex.is_numeric())
def test_is_object(self):
self.assertTrue(self.strIndex.is_object())
self.assertTrue(self.boolIndex.is_object())
self.assertFalse(self.intIndex.is_object())
self.assertFalse(self.dateIndex.is_object())
self.assertFalse(self.floatIndex.is_object())
def test_is_all_dates(self):
self.assertTrue(self.dateIndex.is_all_dates)
self.assertFalse(self.strIndex.is_all_dates)
self.assertFalse(self.intIndex.is_all_dates)
def test_summary(self):
self._check_method_works(Index.summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind.summary()
# shouldn't be formatted accidentally.
self.assertIn('~:{range}:0', result)
self.assertIn('{other}%s', result)
def test_format(self):
self._check_method_works(Index.format)
index = Index([datetime.now()])
formatted = index.format()
expected = [str(index[0])]
self.assertEqual(formatted, expected)
# 2845
index = Index([1, 2.0+3.0j, np.nan])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
# is this really allowed?
index = Index([1, 2.0+3.0j, None])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
self.strIndex[:0].format()
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
self.assertEqual(formatted[0], 'something')
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
self.assertEqual(len(result), 2)
self.assertEqual(result, expected)
def test_format_none(self):
values = ['a', 'b', 'c', None]
idx = Index(values)
idx.format()
self.assertIsNone(idx[3])
def test_take(self):
indexer = [4, 3, 0, 2]
result = self.dateIndex.take(indexer)
expected = self.dateIndex[indexer]
self.assertTrue(result.equals(expected))
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
def test_get_indexer(self):
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, [1, 3, -1])
r1 = idx2.get_indexer(idx1, method='pad')
assert_almost_equal(r1, [-1, 0, 0, 1, 1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
assert_almost_equal(r1, [0, 0, 1, 1, 2])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
def test_slice_locs(self):
for dtype in [int, float]:
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(idx)
self.assertEqual(idx.slice_locs(start=2), (2, n))
self.assertEqual(idx.slice_locs(start=3), (3, n))
self.assertEqual(idx.slice_locs(3, 8), (3, 6))
self.assertEqual(idx.slice_locs(5, 10), (3, n))
self.assertEqual(idx.slice_locs(5.0, 10.0), (3, n))
self.assertEqual(idx.slice_locs(4.5, 10.5), (3, 8))
self.assertEqual(idx.slice_locs(end=8), (0, 6))
self.assertEqual(idx.slice_locs(end=9), (0, 7))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(8, 2), (2, 6))
self.assertEqual(idx2.slice_locs(8.5, 1.5), (2, 6))
self.assertEqual(idx2.slice_locs(7, 3), (2, 5))
self.assertEqual(idx2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
self.assertEqual(idx.slice_locs('a', 'd'), (0, 6))
self.assertEqual(idx.slice_locs(end='d'), (0, 6))
self.assertEqual(idx.slice_locs('a', 'c'), (0, 4))
self.assertEqual(idx.slice_locs('b', 'd'), (2, 6))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs('d', 'a'), (0, 6))
self.assertEqual(idx2.slice_locs(end='a'), (0, 6))
self.assertEqual(idx2.slice_locs('d', 'b'), (0, 4))
self.assertEqual(idx2.slice_locs('c', 'a'), (2, 6))
for dtype in [int, float]:
idx = Index(np.array([10, 12, 12, 14], dtype=dtype))
self.assertEqual(idx.slice_locs(12, 12), (1, 3))
self.assertEqual(idx.slice_locs(11, 13), (1, 3))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(12, 12), (1, 3))
self.assertEqual(idx2.slice_locs(13, 11), (1, 3))
def test_slice_locs_na(self):
idx = Index([np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, start=1.5)
self.assertRaises(KeyError, idx.slice_locs, end=1.5)
self.assertEqual(idx.slice_locs(1), (1, 3))
self.assertEqual(idx.slice_locs(np.nan), (0, 3))
idx = Index([np.nan, np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_drop(self):
n = len(self.strIndex)
dropped = self.strIndex.drop(self.strIndex[lrange(5, 10)])
expected = self.strIndex[lrange(5) + lrange(10, n)]
self.assertTrue(dropped.equals(expected))
self.assertRaises(ValueError, self.strIndex.drop, ['foo', 'bar'])
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
self.assertTrue(dropped.equals(expected))
ser = Index([1, 2, 3])
dropped = ser.drop(1)
expected = Index([2, 3])
self.assertTrue(dropped.equals(expected))
def test_tuple_union_bug(self):
import pandas
import numpy as np
aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])
aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'), (2,
'C')], dtype=[('num', int), ('let', 'a1')])
idx1 = pandas.Index(aidx1)
idx2 = pandas.Index(aidx2)
# intersection broken?
int_idx = idx1.intersection(idx2)
# needs to be 1d like idx1 and idx2
expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))
self.assertEqual(int_idx.ndim, 1)
self.assertTrue(int_idx.equals(expected))
# union broken
union_idx = idx1.union(idx2)
expected = idx2
self.assertEqual(union_idx.ndim, 1)
self.assertTrue(union_idx.equals(expected))
def test_is_monotonic_incomparable(self):
index = Index([5, datetime.now(), 7])
self.assertFalse(index.is_monotonic)
self.assertFalse(index.is_monotonic_decreasing)
def test_get_set_value(self):
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date),
values[67])
self.dateIndex.set_value(values, date, 10)
self.assertEqual(values[67], 10)
def test_isin(self):
values = ['foo', 'bar', 'quux']
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = Index([])
result = idx.isin(values)
self.assertEqual(len(result), 0)
self.assertEqual(result.dtype, np.bool_)
def test_isin_nan(self):
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Index(['a', pd.NaT]).isin([pd.NaT]), [False, True])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([float('nan')]), [False, False])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([pd.NaT]), [False, False])
# Float64Index overrides isin, so must be checked separately
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([float('nan')]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([pd.NaT]), [False, True])
def test_isin_level_kwarg(self):
def check_idx(idx):
values = idx.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(expected, idx.isin(values, level=0))
self.assert_numpy_array_equal(expected, idx.isin(values, level=-1))
self.assertRaises(IndexError, idx.isin, values, level=1)
self.assertRaises(IndexError, idx.isin, values, level=10)
self.assertRaises(IndexError, idx.isin, values, level=-2)
self.assertRaises(KeyError, idx.isin, values, level=1.0)
self.assertRaises(KeyError, idx.isin, values, level='foobar')
idx.name = 'foobar'
self.assert_numpy_array_equal(expected,
idx.isin(values, level='foobar'))
self.assertRaises(KeyError, idx.isin, values, level='xyzzy')
self.assertRaises(KeyError, idx.isin, values, level=np.nan)
check_idx(Index(['qux', 'baz', 'foo', 'bar']))
# Float64Index overrides isin, so must be checked separately
check_idx(Float64Index([1.0, 2.0, 3.0, 4.0]))
def test_boolean_cmp(self):
values = [1, 2, 3, 4]
idx = Index(values)
res = (idx == values)
self.assert_numpy_array_equal(res,np.array([True,True,True,True],dtype=bool))
def test_get_level_values(self):
result = self.strIndex.get_level_values(0)
self.assertTrue(result.equals(self.strIndex))
def test_slice_keep_name(self):
idx = Index(['a', 'b'], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
def test_join_self(self):
# instance attributes of the form self.<name>Index
indices = 'unicode', 'str', 'date', 'int', 'float'
kinds = 'outer', 'inner', 'left', 'right'
for index_kind in indices:
res = getattr(self, '{0}Index'.format(index_kind))
for kind in kinds:
joined = res.join(res, how=kind)
self.assertIs(res, joined)
def test_indexing_doesnt_change_class(self):
idx = Index([1, 2, 3, 'a', 'b', 'c'])
self.assertTrue(idx[1:3].identical(
pd.Index([2, 3], dtype=np.object_)))
self.assertTrue(idx[[0,1]].identical(
pd.Index([1, 2], dtype=np.object_)))
def test_outer_join_sort(self):
left_idx = Index(np.random.permutation(15))
right_idx = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
joined = left_idx.join(right_idx, how='outer')
# right_idx in this case because DatetimeIndex has join precedence over
# Int64Index
expected = right_idx.astype(object).union(left_idx.astype(object))
tm.assert_index_equal(joined, expected)
def test_nan_first_take_datetime(self):
idx = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
res = idx.take([-1, 0, 1])
exp = Index([idx[-1], idx[0], idx[1]])
tm.assert_index_equal(res, exp)
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self):
# GH6552
idx = pd.Index([0, 1, 2])
dt_idx = pd.date_range('20130101', periods=3)
idx.name = None
self.assertEqual(idx.reindex([])[0].name, None)
self.assertEqual(idx.reindex(np.array([]))[0].name, None)
self.assertEqual(idx.reindex(idx.tolist())[0].name, None)
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, None)
self.assertEqual(idx.reindex(idx.values)[0].name, None)
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, None)
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, None)
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, None)
idx.name = 'foobar'
self.assertEqual(idx.reindex([])[0].name, 'foobar')
self.assertEqual(idx.reindex(np.array([]))[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist())[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, 'foobar')
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, 'foobar')
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type([]), np.object_)
self.assertEqual(get_reindex_type(np.array([])), np.object_)
self.assertEqual(get_reindex_type(np.array([], dtype=np.int64)),
np.object_)
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type(pd.Int64Index([])), np.int64)
self.assertEqual(get_reindex_type(pd.Float64Index([])), np.float64)
self.assertEqual(get_reindex_type(pd.DatetimeIndex([])), np.datetime64)
reindexed = idx.reindex(pd.MultiIndex([pd.Int64Index([]),
pd.Float64Index([])],
[[], []]))[0]
self.assertEqual(reindexed.levels[0].dtype.type, np.int64)
self.assertEqual(reindexed.levels[1].dtype.type, np.float64)
class Numeric(Base):
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
        didx = self._holder(np.arange(5,dtype='int64')**2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx * idx
tm.assert_index_equal(result, didx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * date_range('20130101',periods=5))
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_explicit_conversions(self):
# GH 8608
        # add/sub are overridden explicitly for Float/Int Index
idx = self._holder(np.arange(5,dtype='int64'))
# float conversions
arr = np.arange(5,dtype='int64')*3.2
expected = Float64Index(arr)
fidx = idx * 3.2
tm.assert_index_equal(fidx,expected)
fidx = 3.2 * idx
tm.assert_index_equal(fidx,expected)
# interops with numpy arrays
expected = Float64Index(arr)
a = np.zeros(5,dtype='float64')
result = fidx - a
tm.assert_index_equal(result,expected)
expected = Float64Index(-arr)
a = np.zeros(5,dtype='float64')
result = a - fidx
tm.assert_index_equal(result,expected)
def test_ufunc_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
result = np.sin(idx)
expected = Float64Index(np.sin(np.arange(5,dtype='int64')))
tm.assert_index_equal(result, expected)
class TestFloat64Index(Numeric, tm.TestCase):
_holder = Float64Index
_multiprocess_can_split_ = True
def setUp(self):
self.mixed = Float64Index([1.5, 2, 3, 4, 5])
self.float = Float64Index(np.arange(5) * 2.5)
def create_index(self):
return Float64Index(np.arange(5,dtype='float64'))
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.float).__name__):
hash(self.float)
def test_repr_roundtrip(self):
for ind in (self.mixed, self.float):
tm.assert_index_equal(eval(repr(ind)), ind)
def check_is_index(self, i):
self.assertIsInstance(i, Index)
self.assertNotIsInstance(i, Float64Index)
def check_coerce(self, a, b, is_float_index=True):
self.assertTrue(a.equals(b))
if is_float_index:
self.assertIsInstance(b, Float64Index)
else:
self.check_is_index(b)
def test_constructor(self):
# explicit construction
index = Float64Index([1,2,3,4,5])
self.assertIsInstance(index, Float64Index)
self.assertTrue((index.values == np.array([1,2,3,4,5],dtype='float64')).all())
index = Float64Index(np.array([1,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
index = Float64Index([1.,2,3,4,5])
self.assertIsInstance(index, Float64Index)
index = Float64Index(np.array([1.,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, float)
index = Float64Index(np.array([1.,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
index = Float64Index(np.array([1,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
# nan handling
result = Float64Index([np.nan, np.nan])
self.assertTrue(pd.isnull(result.values).all())
result = Float64Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
result = Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
def test_constructor_invalid(self):
# invalid
self.assertRaises(TypeError, Float64Index, 0.)
self.assertRaises(TypeError, Float64Index, ['a','b',0.])
self.assertRaises(TypeError, Float64Index, [Timestamp('20130101')])
def test_constructor_coerce(self):
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5]))
self.check_coerce(self.float,Index(np.arange(5) * 2.5))
self.check_coerce(self.float,Index(np.array(np.arange(5) * 2.5, dtype=object)))
def test_constructor_explicit(self):
# these don't auto convert
self.check_coerce(self.float,Index((np.arange(5) * 2.5), dtype=object),
is_float_index=False)
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5],dtype=object),
is_float_index=False)
def test_astype(self):
result = self.float.astype(object)
self.assertTrue(result.equals(self.float))
self.assertTrue(self.float.equals(result))
self.check_is_index(result)
i = self.mixed.copy()
i.name = 'foo'
result = i.astype(object)
self.assertTrue(result.equals(i))
self.assertTrue(i.equals(result))
self.check_is_index(result)
def test_equals(self):
i = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i2))
i = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i2))
def test_get_loc_na(self):
idx = Float64Index([np.nan, 1, 2])
self.assertEqual(idx.get_loc(1), 1)
self.assertEqual(idx.get_loc(np.nan), 0)
idx = Float64Index([np.nan, 1, np.nan])
self.assertEqual(idx.get_loc(1), 1)
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_contains_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(np.nan in i)
def test_contains_not_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(1.0 in i)
def test_doesnt_contain_all_the_things(self):
i = Float64Index([np.nan])
self.assertFalse(i.isin([0]).item())
self.assertFalse(i.isin([1]).item())
self.assertTrue(i.isin([np.nan]).item())
def test_nan_multiple_containment(self):
i = Float64Index([1.0, np.nan])
np.testing.assert_array_equal(i.isin([1.0]), np.array([True, False]))
np.testing.assert_array_equal(i.isin([2.0, np.pi]),
np.array([False, False]))
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, True]))
np.testing.assert_array_equal(i.isin([1.0, np.nan]),
np.array([True, True]))
i = Float64Index([1.0, 2.0])
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, False]))
def test_astype_from_object(self):
index = Index([1.0, np.nan, 0.2], dtype='object')
result = index.astype(float)
expected = Float64Index([1.0, np.nan, 0.2])
tm.assert_equal(result.dtype, expected.dtype)
tm.assert_index_equal(result, expected)
class TestInt64Index(Numeric, tm.TestCase):
_holder = Int64Index
_multiprocess_can_split_ = True
def setUp(self):
self.index = Int64Index(np.arange(0, 20, 2))
def create_index(self):
return Int64Index(np.arange(5,dtype='int64'))
def test_too_many_names(self):
def testit():
self.index.names = ["roger", "harold"]
assertRaisesRegexp(ValueError, "^Length", testit)
def test_constructor(self):
# pass list, coerce fine
index = Int64Index([-5, 0, 1, 2])
expected = np.array([-5, 0, 1, 2], dtype=np.int64)
self.assert_numpy_array_equal(index, expected)
# from iterable
index = Int64Index(iter([-5, 0, 1, 2]))
self.assert_numpy_array_equal(index, expected)
# scalar raise Exception
self.assertRaises(TypeError, Int64Index, 5)
# copy
arr = self.index.values
new_index = Int64Index(arr, copy=True)
self.assert_numpy_array_equal(new_index, self.index)
val = arr[0] + 3000
# this should not change index
arr[0] = val
self.assertNotEqual(new_index[0], val)
def test_constructor_corner(self):
arr = np.array([1, 2, 3, 4], dtype=object)
index = Int64Index(arr)
self.assertEqual(index.values.dtype, np.int64)
self.assertTrue(index.equals(arr))
# preventing casting
arr = np.array([1, '2', 3, '4'], dtype=object)
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr)
arr_with_floats = [0, 2, 3, 4, 5, 1.25, 3, -1]
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr_with_floats)
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_copy(self):
i = Int64Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Int64Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_coerce_list(self):
# coerce things
arr = Index([1, 2, 3, 4])
tm.assert_isinstance(arr, Int64Index)
# but not if explicit dtype passed
arr = Index([1, 2, 3, 4], dtype=object)
tm.assert_isinstance(arr, Index)
def test_dtype(self):
self.assertEqual(self.index.dtype, np.int64)
def test_is_monotonic(self):
self.assertTrue(self.index.is_monotonic)
self.assertTrue(self.index.is_monotonic_increasing)
self.assertFalse(self.index.is_monotonic_decreasing)
index = Int64Index([4, 3, 2, 1])
self.assertFalse(index.is_monotonic)
self.assertTrue(index.is_monotonic_decreasing)
index = Int64Index([1])
self.assertTrue(index.is_monotonic)
self.assertTrue(index.is_monotonic_increasing)
self.assertTrue(index.is_monotonic_decreasing)
def test_is_monotonic_na(self):
examples = [Index([np.nan]),
Index([np.nan, 1]),
Index([1, 2, np.nan]),
Index(['a', 'b', np.nan]),
pd.to_datetime(['NaT']),
pd.to_datetime(['NaT', '2000-01-01']),
pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']),
pd.to_timedelta(['1 day', 'NaT']),
]
for index in examples:
self.assertFalse(index.is_monotonic_increasing)
self.assertFalse(index.is_monotonic_decreasing)
def test_equals(self):
same_values = Index(self.index, dtype=object)
self.assertTrue(self.index.equals(same_values))
self.assertTrue(same_values.equals(self.index))
def test_identical(self):
i = Index(self.index.copy())
self.assertTrue(i.identical(self.index))
same_values_different_type = Index(i, dtype=object)
self.assertFalse(i.identical(same_values_different_type))
i = self.index.copy(dtype=object)
i = i.rename('foo')
same_values = Index(i, dtype=object)
self.assertTrue(same_values.identical(self.index.copy(dtype=object)))
self.assertFalse(i.identical(self.index))
self.assertTrue(Index(same_values, name='foo', dtype=object
).identical(i))
self.assertFalse(
self.index.copy(dtype=object)
.identical(self.index.copy(dtype='int64')))
def test_get_indexer(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target)
expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_pad(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='pad')
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_backfill(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='backfill')
expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5])
self.assert_numpy_array_equal(indexer, expected)
def test_join_outer(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
# guarantee of sortedness
res, lidx, ridx = self.index.join(other, how='outer',
return_indexers=True)
noidx_res = self.index.join(other, how='outer')
self.assertTrue(res.equals(noidx_res))
eres = Int64Index([0, 1, 2, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 25])
elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1],
dtype=np.int64)
eridx = np.array([-1, 3, 4, -1, 5, -1, 0, -1, -1, 1, -1, -1, -1, 2],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='outer',
return_indexers=True)
noidx_res = self.index.join(other_mono, how='outer')
self.assertTrue(res.equals(noidx_res))
eridx = np.array([-1, 0, 1, -1, 2, -1, 3, -1, -1, 4, -1, -1, -1, 5],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
def test_join_inner(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='inner',
return_indexers=True)
# no guarantee of sortedness, so sort for comparison purposes
ind = res.argsort()
res = res.take(ind)
lidx = lidx.take(ind)
ridx = ridx.take(ind)
eres = Int64Index([2, 12])
elidx = np.array([1, 6])
eridx = np.array([4, 1])
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='inner',
return_indexers=True)
res2 = self.index.intersection(other_mono)
self.assertTrue(res.equals(res2))
eridx = np.array([1, 4])
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
def test_join_left(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='left',
return_indexers=True)
eres = self.index
eridx = np.array([-1, 4, -1, -1, -1, -1, 1, -1, -1, -1],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='left',
return_indexers=True)
eridx = np.array([-1, 1, -1, -1, -1, -1, 4, -1, -1, -1],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, eridx)
# non-unique
"""
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,7,9])
res, lidx, ridx = idx2.join(idx, how='left', return_indexers=True)
eres = idx2
eridx = np.array([0, 2, 3, -1, -1])
elidx = np.array([0, 1, 2, 3, 4])
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
"""
def test_join_right(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='right',
return_indexers=True)
eres = other
elidx = np.array([-1, 6, -1, -1, 1, -1],
dtype=np.int64)
tm.assert_isinstance(other, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='right',
return_indexers=True)
eres = other_mono
elidx = np.array([-1, 1, -1, -1, 6, -1],
dtype=np.int64)
tm.assert_isinstance(other, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
# non-unique
"""
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,7,9])
res, lidx, ridx = idx.join(idx2, how='right', return_indexers=True)
eres = idx2
elidx = np.array([0, 2, 3, -1, -1])
eridx = np.array([0, 1, 2, 3, 4])
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,9,7])
res = idx.join(idx2, how='right', return_indexers=False)
eres = idx2
self.assert(res.equals(eres))
"""
def test_join_non_int_index(self):
other = Index([3, 6, 7, 8, 10], dtype=object)
outer = self.index.join(other, how='outer')
outer2 = other.join(self.index, how='outer')
expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14,
16, 18], dtype=object)
self.assertTrue(outer.equals(outer2))
self.assertTrue(outer.equals(expected))
inner = self.index.join(other, how='inner')
inner2 = other.join(self.index, how='inner')
expected = Index([6, 8, 10], dtype=object)
self.assertTrue(inner.equals(inner2))
self.assertTrue(inner.equals(expected))
left = self.index.join(other, how='left')
self.assertTrue(left.equals(self.index))
left2 = other.join(self.index, how='left')
self.assertTrue(left2.equals(other))
right = self.index.join(other, how='right')
self.assertTrue(right.equals(other))
right2 = other.join(self.index, how='right')
self.assertTrue(right2.equals(self.index))
def test_join_non_unique(self):
left = Index([4, 4, 3, 3])
joined, lidx, ridx = left.join(left, return_indexers=True)
exp_joined = Index([3, 3, 3, 3, 4, 4, 4, 4])
self.assertTrue(joined.equals(exp_joined))
exp_lidx = np.array([2, 2, 3, 3, 0, 0, 1, 1], dtype=np.int64)
self.assert_numpy_array_equal(lidx, exp_lidx)
exp_ridx = np.array([2, 3, 2, 3, 0, 1, 0, 1], dtype=np.int64)
self.assert_numpy_array_equal(ridx, exp_ridx)
def test_join_self(self):
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = self.index.join(self.index, how=kind)
self.assertIs(self.index, joined)
def test_intersection(self):
other = Index([1, 2, 3, 4, 5])
result = self.index.intersection(other)
expected = np.sort(np.intersect1d(self.index.values, other.values))
self.assert_numpy_array_equal(result, expected)
result = other.intersection(self.index)
expected = np.sort(np.asarray(np.intersect1d(self.index.values,
other.values)))
self.assert_numpy_array_equal(result, expected)
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
i1 = Index(dt_dates, dtype=object)
i2 = Index(['aa'], dtype=object)
res = i2.intersection(i1)
self.assertEqual(len(res), 0)
def test_union_noncomparable(self):
from datetime import datetime, timedelta
# corner case, non-Int64Index
now = datetime.now()
other = Index([now + timedelta(i) for i in range(4)], dtype=object)
result = self.index.union(other)
expected = np.concatenate((self.index, other))
self.assert_numpy_array_equal(result, expected)
result = other.union(self.index)
expected = np.concatenate((other, self.index))
self.assert_numpy_array_equal(result, expected)
def test_cant_or_shouldnt_cast(self):
# can't
data = ['foo', 'bar', 'baz']
self.assertRaises(TypeError, Int64Index, data)
# shouldn't
data = ['0', '1', '2']
self.assertRaises(TypeError, Int64Index, data)
def test_view_Index(self):
self.index.view(Index)
def test_prevent_casting(self):
result = self.index.astype('O')
self.assertEqual(result.dtype, np.object_)
def test_take_preserve_name(self):
index = Int64Index([1, 2, 3, 4], name='foo')
taken = index.take([3, 0, 1])
self.assertEqual(index.name, taken.name)
def test_int_name_format(self):
from pandas import Series, DataFrame
index = Index(['a', 'b', 'c'], name=0)
s = Series(lrange(3), index)
df = DataFrame(lrange(3), index=index)
repr(s)
repr(df)
def test_print_unicode_columns(self):
df = pd.DataFrame(
{u("\u05d0"): [1, 2, 3], "\u05d1": [4, 5, 6], "c": [7, 8, 9]})
repr(df.columns) # should not raise UnicodeDecodeError
def test_repr_summary(self):
with cf.option_context('display.max_seq_items', 10):
r = repr(pd.Index(np.arange(1000)))
self.assertTrue(len(r) < 100)
self.assertTrue("..." in r)
def test_repr_roundtrip(self):
tm.assert_index_equal(eval(repr(self.index)), self.index)
def test_unicode_string_with_unicode(self):
idx = Index(lrange(1000))
if compat.PY3:
str(idx)
else:
compat.text_type(idx)
def test_bytestring_with_unicode(self):
idx = Index(lrange(1000))
if compat.PY3:
bytes(idx)
else:
str(idx)
def test_slice_keep_name(self):
idx = Int64Index([1, 2], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
class TestDatetimeIndex(Base, tm.TestCase):
_holder = DatetimeIndex
_multiprocess_can_split_ = True
def create_index(self):
return date_range('20130101',periods=5)
def test_pickle_compat_construction(self):
pass
def test_numeric_compat(self):
super(TestDatetimeIndex, self).test_numeric_compat()
if not compat.PY3_2:
for f in [lambda : np.timedelta64(1, 'D').astype('m8[ns]') * pd.date_range('2000-01-01', periods=3),
lambda : pd.date_range('2000-01-01', periods=3) * np.timedelta64(1, 'D').astype('m8[ns]') ]:
self.assertRaises(TypeError, f)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index=date_range('20130101',periods=3,tz='US/Eastern',name='foo')
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
self.assertEqual(str(index.reindex([])[0].tz), 'US/Eastern')
self.assertEqual(str(index.reindex(np.array([]))[0].tz), 'US/Eastern')
class TestPeriodIndex(Base, tm.TestCase):
_holder = PeriodIndex
_multiprocess_can_split_ = True
def create_index(self):
return period_range('20130101',periods=5,freq='D')
def test_pickle_compat_construction(self):
pass
class TestTimedeltaIndex(Base, tm.TestCase):
_holder = TimedeltaIndex
_multiprocess_can_split_ = True
def create_index(self):
return pd.to_timedelta(range(5),unit='d') + pd.offsets.Hour(1)
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
        didx = self._holder(np.arange(5,dtype='int64')**2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * idx)
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_pickle_compat_construction(self):
pass
class TestMultiIndex(Base, tm.TestCase):
_holder = MultiIndex
_multiprocess_can_split_ = True
_compat_props = ['shape', 'ndim', 'size', 'itemsize']
def setUp(self):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=self.index_names, verify_integrity=False)
def create_index(self):
return self.index
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
self.assertTrue(i.labels[0].dtype == 'int8')
self.assertTrue(i.labels[1].dtype == 'int8')
i = MultiIndex.from_product([['a'],range(40)])
self.assertTrue(i.labels[1].dtype == 'int8')
i = MultiIndex.from_product([['a'],range(400)])
self.assertTrue(i.labels[1].dtype == 'int16')
i = MultiIndex.from_product([['a'],range(40000)])
self.assertTrue(i.labels[1].dtype == 'int32')
i = pd.MultiIndex.from_product([['a'],range(1000)])
self.assertTrue((i.labels[0]>=0).all())
self.assertTrue((i.labels[1]>=0).all())
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_set_names_and_rename(self):
# so long as these are synonyms, we don't need to test set_names
self.assertEqual(self.index.rename, self.index.set_names)
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, new_names)
with assertRaisesRegexp(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, new_names2)
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, [new_names[0], self.index_names[1]])
res = ind.set_names(new_names2[0], level=0, inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, [new_names2[0], self.index_names[1]])
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, new_names)
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, new_names2)
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels, labels = self.index.levels, self.index.labels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
assert_almost_equal(act, exp)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1], inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels, labels = self.index.levels, self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
assert_almost_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1], inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assertRaisesRegexp(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assertRaisesRegexp(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assertRaisesRegexp(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assertRaisesRegexp(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with assertRaisesRegexp(TypeError, mutable_regex):
levels[0] = levels[0]
with assertRaisesRegexp(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with assertRaisesRegexp(TypeError, mutable_regex):
labels[0] = labels[0]
with assertRaisesRegexp(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with assertRaisesRegexp(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
self.assertIsNotNone(mi1._tuples)
# make sure level setting works
new_vals = mi1.set_levels(levels2).values
assert_almost_equal(vals2, new_vals)
# non-inplace doesn't kill _tuples [implementation detail]
assert_almost_equal(mi1._tuples, vals)
# and values is still same too
assert_almost_equal(mi1.values, vals)
# inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
assert_almost_equal(mi1.values, vals2)
# make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.array([(long(1), 'a')] * 6, dtype=object)
new_values = mi2.set_labels(labels2).values
# not inplace shouldn't change
assert_almost_equal(mi2._tuples, vals2)
# should have correct values
assert_almost_equal(exp_values, new_values)
# and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
self.assertEqual(mi.labels[0][0], val)
labels[0] = 15
self.assertEqual(mi.labels[0][0], val)
val = levels[0]
levels[0] = "PANDA"
self.assertEqual(mi.levels[0][0], val)
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays(
[lev1, lev2],
names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sortlevel()
self.assertIsNone(df.is_copy)
self.assertEqual(df.index.names, ('Name', 'Number'))
df = df.set_value(('grethe', '4'), 'one', 99.34)
self.assertIsNone(df.is_copy)
self.assertEqual(df.index.names, ('Name', 'Number'))
def test_names(self):
# names are assigned in __init__
names = self.index_names
level_names = [level.name for level in self.index.levels]
self.assertEqual(names, level_names)
# setting bad names on existing
index = self.index
assertRaisesRegexp(ValueError, "^Length of names", setattr, index,
"names", list(index.names) + ["third"])
assertRaisesRegexp(ValueError, "^Length of names", setattr, index,
"names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
assertRaisesRegexp(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
assertRaisesRegexp(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
self.assertEqual(ind_names, level_names)
def test_reference_duplicate_name(self):
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['x', 'x'])
self.assertTrue(idx._reference_duplicate_name('x'))
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['x', 'y'])
self.assertFalse(idx._reference_duplicate_name('x'))
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with assertRaisesRegexp(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
def test_constructor_single_level(self):
single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
tm.assert_isinstance(single_level, Index)
self.assertNotIsInstance(single_level, MultiIndex)
self.assertEqual(single_level.name, 'first')
single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]])
self.assertIsNone(single_level.name)
def test_constructor_no_levels(self):
assertRaisesRegexp(ValueError, "non-zero number of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assertRaisesRegexp(TypeError, both_re):
MultiIndex(levels=[])
with tm.assertRaisesRegexp(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
assertRaisesRegexp(ValueError, "Length of levels and labels must be"
" the same", MultiIndex, levels=levels,
labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assertRaisesRegexp(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assertRaisesRegexp(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assertRaisesRegexp(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assertRaisesRegexp(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
# deprecated properties
with warnings.catch_warnings():
warnings.simplefilter('ignore')
with tm.assertRaisesRegexp(ValueError, length_error):
self.index.copy().levels = [['a'], ['b']]
            with tm.assertRaisesRegexp(ValueError, label_error):
                self.index.copy().labels = [[0, 0, 0, 0], [0, 0]]
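# --- Illustration (editor's sketch, not part of the original test module) ---
# The TestMultiIndex cases above manipulate levels and labels directly. A
# minimal sketch of how the two fit together, assuming the same pandas era as
# this module (later pandas versions rename "labels" to "codes"):
def _demo_multiindex_levels_labels():
    import pandas as pd
    mi = pd.MultiIndex(levels=[['foo', 'bar'], ['one', 'two']],
                       labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
                       names=['first', 'second'])
    # Position i is the tuple (levels[0][labels[0][i]], levels[1][labels[1][i]]),
    # so the index above is [('foo','one'), ('foo','two'), ('bar','one'), ('bar','two')].
    return list(mi)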
import datetime
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
sns.set_theme(style="whitegrid")
class Datos():
def __init__(self, ruta):
self.ruta = ruta
def leerCSV(self):
        datos_leidos = pd.read_csv(self.ruta)
        return datos_leidos
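# --- Illustration (editor's sketch): one way the Datos class above might be
# used, given that leerCSV returns the DataFrame it reads. The file name below
# is hypothetical and only serves the example.
def _demo_datos(ruta="mediciones.csv"):
    datos = Datos(ruta)
    df = datos.leerCSV()
    return df.head()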
# -*- coding: utf-8 -*-
"""
Created on Sat May 19 17:14:29 2018
@author: GTayl
"""
################################## Set-up ##########################################
# Import the required packages
import pandas as pd
import time
import os
from pandas import ExcelWriter
from pandas import ExcelFile
# Change the working directory
os.chdir("C:\\Users\\GTayl\\Desktop\\Finance Modeling\\Wikipedia")
cwd = os.getcwd()
from Wikipedia_Page_Features import Get_Wiki_Page_Data
################################## Data-Prep ##########################################
# Read in Seed List
seed_file = "Machine_Learning_Seed_List.xlsx"
seed_import = pd.read_excel(cwd+"\\Seeds\\"+seed_file,names=['Page','Tag'],header=None)
# Dedupe Seeds and Define Tags
# Obtain deduped seed list
seed_list = pd.DataFrame(seed_import['Page']).drop_duplicates()
def seed_tag_collector(seed_import, seed):
# Subset Dataframe for Seed Entry only
temp = seed_import[seed_import['Page']==seed]
# Get a list of all the tags that apply to that seed and convert to a Kumu compliant text string
tag_list = list(temp['Tag'])
tag_string = ""
for tag in tag_list:
tag_string = tag_string+str(tag)+"|"
tag_string = tag_string[:-1]
return(tag_string)
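# --- Illustration (editor's sketch): what seed_tag_collector produces for a
# seed that appears twice in the import sheet. The toy frame below is
# hypothetical and only mirrors the Page/Tag layout read from the Excel file.
def _demo_seed_tag_collector():
    toy = pd.DataFrame({'Page': ['Deep_learning', 'Deep_learning', 'Statistics'],
                        'Tag': ['ML', 'AI', 'Math']})
    return seed_tag_collector(toy, 'Deep_learning')   # -> "ML|AI"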
# Generate Seed List with Corresponding Tags
seed_list['Tags'] = ""
seed_list['Tags'] = seed_list.apply(lambda row: seed_tag_collector(seed_import, row['Page']),axis=1)
# test = seed_list.head(100)
# seed_list = test
################################## Wikipedia API Call ##########################################
# Initialize Master Lists
Master_Node_List = pd.DataFrame(columns=['Label','Tags','Description','Average_pg_views'])
Master_Direct_Edge_List = pd.DataFrame(columns=['To','From','Strength','Tag'])
Master_Implied_Edge_List = pd.DataFrame(columns=['To','From','Strength','Tag'])
# API Call for Seed Set
for index, row in seed_list.iterrows():
print("Collecting data for: "+str(row['Page']))
page = Get_Wiki_Page_Data(str(row['Page']))
# Append node features
Master_Node_List = Master_Node_List.append({'Label':page['title'], 'Tags':row['Tags'],'Description':page['description'], 'Average_pg_views':page['avg_page_views']}, ignore_index=True)
# Append edge features
for e in page['explicit_links']:
Master_Direct_Edge_List = Master_Direct_Edge_List.append({"To":str(e), "From":page['title'], "Strength":1, "Tag":"Direct"}, ignore_index=True)
for e in page['implied_links']:
Master_Implied_Edge_List = Master_Implied_Edge_List.append({"To":str(e), "From":page['title'], "Strength":1, "Tag":"Implied"}, ignore_index=True)
time.sleep(1.5)
# Cleaning Direct Edge List
# Cleaned_Edges = Master_Edge_List[Master_Edge_List['Tag']=='Direct']
################################## Wikipedia API Call - Secondary Links ##########################################
Cleaned_Edges = pd.DataFrame(Master_Direct_Edge_List['To'])
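# --- Illustration (editor's sketch): one possible continuation of the
# "Secondary Links" section, assuming the same Get_Wiki_Page_Data helper and
# rate limiting used for the seed pass above. This is not the author's code.
def _demo_secondary_pass(edge_targets):
    secondary_nodes = pd.DataFrame(columns=['Label', 'Description', 'Average_pg_views'])
    for target in edge_targets['To'].drop_duplicates():
        page = Get_Wiki_Page_Data(str(target))
        secondary_nodes = secondary_nodes.append(
            {'Label': page['title'],
             'Description': page['description'],
             'Average_pg_views': page['avg_page_views']},
            ignore_index=True)
        time.sleep(1.5)
    return secondary_nodes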