prompt (string, lengths 19–1.03M) | completion (string, lengths 4–2.12k) | api (string, lengths 8–90) |
---|---|---|
import os
import logging
import numpy as np
from sumolib import checkBinary
import pandas as pd
import traci
import traci.constants as tc
import xml.etree.cElementTree as ET
ILD_LENGTH = 100
VER_LENGTH = 5
# N_LANES = 3
NEIGHBOR_MAP = {'I0':['I1', 'I3'],
'I1':['I0', 'I2', 'I4'],
'I2':['I1', 'I5'],
'I3':['I0', 'I4', 'I6'],
'I4':['I1', 'I3', 'I5', 'I7'],
'I5':['I2', 'I4', 'I8'],
'I6':['I3', 'I7'],
'I7':['I4', 'I6', 'I8'],
'I8':['I5', 'I7']}
PHASE_MAP = {0:'GGGrrrrrGGGrrrrr', 1:'yyyrrrrryyyrrrrr',
2:'rrrGrrrrrrrGrrrr', 3:'rrryrrrrrrryrrrr',
4:'rrrrGGGrrrrrGGGr', 5:'rrrryyyrrrrryyyr',
6:'rrrrrrrGrrrrrrrG', 7:'rrrrrrryrrrrrrry'}
WIND_MAP = {'I0':{'P0':0, 'I3':0, 'P6':3, 'I1':3},
'I1':{'P1':0, 'I4':0, 'I0':3, 'I2':3},
'I2':{'P2':0, 'I5':0, 'I1':3, 'P9':3},
'I3':{'I0':0, 'I6':0, 'P7':3, 'I4':3},
'I4':{'I1':0, 'I7':0, 'I3':3, 'I5':3},
'I5':{'I2':0, 'I8':0, 'I4':3, 'P10':3},
'I6':{'I3':0, 'P3':0, 'P8':3, 'I7':3},
'I7':{'I4':0, 'P4':0, 'I6':3, 'I8':3},
'I8':{'I5':0, 'P5':0, 'I7':3, 'P11':3}}
class TrafficNode:
def __init__(self, name, neighbor=None):
self.name = name
self.neighbor = neighbor if neighbor is not None else []
self.lanes_in = []
self.ilds_in = []
# self.phase_id = -1
class TrafficEnv:
def __init__(self, cfg_sumo, output_path='./logs/', port=4343, gui=False):
self.cfg_sumo = cfg_sumo
self.port = port
self.cur_episode = 0
self.margin = 13.6
self.neighbor_map = NEIGHBOR_MAP
self.phase_map = PHASE_MAP
self.ild_length = ILD_LENGTH
self.ver_length = VER_LENGTH
self.wind_map = WIND_MAP
self.sim_seed = 42
self.name = 'Grid9'
self.agent = 'ma2c'
self.output_path = output_path
self.control_interval_sec = 5
self.yellow_interval_sec = 2
self.episode_length_sec = 3600
self.coef_reward = 0.1
# self.T = np.ceil(self.episode_length_sec / self.control_interval_sec)
# params need reset
self.cur_step = 0
# if not os.path.exists(self.output_path+'/logs'):
# os.makedirs(self.output_path+'./logs')
self.metric_data = []
self.step_data = []
# self.metrics_file = self.output_path + 'metrics.csv'
# with open(self.metrics_file, 'w') as f:
# f.write('episode,time,step,number_total_car,number_departed_car,number_arrived_car,avg_wait_sec,avg_speed_mps,avg_queue\n')
# self.step_file = self.output_path + 'step.csv'
# with open(self.step_file, 'w') as f:
# f.write('episode,time,step,action,reward_jam,reward_waiting,reward,total_reward\n')
if gui:
app = 'sumo-gui'
else:
app = 'sumo'
command = [checkBinary(app), "--start", '-c', self.cfg_sumo]
command += ['--seed', str(self.sim_seed)]
command += ['--no-step-log', 'True']
command += ['--time-to-teleport', '300']
command += ['--no-warnings', 'True']
command += ['--duration-log.disable', 'True']
# command += ['--tripinfo-output',
# self.output_path + ('%s_%s_trip.xml' % (self.name, self.agent))]
traci.start(command, port=self.port)
self.nodes = self._init_node()
self.nodes_name = sorted(list(self.nodes.keys()))
s = 'Env: init %d node information:\n' % len(self.nodes_name)
for node_name in self.nodes_name:
s += node_name + ':\n'
s += '\tneighbor: %s\n' % str(self.nodes[node_name].neighbor)
logging.info(s)
for node_name in self.nodes_name:
traci.junction.subscribeContext(node_name, tc.CMD_GET_VEHICLE_VARIABLE, self.ild_length,
[tc.VAR_LANE_ID, tc.VAR_LANEPOSITION,
tc.VAR_SPEED, tc.VAR_WAITING_TIME])
def _init_node(self):
nodes = {}
for node_name in traci.trafficlight.getIDList():
if node_name in self.neighbor_map:
neighbor = self.neighbor_map[node_name]
else:
logging.info('node %s can not be found' % node_name)
neighbor = []
nodes[node_name] = TrafficNode(node_name, neighbor)
nodes[node_name].lanes_in = traci.trafficlight.getControlledLanes(node_name)
nodes[node_name].ilds_in = nodes[node_name].lanes_in
return nodes
def _get_obs(self, cx_res):
height = int(self.ild_length/self.ver_length)
position, phase = {}, {}
for node_name in self.nodes_name:
width = int(len(self.nodes[node_name].lanes_in)/2) - 2
# print(self.nodes[node_name].lanes_in)
position[node_name] = np.zeros(shape=(height, width))
phase[node_name] = np.zeros(shape=(int(len(self.phase_map)/2)))
current_phase = int(traci.trafficlight.getPhase(node_name)/2)
phase[node_name][current_phase] = 1
if not cx_res:
return [position, phase]
for node_name, res in cx_res.items():
if not res:
continue
for _, mes in res.items():
f_node, t_node, lane = mes[tc.VAR_LANE_ID].split('_')
if t_node == node_name:
wind = self._get_position_windex(f_node, t_node, lane)
if f_node[0] == 'I':
hind = int((500 - 2 * self.margin - mes[tc.VAR_LANEPOSITION]) / self.ver_length)
elif f_node[0] == 'P':
hind = int((200 - self.margin - mes[tc.VAR_LANEPOSITION]) / self.ver_length)
if hind < 0 or hind >= height:
logging.info(str(res))
raise ValueError(str(hind)+' h_ind is wrong')
position[node_name][hind, wind] += 1
if np.amax(position[node_name]) > 2:
raise ValueError('max value of position must be <= 2')
return [position, phase]
def _get_position_windex(self, from_node, to_node, n_lane):
return int(n_lane) + self.wind_map[to_node].get(from_node)
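# Illustrative use of the maps above (lane index is assumed, offsets taken from WIND_MAP):
#   self._get_position_windex('I1', 'I0', '2') returns 2 + WIND_MAP['I0']['I1'] = 2 + 3 = 5,
#   i.e. the column offset of lane 2 on the I1 -> I0 approach in the position matrix.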
def _get_reward(self, cx_res, action):
reward = {}
reward_jam = {}
reward_waiting = {}
for node_name in self.nodes_name:
if not cx_res:
reward[node_name], reward_jam[node_name], reward_waiting[node_name] = 0, 0, 0
continue
res = cx_res.get(node_name)
if res is None:
reward[node_name], reward_jam[node_name], reward_waiting[node_name] = 0, 0, 0
continue
jam_length, waitingtime = 0, 0
for ild in self.nodes[node_name].ilds_in:
jam_length += traci.lanearea.getJamLengthVehicle(ild)
for _, mes in res.items():
_, t_node, _ = mes[tc.VAR_LANE_ID].split('_')
if t_node == node_name:
waitingtime += mes[tc.VAR_WAITING_TIME]
reward_jam[node_name] = -jam_length
reward_waiting[node_name] = -waitingtime
reward[node_name] = -jam_length - self.coef_reward * waitingtime
return reward, reward_jam, reward_waiting
def _measure_step(self):
cars = traci.vehicle.getIDList()
num_tot_car = len(cars)
num_in_car = traci.simulation.getDepartedNumber()
num_out_car = traci.simulation.getArrivedNumber()
if num_tot_car > 0:
avg_waiting_time = np.mean([traci.vehicle.getWaitingTime(car) for car in cars])
avg_speed = np.mean([traci.vehicle.getSpeed(car) for car in cars])
else:
avg_speed = 0
avg_waiting_time = 0
# trip-related measurements are not supported by traci;
# they need to be read from the output file afterwards
queues = []
for node_name in self.nodes_name:
for ild in self.nodes[node_name].ilds_in:
queues.append(traci.lane.getLastStepHaltingNumber(ild))
avg_queue = np.mean(np.array(queues))
cur_traffic = {'episode': self.cur_episode,
'time_sec': self.cur_step,
'step': self.cur_step / self.control_interval_sec,
'number_total_car': num_tot_car,
'number_departed_car': num_in_car,
'number_arrived_car': num_out_car,
'avg_wait_sec': avg_waiting_time,
'avg_speed_mps': avg_speed,
'avg_queue': avg_queue}
self.metric_data.append(cur_traffic)
def _simulate(self, num_steps):
for _ in range(num_steps):
traci.simulationStep()
self.cur_step += 1
def step(self, action):
for node_name in self.nodes_name:
a = action[node_name]
current_phase = traci.trafficlight.getPhase(node_name)
next_phase = (current_phase + a) % len(self.phase_map)
traci.trafficlight.setPhase(node_name, next_phase)
self._simulate(self.yellow_interval_sec)
for node_name in self.nodes_name:
a = action[node_name]
current_phase = traci.trafficlight.getPhase(node_name)
next_phase = (current_phase + a) % len(self.phase_map)
traci.trafficlight.setPhase(node_name, next_phase)
self._simulate(self.control_interval_sec-self.yellow_interval_sec)
self._measure_step()
cx_res = {node_name: traci.junction.getContextSubscriptionResults(node_name) \
for node_name in self.nodes_name}
obs = self._get_obs(cx_res)
reward, reward_jam, reward_waiting = self._get_reward(cx_res, action)
done = True if self.cur_step >= self.episode_length_sec else False
info = {'episode': self.cur_episode,
'time': self.cur_step,
'step': self.cur_step / self.control_interval_sec,
'action': [action[node_name] for node_name in self.nodes_name],
'reward_jam':[reward_jam[node_name] for node_name in self.nodes_name],
'reward_waiting':[reward_waiting[node_name] for node_name in self.nodes_name],
'reward': [reward[node_name] for node_name in self.nodes_name],
'total_reward': np.sum([reward[node_name] for node_name in self.nodes_name])}
self.step_data.append(info)
return obs, reward, done, info
def reset(self, gui=False):
# return obs
# for node_name in self.nodes_name:
# self.nodes[node_name].reset()
self.cur_episode += 1
self.cur_step = 0
# self.close()
if gui:
app = 'sumo-gui'
else:
app = 'sumo'
command = ['--start','-c', self.cfg_sumo]
# command += ['--seed', str(self.sim_seed)]
# command += ['--no-step-log', 'True']
# command += ['--time-to-teleport', '300']
# command += ['--no-warnings', 'True']
# command += ['--duration-log.disable', 'True']
# command += ['--tripinfo-output',
# self.output_path + ('%s_%s_trip.xml' % (self.name, self.agent))]
traci.load(command)
s = 'Env: init %d node information:\n' % len(self.nodes_name)
for node_name in self.nodes_name:
s += node_name + ':\n'
s += '\tneighbor: %s\n' % str(self.nodes[node_name].neighbor)
logging.info(s)
for node_name in self.nodes_name:
traci.junction.subscribeContext(node_name, tc.CMD_GET_VEHICLE_VARIABLE, self.ild_length,
[tc.VAR_LANE_ID, tc.VAR_LANEPOSITION,
tc.VAR_SPEED, tc.VAR_WAITING_TIME])
cx_res = {node_name: traci.junction.getContextSubscriptionResults(node_name) \
for node_name in self.nodes_name}
return self._get_obs(cx_res)
def close(self):
traci.close()
def seed(self, seed=None):
if seed:
np.random.seed(seed)
def output_data(self):
step_data = | pd.DataFrame(self.step_data) | pandas.DataFrame |
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from brightics.function.textanalytics.doc_summarize import doc_summarizer_eng, doc_summarizer_kor
import unittest
import numpy as np
import pandas as pd
import HtmlTestRunner
import os
class DocumentSummarizerKorean(unittest.TestCase):
def setUp(self):
print("*** Document Summarizer (Korean) UnitTest Start ***")
self.testdata = | pd.DataFrame({'text':['이것은 우리 가족 이야기. 아버지가 방에 들어가신다. 어머니는 청소하신다. 우리는 행복하다.', '이것은 우리 가족 이야기이다. 아빠가 들어가신다. 엄마는 청소하신다. 우리는 행복한 가족이다.']}) | pandas.DataFrame |
#!/usr/bin/env -S python3 -u # -*- python -*-
# imclust.py (c) R.Jaksa 2021
# imclust_dbscan.py - extended version of imclust.py (by <NAME>)
import sys,os
import time
start_time = time.time()
# -------------------------- parse command-line arguments: dirname and no. of clusters
HELP = f"""
NAME
imclust_dbscan.py - image clustering demo
USAGE
imclust_dbscan.py [OPTIONS] DIRECTORY...
DESCRIPTION
Image clustering demo imclust_dbscan.py will cluster images in
the DIRECTORY, and produce an HTML visualization of the results.
OPTIONS
-h This help.
-m Models of NN to provide a numerical representations of images.
Accepted inputs: see documentation https://www.tensorflow.org/api_docs/python/tf/keras/applications - section 'functions'.
-e The maximum distance between two samples for one to be considered as in the neighborhood of the other.
VERSION
imclust.py 0.1 (c) R.Jaksa 2021
imclust_dbscan.py - extended version of imclust.py (by <NAME>)
"""
import argparse
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("-h","--help",action="store_true")
parser.add_argument("-e","--eps",type=str,default="0.5")
parser.add_argument("-m","--models",type=str,default="ResNet50")
parser.add_argument("path",type=str,nargs='*')
args = parser.parse_args()
if args.help or len(args.path)==0:
print(HELP)
exit(0)
# ---------------------------------------------------------- get image names from dirs
print(f"====================================")
print(f"=Loading names of images from dirs.=")
print(f"====================================")
print(f"...")
from glob import glob
import random
path = []
for dir in args.path:
path += glob(dir+"/**/*.png",recursive=True)
path += glob(dir+"/**/*.jpg",recursive=True)
random.shuffle(path)
print(f"=========================")
print(f"=Names of images loaded.=")
print(f"=========================")
# ------------------------------------------------------------------------- load model
print(f"====================")
print(f"=Loading NN models.=")
print(f"====================")
print(f"...")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
models = args.models
models = models.split(",")
# models_dict = {}
models_names = []
models_list = []
# model = None
# if args.model == 'DenseNet121':
if 'DenseNet121' in models:
model = tf.keras.applications.densenet.DenseNet121(include_top=False,weights="imagenet",input_shape=(224,224,3))
# models_dict.update({'DenseNet121': model})
models_names.append('DenseNet121')
models_list.append(model)
# elif args.model == 'DenseNet169':
if 'DenseNet169' in models:
model = tf.keras.applications.densenet.DenseNet169(include_top=False,weights="imagenet",input_shape=(224,224,3))
# models_dict.update({'DenseNet169': model})
models_names.append('DenseNet169')
models_list.append(model)
if 'DenseNet201' in models:
model = tf.keras.applications.densenet.DenseNet201(include_top=False,weights="imagenet",input_shape=(224,224,3))
models_names.append('DenseNet201')
models_list.append(model)
if 'EfficientNetB0' in models:
model = tf.keras.applications.efficientnet.EfficientNetB0(include_top=False,weights="imagenet",input_shape=(224,224,3))
models_names.append('EfficientNetB0')
models_list.append(model)
if 'EfficientNetB1' in models:
model = tf.keras.applications.efficientnet.EfficientNetB1(include_top=False,weights="imagenet",input_shape=(224,224,3))
models_names.append('EfficientNetB1')
models_list.append(model)
if 'EfficientNetB2' in models:
model = tf.keras.applications.efficientnet.EfficientNetB2(include_top=False,weights="imagenet",input_shape=(224,224,3))
models_names.append('EfficientNetB2')
models_list.append(model)
if 'EfficientNetB3' in models:
model = tf.keras.applications.efficientnet.EfficientNetB3(include_top=False,weights="imagenet",input_shape=(224,224,3))
models_names.append('EfficientNetB3')
models_list.append(model)
if 'EfficientNetB4' in models:
model = tf.keras.applications.efficientnet.EfficientNetB4(include_top=False,weights="imagenet",input_shape=(224,224,3))
models_names.append('EfficientNetB4')
models_list.append(model)
if 'EfficientNetB5' in models:
model = tf.keras.applications.efficientnet.EfficientNetB5(include_top=False,weights="imagenet",input_shape=(224,224,3))
models_names.append('EfficientNetB5')
models_list.append(model)
if 'EfficientNetB6' in models:
model = tf.keras.applications.efficientnet.EfficientNetB6(include_top=False,weights="imagenet",input_shape=(224,224,3))
models_names.append('EfficientNetB6')
models_list.append(model)
if 'EfficientNetB7' in models:
model = tf.keras.applications.efficientnet.EfficientNetB7(include_top=False,weights="imagenet",input_shape=(224,224,3))
models_names.append('EfficientNetB7')
models_list.append(model)
if 'InceptionResNetV2' in models:
model = tf.keras.applications.inception_resnet_v2.InceptionResNetV2(include_top=False,weights="imagenet",input_shape=(224,224,3))
models_names.append('InceptionResNetV2')
models_list.append(model)
if 'InceptionV3' in models:
model = tf.keras.applications.inception_v3.InceptionV3(include_top=False,weights="imagenet",input_shape=(224,224,3))
models_names.append('InceptionV3')
models_list.append(model)
if 'MobileNet' in models:
model = tf.keras.applications.mobilenet.MobileNet(include_top=False,weights="imagenet",input_shape=(224,224,3))
models_names.append('MobileNet')
models_list.append(model)
if 'MobileNetV2' in models:
model = tf.keras.applications.mobilenet_v2.MobileNetV2(include_top=False,weights="imagenet",input_shape=(224,224,3))
models_names.append('MobileNetV2')
models_list.append(model)
if 'MobileNetV3Large' in models:
model = tf.keras.applications.MobileNetV3Large(include_top=False,weights="imagenet",input_shape=(224,224,3))
models_names.append('MobileNetV3Large')
models_list.append(model)
if 'MobileNetV3Small' in models:
model = tf.keras.applications.MobileNetV3Small(include_top=False,weights="imagenet",input_shape=(224,224,3))
models_names.append('MobileNetV3Small')
models_list.append(model)
# elif args.model == 'NASNetLarge':
# model = tf.keras.applications.nasnet.NASNetLarge(include_top=False,weights="imagenet",input_shape=(331,331,3))
if 'NASNetMobile' in models:
model = tf.keras.applications.nasnet.NASNetMobile(include_top=False,weights="imagenet",input_shape=(224,224,3))
models_names.append('NASNetMobile')
models_list.append(model)
if 'ResNet101' in models:
model = tf.keras.applications.ResNet101(include_top=False,weights="imagenet",input_shape=(224,224,3))
models_names.append('ResNet101')
models_list.append(model)
if 'ResNet101V2' in models:
model = tf.keras.applications.ResNet101V2(include_top=False,weights="imagenet",input_shape=(224,224,3))
models_names.append('ResNet101V2')
models_list.append(model)
if 'ResNet152' in models:
model = tf.keras.applications.ResNet152(include_top=False,weights="imagenet",input_shape=(224,224,3))
models_names.append('ResNet152')
models_list.append(model)
if 'ResNet152V2' in models:
model = tf.keras.applications.ResNet152V2(include_top=False,weights="imagenet",input_shape=(224,224,3))
models_names.append('ResNet152V2')
models_list.append(model)
if 'ResNet50' in models:
model = tf.keras.applications.resnet50.ResNet50(include_top=False,weights="imagenet",input_shape=(224,224,3))
models_names.append('ResNet50')
models_list.append(model)
if 'ResNet50V2' in models:
model = tf.keras.applications.ResNet50V2(include_top=False,weights="imagenet",input_shape=(224,224,3))
models_names.append('ResNet50V2')
models_list.append(model)
if 'VGG16' in models:
model = tf.keras.applications.VGG16(include_top=False,weights="imagenet",input_shape=(224,224,3))
models_names.append('VGG16')
models_list.append(model)
if 'VGG19' in models:
model = tf.keras.applications.VGG19(include_top=False,weights="imagenet",input_shape=(224,224,3))
models_names.append('VGG19')
models_list.append(model)
if 'Xception' in models:
model = tf.keras.applications.xception.Xception(include_top=False,weights="imagenet",input_shape=(224,224,3))
models_names.append('Xception')
models_list.append(model)
print(f"===================")
print(f"=NN models loaded.=")
print(f"===================")
# ------------------------------------------------------------------------ load images
print(f"=======================================")
print(f"=Loading images and embedding vectors.=")
print(f"=======================================")
print(f"...")
from imageio import imread
from skimage.transform import resize
import numpy as np
from sklearn.decomposition import PCA
from multiprocessing.pool import ThreadPool
np.warnings.filterwarnings("ignore",category=np.VisibleDeprecationWarning)
def loadresize(path):
try:
image = imread(str(path))
except:
print(f"\nmalformed image: {path}\n")
return
image = resize(image,SIZE,anti_aliasing=True)
return image
SIZE = (224,224,3)
# pca = PCA(n_components=256)
pca_list = []
# vectors = np.empty([0,256],dtype=np.float32)
vectors = [np.empty([0,256],dtype=np.float32)]*len(models)
# images = np.empty([0,224,224,3],dtype=np.float32)
n_imgs = 0
i=0
while i < len(path):
i2 = i + 256
## imgs = np.array([imread(str(p)).astype(np.float32) for p in path[i:i2]])
## imgs = np.asarray([resize(image,SIZE,0) for image in imgs])
## images = np.concatenate((images, imgs),0)
imgs = np.empty([0,SIZE[0],SIZE[1],SIZE[2]],dtype=np.float32)
pool = ThreadPool(os.cpu_count())
results = []
for p in path[i:i2]:
results.append(pool.apply_async(loadresize,args=(p,)))
pool.close()
pool.join()
imgs = np.array([r.get() for r in results])
n_imgs += len(imgs)
# ------------------------------------------------------------- get embeddings vectors
for j in range(len(models)):
vector = models_list[j].predict(imgs)
vector = vector.reshape(vector.shape[0],-1)
if i == 0:
pca = PCA(n_components=256)
pca.fit(vector)
pca_list.append(pca)
vector = pca_list[j].transform(vector)
vectors[j] = np.concatenate((vectors[j], vector),0)
i += 256
print(f"======================================")
print(f"=Images and embedding vectors loaded.=")
print(f"======================================")
# ----------------------------------------------------------------------- cluster them
print(f"====================")
print(f"=DBSCAN clustering.=")
print(f"====================")
print(f"...")
from sklearn.cluster import DBSCAN
eps = args.eps
eps = eps.split(",")
clusterings = []
for i in range(len(models)):
clusterings.append([])
for j in range(len(eps)):
clustering = DBSCAN(eps=float(eps[j]))
# clustering.fit(vectors[i])
# cl = clustering.predict(vectors[i])
cl = clustering.fit_predict(vectors[i])
clusterings[i].append(cl)
# print(f"clusters: {cl}")
print(f"===========================")
print(f"=DBSCAN clustering - DONE.=")
print(f"===========================")
# ------------------------------------------------ copy images according their cluster
# import shutil
# for i in range(len(images)):
# if not os.path.exists(f"output/cluster{cluster[i]}"): os.makedirs(f"output/cluster{cluster[i]}")
# print(f"cp {path[i]} output/cluster{cluster[i]}")
# shutil.copy2(f"{path[i]}",f"output/cluster{cluster[i]}")
# -------------------------------------------------------------------------- excluding outliers
vectors_wo = []
clusterings_wo = []
for i in range(len(models)):
vectors_wo.append([])
clusterings_wo.append([])
for j in range(len(eps)):
vectors_wo[i].append([])
clusterings_wo[i].append([])
for k in range(len(clusterings[i][j])):
if clusterings[i][j][k] != -1:
vectors_wo[i][j].append(vectors[i][k])
clusterings_wo[i][j].append(clusterings[i][j][k])
print(f"================================")
print(f"=Calculating indices (metrics).=")
print(f"================================")
print(f"...")
# -------------------------------------------------------------------------- mean silhouette coefficient (plot + file)
from sklearn.metrics import silhouette_score
import pandas as pd
import matplotlib.pyplot as plt
# models_names = list(slovnik.keys())
MSC = []
MSC_wo = []
for i in range(len(models)):
MSC.append([])
MSC_wo.append([])
for j in range(len(eps)):
MSC[i].append(silhouette_score(vectors[i],clusterings[i][j]))
MSC_wo[i].append(silhouette_score(vectors_wo[i][j],clusterings_wo[i][j]))
frame = pd.DataFrame({'eps':eps, 'MSC':MSC[i]})
plt.figure(figsize=(12,6))
plt.plot(frame['eps'], frame['MSC'], marker='o')
plt.xlabel('Epsilon')
plt.ylabel('MSC')
plt.title('DBSCAN: Mean Silhouette Coefficient (MSC) - ' + models_names[i])
plt.savefig('MSC_' + models_names[i] + '_dbscan.png')
frame.to_csv(r'MSC_' + models_names[i] + '_dbscan.txt', index=None, sep='\t', mode='a')
frame = | pd.DataFrame({'eps':eps, 'MSC_wo':MSC_wo[i]}) | pandas.DataFrame |
"""prosper.datareader.coins: utilities for looking up info on cryptocoins"""
from enum import Enum
import pandas as pd
# TODO: Simplify import #
import prosper.datareader.config as config
import prosper.datareader.exceptions as exceptions
import prosper.datareader.cryptocompare as cryptocompare
import prosper.datareader.hitbtc as hitbtc
class Sources(Enum):
hitbtc = 'hitbtc'
cc = 'cryptocompare'
class OrderBook(Enum):
"""enumerator for handling order book info"""
asks = 'asks'
bids = 'bids'
class OHLCfrequency(Enum):
"""enumerator for OHLC scopes"""
minute = 'minute'
hour = 'hour'
day = 'day'
def address(self):
"""help figure out which address to use"""
if self == self.minute:
return 'https://min-api.cryptocompare.com/data/histominute'
elif self == self.hour:
return 'https://min-api.cryptocompare.com/data/histohour'
elif self == self.day:
return 'https://min-api.cryptocompare.com/data/histoday'
else: # pragma: no cover
raise exceptions.InvalidEnum()
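# For reference, a minimal sketch of how the enum resolves (values taken from the branches above):
#   OHLCfrequency('hour').address() -> 'https://min-api.cryptocompare.com/data/histohour'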
def columns_to_yahoo(
quote_df,
source
):
"""recast column names to yahoo equivalent
Args:
quote_df (:obj:`pandas.DataFrame`): dataframe to update
source (:obj:`Enum`): source info
Returns:
(:obj:`pandas.DataFrame`): updated dataframe cols
"""
if source == Sources.hitbtc:
index_key = 'symbol'
quote_df = quote_df.rename(index=quote_df[index_key])
elif source == Sources.cc:
## Remap column names ##
index_key = 'Name'
column_map = {
'CoinName': 'name',
'FullName': 'more_info',
'Name': 'symbol',
'TotalCoinSupply': 'shares_outstanding',
'TotalCoinsFreeFloat': 'float_shares',
'LASTVOLUME': 'volume',
'MKTCAP': 'market_capitalization',
'CHANGEPCT24HOUR': 'change_pct',
'MARKET': 'stock_exchange',
'OPEN24HOUR': 'open',
'HIGH24HOUR': 'high',
'LOW24HOUR': 'low',
'PRICE': 'last',
'LASTUPDATE': 'timestamp'
}
## Trim unused data ##
keep_keys = list(column_map.keys())
keep_keys.append(index_key)
drop_keys = list(set(list(quote_df.columns.values)) - set(keep_keys))
quote_df = quote_df.drop(drop_keys, 1)
## Apply remap ##
quote_df = quote_df.rename(
columns=column_map,
index=quote_df[index_key])
quote_df['change_pct'] = quote_df['change_pct'] / 100
else: # pragma: no cover
raise exceptions.UnsupportedSource()
## reformat change_pct ##
quote_df['change_pct'] = list(map(
'{:+.2%}'.format,
quote_df['change_pct']
))
## Timestamp to datetime ##
quote_df['datetime'] = pd.to_datetime(
pd.to_numeric(quote_df['timestamp']),
infer_datetime_format=True,
#format='%Y-%m-%dT%H:%M:%S',
errors='coerce'
)
return quote_df
def supported_symbol_info(
key_name,
source=Sources.hitbtc
):
"""find unique values for key_name in symbol feed
Args:
key_name (str): name of key to search
source (:obj:`Enum`): source name
Returns:
(:obj:`list`): list of unique values
"""
if isinstance(source, str):
source = Sources(source)
if source == Sources.hitbtc:
symbols_df = pd.DataFrame(hitbtc.quotes.get_supported_symbols_hitbtc())
elif source == Sources.cc:
symbols_df = pd.DataFrame(cryptocompare.quotes.get_supported_symbols_cc())
else: # pragma: no cover
raise exceptions.UnsupportedSource()
unique_list = list(symbols_df[key_name].unique())
return unique_list
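# Hypothetical usage (requires live access to the hitbtc/cryptocompare symbol feeds;
# key names follow the queries and column maps used elsewhere in this module):
#   supported_symbol_info('currency')              # unique currencies on hitbtc
#   supported_symbol_info('CoinName', Sources.cc)  # unique coin names on cryptocompare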
def get_symbol_hitbtc(
commodity_ticker,
currency_ticker,
logger=config.LOGGER
):
"""get valid ticker to look up
Args:
commodity_ticker (str): short-name for crypto coin
currency_ticker (str): short-name for currency
logger (:obj:`logging.logger`, optional): logging handle
Returns:
(str): valid ticker for HITBTC
"""
logger.info('--Fetching symbol list from API')
symbols_df = pd.DataFrame(hitbtc.quotes.get_supported_symbols_hitbtc())
symbol = symbols_df.query(
'commodity==\'{commodity}\' & currency==\'{currency}\''.format(
commodity=commodity_ticker.upper(),
currency=currency_ticker.upper()
))
if symbol.empty:
raise exceptions.SymbolNotSupported()
return symbol['symbol'].iloc[0]
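# Hypothetical example (actual availability depends on the live symbol list):
#   get_symbol_hitbtc('BTC', 'USD')  # -> 'BTCUSD' if the pair is listed, else SymbolNotSupported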
def get_quote_hitbtc(
coin_list,
currency='USD',
to_yahoo=False,
logger=config.LOGGER
):
"""fetch common summary data for crypto-coins
Args:
coin_list (:obj:`list`): list of tickers to look up'
currency (str, optional): currency to FOREX against
to_yahoo (bool, optional): convert names to yahoo analog
logger (:obj:`logging.logger`, optional): logging handle
Returns:
(:obj:`pandas.DataFrame`): coin info for the day, JSONable
"""
logger.info('Generating quote for %s -- HitBTC', config._list_to_str(coin_list))
logger.info('--validating coin_list')
ticker_list = hitbtc.quotes.coin_list_to_symbol_list(
coin_list,
currency=currency,
strict=True
)
logger.info('--fetching ticker data')
raw_quote = hitbtc.quotes.get_ticker_hitbtc('')
quote_df = pd.DataFrame(raw_quote)
if to_yahoo:
logger.info('--converting column names to yahoo style')
quote_df = columns_to_yahoo(quote_df, Sources.hitbtc)
logger.info('--filtering ticker data')
quote_df = quote_df[quote_df['symbol'].isin(ticker_list)]
quote_df = quote_df[list(quote_df.columns.values)].apply(pd.to_numeric, errors='ignore')
quote_df['change_pct'] = (quote_df['last'] - quote_df['open']) / quote_df['open'] * 100
logger.debug(quote_df)
return quote_df
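# Hypothetical usage of the quote helper (network-dependent; field names follow the
# references inside the function above):
#   quotes = get_quote_hitbtc(['BTC', 'ETH'], currency='USD')
#   quotes[['symbol', 'last', 'open', 'change_pct']]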
def get_orderbook_hitbtc(
coin,
which_book,
currency='USD',
logger=config.LOGGER
):
"""fetch current orderbook from hitBTC
Args:
coin (str): name of coin to fetch
which_book (str): Enum, 'asks' or 'bids'
currency (str, optional): currency to FOREX against
logger (:obj:`logging.logger`, optional): logging handle
Returns:
(:obj:`pandas.DataFrame`): current coin order book
"""
logger.info('Generating orderbook for %s -- HitBTC', coin)
order_enum = OrderBook(which_book) # validates which order book key to use
logger.info('--validating coin')
symbol = hitbtc.quotes.coin_list_to_symbol_list(
[coin],
currency=currency,
strict=True
)[0]
logger.info('--fetching orderbook')
raw_orderbook = hitbtc.quotes.get_order_book_hitbtc(symbol)[which_book]
orderbook_df = pd.DataFrame(raw_orderbook, columns=['price', 'ammount'])
orderbook_df['symbol'] = symbol
orderbook_df['coin'] = coin
orderbook_df['orderbook'] = which_book
logger.debug(orderbook_df)
return orderbook_df
def get_quote_cc(
coin_list,
currency='USD',
coin_info_df=None,
to_yahoo=False,
logger=config.LOGGER
):
"""fetch common summary data for crypto-coins
Args:
coin_list (:obj:`list`): list of tickers to look up'
currency (str, optional): currency to FOREX against
coin_info_df (:obj:`pandas.DataFrame`, optional): coin info (for caching)
to_yahoo (bool, optional): convert names to yahoo analog
logger (:obj:`logging.logger`, optional): logging handle
Returns:
(:obj:`pandas.DataFrame`): coin info for the day, JSONable
"""
logger.info('Generating quote for %s -- CryptoCompare', config._list_to_str(coin_list))
# TODO: only fetch symbol list when required?
if coin_info_df is None:
logger.info('--Gathering coin info')
coin_info_df = pd.DataFrame(cryptocompare.quotes.get_supported_symbols_cc())
else:
# make sure expected data is in there
headers = list(coin_info_df.columns.values)
assert 'Name' in headers # avoid merge issue
logger.info('--Fetching ticker data')
ticker_df = pd.DataFrame(cryptocompare.quotes.get_ticker_cc(coin_list, currency=currency))
logger.info('--combining dataframes')
quote_df = pd.merge(
ticker_df, coin_info_df,
how='inner',
left_on='FROMSYMBOL',
right_on='Name'
)
if to_yahoo:
logger.info('--converting headers to yahoo format')
quote_df = columns_to_yahoo(
quote_df,
Sources.cc
)
quote_df = quote_df[list(quote_df.columns.values)].apply(pd.to_numeric, errors='ignore')
logger.debug(quote_df)
return quote_df
def get_ohlc_cc(
coin,
limit,
currency='USD',
frequency=OHLCfrequency.day,
logger=config.LOGGER
):
"""gather OHLC data for given coin
Args:
coin (str): name of coin to look up
limit (int): total range for OHLC data (max 2000)
currency (str, optional): currency to compare coin to
frequency (:obj;`Enum`, optional): which range to use (minute, hour, day)
logger (:obj:`logging.logger`, optional): logging handle
Returns:
(:obj:`pandas.DataFrame`): OHLC data
"""
if isinstance(frequency, str):
frequency = OHLCfrequency(frequency)
logger.info('Fetching OHLC data @%s for %s -- CryptoCompare', frequency.value, coin)
data = cryptocompare.quotes.get_histo_day_cc(
coin,
limit,
currency=currency,
uri=frequency.address()
)
ohlc_df = pd.DataFrame(data)
ohlc_df['datetime'] = | pd.to_datetime(ohlc_df['time'], unit='s') | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""gender_detection.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1bu4brssep0L-q5nEmT9OBRykyBbvdu6S
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import feature_extraction, linear_model, model_selection, preprocessing
from sklearn.preprocessing import StandardScaler
import torch
import torch.nn as nn
import librosa
import librosa.display
import os,glob
from tqdm import tqdm_notebook
import re
import random
from random import randint
from sklearn.model_selection import train_test_split
#!pip install wget
#cd "/content/drive/My Drive/gender_detection/"
from bs4 import BeautifulSoup
from bs4.dammit import EncodingDetector
import requests
count=0
#79,80
for i in tqdm_notebook(range(83,84), total=1, unit="epoch"):
print(i)
parser = 'html.parser' # or 'lxml' (preferred) or 'html5lib', if installed
resp = requests.get("https://www.openslr.org/"+str(i)+"/")
http_encoding = resp.encoding if 'charset' in resp.headers.get('content-type', '').lower() else None
html_encoding = EncodingDetector.find_declared_encoding(resp.content, is_html=True)
encoding = html_encoding or http_encoding
soup = BeautifulSoup(resp.content, parser, from_encoding=encoding)
for link in soup.find_all('a', href=True):
l=link["href"].split(".")
if l[len(l)-1]=="zip" or l[len(l)-1]=="tgz":
if l[1]=="openslr":
count=count+1
name=l[len(l)-2].split("/")
#print(link["href"],l[1],name[len(name)-1])
file_url = link["href"]
#print(file_url)
#!wget -c $file_url
# r = requests.get(file_url, stream = True)
# with open("/content/drive/My Drive/gender_detection/"+name[len(name)-1]+"."+l[len(l)-1], "wb") as file:
# for block in r.iter_content(chunk_size = 1024):
# if block:
# file.write(block)
#cd /content/drive/My Drive/gender_detection/female/
for filename in glob.glob(os.path.join("/content/drive/My Drive/gender_detection/female/", '*.zip')):
print(filename)
l=filename.split("/")
l_1=l[len(l)-1].split("_")
t=l[len(l)-1]
l_2=l_1[len(l_1)-1].split(".")
print(l_2[0])
#!unzip $t -d "/content/drive/My Drive/gender_detection/female_unzipped/"
min=100000
count=0
count1=0
g=[]
for filename in glob.glob(os.path.join("/content/drive/My Drive/gender_detection/male_unzipped/", '*.wav')):
count1=count1+1
#print(filename)
l=filename.split("/")
l_1=l[len(l)-1].split("_")
t=l_1[0]
#print(t)
if t not in g:
g.append(t)
#print(count1)
print(g,count1)
#!pip install soundfile
import soundfile as sf
ob = sf.SoundFile("/content/drive/My Drive/gender_detection/male_unzipped/clm_00610_00556859411.wav")
print(ob.samplerate)
for i in range(len(g)):
g=['nom', 'wem', 'mim', 'som', 'irm', 'scm']
data_speech=pd.DataFrame(columns=["S1","sr","Gender"])
hop_length = 512
n_mels =128
n_fft = 2048
#count=0
for filename in glob.glob(os.path.join("/content/drive/My Drive/gender_detection/male_unzipped/", '*.wav')):
l=filename.split("/")
l_1=l[len(l)-1].split("_")
t=l_1[0]
if t == g[i]:
y, sr = librosa.load(filename,sr=None)
#print(librosa.load(filename,sr=None))
# trim silent edges
speech, _ = librosa.effects.trim(y)
#speech=speech[:100000]
if speech.shape[0]>100000:
#print(speech.shape[0])
speech=speech[:100000]
#print(speech.shape[0])
S1=librosa.feature.mfcc(y=speech,sr=sr)
#print(S1)
gender="male"
# if gender == "f":
# gender="female"
# if gender == "m":
# gender = "male"
temp=[]
temp1=[]
temp2=[]
temp.append(np.array(S1))
temp1.append(gender)
temp2.append(np.array(sr))
#print(temp)
df_temp=pd.DataFrame(list(zip(temp,temp2,temp1)),columns=["S1","sr","Gender"])
data_speech=data_speech.append(df_temp)
print(data_speech.shape)
data_speech.to_pickle("/content/drive/My Drive/gender_speech_male_"+str(16+i)+".pkl")
#cd /content/drive/My Drive/gender_detection/spanish
#mkdir spanish
for filename in glob.glob(os.path.join("/content/drive/My Drive/gender_detection/aida_tang_1/aidatatang_200zh/corpus/train", '*.tar.gz')):
print(filename)
l=filename.split("/")
l_1=l[len(l)-1].split("_")
t=l[len(l)-1]
l_2=l_1[len(l_1)-1].split(".")
print(t)
#!tar -xvzf $t -C "/content/drive/My Drive/gender_detection/aida_tang/"
#!tar -xvzf "/content/drive/My Drive/gender_detection/tedx_spanish_corpus.tgz" -C "/content/drive/My Drive/gender_detection/spanish/"
for i in range(len(g)):
data_speech=pd.DataFrame(columns=["S1","sr","Gender"])
hop_length = 512
n_mels =128
n_fft = 2048
#count=0
for filename in glob.glob(os.path.join("/content/drive/My Drive/gender_detection/female_1/", '*.wav')):
l=filename.split("/")
l_1=l[len(l)-1].split("_")
t=l_1[0]
if t == g[i]:
y, sr = librosa.load(filename,sr=None)
#print(librosa.load(filename,sr=None))
# trim silent edges
speech, _ = librosa.effects.trim(y)
#speech=speech[:100000]
if speech.shape[0]>100000:
#print(speech.shape[0])
speech=speech[:100000]
#print(speech.shape[0])
S1=librosa.feature.mfcc(y=speech,sr=sr)
#print(S1)
gender="female"
# if gender == "f":
# gender="female"
# if gender == "m":
# gender = "male"
temp=[]
temp1=[]
temp2=[]
temp.append(np.array(S1))
temp1.append(gender)
temp2.append(np.array(sr))
#print(temp)
df_temp=pd.DataFrame(list(zip(temp,temp2,temp1)),columns=["S1","sr","Gender"])
data_speech=data_speech.append(df_temp)
print(data_speech.shape)
data_speech.to_pickle("/content/drive/My Drive/gender_speech_female_"+str(i+2)+".pkl")
df_1=pd.read_pickle("/content/drive/My Drive/gender_speech_male_1.pkl")
df_1.head()
df_male=pd.DataFrame(columns=["S1","sr","Gender"])
for i in range(16,22):
df_1=pd.read_pickle("/content/drive/My Drive/gender_speech_male_"+str(i)+".pkl")
df_male=df_male.append(df_1)
# train_inputs, test_inputs, train_labels, test_labels = train_test_split(df_male["S1"], df_male["Gender"],random_state=2018, test_size=0.1)
# Scaler=StandardScaler()
# train_inputs=Scaler.fit_transform(train_inputs)
# test_inputs=Scaler.transform(test_inputs)
# data_male_train = {"S1": train_inputs,
# "Gender": train_labels}
# df_male_train = pd.concat(data_male_train,
# axis = 1)
# data_male_test = {"S1": test_inputs,
# "Gender": test_labels}
# df_male_test = pd.concat(data_male_test,
# axis = 1)
df_female=pd.DataFrame(columns=["S1","sr","Gender"])
for i in range(19,24):
df_1=pd.read_pickle("/content/drive/My Drive/gender_speech_female_"+str(i)+".pkl")
df_female=df_female.append(df_1)
# train_inputs, test_inputs, train_labels, test_labels = train_test_split(df_female["S1"], df_female["Gender"],random_state=2018, test_size=0.1)
# Scaler=StandardScaler()
# train_inputs=Scaler.fit_transform(train_inputs)
# test_inputs=Scaler.transform(test_inputs)
# data_female_train = {"S1": train_inputs,
# "Gender": train_labels}
# df_female_train = pd.concat(data_female_train,
# axis = 1)
# data_female_test = {"S1": test_inputs,
# "Gender": test_labels}
# df_female_test = pd.concat(data_female_test,
# axis = 1)
df=pd.DataFrame(columns=["S1","sr","Gender"])
df=df.append(df_male)
df=df.append(df_female)
df=df.sample(frac=1)
# df_test=pd.DataFrame(columns=["S1","Gender"])
# df_test=df_test.append(df_male_test)
# df_test=df_test.append(df_female_test)
# df_test=df_test.sample(frac=1)
df.to_pickle("/content/drive/My Drive/gender_detection/gender_speech_english.pkl")
df_0=pd.read_pickle("/content/drive/My Drive/gender_detection/gender_speech.pkl")
df_1=pd.read_pickle("/content/drive/My Drive/gender_detection/gender_speech_english.pkl")
df=df_0.append(df_1)
df.shape
test=df["Gender"]
train=df.drop("Gender",axis=1)
train=train.drop("sr",axis=1)
X_train,X_test,Y_train,Y_test=train_test_split(train,test,random_state=1,test_size=0.1)
Y_1={"Gender":Y_train}
Y_1= | pd.DataFrame(Y_1) | pandas.DataFrame |
__all__=['xgrab_live', 'xgrab_rate']
import datetime as dt
from parameters import interval, symbol
from binance.client import Client
from BinanceKeys import key, secretKey
import pandas as pd
import json # for parsing what binance sends back to us
import numpy as np # numerical python, i usually need this somewhere
import requests
import traceback
import time as tm
client = Client(key(), secretKey())
def xgrab_rate(symbol, interval):
"""
Gets 500 last rates in a dataframe.
Args:
symbol (str): symbol of the coins (ex: BNBBUSD)
interval (str): time interval (ex: 1h)
Returns:
dataframe: The dataframe containing the 500 last rates.
"""
root_url = "https://api.binance.com/api/v1/klines"
# interval = '1h'
url = root_url + "?symbol=" + symbol + "&interval=" + interval
data = json.loads(requests.get(url).text)
df = | pd.DataFrame(data) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# ***********************************************************************
#
# V2W-BERT: A Python library for vulnerability classification
# <NAME> (<EMAIL>) : Purdue University
# <NAME> (<EMAIL>): Pacific Northwest National Laboratory
#
# ***********************************************************************
#
#
# Copyright © 2022, Battelle Memorial Institute
# All rights reserved.
#
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#
# 1. Redistributions of source code must retain the above copyright notice, this
#
# list of conditions and the following disclaimer.
#
#
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#
# this list of conditions and the following disclaimer in the documentation
#
# and/or other materials provided with the distribution.
#
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
#
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
#
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
#
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
#
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ## Download and Preprocess Latest Dataset
#
# In this script we first download all CVEs to date, then use the NVD and MITRE hierarchy documents to prepare train, test, and validation sets.
# ## Import libraries
# In[199]:
import os
import requests, zipfile, io
import pickle
import pandas as pd
import numpy as np
# Here, I have disabled a false alarm that would otherwise trip later in the project.
pd.options.mode.chained_assignment = None
# The datetime library will let me filter the data by reporting date.
from datetime import datetime, timedelta
# Since the NVD data is housed in JavaScript Object Notation (JSON) format, I will need the json_normalize function to access and manipulate the information.
from pandas.io.json import json_normalize
import sys
import torch
import re
from ipynb.fs.full.Dataset import Data
# In[200]:
# Expanding view area to facilitate data manipulation.
pd.set_option('display.max_rows', 20)
pd.set_option('display.max_columns', 100)
# In[201]:
import argparse
from argparse import ArgumentParser
def get_configuration():
parser = ArgumentParser()
parser.add_argument('--dir', type=str, default='Dataset')
parser.add_argument('--from_year', type=int, default=2020)
parser.add_argument('--to_year', type=int, default=2022)
parser.add_argument('--from_train_year', type=int, default=1990)
parser.add_argument('--to_train_year', type=int, default=2020)
parser.add_argument('--from_test_year', type=int, default=2021)
parser.add_argument('--to_test_year', type=int, default=2021)
parser.add_argument('--from_val_year', type=int, default=2022)
parser.add_argument('--to_val_year', type=int, default=2022)
parser.add_argument('-f') ##dummy for jupyternotebook
args = parser.parse_args()
dict_args = vars(args)
return args, dict_args
args, dict_args=get_configuration()
print(dict_args)
print(args.dir)
# In[ ]:
# ### Configuration
# In[202]:
class DataPath():
def __init__(self, args, dataset_dir='',results_dir=''):
#File locations
self.PATH_TO_DATASETS_DIRECTORY = dataset_dir+'/NVD/raw/'
self.PATH_TO_RESULTS_DIRECTORY = results_dir+'/NVD/processed/'
self.NVD_CVE_FILE=self.PATH_TO_RESULTS_DIRECTORY+'NVD_CVE_data.csv'
self.Graph_FILE=self.PATH_TO_RESULTS_DIRECTORY+'GRAPH_data'
self.GRAPHVIZ_HIERARCHY=self.PATH_TO_RESULTS_DIRECTORY+'Hierarchy'
self.MITRE_CWE_FILE=self.PATH_TO_DATASETS_DIRECTORY+'CWE_RC_1000.csv'
self.NVD_CWE_FILE=self.PATH_TO_RESULTS_DIRECTORY+'NVD_CWE_data.csv'
self.MASK_FILE = self.PATH_TO_RESULTS_DIRECTORY+'NVD_data'
self.MERGED_NVD_CVE_FILE=self.PATH_TO_RESULTS_DIRECTORY+'NVD_CVE.csv'
self.FILTERED_NVD_CWE_FILE=self.PATH_TO_RESULTS_DIRECTORY+'NVD_CWE.csv'
self.YEARS=list(range(args.from_year,args.to_year+1))
self.TRAIN_YEARS=list(range(args.from_train_year,args.to_train_year+1))
self.VAL_YEARS=list(range(args.from_val_year,args.to_val_year+1))
self.TEST_YEARS=list(range(args.from_test_year,args.to_test_year+1))
if not os.path.exists(self.PATH_TO_DATASETS_DIRECTORY):
print("Creating directory: ",self.PATH_TO_DATASETS_DIRECTORY)
os.makedirs(self.PATH_TO_DATASETS_DIRECTORY)
if not os.path.exists(self.PATH_TO_RESULTS_DIRECTORY):
print("Creating directory: ",self.PATH_TO_RESULTS_DIRECTORY)
os.makedirs(self.PATH_TO_RESULTS_DIRECTORY)
class Config(DataPath):
def __init__(self,args, dataset_dir='',results_dir=''):
super(Config, self).__init__(args, dataset_dir, results_dir)
self.CLUSTER_LABEL=0
self.download()
def download(self):
for year in self.YEARS:
if not os.path.exists(self.PATH_TO_DATASETS_DIRECTORY+'nvdcve-1.1-'+str(year)+'.json'):
url = 'https://nvd.nist.gov/feeds/json/cve/1.1/nvdcve-1.1-'+str(year)+'.json.zip'
print("Downloading: ",url)
r = requests.get(url)
z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall(self.PATH_TO_DATASETS_DIRECTORY)
print("CVEs downloaded")
if not os.path.exists(self.MITRE_CWE_FILE):
url = 'https://drive.google.com/uc?export=download&id=1-phSamb4RbxyoBc3AQ2xxKMSsK2DwPyn'
print("Downloading: ",url)
r = requests.get(url)
z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall(self.PATH_TO_DATASETS_DIRECTORY)
print("CWEs downloaded")
config=Config(args,dataset_dir=args.dir,results_dir=args.dir)
# ### ProcessCVEs
# In[203]:
def getDataFrame(config):
df = []
counter=0
for year in config.YEARS:
yearly_data = pd.read_json(config.PATH_TO_DATASETS_DIRECTORY+'nvdcve-1.1-'+str(year)+'.json')
if counter == 0:
df = yearly_data
else:
df = df.append(yearly_data)
counter+=1
return df
# In[204]:
def removeREJECT(description):
series=[]
for x in description:
try:
if "REJECT" in (json_normalize(x)["value"])[0]:
series.append(False)
else:
series.append(True)
except:
series.append(False)
return pd.Series(series,index=description.index)
# In[205]:
def removeUnknownCWE(description):
series=[]
for x in description:
try:
if x == "UNKNOWN" or x == "NONE":
series.append(False)
else:
series.append(True)
except:
series.append(False)
return pd.Series(series,index=description.index)
# In[206]:
def getCVEDescription(df):
CVE_entry = []
CVE_index = df["cve.description.description_data"].index
for x in df["cve.description.description_data"]:
try:
raw_CVE_entry = json_normalize(x)["value"][0]
clean_CVE_entry = str(raw_CVE_entry)
CVE_entry.append(clean_CVE_entry)
except:
CVE_entry.append("NONE")
CVE_entry = pd.Series(CVE_entry, index = CVE_index)
return CVE_entry
# In[207]:
# Defining a function which I will use below
def consolidate_unknowns(x):
if x == "NVD-CWE-Other" or x == "NVD-CWE-noinfo":
return "UNKNOWN"
else:
return x
# In[208]:
def getCWEs(df):
CWE_entry = []
CWE_index = df["cve.problemtype.problemtype_data"].index
for x in df["cve.problemtype.problemtype_data"]:
try:
CWE_normalized_json_step_1 = json_normalize(x)
CWE_normalized_json_step_2 = CWE_normalized_json_step_1["description"][0]
CWEs=[]
#print(json_normalize(CWE_normalized_json_step_2)["value"])
for CWE in json_normalize(CWE_normalized_json_step_2)["value"]:
#CWEs.append(consolidate_unknowns(str(CWE)))
CWEs.append(str(CWE))
CWE_entry.append(CWEs)
except:
CWE_entry.append(['NONE'])
CWE_entry = pd.Series(CWE_entry, index = CWE_index)
return CWE_entry
# In[209]:
def ProcessDataset(config):
print("Loading data from file---")
df=getDataFrame(config)
CVE_Items = json_normalize(df["CVE_Items"])
df = pd.concat([df.reset_index(), CVE_Items], axis=1)
df = df.drop(["index", "CVE_Items"], axis=1)
df = df.rename(columns={"cve.CVE_data_meta.ID": "CVE ID"})
CVE_ID = df["CVE ID"]
df.drop(labels=["CVE ID"], axis=1,inplace = True)
df.insert(0, "CVE ID", CVE_ID)
##remove description with REJECT
print("Removing REJECTs---")
df=df[removeREJECT(df["cve.description.description_data"])]
##Extract CVE description
CVE_description=getCVEDescription(df)
df.insert(1, "CVE Description", CVE_description)
##Extract CWEs
print("Extracting CWEs---")
CWE_entry=getCWEs(df)
df.insert(2, "CWE Code", CWE_entry)
# ##Remove CWEs we don't know true label
# print("Removing Unknown CWEs---")
# df=df[removeUnknownCWE(df["CWE Code 1"])]
# Converting the data to pandas date-time format
df["publishedDate"] = pd.to_datetime(df["publishedDate"])
return df
# ### ProcessCWEs
# In[210]:
def processAndSaveCVE(config, LOAD_SAVED=True):
if not os.path.exists(config.NVD_CVE_FILE) or LOAD_SAVED==False:
df=ProcessDataset(config)
df=df[['publishedDate', 'CVE ID', 'CVE Description', 'CWE Code']]
df.to_csv(config.NVD_CVE_FILE,index=False)
else:
df=pd.read_csv(config.NVD_CVE_FILE)
return df
# In[211]:
def ProcessCWE_NVD(config):
# Importing BeautifulSoup and an xml parser to scrape the CWE definitions from the NVD web site
from bs4 import BeautifulSoup
import lxml.etree
# loading the NVD CWE Definitions page and scraping it for the first table that appears
NVD_CWE_description_url = requests.get("https://nvd.nist.gov/vuln/categories")
CWE_definitions_page_soup = BeautifulSoup(NVD_CWE_description_url.content, "html.parser")
table = CWE_definitions_page_soup.find_all('table')[0]
df_CWE_definitions = pd.read_html(str(table))[0]
return df_CWE_definitions
# In[212]:
def ProcessCWE_MITRE(config):
print('Loading CWE file : {0}'.format(config.MITRE_CWE_FILE))
#df_CWE_definitions = pd.read_csv(config.MITRE_CWE_FILE, quotechar='"',delimiter=',', encoding='latin1',index_col=False)
df_CWE_definitions = pd.read_csv(config.MITRE_CWE_FILE, delimiter=',', encoding='latin1',index_col=False)
return df_CWE_definitions
# In[213]:
def processAndSaveCWE(config, LOAD_SAVED=True):
if not os.path.exists(config.MITRE_CWE_FILE) or LOAD_SAVED==False:
df_CWE_MITRE=ProcessCWE_MITRE(config)
df_CWE_MITRE.to_csv(config.MITRE_CWE_FILE,index=False)
else:
df_CWE_MITRE=pd.read_csv(config.MITRE_CWE_FILE, index_col=False)
if not os.path.exists(config.NVD_CWE_FILE) or LOAD_SAVED==False:
df_CWE_NVD=ProcessCWE_NVD(config)
df_CWE_NVD.to_csv(config.NVD_CWE_FILE,index=False)
else:
df_CWE_NVD=pd.read_csv(config.NVD_CWE_FILE,index_col=False)
return df_CWE_MITRE, df_CWE_NVD
# In[214]:
#df_CWE_MITRE, df_CWE_NVD = processAndSaveCWE(config, True)
# In[215]:
#df_CWE_MITRE
#df_CWE_NVD
# In[216]:
def load_preprocessed(config, LOAD_SAVED=True):
df_CVE=processAndSaveCVE(config, LOAD_SAVED)
df_CWE_MITRE, df_CWE_NVD = processAndSaveCWE(config, LOAD_SAVED=True)
index1= np.argwhere(df_CWE_NVD['Name'].values == 'NVD-CWE-Other')[0][0]
index2= np.argwhere(df_CWE_NVD['Name'].values == 'NVD-CWE-noinfo')[0][0]
df_CWE_NVD.drop(index=[index1,index2], inplace = True)
return df_CVE, df_CWE_NVD, df_CWE_MITRE
# In[217]:
#load_preprocessed(config, LOAD_SAVED=False)
# ### Create Training and Test Dataset
# In[218]:
def getMask(config,df_CVE,df_CWE):
n = len(df_CWE)
m = len(df_CVE)
#get date range
train_start_date = pd.to_datetime(str(config.TRAIN_YEARS[0])+'-01-01').tz_localize('US/Eastern')
train_end_date = pd.to_datetime(str(config.TRAIN_YEARS[-1])+'-01-01').tz_localize('US/Eastern') + timedelta(days=365)
val_start_date = pd.to_datetime(str(config.VAL_YEARS[0])+'-01-01').tz_localize('US/Eastern')
val_end_date = pd.to_datetime(str(config.VAL_YEARS[-1])+'-01-01').tz_localize('US/Eastern') + timedelta(days=365)
test_start_date = pd.to_datetime(str(config.TEST_YEARS[0])+'-01-01').tz_localize('US/Eastern')
test_end_date = pd.to_datetime(str(config.TEST_YEARS[-1])+'-01-01').tz_localize('US/Eastern') + timedelta(days=365)
cwe_ids=df_CWE['Name']
cwe_map=dict(zip(cwe_ids, list(range(n))))
index_cwe_map = dict(zip(list(range(n)),cwe_ids))
#creating y and finding labeled
y=torch.zeros((m,n),dtype=torch.long)
labeled_mask= torch.zeros(m, dtype=torch.bool)
train_index = torch.zeros(m, dtype=torch.bool)
test_index = torch.zeros(m, dtype=torch.bool)
val_index = torch.zeros(m, dtype=torch.bool)
CWEs=df_CVE['CWE Code']
Dates=df_CVE['publishedDate']
for i,row in enumerate(zip(CWEs,Dates)):
cwes=row[0]
date=row[1]
if(type(cwes) == str):
cwes=[cwe for cwe in cwes.strip('[]').split("'") if not (cwe==',' or cwe==', ' or cwe=='''''')]
if(type(date) == str):
date=pd.to_datetime(date)
for cwe in cwes:
if cwe in cwe_map:
y[i][cwe_map[cwe]]=1
if torch.sum(y[i])>0:
labeled_mask[i]=True
if(train_start_date<date and date<train_end_date):
train_index[i]=True
elif(val_start_date<date and date<val_end_date):
val_index[i]=True
elif(test_start_date<date and date<test_end_date):
test_index[i]=True
else:
print(date,'-> not covered')
##convert to tensors
data=Data(train_mask=train_index, val_mask=val_index, test_mask=test_index, y=y, num_nodes=m)
return data
# In[219]:
def getPercent(data,df_CVE,df_CWE, max_data_inaclass=500):
CWEs=df_CVE['CWE Code']
train_mask= (data.train_mask == True).nonzero().flatten().numpy()
CWEs_train={}
for key in train_mask:
cwes=CWEs[key]
if(type(cwes) == str):
cwes=[cwe.strip() for cwe in cwes.strip('[]').split("'") if not (cwe==',' or cwe==', ' or cwe=='''''')]
for cwe in cwes:
if cwe in CWEs_train:
CWEs_train[cwe].append(key)
else:
CWEs_train[cwe]=[key]
required_train_mask = torch.zeros(len(data.train_mask), dtype=torch.bool)
for key, values in CWEs_train.items():
if(len(values)<max_data_inaclass):
required_train_mask[values]=True
else:
np.random.shuffle(values)
takeamnt=max_data_inaclass
required_train_mask[values[:takeamnt]]=True
data.train_mask=required_train_mask
return data
# In[ ]:
# In[220]:
from collections import OrderedDict
def CWE_description(row):
return str(row['Name'])+" "+str(row['Description'])+" "+str(row['Extended Description'])+" "+str(row['Common Consequences'])
def CWE_description_NVD(row,df_CWE_Mitre):
cwe=row['Name']
cwe_id = int(re.findall("\d+", cwe)[0])
description = df_CWE_Mitre[df_CWE_Mitre['CWE-ID'].values==cwe_id]['CVE Description'].values
if len(description)>0:
return description[0]
else:
return ''
def UpdateData(data, df_CVE, df_CWE_NVD, df_CWE_MITRE):
df_CWE_MITRE['CVE Description']= df_CWE_MITRE.apply(lambda row: CWE_description(row), axis=1)
for i, row in df_CWE_NVD.iterrows():
description=CWE_description_NVD(row, df_CWE_MITRE)
#df_CWE_NVD.set_value(i,'CVE Description',description)
df_CWE_NVD.at[i,'CVE Description']=description
df_CWE_NVD['CWE Code']= df_CWE_NVD.apply(lambda row: [str(row['Name'])], axis=1)
df_CWE_NVD=df_CWE_NVD[['CVE Description','CWE Code','Name']]
df_CVE_updated = | pd.concat([df_CVE,df_CWE_NVD],ignore_index=True, sort=False) | pandas.concat |
#%%
import pandas as pd
# %matplotlib inline
import matplotlib.pyplot as plt
csv_path = "C:\\Users\\jackm\\Documents\\Hands On ML\\handson-ml-master\\datasets\\housing\\housing.csv"
import numpy as np
import hashlib
#%%
def split_train_test(data, test_ratio):
shuffled_indices = np.random.permutation(len(data))
test_set_size = int(len(data) * test_ratio)
test_indices = shuffled_indices[:test_set_size]
train_indices = shuffled_indices[test_set_size:]
return data.iloc[train_indices], data.iloc[test_indices]
def test_set_check(identifier, test_ratio, hash):
return hash(np.int64(identifier)).digest()[-1] < 256 * test_ratio
def split_train_test_by_id(data, test_ratio, id_column, hash=hashlib.md5):
ids = data[id_column]
in_test_set = ids.apply(lambda id_: test_set_check(id_, test_ratio, hash))
return data.loc[~in_test_set], data.loc[in_test_set]
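# A quick note on the hash-based split above (illustrative arithmetic, not project code):
# with test_ratio=0.2 the check compares the last md5 digest byte (0-255) against
# 256 * 0.2 = 51.2, so roughly 20% of ids fall into the test set, and a given id always
# lands in the same set even after new rows are appended to the data.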
#%%
housing = pd.read_csv(csv_path)
#%%
display(housing)
display(housing.info())
display(housing.describe())
#%%
housing.hist(bins=50, figsize=(20, 15))
plt.show()
#%%
train_set, test_set = split_train_test(housing, 0.2)
print("{:f} train + {:2f} test".format(len(train_set), len(test_set)))
#%%
housing_with_id = housing.reset_index()
train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "index")
#%%
housing_with_id["id"] = housing["longitude"] * 1000 + housing["latitude"]
train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "id")
#%%
from sklearn.model_selection import train_test_split
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
#%%
housing["income_cat"] = np.ceil(housing["median_income"] / 1.5)
housing["income_cat"].where(housing["income_cat"] < 5, 5.0, inplace=True)
#%%
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing["income_cat"]):
strat_train_set = housing.loc[train_index]
strat_test_set = housing.loc[test_index]
#%%
housing["income_cat"].value_counts() / len(housing)
#%%
for set_ in (strat_train_set, strat_test_set):
    set_.drop(["income_cat"], axis=1, inplace=True)
#%%
housing = strat_train_set.copy()
housing.plot(
kind="scatter",
x="longitude",
y="latitude",
alpha=0.4,
s=housing["population"] / 100,
label="population",
c="median_house_value",
cmap=plt.get_cmap("jet"),
colorbar=True,
)
plt.legend()
#%%
corr_matrix = housing.corr()
print(corr_matrix["median_house_value"].sort_values(ascending=False))
#%%
from pandas.plotting import scatter_matrix
attributes = [
"median_house_value",
"median_income",
"total_rooms",
"housing_median_age",
]
scatter_matrix(housing[attributes], figsize=(12, 8))
#%%
housing.plot(kind="scatter", x="median_income", y="median_house_value", alpha=0.1)
#%%
housing["rooms_per_household"] = housing["total_rooms"] / housing["households"]
housing["bedrooms_per_room"] = housing["total_bedrooms"] / housing["total_rooms"]
housing["population_per_household"] = housing["population"] / housing["households"]
#%%
housing = strat_train_set.drop("median_house_value", axis=1)
housing_labels = strat_train_set["median_house_value"].copy()
#%%
housing.dropna(subset=["total_bedrooms"])
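# Note that dropna() returns a new DataFrame; since the result is not assigned, this
# line only illustrates the "drop incomplete rows" option and leaves `housing`
# unchanged -- the median imputer below is what is actually used.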
#%%
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(strategy="median")
#%%
housing_num = housing.drop("ocean_proximity", axis=1)
#%%
imputer.fit(housing_num)
#%%
X = imputer.transform(housing_num)
display(X)
#%%
housing_tr = pd.DataFrame(X, columns=housing_num.columns)
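#%%
# Sanity check (illustrative, not in the original notebook): the imputer's learned
# statistics should match the per-column medians of the numeric training data.
print(imputer.statistics_)
print(housing_num.median().values)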
import numpy as np
from numpy import where
from flask import Flask, request, jsonify, render_template
import pandas as pd
from sklearn.ensemble import IsolationForest
from pyod.models.knn import KNN
import json
from flask import send_from_directory
from flask import current_app
app = Flask(__name__)
class Detect:
def __init__(self, file, non_num):
self.file = file
self.non_num = non_num
def IQR(self):
# anomaly=pd.DataFrame()
data = pd.DataFrame(self.file)
non_num=pd.DataFrame(self.non_num)
data.dropna(axis=0,inplace=True)
# data=data.select_dtypes(include=['float64','int64'])
Q1 = data.quantile(0.25)
Q3 = data.quantile(0.75)
IQR = Q3 - Q1
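        # Tukey's rule: flag any row with at least one value outside
        # [Q1 - 1.5*IQR, Q3 + 1.5*IQR] as an outlier.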
IQR_Out = data[((data < (Q1 - 1.5 * IQR)) |(data > (Q3 + 1.5 * IQR))).any(axis=1)]
IQR_Out = non_num.join(IQR_Out, how='inner')
IQR_Out.to_csv(r'IQR_Outlier.csv')
# IQR Method
def isolation(self):
anomaly=pd.DataFrame()
data_n=pd.DataFrame(self.file)
non_num=pd.DataFrame(self.non_num)
data_n.dropna(axis=0,inplace=True)
# data_n=data_n.select_dtypes(include=['float64','int64'])
model = IsolationForest(n_estimators=50, max_samples=500, contamination=.01, bootstrap=False, n_jobs=1, random_state=1, verbose=0, warm_start=False).fit(data_n)
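        # contamination=.01 asks the model to label roughly 1% of rows as anomalies;
        # predict() returns -1 for outliers and 1 for inliers.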
data_n['anomaly_score'] = model.predict(data_n)
anomaly=data_n[data_n['anomaly_score']==-1]
anomaly = non_num.join(anomaly, how='inner')
anomaly.to_csv("outlierss_isolation.csv")
# Isolation forest Method
def mcd(self):
anomaly=pd.DataFrame()
data=pd.DataFrame(self.file)
non_num=pd.DataFrame(self.non_num)
data.dropna(axis=0,inplace=True)
# data=data.select_dtypes(include=['float64','int64'])
from sklearn.covariance import EllipticEnvelope
model = EllipticEnvelope(contamination=0.01).fit(data)
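        # EllipticEnvelope fits a robust (minimum covariance determinant) Gaussian
        # estimate and flags the ~1% most extreme points; predict() returns -1 for
        # outliers, mirroring the IsolationForest convention above.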
data['anomaly_score'] = model.predict(data)
anomaly=data[data['anomaly_score']==-1]
anomaly = non_num.join(anomaly, how='inner')
anomaly.to_csv("outlierss_mcd.csv")
# Minimum covariance determinant Method
def local(self):
from numpy import quantile, where, random
import pandas as pd
anomaly = pd.DataFrame()
        df = pd.DataFrame(self.file)
# this program produces a csv breaking down where phosphosite orthologs could be lost in the PhosphositeOrthology program
# PhosphositePlus is being used to verify that my orthologs are correct but PSP does not have everything which is the
# reason for using dbPAF
# we want to make sure that the UniprotIDs contained in BOTH PSP and dbPAF end up in the orthologs list (ID_in_dbPAF)
# possible reasons why the candidates may not make it through are:
# dbPAF-OMA conversion table does not contain uniprotID -- see about checking both rev & unrev ID (ID_in_dbPAF_OMA_conversion_table)
# **** issues with having both reviewed and unreviewed ids in conversion table
# OMA program does not actually identify ortholog that PSP does -- in this case no fix (in_OMA_orthologs)
# or my alignment does not work well to identify phos orthologs (in_griffin_phos_orthologs)
import pandas as pd
from sqlalchemy import create_engine
import os
# Don't use excel file, interprets genes as dates
dbPAF_df = pd.read_table('../TOTAL.elm', dtype=object)
psp_df = pd.read_table('Phosphorylation_site_dataset.tab', dtype=object)
psp_df = psp_df.loc[psp_df['ORGANISM'].isin(['human', 'mouse', 'rat', 'fruit fly'])] # filter out animals not in this list
comparison_df = psp_df[['SITE_GRP_ID', 'ORGANISM', 'ACC_ID', 'PROTEIN', 'MOD_RSD']].copy() # copy only necessary columns
comparison_df.rename(columns={'SITE_GRP_ID': 'PSP_SITE_GRP_ID', 'ACC_ID': 'Uniprot_ACC_ID'}, inplace=True) # give cols more specific names
comparison_df['Position'] = comparison_df.MOD_RSD.str[1:-2] # 'S23-p' --> '23' # Position is also str in dbPAF_df
comparison_df['Type'] = comparison_df.MOD_RSD.str[0] # 'S23-p' --> 'S'
# check if the UniprotIDs in PhosphositePlus are also in dbPAF
comparison_df['ID_in_dbPAF'] = comparison_df['Uniprot_ACC_ID'].isin(dbPAF_df['Uniprot'])
# check if UniprotID, site, and amino acid type from PSP are also in dbPAF
comparison_df['ID_and_site_in_dbPAF'] = comparison_df['Uniprot_ACC_ID'].isin(dbPAF_df['Uniprot']) \
& comparison_df['Position'].isin(dbPAF_df['Position']) \
& comparison_df['Type'].isin(dbPAF_df['Type'])
# check if the UniprotIDs from PSP are in the dbPAF to OMA conversion table
oma_uniprot_df = pd.read_table('oma-uniprot_clean.txt', dtype=object)
comparison_df['ID_in_OMA-dbPAF_conversion_table'] = comparison_df['Uniprot_ACC_ID'].isin(oma_uniprot_df['uniprot'])
# check if UniprotID in OMA orthologs file
engine = create_engine('sqlite:///../phosphosite_orthology.db') # create db in outside directory
table_iter = engine.execute("SELECT name FROM sqlite_master WHERE type='table';")
ortholog_species_pairs = ['CAEEL-HUMAN_UniprotIDs', 'DROME-HUMAN_UniprotIDs', 'MOUSE-HUMAN_UniprotIDs',
'RATNO-HUMAN_UniprotIDs', 'SCHPO-HUMAN_UniprotIDs', 'YEAST-HUMAN_UniprotIDs']
oma_ortholog_df = pd.DataFrame()
for i in ortholog_species_pairs:
    temp_oma_orth_df = pd.read_sql_table(i, engine)
#!/usr/bin/env python
# cardinal_pythonlib/psychiatry/timeline.py
"""
===============================================================================
Original code copyright (C) 2009-2020 <NAME> (<EMAIL>).
This file is part of cardinal_pythonlib.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================================================
Timeline calculations.
Primarily for a lithium/renal function project, Apr 2019.
Code is in DRAFT.
Usage from R:
.. code-block:: r
# -------------------------------------------------------------------------
# Load libraries
# -------------------------------------------------------------------------
RUN_ONCE_ONLY <- '
library(devtools)
devtools::install_github("rstudio/reticulate") # get latest version
'
library(data.table)
library(reticulate)
# -------------------------------------------------------------------------
# Set up reticulate
# -------------------------------------------------------------------------
VENV <- "~/dev/venvs/cardinal_pythonlib" # or your preferred virtualenv
PYTHON_EXECUTABLE <- ifelse(
.Platform$OS.type == "windows",
file.path(VENV, "Scripts", "python.exe"), # Windows
file.path(VENV, "bin", "python") # Linux
)
reticulate::use_python(PYTHON_EXECUTABLE, required=TRUE)
# -------------------------------------------------------------------------
# Import Python modules
# -------------------------------------------------------------------------
cpl_version <- reticulate::import("cardinal_pythonlib.version")
cpl_version$assert_version_eq("1.0.50")
cpl_logs <- reticulate::import("cardinal_pythonlib.logs")
cpl_logs$main_only_quicksetup_rootlogger()
cpl_timeline <- reticulate::import("cardinal_pythonlib.psychiatry.timeline")
# -------------------------------------------------------------------------
# Do something
# -------------------------------------------------------------------------
testdata_drug_events <- data.table(
patient_id=c(
rep("Alice", 3),
rep("Bob", 3)
),
drug_event_datetime=as.Date(c(
# Alice
"2018-01-05",
"2018-01-20",
"2018-04-01",
# Bob
"2018-06-05",
"2018-08-20",
"2018-10-01"
))
)
testdata_query_times <- data.table(
patient_id=c(
rep("Alice", 3),
rep("Bob", 3)
),
start=as.Date(c(
# Alice
rep("2017-01-01", 3),
# Bob
rep("2015-01-01", 3)
)),
when=as.Date(c(
# Alice
"2018-01-01",
"2018-01-10",
"2018-02-01",
# Bob
"2018-01-01",
"2018-09-10",
"2019-02-01"
))
)
testresult <- data.table(cpl_timeline$cumulative_time_on_drug(
drug_events_df=testdata_drug_events,
event_lasts_for_quantity=3,
event_lasts_for_units="days",
query_times_df=testdata_query_times,
patient_colname="patient_id",
event_datetime_colname="drug_event_datetime",
start_colname="start",
when_colname="when",
debug=TRUE
))
print(testresult)
The result should be:
.. code-block:: none
> print(testdata_drug_events)
patient_id drug_event_datetime
1: Alice 2018-01-05
2: Alice 2018-01-20
3: Alice 2018-04-01
4: Bob 2018-06-05
5: Bob 2018-08-20
6: Bob 2018-10-01
> print(testdata_query_times)
patient_id start when
1: Alice 2017-01-01 2018-01-01
2: Alice 2017-01-01 2018-01-10
3: Alice 2017-01-01 2018-02-01
4: Bob 2015-01-01 2018-01-01
5: Bob 2015-01-01 2018-09-10
6: Bob 2015-01-01 2019-02-01
> print(testresult)
patient_id start t before_days during_days after_days
1: Alice 2017-01-01 2018-01-01 365 0 0
2: Alice 2017-01-01 2018-01-10 369 3 2
3: Alice 2017-01-01 2018-02-01 369 6 21
4: Bob 2015-01-01 2018-01-01 1096 0 0
5: Bob 2015-01-01 2018-09-10 1251 6 91
6: Bob 2015-01-01 2019-02-01 1251 9 232
However, there is a ``reticulate`` bug that can cause problems, by corrupting
dates passed from R to Python:
.. code-block:: r
# PROBLEM on 2018-04-05, with reticulate 1.11.1:
# - the R data.table is fine
# - all the dates become the same date when it's seen by Python (the value
# of the first row in each date column)
# - when used without R, the Python code is fine
# - therefore, a problem with reticulate converting data for Python
# - same with data.frame() as with data.table()
# - same with as.Date.POSIXct() and as.Date.POSIXlt() as with as.Date()
# Further test:
cpl_rfunc <- reticulate::import("cardinal_pythonlib.psychiatry.rfunc")
cat(cpl_rfunc$get_python_repr(testdata_drug_events))
cat(cpl_rfunc$get_python_repr_of_type(testdata_drug_events))
print(testdata_drug_events)
print(reticulate::r_to_py(testdata_drug_events))
# Minimum reproducible example:
library(reticulate)
testdata_drug_events <- data.frame(
patient_id=c(
rep("Alice", 3),
rep("Bob", 3)
),
drug_event_datetime=as.Date(c(
# Alice
"2018-01-05",
"2018-01-20",
"2018-04-01",
# Bob
"2018-06-05",
"2018-08-20",
"2018-10-01"
))
)
print(testdata_drug_events)
print(reticulate::r_to_py(testdata_drug_events))
# The R data is:
#
# patient_id drug_event_datetime
# 1 Alice 2018-01-05
# 2 Alice 2018-01-20
# 3 Alice 2018-04-01
# 4 Bob 2018-06-05
# 5 Bob 2018-08-20
# 6 Bob 2018-10-01
#
# Output from reticulate::r_to_py() in the buggy version is:
#
# patient_id drug_event_datetime
# 0 Alice 2018-01-05
# 1 Alice 2018-01-05
# 2 Alice 2018-01-05
# 3 Bob 2018-01-05
# 4 Bob 2018-01-05
# 5 Bob 2018-01-05
#
# Known bug: https://github.com/rstudio/reticulate/issues/454
#
# Use remove.packages() then reinstall from github as above, giving
# reticulate_1.11.1-9000 [see sessionInfo()]...
# ... yes, that fixes it.
"""
from collections import defaultdict
import datetime
import logging
import sys
from typing import Any, Dict, List
import unittest
from numpy import array
from pandas import DataFrame
from cardinal_pythonlib.interval import Interval, IntervalList
from cardinal_pythonlib.logs import (
BraceStyleAdapter,
main_only_quicksetup_rootlogger,
)
log = BraceStyleAdapter(logging.getLogger(__name__))
DEFAULT_PATIENT_COLNAME = "patient_id"
DEFAULT_DRUG_EVENT_DATETIME_COLNAME = "drug_event_datetime"
DEFAULT_START_DATETIME_COLNAME = "start"
DEFAULT_QUERY_DATETIME_COLNAME = "when"
def drug_timelines(
drug_events_df: DataFrame,
event_lasts_for: datetime.timedelta,
patient_colname: str = DEFAULT_PATIENT_COLNAME,
event_datetime_colname: str = DEFAULT_DRUG_EVENT_DATETIME_COLNAME) \
-> Dict[Any, IntervalList]:
"""
Takes a set of drug event start times (one or more per patient), plus a
fixed time that each event is presumed to last for, and returns an
:class:`IntervalList` for each patient representing the set of events
(which may overlap, in which case they will be amalgamated).
Args:
drug_events_df:
pandas :class:`DataFrame` containing the event data
event_lasts_for:
when an event occurs, how long is it assumed to last for? For
example, if a prescription of lithium occurs on 2001-01-01, how
long is the patient presumed to be taking lithium as a consequence
(e.g. 1 day? 28 days? 6 months?)
patient_colname:
name of the column in ``drug_events_df`` containing the patient ID
event_datetime_colname:
name of the column in ``drug_events_df`` containing the date/time
of each event
Returns:
dict: mapping patient ID to a :class:`IntervalList` object indicating
the amalgamated intervals from the events
"""
sourcecolnum_pt = drug_events_df.columns.get_loc(patient_colname)
sourcecolnum_when = drug_events_df.columns.get_loc(event_datetime_colname)
timelines = defaultdict(IntervalList)
nrows = len(drug_events_df)
for rowidx in range(nrows):
patient_id = drug_events_df.iat[rowidx, sourcecolnum_pt]
event_when = drug_events_df.iat[rowidx, sourcecolnum_when]
interval = Interval(event_when, event_when + event_lasts_for)
ivlist = timelines[patient_id] # will create if unknown
ivlist.add(interval)
return timelines
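# Illustrative example (not executed here): with event_lasts_for=timedelta(days=28),
# two events for "Alice" on 2018-01-01 and 2018-01-14 produce overlapping intervals
# [Jan 1, Jan 29] and [Jan 14, Feb 11]; per the docstring, the IntervalList
# amalgamates these into a single period covering 2018-01-01 to 2018-02-11.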
DTYPE_STRING = "<U255"
# ... see treatment_resistant_depression.py
DTYPE_DATETIME = "datetime64[s]"
# ... https://docs.scipy.org/doc/numpy/reference/arrays.datetime.html
DTYPE_FLOAT = "Float64"
# ... https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html
DTYPE_TIMEDELTA = "timedelta64"
RCN_PATIENT_ID = "patient_id" # RCN: "result column name"
RCN_START = "start"
RCN_TIME = "t"
RCN_BEFORE_TIMEDELTA = "before_timedelta"
RCN_DURING_TIMEDELTA = "during_timedelta"
RCN_AFTER_TIMEDELTA = "after_timedelta"
RCN_BEFORE_DAYS = "before_days"
RCN_DURING_DAYS = "during_days"
RCN_AFTER_DAYS = "after_days"
def cumulative_time_on_drug(
drug_events_df: DataFrame,
query_times_df: DataFrame,
event_lasts_for_timedelta: datetime.timedelta = None,
event_lasts_for_quantity: float = None,
event_lasts_for_units: str = None,
patient_colname: str = DEFAULT_PATIENT_COLNAME,
event_datetime_colname: str = DEFAULT_DRUG_EVENT_DATETIME_COLNAME,
start_colname: str = DEFAULT_START_DATETIME_COLNAME,
when_colname: str = DEFAULT_QUERY_DATETIME_COLNAME,
include_timedelta_in_output: bool = False,
debug: bool = False) \
-> DataFrame:
"""
Args:
drug_events_df:
pandas :class:`DataFrame` containing the event data, with columns
named according to ``patient_colname``, ``event_datetime_colname``
event_lasts_for_timedelta:
when an event occurs, how long is it assumed to last for? For
example, if a prescription of lithium occurs on 2001-01-01, how
long is the patient presumed to be taking lithium as a consequence
(e.g. 1 day? 28 days? 6 months?)
event_lasts_for_quantity:
as an alternative to ``event_lasts_for_timedelta``, particularly if
you are calling from R to Python via ``reticulate`` (which doesn't
convert R ``as.difftime()`` to Python ``datetime.timedelta``), you
can specify ``event_lasts_for_quantity``, a number and
``event_lasts_for_units`` (q.v.).
event_lasts_for_units:
specify the units for ``event_lasts_for_quantity`` (q.v.), if used;
e.g. ``"days"``. The string value must be the name of an argument
to the Python ``datetime.timedelta`` constructor.
query_times_df:
times to query for, with columns named according to
``patient_colname``, ``start_colname``, and ``when_colname``
patient_colname:
name of the column in ``drug_events_df`` and ``query_time_df``
containing the patient ID
event_datetime_colname:
name of the column in ``drug_events_df`` containing the date/time
of each event
start_colname:
name of the column in ``query_time_df`` containing the date/time
representing the overall start time for the relevant patient (from
which cumulative times are calculated)
when_colname:
name of the column in ``query_time_df`` containing date/time
values at which to query
include_timedelta_in_output:
include ``datetime.timedelta`` values in the output? The default is
``False`` as this isn't supported by R/``reticulate``.
debug:
print debugging information to the log?
Returns:
:class:`DataFrame` with the requested data
"""
if event_lasts_for_timedelta is None:
assert event_lasts_for_quantity and event_lasts_for_units
timedelta_dict = {event_lasts_for_units: event_lasts_for_quantity}
event_lasts_for_timedelta = datetime.timedelta(**timedelta_dict)
if debug:
log.critical("drug_events_df:\n{!r}", drug_events_df)
log.critical("event_lasts_for:\n{!r}", event_lasts_for_timedelta)
log.critical("query_times_df:\n{!r}", query_times_df)
timelines = drug_timelines(
drug_events_df=drug_events_df,
event_lasts_for=event_lasts_for_timedelta,
patient_colname=patient_colname,
event_datetime_colname=event_datetime_colname,
)
query_nrow = len(query_times_df)
ct_coldefs = [ # column definitions:
(RCN_PATIENT_ID, DTYPE_STRING),
(RCN_START, DTYPE_DATETIME),
(RCN_TIME, DTYPE_DATETIME),
(RCN_BEFORE_DAYS, DTYPE_FLOAT),
(RCN_DURING_DAYS, DTYPE_FLOAT),
(RCN_AFTER_DAYS, DTYPE_FLOAT),
]
if include_timedelta_in_output:
ct_coldefs.extend([
(RCN_BEFORE_TIMEDELTA, DTYPE_TIMEDELTA),
(RCN_DURING_TIMEDELTA, DTYPE_TIMEDELTA),
(RCN_AFTER_TIMEDELTA, DTYPE_TIMEDELTA),
])
ct_arr = array([None] * query_nrow, dtype=ct_coldefs)
# log.debug("ct_arr:\n{!r}", ct_arr)
cumulative_times = DataFrame(ct_arr, index=list(range(query_nrow)))
# log.debug("cumulative_times:\n{!r}", cumulative_times)
# So we can use the fast "iat" function.
sourcecolnum_pt = query_times_df.columns.get_loc(patient_colname)
sourcecolnum_start = query_times_df.columns.get_loc(start_colname)
sourcecolnum_when = query_times_df.columns.get_loc(when_colname)
dest_colnum_pt = cumulative_times.columns.get_loc(RCN_PATIENT_ID)
dest_colnum_start = cumulative_times.columns.get_loc(RCN_START)
dest_colnum_t = cumulative_times.columns.get_loc(RCN_TIME)
dest_colnum_before_days = cumulative_times.columns.get_loc(RCN_BEFORE_DAYS)
dest_colnum_during_days = cumulative_times.columns.get_loc(RCN_DURING_DAYS)
dest_colnum_after_days = cumulative_times.columns.get_loc(RCN_AFTER_DAYS)
if include_timedelta_in_output:
dest_colnum_before_dt = cumulative_times.columns.get_loc(RCN_BEFORE_TIMEDELTA) # noqa
dest_colnum_during_dt = cumulative_times.columns.get_loc(RCN_DURING_TIMEDELTA) # noqa
dest_colnum_after_dt = cumulative_times.columns.get_loc(RCN_AFTER_TIMEDELTA) # noqa
else:
# for type checker
dest_colnum_before_dt = 0
dest_colnum_during_dt = 0
dest_colnum_after_dt = 0
for rowidx in range(query_nrow):
patient_id = query_times_df.iat[rowidx, sourcecolnum_pt]
start = query_times_df.iat[rowidx, sourcecolnum_start]
when = query_times_df.iat[rowidx, sourcecolnum_when]
ivlist = timelines[patient_id]
# log.critical("ivlist: {!r}", ivlist)
before, during, after = ivlist.cumulative_before_during_after(start,
when)
# log.critical(
# "{!r}.cumulative_before_during_after(start={!r}, when={!r}) "
# "-> {!r}, {!r}, {!r}",
# ivlist, start, when,
# before, during, after
# )
cumulative_times.iat[rowidx, dest_colnum_pt] = patient_id
cumulative_times.iat[rowidx, dest_colnum_start] = start
cumulative_times.iat[rowidx, dest_colnum_t] = when
cumulative_times.iat[rowidx, dest_colnum_before_days] = before.days
cumulative_times.iat[rowidx, dest_colnum_during_days] = during.days
cumulative_times.iat[rowidx, dest_colnum_after_days] = after.days
if include_timedelta_in_output:
cumulative_times.iat[rowidx, dest_colnum_before_dt] = before
cumulative_times.iat[rowidx, dest_colnum_during_dt] = during
cumulative_times.iat[rowidx, dest_colnum_after_dt] = after
return cumulative_times
# =============================================================================
# Unit testing
# =============================================================================
class TestTimeline(unittest.TestCase):
"""
Unit tests.
"""
DATEFORMAT = "%Y-%m-%d"
DATETIMEFORMAT = "%Y-%m-%d %H:%M"
DRUG_EVENT_TIME = " 00:00" # " 09:00"
QUERY_EVENT_TIME = " 00:00" # " 12:00"
@classmethod
def dateseq(cls, first: str, last: str,
time_suffix: str = "") -> List[datetime.datetime]:
fmt = cls.DATETIMEFORMAT if time_suffix else cls.DATEFORMAT
if time_suffix:
first += time_suffix
last += time_suffix
dfirst = datetime.datetime.strptime(first, fmt)
dlast = datetime.datetime.strptime(last, fmt)
assert dfirst <= dlast
dates = [] # type: List[datetime.datetime]
d = dfirst
while d <= dlast:
dates.append(d)
d += datetime.timedelta(days=1)
return dates
def test_timeline(self) -> None:
event_lasts_for = datetime.timedelta(weeks=4)
# event_lasts_for = datetime.timedelta(days=3)
log.debug("event_lasts_for: {!r}", event_lasts_for)
alice = "Alice"
drug_events_arr = array(
[
# Alice
(alice, "2018-01-05" + self.DRUG_EVENT_TIME),
(alice, "2018-01-20" + self.DRUG_EVENT_TIME),
(alice, "2018-04-01" + self.DRUG_EVENT_TIME),
],
dtype=[
(DEFAULT_PATIENT_COLNAME, DTYPE_STRING),
(DEFAULT_DRUG_EVENT_DATETIME_COLNAME, DTYPE_DATETIME),
]
)
drug_events_df = DataFrame.from_records(drug_events_arr)
log.debug("drug_events_df:\n{!r}", drug_events_df)
start = datetime.datetime.strptime("2017-01-01" + self.DRUG_EVENT_TIME,
self.DATETIMEFORMAT)
log.debug("start: {!r}", start)
qdata_rows = []
for dt in self.dateseq("2018-01-01", "2018-05-30",
time_suffix=self.QUERY_EVENT_TIME):
qdata_rows.append((alice, start, dt))
query_times_arr = array(
qdata_rows,
dtype=[
(DEFAULT_PATIENT_COLNAME, DTYPE_STRING),
(DEFAULT_START_DATETIME_COLNAME, DTYPE_DATETIME),
(DEFAULT_QUERY_DATETIME_COLNAME, DTYPE_DATETIME),
]
)
        query_times_df = DataFrame.from_records(query_times_arr)
import json
import sys
from functools import partial
import numpy as np
import pandas as pd
if sys.version_info[0] < 3:
pass
else:
pass
from tqdm import tqdm
def create_et_data(data, n_bins=5):
data["et_data"] = data['et_data'] \
.apply(str) \
.str.replace('$', ',', regex=False)
et_indices = data[(pd.notna(data['et_data'])) &
~(data['et_data'].isin(['"', '[]', 'nan']))].index
index_bins = pd.cut(et_indices, bins=n_bins, labels=np.arange(n_bins))
    data_et = pd.DataFrame([], columns=['x', 'y', 't'])
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :6], df)
tm.assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
tm.assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
tm.assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
tm.assert_frame_equal(result, expected)
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_empty_dtype_coerce(self):
# xref to #12411
# xref to #12045
# xref to #11594
# see below
# 10571
df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
result = concat([df1, df2])
expected = df1.dtypes
tm.assert_series_equal(result.dtypes, expected)
def test_dtype_coerceion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = "foo"
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
tm.assert_series_equal(result, ts)
assert result.name == ts.name
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]"))
exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))]
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes)
expected.index = exp_index
tm.assert_series_equal(result, expected)
    def test_concat_series_axis1(self, sort):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
result = concat(pieces, axis=1)
expected = DataFrame(pieces).T
tm.assert_frame_equal(result, expected)
result = concat(pieces, keys=["A", "B", "C"], axis=1)
expected = DataFrame(pieces, index=["A", "B", "C"]).T
tm.assert_frame_equal(result, expected)
# preserve series names, #2489
s = Series(randn(5), name="A")
s2 = Series(randn(5), name="B")
result = concat([s, s2], axis=1)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
s2.name = None
result = concat([s, s2], axis=1)
tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object"))
# must reindex, #2603
s = Series(randn(3), index=["c", "a", "b"], name="A")
s2 = Series(randn(4), index=["d", "a", "b", "c"], name="B")
result = concat([s, s2], axis=1, sort=sort)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_names_applied(self):
# ensure names argument is not ignored on axis=1, #23490
s = Series([1, 2, 3])
s2 = Series([4, 5, 6])
result = concat([s, s2], axis=1, keys=["a", "b"], names=["A"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]], columns=Index(["a", "b"], name="A")
)
tm.assert_frame_equal(result, expected)
result = concat([s, s2], axis=1, keys=[("a", 1), ("b", 2)], names=["A", "B"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]],
columns=MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["A", "B"]),
)
tm.assert_frame_equal(result, expected)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=["foo"])
expected = concat([df, df], keys=["foo", "bar"])
tm.assert_frame_equal(result, expected[:10])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
with pytest.raises(ValueError, match="All objects passed were None"):
concat([None, None])
def test_concat_timedelta64_block(self):
from pandas import to_timedelta
rng = to_timedelta(np.arange(10), unit="s")
df = DataFrame({"time": rng})
result = concat([df, df])
assert (result.iloc[:10]["time"] == rng).all()
assert (result.iloc[10:]["time"] == rng).all()
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))
expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))
tm.assert_frame_equal(result, expected)
result = concat(
[None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
)
expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
# to join with union
# these two are of different length!
left = concat([ts1, ts2], join="outer", axis=1)
right = concat([ts2, ts1], join="outer", axis=1)
assert len(left) == len(right)
def test_concat_bug_2972(self):
ts0 = Series(np.zeros(5))
ts1 = Series(np.ones(5))
ts0.name = ts1.name = "same name"
result = concat([ts0, ts1], axis=1)
expected = DataFrame({0: ts0, 1: ts1})
expected.columns = ["same name", "same name"]
tm.assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
# GH 3602, duplicate columns
df1 = DataFrame(
{
"firmNo": [0, 0, 0, 0],
"prc": [6, 6, 6, 6],
"stringvar": ["rrr", "rrr", "rrr", "rrr"],
}
)
df2 = DataFrame(
{"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]}
)
expected = DataFrame(
[
[0, 6, "rrr", 9, 1, 6],
[0, 6, "rrr", 10, 2, 6],
[0, 6, "rrr", 11, 3, 6],
[0, 6, "rrr", 12, 4, 6],
]
)
expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"]
result = concat([df1, df2], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_inner_join_empty(self):
# GH 15328
df_empty = DataFrame()
df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
df_expected = DataFrame({"a": []}, index=[], dtype="int64")
for how, expected in [("inner", df_expected), ("outer", df_a)]:
result = pd.concat([df_a, df_empty], axis=1, join=how)
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_same_names_ignore_index(self):
dates = date_range("01-Jan-2013", "01-Jan-2014", freq="MS")[0:-1]
s1 = Series(randn(len(dates)), index=dates, name="value")
s2 = Series(randn(len(dates)), index=dates, name="value")
result = concat([s1, s2], axis=1, ignore_index=True)
expected = Index([0, 1])
tm.assert_index_equal(result.columns, expected)
def test_concat_iterables(self):
# GH8645 check concat works with tuples, list, generators, and weird
# stuff like deque and custom iterables
df1 = DataFrame([1, 2, 3])
df2 = DataFrame([4, 5, 6])
expected = DataFrame([1, 2, 3, 4, 5, 6])
tm.assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
tm.assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
tm.assert_frame_equal(
concat((df for df in (df1, df2)), ignore_index=True), expected
)
tm.assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected)
class CustomIterator1:
def __len__(self) -> int:
return 2
def __getitem__(self, index):
try:
return {0: df1, 1: df2}[index]
except KeyError as err:
raise IndexError from err
tm.assert_frame_equal(pd.concat(CustomIterator1(), ignore_index=True), expected)
class CustomIterator2(abc.Iterable):
def __iter__(self):
yield df1
yield df2
tm.assert_frame_equal(pd.concat(CustomIterator2(), ignore_index=True), expected)
def test_concat_invalid(self):
# trying to concat a ndframe with a non-ndframe
df1 = tm.makeCustomDataframe(10, 2)
for obj in [1, dict(), [1, 2], (1, 2)]:
msg = (
f"cannot concatenate object of type '{type(obj)}'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
concat([df1, obj])
def test_concat_invalid_first_argument(self):
df1 = tm.makeCustomDataframe(10, 2)
df2 = tm.makeCustomDataframe(10, 2)
msg = (
"first argument must be an iterable of pandas "
'objects, you passed an object of type "DataFrame"'
)
with pytest.raises(TypeError, match=msg):
concat(df1, df2)
# generator ok though
concat(DataFrame(np.random.rand(5, 5)) for _ in range(3))
# text reader ok
# GH6583
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = read_csv(StringIO(data), chunksize=1)
        result = concat(reader, ignore_index=True)
import pandas as pd # Version 1.1.1
# %%
region = ['Vermont', 'New Hampshire', 'Maine', 'Rhode Island', 'Massachusetts',
'Connecticut', 'New Jersey', 'Pennsylvania', 'Ohio', 'Maryland',
'District of Columbia', 'Delaware', 'Virginia', 'West Virginia',
'New York']
undetected_factor = 2.4
green = 1
yellow = 2
red = 3
# %% Defines the active case calculation
prop = [1, 0.94594234, 0.8585381, 0.76322904, 0.66938185,
0.58139261, 0.50124929, 0.42963663, 0.36651186, 0.31143254,
0.26375154, 0.22273485, 0.18763259, 0.15772068, 0.1323241,
0.11082822, 0.09268291, 0.077402, 0.06456005, 0.0537877,
0.04476636, 0.03722264, 0.03092299, 0.02566868, 0.02129114,
0.0176478, 0.01461838, 0.01210161, 0.01001242, 0.00827947]
prop = prop[::-1]
def active_cases(x):
tmp = []
for i, j in zip(prop, x):
tmp.append(i * j)
return sum(tmp) * undetected_factor
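# active_cases() is presumably applied to a trailing 30-day window of newly confirmed
# cases: `prop` (reversed above so the most recent day gets weight 1) estimates the
# fraction of each day's cases still active, and the weighted sum is scaled by
# `undetected_factor` to account for infections that were never detected.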
def status_num(x):
if x <= 400:
return green
elif x <= 800:
return yellow
else:
return red
# %% Takes population values from JHU, which in turn come from US Census
# estimates for 2019
pops_url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/'\
'csse_covid_19_data/csse_covid_19_time_series/'\
'time_series_covid19_deaths_US.csv'
county_pops = pd.read_csv(pops_url)
import argparse, numpy as np, os, pandas as pd
from scipy.optimize import curve_fit
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from saps2 import saps2_risk
def tune_oasis(X, y):
logreg = LogisticRegression(solver='lbfgs')
logreg.fit(X[:, None], y)
b0 = logreg.intercept_[0]
b1 = logreg.coef_[0, 0]
return b0, b1
def tune_saps2(X, y):
popt, pcov = curve_fit(saps2_risk, X, y,
p0=np.array([-7.7631, 0.0737, 0.9971]))
return popt
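# The starting point p0 is the published SAPS II coefficient set
# (logit = -7.7631 + 0.0737*score + 0.9971*ln(score + 1)); assuming saps2_risk
# implements that equation, curve_fit starts from the standard risk model and
# re-estimates the coefficients against the local outcome data.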
def tune_score(score_name, data, listfile):
if score_name not in ['oasis', 'saps2']:
raise Exception('ICU score is not recognized.')
X_train = pd.read_csv(os.path.join(data, f'train_{score_name}_scores.csv'))
X_train = X_train['score'].values
stay_df = pd.read_csv(listfile).sort_values(by=['stay'])
y_train = stay_df['y_true'].values
if score_name == 'oasis':
B = np.zeros((10, 2))
elif score_name == 'saps2':
B = np.zeros((10, 3))
for i in range(10):
X1, X2, y1, y2 = train_test_split(X_train, y_train,
test_size=0.1, stratify=y_train, random_state=i)
if score_name == 'oasis':
b = tune_oasis(X1, y1)
elif score_name == 'saps2':
b = tune_saps2(X1, y1)
B[i] = np.array(b)
return B
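# B holds one row of re-fitted coefficients per repetition: ten stratified 90%
# subsamples of the training data are tuned independently, which gives a rough
# picture of how stable the re-calibrated coefficients are.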
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Tune ICU score.')
parser.add_argument('score_name', type=str,
help='ICU severity score')
parser.add_argument('data', type=str,
help='path to data directory')
parser.add_argument('listfile', type=str,
help='path to listfile')
parser.add_argument('--coefs', type=str, default='coefs',
help='path to coefficients directory')
args = parser.parse_args()
if not os.path.exists(args.coefs):
os.makedirs(args.coefs)
B = tune_score(args.score_name, args.data, args.listfile)
if args.score_name == 'oasis':
        B = pd.DataFrame(B, columns=['b0', 'b1'])
#! /usr/bin/env python
#Note this file (model.py) is the same as that in Benchmarks/Pilot1/Uno/uno_baseline_keras2.py except with the following change::
#
#- unoBmk = benchmark.BenchmarkUno(benchmark.file_path, 'uno_default_model.txt', 'keras',
#+ #mymodel_common = candle.Benchmark(file_path,os.getenv("DEFAULT_PARAMS_FILE"),'keras',prog='myprog',desc='My model')
#+ unoBmk = benchmark.BenchmarkUno(benchmark.file_path, os.getenv("DEFAULT_PARAMS_FILE"), 'keras',
from __future__ import division, print_function
import argparse
import collections
import logging
import os
import random
import threading
import numpy as np
import pandas as pd
import keras
from keras import backend as K
from keras import optimizers
from keras.models import Model
from keras.layers import Input, Dense, Dropout
from keras.callbacks import Callback, ModelCheckpoint, ReduceLROnPlateau, LearningRateScheduler, TensorBoard
from keras.utils import get_custom_objects
from keras.utils.vis_utils import plot_model
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
from sklearn.model_selection import KFold, StratifiedKFold, GroupKFold
from scipy.stats.stats import pearsonr
# For non-interactive plotting
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import uno as benchmark
import candle_keras as candle
import uno_data
from uno_data import CombinedDataLoader, CombinedDataGenerator
logger = logging.getLogger(__name__)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def set_seed(seed):
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(seed)
random.seed(seed)
if K.backend() == 'tensorflow':
import tensorflow as tf
tf.set_random_seed(seed)
# session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
# sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
# K.set_session(sess)
        # Uncomment when running on an optimized tensorflow where NUM_INTER_THREADS and
# NUM_INTRA_THREADS env vars are set.
# session_conf = tf.ConfigProto(inter_op_parallelism_threads=int(os.environ['NUM_INTER_THREADS']),
# intra_op_parallelism_threads=int(os.environ['NUM_INTRA_THREADS']))
# sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
# K.set_session(sess)
def verify_path(path):
folder = os.path.dirname(path)
if folder and not os.path.exists(folder):
os.makedirs(folder)
def set_up_logger(logfile, verbose):
verify_path(logfile)
fh = logging.FileHandler(logfile)
fh.setFormatter(logging.Formatter("[%(asctime)s %(process)d] %(message)s", datefmt="%Y-%m-%d %H:%M:%S"))
fh.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter(''))
sh.setLevel(logging.DEBUG if verbose else logging.INFO)
for log in [logger, uno_data.logger]:
log.setLevel(logging.DEBUG)
log.addHandler(fh)
log.addHandler(sh)
def extension_from_parameters(args):
"""Construct string for saving model with annotation of parameters"""
ext = ''
ext += '.A={}'.format(args.activation)
ext += '.B={}'.format(args.batch_size)
ext += '.E={}'.format(args.epochs)
ext += '.O={}'.format(args.optimizer)
# ext += '.LEN={}'.format(args.maxlen)
ext += '.LR={}'.format(args.learning_rate)
ext += '.CF={}'.format(''.join([x[0] for x in sorted(args.cell_features)]))
ext += '.DF={}'.format(''.join([x[0] for x in sorted(args.drug_features)]))
if args.feature_subsample > 0:
ext += '.FS={}'.format(args.feature_subsample)
if args.drop > 0:
ext += '.DR={}'.format(args.drop)
if args.warmup_lr:
ext += '.wu_lr'
if args.reduce_lr:
ext += '.re_lr'
if args.residual:
ext += '.res'
if args.use_landmark_genes:
ext += '.L1000'
if args.no_gen:
ext += '.ng'
for i, n in enumerate(args.dense):
if n > 0:
ext += '.D{}={}'.format(i+1, n)
if args.dense_feature_layers != args.dense:
for i, n in enumerate(args.dense):
if n > 0:
ext += '.FD{}={}'.format(i+1, n)
return ext
def discretize(y, bins=5):
percentiles = [100 / bins * (i + 1) for i in range(bins - 1)]
thresholds = [np.percentile(y, x) for x in percentiles]
classes = np.digitize(y, thresholds)
return classes
def r2(y_true, y_pred):
SS_res = K.sum(K.square(y_true - y_pred))
SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
return (1 - SS_res/(SS_tot + K.epsilon()))
def mae(y_true, y_pred):
return keras.metrics.mean_absolute_error(y_true, y_pred)
def evaluate_prediction(y_true, y_pred):
mse = mean_squared_error(y_true, y_pred)
mae = mean_absolute_error(y_true, y_pred)
r2 = r2_score(y_true, y_pred)
corr, _ = pearsonr(y_true, y_pred)
return {'mse': mse, 'mae': mae, 'r2': r2, 'corr': corr}
def log_evaluation(metric_outputs, description='Comparing y_true and y_pred:'):
logger.info(description)
for metric, value in metric_outputs.items():
logger.info(' {}: {:.4f}'.format(metric, value))
def plot_history(out, history, metric='loss', title=None):
title = title or 'model {}'.format(metric)
val_metric = 'val_{}'.format(metric)
plt.figure(figsize=(8, 6))
plt.plot(history.history[metric], marker='o')
plt.plot(history.history[val_metric], marker='d')
plt.title(title)
plt.ylabel(metric)
plt.xlabel('epoch')
plt.legend(['train_{}'.format(metric), 'val_{}'.format(metric)], loc='upper center')
png = '{}.plot.{}.png'.format(out, metric)
plt.savefig(png, bbox_inches='tight')
class LoggingCallback(Callback):
def __init__(self, print_fcn=print):
Callback.__init__(self)
self.print_fcn = print_fcn
def on_epoch_end(self, epoch, logs={}):
msg = "[Epoch: %i] %s" % (epoch, ", ".join("%s: %f" % (k, v) for k, v in sorted(logs.items())))
self.print_fcn(msg)
class PermanentDropout(Dropout):
def __init__(self, rate, **kwargs):
super(PermanentDropout, self).__init__(rate, **kwargs)
self.uses_learning_phase = False
def call(self, x, mask=None):
if 0. < self.rate < 1.:
noise_shape = self._get_noise_shape(x)
x = K.dropout(x, self.rate, noise_shape)
return x
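# PermanentDropout keeps dropout active at inference time as well (it does not gate
# on the Keras learning phase), so repeated forward passes of the trained model give
# stochastic predictions -- the usual recipe for Monte Carlo dropout uncertainty
# estimates.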
class ModelRecorder(Callback):
def __init__(self, save_all_models=False):
Callback.__init__(self)
self.save_all_models = save_all_models
get_custom_objects()['PermanentDropout'] = PermanentDropout
def on_train_begin(self, logs={}):
self.val_losses = []
self.best_val_loss = np.Inf
self.best_model = None
def on_epoch_end(self, epoch, logs={}):
val_loss = logs.get('val_loss')
self.val_losses.append(val_loss)
if val_loss < self.best_val_loss:
self.best_model = keras.models.clone_model(self.model)
self.best_val_loss = val_loss
def build_feature_model(input_shape, name='', dense_layers=[1000, 1000],
activation='relu', residual=False,
dropout_rate=0, permanent_dropout=True):
x_input = Input(shape=input_shape)
h = x_input
for i, layer in enumerate(dense_layers):
x = h
h = Dense(layer, activation=activation)(h)
if dropout_rate > 0:
if permanent_dropout:
h = PermanentDropout(dropout_rate)(h)
else:
h = Dropout(dropout_rate)(h)
if residual:
try:
h = keras.layers.add([h, x])
except ValueError:
pass
model = Model(x_input, h, name=name)
return model
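# Each feature type (cell or drug) gets its own small MLP encoder; the optional
# residual connection is wrapped in try/except because keras.layers.add() raises a
# ValueError when consecutive dense layers have different widths, in which case the
# shortcut is simply skipped.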
def build_model(loader, args, permanent_dropout=True, silent=False):
input_models = {}
dropout_rate = args.drop
for fea_type, shape in loader.feature_shapes.items():
base_type = fea_type.split('.')[0]
if base_type in ['cell', 'drug']:
box = build_feature_model(input_shape=shape, name=fea_type,
dense_layers=args.dense_feature_layers,
dropout_rate=dropout_rate, permanent_dropout=permanent_dropout)
if not silent:
logger.debug('Feature encoding submodel for %s:', fea_type)
box.summary(print_fn=logger.debug)
input_models[fea_type] = box
inputs = []
encoded_inputs = []
for fea_name, fea_type in loader.input_features.items():
shape = loader.feature_shapes[fea_type]
fea_input = Input(shape, name='input.'+fea_name)
inputs.append(fea_input)
if fea_type in input_models:
input_model = input_models[fea_type]
encoded = input_model(fea_input)
else:
encoded = fea_input
encoded_inputs.append(encoded)
merged = keras.layers.concatenate(encoded_inputs)
h = merged
for i, layer in enumerate(args.dense):
x = h
h = Dense(layer, activation=args.activation)(h)
if dropout_rate > 0:
if permanent_dropout:
h = PermanentDropout(dropout_rate)(h)
else:
h = Dropout(dropout_rate)(h)
if args.residual:
try:
h = keras.layers.add([h, x])
except ValueError:
pass
output = Dense(1)(h)
return Model(inputs, output)
def initialize_parameters():
# Build benchmark object
#mymodel_common = candle.Benchmark(file_path,os.getenv("DEFAULT_PARAMS_FILE"),'keras',prog='myprog',desc='My model')
unoBmk = benchmark.BenchmarkUno(benchmark.file_path, os.getenv("DEFAULT_PARAMS_FILE"), 'keras',
prog='uno_baseline', desc='Build neural network based models to predict tumor response to single and paired drugs.')
# Initialize parameters
gParameters = candle.initialize_parameters(unoBmk)
#benchmark.logger.info('Params: {}'.format(gParameters))
return gParameters
class Struct:
def __init__(self, **entries):
self.__dict__.update(entries)
def run(params):
args = Struct(**params)
set_seed(args.rng_seed)
ext = extension_from_parameters(args)
verify_path(args.save)
prefix = args.save + ext
logfile = args.logfile if args.logfile else prefix+'.log'
set_up_logger(logfile, args.verbose)
logger.info('Params: {}'.format(params))
loader = CombinedDataLoader(seed=args.rng_seed)
loader.load(cache=args.cache,
ncols=args.feature_subsample,
cell_features=args.cell_features,
drug_features=args.drug_features,
drug_median_response_min=args.drug_median_response_min,
drug_median_response_max=args.drug_median_response_max,
use_landmark_genes=args.use_landmark_genes,
use_filtered_genes=args.use_filtered_genes,
preprocess_rnaseq=args.preprocess_rnaseq,
single=args.single,
train_sources=args.train_sources,
test_sources=args.test_sources,
embed_feature_source=not args.no_feature_source,
encode_response_source=not args.no_response_source,
)
val_split = args.validation_split
train_split = 1 - val_split
if args.export_data:
fname = args.export_data
loader.partition_data(cv_folds=args.cv, train_split=train_split, val_split=val_split,
cell_types=args.cell_types, by_cell=args.by_cell, by_drug=args.by_drug)
train_gen = CombinedDataGenerator(loader, batch_size=args.batch_size, shuffle=args.shuffle)
val_gen = CombinedDataGenerator(loader, partition='val', batch_size=args.batch_size, shuffle=args.shuffle)
x_train_list, y_train = train_gen.get_slice(size=train_gen.size, dataframe=True, single=args.single)
x_val_list, y_val = val_gen.get_slice(size=val_gen.size, dataframe=True, single=args.single)
df_train = pd.concat([y_train] + x_train_list, axis=1)
df_val = pd.concat([y_val] + x_val_list, axis=1)
    df = pd.concat([df_train, df_val])
"""PatchSim: A system for doing metapopulation SEIR* models."""
import time
import logging
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
logger = logging.getLogger(__name__)
def read_config(config_file):
"""Read configuration.
Configuration files contain one key=value pair per line.
The following is an example of the contents of a config file::
PatchFile=test_pop.txt
NetworkFile=test_net.txt
NetworkType=Static
ExposureRate=0.65
InfectionRate=0.67
RecoveryRate=0.4
ScalingFactor=1
SeedFile=test_seed.txt
VaxFile=test_vax.txt
VaxDelay=4
VaxEfficacy=0.5
StartDate=1
Duration=30
LoadState=False
SaveState=True
SaveFile=checkpoint1.npy
OutputFile=test1.out
OutputFormat=Whole
LogFile=test1.log
Parameters
----------
config_file : str
Path to the configuration file.
Returns
-------
dict (str -> str)
The configuration key value pairs.
"""
config_df = pd.read_csv(config_file, delimiter="=", names=["key", "val"])
configs = dict(zip(config_df.key, config_df.val))
configs.setdefault("Model", "Mobility")
return configs
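# Hedged usage sketch (not part of PatchSim): parse a config file into a plain dict.
# "example.cfg" is a hypothetical path; the keys mirror the docstring example above.
def example_read_config(path="example.cfg"):
    configs = read_config(path)
    # values come back as strings; numeric fields are cast where they are used
    duration = int(configs["Duration"])
    exposure_rate = float(configs.get("ExposureRate", 0.0))
    return duration, exposure_rate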
def load_patch(configs):
"""Load the patch file.
A patch file contains the population size of a patch.
The file has two space separated columns.
Following is an example of a patch file::
A 10000
B 10000
C 10000
Parameters
----------
configs : dict
The configuration dictionary.
Must contain the "PatchFile" pointing to location of patch file.
Returns
-------
DataFrame (names=(id, pops), dtypes=(str, int))
A dataframe containing populations of patches.
"""
patch_df = pd.read_csv(
configs["PatchFile"],
names=["id", "pops"],
delimiter=" ",
dtype={"id": str, "pops": int},
)
patch_df.sort_values("id", inplace=True)
logger.info("Loaded patch attributes")
return patch_df
def load_param_file(configs):
"""Load the parameter file.
A parameter file contains one row per patch.
Each row must have two or more columns.
    Following is an example of a parameter file::
B 0 0 0.54 0.54 0.54 0.54 0 0 0 0
A 0.72
Parameters
----------
configs : dict
The configuration dictionary.
Must contain the "ParamFile" pointing to location of parameter file.
Returns
-------
DataFrame
A dataframe with one column per patch.
The column names are IDs of the patches.
Each column contains the "beta" value of the patch over time.
"""
param_df = pd.read_csv(
configs["ParamFile"], delimiter=" ", dtype={0: str}, header=None
)
param_df = param_df.set_index(0)
param_df = param_df.fillna(method="ffill", axis=1)
param_df = param_df.T
return param_df
def load_params(configs, patch_df):
"""Load the simulation parameters.
Parameters
----------
configs : dict
The configuration key value pairs.
patch_df : DataFrame
A dataframe containing populations of patches.
Returns
-------
dict (str -> float or ndarray)
A dictionary of model parameters.
The "beta" parameter is a ndarray
with shape=(NumPatches x NumTimesteps)
and dtype=float.
"""
params = {}
params["T"] = int(configs["Duration"])
beta = float(configs.get("ExposureRate", 0.0))
params["beta"] = np.full((len(patch_df), params["T"]), beta)
params["alpha"] = float(configs.get("InfectionRate", 0.0))
params["gamma"] = float(configs.get("RecoveryRate", 0.0))
logger.info(
"Parameter: alpha=%e, beta=%e, gamma=%e", params["alpha"], beta, params["gamma"]
)
if "ParamFile" in configs:
param_df = load_param_file(configs)
for i, id_ in enumerate(patch_df["id"]):
if id_ in param_df.columns:
xs = param_df[id_]
params["beta"][i, 0 : len(xs)] = xs
logger.info("Loaded disease parameters from ParamFile")
else:
logger.info("No ParamFile loaded")
### Optional parameters
params["scaling"] = float(configs.get("ScalingFactor", 1.0))
params["vaxeff"] = float(configs.get("VaxEfficacy", 1.0))
params["delta"] = float(configs.get("WaningRate", 0.0))
params["vaxdelta"] = float(configs.get("VaxWaningRate", 0.0))
params["kappa"] = float(configs.get("AsymptomaticReduction", 1.0))
params["symprob"] = float(configs.get("SymptomaticProbability", 1.0))
params["epsilon"] = float(configs.get("PresymptomaticReduction", 1.0))
# if params["delta"]:
# logger.info("Found WaningRate. Running SEIRS model.")
return params
def load_seed(configs, params, patch_df):
"""Load the disease seeding schedule file.
A seed file contains the disease seeding schedule.
Following is an example of the contents of a seed file::
0 A 20
0 B 20
1 C 20
2 C 30
Parameters
----------
configs : dict
The configuration dictionary.
params: dict (str -> float or ndarray)
A dictionary of model parameters.
patch_df : DataFrame
A dataframe containing populations of patches.
Returns
-------
    ndarray shape=(NumTimesteps x NumPatches)
A seeding schedule matrix
"""
if "SeedFile" not in configs:
logger.info("Continuing without seeding")
return np.zeros((params["T"], len(patch_df)))
seed_df = pd.read_csv(
configs["SeedFile"],
delimiter=" ",
names=["Day", "Id", "Count"],
dtype={"Id": str},
)
seed_mat = np.zeros((params["T"], len(patch_df)))
seed_df = seed_df[seed_df.Day<params["T"]] ### Skipping seeds after end of simulation
patch_idx = {id_: i for i, id_ in enumerate(patch_df["id"])}
for day, id_, count in seed_df.itertuples(index=False, name=None):
idx = patch_idx[id_]
seed_mat[day, idx] = count
logger.info("Loaded seeding schedule")
return seed_mat
def load_vax(configs, params, patch_df):
"""Load the vaccination schedule file.
A vax file contains the vaccination schedule.
Following is an example of the contents of the vax file::
0 A 10
2 B 10
5 C 10
Parameters
----------
configs : dict
The configuration dictionary.
params: dict (str -> float or ndarray)
A dictionary of model parameters.
patch_df : DataFrame
A dataframe containing populations of patches.
Returns
-------
    ndarray shape=(NumTimesteps x NumPatches)
        A vaccination schedule matrix (NumTimesteps x NumPatches)
"""
vax_mat = np.zeros((params["T"], len(patch_df)), dtype=int)
if "VaxFile" not in configs:
return vax_mat
vax_df = pd.read_csv(
configs["VaxFile"],
delimiter=" ",
names=["Day", "Id", "Count"],
dtype={"Id": str, "Count": int},
)
vax_delay = int(configs.get("VaxDelay", 0))
vax_df = vax_df[vax_df.Day<params["T"] - vax_delay] ### Skipping vaxs which get applied after end of simulation
patch_idx = {id_: i for i, id_ in enumerate(patch_df["id"])}
for day, id_, count in vax_df.itertuples(index=False, name=None):
idx = patch_idx[id_]
day = day + vax_delay
vax_mat[day, idx] = count
return vax_mat
def load_Theta(configs, patch_df):
"""Load the patch connectivity network.
    This function loads the dynamic network connectivity file.
    The following is an example of the network connectivity file::
A A 0 1
B B 0 1
C C 0 1
Parameters
----------
configs : dict
The configuration dictionary.
Must contain keys "NetworkFile" and "NetworkType".
patch_df : DataFrame
A dataframe containing populations of patches.
Returns
-------
ndarray shape=(NumThetaIndices x NumPatches x NumPatches)
The dynamic patch connectivity network
"""
theta_df = pd.read_csv(
configs["NetworkFile"],
names=["src_Id", "dest_Id", "theta_index", "flow"],
delimiter=" ",
dtype={"src_Id": str, "dest_Id": str},
)
if configs["NetworkType"] == "Static":
if not np.all(theta_df.theta_index == 0):
raise ValueError("Theta indices mismatch. Ensure NetworkType=Static.")
elif configs["NetworkType"] == "Weekly":
if not list(sorted(set(theta_df.theta_index))) == list(range(53)):
raise ValueError("Theta indices mismatch. Ensure NetworkType=Weekly.")
elif configs["NetworkType"] == "Monthly":
if not list(sorted(set(theta_df.theta_index))) == list(range(12)):
raise ValueError("Theta indices mismatch. Ensure NetworkType=Monthly.")
else:
raise ValueError("Unknown NetworkType=%s" % configs["NetworkType"])
Theta_indices = theta_df.theta_index.unique()
Theta = np.zeros((len(Theta_indices), len(patch_df), len(patch_df)))
patch_idx = {id_: i for i, id_ in enumerate(patch_df["id"])}
for src_Id, dest_Id, theta_index, flow in theta_df.itertuples(
index=False, name=None
):
try:
src_Idx = patch_idx[src_Id]
dest_Idx = patch_idx[dest_Id]
Theta[theta_index, src_Idx, dest_Idx] = flow
except KeyError:
logger.warning(
"Ignoring flow entries for missing patches. Ensure all patches listed in PatchFile."
)
logger.info("Loaded temporal travel matrix")
return Theta
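# Hedged sketch (not part of PatchSim): what the loaded Theta looks like for the
# three-patch example above, and how a step picks the active slice. Identity flows
# mean every individual stays in its home patch (no mobility).
def example_static_theta():
    patch_ids = ["A", "B", "C"]
    Theta = np.zeros((1, len(patch_ids), len(patch_ids)))
    for i in range(len(patch_ids)):
        Theta[0, i, i] = 1.0  # all flow stays within the home patch
    theta = Theta[0]  # Static networks always use theta_index 0; shape (NumPatches, NumPatches)
    return theta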
def do_patchsim_stoch_mobility_step(
State_Array, patch_df, params, theta, seeds, vaxs, t
):
"""Do step of the stochastic (mobility) simulation."""
    # FIXME: This method doesn't work at all,
    # as it does not populate new_inf,
    # which is what gets written out.
S, E, I, R, V, new_inf = State_Array ## Aliases for the State Array
## seeding for day t (seeding implies S->I)
actual_seed = np.minimum(seeds[t], S[t])
S[t] = S[t] - actual_seed
I[t] = I[t] + actual_seed
## vaccination for day t
max_SV = np.minimum(vaxs[t], S[t])
actual_SV = np.random.binomial(max_SV.astype(int), params["vaxeff"])
S[t] = S[t] - actual_SV
V[t] = V[t] + actual_SV
N = patch_df.pops.to_numpy()
# Effective population after movement step
N_eff = theta.T.dot(N)
I_eff = theta.T.dot(I[t])
E_eff = theta.T.dot(E[t])
# Force of infection from symp/asymptomatic individuals
beta_j_eff = I_eff
beta_j_eff = beta_j_eff / N_eff
beta_j_eff = beta_j_eff * params["beta"][:, t]
beta_j_eff = beta_j_eff * (
(1 - params["kappa"]) * (1 - params["symprob"]) + params["symprob"]
)
beta_j_eff = np.nan_to_num(beta_j_eff)
# Force of infection from presymptomatic individuals
E_beta_j_eff = E_eff
E_beta_j_eff = E_beta_j_eff / N_eff
E_beta_j_eff = E_beta_j_eff * params["beta"][:, t]
E_beta_j_eff = E_beta_j_eff * (1 - params["epsilon"])
E_beta_j_eff = np.nan_to_num(E_beta_j_eff)
# Infection force
inf_force = theta.dot(beta_j_eff + E_beta_j_eff)
# New exposures during day t
actual_SE = np.random.binomial(S[t],inf_force)
actual_EI = np.random.binomial(E[t], params["alpha"])
actual_IR = np.random.binomial(I[t], params["gamma"])
actual_RS = np.random.binomial(R[t], params["delta"])
actual_VS = np.random.binomial(V[t], params["vaxdelta"])
# Update to include presymptomatic and asymptomatic terms
S[t + 1] = S[t] - actual_SE + actual_RS + actual_VS
E[t + 1] = E[t] + actual_SE - actual_EI
I[t + 1] = I[t] + actual_EI - actual_IR
R[t + 1] = R[t] + actual_IR - actual_RS
V[t + 1] = V[t] - actual_VS
new_inf[t] = actual_SE
    ## Earlier computation of the force of infection included network sampling.
    ## Now implementing only disease progression stochasticity.
# N = patch_df.pops.values
# S_edge = np.concatenate(
# [
# np.random.multinomial(
# S[t][x], theta[x] / (theta[x].sum() + 10 ** -12)
# ).reshape(1, len(N))
# for x in range(len(N))
# ],
# axis=0,
# )
# E_edge = np.concatenate(
# [
# np.random.multinomial(
# E[t][x], theta[x] / (theta[x].sum() + 10 ** -12)
# ).reshape(1, len(N))
# for x in range(len(N))
# ],
# axis=0,
# )
# I_edge = np.concatenate(
# [
# np.random.multinomial(
# I[t][x], theta[x] / (theta[x].sum() + 10 ** -12)
# ).reshape(1, len(N))
# for x in range(len(N))
# ],
# axis=0,
# )
# R_edge = np.concatenate(
# [
# np.random.multinomial(
# R[t][x], theta[x] / (theta[x].sum() + 10 ** -12)
# ).reshape(1, len(N))
# for x in range(len(N))
# ],
# axis=0,
# )
# V_edge = np.concatenate(
# [
# np.random.multinomial(
# V[t][x], theta[x] / (theta[x].sum() + 10 ** -12)
# ).reshape(1, len(N))
# for x in range(len(N))
# ],
# axis=0,
# )
# N_edge = S_edge + E_edge + I_edge + R_edge + V_edge
# N_eff = N_edge.sum(axis=0)
# I_eff = I_edge.sum(axis=0)
# beta_j_eff = np.nan_to_num(params["beta"][:, t] * (I_eff / N_eff))
# actual_SE = np.concatenate(
# [
# np.random.binomial(S_edge[:, x], beta_j_eff[x]).reshape(len(N), 1)
# for x in range(len(N))
# ],
# axis=1,
# ).sum(axis=1)
# actual_EI = np.random.binomial(E[t], params["alpha"])
# actual_IR = np.random.binomial(I[t], params["gamma"])
# actual_RS = np.random.binomial(R[t], params["delta"])
# ### Update to include presymptomatic and asymptomatic terms
# S[t + 1] = S[t] - actual_SE + actual_RS
# E[t + 1] = E[t] + actual_SE - actual_EI
# I[t + 1] = I[t] + actual_EI - actual_IR
# R[t + 1] = R[t] + actual_IR - actual_RS
# V[t + 1] = V[t]
def do_patchsim_det_mobility_step(State_Array, patch_df, params, theta, seeds, vaxs, t):
"""Do step of the deterministic simulation."""
S, E, I, R, V, new_inf = State_Array ## Aliases for the State Array
# seeding for day t (seeding implies S->I)
actual_seed = np.minimum(seeds[t], S[t])
S[t] = S[t] - actual_seed
I[t] = I[t] + actual_seed
# vaccination for day t
actual_vax = np.minimum(vaxs[t] * params["vaxeff"], S[t])
S[t] = S[t] - actual_vax
V[t] = V[t] + actual_vax
N = patch_df.pops.to_numpy()
# Effective population after movement step
N_eff = theta.T.dot(N)
I_eff = theta.T.dot(I[t])
E_eff = theta.T.dot(E[t])
# Force of infection from symp/asymptomatic individuals
beta_j_eff = I_eff
beta_j_eff = beta_j_eff / N_eff
beta_j_eff = beta_j_eff * params["beta"][:, t]
beta_j_eff = beta_j_eff * (
(1 - params["kappa"]) * (1 - params["symprob"]) + params["symprob"]
)
beta_j_eff = np.nan_to_num(beta_j_eff)
# Force of infection from presymptomatic individuals
E_beta_j_eff = E_eff
E_beta_j_eff = E_beta_j_eff / N_eff
E_beta_j_eff = E_beta_j_eff * params["beta"][:, t]
E_beta_j_eff = E_beta_j_eff * (1 - params["epsilon"])
E_beta_j_eff = np.nan_to_num(E_beta_j_eff)
# Infection force
inf_force = theta.dot(beta_j_eff + E_beta_j_eff)
# New exposures during day t
new_inf[t] = inf_force * S[t]
new_inf[t] = np.minimum(new_inf[t], S[t])
# Update to include presymptomatic and asymptomatic terms
S[t + 1] = S[t] - new_inf[t] + params["delta"] * R[t] + params["vaxdelta"] * V[t]
E[t + 1] = new_inf[t] + (1 - params["alpha"]) * E[t]
I[t + 1] = params["alpha"] * E[t] + (1 - params["gamma"]) * I[t]
R[t + 1] = params["gamma"] * I[t] + (1 - params["delta"]) * R[t]
V[t + 1] = (1 - params["vaxdelta"]) * V[t]
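# Hedged sketch (not part of PatchSim): the same deterministic update for a single
# patch with no mobility, dropping the V compartment and the presymptomatic /
# asymptomatic reduction terms for brevity. alpha, beta, gamma, delta are per-day rates.
def example_single_patch_step(S, E, I, R, N, beta, alpha, gamma, delta=0.0):
    new_exposed = min(beta * I / N * S, S)
    S_next = S - new_exposed + delta * R
    E_next = new_exposed + (1 - alpha) * E
    I_next = alpha * E + (1 - gamma) * I
    R_next = gamma * I + (1 - delta) * R
    return S_next, E_next, I_next, R_next, new_exposed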
def do_patchsim_det_force_step(State_Array, patch_df, params, theta, seeds, vaxs, t):
"""Do step of the deterministic simulation."""
S, E, I, R, V, new_inf = State_Array ## Aliases for the State Array
# seeding for day t (seeding implies S->I)
actual_seed = np.minimum(seeds[t], S[t])
S[t] = S[t] - actual_seed
I[t] = I[t] + actual_seed
# vaccination for day t
actual_vax = np.minimum(vaxs[t] * params["vaxeff"], S[t])
S[t] = S[t] - actual_vax
V[t] = V[t] + actual_vax
N = patch_df.pops.to_numpy()
# Effective beta
beta_j_eff = I[t]
beta_j_eff = beta_j_eff / N
beta_j_eff = beta_j_eff * params["beta"][:, t]
beta_j_eff = np.nan_to_num(beta_j_eff)
# Infection force
inf_force = theta.T.dot(beta_j_eff)
# New exposures during day t
new_inf[t] = inf_force * S[t]
new_inf[t] = np.minimum(new_inf[t], S[t])
# Update to include presymptomatic and asymptomatic terms
S[t + 1] = S[t] - new_inf[t] + params["delta"] * R[t] + params["vaxdelta"] * V[t]
E[t + 1] = new_inf[t] + (1 - params["alpha"]) * E[t]
I[t + 1] = params["alpha"] * E[t] + (1 - params["gamma"]) * I[t]
R[t + 1] = params["gamma"] * I[t] + (1 - params["delta"]) * R[t]
V[t + 1] = (1 - params["vaxdelta"]) * V[t]
def patchsim_step(State_Array, patch_df, configs, params, theta, seeds, vaxs, t, stoch):
"""Do step of the simulation."""
if stoch:
if configs["Model"] == "Mobility":
return do_patchsim_stoch_mobility_step(
State_Array, patch_df, params, theta, seeds, vaxs, t
)
else:
raise ValueError(
"Unknown Model %s for stochastic simulation" % configs["Model"]
)
else:
if configs["Model"] == "Mobility":
return do_patchsim_det_mobility_step(
State_Array, patch_df, params, theta, seeds, vaxs, t
)
elif configs["Model"] == "Force":
return do_patchsim_det_force_step(
State_Array, patch_df, params, theta, seeds, vaxs, t
)
else:
raise ValueError(
"Unknown Model %s for deterministic simulation" % configs["Model"]
)
def epicurves_todf(configs, params, patch_df, State_Array):
"""Convert the epicurve (new infection over time) into a dataframe.
Parameters
----------
configs : dict
The configuration dictionary.
params : dict
A dictionary of model parameters.
patch_df : dict
A dataframe containing populations of patches.
State_Array : 5 tuple
A tuple of disease state information.
Returns
-------
DataFrame
A dataframe containing the new infections.
There is one row per patch.
There is one column per timestep.
"""
new_inf = State_Array[-1]
data = new_inf[:-1, :].T
data = data * float(params["scaling"])
if configs["OutputFormat"] == "Whole":
data = data.round().astype(int)
index = patch_df.id
columns = np.arange(int(configs["Duration"]))
    out_df = pd.DataFrame(index=index, columns=columns, data=data)
    return out_df
from sklearn.metrics import confusion_matrix, f1_score, roc_curve
import numpy as np
import pandas as pd
class analysis:
def __init__(self):
pass
def _getComplexParams(self, abs=False):
"""
Function for extracting the data associated with
the second component of the complex source.
To call:
_getComplexParams(abs)
Parameters:
abs Take the absolute value of the difference
Postcondition:
The flux of the second component, the difference
in phases and depth between the two components,
and the noise value are stored in the data
frame "self.dfComplex_"
The model's predicted probability that
the source is complex is also stored.
"""
# ===================================================
# Determine which sources are complex
# ===================================================
loc = np.where(self.testLabel_ == 1)[0]
# ===================================================
# Retrieve the model's prediction that
# the complex source is complex
# ===================================================
prob = self.testProb_[loc]
# ===================================================
# Extract the flux of the second component
# ===================================================
flux = self.testFlux_[loc]
flux = np.asarray([f[1] for f in flux])
# ===================================================
# Compute the difference in phases
# ===================================================
chi = self.testChi_[loc]
chi = np.asarray([c[1] - c[0] for c in chi])
if abs: chi = np.abs(chi)
# ===================================================
# Compute the difference in Faraday depths
# ===================================================
depth = self.testDepth_[loc]
depth = np.asarray([d[1] - d[0] for d in depth])
if abs: depth = np.abs(depth)
# ===================================================
# Retrieve the noise parameter
# ===================================================
sig = self.testSig_[loc]
# ===================================================
# Convert to pandas series
# ===================================================
        chi = pd.Series(chi, name='chi')
import os
import sys
import json
import gzip
import glob
import pandas as pd
import numpy as np
from nameparser import HumanName
import requests
import ftplib
from lxml import etree
from io import BytesIO
# determine if we are loading from a jupyter notebook (to make pretty progress bars)
if 'ipykernel' in sys.modules:
from tqdm.notebook import tqdm
else:
from tqdm import tqdm
from pyscisci.datasource.readwrite import load_preprocessed_data, load_int, load_float, load_html_str, load_xml_text
from pyscisci.database import BibDataBase
class PubMed(BibDataBase):
"""
Base class for PubMed Medline interface.
Notes
-------
~ PubMed comes as >1000 compressed XML files.
~ The PMID is renamed PublicationId to be consistent with the rest of pySciSci.
~ PubMed does not disambiguate Authors.
~
"""
def __init__(self, path2database='', keep_in_memory=False, global_filter=None, show_progress=True):
self._default_init(path2database, keep_in_memory, global_filter, show_progress)
self.PublicationIdType = int
self.AffiliationIdType = int
self.AuthorIdType = str
def _blank_pubmed_publication(self, PublicationId = 0):
record = {}
record['PublicationId'] = PublicationId
record['Title'] = ''
record['Year'] = 0
record['Volume'] = 0
record['Issue'] = ''
record['Pages'] = ''
record['JournalId'] = ''
record['TeamSize'] = 0
record['Month'] = 1
record['Day'] = 1
record['ISSN'] = ''
record['Doi'] = ''
return record
def _blank_pubmed_author(self):
record = {}
record['PublicationId'] = ''
record['FullName'] = ''
record['FirstName'] = ''
record['LastName'] = ''
record['Affiliations'] = ''
record['AuthorSequence'] = 0
return record
def _save_dataframes(self, ifile, publication_df, paa_df, pub2ref_df, pub2field_df):
publication_df = pd.DataFrame(publication_df)
publication_df['PublicationId'] = publication_df['PublicationId'].astype(int)
publication_df['Year'] = publication_df['Year'].astype(int)
publication_df['Month'] = publication_df['Month'].astype(int)
publication_df['Day'] = publication_df['Day'].astype(int)
publication_df['Volume'] = pd.to_numeric(publication_df['Volume'])
publication_df['TeamSize'] = publication_df['TeamSize'].astype(int)
publication_df.to_hdf( os.path.join(self.path2database, self.path2pub_df, 'publication{}.hdf'.format(ifile)), key = 'pub', mode='w')
paa_df = pd.DataFrame(paa_df)
paa_df['AuthorSequence'] = paa_df['AuthorSequence'].astype(int)
paa_df.to_hdf( os.path.join(self.path2database, self.path2paa_df, 'publicationauthoraffiliation{}.hdf'.format(ifile)), key = 'paa', mode='w')
pub2field_df = pd.DataFrame(pub2field_df, columns = ['PublicationId', 'FieldId'], dtype=int)
pub2field_df.to_hdf( os.path.join(self.path2database, self.path2pub2field_df, 'pub2field{}.hdf'.format(ifile)), key = 'pub2field', mode='w')
pub2ref_df = pd.DataFrame(pub2ref_df, columns = ['CitedPublicationId', 'CitingPublicationId', 'Citation'], dtype=int)
pub2ref_df.to_hdf( os.path.join(self.path2database, self.path2pub2ref_df, 'pub2ref{}.hdf'.format(ifile)), key = 'pub2ref', mode='w')
def preprocess(self, xml_directory = 'RawXML', process_name=True, num_file_lines=10**6, show_progress=True,rewrite_existing = False):
"""
Bulk preprocess of the PubMed raw data.
Parameters
----------
process_name: bool, default True
If True, then when processing the raw file, the package `NameParser <https://nameparser.readthedocs.io/en/latest/>`_
will be used to split author FullNames.
num_file_lines: int, default 10**6
The processed data will be saved into smaller DataFrames, each with `num_file_lines` rows.
show_progress: bool, default True
Show progress with processing of the data.
rewrite_existing: bool, default False
If True, rewrites the files in the data directory
"""
if show_progress:
print("Starting to preprocess the PubMed database.")
for hier_dir_type in [self.path2pub_df, self.path2paa_df, self.path2pub2field_df, self.path2pub2ref_df, self.path2fieldinfo_df]:
if not os.path.exists(os.path.join(self.path2database, hier_dir_type)):
os.mkdir(os.path.join(self.path2database, hier_dir_type))
xmlfiles = sorted([fname for fname in os.listdir(os.path.join(self.path2database, xml_directory)) if '.xml' in fname])
        # read dtd - this takes a while
path2database = self.path2database # remove self to use inside of this class
class DTDResolver(etree.Resolver):
def resolve(self, system_url, public_id, context):
return self.resolve_filename(os.path.join(path2database, system_url), context)
parser = etree.XMLParser(load_dtd=True, resolve_entities=True)
pub2year = {}
fieldinfo = {}
ifile = 0
for xml_file_name in tqdm(xmlfiles, desc='PubMed xml files', leave=True, disable=not show_progress):
# check if the xml file was already parsed
dest_file_name = os.path.join(self.path2database, self.path2pub_df,'publication{}.hdf'.format(ifile))
if not rewrite_existing and os.path.isfile(dest_file_name):
ifile+=1
continue
publication_df = []
paa_df = []
pub2field_df = []
pub2ref_df = []
xmltree = etree.parse(os.path.join(self.path2database, xml_directory, xml_file_name), parser)
all_pubmed_articles = xmltree.findall("/PubmedArticle")
for article_bucket in all_pubmed_articles:
medline = article_bucket.find("MedlineCitation")
# scrape the publication information
PublicationId = load_int(load_xml_text(medline.find('PMID')))
pub_record = self._blank_pubmed_publication(PublicationId)
article = medline.find("Article")
pub_record['Title'] = load_html_str(load_xml_text(article.find('ArticleTitle')))
if article.find('Pagination') == None:
pub_record['Pages'] = None
else:
pub_record['Pages'] = load_html_str(load_xml_text(article.find('Pagination').find("MedlinePgn")))
journal = article.find("Journal")
pub_record['JournalId'] = load_html_str(load_xml_text(journal.find("Title")))
pub_record['Volume'] = load_int(load_xml_text(journal.find("JournalIssue").find("Volume")))
pub_record['Issue'] = load_int(load_xml_text(journal.find("JournalIssue").find("Issue")))
pub_record['ISSN'] = load_html_str(load_xml_text(journal.find("ISSN")))
history = article_bucket.find("PubmedData/History")
if not history is None:
pdate = history.find('PubMedPubDate')
if not pdate is None:
pub_record['Year'] = load_int(load_xml_text(pdate.find("Year")))
pub_record['Month'] = load_int(load_xml_text(pdate.find("Month")))
pub_record['Day'] = load_int(load_xml_text(pdate.find("Day")))
if pub_record['Year'] > 0:
pub2year[PublicationId] = pub_record['Year']
article_ids = article_bucket.find("PubmedData/ArticleIdList")
if article_ids is not None:
doi = article_ids.find('ArticleId[@IdType="doi"]')
pub_record['Doi'] = load_xml_text(doi)
author_list = article.find('AuthorList')
if not author_list is None:
for seq, author in enumerate(author_list.findall('Author')):
author_record = self._blank_pubmed_author()
author_record['PublicationId'] = PublicationId
author_record['FirstName'] = load_html_str(load_xml_text(author.find("ForeName")))
author_record['LastName'] = load_html_str(load_xml_text(author.find("LastName")))
author_record['FullName'] = author_record['FirstName'] + ' ' + author_record['LastName']
if author.find("AffiliationInfo/Affiliation") is not None:
author_record['Affiliations'] = load_html_str(load_xml_text(author.find("AffiliationInfo/Affiliation")))
author_record['Affiliations'] = author_record['Affiliations'].replace("For a full list of the authors' affiliations please see the Acknowledgements section.","")
author_record['AuthorSequence'] = seq+1
paa_df.append(author_record)
pub_record['TeamSize'] = seq + 1
meshterms = medline.find("MeshHeadingList")
if meshterms is not None:
for term in meshterms.getchildren():
ui = term.find("DescriptorName").attrib.get("UI", "")
if len(ui)>0:
pub2field_df.append([PublicationId, ui])
fieldinfo[ui] = [load_xml_text(term.find("DescriptorName")), 'mesh']
chemicals = medline.find("ChemicalList")
if chemicals is not None:
for chemical in chemicals.findall("Chemical"):
ui = chemical.find("NameOfSubstance").attrib.get("UI", "")
if len(ui)>0:
pub2field_df.append([PublicationId, ui])
fieldinfo[ui] = [load_xml_text(chemical.find("NameOfSubstance")), 'chem']
references = article_bucket.find("PubmedData/ReferenceList")
if not references is None:
for ref in references.findall("Reference"):
citation = load_xml_text(ref.find("Citation"))
if not ref.find('ArticleIdList') is None:
pmid = load_int(load_xml_text(ref.find('ArticleIdList').find('ArticleId[@IdType="pubmed"]')))
else:
pmid = ""
pub2ref_df.append([PublicationId, pmid, citation])
publication_df.append(pub_record)
self._save_dataframes(ifile, publication_df, paa_df, pub2ref_df, pub2field_df)
ifile += 1
# if rewriting
dest_file_name = os.path.join(self.path2database, self.path2fieldinfo,'fieldinfo.hdf')
if rewrite_existing:
# save field info dictionary
mesh_id_df_list = list(fieldinfo.values())
for i, j in enumerate(fieldinfo.keys()):
mesh_id_df_list[i].insert(0, j)
fieldinfo = pd.DataFrame(mesh_id_df_list, columns = ['FieldId', 'FieldName', 'FieldType'], dtype=int)
fieldinfo.to_hdf( os.path.join(self.path2database, self.path2fieldinfo, 'fieldinfo.hdf'), key = 'fieldinfo', mode='w')
with gzip.open(os.path.join(self.path2database, 'pub2year.json.gz'), 'w') as outfile:
outfile.write(json.dumps(pub2year).encode('utf8'))
def download_from_source(self, source_url='ftp.ncbi.nlm.nih.gov', dtd_url = 'https://dtd.nlm.nih.gov/ncbi/pubmed/out/pubmed_190101.dtd',
rewrite_existing = False, show_progress=True):
"""
Download the Pubmed raw xml files and the dtd formating information from [PubMed](https://www.nlm.nih.gov/databases/download/pubmed_medline.html).
1. pubmed/baseline - the directory containing the baseline compressed xml files
2. pubmed_190101.dtd - the dtd containing xml syntax
The files will be saved to the path specified by `path2database` into RawXML.
Parameters
----------
source_url: str, default 'ftp.ncbi.nlm.nih.gov'
The base url for the ftp server from which to download.
dtd_url: str, default 'pubmed_190101.dtd'
The url for the dtd file.
show_progress: bool, default True
Show progress with processing of the data.
"""
FTP_USER = "anonymous"
FTP_PASS = ""
ftp = ftplib.FTP(source_url, FTP_USER, FTP_PASS)
ftp.encoding = "utf-8"
ftp.cwd("pubmed/baseline/")
files2download = sorted([fname for fname in ftp.nlst() if '.xml.gz' in fname and not '.md5' in fname])
if not os.path.exists(os.path.join(self.path2database, 'RawXML')):
os.mkdir(os.path.join(self.path2database, 'RawXML'))
if not rewrite_existing:
files_already_downloaded = os.listdir(os.path.join(self.path2database, 'RawXML'))
files2download = [fname for fname in files2download if not fname in files_already_downloaded]
for xml_file_name in tqdm(files2download, disable=not show_progress):
with open(os.path.join(self.path2database, 'RawXML', xml_file_name), "wb") as outfile:
ftp.retrbinary('RETR %s' % xml_file_name, outfile.write)
with open(os.path.join(self.path2database, 'RawXML', 'pubmed_190101.dtd'), 'w') as outfile:
outfile.write(requests.get(dtd_url).content.decode('utf-8'))
ftp.quit()
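    def _example_full_workflow(self):
        """Illustrative sketch only, not part of the original class: a typical
        end-to-end run, assuming enough disk space for the ~1000 baseline files."""
        # 1. fetch the compressed baseline xml files and the dtd
        self.download_from_source(rewrite_existing=False, show_progress=True)
        # 2. parse every xml file into publication / author / reference / field frames
        self.preprocess(xml_directory='RawXML', rewrite_existing=False)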
def parse_affiliations(self, preprocess = False):
        raise NotImplementedError("PubMed articles are stored with all information in an xml file. Run preprocess to parse the file.")
    def parse_publications(self, xml_directory='RawXML', preprocess=True, num_file_lines=10**7, rewrite_existing=False, show_progress=True):
"""
Parse the PubMed publication raw data.
Parameters
----------
preprocess: bool, default True
Save the processed data in new DataFrames.
process_name: bool, default True
If True, then when processing the raw file, the package `NameParser <https://nameparser.readthedocs.io/en/latest/>`_
will be used to split author FullNames.
num_file_lines: int, default 5*10**6
The processed data will be saved into smaller DataFrames, each with `num_file_lines` rows.
show_progress: bool, default True
Show progress with processing of the data.
Returns
----------
DataFrame
Publication metadata DataFrame.
"""
# process publication files through xml
if preprocess:
if not os.path.exists(os.path.join(self.path2database, 'publication')):
os.mkdir(os.path.join(self.path2database, 'publication'))
xmlfiles = sorted([fname for fname in os.listdir(os.path.join(self.path2database, xml_directory)) if '.xml' in fname])
            # read dtd - this takes a while
path2database = self.path2database # remove self to use inside of this class
class DTDResolver(etree.Resolver):
def resolve(self, system_url, public_id, context):
return self.resolve_filename(os.path.join(path2database, system_url), context)
parser = etree.XMLParser(load_dtd=True, resolve_entities=True)
ifile = 0
for xml_file_name in tqdm(xmlfiles, desc='PubMed publication xml files', leave=True, disable=not show_progress):
# check if the xml file was already parsed
                dest_file_name = os.path.join(self.path2database, self.path2pub_df, 'publication{}.hdf'.format(ifile))
if not rewrite_existing and os.path.isfile(dest_file_name):
ifile+=1
continue
                publication_df = []
                xmltree = etree.parse(os.path.join(self.path2database, xml_directory, xml_file_name), parser)
                all_pubmed_articles = xmltree.findall("/PubmedArticle")
for article_bucket in all_pubmed_articles:
medline = article_bucket.find("MedlineCitation")
# scrape the publication information
PublicationId = load_int(load_xml_text(medline.find('PMID')))
pub_record = self._blank_pubmed_publication(PublicationId)
article = medline.find("Article")
pub_record['Title'] = load_html_str(load_xml_text(article.find('ArticleTitle')))
if article.find('Pagination') == None:
pub_record['Pages'] = None
else:
pub_record['Pages'] = load_html_str(load_xml_text(article.find('Pagination').find("MedlinePgn")))
journal = article.find("Journal")
pub_record['JournalId'] = load_html_str(load_xml_text(journal.find("Title")))
pub_record['Volume'] = load_int(load_xml_text(journal.find("JournalIssue").find("Volume")))
pub_record['Issue'] = load_int(load_xml_text(journal.find("JournalIssue").find("Issue")))
pub_record['ISSN'] = load_html_str(load_xml_text(journal.find("ISSN")))
history = article_bucket.find("PubmedData/History")
if not history is None:
pdate = history.find('PubMedPubDate')
if not pdate is None:
pub_record['Year'] = load_int(load_xml_text(pdate.find("Year")))
pub_record['Month'] = load_int(load_xml_text(pdate.find("Month")))
pub_record['Day'] = load_int(load_xml_text(pdate.find("Day")))
article_ids = article_bucket.find("PubmedData/ArticleIdList")
if article_ids is not None:
doi = article_ids.find('ArticleId[@IdType="doi"]')
pub_record['Doi'] = load_xml_text(doi)
author_list = article.find('AuthorList')
if not author_list is None:
pub_record['TeamSize'] = len(author_list.findall('Author'))
publication_df.append(pub_record)
# save publication dataframe
publication_df = pd.DataFrame(publication_df)
publication_df['PublicationId'] = publication_df['PublicationId'].astype(int)
publication_df['Year'] = publication_df['Year'].astype(int)
publication_df['Month'] = publication_df['Month'].astype(int)
publication_df['Day'] = publication_df['Day'].astype(int)
publication_df['Volume'] = pd.to_numeric(publication_df['Volume'])
publication_df['TeamSize'] = publication_df['TeamSize'].astype(int)
publication_df.to_hdf( os.path.join(self.path2database, self.path2pub_df, 'publication{}.hdf'.format(ifile)), key = 'pub', mode='w')
## load publication dataframe into a large file
pub_files_list = glob.glob(os.path.join(self.path2database, self.path2pub_df) + 'publication*.hdf')
pub_df = pd.DataFrame()
print("Parsing files...")
        for tmp_pub_df in tqdm(pub_files_list, desc='PubMed publication files', leave=True, disable=not show_progress):
pub_df = pub_df.append(pd.read_hdf(tmp_pub_df), ignore_index = True)
return pub_df
def parse_references(self, xml_directory='RawXML',preprocess = True, num_file_lines=10**7, rewrite_existing=False,show_progress=True):
"""
Parse the PubMed References raw data.
Parameters
----------
preprocess: bool, default True
Save the processed data in new DataFrames.
process_name: bool, default True
If True, then when processing the raw file, the package `NameParser <https://nameparser.readthedocs.io/en/latest/>`_
will be used to split author FullNames.
num_file_lines: int, default 5*10**6
The processed data will be saved into smaller DataFrames, each with `num_file_lines` rows.
show_progress: bool, default True
Show progress with processing of the data.
Returns
----------
DataFrame
Citations DataFrame.
"""
# process author files through xml
if preprocess:
if not os.path.exists(os.path.join(self.path2database, 'pub2ref')):
os.mkdir(os.path.join(self.path2database, 'pub2ref'))
xmlfiles = sorted([fname for fname in os.listdir(os.path.join(self.path2database, xml_directory)) if '.xml' in fname])
            # read dtd - this takes a while
path2database = self.path2database # remove self to use inside of this class
class DTDResolver(etree.Resolver):
def resolve(self, system_url, public_id, context):
return self.resolve_filename(os.path.join(path2database, system_url), context)
parser = etree.XMLParser(load_dtd=True, resolve_entities=True)
ifile = 0
for xml_file_name in tqdm(xmlfiles, desc='PubMed reference xml files', leave=True, disable=not show_progress):
xmltree = etree.parse(os.path.join(self.path2database, xml_directory, xml_file_name), parser)
# check if the xml file was already parsed
dest_file_name = os.path.join(self.path2database, self.path2pub2ref_df,'pub2ref{}.hdf'.format(ifile))
if not rewrite_existing and os.path.isfile(dest_file_name):
ifile+=1
continue
pub2ref_df = []
all_pubmed_articles = xmltree.findall("/PubmedArticle")
for article_bucket in all_pubmed_articles:
medline = article_bucket.find("MedlineCitation")
# scrape the publication information
PublicationId = load_int(load_xml_text(medline.find('PMID')))
references = article_bucket.find("PubmedData/ReferenceList")
if not references is None:
for ref in references.findall("Reference"):
citation = load_xml_text(ref.find("Citation"))
if not ref.find('ArticleIdList') is None:
pmid = load_int(load_xml_text(ref.find('ArticleIdList').find('ArticleId[@IdType="pubmed"]')))
else:
pmid = ""
pub2ref_df.append([PublicationId, pmid, citation])
# save file
pub2ref_df = pd.DataFrame(pub2ref_df, columns = ['CitedPublicationId', 'CitingPublicationId', 'Citation'], dtype=int)
pub2ref_df.to_hdf( os.path.join(self.path2database, self.path2pub2ref_df, 'pub2ref{}.hdf'.format(ifile)), key = 'pub2ref', mode='w')
# load the citations into a large dataframe
pub2ref_files = glob.glob(os.path.join(self.path2database, self.path2pub2ref_df)+ 'pub2ref*.hdf')
pub2ref_df = pd.DataFrame()
print("parsing citation data...")
for pub2ref_tmp in tqdm(pub2ref_files,desc='PubMed citation xml files', leave=True, disable=not show_progress):
            pub2ref_df = pub2ref_df.append(pd.read_hdf(pub2ref_tmp), ignore_index=True)
return pub2ref_df
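    def _example_citation_counts(self):
        """Illustrative sketch only, not part of the original class: load the parsed
        reference frames (assumes preprocess or parse_references already ran) and
        count rows per id in the CitedPublicationId column as saved above."""
        pub2ref_df = self.parse_references(preprocess=False)
        counts = pub2ref_df.groupby('CitedPublicationId').size()
        return counts.sort_values(ascending=False)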
    def parse_publicationauthoraffiliation(self, xml_directory='RawXML', preprocess=True, num_file_lines=10**7, rewrite_existing=False, show_progress=True):
"""
Parse the PubMed publication-author raw data.
Parameters
----------
preprocess: bool, default True
Save the processed data in new DataFrames.
process_name: bool, default True
If True, then when processing the raw file, the package `NameParser <https://nameparser.readthedocs.io/en/latest/>`_
will be used to split author FullNames.
num_file_lines: int, default 5*10**6
The processed data will be saved into smaller DataFrames, each with `num_file_lines` rows.
show_progress: bool, default True
Show progress with processing of the data.
Returns
----------
DataFrame
Publication-Author DataFrame.
"""
# process author files through xml
if preprocess:
if not os.path.exists(os.path.join(self.path2database, 'publicationauthoraffiliation')):
os.mkdir(os.path.join(self.path2database, 'publicationauthoraffiliation'))
xmlfiles = sorted([fname for fname in os.listdir(os.path.join(self.path2database, xml_directory)) if '.xml' in fname])
            # read dtd - this takes a while
path2database = self.path2database # remove self to use inside of this class
class DTDResolver(etree.Resolver):
def resolve(self, system_url, public_id, context):
return self.resolve_filename(os.path.join(path2database, system_url), context)
parser = etree.XMLParser(load_dtd=True, resolve_entities=True)
ifile = 0
for xml_file_name in tqdm(xmlfiles, desc='PubMed author xml files', leave=True, disable=not show_progress):
# check if the xml file was already parsed
dest_file_name = os.path.join(self.path2database, self.path2paa_df,'publicationauthoraffiliation{}.hdf'.format(ifile))
if not rewrite_existing and os.path.isfile(dest_file_name):
ifile+=1
continue
                paa_df = []
                xmltree = etree.parse(os.path.join(self.path2database, xml_directory, xml_file_name), parser)
                all_pubmed_articles = xmltree.findall("/PubmedArticle")
for article_bucket in all_pubmed_articles:
medline = article_bucket.find("MedlineCitation")
# scrape the publication information
PublicationId = load_int(load_xml_text(medline.find('PMID')))
                    article = medline.find("Article")
                    author_list = article.find('AuthorList')
if not author_list is None:
for seq, author in enumerate(author_list.findall('Author')):
author_record = self._blank_pubmed_author()
author_record['PublicationId'] = PublicationId
author_record['FirstName'] = load_html_str(load_xml_text(author.find("ForeName")))
author_record['LastName'] = load_html_str(load_xml_text(author.find("LastName")))
author_record['FullName'] = author_record['FirstName'] + ' ' + author_record['LastName']
if author.find("AffiliationInfo/Affiliation") is not None:
author_record['Affiliations'] = load_html_str(load_xml_text(author.find("AffiliationInfo/Affiliation")))
author_record['Affiliations'] = author_record['Affiliations'].replace("For a full list of the authors' affiliations please see the Acknowledgements section.","")
author_record['AuthorSequence'] = seq+1
paa_df.append(author_record)
paa_df = pd.DataFrame(paa_df)
paa_df['AuthorSequence'] = paa_df['AuthorSequence'].astype(int)
paa_df.to_hdf( os.path.join(self.path2database, self.path2paa_df, 'publicationauthoraffiliation{}.hdf'.format(ifile)), key = 'paa', mode='w')
## load publication author dataframe into a large file
paa_files_list = glob.glob(os.path.join(self.path2database, self.path2paa_df) + 'publicationauthoraffiliation*.hdf')
paa_df = pd.DataFrame()
print("Parsing files...")
for tmp_paa_df in tqdm(paa_files_list, desc='PubMed author files', leave=True, disable=not show_progress):
            paa_df = paa_df.append(pd.read_hdf(tmp_paa_df), ignore_index=True)
from abc import ABCMeta
from typing import Optional, List, Union, Dict, AnyStr
import pandas as pd
class Dataset(metaclass=ABCMeta):
fixes: List = []
data: Optional[Union[Dict, pd.DataFrame, pd.Series]] = None
sources: Union[AnyStr, Dict] = None
def register_fixes(self):
pass
def get(self, **kwargs) -> 'Dataset':
...
return self
def clean(self) -> 'Dataset':
...
return self
def to_df(self) -> pd.DataFrame:
        if isinstance(self.data, pd.DataFrame):
return self.data
else: # assume it's something concatable
            return pd.concat(self.data)
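# Hedged sketch (not part of the original module): a minimal concrete Dataset backed
# by a single CSV path; the source path and cleaning rules are assumptions.
class CsvDataset(Dataset):
    def __init__(self, source: AnyStr):
        self.sources = source
    def get(self, **kwargs) -> 'Dataset':
        self.data = pd.read_csv(self.sources, **kwargs)
        return self
    def clean(self) -> 'Dataset':
        if isinstance(self.data, pd.DataFrame):
            self.data = self.data.dropna(how='all').drop_duplicates()
        return self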
from lifelines.datasets import load_waltons
from lifelines import KaplanMeierFitter
from lifelines.utils import median_survival_times
from lifelines.statistics import logrank_test,multivariate_logrank_test
from lifelines import CoxPHFitter
#from lifelines.plotting import add_at_risk_counts
from my_plotting import add_at_risk_counts
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sys
from matplotlib.backends.backend_pdf import PdfPages
#from sksurv.linear_model import CoxPHSurvivalAnalysis as cpa
#sys.exit()
#%%
cluster_result = pd.read_csv("output/dna_rna_methy_cluster_result.csv")
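# Hedged sketch (not part of the original script): fit and plot one Kaplan-Meier
# curve per cluster. The column names "time", "event" and "cluster" are assumptions
# about cluster_result's layout, not taken from the source.
def plot_km_by_cluster(df, time_col="time", event_col="event", group_col="cluster"):
    kmf = KaplanMeierFitter()
    ax = None
    for label, grp in df.groupby(group_col):
        kmf.fit(grp[time_col], event_observed=grp[event_col], label=str(label))
        ax = kmf.plot(ax=ax)  # overlay all clusters on one axis
    plt.ylabel("survival probability")
    return ax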
# -*- coding:utf-8 -*-
##############################################################
# Created Date: Wednesday, September 2nd 2020
# Contact Info: <EMAIL>
# Author/Copyright: Mr. <NAME>
##############################################################
import random, urllib3, json, requests, math, plotly
import pandas as pd
import datetime as dt
import numpy as np
import plotly.figure_factory as ff
from time import sleep
from plotly import io as pio
from deap import algorithms
from datetime import datetime
import random,datetime,plotly,math
from itertools import combinations
from deap import base, creator, tools
import random, numpy,sys
import time
def generate3tables():
Table_SchedulingInfo_New = pd.DataFrame()
Table_Changeovers_New = pd.DataFrame()
Table_ShiftCalendar_New = pd.DataFrame()
Table_SchedulingInfo_New_Extended = pd.DataFrame()
LenOfSimulatedData = 100
DataRange_StartOfStart_EndOfStart = 10
DataDiff_StartDate_EndDate = 7
ProcessingTime_Max = 300
ProcessingTime_Min = 1
ChangeOverTime_Max = 30
ChangeOverTime_Min = 5
ProductionLine_Count = 4
Family_Count = 6
Priority_Count = 3
Families_List = []
ProductionLines_List = []
Priorities_List = []
start_date = []
end_date = []
processing_time = []
family_type = []
ProductionLine = []
workorder_num = []
changeover_time = []
Priority = []
Families_List = [np.append(Families_List,"Family_"+str(i+1)).tolist() for i in range (Family_Count )]
ProductionLines_List = [np.append(ProductionLines_List, int(i+1)).tolist() for i in range (ProductionLine_Count)]
Priorities_List = [np.append(Priorities_List, str(i+1)).tolist() for i in range (Priority_Count)]
newFamily_List = []
for fly in Families_List:
newFamily_List = np.append(newFamily_List, fly)
Families_List = newFamily_List
# Generate the Lists of Families_List and Production Lines
start = datetime.datetime.strptime(datetime.datetime.today().strftime("%Y-%m-%d"), "%Y-%m-%d")
date_list = [(start + datetime.timedelta(days=x)).strftime("%Y-%m-%d") for x in range(0, DataRange_StartOfStart_EndOfStart)]
for i in range(LenOfSimulatedData):
start_date = np.append(start_date, random.choice(date_list))
end_date = np.append(end_date,(datetime.datetime.strptime(random.choice(date_list), '%Y-%m-%d')+
datetime.timedelta(days=DataDiff_StartDate_EndDate)).strftime("%Y-%m-%d"))
processing_time = np.append(processing_time,random.randint(ProcessingTime_Min,ProcessingTime_Max))
family_type = np.append(family_type, random.choice(Families_List))
ProductionLine = np.append(ProductionLine, random.choice(ProductionLines_List))
Priority = np.append(Priority, random.choice(Priorities_List))
workorder_num = np.append(workorder_num, i)
for j in range(Family_Count):
changeover_time = np.append(changeover_time, random.randint(ChangeOverTime_Min,ChangeOverTime_Max))
Table_SchedulingInfo_New["Start_date"] = start_date
Table_SchedulingInfo_New["Due_date"] = end_date
Table_SchedulingInfo_New["Processing_time"] = processing_time
Table_SchedulingInfo_New["Family"] = family_type
Table_SchedulingInfo_New["ProductionLine"] = ProductionLine
Table_SchedulingInfo_New["Priority"] = Priority
Table_SchedulingInfo_New["ChangeoverSort"] = Table_SchedulingInfo_New["Family"]
Table_SchedulingInfo_New["WorkOrderNum"] = workorder_num
Lines = [i+1 for i in range(ProductionLine_Count)]
Possible_Com_Of_Lines = sum([list(map(list, combinations(Lines, i))) for i in range(len(Lines) + 1)], [])
del Possible_Com_Of_Lines[0]
WO_Num = 0
for index, row in Table_SchedulingInfo_New.iterrows():
OpLines = random.choice(Possible_Com_Of_Lines)
Option_Lines_Len = len(OpLines)
for i in range(Option_Lines_Len):
Table_SchedulingInfo_New_Extended = Table_SchedulingInfo_New_Extended.append({'OptionalLine': OpLines[i],
'BasicStartDate': row.Start_date,
'DeliveryDate':row.Due_date,
'ProcessingTimeMins':row.Processing_time+i,
'FamilyName':row.Family,
'ProductionLine': row.ProductionLine,
'WorkOrderNum': row.WorkOrderNum,
'MaterialPriority': row.ProductionLine,
'ChangeoverSort':row.ChangeoverSort,
'Priority':Priority}, ignore_index=True)
WO_Num += 1
Table_SchedulingInfo_New["Resource"] = Table_SchedulingInfo_New["Family"]
Table_SchedulingInfo_New["Task"] = Table_SchedulingInfo_New["ProductionLine"]
Table_SchedulingInfo_New["Start"] = Table_SchedulingInfo_New["Start_date"]
Table_SchedulingInfo_New["Finish"] = Table_SchedulingInfo_New["Due_date"]
color_dict = dict(zip(Table_SchedulingInfo_New.Resource.unique(),['rgb({},{},{})'.format(i[0],i[1],i[2])
for i in list(np.random.randint(255, size = (len(Table_SchedulingInfo_New.Resource.unique()),3)))]))
fig = ff.create_gantt(Table_SchedulingInfo_New.to_dict('records'), colors = color_dict, index_col='Resource', show_colorbar=True, group_tasks=True)
Table_Changeovers_New['ToChangeOver'] = Families_List
Table_Changeovers_New['MaxChangeOverTimeMin'] = changeover_time
# print(Table_Changeovers_New.ToChangeOver)
for date in date_list:
Table_ShiftCalendar_New = Table_ShiftCalendar_New.append({"ProductionDate":date,"ShiftAStart": "05:15:00", "ShiftAEnd":"15:20:00", "ShiftBStart":"15:30:00","ShiftBEnd":"01:35:00"}, ignore_index=True)
# Table_SchedulingInfo_New_Extended.to_csv('Table_SchedulingInfo_New.csv',index=False)
# Table_Changeovers_New.to_csv('Table_Changeovers_New.csv',index=False)
# Table_ShiftCalendar_New.to_csv('Table_ShiftCalendar_New.csv',index=False)
return Table_SchedulingInfo_New_Extended,Table_Changeovers_New,Table_ShiftCalendar_New
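# Hedged usage sketch (not part of the original script): generate one synthetic
# scheduling problem and inspect the three tables.
def example_generate_tables():
    scheduling_info, changeovers, shift_calendar = generate3tables()
    print(scheduling_info.shape, changeovers.shape, shift_calendar.shape)
    print(scheduling_info[['WorkOrderNum', 'OptionalLine', 'ProcessingTimeMins']].head())
    return scheduling_info, changeovers, shift_calendar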
class ClosedLoopScheduling:
def __init__(self):
# plotly.io.orca.config.executable = '/Users/roche/anaconda3/pkgs/plotly-orca-1.3.1-1/orca.cmd'
# plotly.io.orca.config.save()
self.__Import_And_CleanData() # This line would get the intial solution, and optional lines to chose for each WO
self. HardConstraintPenalty = 150
def __len__(self):
return self.WO_SIZE
def __Import_And_CleanData(self):
JobID = random.randint(1,1000)
self.Table_SchedulingInfo,self.Table_ChangeOverInfo,self.Table_CalendarInfo = generate3tables()
self.Table_SchedulingInfo['ProductionLineCode_Cap'] = self.Table_SchedulingInfo.OptionalLine#.apply(lambda x: x.split('')[-1])
self.Table_CalendarInfo["ProductionDate_ShiftA"] = pd.to_datetime(self.Table_CalendarInfo["ProductionDate"] +' '+ self.Table_CalendarInfo["ShiftAStart"])
self.Table_CalendarInfo['ShiftAStart'] = pd.to_datetime(self.Table_CalendarInfo['ShiftAStart'] )
self.Table_CalendarInfo['ShiftAEnd'] = pd.to_datetime(self.Table_CalendarInfo['ShiftAEnd'] )
self.Table_CalendarInfo["ShiftA_deltaT_minutes"] = ((self.Table_CalendarInfo["ShiftAEnd"] - self.Table_CalendarInfo["ShiftAStart"]).dt.total_seconds())/60
self.Table_CalendarInfo["ShiftB_deltaT_minutes"] = self.Table_CalendarInfo["ShiftA_deltaT_minutes"]
FamilyGp_DupCount = self.Table_SchedulingInfo.groupby('FamilyName').size().sort_values(ascending=False).to_frame('DuplicateCount') # save the results in a dataframe [FamilyName, DuplicateCount]
self.Schedule_Info =pd.DataFrame()
for FamulyNameGroupItem, _ in FamilyGp_DupCount.iterrows():
df_grouped = self.Table_SchedulingInfo.loc[self.Table_SchedulingInfo.FamilyName == FamulyNameGroupItem] # df_grouped.loc[~df_grouped.ChangeoverSort.isin(self.Table_ChangeOverInfo.ToChangeOver.tolist()), 'ChangeoverSort'] = '1020Other'
self.Schedule_Info = self.Schedule_Info.append(df_grouped, ignore_index = True)
self.Schedule_Info.assign(MaxChangeOverTimeMin="")
        # 5.1. Add the maximum changeover time of each family to the self.Schedule_Info table
for i, val in enumerate(self.Table_ChangeOverInfo.ToChangeOver.tolist()):
self.Schedule_Info.loc[self.Schedule_Info.ChangeoverSort == val, 'MaxChangeOverTimeMin'] = self.Table_ChangeOverInfo.MaxChangeOverTimeMin.iloc[i]
# ----------------------- 6. Create a completely new table to save the scheduled work (!!! 6.1. Sort WPT based on Family Group ) - #
self.minor_ChangeOver_Mins = 2.53
## print('#---------------- 6.1. Sort WPT based on Family Group ---------------#')
self.Schedule_Info["Optional_Lines"] = self.Schedule_Info.ProductionLineCode_Cap
# # ==================================== Objective Function Construction ======================================= #
self.Unique_WO_Array = self.Schedule_Info['WorkOrderNum'].unique()
self.WO_SIZE = len(self.Unique_WO_Array)
Unique_WO_Df = pd.DataFrame({'WorkOrderNum':self.Unique_WO_Array})
## print("# 2D Matrics for each work order:O_Lines,P_Times,WP_Times ")
O_Lines = [list((self.Schedule_Info['Optional_Lines'].loc[self.Schedule_Info['WorkOrderNum'] == x['WorkOrderNum']])) for _, x in Unique_WO_Df.iterrows()]
P_Times = [list((self.Schedule_Info['ProcessingTimeMins'].loc[self.Schedule_Info['WorkOrderNum'] == x['WorkOrderNum']])) for _, x in Unique_WO_Df.iterrows()]
## print("# 2D-1. Zaro paCurrent_Starting to make sure all optionals lines have same number of lines")
self.O_Lines_Array = np.array([i + [0]*(len(max(O_Lines, key=len))-len(i)) for i in O_Lines])
self.P_Times_Array = np.array([i + [0]*(len(max(P_Times, key=len))-len(i)) for i in P_Times])
## print("# 2D-2. If an element equalt to 0, relace with previous value in the row")
for idx, item in np.ndenumerate(self.O_Lines_Array):
if item == 0:
self.O_Lines_Array[(idx[0],idx[1])] = self.O_Lines_Array[(idx[0],idx[1]-1)]
self.P_Times_Array[(idx[0],idx[1])] = self.P_Times_Array[(idx[0],idx[1]-1)]
## print("# 1D Matrics for each work order:self.CV_Times, self.CV_Sorts, self.Fmily_T, self.BStart_Dates, self.Del_Dates")
self.Schedule_Info['BasicEndDate'] = self.Schedule_Info['DeliveryDate']
self.CV_Times = np.array([list(set(self.Schedule_Info['MaxChangeOverTimeMin'].loc[self.Schedule_Info['WorkOrderNum'] == x['WorkOrderNum']])) for _, x in Unique_WO_Df.iterrows()])
self.CV_Sorts = np.array([list(set(self.Schedule_Info['ChangeoverSort'].loc[self.Schedule_Info['WorkOrderNum'] == x['WorkOrderNum']])) for _, x in Unique_WO_Df.iterrows()])
self.Fmily_T = np.array([list(set(self.Schedule_Info['FamilyName'].loc[self.Schedule_Info['WorkOrderNum'] == x['WorkOrderNum']])) for _, x in Unique_WO_Df.iterrows()])
self.BStart_Dates = np.array([list(set(self.Schedule_Info['BasicStartDate'].loc[self.Schedule_Info['WorkOrderNum'] == x['WorkOrderNum']])) for _, x in Unique_WO_Df.iterrows()])
def Run_ToGetAllLines_Objectives(self,Chromosome_Solution):
Performances_df = pd.DataFrame()
line_viol = self.CandidateViolation(Chromosome_Solution)
for Line_Code in range(1,5):
self.__Objectives_Of_Each_Line(Line_Code,Chromosome_Solution)
Performances_df = Performances_df.append(pd.DataFrame({'LineName':'Line '+str(Line_Code),
'MakeSpanTime':(self.Line_Total_PT+self.Line_Total_CV_Times)/(60)},
index=[0]), ignore_index = True)
MkSpan_Dif = round(Performances_df['MakeSpanTime'].max() - Performances_df['MakeSpanTime'].min(),2)
return 150*line_viol+50*(MkSpan_Dif/24)
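    def _example_deap_wiring(self, population_size=50, generations=10):
        """Illustrative sketch only, not part of the original class: wire the
        objective above into a minimal DEAP GA. Each gene is a line code in 1..4."""
        creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
        creator.create("Individual", list, fitness=creator.FitnessMin)
        toolbox = base.Toolbox()
        toolbox.register("attr_line", random.randint, 1, 4)
        toolbox.register("individual", tools.initRepeat, creator.Individual,
                         toolbox.attr_line, n=len(self))
        toolbox.register("population", tools.initRepeat, list, toolbox.individual)
        toolbox.register("evaluate", lambda ind: (self.Run_ToGetAllLines_Objectives(ind),))
        toolbox.register("mate", tools.cxTwoPoint)
        toolbox.register("mutate", tools.mutUniformInt, low=1, up=4, indpb=0.1)
        toolbox.register("select", tools.selTournament, tournsize=3)
        pop = toolbox.population(n=population_size)
        pop, _ = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2,
                                     ngen=generations, verbose=False)
        return tools.selBest(pop, 1)[0]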
def Final_Run(self,Chromosome_Solution):
print(Chromosome_Solution)
Performances_df = pd.DataFrame()
PT_Mini = 0
line_viol = self.CandidateViolation(Chromosome_Solution)
self.OutputData_Of_Lines =pd.DataFrame()
OutputData =pd.DataFrame()
Unique_Lines = self.Schedule_Info.Optional_Lines.nunique()
for Line_Code in range(1,Unique_Lines+1):
Schedule_Of_The_Line,Start,Finish = self.__FinalRun_Obj_Of_Each_Line_SaveData(Line_Code,Chromosome_Solution)
self.OutputData_Of_Lines = self.OutputData_Of_Lines.append(Schedule_Of_The_Line)
Schedule_Of_The_Line.Start = Start
Schedule_Of_The_Line.Finish = Finish
OutputData = OutputData.append(Schedule_Of_The_Line)
Performances_df = Performances_df.append(pd.DataFrame({'LineName':'Line '+str(Line_Code),
'FamilyAtDayStartFactor':1,
'ProcessingTime':self.Line_Total_PT/(60),
'ChangeoverTime':self.Line_Total_CV_Times/(60),
'MakeSpanTime':(self.Line_Total_PT+self.Line_Total_CV_Times)/(60)},
index=[0]), ignore_index = True)
MkSpan_Dif = round(Performances_df['MakeSpanTime'].max() - Performances_df['MakeSpanTime'].min(),2)
PT_Mini =round(Performances_df['ProcessingTime'].sum(),2)
CVTimes_Mini =round(Performances_df['ChangeoverTime'].sum(),2)
Performances_df = Performances_df.append(pd.DataFrame({'LineName':'AllLines',
'FamilyAtDayStartFactor':1,
'ProcessingTime':PT_Mini,
'ChangeoverTime':CVTimes_Mini,
'MakeSpanTime':(PT_Mini+CVTimes_Mini)},
index=[0]), ignore_index = True)
single_Objective = 150*line_viol+50*(MkSpan_Dif/24)
OutputData = OutputData.rename(columns={"Task": "LineName", "Start":"BasicStartDate", "Finish":"BasicEndDate", "Resource":"Family"})
def Plot_Gantt_Chart(self):
# ------------------------------- Ploting the results by using Plotly Gantt Chart ---------------------------------------------- #
print(f'{"The shape of the OutputData_Of_Lines: "}{self.OutputData_Of_Lines.shape[0]}')
color_dict = dict(zip(self.OutputData_Of_Lines.Resource.unique(),['rgb({},{},{})'.format(i[0],i[1],i[2]) for i in list(np.random.randint(255, size = (len(self.OutputData_Of_Lines.Resource.unique()),3)))]))
fig = ff.create_gantt(self.OutputData_Of_Lines.to_dict(orient = 'records'),
colors = color_dict,
index_col = "Resource",
title = "Genetic Algorithm based Optimization",
show_colorbar = True,
bar_width = 0.3,
showgrid_x = False,
showgrid_y = True,
show_hover_fill=True)
fig_html = pio.to_html(fig)
# fig.show()
# print(fig_html)
# fig.write_image(r"CLS_GanttChart.png")
return fig_html
def CandidateViolation(self,Chromosome_Solution):
line_viol = 0
for idx, item in np.ndenumerate(Chromosome_Solution):
if item not in self.O_Lines_Array[idx[0]]:
line_viol += 1
return line_viol
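    # Illustration of the violation count above (toy values, not from the real data): if
    # O_Lines_Array = [[1, 2], [3, 4]] lists the allowed lines per work order, the
    # chromosome [2, 1] scores one violation, because line 1 is not an option for order 1.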
def __Objectives_Of_Each_Line(self,Line_Code,Chromosome_Solution):
## print('#---------------- 6.2. Reset the initial start and end -------------#')
Line_Curr_End = self.Table_CalendarInfo.ProductionDate_ShiftA + pd.to_timedelta(self.minor_ChangeOver_Mins*60, unit='s')
Line_Curr_End = Line_Curr_End[0]
Shift_AandB_Period = self.Table_CalendarInfo.ShiftA_deltaT_minutes.values[0] + self.Table_CalendarInfo.ShiftB_deltaT_minutes.values[0]
ShiftBAPeriod = 220 # in mins (3 hours 40 minutes)
self.Line_CumMakeSpan = 0
self.Line_Total_PT = 0
self.Line_Total_CV_Times = 0
Line_WO_Idxes = []
Chromosome_Solution = np.array(Chromosome_Solution)
for ii, item in np.ndenumerate(np.array(np.where(Chromosome_Solution == Line_Code))):
Line_WO_Idxes.append(item)
        # 2D Matrices
Line_CV_Sorts = self.CV_Sorts[(Line_WO_Idxes),0]
self.Line_Families = self.Fmily_T[(Line_WO_Idxes),0]
Line_CV_Times = self.CV_Times[(Line_WO_Idxes),0]
## print("6.2. Use all lines WorkOrder indexes to find total CV time and Family Sort Change ")
Previous_CV = Line_CV_Sorts[0]
self.CV_Times_Each_Order = []
for CV_idx, CV_ele in np.ndenumerate(Line_CV_Sorts):
if CV_ele != Previous_CV:
self.Line_Total_CV_Times += Line_CV_Times[CV_idx]
self.CV_Times_Each_Order = np.append(self.CV_Times_Each_Order,Line_CV_Times[CV_idx])
else:
self.Line_Total_CV_Times += self.minor_ChangeOver_Mins
self.CV_Times_Each_Order = np.append(self.CV_Times_Each_Order,self.minor_ChangeOver_Mins)
Previous_CV = CV_ele
## print("6.3.=========== Use all lines WorkOrder indexes to find the total processing time ================= ")
self.P_Times_Each_Order = []
self.Line_Late_Falg = []
curr_line_idx = 0
for _, WO_idx in np.ndenumerate(Line_WO_Idxes):
for i in np.where(self.O_Lines_Array[WO_idx]==Line_Code):
for j in i:
curr_line_idx=j
self.Line_Total_PT += self.P_Times_Array[(WO_idx,curr_line_idx)]
self.P_Times_Each_Order = np.append(self.P_Times_Each_Order,self.P_Times_Array[(WO_idx,curr_line_idx)])
def __FinalRun_Obj_Of_Each_Line_SaveData(self,Line_Code,Chromosome_Solution):
## print('#---------------- 6.2. Reset the initial start and end -------------#')
Line_Curr_End = self.Table_CalendarInfo.ProductionDate_ShiftA + pd.to_timedelta(self.minor_ChangeOver_Mins*60, unit='s')
Line_Curr_End = Line_Curr_End[0]
self.PlanStartTime = Line_Curr_End
intial_start_date = Line_Curr_End
Shift_AandB_Period = self.Table_CalendarInfo.ShiftA_deltaT_minutes.values[0] + self.Table_CalendarInfo.ShiftB_deltaT_minutes.values[0]
Shift_A_Period = self.Table_CalendarInfo.ShiftA_deltaT_minutes.values[0]
ShiftBAPeriod = 220 # in mins (3 hours 40 minutes)
self.Line_CumMakeSpan = 0
self.Line_Total_PT = 0
self.Line_Total_CV_Times = 0
## print("4. Obtain the line work order indexes for the line by determine at what positon line 1/2/3/4 is used")
Line_WO_Idxes = []
Chromosome_Solution = np.array(Chromosome_Solution)
for ii, item in np.ndenumerate(np.array(np.where(Chromosome_Solution == Line_Code))):
Line_WO_Idxes.append(item)
## print("5. Find the processing time, setup time, CV sorts, families, line calendar start time and delivery time for each line")
Line_WO_Num = self.Unique_WO_Array[(Line_WO_Idxes)]
        # 2D Matrices
Line_CV_Sorts = self.CV_Sorts[(Line_WO_Idxes),0]
self.Line_Families = self.Fmily_T[(Line_WO_Idxes),0]
Line_CV_Times = self.CV_Times[(Line_WO_Idxes),0]
Previous_CV = Line_CV_Sorts[0]
self.CV_Times_Each_Order = []
for CV_idx, CV_ele in np.ndenumerate(Line_CV_Sorts):
if CV_ele != Previous_CV:
self.Line_Total_CV_Times += Line_CV_Times[CV_idx]
self.CV_Times_Each_Order = np.append(self.CV_Times_Each_Order,Line_CV_Times[CV_idx])
else:
self.Line_Total_CV_Times += self.minor_ChangeOver_Mins
self.CV_Times_Each_Order = np.append(self.CV_Times_Each_Order,self.minor_ChangeOver_Mins)
Previous_CV = CV_ele
## print("6.3. Use all lines WorkOrder indexes to find the total processing time ")
self.P_Times_Each_Order = []
curr_line_idx = 0
for _, WO_idx in np.ndenumerate(Line_WO_Idxes):
for i in np.where(self.O_Lines_Array[WO_idx]==Line_Code):
for j in i:
curr_line_idx=j
self.Line_Total_PT += self.P_Times_Array[(WO_idx,curr_line_idx)]
self.P_Times_Each_Order = np.append(self.P_Times_Each_Order,self.P_Times_Array[(WO_idx,curr_line_idx)])
## print("# 6.3.1. Define a dataframe to save all results for plotting")
LineSequence = 0
Schedule_Of_The_Line =pd.DataFrame()
for P_Time_idx, P_Time_ele in np.ndenumerate(self.P_Times_Each_Order):
Line_Remainder = self.Line_CumMakeSpan%(Shift_AandB_Period)
Line_Curr_CV_Time = self.CV_Times_Each_Order[P_Time_idx]
Line_Curr_P_Time = self.P_Times_Each_Order[P_Time_idx]
self.Line_CumMakeSpan += Line_Curr_P_Time + Line_Curr_CV_Time
if (Line_Remainder + Line_Curr_P_Time) > Shift_AandB_Period:
SecondP_of_P_Time = Line_Remainder + Line_Curr_P_Time-Shift_AandB_Period
FirstP_of_P_Time = Line_Curr_P_Time - SecondP_of_P_Time
Line_Curr_Start = Line_Curr_End + pd.to_timedelta((Line_Curr_CV_Time)*60, unit='s')
Line_Curr_End = Line_Curr_Start + pd.to_timedelta((FirstP_of_P_Time*60), unit='s')
Schedule_Of_The_Line = Schedule_Of_The_Line.append(pd.DataFrame({'Task':'Line '+str(Line_Code),
'Start':Line_Curr_Start,
'Finish':Line_Curr_End,
'ProcessingTimeInMins':FirstP_of_P_Time,
'WorkOrderNum':str(Line_WO_Num[P_Time_idx]) ,
'Resource': self.Line_Families[P_Time_idx],
'ChangeoverTimeInMins':Line_Curr_CV_Time,
'WorkOrderSplitCounter':0,
},
index=[0]), ignore_index = True)
Line_Curr_Start_b = Line_Curr_End + pd.to_timedelta((ShiftBAPeriod)*60, unit='s')
Line_Curr_End = Line_Curr_Start_b + pd.to_timedelta(SecondP_of_P_Time*60, unit='s')
Schedule_Of_The_Line = Schedule_Of_The_Line.append(pd.DataFrame({'Task':'Line '+str(Line_Code),
'Start':Line_Curr_Start_b,
'Finish':Line_Curr_End,
'ProcessingTimeInMins':Line_Curr_P_Time,
'WorkOrderNum':str(Line_WO_Num[P_Time_idx]) ,
'Resource': self.Line_Families[P_Time_idx],
'ChangeoverTimeInMins':0,
'WorkOrderSplitCounter':0,
},
index=[LineSequence]), ignore_index = True)
else:
Line_Curr_Start = Line_Curr_End + pd.to_timedelta(Line_Curr_CV_Time*60, unit='s')
                Line_Curr_End = Line_Curr_Start + pd.to_timedelta((Line_Curr_P_Time*60), unit='s')
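# A minimal standalone sketch of the single objective used above (the 150/50 weights and the
# /24 hours-to-days scaling are taken from the methods above; the example numbers in the
# comment are made up):
def sketch_fitness(line_violations, makespan_hours_per_line):
    """Illustrative only: lower is better, and line-assignment violations dominate."""
    makespan_spread_days = (max(makespan_hours_per_line) - min(makespan_hours_per_line)) / 24
    return 150 * line_violations + 50 * makespan_spread_days
# e.g. sketch_fitness(0, [40.0, 46.5, 42.0, 44.0]) == 50 * (6.5 / 24) ~= 13.5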
"""
The following functions are specific to the analysis of data saved
with the BELLA control system.
"""
import pandas as pd
import matplotlib.pyplot as plt
import os
import glob
import re
from numpy import unravel_index
import numpy as np
from scipy import stats
import json
from functions.data_analysis import df_outlier2none
def get_data(dir_date, nscan=None, para=None, trim_std=None):
    '''Get DataFrame.
    dir_date: directory of a date where scan data is stored (str)
    nscan: list of scan numbers (int)
    para: list of parameters (str). No need to write the full name.
    trim_std: if given, outliers beyond trim_std standard deviations are replaced with None.
    '''
path = get_scan_path(dir_date, nscan)
df = get_data_from_path(path, para)
#parameters to consider getting rid of outliers...(don't consider scan)
para_vals = list(df.columns)
if 'scan' in para_vals:
para_vals.remove('scan')
if 'DateTime Timestamp' in para_vals:
para_vals.remove('DateTime Timestamp')
if 'Shotnumber' in para_vals:
para_vals.remove('Shotnumber')
#get rid of outliers
if trim_std:
        df = df_outlier2none(df, std=trim_std, columns=para_vals)
return df
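# Example call for get_data, wrapped in a function so that importing this module touches no
# files. The date directory and parameter names below are hypothetical placeholders:
def _example_get_data():
    dir_date = r'Z:\data\2021_0601'               # hypothetical scan-data date directory
    return get_data(dir_date, nscan=[1, 2], para=['charge', 'energy'], trim_std=3)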
def get_files_list(dirpath,f_format):
"""
    Get the paths of all files with extension f_format in the directory.
    dirpath: directory path
    f_format: file extension, e.g. 'txt'
"""
return sorted(glob.glob(dirpath+'/*.'+f_format))
def get_notebook_name():
"""
Return the full path of the jupyter notebook.
"""
import ipykernel
import requests
from requests.compat import urljoin
from notebook.notebookapp import list_running_servers
kernel_id = re.search('kernel-(.*).json',
ipykernel.connect.get_connection_file()).group(1)
servers = list_running_servers()
for ss in servers:
response = requests.get(urljoin(ss['url'], 'api/sessions'),
params={'token': ss.get('token', '')})
for nn in json.loads(response.text):
if nn['kernel']['id'] == kernel_id:
relative_path = nn['notebook']['path']
return os.path.join(ss['notebook_dir'], relative_path)
def save_dataframe(df, name, ipynb = None):
    '''Save dataframe under data_ipynb/"current ipynb name"/.'''
#get the file name of ipynb
if ipynb == None:
ipynb_fullpath = get_notebook_name()
ipynb = os.path.splitext(os.path.basename(ipynb_fullpath))[0]
#Open the data folder if doesnt exist
if not os.path.exists('data_ipynb'):
os.makedirs('data_ipynb')
if not os.path.exists('data_ipynb/'+ipynb):
os.makedirs('data_ipynb/'+ipynb)
#Save data
df.to_pickle('data_ipynb/'+ipynb+'/'+name+'.pkl')
print(name+' saved')
return None
def load_dataframe(name, ipynb = None):
"""load dataframe which was saved using the function save_dataframe
    name: corresponds to the name of the dataframe you specified with save_dataframe
    ipynb: the ipynb name you are running. If None, it will be acquired automatically (not always reliable).
"""
#get the file name of ipynb
if ipynb == None:
ipynb_fullpath = get_notebook_name()
ipynb = os.path.splitext(os.path.basename(ipynb_fullpath))[0]
load_path = 'data_ipynb/'+ipynb+'/'+name+'.pkl'
df = pd.read_pickle(load_path)
print(name+' loaded')
return df
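# Round-trip sketch for save_dataframe/load_dataframe. The notebook name is passed explicitly
# because automatic detection via get_notebook_name only works inside a running Jupyter
# session; the toy data below is made up:
def _example_save_load_roundtrip():
    df = pd.DataFrame({'shot': [1, 2, 3], 'charge': [25.1, 27.3, 24.8]})
    save_dataframe(df, 'toy_scan', ipynb='analysis_demo')  # -> data_ipynb/analysis_demo/toy_scan.pkl
    return load_dataframe('toy_scan', ipynb='analysis_demo')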
def get_data_from_path(path_list, para_list = None):
'''Get DataFrame from the file.
    path_list: list of file paths; data from every file is read and concatenated.
para_list: list of parameters (column names) you want to select from dataframe
output: dataframe
'''
data_list = []
for i in range(len(path_list)):
data_i = pd.read_csv(path_list[i], sep='\t')
if para_list:
#get full name of the parameters
para_list_full = []
for j in para_list:
para_full = par_full(path_list[i], j)
if para_full:
para_list_full = para_list_full+[para_full]
#If you can get all parameters, append the data of the scan
if len(para_list_full) == len(para_list):
data_i = data_i[para_list_full]
data_list.append(data_i)
else:
print('Skip saving data from', os.path.basename(path_list[i]))
else:
#if there is no para_list, get all the parameters that are saved
data_list.append(data_i)
data = pd.concat(data_list, sort=False)
#rename column names to alias if exists
for col in data.columns:
if 'Alias:' in col:
alias = col.split('Alias:', 1)[1]
data = data.rename(columns={col:alias})
return data
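# Small illustration of the alias handling at the end of get_data_from_path: a column saved
# as 'Scope CH1 Alias:charge' is exposed simply as 'charge'. The column names here are
# hypothetical:
def _example_alias_renaming():
    df = pd.DataFrame({'Scope CH1 Alias:charge': [1.0, 2.0], 'Shotnumber': [1, 2]})
    for col in df.columns:
        if 'Alias:' in col:
            df = df.rename(columns={col: col.split('Alias:', 1)[1]})
    return df  # columns become ['charge', 'Shotnumber']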
def get_nscan_last(dir_date):
'''Get the last scan number which is already done'''
path = dir_date + '\\analysis'
if not os.path.isdir(path):
return 0
else:
# get last scan info file name
files = glob.glob(path + '\\s*info.txt')
file_last = os.path.basename(files[-1])
# regexp. find number in the file name
n_scans = int(re.findall(r"\d+", file_last)[0])
return n_scans
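# Note (assumption worth verifying): glob.glob returns files in an arbitrary, typically
# lexicographic order, so once scans reach double digits 's10info.txt' can sort before
# 's2info.txt' and files[-1] may not correspond to the latest scan.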
def get_scan_path(dir_date, nscan=None):
'''
    Get the paths of the scan files s**.txt in the analysis folder.
    nscan: list or int of scan numbers. If None, create a list of all scan text paths.
    '''
    #if nscan is None, make a list of all scan numbers
if not nscan:
nscan_last = get_nscan_last(dir_date)
nscan_list = range(1, nscan_last+1)
elif isinstance(nscan, int):
nscan_list = [nscan]
else:
nscan_list = nscan
path_list = []
#make a list of all scan file paths
for i in nscan_list:
path = dir_date + '\\analysis\\s' + str(i) + '.txt'
path_list = path_list + [path]
return path_list
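# For example, get_scan_path(r'Z:\data\2021_0601', nscan=[2, 5]) returns
# [r'Z:\data\2021_0601\analysis\s2.txt', r'Z:\data\2021_0601\analysis\s5.txt']
# (hypothetical date directory; with nscan=None every scan up to get_nscan_last is included).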
def par_full(file, par):
'''get a full name of the parameter'''
    data = pd.read_csv(file, sep='\t')
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays.sparse import (
SparseArray,
SparseDtype,
)
arr_data = np.array([np.nan, np.nan, 1, 2, 3, np.nan, 4, 5, np.nan, 6])
arr = SparseArray(arr_data)
class TestGetitem:
def test_getitem(self):
dense = arr.to_dense()
for i in range(len(arr)):
tm.assert_almost_equal(arr[i], dense[i])
tm.assert_almost_equal(arr[-i], dense[-i])
def test_getitem_arraylike_mask(self):
arr = SparseArray([0, 1, 2])
result = arr[[True, False, True]]
expected = SparseArray([0, 2])
tm.assert_sp_array_equal(result, expected)
@pytest.mark.parametrize(
"slc",
[
np.s_[:],
np.s_[1:10],
np.s_[1:100],
np.s_[10:1],
np.s_[:-3],
np.s_[-5:-4],
np.s_[:-12],
np.s_[-12:],
np.s_[2:],
np.s_[2::3],
np.s_[::2],
np.s_[::-1],
np.s_[::-2],
np.s_[1:6:2],
np.s_[:-6:-2],
],
)
@pytest.mark.parametrize(
"as_dense", [[np.nan] * 10, [1] * 10, [np.nan] * 5 + [1] * 5, []]
)
def test_getslice(self, slc, as_dense):
as_dense = np.array(as_dense)
arr = SparseArray(as_dense)
result = arr[slc]
expected = SparseArray(as_dense[slc])
tm.assert_sp_array_equal(result, expected)
def test_getslice_tuple(self):
dense = np.array([np.nan, 0, 3, 4, 0, 5, np.nan, np.nan, 0])
sparse = SparseArray(dense)
res = sparse[(slice(4, None),)]
exp = SparseArray(dense[4:])
tm.assert_sp_array_equal(res, exp)
sparse = SparseArray(dense, fill_value=0)
res = sparse[(slice(4, None),)]
exp = SparseArray(dense[4:], fill_value=0)
tm.assert_sp_array_equal(res, exp)
msg = "too many indices for array"
with pytest.raises(IndexError, match=msg):
sparse[4:, :]
with pytest.raises(IndexError, match=msg):
# check numpy compat
dense[4:, :]
def test_boolean_slice_empty(self):
arr = SparseArray([0, 1, 2])
res = arr[[False, False, False]]
assert res.dtype == arr.dtype
def test_getitem_bool_sparse_array(self):
# GH 23122
spar_bool = SparseArray([False, True] * 5, dtype=np.bool8, fill_value=True)
exp = SparseArray([np.nan, 2, np.nan, 5, 6])
tm.assert_sp_array_equal(arr[spar_bool], exp)
spar_bool = ~spar_bool
res = arr[spar_bool]
exp = SparseArray([np.nan, 1, 3, 4, np.nan])
tm.assert_sp_array_equal(res, exp)
spar_bool = SparseArray(
[False, True, np.nan] * 3, dtype=np.bool8, fill_value=np.nan
)
res = arr[spar_bool]
exp = SparseArray([np.nan, 3, 5])
        tm.assert_sp_array_equal(res, exp)
import logging
from collections import defaultdict
from concurrent.futures import FIRST_EXCEPTION, wait
from itertools import product
from pathlib import Path
from typing import Iterable, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from hermes.typeo import typeo
from rich.progress import Progress
from bbhnet.analysis.analysis import integrate
from bbhnet.analysis.distributions import DiscreteDistribution
from bbhnet.analysis.normalizers import GaussianNormalizer
from bbhnet.io.h5 import write_timeseries
from bbhnet.io.timeslides import Segment, TimeSlide
from bbhnet.logging import configure_logging
from bbhnet.parallelize import AsyncExecutor, as_completed
event_times = [1186302519.8, 1186741861.5, 1187058327.1, 1187529256.5]
event_names = ["GW170809", "GW170814", "GW170818", "GW170823"]
events = {name: time for name, time in zip(event_names, event_times)}
def load_segment(segment: Segment):
"""
Quick utility function which just wraps a Segment's
`load` method so that we can execute it in a process
pool since methods aren't picklable.
"""
segment.load("out")
return segment
def get_write_dir(
write_dir: Path, norm: Optional[float], shift: Union[str, Segment]
) -> Path:
"""
Quick utility function for getting the name of the directory
to which to save the outputs from an analysis using a particular
time-shift/norm-seconds combination
"""
if isinstance(shift, Segment):
shift = shift.shift
write_dir = write_dir / f"norm-seconds.{norm}" / shift
write_dir.mkdir(parents=True, exist_ok=True)
return write_dir
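# For example, get_write_dir(Path("out"), 64.0, "dt-0.5") creates and returns
# out/norm-seconds.64.0/dt-0.5 — the shift label "dt-0.5" is hypothetical and in practice
# comes from the timeslide directory names (or Segment.shift).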
def build_background(
thread_ex: AsyncExecutor,
process_ex: AsyncExecutor,
pbar: Progress,
background_segments: Iterable[Segment],
data_dir: Path,
write_dir: Path,
max_tb: float,
window_length: float = 1.0,
norm_seconds: Optional[Iterable[float]] = None,
num_bins: int = int(1e4),
):
"""
For a sequence of background segments, compute a discrete
distribution of integrated neural network outputs using
the indicated integration window length for each of the
normalization window lengths specified. Iterates through
the background segments in order and tries to find as
many time-shifts available for each segment as possible
in the specified data directory, stopping iteration through
segments once a maximum number of seconds of bacgkround have
been generated.
As a warning, there's a fair amount of asynchronous execution
going on in this function, and it may come off a bit complex.
Args:
thread_ex:
An `AsyncExecutor` that maintains a thread pool
for writing analyzed segments in parallel with
the analysis processes themselves.
process_ex:
An `AsyncExecutor` that maintains a process pool
for loading and integrating Segments of neural
network outputs.
pbar:
A `rich.progress.Progress` object for keeping
track of the progress of each of the various
subtasks.
background_segments:
The `Segment` objects to use for building a
background distribution. `data_dir` will be
searched for all time-shifts of each segment
for parallel analysis. Once `max_tb` seconds
worth of background have been generated, iteration
through this array will be terminated, so segments
should be ordered by some level of "importance",
since it's likely that segments near the back of the
array won't be analyzed for lower values of `max_tb`.
data_dir:
Directory containing timeslide root directories,
which will be mined for time-shifts of each `Segment`
in `background_segments`. If a time-shift doesn't exist
for a given `Segment`, the time-shift is ignored.
write_dir:
Root directory to which to write integrated NN outputs.
For each time-shift analyzed and normalization window
length specified in `norm_seconds`, results will be
written to a subdirectory
`write_dir / "norm-seconds.{norm}" / shift`, which
will be created if it does not exist.
max_tb:
The maximum number of seconds of background data
to analyze for each value of `norm_seconds` before
new segments to shift and analyze are no longer sought.
However, because we use _every_ time-shift for each
segment we iterate through, its possible that each
background distribution will utilize slightly more
than this value.
window_length:
The length of the integration window to use
for analysis in seconds.
norm_seconds:
An array of normalization window lengths to use
to standardize the integrated neural network outputs.
(i.e. the output timeseries is the integral over the
previous `window_length` seconds, normalized by the
mean and standard deviation of the previous `norm`
seconds before that, where `norm` is each value in
`norm_seconds`). A `norm` value of `None` in the
`norm_seconds` iterable indicates
no normalization, and if `norm_seconds` is left as
`None` this will be the only value used.
num_bins:
The number of bins to use to initialize the discrete
distribution used to characterize the background
distribution.
Returns:
A dictionary mapping each value in `norm_seconds` to
an associated `DiscreteDistribution` characterizing
its background distribution.
"""
write_dir.mkdir(exist_ok=True)
norm_seconds = norm_seconds or [norm_seconds]
# keep track of the min and max values of each normalization
# window's background and the corresponding filenames so
# that we can fit a discrete distribution to it after the fact
mins = defaultdict(lambda: float("inf"))
maxs = defaultdict(lambda: -float("inf"))
# keep track of all the files that we've written
# for each normalization window size so that we
# can iterate through them later and submit them
# for reloading once we have our distributions initialized
fname_futures = defaultdict(list)
# iterate through timeshifts of our background segments
# until we've generated enough background data.
background_segments = iter(background_segments)
main_task_id = pbar.add_task("[red]Building background", total=max_tb)
while not pbar.tasks[main_task_id].finished:
segment = next(background_segments)
# since we're assuming here that the background
# segments are being provided in reverse chronological
# order (with segments closest to the event segment first),
# exhaust all the time shifts we can of each segment before
# going to the previous one to keep data as fresh as possible
load_futures = {}
for shift in data_dir.iterdir():
try:
shifted = segment.make_shift(shift.name)
except ValueError:
# this segment doesn't have a shift
# at this value, so just move on
continue
# load all the timeslides up front in a separate thread
# TODO: O(1GB) memory means segment.length * N ~O(4M),
# so for ~O(10k) long segments this means this should
# be fine as long as N ~ O(100). Worth doing a check for?
future = process_ex.submit(load_segment, shifted)
load_futures[shift.name] = [future]
# create progress bar tasks for each one
# of the subprocesses involved for analyzing
# this set of timeslides
load_task_id = pbar.add_task(
f"[cyan]Loading {len(load_futures)} {segment.length}s timeslides",
total=len(load_futures),
)
analyze_task_id = pbar.add_task(
"[yelllow]Integrating timeslides",
total=len(load_futures) * len(norm_seconds),
)
write_task_id = pbar.add_task(
"[green]Writing integrated timeslides",
total=len(load_futures) * len(norm_seconds),
)
# now once each segment is loaded, submit a job
# to our process pool to integrate it using each
# one of the specified normalization periods
integration_futures = {}
sample_rate = None
for shift, seg in as_completed(load_futures):
# get the sample rate of the NN output timeseries
# dynamically from the first timeseries we load,
# since we'll need it to initialize our normalizers
if sample_rate is None:
t = seg._cache["t"]
sample_rate = 1 / (t[1] - t[0])
for norm in norm_seconds:
# build a normalizer for the given normalization window length
if norm is not None:
normalizer = GaussianNormalizer(norm * sample_rate)
else:
normalizer = None
# submit the integration job and have it update the
# corresponding progress bar task once it completes
future = process_ex.submit(
integrate,
seg,
kernel_length=1.0,
window_length=window_length,
normalizer=normalizer,
)
future.add_done_callback(
lambda f: pbar.update(analyze_task_id, advance=1)
)
integration_futures[(norm, shift)] = [future]
# advance the task keeping track of how many files
# we've loaded by one
pbar.update(load_task_id, advance=1)
# make sure we have the expected number of jobs submitted
if len(integration_futures) < (len(norm_seconds) * len(load_futures)):
raise ValueError(
"Expected {} integration jobs submitted, "
"but only found {}".format(
len(norm_seconds) * len(load_futures),
len(integration_futures),
)
)
# as the integration jobs come back, write their
# results using our thread pool and record the
# min and max values for our discrete distribution
segment_futures = []
for (norm, shift), (t, y, integrated) in as_completed(
integration_futures
):
# submit the writing job to our thread pool and
# use a callback to keep track of all the filenames
# for a given normalization window
shift_dir = get_write_dir(write_dir, norm, shift)
future = thread_ex.submit(
write_timeseries,
shift_dir,
t=t,
y=y,
integrated=integrated,
)
future.add_done_callback(
lambda f: pbar.update(write_task_id, advance=1)
)
fname_futures[norm].append(future)
segment_futures.append(future)
# keep track of the max and min values for each norm
mins[norm] = min(mins[norm], integrated.min())
maxs[norm] = max(maxs[norm], integrated.max())
# wait for all the writing to finish before we
# move on so that we don't overload our processes
wait(segment_futures, return_when=FIRST_EXCEPTION)
pbar.update(main_task_id, advance=len(load_futures) * segment.length)
# now that we've analyzed enough background data,
# we'll initialize background distributions using
# the min and max bounds we found during analysis
# and then load everything back in to bin them
# within these bounds
Tb = pbar.tasks[main_task_id].completed
logging.info(f"Accumulated {Tb}s of background matched filter outputs.")
# submit a bunch of jobs for loading these integrated
# segments back in for discretization
load_futures = defaultdict(list)
for norm, fname in as_completed(fname_futures):
future = process_ex.submit(load_segment, Segment(fname))
load_futures[norm].append(future)
# create a task for each one of the normalization windows
# tracking how far along the distribution fit is
fit_task_ids = {}
for norm in norm_seconds:
norm_name = f"{norm}s" if norm is not None else "empty"
task_id = pbar.add_task(
"[purple]Fitting background using {} normalization window".format(
norm_name
),
total=len(load_futures[norm]),
)
fit_task_ids[norm] = task_id
    # now discretize the analyzed segments as they're loaded back in
backgrounds = {}
for norm, segment in as_completed(load_futures):
try:
# if we already have a background distribution
# for this event, grab it and fit it with a
# "warm start" aka don't ditch the existing histogram
background = backgrounds[norm]
warm_start = True
except KeyError:
# otherwise create a new distribution
# and fit it from scratch
mn, mx = mins[norm], maxs[norm]
background = DiscreteDistribution("integrated", mn, mx, num_bins)
backgrounds[norm] = background
warm_start = False
# fit the distribution to the new data and then
# update the corresponding task tracker
background.fit(segment, warm_start=warm_start)
pbar.update(fit_task_ids[norm], advance=1)
return backgrounds
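# build_background returns one DiscreteDistribution per normalization window, keyed by the
# values passed in norm_seconds (None meaning "no normalization"). A sketch of how the result
# is consumed downstream (mirroring analyze_event below):
#
#   backgrounds = build_background(..., norm_seconds=[None, 10, 100], ...)
#   for norm, background in backgrounds.items():
#       fars, latencies = background.characterize_events(
#           segment, times, window_length=1.0, metric="far"
#       )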
def check_if_needs_analyzing(
event_segment: Segment,
norm_seconds: Iterable[Optional[float]],
characterizations: pd.DataFrame,
) -> Iterable[Optional[float]]:
times = [t for t in event_times if t in event_segment]
names = [name for name in event_names if events[name] in times]
combos = set(product(names, norm_seconds))
remaining = combos - set(characterizations.index)
# only do analysis on those normalization
# values that we haven't already done
# (sorry, you'll still have to do it for all events,
# but those are miniscule by comparison)
norm_seconds = list(set([j for i, j in remaining]))
return norm_seconds, names, times
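# Toy illustration of the bookkeeping above (event name real, numbers made up): if the
# characterizations frame is indexed by (event_name, norm_seconds) and already holds
# ("GW170814", 10), then for norm_seconds=[10, 100] only 100 still needs analyzing.
def _example_remaining_norms():
    characterizations = pd.DataFrame(
        {"far": [1e-3]},
        index=pd.MultiIndex.from_tuples(
            [("GW170814", 10)], names=["event_name", "norm_seconds"]
        ),
    )
    combos = set(product(["GW170814"], [10, 100]))
    remaining = combos - set(characterizations.index)
    return sorted(norm for _, norm in remaining)  # -> [100]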
def analyze_event(
thread_ex: AsyncExecutor,
process_ex: AsyncExecutor,
characterizations: pd.DataFrame,
timeseries: pd.DataFrame,
event_segment: Segment,
background_segments: Iterable[Segment],
data_dir: Path,
write_dir: Path,
results_dir: Path,
max_tb: float,
window_length: float = 1.0,
norm_seconds: Optional[Iterable[float]] = None,
num_bins: int = int(1e4),
force: bool = False,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Use timeshifts of a set of previous segments to build a
background distribution with which to analyze a segment
    containing an event and characterize the false alarm
rate of that event as a function of time from the event
trigger.
"""
# first check if we can skip this analysis altogether
# because we already have data on it and we're not
# forcing ourselves to re-analyze
norm_seconds = norm_seconds or [norm_seconds]
if not force:
norm_seconds, names, times = check_if_needs_analyzing(
event_segment, norm_seconds, characterizations
)
if len(norm_seconds) == 0:
logging.info(
f"Already analyzed events in segment {event_segment}, skipping"
)
return
with Progress() as pbar:
# TODO: exclude segments with events?
backgrounds = build_background(
thread_ex,
process_ex,
pbar,
background_segments=background_segments,
data_dir=data_dir,
write_dir=write_dir,
window_length=window_length,
norm_seconds=norm_seconds,
max_tb=max_tb,
num_bins=num_bins,
)
# now use the fit background to characterize the
# significance of BBHNet's detection around the event
for norm, background in backgrounds.items():
if norm is not None:
normalizer = GaussianNormalizer(norm)
else:
normalizer = None
logging.info(
"Characterizing events {} with normalization "
"window length {}".format(", ".join(names), norm)
)
t, y, integrated = integrate(
event_segment,
kernel_length=1,
window_length=window_length,
normalizer=normalizer,
)
fname = write_timeseries(
get_write_dir(write_dir, norm, event_segment),
t=t,
y=y,
integrated=integrated,
)
# create a segment and add the existing data to
# its cache so that we don't try to load it again
segment = Segment(fname)
segment._cache = {"t": t, "integrated": integrated}
fars, latencies = background.characterize_events(
segment, times, window_length=window_length, metric="far"
)
# for each one of the events in this segment,
# record the false alarm rate as a function of
# time and add it to our dataframe then checkpoint it.
# Then isolate the timeseries of both the NN outputs and
# the integrated values around the event and write those
# to another dataframe and checkpoint that as well
for far, latency, name, time in zip(fars, latencies, names, times):
logging.info(f"\t{name}:")
logging.info(f"\t\tFalse Alarm Rates: {list(far)}")
logging.info(f"\t\tLatencies: {list(latency)}")
df = pd.DataFrame(
dict(
event_name=[name] * len(far),
norm_seconds=[norm] * len(far),
far=far,
latency=latency,
)
).set_index(["event_name", "norm_seconds"])
characterizations = pd.concat([characterizations, df])
characterizations.to_csv(results_dir / "characterizations.csv")
# keep the one second before the trigger,
# during the event after the trigger, and
# after the event trigger has left the kernel
mask = (time - 1 < t) & (t < time + 2)
df = pd.DataFrame(
dict(
event_name=[name] * mask.sum(),
norm_seconds=[norm] * mask.sum(),
t=t[mask] - time,
y=y[mask],
integrated=integrated[mask],
)
).set_index(["event_name", "norm_seconds"])
                timeseries = pd.concat([timeseries, df])
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from meterstick import metrics
from meterstick import operations
import mock
import numpy as np
import pandas as pd
from pandas import testing
import unittest
class MetricTest(unittest.TestCase):
"""Tests general features of Metric."""
df = pd.DataFrame({'X': [0, 1, 2, 3], 'Y': [0, 1, 1, 2]})
def test_precompute(self):
metric = metrics.Metric(
'foo',
precompute=lambda df, split_by: df[split_by],
compute=lambda x: x.sum().values[0])
output = metric.compute_on(self.df, 'Y')
expected = pd.DataFrame({'foo': [0, 2, 2]}, index=range(3))
expected.index.name = 'Y'
testing.assert_frame_equal(output, expected)
def test_compute(self):
metric = metrics.Metric('foo', compute=lambda x: x['X'].sum())
output = metric.compute_on(self.df)
expected = metrics.Sum('X', 'foo').compute_on(self.df)
testing.assert_frame_equal(output, expected)
def test_postcompute(self):
def postcompute(values, split_by):
del split_by
return values / values.sum()
output = metrics.Sum('X', postcompute=postcompute).compute_on(self.df, 'Y')
expected = operations.Distribution('Y',
metrics.Sum('X')).compute_on(self.df)
expected.columns = ['sum(X)']
testing.assert_frame_equal(output.astype(float), expected)
def test_compute_slices(self):
def _sum(df, split_by):
if split_by:
df = df.groupby(split_by)
return df['X'].sum()
metric = metrics.Metric('foo', compute_slices=_sum)
output = metric.compute_on(self.df)
expected = metrics.Sum('X', 'foo').compute_on(self.df)
testing.assert_frame_equal(output, expected)
def test_final_compute(self):
metric = metrics.Metric(
'foo', compute=lambda x: x, final_compute=lambda *_: 2)
output = metric.compute_on(None)
self.assertEqual(output, 2)
def test_pipeline_operator(self):
m = metrics.Count('X')
testing.assert_frame_equal(
m.compute_on(self.df), m | metrics.compute_on(self.df))
class SimpleMetricTest(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 1, 1, 2, 2, 3, 4],
'Y': [3, 1, 1, 4, 4, 3, 5],
'grp': ['A'] * 3 + ['B'] * 4
})
def test_list_where(self):
metric = metrics.Mean('X', where=['grp == "A"'])
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].mean()
self.assertEqual(output, expected)
def test_single_list_where(self):
metric = metrics.Mean('X', where=['grp == "A"', 'Y < 2'])
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A" and Y < 2')['X'].mean()
self.assertEqual(output, expected)
def test_count_not_df(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 7)
def test_count_split_by_not_df(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].count()
expected.name = 'count(X)'
testing.assert_series_equal(output, expected)
def test_count_where(self):
metric = metrics.Count('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 3)
def test_count_with_nan(self):
df = pd.DataFrame({'X': [1, 1, np.nan, 2, 2, 3, 4]})
metric = metrics.Count('X')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 6)
def test_count_unmelted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'count(X)': [7]})
testing.assert_frame_equal(output, expected)
def test_count_melted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [7]}, index=['count(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_count_split_by_unmelted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'count(X)': [3, 4]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_count_split_by_melted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [3, 4],
'grp': ['A', 'B']
},
index=['count(X)', 'count(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_count_distinct(self):
df = pd.DataFrame({'X': [1, 1, np.nan, 2, 2, 3]})
metric = metrics.Count('X', distinct=True)
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 3)
def test_sum_not_df(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 14)
def test_sum_split_by_not_df(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].sum()
expected.name = 'sum(X)'
testing.assert_series_equal(output, expected)
def test_sum_where(self):
metric = metrics.Sum('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].sum()
self.assertEqual(output, expected)
def test_sum_unmelted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'sum(X)': [14]})
testing.assert_frame_equal(output, expected)
def test_sum_melted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [14]}, index=['sum(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_sum_split_by_unmelted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'sum(X)': [3, 11]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_sum_split_by_melted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [3, 11],
'grp': ['A', 'B']
},
index=['sum(X)', 'sum(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_dot_not_df(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, sum(self.df.X * self.df.Y))
def test_dot_split_by_not_df(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
self.df['X * Y'] = self.df.X * self.df.Y
expected = self.df.groupby('grp')['X * Y'].sum()
expected.name = 'sum(X * Y)'
testing.assert_series_equal(output, expected)
def test_dot_where(self):
metric = metrics.Dot('X', 'Y', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
d = self.df.query('grp == "A"')
self.assertEqual(output, sum(d.X * d.Y))
def test_dot_unmelted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'sum(X * Y)': [sum(self.df.X * self.df.Y)]})
testing.assert_frame_equal(output, expected)
def test_dot_normalized(self):
metric = metrics.Dot('X', 'Y', True)
output = metric.compute_on(self.df)
expected = pd.DataFrame({'mean(X * Y)': [(self.df.X * self.df.Y).mean()]})
testing.assert_frame_equal(output, expected)
def test_dot_melted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [sum(self.df.X * self.df.Y)]},
index=['sum(X * Y)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_dot_split_by_unmelted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'sum(X * Y)': [5, 45]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_dot_split_by_melted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [5, 45],
'grp': ['A', 'B']
},
index=['sum(X * Y)', 'sum(X * Y)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_mean_not_df(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2)
def test_mean_split_by_not_df(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].mean()
expected.name = 'mean(X)'
testing.assert_series_equal(output, expected)
def test_mean_where(self):
metric = metrics.Mean('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].mean()
self.assertEqual(output, expected)
def test_mean_unmelted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'mean(X)': [2.]})
testing.assert_frame_equal(output, expected)
def test_mean_melted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [2.]}, index=['mean(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_mean_split_by_unmelted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'mean(X)': [1, 2.75]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_mean_split_by_melted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [1, 2.75],
'grp': ['A', 'B']
},
index=['mean(X)', 'mean(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_max(self):
metric = metrics.Max('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'max(X)': [4]})
testing.assert_frame_equal(output, expected)
def test_min(self):
metric = metrics.Min('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'min(X)': [1]})
testing.assert_frame_equal(output, expected)
def test_weighted_mean_not_df(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1.25)
def test_weighted_mean_split_by_not_df(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp', return_dataframe=False)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.Series((1.25, 3.), index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'Y-weighted mean(X)'
testing.assert_series_equal(output, expected)
def test_weighted_mean_unmelted(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted mean(X)': [1.25]})
testing.assert_frame_equal(output, expected)
def test_weighted_mean_melted(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame({'Value': [1.25]}, index=['Y-weighted mean(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_mean_split_by_unmelted(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({'Y-weighted mean(X)': [1.25, 3.]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_weighted_mean_split_by_melted(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({
'Value': [1.25, 3.],
'grp': ['A', 'B']
},
index=['Y-weighted mean(X)', 'Y-weighted mean(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_quantile_raise(self):
with self.assertRaises(ValueError) as cm:
metrics.Quantile('X', 2)
self.assertEqual(str(cm.exception), 'quantiles must be in [0, 1].')
def test_quantile_multiple_quantiles_raise(self):
with self.assertRaises(ValueError) as cm:
metrics.Quantile('X', [0.1, 2])
self.assertEqual(str(cm.exception), 'quantiles must be in [0, 1].')
def test_quantile_not_df(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2)
def test_quantile_where(self):
metric = metrics.Quantile('X', where='grp == "B"')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2.5)
def test_quantile_interpolation(self):
metric = metrics.Quantile('X', 0.5, interpolation='lower')
output = metric.compute_on(
pd.DataFrame({'X': [1, 2]}), return_dataframe=False)
self.assertEqual(output, 1)
def test_quantile_split_by_not_df(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].quantile(0.5)
expected.name = 'quantile(X, 0.5)'
testing.assert_series_equal(output, expected)
def test_quantile_unmelted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df)
expected = pd.DataFrame({'quantile(X, 0.5)': [2.]})
testing.assert_frame_equal(output, expected)
def test_quantile_melted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [2.]}, index=['quantile(X, 0.5)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_quantile_split_by_unmelted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'quantile(X, 0.5)': [1, 2.5]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_quantile_split_by_melted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [1, 2.5],
'grp': ['A', 'B']
},
index=['quantile(X, 0.5)'] * 2)
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_quantile_multiple_quantiles(self):
df = pd.DataFrame({'X': [0, 1]})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.1, 0.5]),
metrics.Count('X')])
output = metric.compute_on(df)
expected = pd.DataFrame(
[[0.1, 0.5, 2]],
columns=['quantile(X, 0.1)', 'quantile(X, 0.5)', 'count(X)'])
testing.assert_frame_equal(output, expected)
def test_quantile_multiple_quantiles_melted(self):
df = pd.DataFrame({'X': [0, 1]})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.1, 0.5]),
metrics.Count('X')])
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame(
{'Value': [0.1, 0.5, 2]},
index=['quantile(X, 0.1)', 'quantile(X, 0.5)', 'count(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_quantile_not_df(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Quantile('X', weight='Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1.25)
def test_weighted_quantile_df(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Quantile('X', weight='Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted quantile(X, 0.5)': [1.25]})
testing.assert_frame_equal(output, expected)
def test_weighted_quantile_multiple_quantiles_split_by(self):
df = pd.DataFrame({
'X': [0, 1, 2, 1, 2, 3],
'Y': [1, 2, 2, 1, 1, 1],
'grp': ['B'] * 3 + ['A'] * 3
})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.25, 0.5], weight='Y'),
metrics.Sum('X')])
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame(
{
'Y-weighted quantile(X, 0.25)': [1.25, 0.5],
'Y-weighted quantile(X, 0.5)': [2., 1.25],
'sum(X)': [6, 3]
},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_weighted_quantile_multiple_quantiles_split_by_melted(self):
df = pd.DataFrame({
'X': [0, 1, 2, 1, 2, 3],
'Y': [1, 2, 2, 1, 1, 1],
'grp': ['B'] * 3 + ['A'] * 3
})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.25, 0.5], weight='Y'),
metrics.Sum('X')])
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(level=['Metric', 'grp'], inplace=True) # For Py2
expected = pd.DataFrame({'Value': [1.25, 0.5, 2., 1.25, 6., 3.]},
index=pd.MultiIndex.from_product(
([
'Y-weighted quantile(X, 0.25)',
'Y-weighted quantile(X, 0.5)', 'sum(X)'
], ['A', 'B']),
names=['Metric', 'grp']))
testing.assert_frame_equal(output, expected)
def test_variance_not_df(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.var())
def test_variance_biased(self):
metric = metrics.Variance('X', False)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.var(ddof=0))
def test_variance_split_by_not_df(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].var()
expected.name = 'var(X)'
testing.assert_series_equal(output, expected)
def test_variance_where(self):
metric = metrics.Variance('X', where='grp == "B"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "B"')['X'].var()
self.assertEqual(output, expected)
def test_variance_unmelted(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'var(X)': [self.df.X.var()]})
testing.assert_frame_equal(output, expected)
def test_variance_melted(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [self.df.X.var()]}, index=['var(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_variance_split_by_unmelted(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'var(X)': self.df.groupby('grp')['X'].var()},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_variance_split_by_melted(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame(
{
'Value': self.df.groupby('grp')['X'].var().values,
'grp': ['A', 'B']
},
index=['var(X)', 'var(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_weighted_variance_not_df(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1)
def test_weighted_variance_not_df_biased(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.Variance('X', False, 'Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 0.75)
def test_weighted_variance_split_by_not_df(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, 'grp', return_dataframe=False)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.Series((2., 1), index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'Y-weighted var(X)'
testing.assert_series_equal(output, expected)
def test_weighted_variance_unmelted(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted var(X)': [1.]})
testing.assert_frame_equal(output, expected)
def test_weighted_variance_melted(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame({'Value': [1.]}, index=['Y-weighted var(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_variance_split_by_unmelted(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({'Y-weighted var(X)': [2., 1]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_weighted_variance_split_by_melted(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({
'Value': [2., 1],
'grp': ['A', 'B']
},
index=['Y-weighted var(X)', 'Y-weighted var(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_standard_deviation_not_df(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.std())
def test_standard_deviation_biased(self):
metric = metrics.StandardDeviation('X', False)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.std(ddof=0))
def test_standard_deviation_split_by_not_df(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].std()
expected.name = 'sd(X)'
testing.assert_series_equal(output, expected)
def test_standard_deviation_where(self):
metric = metrics.StandardDeviation('X', where='grp == "B"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "B"')['X'].std()
self.assertEqual(output, expected)
def test_standard_deviation_unmelted(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'sd(X)': [self.df.X.std()]})
testing.assert_frame_equal(output, expected)
def test_standard_deviation_melted(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [self.df.X.std()]}, index=['sd(X)'])
expected.index.name = 'Metric'
    testing.assert_frame_equal(output, expected)
import re
import numpy as np
import pandas as pd
import pytest
from woodwork import DataTable
from woodwork.logical_types import (
URL,
Boolean,
Categorical,
CountryCode,
Datetime,
Double,
Filepath,
FullName,
Integer,
IPAddress,
LatLong,
NaturalLanguage,
Ordinal,
PhoneNumber,
SubRegionCode,
ZIPCode
)
def test_datatable_physical_types(sample_df):
dt = DataTable(sample_df)
assert isinstance(dt.physical_types, dict)
assert set(dt.physical_types.keys()) == set(sample_df.columns)
for k, v in dt.physical_types.items():
assert isinstance(k, str)
assert v == sample_df[k].dtype
def test_sets_category_dtype_on_init():
column_name = 'test_series'
series_list = [
pd.Series(['a', 'b', 'c'], name=column_name),
pd.Series(['a', None, 'c'], name=column_name),
pd.Series(['a', np.nan, 'c'], name=column_name),
pd.Series(['a', pd.NA, 'c'], name=column_name),
pd.Series(['a', pd.NaT, 'c'], name=column_name),
]
logical_types = [
Categorical,
CountryCode,
Ordinal(order=['a', 'b', 'c']),
SubRegionCode,
ZIPCode,
]
for series in series_list:
series = series.astype('object')
for logical_type in logical_types:
ltypes = {
column_name: logical_type,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_category_dtype_on_update():
column_name = 'test_series'
series = pd.Series(['a', 'b', 'c'], name=column_name)
series = series.astype('object')
logical_types = [
Categorical,
CountryCode,
Ordinal(order=['a', 'b', 'c']),
SubRegionCode,
ZIPCode,
]
for logical_type in logical_types:
ltypes = {
column_name: NaturalLanguage,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
dt = dt.set_types(logical_types={column_name: logical_type})
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_object_dtype_on_init(latlong_df):
for column_name in latlong_df.columns:
ltypes = {
column_name: LatLong,
}
dt = DataTable(latlong_df.loc[:, [column_name]], logical_types=ltypes)
assert dt.columns[column_name].logical_type == LatLong
assert dt.columns[column_name].dtype == LatLong.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == LatLong.pandas_dtype
def test_sets_object_dtype_on_update(latlong_df):
for column_name in latlong_df.columns:
ltypes = {
column_name: NaturalLanguage
}
dt = DataTable(latlong_df.loc[:, [column_name]], logical_types=ltypes)
dt = dt.set_types(logical_types={column_name: LatLong})
assert dt.columns[column_name].logical_type == LatLong
assert dt.columns[column_name].dtype == LatLong.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == LatLong.pandas_dtype
def test_sets_string_dtype_on_init():
column_name = 'test_series'
series_list = [
pd.Series(['a', 'b', 'c'], name=column_name),
pd.Series(['a', None, 'c'], name=column_name),
pd.Series(['a', np.nan, 'c'], name=column_name),
pd.Series(['a', pd.NA, 'c'], name=column_name),
]
logical_types = [
Filepath,
FullName,
IPAddress,
NaturalLanguage,
PhoneNumber,
URL,
]
for series in series_list:
series = series.astype('object')
for logical_type in logical_types:
ltypes = {
column_name: logical_type,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_string_dtype_on_update():
column_name = 'test_series'
series = pd.Series(['a', 'b', 'c'], name=column_name)
series = series.astype('object')
logical_types = [
Filepath,
FullName,
IPAddress,
NaturalLanguage,
PhoneNumber,
URL,
]
for logical_type in logical_types:
ltypes = {
column_name: Categorical,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
dt = dt.set_types(logical_types={column_name: logical_type})
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_boolean_dtype_on_init():
column_name = 'test_series'
series_list = [
pd.Series([True, False, True], name=column_name),
pd.Series([True, None, True], name=column_name),
pd.Series([True, np.nan, True], name=column_name),
pd.Series([True, pd.NA, True], name=column_name),
]
logical_type = Boolean
for series in series_list:
series = series.astype('object')
ltypes = {
column_name: logical_type,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_boolean_dtype_on_update():
column_name = 'test_series'
series = pd.Series([0, 1, 0], name=column_name)
series = series.astype('object')
ltypes = {
column_name: Integer,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
dt = dt.set_types(logical_types={column_name: Boolean})
assert dt.columns[column_name].logical_type == Boolean
assert dt.columns[column_name].dtype == Boolean.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == Boolean.pandas_dtype
def test_sets_int64_dtype_on_init():
column_name = 'test_series'
series_list = [
pd.Series([1, 2, 3], name=column_name),
pd.Series([1, None, 3], name=column_name),
pd.Series([1, np.nan, 3], name=column_name),
pd.Series([1, pd.NA, 3], name=column_name),
]
logical_types = [Integer]
for series in series_list:
series = series.astype('object')
for logical_type in logical_types:
ltypes = {
column_name: logical_type,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_int64_dtype_on_update():
column_name = 'test_series'
series = pd.Series([1.0, 2.0, 1.0], name=column_name)
series = series.astype('object')
logical_types = [Integer]
for logical_type in logical_types:
ltypes = {
column_name: Double,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
dt = dt.set_types(logical_types={column_name: logical_type})
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_float64_dtype_on_init():
column_name = 'test_series'
series_list = [
pd.Series([1.1, 2, 3], name=column_name),
pd.Series([1.1, None, 3], name=column_name),
pd.Series([1.1, np.nan, 3], name=column_name),
]
logical_type = Double
for series in series_list:
series = series.astype('object')
ltypes = {
column_name: logical_type,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_float64_dtype_on_update():
column_name = 'test_series'
series = pd.Series([0, 1, 0], name=column_name)
series = series.astype('object')
ltypes = {
column_name: Integer,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
dt = dt.set_types(logical_types={column_name: Double})
assert dt.columns[column_name].logical_type == Double
assert dt.columns[column_name].dtype == Double.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == Double.pandas_dtype
def test_sets_datetime64_dtype_on_init():
column_name = 'test_series'
series_list = [
pd.Series(['2020-01-01', '2020-01-02', '2020-01-03'], name=column_name),
pd.Series(['2020-01-01', None, '2020-01-03'], name=column_name),
pd.Series(['2020-01-01', np.nan, '2020-01-03'], name=column_name),
pd.Series(['2020-01-01', pd.NA, '2020-01-03'], name=column_name),
pd.Series(['2020-01-01', pd.NaT, '2020-01-03'], name=column_name),
]
logical_type = Datetime
for series in series_list:
series = series.astype('object')
ltypes = {
column_name: logical_type,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_datetime_dtype_on_update():
column_name = 'test_series'
series = pd.Series(['2020-01-01', '2020-01-02', '2020-01-03'], name=column_name)
series = series.astype('object')
ltypes = {
column_name: NaturalLanguage,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
dt = dt.set_types(logical_types={column_name: Datetime})
assert dt.columns[column_name].logical_type == Datetime
assert dt.columns[column_name].dtype == Datetime.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == Datetime.pandas_dtype
def test_invalid_dtype_casting():
column_name = 'test_series'
# Cannot cast a column with pd.NA to Double
series = pd.Series([1.1, pd.NA, 3], name=column_name)
ltypes = {
column_name: Double,
}
err_msg = 'Error converting datatype for column test_series from type object to type ' \
'float64. Please confirm the underlying data is consistent with logical type Double.'
with pytest.raises(TypeError, match=err_msg):
DataTable(pd.DataFrame(series), logical_types=ltypes)
# Cannot cast Datetime to Double
series = pd.Series(['2020-01-01', '2020-01-02', '2020-01-03'], name=column_name)
ltypes = {
column_name: Datetime,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
err_msg = 'Error converting datatype for column test_series from type datetime64[ns] to type ' \
'float64. Please confirm the underlying data is consistent with logical type Double.'
with pytest.raises(TypeError, match=re.escape(err_msg)):
dt.set_types(logical_types={column_name: Double})
# Cannot cast invalid strings to integers
series = pd.Series(['1', 'two', '3'], name=column_name)
ltypes = {
column_name: Integer,
}
err_msg = 'Error converting datatype for column test_series from type object to type ' \
'Int64. Please confirm the underlying data is consistent with logical type Integer.'
with pytest.raises(TypeError, match=err_msg):
DataTable(pd.DataFrame(series), logical_types=ltypes)
def test_int_dtype_inference_on_init():
df = pd.DataFrame({
'ints_no_nans': pd.Series([1, 2]),
'ints_nan': pd.Series([1, np.nan]),
'ints_NA': pd.Series([1, pd.NA]),
'ints_NA_specified': pd.Series([1, pd.NA], dtype='Int64')})
df_from_dt = DataTable(df).to_dataframe()
assert df_from_dt['ints_no_nans'].dtype == 'Int64'
assert df_from_dt['ints_nan'].dtype == 'float64'
assert df_from_dt['ints_NA'].dtype == 'category'
assert df_from_dt['ints_NA_specified'].dtype == 'Int64'
def test_bool_dtype_inference_on_init():
df = pd.DataFrame({
'bools_no_nans': pd.Series([True, False]),
        'bool_nan': pd.Series([True, np.nan]),
'''
This sample shows how to set Column Format with DataFrame and from_df, to_df functions.
Make sure you've installed pandas. To install the module,
open the Script Window (Shift+Alt+3), type the following and press Enter:
pip install pandas
The following will check and install:
pip -chk pandas
'''
import originpro as op
import pandas as pd
# Create a dataframe to fill the sheet
df = pd.DataFrame({
'Date': ['10/25/2018','02/21/2019','04/01/2020'],
'Gender':['Male','Male','Female'],
'Score': [75.5, 86.7, 91],
})
df['Date'] = pd.to_datetime(df['Date'])
df['Gender'] = pd.Categorical(df['Gender'])
import sys
sys.path.append("..")
# ignore pandas futurewarning
import warnings
warnings.simplefilter(action="ignore", category=FutureWarning)
import os
import pytest
import pandas as pd
from heritageconnector.entity_matching import reconciler, lookup
@pytest.fixture
def fixt():
    data = pd.DataFrame.from_dict({"item_name": ["photograph", "camera", "model"]})
import sys
import math
import pandas as pd
import numpy as np
num = 0
data1 = []
data2 = []
with open(sys.argv[1]) as fp:
num = int(fp.readline())
for i in range(num):
line = fp.readline()
s = line.split()
data1.append(float(s[0]))
data2.append(float(s[1]))
s1 = pd.Series(data1)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import polling2
import requests
import json
from web3 import Web3
import pandas as pd
from decouple import config
from datetime import datetime
import logging
from collections import defaultdict
import time
from sqlalchemy import create_engine, desc, inspect  # inspect is used by query_to_dict() below
from sqlalchemy.orm import sessionmaker
from models import EdenBlock, Epoch, Base, Distribution, DistributionBalance
from apscheduler.schedulers.background import BackgroundScheduler
INFURA_ENDPOINT = config('INFURA_ENDPOINT')
PSQL_ENDPOINT = config('PSQL_ENDPOINT')
engine = create_engine(PSQL_ENDPOINT)
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
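# Map of query names to the local GraphQL files loaded by fetch_query() below.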
query_dict = {
'block': 'block.graphql',
'distribution': 'distribution.graphql',
'block_lookup': 'block_lookup.graphql',
'epoch_latest': 'epoch_latest.graphql',
'epoch': 'epoch.graphql'
}
eden_governance_api = 'https://api.thegraph.com/subgraphs/name/eden-network/governance'
eden_distribution_api = 'https://api.thegraph.com/subgraphs/name/eden-network/distribution'
eden_network_api = 'https://api.thegraph.com/subgraphs/name/eden-network/network'
def query_to_dict(rset):
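    # Flatten a SQLAlchemy result set into {column_name: [values, ...]} using
    # sqlalchemy.inspect to read each mapped instance's attributes.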
result = defaultdict(list)
for obj in rset:
instance = inspect(obj)
for key, x in instance.attrs.items():
result[key].append(x.value)
return result
def get_web3_provider():
infura_endpoint = INFURA_ENDPOINT
my_provider = Web3.HTTPProvider(infura_endpoint)
w3 = Web3(my_provider)
return w3
def get_latest_eth_block():
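    # Return the chain head block number if it is ahead of the last block
    # stored in the database, otherwise None (nothing new to index).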
eden_db_last_block = get_latest_eden_block_db()
w3 = get_web3_provider()
latest_eth_block = w3.eth.get_block('latest')['number']
if latest_eth_block > eden_db_last_block:
return latest_eth_block
else:
return None
def get_latest_eden_block_db():
eden_db_last_block = session.query(EdenBlock).order_by(desc(EdenBlock.block_number)).limit(1).all()
if eden_db_last_block != []:
eden_db_last_block = eden_db_last_block[0].block_number
else:
eden_db_last_block = 0
return eden_db_last_block
def clean_epoch_entry(epoch_string):
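    # Pull the integer epoch number out of an id of the form '<prefix>+epoch<N>'.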
epoch_number = int(epoch_string.split('+')[1].replace('epoch', ''))
return int(epoch_number)
def get_latest_distribution_number():
eden_db_last_number_query = session.query(Distribution).order_by(desc(Distribution.distribution_number)).limit(1).all()
if eden_db_last_number_query != []:
eden_last_number = eden_db_last_number_query[0].distribution_number
return eden_last_number
else:
return 0
def ipfs_link_cleanup(raw_uri):
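    # Rewrite an ipfs:// URI into a fetchable https://ipfs.io gateway URL.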
final_ipfs_link = "https://ipfs.io/ipfs/" + raw_uri.split('//')[1]
return final_ipfs_link
def graph_query_call(api, query, variables=None):
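    # POST a GraphQL query (with optional variables) and return the parsed JSON response.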
request = requests.post(api, json={'query': query, 'variables': variables})
if request.status_code == 200:
return request.json()
else:
        raise Exception('Query failed. return code is {}. {}'.format(request.status_code, query))
def fetch_query(query):
query_file = query_dict.get(query)
with open(query_file, 'r') as file:
data = file.read()
return data
def get_epoch_number(block_number):
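    # Map a block number to the epoch whose [start, end] block range contains it,
    # falling back to the latest known epoch if no stored range matches.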
epoch_number_query = session.query(Epoch).filter(block_number >= Epoch.start_block_number, block_number <= Epoch.end_block_number).limit(1).all()
if epoch_number_query != []:
epoch_number = epoch_number_query[0].epoch_number
return epoch_number
else:
latest_epoch = get_latest_epoch()
return latest_epoch
def get_latest_epoch():
query = fetch_query('epoch_latest')
latest_epoch_result = graph_query_call(eden_governance_api, query)
latest_epoch_id = latest_epoch_result['data']['epoches'][0]['id']
latest_epoch_number = clean_epoch_entry(latest_epoch_id)
return latest_epoch_number
def get_block_number_from_id(block_id):
query = fetch_query('block_lookup')
variables = {'block_id': block_id}
block_result = graph_query_call(eden_governance_api, query, variables)
eden_block_number = int(block_result['data']['block']['number'])
return eden_block_number
def eden_block_call():
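    # Page through the governance subgraph in ascending block number until the
    # current chain head is reached, then insert any blocks newer than what the
    # database already holds (skipping ids that are already present).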
last_block = 0
last_block_current = get_latest_eth_block()
eden_blocks_df = pd.DataFrame()
while True:
query = fetch_query('block')
variables = {'number_gt': last_block}
block_result = graph_query_call(eden_governance_api, query, variables)
eden_blocks_df_temp = pd.DataFrame.from_dict(block_result['data']['blocks'])
eden_blocks_df = eden_blocks_df.append(eden_blocks_df_temp)
last_block = int(eden_blocks_df.iloc[-1]['number'])
if last_block >= last_block_current:
break
eden_blocks_df = eden_blocks_df.drop_duplicates()
logging.info('Eden Blocks Pulled To DataFrame')
logging.info('Adding Eden Blocks To Database Now')
eden_last_block_db = get_latest_eden_block_db()
eden_blocks_df = eden_blocks_df[pd.to_numeric(eden_blocks_df['number']) >= eden_last_block_db]
for index, row in eden_blocks_df.iterrows():
block_id_query = session.query(EdenBlock).filter(EdenBlock.id==row['id']).limit(1).all() or None
if block_id_query is None:
epoch_number = get_epoch_number(row['number'])
eden_block_entry = EdenBlock(
id = row['id'],
author = row['author'],
difficulty = row['difficulty'],
gas_limit = row['gasLimit'],
gas_used = row['gasUsed'],
block_hash = row['hash'],
block_number = row['number'],
parent_hash = row['parentHash'],
uncle_hash = row['unclesHash'],
size = row['size'],
state_root = row['stateRoot'],
timestamp = datetime.fromtimestamp(int(row['timestamp'])),
total_difficulty = row['totalDifficulty'],
transactions_root = row['transactionsRoot'],
receipts_root = row['receiptsRoot'],
epoch_number = epoch_number
)
session.add(eden_block_entry)
session.commit()
logging.info('Eden Blocks Added To Database Now')
def eden_epoch_call():
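    # Fetch all epochs from the governance subgraph into a DataFrame.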
eden_epochs_df = pd.DataFrame()
query = fetch_query('epoch')
epoch_result = graph_query_call(eden_governance_api, query)
    eden_epochs_df = pd.DataFrame.from_dict(epoch_result['data']['epoches'])
"""
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from datetime import datetime
from io import StringIO
import itertools
from operator import methodcaller
import os
from pathlib import Path
import re
from shutil import get_terminal_size
import sys
import textwrap
import dateutil
import numpy as np
import pytest
import pytz
from pandas.compat import (
IS64,
is_platform_windows,
)
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
get_option,
option_context,
read_csv,
reset_option,
set_option,
)
import pandas._testing as tm
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
use_32bit_repr = is_platform_windows() or not IS64
@pytest.fixture(params=["string", "pathlike", "buffer"])
def filepath_or_buffer_id(request):
"""
A fixture yielding test ids for filepath_or_buffer testing.
"""
return request.param
@pytest.fixture
def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
"""
A fixture yielding a string representing a filepath, a path-like object
and a StringIO buffer. Also checks that buffer is not closed.
"""
if filepath_or_buffer_id == "buffer":
buf = StringIO()
yield buf
assert not buf.closed
else:
assert isinstance(tmp_path, Path)
if filepath_or_buffer_id == "pathlike":
yield tmp_path / "foo"
else:
yield str(tmp_path / "foo")
@pytest.fixture
def assert_filepath_or_buffer_equals(
filepath_or_buffer, filepath_or_buffer_id, encoding
):
"""
Assertion helper for checking filepath_or_buffer.
"""
def _assert_filepath_or_buffer_equals(expected):
if filepath_or_buffer_id == "string":
with open(filepath_or_buffer, encoding=encoding) as f:
result = f.read()
elif filepath_or_buffer_id == "pathlike":
result = filepath_or_buffer.read_text(encoding=encoding)
elif filepath_or_buffer_id == "buffer":
result = filepath_or_buffer.getvalue()
assert result == expected
return _assert_filepath_or_buffer_equals
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def has_info_repr(df):
r = repr(df)
c1 = r.split("\n")[0].startswith("<class")
    c2 = r.split("\n")[0].startswith(r"&lt;class")  # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
# 1. <class>
# 2. Index
# 3. Columns
# 4. dtype
# 5. memory usage
# 6. trailing newline
nv = len(r.split("\n")) == 6
return has_info and nv
def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line == "...")[0][0]
except IndexError:
return False
# Make sure each row has this ... in the same place
r = repr(df)
for ix, l in enumerate(r.splitlines()):
if not r.split()[cand_col] == "...":
return False
return True
def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
if re.match(r"^[\.\ ]+$", row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(df):
return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
return has_horizontally_truncated_repr(df) and has_vertically_truncated_repr(df)
def has_expanded_repr(df):
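    # pandas wraps wide frames across multiple blocks, ending lines with a
    # backslash; treat the presence of such a line as an "expanded" repr.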
r = repr(df)
for line in r.split("\n"):
if line.endswith("\\"):
return True
return False
@pytest.mark.filterwarnings("ignore::FutureWarning:.*format")
class TestDataFrameFormatting:
def test_eng_float_formatter(self, float_frame):
df = float_frame
df.loc[5] = 0
fmt.set_eng_float_format()
repr(df)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(df)
fmt.set_eng_float_format(accuracy=0)
repr(df)
tm.reset_display_options()
def test_show_null_counts(self):
df = DataFrame(1, columns=range(10), index=range(10))
df.iloc[1, 1] = np.nan
def check(show_counts, result):
buf = StringIO()
df.info(buf=buf, show_counts=show_counts)
assert ("non-null" in buf.getvalue()) is result
with option_context(
"display.max_info_rows", 20, "display.max_info_columns", 20
):
check(None, True)
check(True, True)
check(False, False)
with option_context("display.max_info_rows", 5, "display.max_info_columns", 5):
check(None, False)
check(True, False)
check(False, False)
# GH37999
with tm.assert_produces_warning(
FutureWarning, match="null_counts is deprecated.+"
):
buf = StringIO()
df.info(buf=buf, null_counts=True)
assert "non-null" in buf.getvalue()
# GH37999
with pytest.raises(ValueError, match=r"null_counts used with show_counts.+"):
df.info(null_counts=True, show_counts=True)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame(
{
"A": np.random.randn(10),
"B": [
tm.rands(np.random.randint(max_len - 1, max_len + 1))
for i in range(10)
],
}
)
r = repr(df)
r = r[r.find("\n") + 1 :]
adj = fmt.get_adjustment()
for line, value in zip(r.split("\n"), df["B"]):
if adj.len(value) + 1 > max_len:
assert "..." in line
else:
assert "..." not in line
with option_context("display.max_colwidth", 999999):
assert "..." not in repr(df)
with option_context("display.max_colwidth", max_len + 2):
assert "..." not in repr(df)
def test_repr_deprecation_negative_int(self):
# TODO(2.0): remove in future version after deprecation cycle
# Non-regression test for:
# https://github.com/pandas-dev/pandas/issues/31532
width = get_option("display.max_colwidth")
with tm.assert_produces_warning(FutureWarning):
set_option("display.max_colwidth", -1)
set_option("display.max_colwidth", width)
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
reset_option("display.chop_threshold") # default None
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
with option_context("display.chop_threshold", 0.2):
assert repr(df) == " 0 1\n0 0.0 0.5\n1 0.5 0.0"
with option_context("display.chop_threshold", 0.6):
assert repr(df) == " 0 1\n0 0.0 0.0\n1 0.0 0.0"
with option_context("display.chop_threshold", None):
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
df = DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 -1.000000e-11\n"
"2 30.0 2.000000e-09\n"
"3 40.0 -2.000000e-11"
)
with option_context("display.chop_threshold", 1e-8):
assert repr(df) == (
" 0 1\n"
"0 10.0 0.000000e+00\n"
"1 20.0 0.000000e+00\n"
"2 30.0 0.000000e+00\n"
"3 40.0 0.000000e+00"
)
with option_context("display.chop_threshold", 5e-11):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 0.000000e+00\n"
"2 30.0 2.000000e-09\n"
"3 40.0 0.000000e+00"
)
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items", 2000):
assert len(printing.pprint_thing(list(range(1000)))) > 1000
with option_context("display.max_seq_items", 5):
assert len(printing.pprint_thing(list(range(1000)))) < 100
with option_context("display.max_seq_items", 1):
assert len(printing.pprint_thing(list(range(1000)))) < 9
def test_repr_set(self):
assert printing.pprint_thing({1}) == "{1}"
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather than
# stylized
idx = Index(["a", "b"])
res = eval("pd." + repr(idx))
tm.assert_series_equal(Series(res), Series(idx))
def test_repr_should_return_str(self):
# https://docs.python.org/3/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"]
cols = ["\u03c8"]
df = DataFrame(data, columns=cols, index=index1)
assert type(df.__repr__()) == str # both py2 / 3
def test_repr_no_backslash(self):
with option_context("mode.sim_interactive", True):
df = DataFrame(np.random.randn(10, 4))
assert "\\" not in repr(df)
def test_expand_frame_repr(self):
df_small = DataFrame("hello", index=[0], columns=[0])
df_wide = DataFrame("hello", index=[0], columns=range(10))
df_tall = DataFrame("hello", index=range(30), columns=range(5))
with option_context("mode.sim_interactive", True):
with option_context(
"display.max_columns",
10,
"display.width",
20,
"display.max_rows",
20,
"display.show_dimensions",
True,
):
with option_context("display.expand_frame_repr", True):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_truncated_repr(df_wide)
assert has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert has_expanded_repr(df_tall)
with option_context("display.expand_frame_repr", False):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_horizontally_truncated_repr(df_wide)
assert not has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert not has_expanded_repr(df_tall)
def test_repr_non_interactive(self):
# in non interactive mode, there can be no dependency on the
# result of terminal auto size detection
df = DataFrame("hello", index=range(1000), columns=range(5))
with option_context(
"mode.sim_interactive", False, "display.width", 0, "display.max_rows", 5000
):
assert not has_truncated_repr(df)
assert not has_expanded_repr(df)
def test_repr_truncates_terminal_size(self, monkeypatch):
# see gh-21180
terminal_size = (118, 96)
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
index = range(5)
columns = MultiIndex.from_tuples(
[
("This is a long title with > 37 chars.", "cat"),
("This is a loooooonger title with > 43 chars.", "dog"),
]
)
df = DataFrame(1, index=index, columns=columns)
result = repr(df)
h1, h2 = result.split("\n")[:2]
assert "long" in h1
assert "loooooonger" in h1
assert "cat" in h2
assert "dog" in h2
# regular columns
df2 = DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
result = repr(df2)
assert df2.columns[0] in result.split("\n")[0]
def test_repr_truncates_terminal_size_full(self, monkeypatch):
# GH 22984 ensure entire window is filled
terminal_size = (80, 24)
df = DataFrame(np.random.rand(1, 7))
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
assert "..." not in str(df)
def test_repr_truncation_column_size(self):
# dataframe with last column very wide -> check it is not used to
# determine size of truncation (...) column
df = DataFrame(
{
"a": [108480, 30830],
"b": [12345, 12345],
"c": [12345, 12345],
"d": [12345, 12345],
"e": ["a" * 50] * 2,
}
)
assert "..." in str(df)
assert " ... " not in str(df)
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
pytest.skip(f"terminal size too small, {term_width} x {term_height}")
def mkframe(n):
index = [f"{i:05d}" for i in range(n)]
return DataFrame(0, index, index)
df6 = mkframe(6)
df10 = mkframe(10)
with option_context("mode.sim_interactive", True):
with option_context("display.width", term_width * 2):
with option_context("display.max_rows", 5, "display.max_columns", 5):
assert not has_expanded_repr(mkframe(4))
assert not has_expanded_repr(mkframe(5))
assert not has_expanded_repr(df6)
assert has_doubly_truncated_repr(df6)
with option_context("display.max_rows", 20, "display.max_columns", 10):
                # Outside max_columns boundary, but no extending
# since not exceeding width
assert not has_expanded_repr(df6)
assert not has_truncated_repr(df6)
with option_context("display.max_rows", 9, "display.max_columns", 10):
                # exceeding vertical bounds cannot result in an expanded repr
assert not has_expanded_repr(df10)
assert has_vertically_truncated_repr(df10)
# width=None in terminal, auto detection
with option_context(
"display.max_columns",
100,
"display.max_rows",
term_width * 20,
"display.width",
None,
):
df = mkframe((term_width // 7) - 2)
assert not has_expanded_repr(df)
df = mkframe((term_width // 7) + 2)
printing.pprint_thing(df._repr_fits_horizontal_())
assert has_expanded_repr(df)
def test_repr_min_rows(self):
df = DataFrame({"a": range(20)})
# default setting no truncation even if above min_rows
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
df = DataFrame({"a": range(61)})
# default of max_rows 60 triggers truncation if above
assert ".." in repr(df)
assert ".." in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(df)
assert "2 " not in repr(df)
assert "..." in df._repr_html_()
assert "<td>2</td>" not in df._repr_html_()
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(df)
assert "<td>5</td>" in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 12):
# when set value higher as max_rows, use the minimum
assert "5 5" not in repr(df)
assert "<td>5</td>" not in df._repr_html_()
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
def test_str_max_colwidth(self):
# GH 7856
df = DataFrame(
[
{
"a": "foo",
"b": "bar",
"c": "uncomfortably long line with lots of stuff",
"d": 1,
},
{"a": "foo", "b": "bar", "c": "stuff", "d": 1},
]
)
df.set_index(["a", "b", "c"])
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably long line with lots of stuff 1\n"
"1 foo bar stuff 1"
)
with option_context("max_colwidth", 20):
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably lo... 1\n"
"1 foo bar stuff 1"
)
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
fac = 1.05 # Arbitrary large factor to exceed term width
cols = range(int(term_width * fac))
index = range(10)
df = DataFrame(index=index, columns=cols)
with option_context("mode.sim_interactive", True):
with option_context("display.max_rows", None):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", 0):
# Truncate with auto detection.
assert has_horizontally_truncated_repr(df)
index = range(int(term_height * fac))
df = DataFrame(index=index, columns=cols)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
# Truncate vertically
assert has_vertically_truncated_repr(df)
with option_context("display.max_rows", None):
with option_context("display.max_columns", 0):
assert has_horizontally_truncated_repr(df)
def test_to_string_repr_unicode(self):
buf = StringIO()
unicode_values = ["\u03c3"] * 10
unicode_values = np.array(unicode_values, dtype=object)
df = DataFrame({"unicode": unicode_values})
df.to_string(col_space=10, buf=buf)
# it works!
repr(df)
idx = Index(["abc", "\u03c3a", "aegdvg"])
ser = Series(np.random.randn(len(idx)), idx)
rs = repr(ser).split("\n")
line_len = len(rs[0])
for line in rs[1:]:
try:
line = line.decode(get_option("display.encoding"))
except AttributeError:
pass
if not line.startswith("dtype:"):
assert len(line) == line_len
# it works even if sys.stdin in None
_stdin = sys.stdin
try:
sys.stdin = None
repr(df)
finally:
sys.stdin = _stdin
def test_east_asian_unicode_false(self):
# not aligned properly because of east asian width
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あああああ あ\n"
"bb い いいい\nc う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\nあああ あああああ あ\n"
"いいいいいい い いいい\nうう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n0 あああああ ... さ\n"
".. ... ... ...\n3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\nあああ あああああ ... さ\n"
".. ... ... ...\naaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
def test_east_asian_unicode_true(self):
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\n"
"a あああああ あ\n"
"bb い いいい\n"
"c う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\n"
"あああ あああああ あ\n"
"いいいいいい い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n"
"0 あああああ ... さ\n"
".. ... ... ...\n"
"3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\n"
"あああ あああああ ... さ\n"
"... ... ... ...\n"
"aaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
# ambiguous unicode
df = DataFrame(
{"b": ["あ", "いいい", "¡¡", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "¡¡¡"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c ¡¡ 33333\n"
"¡¡¡ ええええええ 4"
)
assert repr(df) == expected
def test_to_string_buffer_all_unicode(self):
buf = StringIO()
empty = DataFrame({"c/\u03c3": Series(dtype=object)})
nonempty = DataFrame({"c/\u03c3": Series([1, 2, 3])})
print(empty, file=buf)
print(nonempty, file=buf)
# this should work
buf.getvalue()
def test_to_string_with_col_space(self):
df = DataFrame(np.random.random(size=(1, 3)))
c10 = len(df.to_string(col_space=10).split("\n")[1])
c20 = len(df.to_string(col_space=20).split("\n")[1])
c30 = len(df.to_string(col_space=30).split("\n")[1])
assert c10 < c20 < c30
# GH 8230
# col_space wasn't being applied with header=False
with_header = df.to_string(col_space=20)
with_header_row1 = with_header.splitlines()[1]
no_header = df.to_string(col_space=20, header=False)
assert len(with_header_row1) == len(no_header)
def test_to_string_with_column_specific_col_space_raises(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
msg = (
"Col_space length\\(\\d+\\) should match "
"DataFrame number of columns\\(\\d+\\)"
)
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40])
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40, 50, 60])
msg = "unknown column"
with pytest.raises(ValueError, match=msg):
df.to_string(col_space={"a": "foo", "b": 23, "d": 34})
def test_to_string_with_column_specific_col_space(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
result = df.to_string(col_space={"a": 10, "b": 11, "c": 12})
# 3 separating space + each col_space for (id, a, b, c)
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
result = df.to_string(col_space=[10, 11, 12])
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
def test_to_string_truncate_indices(self):
for index in [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeIntIndex,
tm.makeDateIndex,
tm.makePeriodIndex,
]:
for column in [tm.makeStringIndex]:
for h in [10, 20]:
for w in [10, 20]:
with option_context("display.expand_frame_repr", False):
df = DataFrame(index=index(h), columns=column(w))
with option_context("display.max_rows", 15):
if h == 20:
assert has_vertically_truncated_repr(df)
else:
assert not has_vertically_truncated_repr(df)
with option_context("display.max_columns", 15):
if w == 20:
assert has_horizontally_truncated_repr(df)
else:
assert not (has_horizontally_truncated_repr(df))
with option_context(
"display.max_rows", 15, "display.max_columns", 15
):
if h == 20 and w == 20:
assert has_doubly_truncated_repr(df)
else:
assert not has_doubly_truncated_repr(df)
def test_to_string_truncate_multilevel(self):
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = DataFrame(index=arrays, columns=arrays)
with option_context("display.max_rows", 7, "display.max_columns", 7):
assert has_doubly_truncated_repr(df)
def test_truncate_with_different_dtypes(self):
# 11594, 12045
# when truncated the dtypes of the splits can differ
# 11594
import datetime
s = Series(
[datetime.datetime(2012, 1, 1)] * 10
+ [datetime.datetime(1012, 1, 2)]
+ [datetime.datetime(2012, 1, 3)] * 10
)
with option_context("display.max_rows", 8):
result = str(s)
assert "object" in result
# 12045
df = DataFrame({"text": ["some words"] + [None] * 9})
with option_context("display.max_rows", 8, "display.max_columns", 3):
result = str(df)
assert "None" in result
assert "NaN" not in result
def test_truncate_with_different_dtypes_multiindex(self):
# GH#13000
df = DataFrame({"Vals": range(100)})
frame = pd.concat([df], keys=["Sweep"], names=["Sweep", "Index"])
result = repr(frame)
result2 = repr(frame.iloc[:5])
assert result.startswith(result2)
def test_datetimelike_frame(self):
# GH 12211
df = DataFrame({"date": [Timestamp("20130101").tz_localize("UTC")] + [NaT] * 5})
with option_context("display.max_rows", 5):
result = str(df)
assert "2013-01-01 00:00:00+00:00" in result
assert "NaT" in result
assert "..." in result
assert "[6 rows x 1 columns]" in result
dts = [Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [NaT] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00-05:00 1\n"
"1 2011-01-01 00:00:00-05:00 2\n"
".. ... ..\n"
"8 NaT 9\n"
"9 NaT 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [NaT] * 5 + [Timestamp("2011-01-01", tz="US/Eastern")] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 NaT 1\n"
"1 NaT 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [
Timestamp("2011-01-01", tz="US/Eastern")
] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00+09:00 1\n"
"1 2011-01-01 00:00:00+09:00 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
df = DataFrame({"A": date_range(start=start_date, freq="D", periods=5)})
result = str(df)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
df = DataFrame({"A": range(5)}, index=dti)
result = str(df.index)
assert start_date in result
def test_nonunicode_nonascii_alignment(self):
df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
rep_str = df.to_string()
lines = rep_str.split("\n")
assert len(lines[1]) == len(lines[2])
def test_unicode_problem_decoding_as_ascii(self):
dm = DataFrame({"c/\u03c3": Series({"test": np.nan})})
str(dm.to_string())
def test_string_repr_encoding(self, datapath):
filepath = datapath("io", "parser", "data", "unicode_series.csv")
df = read_csv(filepath, header=None, encoding="latin1")
repr(df)
repr(df[1])
def test_repr_corner(self):
# representing infs poses no problems
df = DataFrame({"foo": [-np.inf, np.inf]})
repr(df)
def test_frame_info_encoding(self):
index = ["'Til There Was You (1997)", "ldum klaka (Cold Fever) (1994)"]
fmt.set_option("display.max_rows", 1)
df = DataFrame(columns=["a", "b", "c"], index=index)
repr(df)
repr(df.T)
fmt.set_option("display.max_rows", 200)
def test_wide_repr(self):
with option_context(
"mode.sim_interactive",
True,
"display.show_dimensions",
True,
"display.max_columns",
20,
):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
assert f"10 rows x {max_cols - 1} columns" in rep_str
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 120):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_columns(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
df = DataFrame(
np.random.randn(5, 3), columns=["a" * 90, "b" * 90, "c" * 90]
)
rep_str = repr(df)
assert len(rep_str.splitlines()) == 20
def test_wide_repr_named(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
df.index.name = "DataFrame Index"
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "DataFrame Index" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)), index=midx)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "Level 0 Level 1" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex_cols(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
mcols = MultiIndex.from_arrays(tm.rands_array(3, size=(2, max_cols - 1)))
df = DataFrame(
tm.rands_array(25, (10, max_cols - 1)), index=midx, columns=mcols
)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150, "display.max_columns", 20):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_unicode(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_long_columns(self):
with option_context("mode.sim_interactive", True):
df = DataFrame({"a": ["a" * 30, "b" * 30], "b": ["c" * 70, "d" * 80]})
result = repr(df)
assert "ccccc" in result
assert "ddddd" in result
def test_long_series(self):
n = 1000
s = Series(
np.random.randint(-50, 50, n),
index=[f"s{x:04d}" for x in range(n)],
dtype="int64",
)
import re
str_rep = str(s)
nmatches = len(re.findall("dtype", str_rep))
assert nmatches == 1
def test_index_with_nan(self):
# GH 2850
df = DataFrame(
{
"id1": {0: "1a3", 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: "78d", 1: "79d"},
"value": {0: 123, 1: 64},
}
)
# multi-index
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# index
y = df.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nd67 9h4 79d 64"
)
assert result == expected
# with append (this failed in 0.12)
y = df.set_index(["id1", "id2"]).set_index("id3", append=True)
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# all-nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nNaN 9h4 79d 64"
)
assert result == expected
# partial nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index(["id2", "id3"])
result = y.to_string()
expected = (
" id1 value\nid2 id3 \n"
"NaN 78d 1a3 123\n 79d 9h4 64"
)
assert result == expected
df = DataFrame(
{
"id1": {0: np.nan, 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: np.nan, 1: "79d"},
"value": {0: 123, 1: 64},
}
)
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"NaN NaN NaN 123\n9h4 d67 79d 64"
)
assert result == expected
def test_to_string(self):
# big mixed
biggie = DataFrame(
{"A": np.random.randn(200), "B": tm.makeStringIndex(200)},
index=np.arange(200),
)
biggie.loc[:20, "A"] = np.nan
biggie.loc[:20, "B"] = np.nan
s = biggie.to_string()
buf = StringIO()
retval = biggie.to_string(buf=buf)
assert retval is None
assert buf.getvalue() == s
assert isinstance(s, str)
# print in right order
result = biggie.to_string(
columns=["B", "A"], col_space=17, float_format="%.5f".__mod__
)
lines = result.split("\n")
header = lines[0].strip().split()
joined = "\n".join([re.sub(r"\s+", " ", x).strip() for x in lines[1:]])
recons = read_csv(StringIO(joined), names=header, header=None, sep=" ")
tm.assert_series_equal(recons["B"], biggie["B"])
assert recons["A"].count() == biggie["A"].count()
assert (np.abs(recons["A"].dropna() - biggie["A"].dropna()) < 0.1).all()
# expected = ['B', 'A']
# assert header == expected
result = biggie.to_string(columns=["A"], col_space=17)
header = result.split("\n")[0].strip().split()
expected = ["A"]
assert header == expected
biggie.to_string(columns=["B", "A"], formatters={"A": lambda x: f"{x:.1f}"})
biggie.to_string(columns=["B", "A"], float_format=str)
biggie.to_string(columns=["B", "A"], col_space=12, float_format=str)
frame = DataFrame(index=np.arange(200))
frame.to_string()
def test_to_string_no_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=False)
expected = "0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
def test_to_string_specified_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=["X", "Y"])
expected = " X Y\n0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
msg = "Writing 2 cols but got 1 aliases"
with pytest.raises(ValueError, match=msg):
df.to_string(header=["X"])
def test_to_string_no_index(self):
# GH 16839, GH 13032
df = DataFrame({"x": [11, 22], "y": [33, -44], "z": ["AAA", " "]})
df_s = df.to_string(index=False)
# Leading space is expected for positive numbers.
expected = " x y z\n11 33 AAA\n22 -44 "
assert df_s == expected
df_s = df[["y", "x", "z"]].to_string(index=False)
expected = " y x z\n 33 11 AAA\n-44 22 "
assert df_s == expected
def test_to_string_line_width_no_index(self):
# GH 13998, GH 22505
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 1 \n 2 \n 3 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n11 \n22 \n33 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 11 \n 22 \n-33 \n\n y \n 4 \n 5 \n-6 "
assert df_s == expected
def test_to_string_float_formatting(self):
tm.reset_display_options()
fmt.set_option(
"display.precision",
5,
"display.column_space",
12,
"display.notebook_repr_html",
False,
)
df = DataFrame(
{"x": [0, 0.25, 3456.000, 12e45, 1.64e6, 1.7e8, 1.253456, np.pi, -1e6]}
)
df_s = df.to_string()
if _three_digit_exp():
expected = (
" x\n0 0.00000e+000\n1 2.50000e-001\n"
"2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n"
"5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n"
"8 -1.00000e+006"
)
else:
expected = (
" x\n0 0.00000e+00\n1 2.50000e-01\n"
"2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n"
"5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n"
"8 -1.00000e+06"
)
assert df_s == expected
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string()
expected = " x\n0 3234.000\n1 0.253"
assert df_s == expected
tm.reset_display_options()
assert get_option("display.precision") == 6
df = DataFrame({"x": [1e9, 0.2512]})
df_s = df.to_string()
if _three_digit_exp():
expected = " x\n0 1.000000e+009\n1 2.512000e-001"
else:
expected = " x\n0 1.000000e+09\n1 2.512000e-01"
assert df_s == expected
def test_to_string_float_format_no_fixed_width(self):
# GH 21625
df = DataFrame({"x": [0.19999]})
expected = " x\n0 0.200"
assert df.to_string(float_format="%.3f") == expected
# GH 22270
df = DataFrame({"x": [100.0]})
expected = " x\n0 100"
assert df.to_string(float_format="%.0f") == expected
def test_to_string_small_float_values(self):
df = DataFrame({"a": [1.5, 1e-17, -5.5e-7]})
result = df.to_string()
# sadness per above
if _three_digit_exp():
expected = (
" a\n"
"0 1.500000e+000\n"
"1 1.000000e-017\n"
"2 -5.500000e-007"
)
else:
expected = (
" a\n"
"0 1.500000e+00\n"
"1 1.000000e-17\n"
"2 -5.500000e-07"
)
assert result == expected
# but not all exactly zero
df = df * 0
result = df.to_string()
expected = " 0\n0 0\n1 0\n2 -0"
def test_to_string_float_index(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.arange(5), index=index)
result = df.to_string()
expected = " 0\n1.5 0\n2.0 1\n3.0 2\n4.0 3\n5.0 4"
assert result == expected
def test_to_string_complex_float_formatting(self):
# GH #25514, 25745
with option_context("display.precision", 5):
df = DataFrame(
{
"x": [
(0.4467846931321966 + 0.0715185102060818j),
(0.2739442392974528 + 0.23515228785438969j),
(0.26974928742135185 + 0.3250604054898979j),
(-1j),
]
}
)
result = df.to_string()
expected = (
" x\n0 0.44678+0.07152j\n"
"1 0.27394+0.23515j\n"
"2 0.26975+0.32506j\n"
"3 -0.00000-1.00000j"
)
assert result == expected
def test_to_string_ascii_error(self):
data = [
(
"0 ",
" .gitignore ",
" 5 ",
" \xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2",
)
]
df = DataFrame(data)
# it works!
repr(df)
def test_to_string_int_formatting(self):
df = DataFrame({"x": [-15, 20, 25, -35]})
assert issubclass(df["x"].dtype.type, np.integer)
output = df.to_string()
expected = " x\n0 -15\n1 20\n2 25\n3 -35"
assert output == expected
def test_to_string_index_formatter(self):
df = DataFrame([range(5), range(5, 10), range(10, 15)])
rs = df.to_string(formatters={"__index__": lambda x: "abc"[x]})
xp = """\
0 1 2 3 4
a 0 1 2 3 4
b 5 6 7 8 9
c 10 11 12 13 14\
"""
assert rs == xp
def test_to_string_left_justify_cols(self):
tm.reset_display_options()
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string(justify="left")
expected = " x \n0 3234.000\n1 0.253"
assert df_s == expected
def test_to_string_format_na(self):
tm.reset_display_options()
df = DataFrame(
{
"A": [np.nan, -1, -2.1234, 3, 4],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0000 foo\n"
"2 -2.1234 foooo\n"
"3 3.0000 fooooo\n"
"4 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [np.nan, -1.0, -2.0, 3.0, 4.0],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0 foo\n"
"2 -2.0 foooo\n"
"3 3.0 fooooo\n"
"4 4.0 bar"
)
assert result == expected
def test_to_string_format_inf(self):
# Issue #24861
tm.reset_display_options()
df = DataFrame(
{
"A": [-np.inf, np.inf, -1, -2.1234, 3, 4],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0000 foo\n"
"3 -2.1234 foooo\n"
"4 3.0000 fooooo\n"
"5 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [-np.inf, np.inf, -1.0, -2.0, 3.0, 4.0],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0 foo\n"
"3 -2.0 foooo\n"
"4 3.0 fooooo\n"
"5 4.0 bar"
)
assert result == expected
def test_to_string_decimal(self):
# Issue #23614
df = DataFrame({"A": [6.0, 3.1, 2.2]})
expected = " A\n0 6,0\n1 3,1\n2 2,2"
assert df.to_string(decimal=",") == expected
def test_to_string_line_width(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
s = df.to_string(line_width=80)
assert max(len(line) for line in s.split("\n")) == 80
def test_show_dimensions(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
True,
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
False,
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
with option_context(
"display.max_rows",
2,
"display.max_columns",
2,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
def test_repr_html(self, float_frame):
df = float_frame
df._repr_html_()
fmt.set_option("display.max_rows", 1, "display.max_columns", 1)
df._repr_html_()
fmt.set_option("display.notebook_repr_html", False)
df._repr_html_()
tm.reset_display_options()
df = DataFrame([[1, 2], [3, 4]])
fmt.set_option("display.show_dimensions", True)
assert "2 rows" in df._repr_html_()
fmt.set_option("display.show_dimensions", False)
assert "2 rows" not in df._repr_html_()
tm.reset_display_options()
def test_repr_html_mathjax(self):
df = DataFrame([[1, 2], [3, 4]])
assert "tex2jax_ignore" not in df._repr_html_()
with option_context("display.html.use_mathjax", False):
assert "tex2jax_ignore" in df._repr_html_()
def test_repr_html_wide(self):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
wide_df = DataFrame(tm.rands_array(25, size=(10, max_cols + 1)))
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in wide_df._repr_html_()
def test_repr_html_wide_multiindex_cols(self):
max_cols = 20
mcols = MultiIndex.from_product(
[np.arange(max_cols // 2), ["foo", "bar"]], names=["first", "second"]
)
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols)
reg_repr = df._repr_html_()
assert "..." not in reg_repr
mcols = MultiIndex.from_product(
(np.arange(1 + (max_cols // 2)), ["foo", "bar"]), names=["first", "second"]
)
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols)
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_repr_html_long(self):
with option_context("display.max_rows", 60):
max_rows = get_option("display.max_rows")
h = max_rows - 1
df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
reg_repr = df._repr_html_()
assert ".." not in reg_repr
assert str(41 + max_rows // 2) in reg_repr
h = max_rows + 1
df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
long_repr = df._repr_html_()
assert ".." in long_repr
assert str(41 + max_rows // 2) not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_float(self):
with option_context("display.max_rows", 60):
max_rows = get_option("display.max_rows")
h = max_rows - 1
df = DataFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
reg_repr = df._repr_html_()
assert ".." not in reg_repr
assert f"<td>{40 + h}</td>" in reg_repr
h = max_rows + 1
df = DataFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
long_repr = df._repr_html_()
assert ".." in long_repr
assert "<td>31</td>" not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_long_multiindex(self):
max_rows = 60
max_L1 = max_rows // 2
tuples = list(itertools.product(np.arange(max_L1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(np.random.randn(max_L1 * 2, 2), index=idx, columns=["A", "B"])
with option_context("display.max_rows", 60, "display.max_columns", 20):
reg_repr = df._repr_html_()
assert "..." not in reg_repr
tuples = list(itertools.product(np.arange(max_L1 + 1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(
np.random.randn((max_L1 + 1) * 2, 2), index=idx, columns=["A", "B"]
)
long_repr = df._repr_html_()
assert "..." in long_repr
def test_repr_html_long_and_wide(self):
max_cols = 20
max_rows = 60
h, w = max_rows - 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
h, w = max_rows + 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_info_repr(self):
# GH#21746 For tests inside a terminal (i.e. not CI) we need to detect
# the terminal size to ensure that we try to print something "too big"
term_width, term_height = get_terminal_size()
max_rows = 60
max_cols = 20 + (max(term_width, 80) - 80) // 4
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_vertically_truncated_repr(df)
with option_context("display.large_repr", "info"):
assert has_info_repr(df)
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_horizontally_truncated_repr(df)
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert has_info_repr(df)
def test_info_repr_max_cols(self):
# GH #6939
df = DataFrame(np.random.randn(10, 5))
with option_context(
"display.large_repr",
"info",
"display.max_columns",
1,
"display.max_info_columns",
4,
):
assert has_non_verbose_info_repr(df)
with option_context(
"display.large_repr",
"info",
"display.max_columns",
1,
"display.max_info_columns",
5,
):
assert not has_non_verbose_info_repr(df)
# test verbose overrides
# fmt.set_option('display.max_info_columns', 4) # exceeded
def test_info_repr_html(self):
max_rows = 60
max_cols = 20
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert r"<class" not in df._repr_html_()
with option_context("display.large_repr", "info"):
assert r"<class" in df._repr_html_()
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert "<class" not in df._repr_html_()
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert "<class" in df._repr_html_()
def test_fake_qtconsole_repr_html(self, float_frame):
df = float_frame
def get_ipython():
return {"config": {"KernelApp": {"parent_appname": "ipython-qtconsole"}}}
repstr = df._repr_html_()
assert repstr is not None
fmt.set_option("display.max_rows", 5, "display.max_columns", 2)
repstr = df._repr_html_()
assert "class" in repstr # info fallback
tm.reset_display_options()
def test_pprint_pathological_object(self):
"""
If the test fails, it at least won't hang.
"""
class A:
def __getitem__(self, key):
return 3 # obviously simplified
df = DataFrame([A()])
repr(df) # just don't die
def test_float_trim_zeros(self):
vals = [
2.08430917305e10,
3.52205017305e10,
2.30674817305e10,
2.03954217305e10,
5.59897817305e10,
]
skip = True
for line in repr(DataFrame({"A": vals})).split("\n")[:-2]:
if line.startswith("dtype:"):
continue
if _three_digit_exp():
assert ("+010" in line) or skip
else:
assert ("+10" in line) or skip
skip = False
@pytest.mark.parametrize(
"data, expected",
[
(["3.50"], "0 3.50\ndtype: object"),
([1.20, "1.00"], "0 1.2\n1 1.00\ndtype: object"),
([np.nan], "0 NaN\ndtype: float64"),
([None], "0 None\ndtype: object"),
(["3.50", np.nan], "0 3.50\n1 NaN\ndtype: object"),
([3.50, np.nan], "0 3.5\n1 NaN\ndtype: float64"),
([3.50, np.nan, "3.50"], "0 3.5\n1 NaN\n2 3.50\ndtype: object"),
([3.50, None, "3.50"], "0 3.5\n1 None\n2 3.50\ndtype: object"),
],
)
def test_repr_str_float_truncation(self, data, expected):
# GH#38708
series = Series(data)
result = repr(series)
assert result == expected
@pytest.mark.parametrize(
"float_format,expected",
[
("{:,.0f}".format, "0 1,000\n1 test\ndtype: object"),
("{:.4f}".format, "0 1000.0000\n1 test\ndtype: object"),
],
)
def test_repr_float_format_in_object_col(self, float_format, expected):
# GH#40024
df = Series([1000.0, "test"])
with option_context("display.float_format", float_format):
result = repr(df)
assert result == expected
def test_dict_entries(self):
df = DataFrame({"A": [{"a": 1, "b": 2}]})
val = df.to_string()
assert "'a': 1" in val
assert "'b': 2" in val
def test_categorical_columns(self):
# GH35439
data = [[4, 2], [3, 2], [4, 3]]
cols = ["aaaaaaaaa", "b"]
df = DataFrame(data, columns=cols)
df_cat_cols = DataFrame(data, columns=pd.CategoricalIndex(cols))
assert df.to_string() == df_cat_cols.to_string()
def test_period(self):
# GH 12615
df = DataFrame(
{
"A": pd.period_range("2013-01", periods=4, freq="M"),
"B": [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02-01", freq="D"),
pd.Period("2011-03-01 09:00", freq="H"),
pd.Period("2011-04", freq="M"),
],
"C": list("abcd"),
}
)
exp = (
" A B C\n"
"0 2013-01 2011-01 a\n"
"1 2013-02 2011-02-01 b\n"
"2 2013-03 2011-03-01 09:00 c\n"
"3 2013-04 2011-04 d"
)
assert str(df) == exp
@pytest.mark.parametrize(
"length, max_rows, min_rows, expected",
[
(10, 10, 10, 10),
(10, 10, None, 10),
(10, 8, None, 8),
(20, 30, 10, 30), # max_rows > len(frame), hence max_rows
(50, 30, 10, 10), # max_rows < len(frame), hence min_rows
(100, 60, 10, 10), # same
(60, 60, 10, 60), # edge case
(61, 60, 10, 10), # edge case
],
)
def test_max_rows_fitted(self, length, min_rows, max_rows, expected):
"""Check that display logic is correct.
GH #37359
See description here:
https://pandas.pydata.org/docs/dev/user_guide/options.html#frequently-used-options
"""
formatter = fmt.DataFrameFormatter(
DataFrame(np.random.rand(length, 3)),
max_rows=max_rows,
min_rows=min_rows,
)
result = formatter.max_rows_fitted
assert result == expected
def gen_series_formatting():
s1 = Series(["a"] * 100)
s2 = Series(["ab"] * 100)
s3 = Series(["a", "ab", "abc", "abcd", "abcde", "abcdef"])
s4 = s3[::-1]
test_sers = {"onel": s1, "twol": s2, "asc": s3, "desc": s4}
return test_sers
class TestSeriesFormatting:
def setup_method(self, method):
self.ts = tm.makeTimeSeries()
def test_repr_unicode(self):
s = Series(["\u03c3"] * 10)
repr(s)
a = Series(["\u05d0"] * 1000)
a.name = "title1"
repr(a)
def test_to_string(self):
buf = StringIO()
s = self.ts.to_string()
retval = self.ts.to_string(buf=buf)
assert retval is None
assert buf.getvalue().strip() == s
# pass float_format
format = "%.4f".__mod__
result = self.ts.to_string(float_format=format)
result = [x.split()[1] for x in result.split("\n")[:-1]]
expected = [format(x) for x in self.ts]
assert result == expected
# empty string
result = self.ts[:0].to_string()
assert result == "Series([], Freq: B)"
result = self.ts[:0].to_string(length=0)
assert result == "Series([], Freq: B)"
# name and length
cp = self.ts.copy()
cp.name = "foo"
result = cp.to_string(length=True, name=True, dtype=True)
last_line = result.split("\n")[-1].strip()
assert last_line == (f"Freq: B, Name: foo, Length: {len(cp)}, dtype: float64")
def test_freq_name_separation(self):
s = Series(
np.random.randn(10), index=date_range("1/1/2000", periods=10), name=0
)
result = repr(s)
assert "Freq: D, Name: 0" in result
def test_to_string_mixed(self):
s = Series(["foo", np.nan, -1.23, 4.56])
result = s.to_string()
expected = "0 foo\n" + "1 NaN\n" + "2 -1.23\n" + "3 4.56"
assert result == expected
# but don't count NAs as floats
s = Series(["foo", np.nan, "bar", "baz"])
result = s.to_string()
expected = "0 foo\n" + "1 NaN\n" + "2 bar\n" + "3 baz"
assert result == expected
s = Series(["foo", 5, "bar", "baz"])
result = s.to_string()
expected = "0 foo\n" + "1 5\n" + "2 bar\n" + "3 baz"
assert result == expected
def test_to_string_float_na_spacing(self):
s = Series([0.0, 1.5678, 2.0, -3.0, 4.0])
s[::2] = np.nan
result = s.to_string()
expected = (
"0 NaN\n"
+ "1 1.5678\n"
+ "2 NaN\n"
+ "3 -3.0000\n"
+ "4 NaN"
)
assert result == expected
def test_to_string_without_index(self):
# GH 11729 Test index=False option
s = Series([1, 2, 3, 4])
result = s.to_string(index=False)
expected = "1\n" + "2\n" + "3\n" + "4"
assert result == expected
def test_unicode_name_in_footer(self):
s = Series([1, 2], name="\u05e2\u05d1\u05e8\u05d9\u05ea")
sf = fmt.SeriesFormatter(s, name="\u05e2\u05d1\u05e8\u05d9\u05ea")
sf._get_footer() # should not raise exception
def test_east_asian_unicode_series(self):
# not aligned properly because of east asian width
# unicode index
s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
expected = "あ a\nいい bb\nううう CCC\nええええ D\ndtype: object"
assert repr(s) == expected
# unicode values
s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
expected = "a あ\nbb いい\nc ううう\nddd ええええ\ndtype: object"
assert repr(s) == expected
# both
s = Series(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"])
expected = (
"ああ あ\nいいいい いい\nう ううう\nえええ ええええ\ndtype: object"
)
assert repr(s) == expected
# unicode footer
s = Series(
["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"], name="おおおおおおお"
)
expected = (
"ああ あ\nいいいい いい\nう ううう\n"
"えええ ええええ\nName: おおおおおおお, dtype: object"
)
assert repr(s) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
s = Series([1, 22, 3333, 44444], index=idx)
expected = (
"あ いい 1\n"
"う え 22\n"
"おおお かかかか 3333\n"
"き くく 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
expected = (
"1 1\nAB 22\nNaN 3333\nあああ 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, longer than unicode repr
s = Series(
[1, 22, 3333, 44444], index=[1, "AB", Timestamp("2011-01-01"), "あああ"]
)
expected = (
"1 1\n"
"AB 22\n"
"2011-01-01 00:00:00 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# truncate
with option_context("display.max_rows", 3):
s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
expected = (
"0 あ\n ... \n"
"3 ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
s.index = ["ああ", "いいいい", "う", "えええ"]
expected = (
"ああ あ\n ... \n"
"えええ ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# unicode index
s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
expected = (
"あ a\nいい bb\nううう CCC\n"
"ええええ D\ndtype: object"
)
assert repr(s) == expected
# unicode values
s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
expected = (
"a あ\nbb いい\nc ううう\n"
"ddd ええええ\ndtype: object"
)
assert repr(s) == expected
# both
s = Series(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"])
expected = (
"ああ あ\n"
"いいいい いい\n"
"う ううう\n"
"えええ ええええ\ndtype: object"
)
assert repr(s) == expected
# unicode footer
s = Series(
["あ", "いい", "ううう", "ええええ"],
index=["ああ", "いいいい", "う", "えええ"],
name="おおおおおおお",
)
expected = (
"ああ あ\n"
"いいいい いい\n"
"う ううう\n"
"えええ ええええ\n"
"Name: おおおおおおお, dtype: object"
)
assert repr(s) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
s = Series([1, 22, 3333, 44444], index=idx)
expected = (
"あ いい 1\n"
"う え 22\n"
"おおお かかかか 3333\n"
"き くく 44444\n"
"dtype: int64"
)
assert repr(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
expected = (
"1 1\nAB 22\nNaN 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, longer than unicode repr
s = Series(
[1, 22, 3333, 44444],
index=[1, "AB", Timestamp("2011-01-01"), "あああ"],
)
expected = (
"1 1\n"
"AB 22\n"
"2011-01-01 00:00:00 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# truncate
with | option_context("display.max_rows", 3) | pandas.option_context |
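# A minimal sketch of the pandas.option_context behaviour exercised above
# (illustrative frame, not part of the test suite): options set inside the
# `with` block are restored to their previous values on exit.
import pandas as pd
demo = pd.DataFrame({"A": range(20)})
with pd.option_context("display.max_rows", 3):
    assert ".." in repr(demo)    # vertically truncated while the option is active
assert ".." not in repr(demo)    # default max_rows restored afterwards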
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 23 11:42:40 2014
@author: viktor
plumy.SensorColumn - SensorColumn representation holding SensorBoard instances
The SensorColumn class is the top link in our object-oriented data model.
After construction, a SensorColumn instance holds 9 SensorBoard class instances.
There are a total of 6 SensorColumns in the plumy setup.
"""
import numpy as np
import pandas as pd
import scipy as sp
import os
import warnings
from .SensorBoard import SensorBoard
from .base import base
try:
import zipfile
have_zipfile = True
except ImportError:
have_zipfile = False
class SensorColumn(base):
def __init__(self, data_location, gas, loc, voltage, speed, trial, _args):
"""
data_location: URL-style path to the location of the data. Currently
supported are the following:
file://<path> - <path> points to the location of the desired file,
i.e. the full path to the desired sensor column.
hdf://<path> - <path> points to the location of the HDFcache with
the data. The location of the desired data in the HDFcache is
inferred from the gas, loc, voltage, speed, and trial params.
zip://<path> - <path> contains the path to the WTD_upload.zip file,
and continues with the path to the desired data in the zip
file. E.g.: /User/enose/WTD_upload.zip/CO_1000/L2/1234.csv .
"""
self._init = False
super(SensorColumn, self).__init__(gas=gas,
loc=loc,
voltage=voltage,
speed=speed,
trial=trial,
_args=_args)
if _args['use_HDFcache']:
try:
self.HDFcache = _args['HDF_cache_location']
except KeyError:
raise Warning('Have been told to use the HDFcache, \n' +\
"but caller didn't disclose its location.\n" + \
'Continuing without HDFcache.')
self.HDFcache = None
else:
self.HDFcache = None
self.load(data_location)
self.set_name('Column %s' % loc)
self.set_time(self.get_time())
self.set_filter()
baseline = self.get_baseline()
if baseline is None: # No readings from mfc sensor
print('Warning:\n{}:'.format(data_location))
print("No readings from mass flow controller, don't expect useful data.")
print("Assuming 10 s baseline.\n")
baseline = (0,1000)
self.set_baseline(baseline)
if self._args['drop_duplicates']:
self.drop_duplicates()
if self._args['fill_gaps']:
pass
# self.fill_gaps(rate=20, window=150)
if self._args['resample']:
self.resample(rate=100)
self.Board1 = SensorBoard(gas, loc, voltage, speed,
trial, _args, 'Board 1', self._Time,
[self._Data.B1S1, self._Data.B1S2,
self._Data.B1S3, self._Data.B1S4,
self._Data.B1S5, self._Data.B1S6,
self._Data.B1S7, self._Data.B1S8],
self._Baseline, self._Filter)
self.Board2 = SensorBoard(gas, loc, voltage, speed,
trial, _args, 'Board 2', self._Time,
[self._Data.B2S1, self._Data.B2S2,
self._Data.B2S3, self._Data.B2S4,
self._Data.B2S5, self._Data.B2S6,
self._Data.B2S7, self._Data.B2S8],
self._Baseline, self._Filter)
self.Board3 = SensorBoard(gas, loc, voltage, speed,
trial, _args, 'Board 3', self._Time,
[self._Data.B3S1, self._Data.B3S2,
self._Data.B3S3, self._Data.B3S4,
self._Data.B3S5, self._Data.B3S6,
self._Data.B3S7, self._Data.B3S8],
self._Baseline, self._Filter)
self.Board4 = SensorBoard(gas, loc, voltage, speed,
trial, _args, 'Board 4', self._Time,
[self._Data.B4S1, self._Data.B4S2,
self._Data.B4S3, self._Data.B4S4,
self._Data.B4S5, self._Data.B4S6,
self._Data.B4S7, self._Data.B4S8],
self._Baseline, self._Filter)
self.Board5 = SensorBoard(gas, loc, voltage, speed,
trial, _args, 'Board 5', self._Time,
[self._Data.B5S1, self._Data.B5S2,
self._Data.B5S3, self._Data.B5S4,
self._Data.B5S5, self._Data.B5S6,
self._Data.B5S7, self._Data.B5S8],
self._Baseline, self._Filter)
self.Board6 = SensorBoard(gas, loc, voltage, speed,
trial, _args, 'Board 6', self._Time,
[self._Data.B6S1, self._Data.B6S2,
self._Data.B6S3, self._Data.B6S4,
self._Data.B6S5, self._Data.B6S6,
self._Data.B6S7, self._Data.B6S8],
self._Baseline, self._Filter)
self.Board7 = SensorBoard(gas, loc, voltage, speed,
trial, _args, 'Board 7', self._Time,
[self._Data.B7S1, self._Data.B7S2,
self._Data.B7S3, self._Data.B7S4,
self._Data.B7S5, self._Data.B7S6,
self._Data.B7S7, self._Data.B7S8],
self._Baseline, self._Filter)
self.Board8 = SensorBoard(gas, loc, voltage, speed,
trial, _args, 'Board 8', self._Time,
[self._Data.B8S1, self._Data.B8S2,
self._Data.B8S3, self._Data.B8S4,
self._Data.B8S5, self._Data.B8S6,
self._Data.B8S7, self._Data.B8S8],
self._Baseline, self._Filter)
self.Board9 = SensorBoard(gas, loc, voltage, speed,
trial, _args, 'Board 9', self._Time,
[self._Data.B9S1, self._Data.B9S2,
self._Data.B9S3, self._Data.B9S4,
self._Data.B9S5, self._Data.B9S6,
self._Data.B9S7, self._Data.B9S8],
self._Baseline, self._Filter)
if self.HDFcache is not None: # then save it in the cache
self.save_hdf5(self.HDFcache, overwrite=False)
self._init = True
def __call__(self):
if hasattr(self, '_Data'):
return self.get_all
print('\nNo data for')
print(self)
def __str__(self):
if self._init:
return '\n\n %s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s \
\n\n\n \
Min. Resistance:\t\t%i Ohm (Sensor %d, Board %i)\n \
Max. Resistance:\t\t%i Ohm (Sensor %d, Board %i)\n \
Max. Variance:\t\t%d Ohm (Sensor %i, Board %i)\n \
Max. Average:\t\t%d Ohm (Sensor %i, Board %i)\n \
Sampling Rate:\t\t%f Hz' % \
(self.get_name(), self.Board1, self.Board2, self.Board3,
self.Board4, self.Board5, self.Board6, self.Board7,
self.Board8, self.Board9, self.get_min()[2], self.get_min()[1], self.get_min()[0],
self.get_max()[2], self.get_max()[1], self.get_max()[0], self.get_var()[2],
self.get_var()[1], self.get_var()[0], self.get_mean()[2], self.get_mean()[1],
self.get_mean()[0], self.sample_rate)
else:
return '\n'+'_'*100+'\n\n %s:\n\n\t+ '\
'+ Gas: %s\n\t+ Location: %s\n\t+ SensorVoltage: %s\n\t' \
'+ FanSpeed: %s\n\t+ Verbose: %s' % \
(self.Type, self.Gas,
self.Location, self.SensorVoltage, self.FanSpeed, self._args['verbose'])
def load(self, data_location):
"""
Use pandas to load column-data from file.
"""
load_successful = False
if data_location.startswith('hdf://'): # load from HDF
HDF_location = data_location[6:]
self.load_hdf5(HDF_location)
load_successful = True
return
loaded_from_cache = False
if self.HDFcache is not None:
try:
self.load_hdf5(self.HDFcache)
loaded_from_cache = True
load_successful = True
except IOError as e:
print('No HDFStore at {}.\n'.format(self.HDFcache) +
'Creating one for you.\n' +
'Reverting to standard (slow) CSV parsing.')
store = pd.HDFStore(self.HDFcache,
complevel=9,
complib='blosc')
store.close()
except KeyError:
print('Data not found in HDF cache: \n' + \
'{}\n'.format(self._get_hd5_dataname()) + \
'Trying to parse from original data.')
if not loaded_from_cache:
user_cols = list(range(92))
exclude = [11,20,29,38,47,56,65,74,83]
for x in exclude:
user_cols.remove(x)
col_names = \
['Time','FanSetPoint','FanReading','Mfc1_SetPoint',
'Mfc2_SetPoint','Mfc3_SetPoint','Mfc1_Read','Mfc2_Read',
'Mfc3_Read','Temp','RelHumid',
'B1S1','B1S2','B1S3','B1S4','B1S5','B1S6','B1S7','B1S8',
'B2S1','B2S2','B2S3','B2S4','B2S5','B2S6','B2S7','B2S8',
'B3S1','B3S2','B3S3','B3S4','B3S5','B3S6','B3S7','B3S8',
'B4S1','B4S2','B4S3','B4S4','B4S5','B4S6','B4S7','B4S8',
'B5S1','B5S2','B5S3','B5S4','B5S5','B5S6','B5S7','B5S8',
'B6S1','B6S2','B6S3','B6S4','B6S5','B6S6','B6S7','B6S8',
'B7S1','B7S2','B7S3','B7S4','B7S5','B7S6','B7S7','B7S8',
'B8S1','B8S2','B8S3','B8S4','B8S5','B8S6','B8S7','B8S8',
'B9S1','B9S2','B9S3','B9S4','B9S5','B9S6','B9S7','B9S8']
if data_location.startswith('zip://'):
if not have_zipfile:
raise Exception(
'Loading from zip file requires the zipfile module, but it ' +
'seems this is not available. Either make it available ' +
'or unpack the data zip file and work from there.')
splitted = data_location[6:].split('.zip')
if len(splitted) != 2:
warnings.warn('Zip location contains multiple .zip ' +
'extensions.\n Using the last .zip for parsing.')
splitted = splitted[-2:]
zippath = splitted[0] + '.zip'
filepath = splitted[1]
while filepath.startswith('/'):
filepath = filepath[1:]
with zipfile.ZipFile(zippath, 'r') as zf:
with zf.open(filepath, 'r') as fp:
self._Data = pd.read_table(fp,
names=col_names,
header=0,
index_col='Time',
usecols=user_cols)
load_successful = True
elif data_location.startswith('file://'):
data_file = data_location[7:]
if not os.path.exists(data_file):
raise AssertionError(
'File {} not found.'.format(data_file))
self._Data = pd.read_table(data_file,
names=col_names,
header=0,
index_col='Time',
usecols=user_cols)
load_successful = True
else:
raise Exception(
"Couldn't parse data type from {}...".format(
data_location[:10]))
if not load_successful:
raise(Warning("Didn't load anything from location \n{}".format(
data_location)))
return
def save_hdf5(self, storepath, overwrite=True):
"""
Saves this SensorColumn as HDF5 object in fixed format, i.e.
non-appendable and non-queryable, but fast.
Parameters:
storepath - the hdf5 store where to save the object.
overwrite - replace existing data (default True).
"""
if self._Data is None:
raise Exception('No data to save. Call {}.load() first.'.format(self.__class__))
dataname = self._get_hd5_dataname()
with pd.HDFStore(storepath) as store:
if not overwrite:
node = store.get_node(dataname)
if node is not None:
return
store.put(dataname, self._Data, format='table')
def load_hdf5(self, storepath):
"""
Loads the Data from an HDFstore instead of parsing it.
Parameters:
storepath - the path to the HDFStore.
"""
if self._Data is not None:
raise Exception('This {} already has data.\n'.format(self.__class__) +
'Refusing to overwrite it with data from HDF5 store.')
self._Data = pd.read_hdf(storepath, self._get_hd5_dataname())
def _get_hd5_dataname(self):
return '{}/{}/Fan{}_{}_trial{}'.format(self.Gas, self.Location, self.FanSpeed, self.SensorVoltage, self.Trial)
def find_duplicates(self):
"""
Return index of duplicated Timestamps.
"""
if not hasattr(self, '_Data'):
raise AssertionError
time = self._Time
dupes_idx = np.where(time[1:] == time[:-1])[0]
if dupes_idx.size == 0:
if self._args['verbose']:
print('No duplicates detected.')
return None
if self._args['verbose']:
print('[%s] %s: Detected %i duplicate timestamps.' % (len(self.TimeStamp), self.get_name(), len(dupes_idx)))
return dupes_idx
def drop_duplicates(self):
"""
Searches for duplicate Timestamps and drops them by reindexing the
pandas DataFrame.
"""
time = self._Time.copy()
idx = self.find_duplicates()
if idx is None:
return
self._Data = self._Data[~self._Data.index.duplicated(keep='first')]
assert self._Data.index.is_unique
self.set_time(time)
if self._args['verbose']:
print('[%s] %s: Removed %i duplicates.' % (self.TimeStamp, self.get_name(), idx.size))
def find_gaps(self, thresh = 100):
"""
Find periods of missing data ('gaps').
Returns arrays of Start_Idx, (Start_Time, Stop_Time))
Keyword arguments:
thresh -- Gap threshold in ms.
"""
assert hasattr(self, '_Time')
assert isinstance(thresh, (int, float))
time = self._Time
diff = time[1:] - time[:-1]
indx = np.where(diff > thresh)[0]
start_time = time[indx]
stop_time = time[(indx + 1)]
return (indx, start_time, stop_time)
def fill_gaps(self, rate = 20, window = 0):
"""
Try to find and fill large periods of the missing Signal ('gaps')
with white-noise. The statistical properties of the generated noise is
sampled from the right and left edges of the gap. The Method iterates
over every Sensor of the SensorColumn.
Keyword arguments:
rate -- Sample rate to use
window -- Size of sampling window. If zero use dynamic window size.
"""
assert isinstance(rate, int)
assert isinstance(window, int)
sample_rate = rate
window_size = window
nonuniform_x = self.get_time()
(gap_indx, gap_start, gap_stop,) = self.find_gaps(100)
board_names = [ 'B%iS%i' % (board, sensor) for board in range(1, 10) for sensor in range(1, 9) ]
for n in board_names:
unfiltered_y = getattr(self._Data, n).dropna().values
for (i, indx,) in enumerate(gap_indx):
if i == 0:
fixed_x = nonuniform_x[:indx]
fixed_y = unfiltered_y[:indx]
else:
win = slice(gap_indx[(i - 1)] + 1, indx)
fixed_x = np.append(fixed_x, nonuniform_x[win])
fixed_y = np.append(fixed_y, unfiltered_y[win])
num_samples = (gap_stop[i] - gap_start[i]) / sample_rate
if window_size == 0:
window_size = num_samples
win_left = slice(indx - window_size, indx)
left_unfiltered_y = unfiltered_y[win_left]
win_right = slice(indx + 1, indx + window_size + 1)
right_unfiltered_y = unfiltered_y[win_right]
spl_left = sp.interpolate.splrep(nonuniform_x[win_left], left_unfiltered_y)
spl_right = sp.interpolate.splrep(nonuniform_x[win_right], right_unfiltered_y)
newx = np.arange(gap_start[i], gap_stop[i], sample_rate)
if len(newx) == num_samples + 1:
newx = np.arange(gap_start[i], gap_stop[i] - sample_rate, sample_rate)
(newx_left, newx_right,) = np.array_split(newx, 2)
scopex_left = np.concatenate((nonuniform_x[win_left], newx_left))
scopex_right = np.concatenate((newx_right, nonuniform_x[win_right]))
scopey_left = sp.interpolate.splev(scopex_left, spl_left)
scopey_right = sp.interpolate.splev(scopex_right, spl_right)
new_win_left = slice(len(nonuniform_x[win_left]), len(scopex_left))
new_win_right = slice(0, len(newx_right))
newy = np.concatenate((scopey_left[new_win_left], scopey_right[new_win_right]))
fixed_x = np.append(fixed_x, newx)
fixed_y = np.append(fixed_y, newy)
win = slice(gap_indx[-1] + 1, len(nonuniform_x))
fixed_x = np.append(fixed_x, nonuniform_x[win])
fixed_y = np.append(fixed_y, unfiltered_y[win])
series = pd.Series(data=fixed_y, index=fixed_x)
if n == 'B1S1':
self._Data = self._Data.reindex(index=fixed_x)
self._Data[n] = series
if self._args['verbose']:
print('%s: Reconstructed %i gaps.' % (self.get_name(), len(gap_indx)))
def resample(self, rate=100):
"""
Resample the signal to sample rate parameter (like it should be).
Step I : Create new time series (uniform sample rate).
Step II : Interpolate old signal (Univariate Spline through all points)
Step III: Update DataFrame (reindex, replace old Series with new ones)
Keyword arguments:
rate -- Target sample rate
"""
step = rate / float(10)
uniform_x = np.arange(step, 260000 + step, step)
_Data = self._Data.reindex(index= | pd.Index(uniform_x) | pandas.Index |
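# A minimal sketch of the reindex-onto-a-uniform-grid idea used by resample()
# above (made-up numbers; the real code spans the whole plumy DataFrame):
import numpy as np
import pandas as pd
t = pd.Index([0.0, 10.0, 35.0, 60.0])            # non-uniform timestamps (ms)
s = pd.Series([1.0, 2.0, 3.0, 4.0], index=t)
uniform = pd.Index(np.arange(10.0, 70.0, 10.0))  # uniform 10 ms grid
resampled = s.reindex(uniform.union(t)).interpolate(method="index").reindex(uniform)
print(resampled)                                 # gap values filled by interpolation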
import numbers
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Type, Union
import warnings
import numpy as np
from pandas._libs import lib, missing as libmissing
from pandas._typing import ArrayLike, DtypeObj
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.base import register_extension_dtype
from pandas.core.dtypes.common import (
is_bool_dtype,
is_datetime64_dtype,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_object_dtype,
pandas_dtype,
)
from pandas.core.dtypes.missing import isna
from pandas.core import ops
from pandas.core.array_algos import masked_reductions
from pandas.core.ops import invalid_comparison
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.tools.numeric import to_numeric
from .masked import BaseMaskedArray, BaseMaskedDtype
if TYPE_CHECKING:
import pyarrow
class _IntegerDtype(BaseMaskedDtype):
"""
An ExtensionDtype to hold a single size & kind of integer dtype.
These specific implementations are subclasses of the non-public
_IntegerDtype. For example we have Int8Dtype to represent signed int 8s.
The attributes name & type are set when these subclasses are created.
"""
name: str
base = None
type: Type
def __repr__(self) -> str:
sign = "U" if self.is_unsigned_integer else ""
return f"{sign}Int{8 * self.itemsize}Dtype()"
@cache_readonly
def is_signed_integer(self) -> bool:
return self.kind == "i"
@cache_readonly
def is_unsigned_integer(self) -> bool:
return self.kind == "u"
@property
def _is_numeric(self) -> bool:
return True
@cache_readonly
def numpy_dtype(self) -> np.dtype:
""" Return an instance of our numpy dtype """
return np.dtype(self.type)
@cache_readonly
def kind(self) -> str:
return self.numpy_dtype.kind
@cache_readonly
def itemsize(self) -> int:
""" Return the number of bytes in this dtype """
return self.numpy_dtype.itemsize
@classmethod
def construct_array_type(cls) -> Type["IntegerArray"]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
return IntegerArray
def _get_common_dtype(self, dtypes: List[DtypeObj]) -> Optional[DtypeObj]:
# we only handle nullable EA dtypes and numeric numpy dtypes
if not all(
isinstance(t, BaseMaskedDtype)
or (
isinstance(t, np.dtype)
and (np.issubdtype(t, np.number) or np.issubdtype(t, np.bool_))
)
for t in dtypes
):
return None
np_dtype = np.find_common_type(
[t.numpy_dtype if isinstance(t, BaseMaskedDtype) else t for t in dtypes], []
)
if np.issubdtype(np_dtype, np.integer):
return STR_TO_DTYPE[str(np_dtype)]
return None
def __from_arrow__(
self, array: Union["pyarrow.Array", "pyarrow.ChunkedArray"]
) -> "IntegerArray":
"""
Construct IntegerArray from pyarrow Array/ChunkedArray.
"""
import pyarrow
from pandas.core.arrays._arrow_utils import pyarrow_array_to_numpy_and_mask
pyarrow_type = pyarrow.from_numpy_dtype(self.type)
if not array.type.equals(pyarrow_type):
array = array.cast(pyarrow_type)
if isinstance(array, pyarrow.Array):
chunks = [array]
else:
# pyarrow.ChunkedArray
chunks = array.chunks
results = []
for arr in chunks:
data, mask = pyarrow_array_to_numpy_and_mask(arr, dtype=self.type)
int_arr = IntegerArray(data.copy(), ~mask, copy=False)
results.append(int_arr)
return IntegerArray._concat_same_type(results)
def integer_array(values, dtype=None, copy: bool = False) -> "IntegerArray":
"""
Infer and return an integer array of the values.
Parameters
----------
values : 1D list-like
dtype : dtype, optional
dtype to coerce
copy : bool, default False
Returns
-------
IntegerArray
Raises
------
TypeError if incompatible types
"""
values, mask = coerce_to_array(values, dtype=dtype, copy=copy)
return IntegerArray(values, mask)
def safe_cast(values, dtype, copy: bool):
"""
Safely cast the values to the dtype if they
are equivalent, meaning floats must be equivalent to the
ints.
"""
try:
return values.astype(dtype, casting="safe", copy=copy)
except TypeError as err:
casted = values.astype(dtype, copy=copy)
if (casted == values).all():
return casted
raise TypeError(
f"cannot safely cast non-equivalent {values.dtype} to {np.dtype(dtype)}"
) from err
def coerce_to_array(
values, dtype, mask=None, copy: bool = False
) -> Tuple[np.ndarray, np.ndarray]:
"""
Coerce the input values array to numpy arrays with a mask
Parameters
----------
values : 1D list-like
dtype : integer dtype
mask : bool 1D array, optional
copy : bool, default False
if True, copy the input
Returns
-------
tuple of (values, mask)
"""
# if values is an integer numpy array, preserve its dtype
if dtype is None and hasattr(values, "dtype"):
if is_integer_dtype(values.dtype):
dtype = values.dtype
if dtype is not None:
if isinstance(dtype, str) and (
dtype.startswith("Int") or dtype.startswith("UInt")
):
# Avoid DeprecationWarning from NumPy about np.dtype("Int64")
# https://github.com/numpy/numpy/pull/7476
dtype = dtype.lower()
if not issubclass(type(dtype), _IntegerDtype):
try:
dtype = STR_TO_DTYPE[str(np.dtype(dtype))]
except KeyError as err:
raise ValueError(f"invalid dtype specified {dtype}") from err
if isinstance(values, IntegerArray):
values, mask = values._data, values._mask
if dtype is not None:
values = values.astype(dtype.numpy_dtype, copy=False)
if copy:
values = values.copy()
mask = mask.copy()
return values, mask
values = np.array(values, copy=copy)
if is_object_dtype(values):
inferred_type = lib.infer_dtype(values, skipna=True)
if inferred_type == "empty":
values = np.empty(len(values))
values.fill(np.nan)
elif inferred_type not in [
"floating",
"integer",
"mixed-integer",
"integer-na",
"mixed-integer-float",
]:
raise TypeError(f"{values.dtype} cannot be converted to an IntegerDtype")
elif is_bool_dtype(values) and is_integer_dtype(dtype):
values = np.array(values, dtype=int, copy=copy)
elif not ( | is_integer_dtype(values) | pandas.core.dtypes.common.is_integer_dtype |
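# A short, illustrative sketch of the coercion this module implements (assumes a
# pandas version with the nullable "Int64" extension dtype, i.e. >= 1.0):
import numpy as np
import pandas as pd
from pandas.api.types import is_integer_dtype
arr = pd.array([1, 2, None], dtype="Int64")    # IntegerArray: values plus validity mask
print(arr.dtype, is_integer_dtype(arr.dtype))  # Int64 True
print(pd.Series([1, 2, np.nan]).dtype)         # float64 without the masked dtype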
import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
from pandas.util import testing as pdt
from .. import transition
from ...utils import testing as ust
@pytest.fixture
def basic_df():
return pd.DataFrame(
{'x': range(5),
'y': range(5, 10)},
index=range(100, 105))
@pytest.fixture
def year():
return 2112
@pytest.fixture
def totals_col():
return 'total'
@pytest.fixture
def rates_col():
return 'growth_rate'
@pytest.fixture
def grow_targets(year, totals_col):
return pd.DataFrame({totals_col: [7]}, index=[year])
@pytest.fixture
def grow_targets_filters(year, totals_col):
return pd.DataFrame({'x_min': [0, 2, np.nan],
'y_max': [7, 9, np.nan],
'x': [np.nan, np.nan, 4],
totals_col: [1, 4, 10]},
index=[year, year, year])
@pytest.fixture(scope='function')
def random_df(request):
"""
Seed the numpy prng and return a data frame w/ predictable test inputs
so that the tests will have consistent results across builds.
"""
old_state = np.random.get_state()
def fin():
# tear down: reset the prng after the test to the pre-test state
np.random.set_state(old_state)
request.addfinalizer(fin)
np.random.seed(1)
return pd.DataFrame(
{'some_count': np.random.randint(1, 8, 20)},
index=range(0, 20))
@pytest.fixture
def growth_rates(rates_col, totals_col, grow_targets):
del grow_targets[totals_col]
grow_targets[rates_col] = [0.4]
return grow_targets
@pytest.fixture
def growth_rates_filters(rates_col, totals_col, grow_targets_filters):
del grow_targets_filters[totals_col]
grow_targets_filters[rates_col] = [0.5, -0.5, 0]
return grow_targets_filters
def assert_empty_index(index):
pdt.assert_index_equal(index, pd.Index([]))
def assert_for_add(new, added):
assert len(new) == 7
pdt.assert_index_equal(added, pd.Index([105, 106]))
def assert_for_remove(new, added):
assert len(new) == 3
assert_empty_index(added)
def test_add_rows(basic_df):
nrows = 2
new, added, copied = transition.add_rows(basic_df, nrows)
assert_for_add(new, added)
assert len(copied) == nrows
assert copied.isin(basic_df.index).all()
def test_add_rows_starting_index(basic_df):
nrows = 2
starting_index = 1000
new, added, copied = transition.add_rows(basic_df, nrows, starting_index)
assert len(new) == len(basic_df) + nrows
pdt.assert_index_equal(added, pd.Index([1000, 1001]))
assert len(copied) == nrows
assert copied.isin(basic_df.index).all()
def test_add_rows_zero(basic_df):
nrows = 0
new, added, copied = transition.add_rows(basic_df, nrows)
pdt.assert_frame_equal(new, basic_df)
assert_empty_index(added)
assert_empty_index(copied)
def test_add_rows_with_accounting(random_df):
control = 10
new, added, copied = transition.add_rows(
random_df, control, accounting_column='some_count')
assert control == new.loc[copied]['some_count'].sum()
assert copied.isin(random_df.index).all()
def test_remove_rows(basic_df):
nrows = 2
new, removed_indexes = transition.remove_rows(basic_df, nrows)
assert_for_remove(new, transition._empty_index())
assert len(removed_indexes) == nrows
assert removed_indexes.isin(basic_df.index).all()
def test_remove_rows_zero(basic_df):
nrows = 0
new, removed = transition.remove_rows(basic_df, nrows)
pdt.assert_frame_equal(new, basic_df)
assert_empty_index(removed)
def test_remove_rows_all(basic_df):
nrows = len(basic_df)
new, removed = transition.remove_rows(basic_df, nrows)
pdt.assert_frame_equal(new, basic_df.loc[[]])
ust.assert_index_equal(removed, basic_df.index)
def test_remove_rows_with_accounting(random_df):
control = 10
new, removed = transition.remove_rows(
random_df, control, accounting_column='some_count')
assert control == random_df.loc[removed]['some_count'].sum()
assert removed.isin(random_df.index).all()
def test_remove_rows_raises(basic_df):
# should raise ValueError if asked to remove more rows than
# are in the table
nrows = 25
with pytest.raises(ValueError):
transition.remove_rows(basic_df, nrows)
def test_add_or_remove_rows_add(basic_df):
nrows = 2
new, added, copied, removed = \
transition.add_or_remove_rows(basic_df, nrows)
assert_for_add(new, added)
assert len(copied) == abs(nrows)
assert copied.isin(basic_df.index).all()
assert_empty_index(removed)
def test_add_or_remove_rows_remove(basic_df):
nrows = -2
new, added, copied, removed = \
transition.add_or_remove_rows(basic_df, nrows)
assert_for_remove(new, added)
assert len(removed) == abs(nrows)
assert removed.isin(basic_df.index).all()
assert_empty_index(copied)
def test_add_or_remove_rows_zero(basic_df):
nrows = 0
new, added, copied, removed = \
transition.add_or_remove_rows(basic_df, nrows)
pdt.assert_frame_equal(new, basic_df)
assert_empty_index(added)
assert_empty_index(copied)
assert_empty_index(removed)
def test_grtransition_add(basic_df):
growth_rate = 0.4
year = 2112
grt = transition.GrowthRateTransition(growth_rate)
new, added, copied, removed = grt.transition(basic_df, year)
assert_for_add(new, added)
assert len(copied) == 2
assert copied.isin(basic_df.index).all()
assert_empty_index(removed)
def test_grtransition_add_with_accounting(random_df):
growth_rate = .1
year = 2012
orig_total = random_df['some_count'].sum()
growth = int(round(orig_total * growth_rate))
target = orig_total + growth
grt = transition.GrowthRateTransition(growth_rate, 'some_count')
new, added, copied, removed = grt(random_df, year)
assert growth == new.loc[copied]['some_count'].sum()
assert target == new['some_count'].sum()
assert copied.isin(random_df.index).all()
assert_empty_index(removed)
def test_grtransition_remove(basic_df):
growth_rate = -0.4
year = 2112
grt = transition.GrowthRateTransition(growth_rate)
new, added, copied, removed = grt.transition(basic_df, year)
assert_for_remove(new, added)
assert_empty_index(copied)
assert len(removed) == 2
assert removed.isin(basic_df.index).all()
def test_grtransition_remove_with_accounting(random_df):
growth_rate = -.1
year = 2012
orig_total = random_df['some_count'].sum()
change = -1 * int(round(orig_total * growth_rate))
target = orig_total - change
grt = transition.GrowthRateTransition(growth_rate, 'some_count')
new, added, copied, removed = grt(random_df, year)
assert change == random_df.loc[removed]['some_count'].sum()
assert target == new['some_count'].sum()
assert removed.isin(random_df.index).all()
assert_empty_index(added)
assert_empty_index(copied)
def test_grtransition_remove_all(basic_df):
growth_rate = -1
year = 2112
grt = transition.GrowthRateTransition(growth_rate)
new, added, copied, removed = grt.transition(basic_df, year)
pdt.assert_frame_equal(new, basic_df.loc[[]])
assert_empty_index(added)
assert_empty_index(copied)
ust.assert_index_equal(removed, basic_df.index)
def test_grtransition_zero(basic_df):
growth_rate = 0
year = 2112
grt = transition.GrowthRateTransition(growth_rate)
new, added, copied, removed = grt.transition(basic_df, year)
pdt.assert_frame_equal(new, basic_df)
assert_empty_index(added)
assert_empty_index(copied)
assert_empty_index(removed)
def test_tgrtransition_add(basic_df, growth_rates, year, rates_col):
tgrt = transition.TabularGrowthRateTransition(growth_rates, rates_col)
new, added, copied, removed = tgrt.transition(basic_df, year)
assert len(new) == 7
bdf_imax = basic_df.index.values.max()
assert pd.Series([bdf_imax + 1, bdf_imax + 2]).isin(new.index).all()
assert len(copied) == 2
assert_empty_index(removed)
def test_tgrtransition_remove(basic_df, growth_rates, year, rates_col):
growth_rates[rates_col] = -0.4
tgrt = transition.TabularGrowthRateTransition(growth_rates, rates_col)
new, added, copied, removed = tgrt.transition(basic_df, year)
assert len(new) == 3
assert_empty_index(added)
assert_empty_index(copied)
assert len(removed) == 2
def test_tgrtransition_with_accounting(random_df):
"""
Test segmented growth rate transitions--with an accounting
column--using one test with mixed growth rate trends:
declining, growing and no growth.
"""
grp1 = random_df.copy()
grp1['segment'] = 'a'
grp2 = random_df.copy()
grp2['segment'] = 'b'
grp3 = random_df.copy()
grp3['segment'] = 'c'
test_df = pd.concat([grp1, grp2, grp3], axis=0, ignore_index=True)
orig_total = random_df['some_count'].sum()
year = 2012
growth_rates = pd.DataFrame(
{
'grow_rate': [-0.1, 0.25, 0],
'segment': ['a', 'b', 'c']
},
index=[year, year, year])
tgrt = transition.TabularGrowthRateTransition(
growth_rates, 'grow_rate', 'some_count')
new, added, copied, removed = tgrt.transition(test_df, year)
added_rows = new.loc[copied]
removed_rows = test_df.loc[removed]
# test a declining segment
a_added_rows = added_rows[added_rows['segment'] == 'a']
a_removed_rows = removed_rows[removed_rows['segment'] == 'a']
a_change = int(round(orig_total * -0.1))
a_target = orig_total + a_change
assert a_change * -1 == a_removed_rows['some_count'].sum()
assert a_target == new[new['segment'] == 'a']['some_count'].sum()
assert_empty_index(a_added_rows.index)
# test a growing segment
b_added_rows = added_rows[added_rows['segment'] == 'b']
b_removed_rows = removed_rows[removed_rows['segment'] == 'b']
b_change = int(round(orig_total * 0.25))
b_target = orig_total + b_change
assert b_change == b_added_rows['some_count'].sum()
assert b_target == new[new['segment'] == 'b']['some_count'].sum()
assert_empty_index(b_removed_rows.index)
# test a no change segment
c_added_rows = added_rows[added_rows['segment'] == 'c']
c_removed_rows = removed_rows[removed_rows['segment'] == 'c']
assert orig_total == new[new['segment'] == 'c']['some_count'].sum()
assert_empty_index(c_added_rows.index)
assert_empty_index(c_removed_rows.index)
def test_tgrtransition_remove_all(basic_df, growth_rates, year, rates_col):
growth_rates[rates_col] = -1
tgrt = transition.TabularGrowthRateTransition(growth_rates, rates_col)
new, added, copied, removed = tgrt.transition(basic_df, year)
pdt.assert_frame_equal(new, basic_df.loc[[]])
assert_empty_index(added)
assert_empty_index(copied)
ust.assert_index_equal(removed, basic_df.index)
def test_tgrtransition_zero(basic_df, growth_rates, year, rates_col):
growth_rates[rates_col] = 0
tgrt = transition.TabularGrowthRateTransition(growth_rates, rates_col)
new, added, copied, removed = tgrt.transition(basic_df, year)
pdt.assert_frame_equal(new, basic_df)
assert_empty_index(added)
assert_empty_index(copied)
assert_empty_index(removed)
def test_tgrtransition_filters(
basic_df, growth_rates_filters, year, rates_col):
tgrt = transition.TabularGrowthRateTransition(
growth_rates_filters, rates_col)
new, added, copied, removed = tgrt.transition(basic_df, year)
assert len(new) == 5
assert basic_df.index.values.max() + 1 in new.index
assert len(copied) == 1
assert len(removed) == 1
def test_tabular_transition_add(basic_df, grow_targets, totals_col, year):
tran = transition.TabularTotalsTransition(grow_targets, totals_col)
new, added, copied, removed = tran.transition(basic_df, year)
assert_for_add(new, added)
bdf_imax = basic_df.index.values.max()
assert pd.Series([bdf_imax + 1, bdf_imax + 2]).isin(new.index).all()
assert len(copied) == 2
assert_empty_index(removed)
def test_tabular_transition_remove(basic_df, grow_targets, totals_col, year):
grow_targets[totals_col] = [3]
tran = transition.TabularTotalsTransition(grow_targets, totals_col)
new, added, copied, removed = tran.transition(basic_df, year)
assert_for_remove(new, added)
assert_empty_index(copied)
assert len(removed) == 2
def test_tabular_transition_remove_all(
basic_df, grow_targets, totals_col, year):
grow_targets[totals_col] = [0]
tran = transition.TabularTotalsTransition(grow_targets, totals_col)
new, added, copied, removed = tran.transition(basic_df, year)
pdt.assert_frame_equal(new, basic_df.loc[[]])
assert_empty_index(added)
assert_empty_index(copied)
ust.assert_index_equal(removed, basic_df.index)
def test_tabular_transition_raises_on_bad_year(
basic_df, grow_targets, totals_col, year):
tran = transition.TabularTotalsTransition(grow_targets, totals_col)
with pytest.raises(ValueError):
tran.transition(basic_df, year + 100)
def test_tabular_transition_add_filters(
basic_df, grow_targets_filters, totals_col, year):
tran = transition.TabularTotalsTransition(grow_targets_filters, totals_col)
new, added, copied, removed = tran.transition(basic_df, year)
assert len(new) == grow_targets_filters[totals_col].sum()
assert basic_df.index.values.max() + 1 in new.index
assert len(copied) == 11
assert len(removed) == 1
def test_update_linked_table(basic_df):
col_name = 'x'
added = pd.Index([5, 6, 7])
copied = | pd.Index([1, 3, 1]) | pandas.Index |
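# A small illustrative aside (not an urbansim fixture): an Index with repeated
# labels, like `copied` above, repeats the matching source rows under .loc.
import pandas as pd
df = pd.DataFrame({"x": range(5)}, index=range(100, 105))
copied = pd.Index([101, 103, 101])
print(df.loc[copied])   # row 101 appears twice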
import logging
import re
from copy import copy
from pathlib import PosixPath
from typing import Dict, List, Tuple
import pandas as pd
from pandas import DataFrame, Series
from omegaconf import DictConfig
from .mortality_file_extractor import MortalityFileExtractor
class MortalityXLSXExtractor(MortalityFileExtractor):
"""
Extracts mortality facts from Statistics Poland XLSX files
"""
def __init__(self, file_path: PosixPath, cfg: DictConfig):
self.file_path = file_path
self.cfg = cfg
self.mortality_facts = DataFrame()
self.log = logging.getLogger(__name__)
@property
def reported_year(self) -> int:
"""
Returns the reported actuals year
"""
return int(self.file_path.stem.split('_')[-1])
@property
def genders(self) -> Dict[str, int]:
return self.cfg.raw_data.genders
@property
def fact_columns(self) -> List[str]:
return self.cfg.raw_data.fact_columns
@property
def regions(self) -> Dict[str, int]:
return self.cfg.raw_data.regions
@property
def age_groups(self) -> Dict[str, int]:
return self.cfg.raw_data.age_groups
@property
def fact_year(self) -> int:
return int(self.file_path.stem.split('_')[-1])
def extract_actuals(self) -> None:
"""
Extracts mortality data facts
"""
for gender_data in self.genders.items():
self._extract_gender_sheet(gender_data)
if not self.mortality_facts.empty:
self.log.info(f'Year: {self.fact_year} - {len(self.mortality_facts)} mortality facts extracted ({self.mortality_facts.deceased_actuals.sum():.0f} of deaths in total)')
return self.mortality_facts
def _extract_gender_sheet(self, gender_data: Tuple[str, int]) -> None:
"""
Extracts mortality data facts from a given gender sheet
"""
gender_sheet = self._get_gender_sheet(gender_data)
gender_sheet_facts = self._transform_gender_sheet_into_facts_table(gender_sheet)
self.mortality_facts = | pd.concat((self.mortality_facts, gender_sheet_facts)) | pandas.concat |
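# A hedged aside on the accumulation pattern above: building a list of pieces and
# calling pd.concat once is the usual cheaper alternative to concatenating inside
# the loop (illustrative frames only):
import pandas as pd
pieces = [pd.DataFrame({"deceased_actuals": [i]}) for i in range(3)]
facts = pd.concat(pieces, ignore_index=True)
print(facts)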
import glob
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
#%%
# Create ML of sites overlapped ([# replicates in set] + 1)
def calcML(df):
ML_df = df[df.filter(like="methRate").columns].stack()
return ML_df
#samples = ['G1' 'G2', 'G3', 'G4','MF_rep1','MF_rep2','SRR8170377', 'SRR8170378', 'SRR8170379','SRR8170380']
conditions = ["G","SRR"] # "MF","pMF",
#%%
#import all sites
total_file = "allm5C_libraries_filteredDepthAnno.csv"
total_df = pd.read_csv(total_file, low_memory=False) # file
overlapped_site_ML = {}
exclusive_site_ML = {}
for set in conditions:
print(set)
ext = "_allOverlapRepDepth.csv"
#ext = "_allUnion.csv"
for file in glob.glob("**/"+set+ext, recursive=True):
print(file)
df_set = pd.read_csv(file, low_memory=False)
name = "_"+set
df = df_set[df_set.filter(like=name).columns]
df['group'] = df_set['group']
df = df[df['group'].isin(total_df['group'])]
# subset total site columns df by name
df_exclusive = total_df[total_df.filter(like=name).columns]
df_exclusive['group'] = total_df['group']
# Remove sites
df_exclusive = df_exclusive[~df_exclusive['group'].isin(df_set['group'])]
print("subset overlapped")
overlapped_site_ML[set] = calcML(df)
print("subset exlusive")
exclusive_site_ML[set] = calcML(df_exclusive)
overlapped_df = pd.DataFrame(overlapped_site_ML)
exclusive_df = | pd.DataFrame(exclusive_site_ML) | pandas.DataFrame |
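# A minimal sketch of the dict-of-Series construction used above (made-up
# methylation levels): columns align on the union of the Series indexes and
# missing positions become NaN.
import pandas as pd
ml = {"G": pd.Series([0.10, 0.20, 0.30]), "SRR": pd.Series([0.40, 0.50])}
print(pd.DataFrame(ml))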
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
README: Taiwan EPA water quality API batch download and packaging tool
Logic:
1. Download the CSV in multiple paged requests and save each page as a local file
2. Read the files into pandas DataFrames and merge them into one complete CSV
Filename: WaterQuality_startdate_enddate[_offset].csv
@author: wuulong
"""
import requests
import os
import pandas as pd
from datetime import timedelta, datetime
import time
# date format: 2020-06-01
def water_quality(start_str, end_str):
offset = 0
#global df_all
df_all = None
while True:
#url = "https://opendata.epa.gov.tw/webapi/api/rest/datastore/355000000I-001255?filters=SampleDate ge '%s' and SampleDate lt '%s' &sort=SampleDate&offset=%i&limit=1000&format=csv" % (start_str, end_str, offset)
#url = "http://opendata.epa.gov.tw/webapi/Data/WQXRiver/?$filter=ItemName%20eq%20%27%E6%B2%B3%E5%B7%9D%E6%B1%A1%E6%9F%93%E5%88%86%E9%A1%9E%E6%8C%87%E6%A8%99%27&$orderby=SampleDate%20desc&$skip=%i&$top=1000&format=csv" %(offset)
url = "https://opendata.epa.gov.tw/webapi/api/rest/datastore/355000000I-001255?filters=ItemName eq '河川污染分類指標'&sort=SampleDate&offset=%i&limit=1000&format=csv" %(offset)
file_datestr = "output/WaterQuality_%s_%s" %(start_str,end_str)
filename = "%s_%04i.csv" %(file_datestr,offset)
cont = True
while cont:
try:
url_get(filename, url)
df = | pd.read_csv(filename) | pandas.read_csv |
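# A self-contained sketch of pd.read_csv on the kind of page the loop above saves
# (the column names here are made up, not the real EPA schema):
import io
import pandas as pd
page = io.StringIO("SiteName,ItemName,ItemValue\nA,RPI,2.0\nB,RPI,4.5\n")
print(pd.read_csv(page))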
import unittest
import pandas as pd
import numpy as np
from ..timeseries import TimeSeries
class TimeSeriesTestCase(unittest.TestCase):
times = pd.date_range('20130101', '20130110')
pd_series1 = pd.Series(range(10), index=times)
pd_series2 = pd.Series(range(5, 15), index=times)
pd_series3 = pd.Series(range(15, 25), index=times)
series1: TimeSeries = TimeSeries(pd_series1)
series2: TimeSeries = TimeSeries(pd_series1, pd_series2, pd_series3)
series3: TimeSeries = TimeSeries(pd_series2)
def test_creation(self):
with self.assertRaises(ValueError):
# Index is dateTimeIndex
TimeSeries(pd.Series(range(10), range(10)))
with self.assertRaises(ValueError):
# Conf interval must be same length as main series
pd_lo = pd.Series(range(5, 14), index=pd.date_range('20130101', '20130109'))
TimeSeries(self.pd_series1, pd_lo)
with self.assertRaises(ValueError):
# Conf interval must have same time index as main series
pd_lo = pd.Series(range(5, 15), index=pd.date_range('20130102', '20130111'))
TimeSeries(self.pd_series1, pd_lo)
with self.assertRaises(ValueError):
# Conf interval must be same length as main series
pd_hi = pd.Series(range(5, 14), index=pd.date_range('20130101', '20130109'))
TimeSeries(self.pd_series1, None, pd_hi)
with self.assertRaises(ValueError):
# Conf interval must have same time index as main series
pd_lo = pd.Series(range(5, 15), index=pd.date_range('20130102', '20130111'))
TimeSeries(self.pd_series1, None, pd_lo)
with self.assertRaises(ValueError):
# Main series cannot have date holes
range_ = pd.date_range('20130101', '20130104').append(pd.date_range('20130106', '20130110'))
TimeSeries(pd.Series(range(9), index=range_))
series_test = TimeSeries(self.pd_series1, self.pd_series2, self.pd_series3)
self.assertTrue(series_test.pd_series().equals(self.pd_series1))
self.assertTrue(series_test.conf_lo_pd_series().equals(self.pd_series2))
self.assertTrue(series_test.conf_hi_pd_series().equals(self.pd_series3))
def test_alt_creation(self):
with self.assertRaises(ValueError):
# Series must contain at least three values
index = pd.date_range('20130101', '20130102')
TimeSeries.from_times_and_values(index, self.pd_series1.values[:2])
with self.assertRaises(ValueError):
# all arrays must have the same length
TimeSeries.from_times_and_values(self.pd_series1.index,
self.pd_series1.values[:-1],
self.pd_series2[:-2],
self.pd_series3[:-1])
# test if reordering is correct
rand_perm = np.random.permutation(range(1, 11))
index = pd.to_datetime(['201301{:02d}'.format(i) for i in rand_perm])
series_test = TimeSeries.from_times_and_values(index, self.pd_series1.values[rand_perm-1],
self.pd_series2[rand_perm-1],
self.pd_series3[rand_perm-1].tolist())
self.assertTrue(series_test.start_time() == pd.to_datetime('20130101'))
self.assertTrue(series_test.end_time() == pd.to_datetime('20130110'))
self.assertTrue(series_test.pd_series().equals(self.pd_series1))
self.assertTrue(series_test.conf_lo_pd_series().equals(self.pd_series2))
self.assertTrue(series_test.conf_hi_pd_series().equals(self.pd_series3))
self.assertTrue(series_test.freq() == self.series1.freq())
# TODO test over to_dataframe when multiple features choice is decided
def test_eq(self):
seriesA: TimeSeries = TimeSeries(self.pd_series1)
self.assertTrue(self.series1 == seriesA)
# with a defined CI
seriesB: TimeSeries = TimeSeries(self.pd_series1,
confidence_hi=pd.Series(range(10, 20),
index=pd.date_range('20130101', '20130110')))
self.assertFalse(self.series1 == seriesB)
self.assertTrue(self.series1 != seriesB)
# with different dates
seriesC = TimeSeries(pd.Series(range(10), index=pd.date_range('20130102', '20130111')))
self.assertFalse(self.series1 == seriesC)
# compare with both CI
seriesD: TimeSeries = TimeSeries(self.pd_series1, self.pd_series2, self.pd_series3)
seriesE: TimeSeries = TimeSeries(self.pd_series1, self.pd_series3, self.pd_series2)
self.assertTrue(self.series2 == seriesD)
self.assertFalse(self.series2 == seriesE)
def test_dates(self):
self.assertEqual(self.series1.start_time(), pd.Timestamp('20130101'))
self.assertEqual(self.series1.end_time(), pd.Timestamp('20130110'))
self.assertEqual(self.series1.duration(), pd.Timedelta(days=9))
def test_slice(self):
# base case
seriesA = self.series1.slice(pd.Timestamp('20130104'), pd.Timestamp('20130107'))
self.assertEqual(seriesA.start_time(), pd.Timestamp('20130104'))
self.assertEqual(seriesA.end_time(), pd.Timestamp('20130107'))
# time stamp not in series
seriesB = self.series1.slice(pd.Timestamp('20130104 12:00:00'), pd.Timestamp('20130107'))
self.assertEqual(seriesB.start_time(), pd.Timestamp('20130105'))
self.assertEqual(seriesB.end_time(), pd.Timestamp('20130107'))
# end timestamp after series
seriesC = self.series1.slice(pd.Timestamp('20130108'), pd.Timestamp('20130201'))
self.assertEqual(seriesC.start_time(), pd.Timestamp('20130108'))
self.assertEqual(seriesC.end_time(), pd.Timestamp('20130110'))
# n points, base case
seriesD = self.series1.slice_n_points_after(pd.Timestamp('20130102'), n=3)
self.assertEqual(seriesD.start_time(), pd.Timestamp('20130102'))
self.assertTrue(len(seriesD.values()) == 3)
self.assertEqual(seriesD.end_time(), pd.Timestamp('20130104'))
seriesE = self.series1.slice_n_points_after(pd.Timestamp('20130107 12:00:10'), n=10)
self.assertEqual(seriesE.start_time(), pd.Timestamp('20130108'))
self.assertEqual(seriesE.end_time(), pd.Timestamp('20130110'))
seriesF = self.series1.slice_n_points_before(pd.Timestamp('20130105'), n=3)
self.assertEqual(seriesF.end_time(), pd.Timestamp('20130105'))
self.assertTrue(len(seriesF.values()) == 3)
self.assertEqual(seriesF.start_time(), pd.Timestamp('20130103'))
seriesG = self.series1.slice_n_points_before(pd.Timestamp('20130107 12:00:10'), n=10)
self.assertEqual(seriesG.start_time(), pd.Timestamp('20130101'))
self.assertEqual(seriesG.end_time(), pd.Timestamp('20130107'))
# with CI
seriesH = self.series2.slice(pd.Timestamp('20130104'), pd.Timestamp('20130107'))
self.assertEqual(seriesH.conf_lo_pd_series().index[0], | pd.Timestamp('20130104') | pandas.Timestamp |
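# A short illustration of the pd.Timestamp comparisons relied on above
# (illustrative series): label-based datetime slicing is inclusive on both ends.
import pandas as pd
s = pd.Series(range(10), index=pd.date_range("20130101", "20130110"))
print(s[pd.Timestamp("20130104"):pd.Timestamp("20130107")])   # four values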
#! env python
import requests
import re
import os.path as path
import time
import datetime
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
####
# strike and protest days:
# * https://es.wikipedia.org/wiki/Protestas_en_Argentina_de_2020
#
###
class Covid:
def __init__(self, covid_data=None):
self.covid_data = covid_data if covid_data is not None\
else 'https://en.wikipedia.org/wiki/Template:COVID-19_pandemic_data/Argentina_medical_cases'
self.tmp_covid_data = './raw_data_tmp'
self.special_dates = ['2020-03-23', '2020-03-24',
'2020-03-31', '2020-04-09',
'2020-04-10', '2020-04-24',
'2020-05-01', '2020-05-25',
'2020-06-15', '2020-07-09',
'2020-07-10', '2020-08-17',
'2020-10-12', '2020-11-23',
'2020-12-07', '2020-12-08',
'2020-12-25', '2020-06-20',
'2020-08-26', '2020-09-02',
'2020-09-13', '2020-09-19']
def proc(self):
covid = dict()
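        # Re-download the Wikipedia page only when no cached copy exists or the
        # cache is older than roughly a day; otherwise parse the local copy.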
if not path.exists(self.tmp_covid_data) or (time.time() - int(path.getmtime(self.tmp_covid_data))) > 84600:
raw_data = requests.get(self.covid_data)
raw_data = raw_data.text
with open(self.tmp_covid_data, 'w') as fd:
fd.write(raw_data.encode('utf-8').decode('ascii', 'ignore'))
else:
with open(self.tmp_covid_data, 'r', encoding='utf-8') as fd:
raw_data = ""
for line in fd.readlines():
raw_data += line
line_flag = False
day_flag = False
day_counter = 0
date_tmp = ""
for line in raw_data.splitlines():
if re.search(r'Confirmed cumulative infections', line):
line_flag = True
if line_flag:
if re.search(r'wikitable', line):
line_flag = True
if line_flag:
rsearch = re.search(r'<th>(\d+ \w{3})\s*$', line)
if rsearch:
date_tmp = rsearch.group(1)
date_tmp = re.sub(r'(\d \D{3})', r'\1', date_tmp)
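                    # e.g. '23 Mar' (with the year 2020 appended) is normalised to '2020-03-23'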
date_tmp = datetime.datetime.strptime(date_tmp + " 2020", "%d %b %Y").strftime("%Y-%m-%d")
covid[date_tmp] = dict()
day_flag = True
day_counter = 0
if day_flag:
day_counter += 1
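                # Columns 29 and 30 of each table row appear to hold the cumulative
                # confirmed ('C') and death ('D') counts, respectively.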
if day_counter == 29:
tmp = re.sub(r'\D+(\d+)\D*', r'\1', line)
if not tmp.isdigit():
tmp = 0
covid[date_tmp]["C"] = tmp
elif day_counter == 30:
tmp = re.sub(r'\D+(\d+)\D*', r'\1', line)
if not tmp.isdigit():
tmp = 0
covid[date_tmp]["D"] = tmp
covid_dates = dict()
for t in covid.keys():
#covid[t]["date"] = t
if t in self.special_dates:
covid_dates[t] = dict()
covid_dates[t]["S"] = covid[t]['C']
covid_df = pd.DataFrame.from_dict(covid, dtype="int32")
covid_df = covid_df.transpose()
covid_df = covid_df.sort_index()
covid_df = covid_df.astype(int)
covid_df_dates = | pd.DataFrame.from_dict(covid_dates, dtype="int32") | pandas.DataFrame.from_dict |
from __future__ import annotations
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted
from typing import Optional, Sequence, Union
from ..logger import get_logger
logger = get_logger(__name__)
class HierarchicalEncoder(BaseEstimator, TransformerMixin):
'''Hierarchical target encoding'''
def __init__(
self,
cols: Optional[Sequence[str]] = None,
sample_weight_col: Optional[str] = None,
C: int = 30,
disambiguate: bool = True,
verbose: bool = False,
) -> None:
self.C = C
self.cols = cols
self.sample_weight_col = sample_weight_col
self.disambiguate = disambiguate
self.verbose = verbose
def _check_params(self, X: pd.DataFrame) -> None:
if self.C <= 1:
raise ValueError(f'C={self.C} must be > 1')
def _disambiguate(self, X: pd.DataFrame, sep: str = '__') -> pd.DataFrame:
'''
Disambiguate hierarchical categorical columns,
f.e. distinguish Paris, US from Paris, France
by concatenating the parent categories values with child categories values.
Order of cols matters:
the feature at the beginning of the cols list is considered to be the parent feature.
F.e.: [country, city, street].
`sep` is used as a value separator in the concatenated values.
'''
for i, col in enumerate(self.cols):
if i > 0:
X[col] = X[col].astype('str') + sep + X[self.cols[i - 1]].astype('str')
return X
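    # Illustrative sketch (not part of the original source): with cols=['country', 'city'],
    # a row (country='US', city='Paris') becomes city='Paris__US', while
    # (country='France', city='Paris') becomes city='Paris__France', so the two
    # 'Paris' values are encoded as distinct categories downstream.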
def fit(self, X: pd.DataFrame, y: Union[pd.Series, np.ndarray]) -> HierarchicalEncoder:
if self.cols is None:
self.cols = [c for c in X.columns if not c == self.sample_weight_col]
if self.sample_weight_col is None:
sample_weight = | pd.Series(1, index=X.index) | pandas.Series |
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import preprocessing, tree
from sklearn.pipeline import make_pipeline
import os.path
import os
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
from sklearn.neural_network import MLPClassifier
import pickle
def svm(X_train, X_test, y_train, y_test, to_save_dir):
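    # Standardize the features, then fit an RBF-kernel SVM; the classification
    # report and the pickled pipeline are written under to_save_dir.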
clf = make_pipeline(preprocessing.StandardScaler(), SVC(gamma=0.01, degree=3, kernel='rbf'))
clf.fit(X_train, y_train)
predicted_y = clf.predict(X_test)
tn, fp, fn, tp = confusion_matrix(y_test, predicted_y).ravel()
precision_score = tp / (tp + fp)
recall_score = tp / (tp + fn)
print(metrics.confusion_matrix(y_test, predicted_y))
print(metrics.classification_report(y_test, predicted_y))
report = metrics.classification_report(y_test, predicted_y, output_dict=True)
df = pd.DataFrame(report).transpose()
df.to_csv(os.path.join(to_save_dir, 'svm.csv'))
pickle_dir = to_save_dir + "pickle_results\\"
with open(os.path.join(pickle_dir, 'svm.pkl'), 'wb') as f:
pickle.dump(clf, f)
def random_forest(X_train, X_test, y_train, y_test, to_save_dir):
clf = RandomForestClassifier(random_state=314)
clf.fit(X_train, y_train)
predicted_y = clf.predict(X_test)
tn, fp, fn, tp = confusion_matrix(y_test, predicted_y).ravel()
precision_score = tp / (tp + fp)
recall_score = tp / (tp + fn)
print(metrics.confusion_matrix(y_test, predicted_y))
print(metrics.classification_report(y_test, predicted_y))
report = metrics.classification_report(y_test, predicted_y, output_dict=True)
df = pd.DataFrame(report).transpose()
df.to_csv(os.path.join(to_save_dir, 'random_forest.csv'))
pickle_dir = to_save_dir + "pickle_results\\"
with open(os.path.join(pickle_dir, 'random_forest.pkl'), 'wb') as f:
pickle.dump(clf, f)
def decision_tree(X_train, X_test, y_train, y_test, to_save_dir):
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X_train, y_train)
predicted_y = clf.predict(X_test)
tn, fp, fn, tp = confusion_matrix(y_test, predicted_y).ravel()
precision_score = tp / (tp + fp)
recall_score = tp / (tp + fn)
print(metrics.confusion_matrix(y_test, predicted_y))
print(metrics.classification_report(y_test, predicted_y))
report = metrics.classification_report(y_test, predicted_y, output_dict=True)
df = pd.DataFrame(report).transpose()
df.to_csv(os.path.join(to_save_dir, 'decision_tree.csv'))
pickle_dir = to_save_dir + "pickle_results\\"
with open(os.path.join(pickle_dir, 'decision_tree.pkl'), 'wb') as f:
pickle.dump(clf, f)
def kNN(X_train, X_test, y_train, y_test, to_save_dir):
clf = KNeighborsClassifier(n_neighbors=5)
clf.fit(X_train, y_train)
predicted_y = clf.predict(X_test)
print(metrics.confusion_matrix(y_test, predicted_y))
print(metrics.classification_report(y_test, predicted_y))
report = metrics.classification_report(y_test, predicted_y, output_dict=True)
df = pd.DataFrame(report).transpose()
df.to_csv(os.path.join(to_save_dir, 'kNN.csv'))
pickle_dir = to_save_dir + "pickle_results\\"
with open(os.path.join(pickle_dir, 'kNN.pkl'), 'wb') as f:
pickle.dump(clf, f)
def gradient_boost(X_train, X_test, y_train, y_test, to_save_dir):
clf = GradientBoostingClassifier(n_estimators=25, learning_rate=1.0, max_depth=20, random_state=0)
clf.fit(X_train, y_train)
predicted_y = clf.predict(X_test)
tn, fp, fn, tp = confusion_matrix(y_test, predicted_y).ravel()
precision_score = tp / (tp + fp)
recall_score = tp / (tp + fn)
print(metrics.confusion_matrix(y_test, predicted_y))
print(metrics.classification_report(y_test, predicted_y))
report = metrics.classification_report(y_test, predicted_y, output_dict=True)
df = pd.DataFrame(report).transpose()
df.to_csv(os.path.join(to_save_dir, 'gradient_boost.csv'))
pickle_dir = to_save_dir + "pickle_results\\"
with open(os.path.join(pickle_dir, 'gradient_boost.pkl'), 'wb') as f:
pickle.dump(clf, f)
def neural_net_mlp(X_train, X_test, y_train, y_test, to_save_dir):
clf = MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(14, 10 ), random_state=1, max_iter=300, warm_start=True)
clf.fit(X_train, y_train)
predicted_y = clf.predict(X_test)
tn, fp, fn, tp = confusion_matrix(y_test, predicted_y).ravel()
precision_score = tp / (tp + fp)
recall_score = tp / (tp + fn)
print(metrics.confusion_matrix(y_test, predicted_y))
print(metrics.classification_report(y_test, predicted_y))
report = metrics.classification_report(y_test, predicted_y, output_dict=True)
df = pd.DataFrame(report).transpose()
df.to_csv(os.path.join(to_save_dir, 'mlp.csv'))
pickle_dir = to_save_dir + "pickle_results\\"
with open(os.path.join(pickle_dir, 'mlp.pkl'), 'wb') as f:
pickle.dump(clf, f)
def encode_string_values(df):
for f in df.columns:
if df[f].dtype == 'object':
lbl_enc = preprocessing.LabelEncoder()
df[f] = lbl_enc.fit_transform(df[f].astype(str).values)
return df
def main():
trained_path = os.path.abspath(os.path.join(__file__, "../../..")) + "\\data\\trained\\"
models_dir = os.path.abspath(os.path.join(__file__, "../../..")) + "\\models\\"
dir = os.path.abspath(os.path.join(__file__, "../../..")) + "\\data\\datasets_with_features\\"
frames = []
attacks = ["apt_sim", "kerberoasting", "brute_force", "dc_shadow", "dc_sync",
"golden_ticket", "password_spraying", "remote_process_injection", "normal_events"]
for attack in attacks:
attack_path = dir + attack + ".csv"
frames.append(pd.read_csv(attack_path))
result = | pd.concat(frames) | pandas.concat |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = pd.timedelta_range('2H', periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(['1 day', '2 days'])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, 'a'), ('a', tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
        # Check pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'")
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
with pytest.raises(NullFrequencyError):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize('freq', ['D', 'B'])
def test_timedelta(self, freq):
index = pd.date_range('1/1/2000', periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
tm.assert_index_equal(index, back)
if freq == 'D':
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
with pytest.raises(OutOfBoundsDatetime):
pd.to_timedelta(106580, 'D') + Timestamp('2000')
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2000') + pd.to_timedelta(106580, 'D')
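        # _NaT below is iNaT + 1, i.e. the most negative int64 value that is still a
        # valid timedelta64[ns]; arithmetic with it is expected to overflow int64.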
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], 'D') + Timestamp('2000')
with pytest.raises(OverflowError, match=msg):
Timestamp('2000') + pd.to_timedelta([106580], 'D')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
(pd.to_timedelta([_NaT, '5 days', '1 hours']) -
pd.to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = pd.to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(['4 days', pd.NaT])
result = pd.to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
result = (pd.to_timedelta([pd.NaT, '5 days', '1 hours']) +
pd.to_timedelta(['7 seconds', pd.NaT, '4 hours']))
tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range('1', periods=3)
df = tdi.to_frame()
other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
with pytest.raises(TypeError):
df + other
with pytest.raises(TypeError):
other + df
with pytest.raises(TypeError):
df - other
with pytest.raises(TypeError):
other - df
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
with pytest.raises(TypeError):
s1 + np.nan
with pytest.raises(TypeError):
np.nan + s1
with pytest.raises(TypeError):
s1 - np.nan
with pytest.raises(TypeError):
-np.nan + s1
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
actual = s2 - pd.NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
with pytest.raises(TypeError):
df1 + np.nan
with pytest.raises(TypeError):
df1 - np.nan
actual = df1 + pd.NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
def test_operators_timedelta64(self):
# series ops
v1 = pd.date_range('2012-1-1', periods=3, freq='D')
v2 = pd.date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
tm.assert_series_equal(rs, xp)
assert rs.dtype == 'timedelta64[ns]'
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == 'timedelta64[ns]'
# series on the rhs
result = df['A'] - df['A'].shift()
assert result.dtype == 'timedelta64[ns]'
result = df['A'] + td
assert result.dtype == 'M8[ns]'
# scalar Timestamp on rhs
maxa = df['A'].max()
assert isinstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
assert resultb.dtype == 'timedelta64[ns]'
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
tm.assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
tm.assert_series_equal(result, expected)
assert result.dtype == 'm8[ns]'
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
assert resulta.dtype == 'm8[ns]'
# roundtrip
resultb = resulta + d
tm.assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(resultb, df['A'])
assert resultb.dtype == 'M8[ns]'
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(df['A'], resultb)
assert resultb.dtype == 'M8[ns]'
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
assert rs[2] == value
def test_timedelta64_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
nat_series_dtype_timedelta = Series([NaT, NaT],
dtype='timedelta64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
tm.assert_series_equal(timedelta_series - NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
# addition
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
# multiplication
tm.assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
tm.assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series * 1, timedelta_series)
tm.assert_series_equal(1 * timedelta_series, timedelta_series)
tm.assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(timedelta_series * np.nan,
nat_series_dtype_timedelta)
tm.assert_series_equal(np.nan * timedelta_series,
nat_series_dtype_timedelta)
# division
tm.assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / np.nan,
nat_series_dtype_timedelta)
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box_with_array):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
def test_td64arr_add_sub_float(self, box_with_array, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdarr = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdarr + other
with pytest.raises(TypeError):
other + tdarr
with pytest.raises(TypeError):
tdarr - other
with pytest.raises(TypeError):
other - tdarr
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box_with_array, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box_with_array, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box_with_array):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box_with_array)
msg = ("cannot subtract a datelike from|"
"Could not operate|"
"cannot perform operation")
with pytest.raises(TypeError, match=msg):
idx - Timestamp('2011-01-01')
def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):
# GH#23215
# TODO: parametrize over scalar datetime types?
tz = tz_naive_fixture
other = Timestamp('2011-01-01', tz=tz)
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'], tz=tz)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx + other
tm.assert_equal(result, expected)
result = other + idx
tm.assert_equal(result, expected)
def test_td64arr_add_sub_timestamp(self, box_with_array):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdi = timedelta_range('1 day', periods=3)
expected = pd.date_range('2012-01-02', periods=3)
tdarr = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(ts + tdarr, expected)
tm.assert_equal(tdarr + ts, expected)
expected2 = pd.date_range('2011-12-31', periods=3, freq='-1D')
expected2 = tm.box_expected(expected2, box_with_array)
tm.assert_equal(ts - tdarr, expected2)
tm.assert_equal(ts + (-tdarr), expected2)
with pytest.raises(TypeError):
tdarr - ts
def test_tdi_sub_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_add_datetime64_nat(self, box_with_array):
# GH#23215
other = np.datetime64('NaT')
tdi = timedelta_range('1 day', periods=3)
expected = pd.DatetimeIndex(["NaT", "NaT", "NaT"])
tdser = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(tdser + other, expected)
tm.assert_equal(other + tdser, expected)
# ------------------------------------------------------------------
# Operations with int-like others
def test_td64arr_add_int_series_invalid(self, box):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
int_ser = Series([2, 3, 4])
with pytest.raises(err):
tdser + int_ser
with pytest.raises(err):
int_ser + tdser
with pytest.raises(err):
tdser - int_ser
with pytest.raises(err):
int_ser - tdser
def test_td64arr_add_intlike(self, box_with_array):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box_with_array)
err = TypeError
if box_with_array in [pd.Index, tm.to_array]:
err = NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box_with_array,
scalar):
box = box_with_array
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box in [pd.Index, tm.to_array] and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
# TODO: this was taken from tests.series.test_ops; de-duplicate
@pytest.mark.parametrize('scalar_td', [timedelta(minutes=5, seconds=4),
Timedelta(minutes=5, seconds=4),
Timedelta('5m4s').to_timedelta64()])
def test_operators_timedelta64_with_timedelta(self, scalar_td):
# smoke tests
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 + scalar_td
scalar_td + td1
td1 - scalar_td
scalar_td - td1
td1 / scalar_td
scalar_td / td1
# TODO: this was taken from tests.series.test_ops; de-duplicate
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
def test_td64arr_add_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
if box is pd.DataFrame and names[1] == 'Venkman':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_add_sub_td64_nat(self, box):
# GH#23320 special handling for timedelta64("NaT")
tdi = pd.TimedeltaIndex([NaT, Timedelta('1s')])
other = np.timedelta64("NaT")
expected = pd.TimedeltaIndex(["NaT"] * 2)
obj = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
result = other - obj
tm.assert_equal(result, expected)
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + two_hours
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - two_hours
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
# TODO: this was taken from tests.series.test_operators; de-duplicate
def test_timedelta64_operations_with_DateOffset(self):
# GH#10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(PerformanceWarning):
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3),
timedelta(minutes=5, seconds=6),
timedelta(hours=2, minutes=5, seconds=3)])
tm.assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
tm.assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
if box is pd.DataFrame and names[1] == 'bar':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box):
# GH#18849
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_sub_offset_index(self, names, box):
# GH#18824, GH#19744
if box is pd.DataFrame and names[1] == 'bar':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_with_array):
# GH#18824
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = None if box_with_array is pd.DataFrame else PerformanceWarning
with tm.assert_produces_warning(warn):
res = tdi - other
tm.assert_equal(res, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_with_offset_series(self, names, box_df_fail):
# GH#18849
box = box_df_fail
box2 = Series if box in [pd.Index, tm.to_array] else box
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
name=names[2])
tdi = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected_add)
# TODO: separate/parametrize add/sub test?
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
name=names[2])
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = tdi - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox,
box_with_array):
# GH#18824
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
tdi = tm.box_expected(tdi, box_with_array)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
class TestTimedeltaArraylikeMulDivOps:
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# TODO: Moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize("m", [1, 3, 10])
@pytest.mark.parametrize("unit", ['D', 'h', 'm', 's', 'ms', 'us', 'ns'])
def test_timedelta64_conversions(self, m, unit):
startdate = Series(pd.date_range('2013-01-01', '2013-01-03'))
enddate = Series(pd.date_range('2013-03-01', '2013-03-03'))
ser = enddate - startdate
ser[2] = np.nan
# op
expected = Series([x / np.timedelta64(m, unit) for x in ser])
result = ser / np.timedelta64(m, unit)
tm.assert_series_equal(result, expected)
# reverse op
expected = Series([Timedelta(np.timedelta64(m, unit)) / x
for x in ser])
result = np.timedelta64(m, unit) / ser
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
| tm.assert_equal(result, idx) | pandas.util.testing.assert_equal |
import codecs
import datetime
import json
import numbers
import warnings
import dill
import numpy as np
import pandas as pd
import pickle
from pymongo import MongoClient
import redis
from tabulate import tabulate
class Concordia():
def __init__(self, persistent_db_config=None, in_memory_db_config=None, default_row_id_field=None):
print('Welcome to Concordia! We\'ll do our best to take a couple stressors off your plate and give you more confidence in your machine learning systems in production.')
self.persistent_db_config = {
'host': 'localhost'
, 'port': 27017
, 'db': '_concordia'
}
if persistent_db_config is not None:
self.persistent_db_config.update(persistent_db_config)
self.in_memory_db_config = {
'host': 'localhost'
, 'port': 6379
, 'db': 0
}
if in_memory_db_config is not None:
self.in_memory_db_config.update(in_memory_db_config)
self._create_db_connections()
self.valid_prediction_types = set([str, int, float, list, 'int8', 'int16', 'int32', 'int64', 'float16', 'float32', 'float64'])
self.default_row_id_field = default_row_id_field
params_to_save = {
'persistent_db_config': self.persistent_db_config
, 'in_memory_db_config': self.in_memory_db_config
, 'default_row_id_field': self.default_row_id_field
}
self.insert_into_persistent_db(val=params_to_save, val_type='concordia_config', row_id='_intentionally_blank', model_id='_intentionally_blank')
def set_params(self, params_dict):
for k, v in params_dict.items():
setattr(self, k, v)
def _create_db_connections(self):
host = self.in_memory_db_config['host']
port = self.in_memory_db_config['port']
db = self.in_memory_db_config['db']
self.rdb = redis.StrictRedis(host=host, port=port, db=db)
host = self.persistent_db_config['host']
port = self.persistent_db_config['port']
db = self.persistent_db_config['db']
client = MongoClient(host=host, port=port)
self.mdb = client[db]
return self
    # feature_importances is a dict, with keys as feature names, and values being the importance of each feature. It doesn't matter how the importances are calculated, we'll just sort by those values
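    # e.g. (illustrative values only): feature_importances={'age': 0.31, 'income': 0.22, 'zip_code': 0.05}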
def add_model(self, model, model_id, feature_names=None, feature_importances=None, description=None, features_to_save='all'):
print('One thing to keep in mind is that each model_id must be unique in each db configuration. So if two Concordia instances are using the same database configurations, you should make sure their model_ids do not overlap.')
redis_key_model = self.make_redis_model_key(model_id)
stringified_model = codecs.encode(dill.dumps(model), 'base64').decode()
self.rdb.set(redis_key_model, stringified_model)
redis_key_features = self.make_redis_key_features(model_id)
stringified_features = json.dumps(features_to_save)
self.rdb.set(redis_key_features, stringified_features)
if feature_importances is not None:
if not isinstance(feature_importances, dict):
raise(TypeError('feature_importances must be a dict, where each key is a feature name, and each value is the importance of that feature'))
for k, v in feature_importances.items():
if isinstance(v, np.generic):
feature_importances[k] = np.asscalar(v)
mdb_doc = {
'val_type': 'model_info'
, 'model': stringified_model
, 'model_id': model_id
, 'feature_names': feature_names
, 'feature_importances': json.dumps(feature_importances)
, 'description': description
, 'date_added': datetime.datetime.now()
, 'features_to_save': stringified_features
}
self.insert_into_persistent_db(mdb_doc, val_type=mdb_doc['val_type'], row_id=mdb_doc['model_id'], model_id=mdb_doc['model_id'])
return self
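# A small self-contained sketch of why add_model converts numpy scalars before json.dumps:
# numpy scalar types are not JSON-serializable, so each np.generic value is turned into a
# plain Python scalar first. The feature names and importances below are made-up examples.
#
# import json
# import numpy as np
# feature_importances = {'age': np.float32(0.61), 'income': np.float64(0.39)}
# feature_importances = {k: (v.item() if isinstance(v, np.generic) else v)
#                        for k, v in feature_importances.items()}
# print(json.dumps(feature_importances))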
def add_label(self, row_id, model_id, label):
label_doc = {
'row_id': row_id
, 'model_id': model_id
, 'label': label
}
if not isinstance(row_id, numbers.Number) and not isinstance(row_id, np.generic) and not isinstance(row_id, str):
if isinstance(model_id, str):
label_doc['model_id'] = [model_id for x in range(len(row_id))]
label_doc = pd.DataFrame(label_doc)
self.insert_into_persistent_db(val=label_doc, val_type='live_labels', row_id=label_doc['row_id'], model_id=label_doc['model_id'])
def list_all_models(self, verbose=True):
live_models = self.retrieve_from_persistent_db(val_type='model_info')
if verbose:
print('Here are all the models that have been added to concordia for live predictions:')
model_names = [x['model_id'] for x in live_models]
print(model_names)
for model_info in live_models:
del model_info['model']
return live_models
def retrieve_from_persistent_db(self, val_type, row_id=None, model_id=None, min_date=None, date_field=None):
if min_date is not None and date_field is None and not (isinstance(min_date, datetime.datetime) or isinstance(min_date, datetime.date)):
print('You have specified a min_date, but not a date_field')
print('Without the date_field specified, Concordia will query against the "_concordia_created_at" field, which is of type datetime.datetime.')
print('Therefore, your min_date must be of type datetime.datetime, but it is not right now. It is of type: {}'.format(type(min_date)))
raise(TypeError('min_date must be of type datetime if date_field is unspecified'))
query_params = {
'row_id': row_id
, 'model_id': model_id
}
if row_id is None:
del query_params['row_id']
if model_id is None:
del query_params['model_id']
if min_date is not None:
if date_field is None:
query_params['_concordia_created_at'] = {'$gte': min_date}
else:
query_params[date_field] = {'$gte': min_date}
result = self.mdb[val_type].find(query_params)
# Handle the case where we have multiple predictions from the same row, or any other instances where we have multiple results for the same set of ids
if isinstance(result, dict):
result = [result]
elif not isinstance(result, list):
result = list(result)
return result
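# A standalone sketch of the MongoDB query document built above: optional row_id/model_id
# keys are dropped when None, and min_date becomes a '$gte' filter on the creation
# timestamp. The literal values ('r123', the model_id, the date) are made up.
#
# import datetime
# query_params = {'row_id': 'r123', 'model_id': None}
# query_params = {k: v for k, v in query_params.items() if v is not None}
# query_params['_concordia_created_at'] = {'$gte': datetime.datetime(2024, 1, 1)}
# print(query_params)  # {'row_id': 'r123', '_concordia_created_at': {'$gte': datetime.datetime(2024, 1, 1, 0, 0)}}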
def check_row_id(self, val, row_id, idx=None):
if row_id is None:
calculated_row_id = val.get(self.default_row_id_field, None)
if calculated_row_id is None:
print('You must pass in a row_id for anything that gets saved to the db.')
print('This input is missing a value for "row_id"')
if self.default_row_id_field is not None:
print('This input is also missing a value for "{}", the default_row_id_field'.format(self.default_row_id_field))
raise(ValueError('Missing "row_id" field'))
else:
row_id = calculated_row_id
assert row_id is not None
val['row_id'] = row_id
return val
def check_model_id(self, val, model_id, idx=None):
if isinstance(model_id, list):
model_id = model_id[idx]
if model_id is None:
calculated_model_id = val.get('model_id', None)
if calculated_model_id is None:
print('You must pass in a model_id for anything that gets saved to the db.')
print('This input is missing a value for "model_id"')
raise(ValueError('Missing "model_id" field'))
else:
model_id = calculated_model_id
assert model_id is not None
val['model_id'] = model_id
return val
def _insert_df_into_db(self, df, val_type, row_id, model_id):
df_cols = set(df.columns)
if 'row_id' not in df_cols:
if row_id is not None:
df['row_id'] = row_id
else:
if self.default_row_id_field not in df_cols:
print('You must pass in a row_id for anything that gets saved to the db.')
print('This input is missing a value for "row_id"')
if self.default_row_id_field is not None:
print('This input is also missing a value for "{}", the default_row_id_field'.format(self.default_row_id_field))
raise(ValueError('Missing "row_id" field'))
if 'model_id' not in df_cols:
if model_id is not None:
df['model_id'] = model_id
else:
print('You must pass in a model_id for anything that gets saved to the db.')
print('This input is missing a value for "model_id"')
raise(ValueError('Missing "model_id" field'))
chunk_min_idx = 0
chunk_size = 1000
while chunk_min_idx < df.shape[0]:
max_idx = min(df.shape[0], chunk_min_idx + chunk_size)
df_chunk = df.iloc[chunk_min_idx: max_idx]
df_chunk = df_chunk.to_dict('records')
self.mdb[val_type].insert_many(df_chunk)
del df_chunk
chunk_min_idx += chunk_size
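# A toy, Mongo-free sketch of the chunking loop above: the DataFrame is written in
# slices of chunk_size records, each converted with to_dict('records'). Collecting into
# a list stands in for mdb[val_type].insert_many.
#
# import pandas as pd
# toy_df = pd.DataFrame({'row_id': range(2500), 'model_id': 'm1'})
# inserted, chunk_size, chunk_min_idx = [], 1000, 0
# while chunk_min_idx < toy_df.shape[0]:
#     max_idx = min(toy_df.shape[0], chunk_min_idx + chunk_size)
#     inserted.extend(toy_df.iloc[chunk_min_idx:max_idx].to_dict('records'))
#     chunk_min_idx += chunk_size
# assert len(inserted) == 2500  # three chunks: 1000 + 1000 + 500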
def insert_into_persistent_db(self, val, val_type, row_id=None, model_id=None):
val = val.copy()
if '_id' in val:
del val['_id']
if '_id_' in val:
del val['_id_']
val['_concordia_created_at'] = datetime.datetime.utcnow()
if isinstance(val, dict):
val = self.check_row_id(val=val, row_id=row_id)
val = self.check_model_id(val=val, model_id=model_id)
for k, v in val.items():
if isinstance(v, np.generic):
val[k] = v.item()  # np.asscalar was removed in newer NumPy; .item() is the equivalent
self.mdb[val_type].insert_one(val)
else:
self._insert_df_into_db(df=val, val_type=val_type, row_id=row_id, model_id=model_id)
return self
def make_redis_model_key(self, model_id):
return '_concordia_{}_{}'.format(model_id, 'model')
def _get_model(self, model_id):
redis_key_model = self.make_redis_model_key(model_id)
redis_result = self.rdb.get(redis_key_model)
if redis_result == 'None' or redis_result is None:
# Try to get it from MongoDB
mdb_result = self.retrieve_from_persistent_db(val_type='model_info', row_id=None, model_id=model_id)
if mdb_result is None or len(mdb_result) == 0:
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
print('We could not find a corresponding model for model_id {}'.format(model_id))
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
error_string = 'We could not find a corresponding model for model_id {}'.format(model_id)
raise(ValueError(error_string))
else:
model = mdb_result[0]['model']
self.rdb.set(redis_key_model, model)
redis_result = self.rdb.get(redis_key_model)
redis_result = dill.loads(codecs.decode(redis_result, 'base64'))
return redis_result
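# A self-contained sketch of the serialize/deserialize round trip used for models above:
# dill.dumps -> base64 text (safe to store as a Redis/Mongo string) -> dill.loads.
# The lambda is just a stand-in for a real model object.
#
# import codecs
# import dill
# toy_model = lambda x: x * 2
# stringified = codecs.encode(dill.dumps(toy_model), 'base64').decode()
# restored = dill.loads(codecs.decode(stringified.encode(), 'base64'))
# assert restored(21) == 42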
def _get_features_to_save(self, model_id):
redis_key = self.make_redis_key_features(model_id)
redis_result = self.rdb.get(redis_key)
if redis_result is None or redis_result == 'None':
mdb_result = self.retrieve_from_persistent_db(val_type='model_info', row_id=None, model_id=model_id)
if mdb_result is None or len(mdb_result) == 0:
return 'all'
else:
try:
features = mdb_result[0]['features_to_save']
except KeyError:
features = json.dumps('all')
self.rdb.set(redis_key, features)
redis_result = self.rdb.get(redis_key)
if isinstance(redis_result, bytes):
redis_result = redis_result.decode('utf-8')
redis_result = json.loads(redis_result)
return redis_result
def make_redis_key_features(self, model_id):
return '_concordia_{}_{}'.format(model_id, 'features_to_save')
# This can handle both individual dictionaries and Pandas DataFrames as inputs
def add_data_and_predictions(self, model_id, features, predictions, row_ids, actuals=None):
if not isinstance(features, pd.DataFrame):
print('Training features must be a pandas DataFrame, not a {}'.format(type(features)))
raise(TypeError('Training features must be a pandas DataFrame'))
features = features.copy()
features['row_id'] = row_ids
features['model_id'] = model_id
features_to_save = self._get_features_to_save(model_id=model_id)
concordia_features_to_save = ['row_id', 'model_id']
if features_to_save == 'all':
features_to_save = list(features.columns)
else:
features_to_save = features_to_save + concordia_features_to_save
prediction_docs = []
for idx, pred in enumerate(predictions):
if type(pred) not in self.valid_prediction_types:
pred = list(pred)
pred_doc = {
'prediction': pred
, 'row_id': row_ids.iloc[idx]
, 'model_id': model_id
}
prediction_docs.append(pred_doc)
predictions_df = | pd.DataFrame(prediction_docs) | pandas.DataFrame |
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Series,
date_range,
)
import pandas._testing as tm
class TestDataFrameInterpolate:
def test_interpolate_inplace(self, frame_or_series, using_array_manager, request):
# GH#44749
if using_array_manager and frame_or_series is DataFrame:
mark = pytest.mark.xfail(reason=".values-based in-place check is invalid")
request.node.add_marker(mark)
obj = frame_or_series([1, np.nan, 2])
orig = obj.values
obj.interpolate(inplace=True)
expected = frame_or_series([1, 1.5, 2])
tm.assert_equal(obj, expected)
# check we operated *actually* inplace
assert np.shares_memory(orig, obj.values)
assert orig.squeeze()[1] == 1.5
def test_interp_basic(self):
df = DataFrame(
{
"A": [1, 2, np.nan, 4],
"B": [1, 4, 9, np.nan],
"C": [1, 2, 3, 5],
"D": list("abcd"),
}
)
expected = DataFrame(
{
"A": [1.0, 2.0, 3.0, 4.0],
"B": [1.0, 4.0, 9.0, 9.0],
"C": [1, 2, 3, 5],
"D": list("abcd"),
}
)
result = df.interpolate()
tm.assert_frame_equal(result, expected)
result = df.set_index("C").interpolate()
expected = df.set_index("C")
expected.loc[3, "A"] = 3
expected.loc[5, "B"] = 9
tm.assert_frame_equal(result, expected)
def test_interp_empty(self):
# https://github.com/pandas-dev/pandas/issues/35598
df = DataFrame()
result = df.interpolate()
assert result is not df
expected = df
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
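# A quick standalone illustration of the default Series/DataFrame.interpolate() behaviour
# exercised by the tests above (method='linear', interpolating by position); a sketch
# assuming a reasonably recent pandas.
import numpy as np
import pandas as pd
s = pd.Series([1.0, np.nan, np.nan, 4.0])
print(s.interpolate().tolist())  # [1.0, 2.0, 3.0, 4.0]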
#!/usr/bin/env python
# coding: utf-8
# # Make the master table
import os
import sys
import pybedtools as pbt
import pandas as pd
import numpy as np
import subprocess as sp
import json
os.chdir('/mnt/BioHome/jreyna/jreyna/projects/dchallenge/')
pbt.set_bedtools_path('/mnt/BioApps/bedtools/bin/')
bgzip = '/mnt/BioApps/tabix/tabix-0.2.6/bgzip'
tabix = '/mnt/BioApps/tabix/tabix-0.2.6/tabix'
bedpe_6cols = ['chrA', 'startA', 'endA', 'chrB', 'startB', 'endB']
bedpe_10cols = ['chrA', 'startA', 'endA', 'chrB', 'startB', 'endB', 'name', 'score', 'strand1', 'strand2']
## default values for the command line
#sys.argv = [0] * 8
#sys.argv[1] = 'results/main/2021_Nikhil_eQTL/Results/Colocalization/T1D_34012112_Gaulton/'
#sys.argv[1] += 'BLUEPRINT_eQTL_Monocyte/FINAL_Summary_Coloc_Gene_SNP_Pairs.bed'
#sys.argv[2] = 'results/refs/ensembl/gencode.v19.annotation.bed'
#sys.argv[3] = 'results/main/2021_Nikhil_eQTL/Data/FitHiChIP_Loops/CM/FitHiChIP_L/FitHiChIP.interactions_FitHiC_Q0.01.bed'
#sys.argv[4] = 'results/refs/spp/SPP_D-Challenge_networks.xlsx'
#sys.argv[5] = 'results/refs/hg19/hg19.chrom.sizes'
#sys.argv[6] = 'results/main/2021_Nikhil_eQTL/Data/eqtl_sqtl_summ_stats/BLUEPRINT_eQTL/Monocyte.txt.gz'
#sys.argv[7] = 'results/main/loop_analysis/washU/'
# parsing the commandline arguments
coloc_fn = sys.argv[1]
genes_fn = sys.argv[2]
loop_fn = sys.argv[3]
spp_fn = sys.argv[4]
gs_fn = sys.argv[5]
eqtl_fn = sys.argv[6]
outdir = sys.argv[7]
# setting the output file names
os.makedirs(outdir, exist_ok=True)
# ## Load the colocalization data
# load the colocalization data
coloc = pd.read_table(coloc_fn)
# extract the most significant according the H4
coloc_sig_df = coloc[coloc['pp_H4_Coloc_Summary'] > 0.75].copy()  # copy to avoid SettingWithCopyWarning on the edits below
coloc_sig_df.rename(columns={'pos': 'end'}, inplace=True)
coloc_sig_df.loc[:, 'start'] = coloc_sig_df.loc[:, 'end'] - 1
coloc_sig_full = coloc_sig_df.copy(deep=True)
coloc_sig_df = coloc_sig_df[['chr', 'start', 'end', 'rs_id', 'variant_id']]
coloc_sig_df = coloc_sig_df.loc[~coloc_sig_df.duplicated(subset='rs_id'),]
coloc_sig_pbt = pbt.BedTool.from_dataframe(coloc_sig_df.iloc[:, 0:4]).sort()
#csnp_slop_pbt = coloc_sig_pbt.slop(b=500000, g=gs_fn)
# ## Load the gene data
# load the gencode coords
cols = ['chrom', 'start', 'end', 'strand', 'type', 'gene_id', 'gene_name']
gencode = pd.read_table(genes_fn, header=None, names=cols)
# extract just the genes
genes_df = gencode.loc[gencode.type.isin(['gene'])]
genes_df = genes_df.loc[~genes_df.duplicated(subset='gene_id'), :]
genes_df.loc[:, 'chrom'] = genes_df['chrom'].astype(str)
genes_df = genes_df.iloc[:, [0,1,2,6,5]]
genes_pbt = pbt.BedTool.from_dataframe(genes_df).sort()
# ## Find the closest gene
closest_gene = coloc_sig_pbt.closest(genes_pbt, d=True)
closest_gene = closest_gene.to_dataframe()
closest_gene = closest_gene.iloc[:, [3,7,8,9]]
closest_gene.columns = ['rs_id', 'cls_gname', 'cls_id', 'cls_dist']
closest_gene.head()
uniq_cls_gname = closest_gene.groupby(['rs_id']).cls_gname.apply(lambda x: ','.join(x))
uniq_cls_ids = closest_gene.groupby(['rs_id']).cls_id.apply(lambda x: ','.join(x))
uniq_cls_dist = closest_gene.groupby(['rs_id']).cls_dist.apply(lambda x: ','.join([str(i) for i in x]))
uniq_cls = pd.merge(uniq_cls_gname, uniq_cls_ids, left_index=True, right_index=True)
uniq_cls = | pd.merge(uniq_cls, uniq_cls_dist, left_index=True, right_index=True) | pandas.merge |
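# A toy sketch of the aggregation used in the script above: when several genes are equally
# close to a SNP, bedtools closest reports one row per gene, and the groupby/','.join
# collapses them back into a single comma-separated field per rs_id. Names are invented.
import pandas as pd
toy = pd.DataFrame({'rs_id': ['rs1', 'rs1', 'rs2'],
                    'cls_gname': ['GENE_A', 'GENE_B', 'GENE_C']})
collapsed = toy.groupby('rs_id').cls_gname.apply(lambda x: ','.join(x))
print(collapsed.to_dict())  # {'rs1': 'GENE_A,GENE_B', 'rs2': 'GENE_C'}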
import numpy as np
import os
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from collections import defaultdict
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib
import matplotlib.pyplot as plt
def svd(M):
u,s,vt = np.linalg.svd(M,full_matrices=False)
V = vt.T
S = np.diag(s)
return np.dot(u[:,:2],np.dot(S[:2,:2],V[:,:2].T))
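# A quick sanity check of the rank-2 reconstruction above (a sketch that relies on the
# svd() just defined and the numpy import at the top of this file): for a matrix that is
# exactly rank 2, the truncated reconstruction should reproduce it to numerical precision.
_u = np.random.RandomState(0).randn(6, 2)
_v = np.random.RandomState(1).randn(2, 5)
_M = _u @ _v                      # rank-2 by construction
assert np.allclose(svd(_M), _M)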
# def PCA(X,k=2):
# mu = X.mean(axis=1)
# C = np.dot(X.transpose(),X) / X.shape[0] - np.dot(mu.transpose(),mu)
# w, v = np.linalg.eig(C)
# idx = w.argsort()[::-1]
# eigenVectors = v[:, idx]
# return X @ eigenVectors[:,:k]
def plot_pca_embedding(embedding,fname,labels):
pca = PCA(n_components=2,)
embedding_points = pca.fit_transform(embedding)
x = embedding_points[:,0]
y = embedding_points[:,1]
new_x = []
new_y = []
z = []
for a,b,l in zip(x,y,labels):
if l > 0:
new_x.append(a)
new_y.append(b)
z.append(l)
cmap = matplotlib.cm.get_cmap('viridis')
z = np.log2(np.array(z)+1)
normalize = matplotlib.colors.Normalize(vmin=min(z),vmax=max(z))
colors = [cmap(normalize(value)) for value in z]
fig,ax = plt.subplots()
ax.scatter(new_x,new_y,color=colors,s=1)
cax,_ = matplotlib.colorbar.make_axes(ax)
cbar = matplotlib.colorbar.ColorbarBase(cax,cmap=cmap,norm=normalize)
plt.savefig(os.path.join(fname,'pca.png'))
plt.clf()
def plot_tsne_embedding(embedding,fname,labels):
tsne = TSNE(n_components=2,init='pca')
embedding_points = tsne.fit_transform(embedding)
x = embedding_points[:,0]
y = embedding_points[:,1]
new_x = []
new_y = []
z = []
for a,b,l in zip(x,y,labels):
if l > 0 :
new_x.append(a)
new_y.append(b)
z.append(l)
cmap = matplotlib.cm.get_cmap('viridis')
z = np.log2(np.array(z)+1)
normalize = matplotlib.colors.Normalize(vmin=min(z),vmax=max(z))
colors = [cmap(normalize(value)) for value in z]
fig,ax = plt.subplots()
ax.scatter(new_x,new_y,color=colors,s=1)
cax,_ = matplotlib.colorbar.make_axes(ax)
cbar = matplotlib.colorbar.ColorbarBase(cax,cmap=cmap,norm=normalize)
plt.savefig(os.path.join(fname,'tsne.png'))
plt.clf()
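# A standalone sketch of the colour-mapping trick shared by the two functions above:
# log2(x+1) compresses the label range, Normalize rescales it to [0, 1], and the colormap
# turns each value into an RGBA tuple. cm.get_cmap mirrors the call used above; on
# matplotlib >= 3.9 use matplotlib.colormaps['viridis'] instead.
import numpy as np
from matplotlib import cm, colors
z = np.log2(np.array([0, 1, 3, 7, 15]) + 1.0)
normalize = colors.Normalize(vmin=z.min(), vmax=z.max())
viridis = cm.get_cmap('viridis')
rgba = [viridis(normalize(v)) for v in z]
print(rgba[0], rgba[-1])  # darkest (low) and brightest (high) ends of viridis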
def plot_density(pred,gold,density,seen_idxs,fname):
gold_relation_length = defaultdict(lambda:0)
tp = defaultdict(lambda:0)
for p,g in zip(pred,gold):
gold_relation_length[g] += 1
if p == g:
tp[g] += 1
x = []
y = []
labels = []
for g in gold_relation_length:
x.append(density[g])
y.append(tp[g]*1./gold_relation_length[g])
labels.append(1 if g in seen_idxs else 0)
df = | pd.DataFrame() | pandas.DataFrame |
""" General functions for plotting PV data """
import json
from typing import List
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from nowcasting_dataset.data_sources.gsp.eso import get_gsp_metadata_from_eso
from nowcasting_dataset.data_sources.gsp.gsp_model import GSP
from nowcasting_dataset.geospatial import osgb_to_lat_lon
from plotly.subplots import make_subplots
from nowcasting_utils.visualization.line import make_trace
from nowcasting_utils.visualization.utils import make_buttons, make_slider
WGS84_CRS = "EPSG:4326"
def get_trace_centroid_gsp(gsp: GSP, example_index: int) -> go.Scatter:
"""Produce plot of centroid GSP"""
y = gsp.power_normalized[example_index, :, 0]
x = gsp.time[example_index]
return make_trace(x, y, truth=True, name="GSP", color="Blue")
def get_trace_all_gsps(
gsp: GSP, example_index: int, plot_other_gsp: bool = False
) -> List[go.Scatter]:
"""Produce plot of centroid GSP"""
traces = []
x = gsp.time[example_index]
if plot_other_gsp:
n_gsps = gsp.power_mw.shape[2]
# make the lines a little bit see-through
opacity = (1 / n_gsps) ** 0.25
for gsp_index in range(1, n_gsps):
y = gsp.power_normalized[example_index, :, gsp_index]
gsp_id = gsp.id[example_index, gsp_index].values
truth = False
if ~np.isnan(gsp_id):
gsp_id = int(gsp_id)
name = f"GSP {gsp_id}"
traces.append(
make_trace(
x, y, truth=truth, name=name, color="Green", opacity=opacity, mode="lines"
)
)
centroid_trace = get_trace_centroid_gsp(gsp=gsp, example_index=example_index)
centroid_trace["legendrank"] = 1
traces.append(centroid_trace)
return traces
def get_traces_gsp_intensity(gsp: GSP, example_index: int):
"""Get traces of pv intenisty map"""
time = gsp.time[example_index]
traces = []
for t_index in range(len(time)):
trace = get_trace_gsp_intensity_one_time_step(
gsp=gsp, example_index=example_index, t_index=t_index
)
traces.append(trace)
return traces
def get_trace_gsp_intensity_one_time_step(gsp: GSP, example_index: int, t_index: int):
"""Get trace of pv intensity map"""
time = gsp.time[example_index]
gsp_id = gsp.id[example_index, 0].values
name = str(time[t_index].data)
# get shape from eso
gsp_shape = get_gsp_metadata_from_eso().to_crs("EPSG:4326")
gsp_metadata = get_gsp_metadata_from_eso()
# select first GSP system
gsp_metadata = gsp_metadata[gsp_metadata["gsp_id"] == gsp_id]
gsp_shape = gsp_shape[gsp_shape.RegionID.isin(gsp_metadata.region_id)]
# add z axis for colour
gsp_shape["Amount"] = gsp.power_normalized[example_index, t_index, 0].values
# get json object
shapes_dict = json.loads(gsp_shape.to_json())
trace = go.Choroplethmapbox(
geojson=shapes_dict,
locations=gsp_shape.index,
z=gsp_shape.Amount,
colorscale="Viridis",
name=name,
)
return trace
def make_fig_of_animation_from_frames(traces, gsp: GSP, example_index: int):
"""Make animated fig form traces"""
frames = []
for i, trace in enumerate(traces[1:]):
frames.append(go.Frame(data=trace, name=f"frame{i+1}"))
# make slider
labels = [pd.to_datetime(time.data) for time in gsp.time[example_index]]
sliders = make_slider(labels=labels)
x = gsp.x_coords[example_index][gsp.x_coords[example_index] != 0].mean()
y = gsp.y_coords[example_index][gsp.y_coords[example_index] != 0].mean()
lat, lon = osgb_to_lat_lon(x=x, y=y)
fig = go.Figure(
data=traces[0],
layout=go.Layout(
title="Start Title",
),
frames=frames,
)
fig.update_layout(updatemenus=[make_buttons()])
fig.update_layout(
mapbox_style="carto-positron", mapbox_zoom=8, mapbox_center={"lat": lat, "lon": lon}
)
fig.update_layout(sliders=sliders)
return fig
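# A minimal, self-contained sketch of the frame-based animation pattern used above.
# make_buttons()/make_slider() come from nowcasting_utils and are not shown here, so this
# sketch uses a plain updatemenus "Play" button instead; the scatter data is arbitrary.
import plotly.graph_objects as go
toy_frames = [go.Frame(data=[go.Scatter(y=[0, i, 0])], name=f"frame{i}") for i in range(1, 4)]
toy_fig = go.Figure(data=[go.Scatter(y=[0, 0, 0])], frames=toy_frames)
toy_fig.update_layout(updatemenus=[dict(type="buttons",
                                        buttons=[dict(label="Play", method="animate", args=[None])])])
# toy_fig.show()  # uncomment to render in a browser / notebook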
def get_fig_gsp_combined(gsp: GSP, example_index: int):
"""
Create a combined plot
1. Plot the gsp intensity in time
2. Plot the gsp intensity with coords and animate in time
"""
traces_pv_intensity_in_time = get_trace_all_gsps(gsp=gsp, example_index=example_index)
traces_pv_intensity_map = get_traces_gsp_intensity(gsp=gsp, example_index=example_index)
x = gsp.x_coords[example_index][gsp.x_coords[example_index] != 0].mean()
y = gsp.y_coords[example_index][gsp.y_coords[example_index] != 0].mean()
lat, lon = osgb_to_lat_lon(x=x, y=y)
fig = make_subplots(
rows=1,
cols=2,
subplot_titles=("Map", "Time Series"),
specs=[
[{"type": "choroplethmapbox"}, {"type": "xy"}],
],
)
# add first animation plot
fig.add_trace(trace=traces_pv_intensity_map[0], row=1, col=1)
# add all time series plots
for trace in traces_pv_intensity_in_time:
fig.add_trace(trace, row=1, col=2)
n_traces = len(fig.data)
frames = []
static_traces = list(fig.data[1:])
for i, trace in enumerate(traces_pv_intensity_map):
frames.append(
dict(data=[trace] + static_traces, traces=list(range(n_traces)), name=f"frame{i}")
)
# make slider
labels = [ | pd.to_datetime(time.data) | pandas.to_datetime |
import pandas as pd
import numpy as np
import datetime
class Durations(object):
@classmethod
def set(cls, X, extract_cols, dataset):
print("... ... Durations")
all_df = dataset["all_df"]
# duration from first action to clickout
dffac_df = all_df[["session_id", "timestamp", "timestamp_dt"]].groupby(
"session_id").first().reset_index()
dffac_df = dffac_df[["session_id", "timestamp_dt"]]
dffac_df.columns = ["session_id", "first_timestamp_dt"]
X = pd.merge(X, dffac_df, on="session_id", how="left")
X["session_duration"] = X.apply(lambda x: (x.timestamp_dt - x.first_timestamp_dt).seconds, axis=1)
extract_cols = extract_cols + ["session_duration"]
del dffac_df
# duration from last destination to clickout
dflsc_df = all_df[["session_id", "_session_id", "timestamp", "timestamp_dt"]].groupby(
"_session_id").first().reset_index()
dflsc_df = dflsc_df[dflsc_df._session_id.isin(X._session_id)]
dflsc_df = dflsc_df[["session_id", "timestamp_dt"]]
dflsc_df.columns = ["session_id", "step_first_timestamp_dt"]
X = pd.merge(X, dflsc_df, on="session_id", how="left")
X["step_duration"] = X.apply(lambda x: (x.timestamp_dt - x.step_first_timestamp_dt).seconds, axis=1)
extract_cols = extract_cols + ["step_duration"]
del dflsc_df
return (X, extract_cols)
class JustClickout(object):
@classmethod
def set(cls, X, extract_cols):
print("... ... JustClickout")
# append current fillters
def get_cf_features(x):
sbp = 1 if "Sort by Price" in x.current_filters else 0
sbd = 1 if "Sort By Distance" in x.current_filters else 0
sbr = 1 if "Sort By Rating" in x.current_filters else 0
fod = 1 if "Focus on Distance" in x.current_filters else 0
fsr = 1 if "Focus on Rating" in x.current_filters else 0
bev = 1 if "Best Value" in x.current_filters else 0
return pd.Series({'cf_sbp': sbp
, 'cf_sbd': sbd
, 'cf_sbr': sbr
, 'cf_fod': fod
, 'cf_fsr': fsr
, 'cf_bev': bev})
X["current_filters"] = X["current_filters"].fillna("")
curf_df = X[["current_filters"]].apply(lambda x: get_cf_features(x), axis=1)
X = pd.concat([X, curf_df], axis=1)
extract_cols = extract_cols + list(curf_df.columns)
del curf_df
return (X, extract_cols)
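# A toy sketch of the apply-returning-pd.Series trick above: each row's filter string is
# turned into several 0/1 columns in one pass. Only three of the six flags are shown, and
# the filter strings are invented.
#
# import pandas as pd
# toy = pd.DataFrame({'current_filters': ['Sort by Price|Best Value', '', 'Focus on Rating']})
# flags = toy['current_filters'].apply(
#     lambda cf: pd.Series({'cf_sbp': int('Sort by Price' in cf),
#                           'cf_bev': int('Best Value' in cf),
#                           'cf_fsr': int('Focus on Rating' in cf)}))
# print(pd.concat([toy, flags], axis=1))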
class JustBeforeClickout(object):
@classmethod
def set(cls, X, dataset):
print("... ... JustBeforeClickout")
all_df = dataset["all_df"]
# last action_type
lasttype_df = all_df[["session_id", "action_type", "is_y"]].copy()
lasttype_df["lat"] = lasttype_df["action_type"].shift(1)
lasttype_df["last_session_id"] = lasttype_df["session_id"].shift(1)
lasttype_df = lasttype_df[lasttype_df.is_y == 1]
lasttype_df = lasttype_df[lasttype_df.session_id == lasttype_df.last_session_id]
lasttype_df = lasttype_df[["session_id", "lat"]]
onehot_lat = pd.get_dummies(lasttype_df, columns=['lat'])
X = pd.merge(X, onehot_lat, on="session_id", how="left")
lat_cols = list(onehot_lat.columns)
lat_cols.remove("session_id")
for lat_col in lat_cols:
X[lat_col] = X[lat_col].fillna(0)
del lasttype_df
del onehot_lat
return X
class Record2Impression(object):
@classmethod
def expand(cls, X, extract_cols, dataset):
print("... ... Record2Impression")
# create expanded
X = X.reset_index()
X["gid"] = X.index
X["n_imps"] = X[["impressions"]].apply(lambda x: len(str(x.impressions).split("|")), axis=1)
X["price_mean"] = X[["prices"]].apply(lambda x: np.mean(np.array(str(x.prices).split("|")).astype(int)), axis=1)
X["price_std"] = X[["prices"]].apply(lambda x: np.std(np.array(str(x.prices).split("|")).astype(int)), axis=1)
X["impression"] = X[["impressions"]].apply(lambda x: str(x.impressions).split("|"), axis=1)
X["price"] = X[["prices"]].apply(lambda x: str(x.prices).split("|"), axis=1)
X_impression = X[["gid", "impression"]].set_index('gid').impression.apply(pd.Series).stack().reset_index(
level=0).rename(columns={0: 'impression'})
X_price = X[["gid", "price"]].set_index('gid').price.apply(pd.Series).stack().reset_index(level=0).rename(
columns={0: 'price'})
X_position = X[["gid", "impression"]].set_index('gid').impression.apply(
lambda x: pd.Series(range(len(x)))).stack().reset_index(level=0).rename(columns={0: 'position'})
X_expanded = pd.concat([X_impression, X_price], axis=1)
X_expanded = pd.concat([X_expanded, X_position], axis=1)
X_expanded.columns = ["gid", "impression", "gid2", "price", "gid3", "position"]
X_expanded = X_expanded[["gid", "impression", "price", "position"]]
# join expaned
X = pd.merge(X_expanded, X[["gid", "n_imps", "price_mean", "price_std"] + extract_cols], on="gid", how="left")
# to normalize position and price
X["pos_rate"] = X["position"] / X["n_imps"]
X["pos"] = X["position"] + 1
X["price_norm"] = (X["price"].astype(float) - X["price_mean"].astype(float)) / X["price_std"].astype(float)
# join price_norm rank
pnorm_rank_df = X[["session_id", "price_norm"]].copy()
pnorm_rank_df = pnorm_rank_df[["session_id", "price_norm"]].groupby("session_id").rank(ascending=False)
pnorm_rank_df.columns = ["price_norm_rank"]
X = pd.concat([X, pnorm_rank_df], axis=1)
del pnorm_rank_df
# calc discount rate
X["price"] = X["price"].astype(float)
prices_df = X[["impression", "price"]].groupby("impression").agg({'price': np.mean}).reset_index()
prices_df.columns = ["impression", "item_price_mean"]
X = pd.merge(X, prices_df, on="impression", how="left")
X["discount_rate"] = X["price"] / X["item_price_mean"]
del prices_df
# append some important props and other props with over 0.2 coverage
sum_item_props_df = dataset["sum_item_props_df"]
item_props = dataset["item_props"]
prop_cols = ["pGood Rating"
, "pVery Good Rating"
, "pExcellent Rating"
, "pSatisfactory Rating"
, "p1 Star"
, "p2 Star"
, "p3 Star"
, "p4 Star"
, "p5 Star"
, "pBusiness Centre"
, "pBusiness Hotel"
, "pConference Rooms"]
c02over_prop_cols = sum_item_props_df[sum_item_props_df.coverage >= 0.2]["prop"].tolist()
prop_cols = prop_cols + c02over_prop_cols
prop_cols = list(set(prop_cols))
X = pd.merge(X, item_props[["item_id"] + prop_cols], left_on="impression", right_on="item_id", how="left")
X[prop_cols] = X[prop_cols].fillna(0)
return (X, extract_cols)
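# A toy sketch of the impression-expansion idiom above: the pipe-separated impression list
# becomes one row per (gid, impression) via apply(pd.Series).stack(). Newer pandas can do
# the same with DataFrame.explode('impression'). The ids are invented.
#
# import pandas as pd
# toy = pd.DataFrame({'gid': [0, 1], 'impressions': ['101|102|103', '201|202']})
# toy['impression'] = toy['impressions'].apply(lambda s: s.split('|'))
# exploded = (toy[['gid', 'impression']].set_index('gid').impression
#             .apply(pd.Series).stack().reset_index(level=0)
#             .rename(columns={0: 'impression'}))
# print(exploded)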
class DecisionMakingProcess(object):
@classmethod
def detect(cls, X, dataset):
print("... ... Decision Making Process")
print("... ... ... Attention and Perceptual Encoding")
print("... ... ... Information Acquisition and Evaluation")
all_df = dataset["all_df"]
# join pos stats"
copos_df = all_df[all_df.action_type == "clickout item"][
["session_id", "reference", "impressions", "is_y"]].copy()
copos_df = copos_df[copos_df.is_y == 0]
copos_df["impression"] = copos_df[["impressions"]].apply(lambda x: str(x.impressions).split("|"), axis=1)
copos_df["co_pos"] = copos_df[["impression", "reference"]].apply(
lambda x: x.impression.index(x.reference) + 1 if x.reference in x.impression else 1, axis=1)
copos_df_stats = copos_df[["session_id", "co_pos"]].groupby("session_id").agg(
{'co_pos': [np.min, np.max, np.mean]}).reset_index()
copos_df_stats.columns = ["session_id", "co_pos_min", "co_pos_max", "co_pos_mean"]
X = pd.merge(X, copos_df_stats, on="session_id", how="left")
X["co_pos_min"] = X["co_pos_min"].fillna(1)
X["co_pos_mean"] = X["co_pos_mean"].fillna(1)
X["co_pos_max"] = X["co_pos_max"].fillna(1)
X["co_pos_min_diff"] = X["pos"] - X["co_pos_min"]
X["co_pos_mean_diff"] = X["pos"] - X["co_pos_mean"]
X["clickouted_pos_max_diff"] = X["co_pos_max"] - X["pos"]
del copos_df
del copos_df_stats
# is_last and is_last_elapsed_time
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"
, "clickout item"]
lastref_df = all_df[["session_id", "action_type", "reference", "timestamp", "is_y"]].copy()
lastref_df["is_target"] = 0
lastref_df.loc[lastref_df.is_y == 1, "is_target"] = 1
lastref_df = lastref_df[lastref_df.action_type.isin(action_types)]
lastref_df["last_session_id"] = lastref_df["session_id"].shift(1)
lastref_df["last_reference"] = lastref_df["reference"].shift(1)
lastref_df["last_timestamp"] = lastref_df["timestamp"].shift(1)
lastref_df = lastref_df[lastref_df.session_id == lastref_df.last_session_id]
lastref_df = lastref_df[lastref_df.is_target == 1][["session_id", "last_reference", "last_timestamp"]]
X = pd.merge(X, lastref_df, on="session_id", how="left")
X[["last_reference"]] = X[["last_reference"]].fillna("-1")
X[["last_timestamp"]] = X[["last_timestamp"]].fillna(-1)
X["is_last"] = X[["impression", "last_reference"]].apply(lambda x: 1 if x.impression == x.last_reference else 0,
axis=1)
X["elapsed_time_between_is_last"] = X[["impression", "last_reference", "timestamp", "last_timestamp"]].apply(
lambda x: int(x.timestamp) - int(x.last_timestamp) if x.impression == x.last_reference else np.nan, axis=1)
lastdur_df = X[["session_id", "elapsed_time_between_is_last"]].copy()
lastdur_df = lastdur_df.dropna(axis=0, how='any')
X.drop("elapsed_time_between_is_last", axis=1, inplace=True)
X = pd.merge(X, lastdur_df, on="session_id", how="left")
del lastref_df
del lastdur_df
# join is_last_last
lastref_df = all_df[["session_id", "action_type", "reference", "is_y"]].copy()
lastref_df["last_last_session_id"] = lastref_df["session_id"].shift(2)
lastref_df["last_last_reference"] = lastref_df["reference"].shift(2)
lastref_df = lastref_df[lastref_df.is_y == 1]
lastref_df = lastref_df[lastref_df.session_id == lastref_df.last_last_session_id]
lastref_df = lastref_df[["session_id", "last_last_reference"]]
lastref_df = lastref_df[~lastref_df.duplicated()]
X = pd.merge(X, lastref_df, on="session_id", how="left")
X[["last_last_reference"]] = X[["last_last_reference"]].fillna("-1")
X["is_last_last"] = X[["impression", "last_last_reference"]].apply(
lambda x: 1 if x.impression == x.last_last_reference else 0, axis=1)
del lastref_df
# elapsed-next mean by item (note: "elapsed next" looks ahead, so this is effectively future information)
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"
, "clickout item"]
isnext_df = all_df[["session_id", "action_type", "reference", "timestamp", "is_y"]].copy()
isnext_df["next_session_id"] = isnext_df["session_id"].shift(-1)
isnext_df["next_timestamp"] = isnext_df["timestamp"].shift(-1)
isnext_df = isnext_df[isnext_df.session_id == isnext_df.next_session_id]
isnext_df["elapsed_next"] = isnext_df["next_timestamp"] - isnext_df["timestamp"]
isnext_df = isnext_df[isnext_df.action_type.isin(action_types)]
isnext_df = isnext_df[isnext_df.is_y == 0]
isnext_gp_df = isnext_df[["reference", "elapsed_next"]].groupby("reference").agg(
{"elapsed_next": np.mean}).reset_index()
isnext_gp_df.columns = ["impression", "next_elapsed_time"]
X = pd.merge(X, isnext_gp_df, on="impression", how="left")
del isnext_gp_df
isnext_gp_df = isnext_df[isnext_df.action_type == "clickout item"][["reference", "elapsed_next"]].groupby(
"reference").agg({"elapsed_next": np.mean}).reset_index()
isnext_gp_df.columns = ["impression", "next_elapsed_time_byco"]
X = pd.merge(X, isnext_gp_df, on="impression", how="left")
del isnext_df
del isnext_gp_df
# clickouted item during session
couted_df = all_df[["action_type", "session_id", "reference", "is_y"]].copy()
couted_df = couted_df[couted_df.action_type == "clickout item"]
couted_df = couted_df[couted_df.is_y == 0] # to prevent leakage
couted_df = couted_df[["session_id", "reference"]]
couted_df.columns = ["session_id", "impression"]
couted_df = couted_df[~couted_df.duplicated()]
couted_df["clickouted"] = 1
X = pd.merge(X, couted_df, on=["session_id", "impression"], how="left")
X["clickouted"] = X["clickouted"].fillna(0)
X["clickouted"] = X["clickouted"].astype(int)
# clickouted price "diff": ratio of the impression price to the session's mean clicked-out price
co_price_df = all_df[all_df.action_type == "clickout item"][
["session_id", "reference", "prices", "impressions", "is_y"]].copy()
co_price_df = co_price_df[co_price_df.is_y == 0] # to prevent leakage
def get_price(reference, impressions, prices):
imps = str(impressions).split("|")
prs = str(prices).split("|")
if reference in imps:
return prs[imps.index(reference)]
else:
return 0
co_price_df["price"] = co_price_df.apply(lambda x: get_price(x.reference, x.impressions, x.prices), axis=1)
co_price_df["price"] = co_price_df["price"].astype(float)
co_price_df = co_price_df.groupby("session_id").agg({'price': np.mean}).reset_index()
co_price_df.columns = ["session_id", "couted_price_mean"]
X = pd.merge(X, co_price_df, on="session_id", how="left")
X["couted_price_mean"] = X["couted_price_mean"].fillna(-1)
X["clickouted_price_diff"] = X["price"].astype(float) / X["couted_price_mean"]
X.loc[X.clickouted_price_diff < 0, "clickouted_price_diff"] = 0
del co_price_df
# flag the two items displayed above and the five items displayed below each clicked-out / last item
u_cols = []
def set_undert_the_clickouted_and_islast(X, target_col, nu=5):
u_col = target_col + "_u"
X[u_col] = X["session_id"]
X.loc[X[target_col] != 1, u_col] = ""
for u in [_ for _ in range(-2, nu + 1, 1) if _ != 0]:
new_col = u_col + str(u).replace("-", "p")
X[new_col] = X[u_col].shift(u)
X[new_col] = X[new_col].fillna("")
X.loc[X[new_col] == X["session_id"], new_col] = "1"
X.loc[X[new_col] != "1", new_col] = 0
X.loc[X[new_col] == "1", new_col] = 1
u_cols.append(new_col)
X.drop(u_col, axis=1, inplace=True)
set_undert_the_clickouted_and_islast(X, "clickouted", 5)
set_undert_the_clickouted_and_islast(X, "is_last", 5)
# sum the above/below displayed-item flags created just above
u_coted_cols = [col for col in u_cols if "clickouted" in col]
u_islast_col = [col for col in u_cols if "is_last" in col]
X["clickouted_sum"] = X[u_coted_cols].sum(axis=1)
X["is_last_sum"] = X[u_islast_col].sum(axis=1)
# step_elapsed_mean which represents velocity of user activities.
selapsed_df = all_df[["session_id", "step", "timestamp", "timestamp_dt", "action_type", "reference"]].copy()
selapsed_df["pre_timestamp"] = selapsed_df["timestamp"].shift(1)
selapsed_df["pre_timestamp_dt"] = selapsed_df["timestamp_dt"].shift(1)
selapsed_df["pre_session_id"] = selapsed_df["session_id"].shift(1)
selapsed_df = selapsed_df[selapsed_df.session_id == selapsed_df.pre_session_id]
selapsed_df["elapsed"] = selapsed_df["timestamp"] - selapsed_df["pre_timestamp"]
selapsed_df = selapsed_df[["session_id", "elapsed"]]
selapsed_df = selapsed_df[selapsed_df.elapsed.notna()]
selapsed_df = selapsed_df[selapsed_df.elapsed > 0]
selapsed_df = selapsed_df.groupby("session_id").agg({"elapsed": np.mean}).reset_index()
selapsed_df.columns = ["session_id", "step_elapsed_mean"]
X = pd.merge(X, selapsed_df, on="session_id", how="left")
del selapsed_df
# elapsed time of the last step over all action types (possibly redundant with is_last_elapsed_time)
lduration_all_df = all_df[["session_id", "action_type", "timestamp", "is_y"]].copy()
lduration_all_df["pre_timestamp"] = lduration_all_df["timestamp"].shift(1)
lduration_all_df["pre_session_id"] = lduration_all_df["session_id"].shift(1)
lduration_all_df = lduration_all_df[lduration_all_df.session_id == lduration_all_df.pre_session_id]
lduration_all_df["elapsed_time"] = lduration_all_df["timestamp"] - lduration_all_df["pre_timestamp"]
lduration_all_df = lduration_all_df[lduration_all_df.is_y == 1]
lduration_all_df = lduration_all_df[["session_id", "elapsed_time"]]
X = pd.merge(X, lduration_all_df, on="session_id", how="left")
del lduration_all_df
# first action_type
firsta_df = all_df[["session_id", "_session_id", "action_type", "is_y"]].copy()
firsta_df = firsta_df[firsta_df.is_y == 0] # to prevent leakage
firsta_df = firsta_df.groupby("_session_id").first().reset_index()
firsta_df = firsta_df.groupby("session_id").last().reset_index()
firsta_df.loc[firsta_df["action_type"] == "search for destination", "action_type"] = "fa_sfd"
firsta_df.loc[firsta_df["action_type"] == "interaction item image", "action_type"] = "fa_iii"
firsta_df.loc[firsta_df["action_type"] == "clickout item", "action_type"] = "fa_coi"
firsta_df.loc[firsta_df["action_type"] == "search for item", "action_type"] = "fa_sfi"
firsta_df.loc[firsta_df["action_type"] == "search for poi", "action_type"] = "fa_sfp"
firsta_df.loc[firsta_df["action_type"] == "change of sort order", "action_type"] = "fa_coso"
firsta_df.loc[firsta_df["action_type"] == "filter selection", "action_type"] = "fa_fis"
firsta_df.loc[firsta_df["action_type"] == "interaction item info", "action_type"] = "fa_iiinfo"
firsta_df.loc[firsta_df["action_type"] == "interaction item rating", "action_type"] = "fa_iirat"
firsta_df.loc[firsta_df["action_type"] == "interaction item deals", "action_type"] = "fa_iidea"
firsta_df = firsta_df[["session_id", "action_type"]]
firsta_df.columns = ["session_id", "at"]
onehot_firsta = pd.get_dummies(firsta_df, columns=['at'])
firsta_cols = list(onehot_firsta.columns)
firsta_cols.remove("session_id")
X = pd.merge(X, onehot_firsta, on="session_id", how="left")
for firsta_col in firsta_cols:
X[firsta_col] = X[firsta_col].fillna(0)
del firsta_df
del onehot_firsta
# price norm by item rating prop
X["r6"] = 0
X["r7"] = 0
X["r8"] = 0
X["r9"] = 0
X.loc[X["pSatisfactory Rating"] == 1, "r6"] = 6
X.loc[X["pGood Rating"] == 1, "r7"] = 7
X.loc[X["pVery Good Rating"] == 1, "r8"] = 8
X.loc[X["pExcellent Rating"] == 1, "r9"] = 9
X["rating"] = X[["r6", "r7", "r8", "r9"]].apply(
lambda x: np.mean(np.trim_zeros(np.array([x.r6, x.r7, x.r8, x.r9]))), axis=1)
X["rating"] = X["rating"].fillna(-1)
pns_df = X[["session_id", "rating", "price"]].groupby(["session_id", "rating"]).agg(
{'price': [np.mean, np.std]}).reset_index()
pns_df.columns = ["session_id", "rating", "r_price_mean", "r_price_std"]
pns_df["r_price_std"] = pns_df["r_price_std"].fillna(1)
X = pd.merge(X, pns_df, on=["session_id", "rating"], how="left")
X["r_price_norm"] = (X["price"].astype(float) - X["r_price_mean"].astype(float)) / X["r_price_std"].astype(
float)
del pns_df
# price norm by star
X["star"] = -1
X.loc[X["p1 Star"] == 1, "star"] = 1
X.loc[X["p2 Star"] == 1, "star"] = 2
X.loc[X["p3 Star"] == 1, "star"] = 3
X.loc[X["p4 Star"] == 1, "star"] = 4
X.loc[X["p5 Star"] == 1, "star"] = 5
pns_df = X[["session_id", "star", "price"]].groupby(["session_id", "star"]).agg(
{'price': [np.mean, np.std]}).reset_index()
pns_df.columns = ["session_id", "star", "s_price_mean", "s_price_std"]
pns_df["s_price_std"] = pns_df["s_price_std"].fillna(1)
X = pd.merge(X, pns_df, on=["session_id", "star"], how="left")
X["s_price_norm"] = (X["price"].astype(float) - X["s_price_mean"].astype(float)) / X["s_price_std"].astype(
float)
del pns_df
return X
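# A toy sketch of the shift-based "previous event in the same session" pattern used
# repeatedly above: shift(1) pairs each row with the one before it, rows whose previous
# row belongs to another session are dropped, and the difference gives the elapsed time.
#
# import pandas as pd
# toy = pd.DataFrame({'session_id': ['s1', 's1', 's1', 's2', 's2'],
#                     'timestamp':  [100,  130,  145,  500,  520]})
# toy['pre_session_id'] = toy['session_id'].shift(1)
# toy['pre_timestamp'] = toy['timestamp'].shift(1)
# toy = toy[toy['session_id'] == toy['pre_session_id']].copy()
# toy['elapsed'] = toy['timestamp'] - toy['pre_timestamp']
# print(toy[['session_id', 'elapsed']])  # s1: 30, 15 ; s2: 20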
class ByItem(object):
@classmethod
def set(cls, X, dataset):
print("... ... ByItem")
all_df = dataset["all_df"]
# imps score
impscore_df = dataset["impscore_df"]
item_props = dataset["item_props"]
X = pd.merge(X, impscore_df, on="impression", how="left")
X["impsocre"] = X["impsocre"].fillna(0)
# # append some important props and other props with over 0.2 coverage
# sum_item_props_df = dataset["sum_item_props_df"]
# prop_cols = ["pGood Rating"
# , "pVery Good Rating"
# , "pExcellent Rating"
# , "pSatisfactory Rating"
# , "p1 Star"
# , "p2 Star"
# , "p3 Star"
# , "p4 Star"
# , "p5 Star"
# , "pBusiness Centre"
# , "pBusiness Hotel"
# , "pConference Rooms"]
# c02over_prop_cols = sum_item_props_df[sum_item_props_df.coverage >= 0.2]["prop"].tolist()
# prop_cols = prop_cols + c02over_prop_cols
# prop_cols = list(set(prop_cols))
# X = pd.merge(X, item_props[["item_id"] + prop_cols], left_on="impression", right_on="item_id", how="left")
# X[prop_cols] = X[prop_cols].fillna(0)
# append item svd n_components=10
item_props_svd = dataset["item_props_svd"]
prop_svd_cols = list(item_props_svd.columns)
prop_svd_cols.remove("item_id")
X = pd.merge(X, item_props_svd, left_on="impression", right_on="item_id", how="left")
X[prop_svd_cols] = X[prop_svd_cols].fillna(0)
# # price norm by item rating prop
# X["r6"] = 0
# X["r7"] = 0
# X["r8"] = 0
# X["r9"] = 0
# X.loc[X["pSatisfactory Rating"] == 1, "r6"] = 6
# X.loc[X["pGood Rating"] == 1, "r7"] = 7
# X.loc[X["pVery Good Rating"] == 1, "r8"] = 8
# X.loc[X["pExcellent Rating"] == 1, "r9"] = 9
# X["rating"] = X[["r6", "r7", "r8", "r9"]].apply(
# lambda x: np.mean(np.trim_zeros(np.array([x.r6, x.r7, x.r8, x.r9]))), axis=1)
# X["rating"] = X["rating"].fillna(-1)
# pns_df = X[["session_id", "rating", "price"]].groupby(["session_id", "rating"]).agg(
# {'price': [np.mean, np.std]}).reset_index()
# pns_df.columns = ["session_id", "rating", "r_price_mean", "r_price_std"]
# pns_df["r_price_std"] = pns_df["r_price_std"].fillna(1)
# X = pd.merge(X, pns_df, on=["session_id", "rating"], how="left")
# X["r_price_norm"] = (X["price"].astype(float) - X["r_price_mean"].astype(float)) / X["r_price_std"].astype(
# float)
# del pns_df
#
# # price norm by star
# X["star"] = -1
# X.loc[X["p1 Star"] == 1, "star"] = 1
# X.loc[X["p2 Star"] == 1, "star"] = 2
# X.loc[X["p3 Star"] == 1, "star"] = 3
# X.loc[X["p4 Star"] == 1, "star"] = 4
# X.loc[X["p5 Star"] == 1, "star"] = 5
# pns_df = X[["session_id", "star", "price"]].groupby(["session_id", "star"]).agg(
# {'price': [np.mean, np.std]}).reset_index()
# pns_df.columns = ["session_id", "star", "s_price_mean", "s_price_std"]
# pns_df["s_price_std"] = pns_df["s_price_std"].fillna(1)
# X = pd.merge(X, pns_df, on=["session_id", "star"], how="left")
# X["s_price_norm"] = (X["price"].astype(float) - X["s_price_mean"].astype(float)) / X["s_price_std"].astype(
# float)
# del pns_df
# item ctr
ctrbyitem_df = all_df[all_df.action_type == "clickout item"][["session_id", "reference", "is_y"]].copy()
ctrbyitem_df = ctrbyitem_df[ctrbyitem_df.is_y == 0]
ref_df = ctrbyitem_df[["reference"]].groupby(["reference"]).size().reset_index()
ref_df.columns = ["impression", "rcnt"]
ref_df["ctrbyitem"] = ref_df["rcnt"].astype(float) / ref_df.shape[0]
ref_df = ref_df[["impression", "ctrbyitem"]]
X = pd.merge(X, ref_df, on="impression", how="left")
X["ctrbyitem"] = X["ctrbyitem"].fillna(0)
del ctrbyitem_df
del ref_df
# item ctr by city
cr_tmp_df = all_df[all_df.action_type == "clickout item"].copy()
cr_tmp_df = cr_tmp_df[cr_tmp_df.is_y == 0] # to prevent leakage
city_df = cr_tmp_df[["city"]].groupby(["city"]).size().reset_index()
city_df.columns = ["city", "ccnt"]
cityref_df = cr_tmp_df[["city", "reference"]].groupby(["city", "reference"]).size().reset_index()
cityref_df.columns = ["city", "impression", "rcnt"]
cityref_df = pd.merge(cityref_df, city_df, on="city", how="left")
cityref_df["ctrbycity"] = cityref_df["rcnt"].astype(float) / cityref_df["ccnt"].astype(float)
cityref_df = cityref_df[["city", "impression", "ctrbycity"]]
X = pd.merge(X, cityref_df, on=["city", "impression"], how="left")
X["ctrbycity"] = X["ctrbycity"].fillna(0)
del cr_tmp_df
del city_df
del cityref_df
# item ctr by city rank
ctrbycity_rank_df = X[["session_id", "ctrbycity"]].copy()
ctrbycity_rank_df = ctrbycity_rank_df[["session_id", "ctrbycity"]].groupby("session_id").rank(ascending=False)
ctrbycity_rank_df.columns = ["ctrbycity_rank"]
X = pd.concat([X, ctrbycity_rank_df], axis=1)
del ctrbycity_rank_df
# bayes likelihood by item
bayes_likelihood = dataset["bayes_likelihood"]
X["rlr"] = X["impression"].astype(str) + X["last_reference"].astype(str)
def set_bayes_li(rlr):
if rlr in bayes_likelihood:
return bayes_likelihood[rlr]
return 0.0
X["bayes_li"] = X[["rlr"]].apply(lambda x: set_bayes_li(x.rlr), axis=1)
# clickouted item 2 item during session
v2v_counter = dataset["v2v_counter"]
def extract_sv2v_counter(iids):
v = {}
for iid in iids:
if iid in v2v_counter:
for s in v2v_counter[iid]:
if not s in v:
v[s] = v2v_counter[iid][s]
return v
couted_df = all_df[["action_type", "session_id", "reference", "is_y"]].copy()
couted_df = couted_df[couted_df.action_type == "clickout item"]
couted_df = couted_df[couted_df.is_y == 0] # to prevent leakage
couted_df = couted_df[["session_id", "reference"]]
couted_df.columns = ["session_id", "impression"]
couted_df = couted_df[~couted_df.duplicated()]
couted_df["clickouted"] = 1
sv2v_df = couted_df.groupby("session_id").apply(
lambda x: extract_sv2v_counter(list(x.impression))).reset_index()
sv2v_df.columns = ["session_id", "sv2v"]
X = pd.merge(X, sv2v_df, on="session_id", how="left")
X["sv2v"] = X["sv2v"].fillna("{}")
X["sv2v_score"] = X[["impression", "sv2v"]].apply(
lambda x: x.sv2v[x.impression] if x.impression in x.sv2v else np.nan, axis=1)
X.drop("sv2v", axis=1, inplace=True)
sv2vs_stats = X.groupby("session_id").agg({"sv2v_score": [np.mean, np.std]}).reset_index()
sv2vs_stats.columns = ["session_id", "sv2v_score_mean", "sv2v_score_std"]
X = pd.merge(X, sv2vs_stats, on="session_id", how="left")
X["sv2v_score_norm"] = X["sv2v_score"] - X["sv2v_score_mean"] / X["sv2v_score_std"]
del couted_df
del sv2v_df
del sv2vs_stats
# flag whether each of these action_types has already been performed on the item during the session
couted_df = all_df[["action_type", "session_id", "reference"]].copy()
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"]
ated_cols = ["iired"
, "iifed"
, "iiied"
, "iided"
, "sfied"]
for i, action_type in enumerate(action_types):
at_df = couted_df[couted_df.action_type == action_type].copy()
at_df = at_df[["session_id", "reference"]]
at_df.columns = ["session_id", "impression"]
at_df = at_df[~at_df.duplicated()]
at_df[ated_cols[i]] = 1
X = pd.merge(X, at_df, on=["session_id", "impression"], how="left")
X[ated_cols[i]] = X[ated_cols[i]].fillna(0)
X[ated_cols[i]] = X[ated_cols[i]].astype(int)
del at_df
del couted_df
# dropout rate by each item during each session
dropout_df = all_df[["session_id", "action_type", "reference", "is_y"]].copy()
dropout_df = dropout_df[dropout_df.action_type.isin(["interaction item image", "clickout item"])]
dropout_df = dropout_df[dropout_df.is_y == 0] # to prevent leakage
dropout_df.loc[dropout_df["action_type"] == "interaction item image", "iii"] = 1
dropout_df["iii"] = dropout_df["iii"].fillna(0)
dropout_df.loc[dropout_df["action_type"] == "clickout item", "cko"] = 1
dropout_df["cko"] = dropout_df["cko"].fillna(0)
def is_dropout(iii, cko):
if iii != 0 and cko != 0:
return 0
elif iii != 0 and cko == 0:
return 1
else:
return -1
dropout_df = dropout_df[["session_id", "reference", "iii", "cko"]].groupby(["session_id", "reference"]).apply(
lambda x: is_dropout(np.sum(x.iii), np.sum(x.cko))).reset_index()
dropout_df.columns = ["session_id", "reference", "dropout"]
dropout_df = dropout_df[dropout_df.dropout != -1]  # filter rows (not elements) so undefined flags are dropped
dropout_df = dropout_df[["reference", "dropout"]].groupby("reference").apply(
lambda x: np.sum(x.dropout).astype(float) / len(x.dropout)).reset_index()
dropout_df.columns = ["impression", "dropout_rate"]
X = pd.merge(X, dropout_df, on="impression", how="left")
del dropout_df
# dropout rate by each item during all sessions
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"]
dropout_df = all_df[["session_id", "action_type", "reference", "is_y"]].copy()
dropout_df = dropout_df[dropout_df.action_type.isin(action_types + ["clickout item"])]
dropout_df = dropout_df[dropout_df.is_y == 0] # to prevent leakage
dropout_df.loc[dropout_df.action_type.isin(action_types), "iii"] = 1
dropout_df["iii"] = dropout_df["iii"].fillna(0)
dropout_df.loc[dropout_df["action_type"] == "clickout item", "cko"] = 1
dropout_df["cko"] = dropout_df["cko"].fillna(0)
dropout_df = dropout_df[["session_id", "reference", "iii", "cko"]].groupby(["session_id", "reference"]).apply(
lambda x: is_dropout(np.sum(x.iii), np.sum(x.cko))).reset_index()
dropout_df.columns = ["session_id", "reference", "dropout"]
dropout_df = dropout_df[dropout_df.dropout != -1]  # filter rows (not elements) so undefined flags are dropped
dropout_df = dropout_df[["reference", "dropout"]].groupby("reference").apply(
lambda x: np.sum(x.dropout).astype(float) / len(x.dropout)).reset_index()
dropout_df.columns = ["impression", "all_dropout_rate"]
X = pd.merge(X, dropout_df, on="impression", how="left")
del dropout_df
# action_type rate by each item
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"
, "clickout item"]
atstats_df = all_df[["session_id", "action_type", "reference", "is_y"]].copy()
atstats_df = atstats_df[atstats_df.action_type.isin(action_types)]
atstats_df = atstats_df[atstats_df.is_y == 0] # to prevent leakage
atstats_df = atstats_df[["reference", "action_type"]].groupby(["reference", "action_type"]).size().reset_index()
atstats_df.columns = ["reference", "action_type", "at_cnt"]
atstats_refcnt_df = atstats_df[["reference", "at_cnt"]].groupby("reference").sum().reset_index()
atstats_refcnt_df.columns = ["reference", "rf_cnt"]
atstats_df = pd.merge(atstats_df, atstats_refcnt_df, on="reference", how="left")
atstats_df["at_rate"] = atstats_df["at_cnt"].astype(float) / atstats_df["rf_cnt"]
atstats_df = atstats_df.pivot(index='reference', columns='action_type', values='at_rate').reset_index()
at_rate_cols = ["co_at_rate", "iid_at_rate", "iii_at_rate", "iif_at_rate", "iir_at_rate", "sfi_at_rate"]
atstats_df.columns = ["impression"] + at_rate_cols
atstats_df = atstats_df.fillna(0)
X = pd.merge(X, atstats_df, on="impression", how="left")
for at_rate_col in at_rate_cols:
X[at_rate_col] = X[at_rate_col].fillna(0)
del atstats_df
# action_type rate in-session rank by each item
at_rate_cols = ["co_at_rate"
, "iid_at_rate"
, "iii_at_rate"
, "iif_at_rate"
, "iir_at_rate"
, "sfi_at_rate"]
at_rank_cols = []
for at_rate_col in at_rate_cols:
at_rank_col = at_rate_col + "_rank"
at_rank_cols.append(at_rank_col)
at_rank_df = X[["session_id", at_rate_col]].copy()
at_rank_df = at_rank_df[["session_id", at_rate_col]].groupby("session_id").rank(ascending=False)
at_rank_df.columns = [at_rank_col]
X = pd.concat([X, at_rank_df], axis=1)
del at_rank_df
# reference_elapsed_mean and by action_type
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"
, "clickout item"]
relapsed_df = all_df[
["session_id", "step", "timestamp", "timestamp_dt", "action_type", "reference", "is_y"]].copy()
relapsed_df["pre_timestamp"] = relapsed_df["timestamp"].shift(1)
relapsed_df["pre_timestamp_dt"] = relapsed_df["timestamp_dt"].shift(1)
relapsed_df["pre_session_id"] = relapsed_df["session_id"].shift(1)
relapsed_df = relapsed_df[relapsed_df.session_id == relapsed_df.pre_session_id]
relapsed_df["elapsed"] = relapsed_df["timestamp"] - relapsed_df["pre_timestamp"]
relapsed_df = relapsed_df[relapsed_df.action_type.isin(action_types)]
relapsed_df = relapsed_df[relapsed_df.is_y == 0] # to prevent leakage
relapsed_df = relapsed_df[relapsed_df.elapsed.notna()]
relapsed_df = relapsed_df[relapsed_df.elapsed > 0]
r_relapsed_df = relapsed_df[["reference", "elapsed"]].groupby("reference").agg(
{"elapsed": np.mean}).reset_index()
r_relapsed_rate_cols = ["ref_elapsed_mean"]
r_relapsed_df.columns = ["impression"] + r_relapsed_rate_cols
a_relapsed_df = relapsed_df[["reference", "action_type", "elapsed"]].groupby(["reference", "action_type"]).agg(
{"elapsed": np.mean}).reset_index()
a_relapsed_df.columns = ["reference", "action_type", "at_elapsed_mean"]
a_relapsed_df = a_relapsed_df.pivot(index='reference', columns='action_type',
values='at_elapsed_mean').reset_index()
a_relapsed_rate_cols = ["co_ref_elapsed_mean", "iid_ref_elapsed_mean", "iii_ref_elapsed_mean",
"iif_ref_elapsed_mean", "iir_ref_elapsed_mean", "sfi_ref_elapsed_mean"]
a_relapsed_df.columns = ["impression"] + a_relapsed_rate_cols
X = pd.merge(X, r_relapsed_df, on="impression", how="left")
X = pd.merge(X, a_relapsed_df, on="impression", how="left")
del relapsed_df
del r_relapsed_df
del a_relapsed_df
# tsh "time split by hour" item ctr
tsh_df = all_df[all_df.action_type == "clickout item"][
["session_id", "action_type", "reference", "timestamp_dt", "is_y"]].copy()
tsh_df["tsh24"] = -1
X["tsh24"] = -1
ts_min = tsh_df["timestamp_dt"].min()
ts_max = tsh_df["timestamp_dt"].max()
def set_tscol(hours):
tscol = "tsh" + str(hours)
ts_start = ts_min
ts_end = ts_start + datetime.timedelta(hours=hours)
ts_bin = 1
while True:
tsh_df.loc[(tsh_df.timestamp_dt >= ts_start) & (tsh_df.timestamp_dt < ts_end), tscol] = ts_bin
X.loc[(X.timestamp_dt >= ts_start) & (X.timestamp_dt < ts_end), tscol] = ts_bin
ts_start = ts_end
ts_end = ts_start + datetime.timedelta(hours=hours)
if ts_start > ts_max:
break
ts_bin += 1
set_tscol(24)
tsh_df = tsh_df[tsh_df.is_y == 0]
tsh24_df = tsh_df[["tsh24"]].groupby(["tsh24"]).size().reset_index()
tsh24_df.columns = ["tsh24", "allcnt"]
tsh24ref_df = tsh_df[["tsh24", "reference"]].groupby(["tsh24", "reference"]).size().reset_index()
tsh24ref_df.columns = ["tsh24", "impression", "rcnt"]
tsh24ref_df = pd.merge(tsh24ref_df, tsh24_df, on="tsh24", how="left")
tsh24ref_df["ctrbytsh24"] = tsh24ref_df["rcnt"].astype(float) / tsh24ref_df["allcnt"].astype(float)
tsh24ref_df = tsh24ref_df[["tsh24", "impression", "ctrbytsh24"]]
X = pd.merge(X, tsh24ref_df, on=["tsh24", "impression"], how="left")
X["ctrbytsh24"] = X["ctrbytsh24"].fillna(0)
del tsh_df
del tsh24_df
del tsh24ref_df
# item ctr by some props
ctrbyprops_df = all_df[all_df.action_type == "clickout item"][["session_id", "reference", "is_y"]].copy()
ctrbyprops_df.columns = ["session_id", "item_id", "is_y"]
star_cols = ["p1 Star", "p2 Star", "p3 Star", "p4 Star", "p5 Star"]
rating_cols = ["pSatisfactory Rating", "pGood Rating", "pVery Good Rating", "pExcellent Rating"]
ctrbyprops_df = pd.merge(ctrbyprops_df, item_props[["item_id"] + star_cols + rating_cols], on="item_id",
how="left")
ctrbyprops_df["star"] = -1
ctrbyprops_df.loc[ctrbyprops_df["p1 Star"] == 1, "star"] = 1
ctrbyprops_df.loc[ctrbyprops_df["p2 Star"] == 1, "star"] = 2
ctrbyprops_df.loc[ctrbyprops_df["p3 Star"] == 1, "star"] = 3
ctrbyprops_df.loc[ctrbyprops_df["p4 Star"] == 1, "star"] = 4
ctrbyprops_df.loc[ctrbyprops_df["p5 Star"] == 1, "star"] = 5
ctrbyprops_df["r6"] = 0
ctrbyprops_df["r7"] = 0
ctrbyprops_df["r8"] = 0
ctrbyprops_df["r9"] = 0
ctrbyprops_df.loc[ctrbyprops_df["pSatisfactory Rating"] == 1, "r6"] = 6
ctrbyprops_df.loc[ctrbyprops_df["pGood Rating"] == 1, "r7"] = 7
ctrbyprops_df.loc[ctrbyprops_df["pVery Good Rating"] == 1, "r8"] = 8
ctrbyprops_df.loc[ctrbyprops_df["pExcellent Rating"] == 1, "r9"] = 9
ctrbyprops_df["rating"] = ctrbyprops_df[["r6", "r7", "r8", "r9"]].apply(
lambda x: np.mean(np.trim_zeros(np.array([x.r6, x.r7, x.r8, x.r9]))), axis=1)
ctrbyprops_df["rating"] = ctrbyprops_df["rating"].fillna(-1)
ctrbyprops_df["star_rating"] = "sr_" + ctrbyprops_df["star"].astype(str) + "_" + ctrbyprops_df["rating"].astype(
str)
ctrbyprops_df = ctrbyprops_df[["session_id", "star_rating", "item_id", "is_y"]]
ctrbyprops_df = ctrbyprops_df[ctrbyprops_df.is_y == 0] # to prevent leakage
ctrbyprops_df = ctrbyprops_df[["item_id", "star_rating"]]
ctrbyprops_df.columns = ["impression", "star_rating"]
prop_df = ctrbyprops_df[["star_rating"]].groupby(["star_rating"]).size().reset_index()
prop_df.columns = ["star_rating", "allcnt"]
propref_df = ctrbyprops_df[["star_rating", "impression"]].groupby(
["star_rating", "impression"]).size().reset_index()
propref_df.columns = ["star_rating", "impression", "rcnt"]
propref_df = pd.merge(propref_df, prop_df, on="star_rating", how="left")
propref_df["ctrbyprops"] = propref_df["rcnt"].astype(float) / propref_df["allcnt"].astype(float)
propref_df = propref_df[["star_rating", "impression", "ctrbyprops"]]
X["star_rating"] = "sr_" + X["star"].astype(str) + "_" + X["rating"].astype(str)
X = pd.merge(X, propref_df, on=["star_rating", "impression"], how="left")
X["ctrbyprops"] = X["ctrbyprops"].fillna(0)
del ctrbyprops_df
del prop_df
del propref_df
# count sessions whose first action is a clickout on this item (i.e. no search before clicking)
action_types = ["clickout item"]
is_nosi_df = all_df[["session_id", "action_type", "reference", "is_y"]].copy()
is_nosi_df = is_nosi_df.groupby("session_id").first().reset_index()
is_nosi_df = is_nosi_df[(is_nosi_df.action_type.isin(action_types)) & (is_nosi_df.is_y == 0)]
is_nosi_df = is_nosi_df[["reference"]].groupby("reference").size().reset_index()
is_nosi_df.columns = ["impression", "nosearch_cnt"]
X = pd.merge(X, is_nosi_df, on="impression", how="left")
X["nosearch_cnt"] = X["nosearch_cnt"].fillna(0)
del is_nosi_df
return X
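# A toy sketch of the "CTR by city" style features above: clickout counts per (city, item)
# divided by clickout counts per city, joined back via merge. Cities and item ids invented.
#
# import pandas as pd
# clicks = pd.DataFrame({'city': ['Paris', 'Paris', 'Paris', 'Oslo'],
#                        'reference': ['i1', 'i1', 'i2', 'i3']})
# city_totals = clicks.groupby('city').size().reset_index(name='ccnt')
# item_city = clicks.groupby(['city', 'reference']).size().reset_index(name='rcnt')
# item_city = item_city.merge(city_totals, on='city')
# item_city['ctrbycity'] = item_city['rcnt'] / item_city['ccnt']
# print(item_city)  # i1@Paris -> 2/3, i2@Paris -> 1/3, i3@Oslo -> 1/1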
class BySession(object):
@classmethod
def set(cls, X, dataset):
print("... ... BySession as Motivation")
all_df = dataset["all_df"]
# item ratio of appearance by each session
def get_precnt_ratio(x):
pre_references = str(x.pre_references).split("|")
len_pre_ref = len(pre_references)
if len_pre_ref != 0:
return float(pre_references.count(x.impression)) / len_pre_ref  # np.float alias removed in newer NumPy
return 0
preref_df = all_df[all_df.action_type != "clickout item"].groupby("session_id").apply(
lambda x: "|".join([r for r in list(x.reference) if str.isnumeric(r)])).reset_index()
preref_df.columns = ["session_id", "pre_references"]
X = pd.merge(X, preref_df, on="session_id", how="left")
X[["pre_references"]] = X[["pre_references"]].fillna("")
X["precnt_ratio"] = X[["impression", "pre_references"]].apply(lambda x: get_precnt_ratio(x), axis=1)
del preref_df
# action_type ratio of appearance by each session
atype_long_names = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"
, "clickout item"]
atype_short_names = ["interaction_item_rating_ratio"
, "iif_ratio"
, "iii_ratio"
, "iid_ratio"
, "sfi_ratio"
, "co_ratio"]
preref_df2 = all_df[all_df.action_type.isin(atype_long_names)][
["session_id", "reference", "action_type", "is_y"]].copy()
preref_df2 = preref_df2[preref_df2.is_y == 0] # to prevent leakage
preref_df2 = preref_df2[["session_id", "reference", "action_type"]]
preref_df3 = preref_df2[["session_id"]].groupby("session_id").size().reset_index()
preref_df3.columns = ["session_id", "cnt"]
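        # one-hot the action types and sum per (session, item), then divide by the session's total
        # interaction count to get per-item action-type ratios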
preref_df2 = pd.get_dummies(preref_df2, columns=['action_type'])
preref_df2 = preref_df2.groupby(["session_id", "reference"]).sum().reset_index()
preref_df2.columns = ["session_id", "impression"] + atype_short_names
preref_df2 = pd.merge(preref_df2, preref_df3, on="session_id", how="left")
preref_df2[atype_short_names] = preref_df2[atype_short_names].astype(float)
for atype_short_name in atype_short_names:
preref_df2[atype_short_name] = preref_df2[atype_short_name] / preref_df2["cnt"]
X = pd.merge(X, preref_df2, on=["session_id", "impression"], how="left")
del preref_df2
del preref_df3
# # clickouted item 2 item during session
# v2v_counter = dataset["v2v_counter"]
# def extract_sv2v_counter(iids):
# v = {}
# for iid in iids:
# if iid in v2v_counter:
# for s in v2v_counter[iid]:
# if not s in v:
# v[s] = v2v_counter[iid][s]
# return v
#
# couted_df = all_df[["action_type", "session_id", "reference", "is_y"]].copy()
# couted_df = couted_df[couted_df.action_type == "clickout item"]
# couted_df = couted_df[couted_df.is_y == 0] # to prevent leakage
# couted_df = couted_df[["session_id", "reference"]]
# couted_df.columns = ["session_id", "impression"]
# couted_df = couted_df[~couted_df.duplicated()]
# couted_df["clickouted"] = 1
# sv2v_df = couted_df.groupby("session_id").apply(
# lambda x: extract_sv2v_counter(list(x.impression))).reset_index()
# sv2v_df.columns = ["session_id", "sv2v"]
# X = pd.merge(X, sv2v_df, on="session_id", how="left")
# X["sv2v"] = X["sv2v"].fillna("{}")
# X["sv2v_score"] = X[["impression", "sv2v"]].apply(
# lambda x: x.sv2v[x.impression] if x.impression in x.sv2v else np.nan, axis=1)
# X.drop("sv2v", axis=1, inplace=True)
# sv2vs_stats = X.groupby("session_id").agg({"sv2v_score": [np.mean, np.std]}).reset_index()
# sv2vs_stats.columns = ["session_id", "sv2v_score_mean", "sv2v_score_std"]
# X = pd.merge(X, sv2vs_stats, on="session_id", how="left")
# X["sv2v_score_norm"] = X["sv2v_score"] - X["sv2v_score_mean"] / X["sv2v_score_std"]
# del couted_df
# del sv2v_df
# del sv2vs_stats
# is zero interactions
zeroit_df = all_df[["session_id"]].groupby("session_id").size().reset_index()
zeroit_df.columns = ["session_id", "it_count"]
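        # a session whose only logged row is the clickout itself has had no prior interactions (is_zeroit = 1)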
zeroit_df["is_zeroit"] = zeroit_df[["it_count"]].apply(lambda x: 1 if x.it_count == 1 else 0, axis=1)
X = pd.merge(X, zeroit_df, on="session_id", how="left")
del zeroit_df
# # first action_type
# firsta_df = all_df[["session_id", "_session_id", "action_type", "is_y"]].copy()
# firsta_df = firsta_df[firsta_df.is_y == 0] # to prevent leakage
# firsta_df = firsta_df.groupby("_session_id").first().reset_index()
# firsta_df = firsta_df.groupby("session_id").last().reset_index()
# firsta_df.loc[firsta_df["action_type"] == "search for destination", "action_type"] = "fa_sfd"
# firsta_df.loc[firsta_df["action_type"] == "interaction item image", "action_type"] = "fa_iii"
# firsta_df.loc[firsta_df["action_type"] == "clickout item", "action_type"] = "fa_coi"
# firsta_df.loc[firsta_df["action_type"] == "search for item", "action_type"] = "fa_sfi"
# firsta_df.loc[firsta_df["action_type"] == "search for poi", "action_type"] = "fa_sfp"
# firsta_df.loc[firsta_df["action_type"] == "change of sort order", "action_type"] = "fa_coso"
# firsta_df.loc[firsta_df["action_type"] == "filter selection", "action_type"] = "fa_fis"
# firsta_df.loc[firsta_df["action_type"] == "interaction item info", "action_type"] = "fa_iiinfo"
# firsta_df.loc[firsta_df["action_type"] == "interaction item rating", "action_type"] = "fa_iirat"
# firsta_df.loc[firsta_df["action_type"] == "interaction item deals", "action_type"] = "fa_iidea"
# firsta_df = firsta_df[["session_id", "action_type"]]
# firsta_df.columns = ["session_id", "at"]
# onehot_firsta = pd.get_dummies(firsta_df, columns=['at'])
# firsta_cols = list(onehot_firsta.columns)
# firsta_cols.remove("session_id")
# X = pd.merge(X, onehot_firsta, on="session_id", how="left")
# for firsta_col in firsta_cols:
# X[firsta_col] = X[firsta_col].fillna(0)
# del firsta_df
# del onehot_firsta
# unique reference ratio during session
uniqueref_df = all_df[["session_id", "reference", "action_type", "is_y"]].copy()
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"
, "clickout item"]
uniqueref_df = uniqueref_df[uniqueref_df.action_type.isin(action_types)]
uniqueref_df = uniqueref_df[uniqueref_df.is_y == 0] # to prevent leakage
uniqueref_df = uniqueref_df[["session_id", "reference"]].groupby("session_id").apply(
lambda x: len(set(list(x.reference))) / len(list(x.reference))).reset_index()
uniqueref_df.columns = ["session_id", "uniqueref_ratio"]
X = pd.merge(X, uniqueref_df, on="session_id", how="left")
del uniqueref_df
# number of action_types during session
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"
, "clickout item"]
atcnt_cols = ["iir_cnt"
, "iif_cnt"
, "iii_cnt"
, "iid_cnt"
, "sfi_cnt"
, "co_cnt"]
cocnt_df = all_df[all_df.action_type.isin(action_types)][["session_id", "action_type", "is_y"]].copy()
cocnt_df = cocnt_df[cocnt_df.is_y == 0] # to prevent leakage
for i, action_type in enumerate(action_types):
cnt_df = cocnt_df[cocnt_df.action_type == action_type][["session_id"]].copy()
cnt_df = cnt_df[["session_id"]].groupby("session_id").size().reset_index()
cnt_df.columns = ["session_id", atcnt_cols[i]]
X = pd.merge(X, cnt_df, on="session_id", how="left")
X[atcnt_cols[i]] = X[atcnt_cols[i]].fillna(0)
X[atcnt_cols[i]] = X[atcnt_cols[i]].astype(int)
del cnt_df
del cocnt_df
# # last duration all "is it same as is_last_elapsed_time?"
# lduration_all_df = all_df[["session_id", "action_type", "timestamp", "is_y"]].copy()
# lduration_all_df["pre_timestamp"] = lduration_all_df["timestamp"].shift(1)
# lduration_all_df["pre_session_id"] = lduration_all_df["session_id"].shift(1)
# lduration_all_df = lduration_all_df[lduration_all_df.session_id == lduration_all_df.pre_session_id]
# lduration_all_df["elapsed_time"] = lduration_all_df["timestamp"] - lduration_all_df["pre_timestamp"]
# lduration_all_df = lduration_all_df[lduration_all_df.is_y == 1]
# lduration_all_df = lduration_all_df[["session_id", "elapsed_time"]]
# X = pd.merge(X, lduration_all_df, on="session_id", how="left")
# del lduration_all_df
        # clickout count over all sessions, plus per-session summary statistics
cocntbyss_df = all_df[(all_df.action_type == "clickout item") & (all_df.is_y == 0)][["reference"]].copy()
cocnt_df = cocntbyss_df.groupby(["reference"]).size().reset_index()
cocnt_df.columns = ["impression", "cocntall"]
X = pd.merge(X, cocnt_df, on="impression", how="left")
X["cocntall"] = X["cocntall"].fillna(0)
cocntbyday_stats_df = X[["session_id", "cocntall"]].groupby("session_id").agg(
{'cocntall': [np.mean, np.std]}).reset_index()
cocntbyday_stats_df.columns = ["session_id", "cocntall_mean", "cocntall_std"]
X = pd.merge(X, cocntbyday_stats_df, on="session_id", how="left")
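        # z-score the global clickout count within each session so the feature is comparable across sessions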
X["cocntall_norm"] = (X["cocntall"] - X["cocntall_mean"]) / X["cocntall_std"]
del cocntbyss_df
del cocnt_df
del cocntbyday_stats_df
return X
class EncodingForCategories(object):
@classmethod
def to_prob(cls, X, dataset):
print("... ... EncodingForCategories")
all_df = dataset["all_df"]
# city prob
city_df = all_df[["city"]].copy()
city_vc_df = city_df["city"].value_counts().reset_index()
city_vc_df.columns = ["city", "cnt"]
city_vc_df["city_prob"] = city_vc_df["cnt"].astype(float) / city_df.shape[0]
city_vc_df = city_vc_df[["city", "city_prob"]]
X = pd.merge(X, city_vc_df, on="city", how="left")
del city_df
del city_vc_df
# platform prob
plt_df = all_df[["platform"]].copy()
plt_vc_df = plt_df["platform"].value_counts().reset_index()
plt_vc_df.columns = ["platform", "cnt"]
plt_vc_df["platform_prob"] = plt_vc_df["cnt"].astype(float) / plt_df.shape[0]
plt_vc_df = plt_vc_df[["platform", "platform_prob"]]
        X = pd.merge(X, plt_vc_df, on="platform", how="left")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# AUTHOR: <NAME>, EMBL
# converter.py
# For further usage, see the manual supplied together with CLEMSite
import numpy as np
from sklearn.manifold import TSNE
import pylab as Plot
import pandas as pd
import glob
from os import listdir
from os.path import isfile, join
import os, shutil
import seaborn as sns
from tqdm import tqdm
import matplotlib.patches as mpatches
import holoviews as hv
from holoviews import streams
hv.extension('bokeh')
import cv2
import time
from skimage import data, exposure, img_as_float
############################################## LOADING DATA ############################################
### Set here treatment names
_treatments = ["COPB2", "WDR75", "DNM1", "COPG1", "C1S", "DENND4C", "IPO8", "SRSF1", "Neg9", "FAM177B", "ACTR3",
"PTBP1", "DNM1", "NT5C", "PTBP1", "ARHGAP44", "Neg9", "ACTR3", "SRSF1", "C1S", "IPO8", "WDR75", "NT5C",
"FAM177B", "COPB1", "ARHGAP44", "Neg9", "GPT", "KIF11", "GPT", "DENND4C", "AURKB"]
_features = ['Metadata_BaseFileName', 'FileName_ColorImage', 'Location_Center_X', 'Location_Center_Y',
'Mean_Golgi_AreaShape_Center_X', 'Mean_Golgi_AreaShape_Center_Y', 'Mean_Nuclei_AreaShape_Solidity',
'Metadata_U', 'Metadata_V', 'Metadata_X', 'Metadata_Y', 'ImageQuality_PowerLogLogSlope_Dna',
'Intensity_IntegratedIntensity_GolgiBGCorr', 'Mean_Nuclei_Math_CV', 'Math_Blobness', 'Math_Diffuseness',
'Children_Golgi_Count', 'Mean_MaxAreaGolgi_AreaShape_FormFactor']
treatment_column = 'Gene'
treatment_index = 'Metadata_Y'
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
########################################################################################################
def loadData(regenerate_crops=True, no_treatments=False, subfolder=None):
"""
    regenerate_crops = rewrites the crops. Set it to False to save computation time when reloading data from a previous selection.
    no_treatments = set to True when there are no treatments and the full population of cells is used.
"""
data_folder = os.getcwd()
if subfolder:
data_folder = os.path.join(data_folder, subfolder)
    df = pd.DataFrame()
from typing import NoReturn, Dict, List, Iterator, Tuple
import numpy as np
import pandas as pd
from qanom import utils
from qanom.annotations.decode_encode_answers import Response, decode_response
def normalize(lst):
a = np.array(lst)
return a / sum(a)
def read_dir_of_csv(dir_path: str, prefix="", suffix="", sep=',') -> pd.DataFrame:
""" Concatenate (all) csv files in directory into one DataFrame """
import os
dfs, sections = zip(*[(read_csv(os.path.join(dir_path, fn), sep=sep), fn.rstrip(".csv"))
for fn in os.listdir(dir_path) if fn.endswith(suffix+".csv") and fn.startswith(prefix)])
return pd.concat(dfs, ignore_index=True, keys=sections, sort=False)
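# Hypothetical usage sketch (directory name is illustrative): read_dir_of_csv("path/to/csv_dir", prefix="train_")
# would concatenate every train_*.csv file in that directory into a single DataFrame.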
def read_dir_of_annot_csv(dir_path: str, prefix="", suffix="") -> pd.DataFrame:
""" Concatenate (all) csv files in directory into one DataFrame """
import os
dfs, sections = zip(*[(read_annot_csv(os.path.join(dir_path, fn)), fn.rstrip(".csv"))
for fn in os.listdir(dir_path) if fn.endswith(suffix+".csv") and fn.startswith(prefix)])
return pd.concat(dfs, ignore_index=True, keys=sections, sort=False)
def read_csv(file_path: str, sep=',') -> pd.DataFrame:
try:
        df = pd.read_csv(file_path, sep=sep)
#CSTAT+ A GPU-accelerated spatial pattern analysis algorithm for high-resolution 2D/3D hydrologic connectivity using array vectorization and convolutional neural network
#Author: <NAME>, <NAME>
#Department of Earth, Atmospheric and Planetary Sciences, Purdue University, 550 Stadium Mall Dr, West Lafayette, IN 47907 USA.
#Email: <EMAIL>; Alternative: <EMAIL>
#This is the omnidirectional version: CSTAT+/OMNI
import os
from osgeo import gdal
import numpy as np
import copy as cp
from numpy import genfromtxt as gft
from scipy.ndimage.measurements import label
from itertools import combinations_with_replacement,product
from mxnet import nd,gpu
from timeit import default_timer as timer
import pandas as pd
#Binarize pattern
def prep(expe0,threshold,NoData):
#Provide threshold for High/Low, usually the depth of shallow sheetflow
expe1=cp.deepcopy(expe0)
expe2=cp.deepcopy(expe0)
expe1[(expe1>=threshold)]=1
expe1[(expe1<threshold)]=0
expe2[(expe2==NoData)]=-1
expe2[(expe2>0)]=0
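    # 3x3 all-ones structuring element: label() groups High cells with 8-connectivity (diagonals included)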
connection_structure = np.array([[1,1,1],[1,1,1],[1,1,1]])
expela, num_features =label (expe1,structure=connection_structure)
expe3=expe2+expela
return (expe3)
def itercontrol(regions,k,bins,dibins,dibins4,binnum):
#Initiate empty array for storing histogram for directions, distances, and number of counted pairs in each distance range bin
co0=nd.zeros(binnum-1,gpu(0),dtype="float32")
codi0=nd.zeros((4,binnum-1),gpu(0),dtype="float32")
count0=nd.zeros(binnum-1,gpu(0),dtype="float32")
count4=nd.zeros((4,binnum-1),gpu(0),dtype="float32")
co4=nd.zeros((4,binnum-1),gpu(0),dtype="float32")
bins=nd.array(bins,gpu(0))
dibins=nd.array(dibins,gpu(0))
dibins4=nd.array(dibins4,gpu(0))
if k==2:
#Create segment index for the input array to meet the memory requirement
imax=list(range(int(regions.shape[0]/broadcdp)+(regions.shape[0]%broadcdp!=0)))
#Combinations with repeated indicies
iterator=list(combinations_with_replacement(imax,2))
for i in iterator:
if i[0]==i[1]:
vout=distanceAA2(regions,i,binnum,dibins,dibins4)
co0+=vout[0]
codi0+=vout[1]
count0+=vout[2]
co4+=vout[3]
count4+=vout[4]
else:
vout=distanceAA1(regions,i,binnum,dibins,dibins4)
co0+=vout[0]
codi0+=vout[1]
count0+=vout[2]
co4+=vout[3]
count4+=vout[4]
return (co0.asnumpy(),codi0.asnumpy(),count0.asnumpy(),co4.asnumpy(),count4.asnumpy())
elif k==1:
#Create segment index for the input array to meet the memory requirement
imax=list(range(int(regions.shape[0]/broadcdp)+(regions.shape[0]%broadcdp!=0)))
#Combinations with repeated indicies
iterator=list(combinations_with_replacement(imax,2))
for i in iterator:
if i[0]==i[1]:
count0+=distance2(regions,i,binnum,bins)
else:
count0+=distance1(regions,i,binnum,bins)
return (count0.asnumpy())
else:
#Unpack the tuple
regions_high,regions_low=regions
#Create segment index for the input array to meet the memory requirement
imax_high=list(range(int(regions_high.shape[0]/broadcdp)+(regions_high.shape[0]%broadcdp!=0)))
imax_low=list(range(int(regions_low.shape[0]/broadcdp)+(regions_low.shape[0]%broadcdp!=0)))
#Combinations with repeated indicies
iterator=list(product(imax_high,imax_low))
for i in iterator:
count0+=distance11(regions_high,regions_low,i,binnum,bins)
return (count0.asnumpy())
def distanceAA1(regions,i,binnum,dibins,dibins4):
#Initiate empty array for storing histogram for directions, distances, and number of counted pairs in each distance range bin
co0=nd.zeros(binnum-1,gpu(0),dtype="float32")
codi0=nd.zeros((5,binnum-1),gpu(0),dtype="float32")
count0=nd.zeros(binnum-1,gpu(0),dtype="float32")
count4=nd.zeros((5,binnum-1),gpu(0),dtype="float32")
co4=nd.zeros((5,binnum-1),gpu(0),dtype="float32")
#Calculate index coordinates and directions by chuncks
a=regions[i[0]*broadcdp:min((i[0]+1)*broadcdp,regions.shape[0]),:]
b=regions[i[1]*broadcdp:min((i[1]+1)*broadcdp,regions.shape[0]),:]
a1=nd.array(a,gpu(0))
b1=nd.array(b,gpu(0))
a1_b1=(nd.expand_dims(a1,axis=1)-b1).reshape((-1,2))
x1_x2=a1_b1[:,0]
y1_y2=a1_b1[:,1]
labels=nd.zeros(x1_x2.shape[0],gpu(0),dtype="float32")
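    # pair orientation in degrees measured from East, shifted into [0, 180]; exact 0 is re-mapped to 180
    # below so the boolean direction masks do not drop those pairs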
sdi0=(nd.degrees(nd.arctan((y1_y2)/(x1_x2)))+90).reshape((-1,))
ldis=nd.broadcast_hypot(x1_x2,y1_y2).reshape((-1,))
#Change 0 to 180 so it can apply sum of boolean mask without losing values
sdi0=nd.where(condition=(sdi0==0),x=labels+180,y=sdi0)
#Store sum of distances co0 and histogram of directions in each range bin
for p in range (0,binnum-1):
booleanmask=nd.equal((ldis>=bins[p]),(ldis<bins[p+1]))
count0[p]+=nd.nansum(booleanmask)
co0[p]+=nd.nansum(ldis*booleanmask)
        #Exclude values not in distance range bin
sdi1=nd.where(condition=(booleanmask==0),x=labels-1,y=sdi0)
for q in range (0,5):
booleanmaskdi=nd.equal((sdi1>=dibins[q]),(sdi1<dibins[q+1]))
codi0[q,p]+=nd.nansum(booleanmaskdi)
for k in range (0,5):
booleanmaskdi=nd.equal((sdi0>=dibins4[k]),(sdi0<dibins4[k+1]))
ldis0=ldis*booleanmaskdi
for l in range (0,binnum-1):
booleanmask=nd.equal((ldis0>=bins[l]),(ldis0<bins[l+1]))
count4[k,l]+=nd.nansum(booleanmask)
co4[k,l]+=nd.nansum(ldis0*booleanmask)
codi0[0,:]+=codi0[4,:]
codi0=codi0[0:4,:]
count4[0,:]+=count4[4,:]
count4=count4[0:4,:]
co4[0,:]+=co4[4,:]
co4=co4[0:4,:]
return(co0,codi0,count0,co4,count4)
def distanceAA2(regions,i,binnum,dibins,dibins4):
#Initiate empty array for storing histogram for directions, distances, and number of counted pairs in each distance range bin
co0=nd.zeros(binnum-1,gpu(0),dtype="float32")
codi0=nd.zeros((5,binnum-1),gpu(0),dtype="float32")
count0=nd.zeros(binnum-1,gpu(0),dtype="float32")
count4=nd.zeros((5,binnum-1),gpu(0),dtype="float32")
co4=nd.zeros((5,binnum-1),gpu(0),dtype="float32")
seed=nd.zeros((1,2),gpu(0))
#Calculate index coordinates and directions by chuncks
a=regions[i[0]*broadcdp:min((i[0]+1)*broadcdp,regions.shape[0]),:]
b=regions[i[1]*broadcdp:min((i[1]+1)*broadcdp,regions.shape[0]),:]
a1=nd.array(a,gpu(0))
b1=nd.array(b,gpu(0))
# print ("a1",a1,"b1",b1)
for ii in range (a1.shape[0]-1):
a1_b1=(nd.expand_dims(a1[ii].reshape((1,2)),axis=1)-b1[ii+1:,:]).reshape((a1[ii+1:,:].shape[0],2))
seed=nd.concat(seed,a1_b1,dim=0)
if seed.shape[0]>1:
x1_x2=seed[1:,0]
y1_y2=seed[1:,1]
labels=nd.zeros(x1_x2.shape[0],gpu(0),dtype="float32")
sdi0=(nd.degrees(nd.arctan((y1_y2)/(x1_x2)))+90).reshape((-1,))
ldis=nd.broadcast_hypot(x1_x2,y1_y2).reshape((-1,))
#Change 0 to 180 so it can apply sum of boolean mask without losing values
sdi0=nd.where(condition=(sdi0==0),x=labels+180,y=sdi0)
#Store sum of distances co0 and histogram of directions in each range bin
for p in range (0,binnum-1):
booleanmask=nd.equal((ldis>=bins[p]),(ldis<bins[p+1]))
count0[p]+=nd.nansum(booleanmask)
co0[p]+=nd.nansum(ldis*booleanmask)
#Exclue values not in distance range bin
sdi1=nd.where(condition=(booleanmask==0),x=labels-1,y=sdi0)
for q in range (0,5):
booleanmaskdi=nd.equal((sdi1>=dibins[q]),(sdi1<dibins[q+1]))
codi0[q,p]+=nd.nansum(booleanmaskdi)
for k in range (0,5):
booleanmaskdi=nd.equal((sdi0>=dibins4[k]),(sdi0<dibins4[k+1]))
ldis0=ldis*booleanmaskdi
for l in range (0,binnum-1):
booleanmask=nd.equal((ldis0>=bins[l]),(ldis0<bins[l+1]))
count4[k,l]+=nd.nansum(booleanmask)
co4[k,l]+=nd.nansum(ldis0*booleanmask)
codi0[0,:]+=codi0[4,:]
codi0=codi0[0:4,:]
count4[0,:]+=count4[4,:]
count4=count4[0:4,:]
co4[0,:]+=co4[4,:]
co4=co4[0:4,:]
return(co0,codi0,count0,co4,count4)
#Full permutation distance computation
def distance1(regions,i,binnum,bins):
#Initiate empty array for storing the number of counted pairs in each distance range bin
count0=nd.zeros(binnum-1,gpu(0),dtype="float32")
#Calculate index coordinates and directions by chuncks
a=regions[i[0]*broadcdp:min((i[0]+1)*broadcdp,regions.shape[0]),:]
b=regions[i[1]*broadcdp:min((i[1]+1)*broadcdp,regions.shape[0]),:]
a1=nd.array(a,gpu(0))
b1=nd.array(b,gpu(0))
a1_b1=(nd.expand_dims(a1,axis=1)-b1).reshape((-1,2))
x1_x2=a1_b1[:,0]
y1_y2=a1_b1[:,1]
ldis=nd.broadcast_hypot(x1_x2,y1_y2).reshape((-1,))
for p in range (0,binnum-1):
booleanmask=nd.equal((ldis>=bins[p]),(ldis<bins[p+1]))
count0[p]+=nd.nansum(booleanmask)
return(count0)
#Full permutation distance computation between different regions: high and low
def distance11(regions_high,regions_low,i,binnum,bins):
#Initiate empty array for storing the number of counted pairs in each distance range bin
count0=nd.zeros(binnum-1,gpu(0),dtype="float32")
#Calculate index coordinates and directions by chuncks
a=regions_high[i[0]*broadcdp:min((i[0]+1)*broadcdp,regions_high.shape[0]),:]
b=regions_low[i[1]*broadcdp:min((i[1]+1)*broadcdp,regions_low.shape[0]),:]
a1=nd.array(a,gpu(0))
b1=nd.array(b,gpu(0))
a1_b1=(nd.expand_dims(a1,axis=1)-b1).reshape((-1,2))
x1_x2=a1_b1[:,0]
y1_y2=a1_b1[:,1]
ldis=nd.broadcast_hypot(x1_x2,y1_y2).reshape((-1,))
for p in range (0,binnum-1):
booleanmask=nd.equal((ldis>=bins[p]),(ldis<bins[p+1]))
count0[p]+=nd.nansum(booleanmask)
return(count0)
#Full combination distance computation
def distance2(regions,i,binnum,bins):
#Initiate empty array for storing the number of counted pairs in each distance range bin
count0=nd.zeros(binnum-1,gpu(0),dtype="float32")
seed=nd.zeros((1,2),gpu(0))
#Calculate index coordinates and directions by chuncks
a=regions[i[0]*broadcdp:min((i[0]+1)*broadcdp,regions.shape[0]),:]
b=regions[i[1]*broadcdp:min((i[1]+1)*broadcdp,regions.shape[0]),:]
a1=nd.array(a,gpu(0))
b1=nd.array(b,gpu(0))
for ii in range (a1.shape[0]-1):
a1_b1=(nd.expand_dims(a1[ii].reshape((1,2)),axis=1)-b1[ii+1:,:]).reshape((a1[ii+1:,:].shape[0],2))
seed=nd.concat(seed,a1_b1,dim=0)
if seed.shape[0]>1:
x1_x2=seed[1:,0]
y1_y2=seed[1:,1]
ldis=nd.broadcast_hypot(x1_x2,y1_y2).reshape((-1,))
for p in range (0,binnum-1):
booleanmask=nd.equal((ldis>=bins[p]),(ldis<bins[p+1]))
count0[p]+=nd.nansum(booleanmask)
return(count0)
def omni(taoh_W,mean_d,cardh_his,taoh_W4,mean_d4,binnum):
#Compute OMNI
OMNIW=np.zeros(binnum,dtype="float32")
OMNIW4=np.zeros((4,binnum),dtype="float32")
#Convert Nan to zero to avoid issues
taoh_W1=np.nan_to_num(taoh_W)
mean_d1=np.nan_to_num(mean_d)
taoh_W41=np.nan_to_num(taoh_W4)
mean_d41=np.nan_to_num(mean_d4)
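    # integrate tau(h) over the mean connected distance with the trapezoidal rule; the running integral is stored in element 0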
for j in range (binnum-1):
if taoh_W1[j+1]!=0:
OMNIW[0]+=(taoh_W1[j]+taoh_W1[j+1])*(mean_d1[j+1]-mean_d1[j])*0.5
for k in range (4):
for l in range (binnum-1):
if taoh_W41[k,l+1]!=0:
OMNIW4[k,0]+=(taoh_W41[k,l]+taoh_W41[k,l+1])*(mean_d41[k,l+1]-mean_d41[k,l])*0.5
results=np.vstack((taoh_W1,mean_d1,OMNIW,cardh_his))
results4=np.vstack((taoh_W41,mean_d41,OMNIW4))
return (results,results4)
def compu(flowpattern,bins,dibins,dibins4,binnum,gt):
#Initiate empty array for storing histogram for directions, distances, and number of counted pairs in each distance range bin
coAA=np.zeros((1,binnum-1),dtype="float32")
codiAA=np.zeros((4,binnum-1),dtype="float32")
countAA=np.zeros(binnum-1)
countAZ=np.zeros(binnum-1)
count4AA=np.zeros((4,binnum-1),dtype="float32")
co4AA=np.zeros((4,binnum-1),dtype="float32")
#Create coordinate arrays for each zone and compute distances and directions
#All the domain area excluding NoData
#Area of High
k=1
regionA=np.asarray(np.where(flowpattern>0),dtype="int32").T
if regionA.shape[0]!=0:
countA=itercontrol(regionA,k,bins,dibins,dibins4,binnum)
k=0
regionZ=np.asarray(np.where(flowpattern==0),dtype="int32").T
if regionZ.shape[0]!=0:
countAZ=itercontrol((regionA,regionZ),k,bins,dibins,dibins4,binnum)
#Each connected region in High
k=2#Switch
for i in range (1,np.int32(np.amax(flowpattern)+1)):
regionAA=np.asarray(np.where(flowpattern==i),dtype="int32").T
outAA=itercontrol(regionAA,k,bins,dibins,dibins4,binnum)
coAA+=outAA[0];codiAA+=outAA[1];countAA+=outAA[2];co4AA+=outAA[3];count4AA+=outAA[4]
#Compute connectivity metrics
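    # tau(h) (taoh_W): fraction of pairs in each lag bin that connect cells belonging to the same High region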
if np.sum(countAZ)==0:
taoh_W=np.append(1,(countAA/(countA+countAZ)))#;taoh_M=np.append((regionA.shape[0]/regionZ.shape[0]),(countAA/countZ))
else:
taoh_W=np.append(1,(countAA*2/(countA+countAZ)))
#Average connected distances in each range bin
mean_d=np.append(0,(coAA*gt[1]/countAA))
    #Histogram of connected directions (4 total, from East) for each range bin
cardh_his=np.append(np.zeros((4,1),dtype="float32")+regionA.shape[0],codiAA,axis=1)
#Tao(h) and Average connected distances in each cardinal direction (4 total: W-E, NE-SW, N-S, NW-SE)
taoh_W4=np.append(np.zeros((4,1),dtype="float32")+1,count4AA/(countA+countAZ),axis=1)
mean_d4=np.append(np.zeros((4,1),dtype="float32"),co4AA*gt[1]/count4AA,axis=1)
return (taoh_W,mean_d,cardh_his,taoh_W4,mean_d4)
def prires(results,results4,bins,gt):
#Print out results as Pandas dataframe and write to text files
rowlabel=np.array(["taoh_W","mean_distance","OMNIW","CARD_Histogram_WE",
"NE_SW","NS","NW_SE"]).reshape(7,1)
colabel=np.empty(binnum,dtype="U30")
binslabel=np.around(bins*gt[1], decimals=3)
for i in range(binnum-1):
colabel[i+1]="Lag "+str(binslabel[i])+"-"+str(binslabel[i+1])
colabel[0]="Lag 0"
    results_df = pd.DataFrame(results, columns=colabel)
import sys,os
#os.chdir("/Users/utkarshvirendranigam/Desktop/Homework/Project")
# required_packages=["PyQt5","re", "scipy","itertools","random","matplotlib","pandas","numpy","sklearn","pydotplus","collections","warnings","seaborn"]
#print(os.getcwd())
# for my_package in required_packages:
# try:
# command_string="conda install "+ my_package+ " --yes"
# os.system(command_string)
# except:
# count=1
from PyQt5.QtWidgets import (QMainWindow, QApplication, QWidget, QPushButton, QAction, QComboBox, QLabel,
QGridLayout, QCheckBox, QGroupBox, QVBoxLayout, QHBoxLayout, QLineEdit, QPlainTextEdit)
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot, QRect
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtCore import Qt
# from scipy import interp
from itertools import cycle, combinations
import random
from PyQt5.QtWidgets import QDialog, QVBoxLayout, QSizePolicy, QFormLayout, QRadioButton, QScrollArea, QMessageBox
from PyQt5.QtGui import QPixmap
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
import pandas as pd
import numpy as np
import pickle
from numpy.polynomial.polynomial import polyfit
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.compose import make_column_transformer
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve, auc, log_loss, brier_score_loss
from sklearn.calibration import calibration_curve
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import feature_selection
from sklearn import metrics
from sklearn.preprocessing import label_binarize
from sklearn.model_selection import cross_val_predict
# Libraries to display decision tree
from pydotplus import graph_from_dot_data
import collections
from sklearn.tree import export_graphviz
import webbrowser
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
from Preprocessing import PreProcessing
import random
import seaborn as sns
#%%-----------------------------------------------------------------------
import os
os.environ["PATH"] += os.pathsep + 'C:\\Program Files (x86)\\graphviz-2.38\\release\\bin'
#%%-----------------------------------------------------------------------
#::--------------------------------
# Deafault font size for all the windows
#::--------------------------------
font_size_window = 'font-size:18px'
class DecisionTree(QMainWindow):
#::--------------------------------------------------------------------------------
    # Implementation of the Decision Tree Classifier using the happiness dataset
# the methods in this class are
# _init_ : initialize the class
# initUi : creates the canvas and all the elements in the canvas
    # update : populates the elements of the canvas based on the parameters
# chosen by the user
#::---------------------------------------------------------------------------------
send_fig = pyqtSignal(str)
def __init__(self):
super(DecisionTree, self).__init__()
self.Title = "Decision Tree Classifier"
self.initUi()
def initUi(self):
#::-----------------------------------------------------------------
# Create the canvas and all the element to create a dashboard with
# all the necessary elements to present the results from the algorithm
# The canvas is divided using a grid loyout to facilitate the drawing
# of the elements
#::-----------------------------------------------------------------
self.setWindowTitle(self.Title)
self.setStyleSheet(font_size_window)
self.main_widget = QWidget(self)
self.layout = QGridLayout(self.main_widget)
self.groupBox1 = QGroupBox('Decision Tree Features')
self.groupBox1Layout= QGridLayout()
self.groupBox1.setLayout(self.groupBox1Layout)
self.feature0 = QCheckBox(features_list[0],self)
self.feature1 = QCheckBox(features_list[1],self)
self.feature2 = QCheckBox(features_list[2], self)
self.feature3 = QCheckBox(features_list[3], self)
self.feature4 = QCheckBox(features_list[4],self)
self.feature5 = QCheckBox(features_list[5],self)
self.feature6 = QCheckBox(features_list[6], self)
self.feature7 = QCheckBox(features_list[7], self)
self.feature8 = QCheckBox(features_list[8], self)
self.feature9 = QCheckBox(features_list[9], self)
self.feature10 = QCheckBox(features_list[10], self)
self.feature11 = QCheckBox(features_list[11], self)
self.feature12 = QCheckBox(features_list[12], self)
self.feature13 = QCheckBox(features_list[13], self)
self.feature14 = QCheckBox(features_list[14], self)
self.feature15 = QCheckBox(features_list[15], self)
self.feature16 = QCheckBox(features_list[16], self)
self.feature17 = QCheckBox(features_list[17], self)
self.feature18 = QCheckBox(features_list[18], self)
self.feature19 = QCheckBox(features_list[19], self)
self.feature20 = QCheckBox(features_list[20], self)
self.feature21 = QCheckBox(features_list[21], self)
self.feature22 = QCheckBox(features_list[22], self)
self.feature23 = QCheckBox(features_list[23], self)
self.feature24 = QCheckBox(features_list[24], self)
self.feature0.setChecked(True)
self.feature1.setChecked(True)
self.feature2.setChecked(True)
self.feature3.setChecked(True)
self.feature4.setChecked(True)
self.feature5.setChecked(True)
self.feature6.setChecked(True)
self.feature7.setChecked(True)
self.feature8.setChecked(True)
self.feature9.setChecked(True)
self.feature10.setChecked(True)
self.feature11.setChecked(True)
self.feature12.setChecked(True)
self.feature13.setChecked(True)
self.feature14.setChecked(True)
self.feature15.setChecked(True)
self.feature16.setChecked(True)
self.feature17.setChecked(True)
self.feature18.setChecked(True)
self.feature19.setChecked(True)
self.feature20.setChecked(True)
self.feature21.setChecked(True)
self.feature22.setChecked(True)
self.feature23.setChecked(True)
self.feature24.setChecked(True)
self.lblPercentTest = QLabel('Percentage for Test :')
self.lblPercentTest.adjustSize()
self.txtPercentTest = QLineEdit(self)
self.txtPercentTest.setText("30")
self.lblMaxDepth = QLabel('Maximun Depth :')
self.txtMaxDepth = QLineEdit(self)
self.txtMaxDepth.setText("3")
self.btnExecute = QPushButton("Run Model")
self.btnExecute.setGeometry(QRect(60, 500, 75, 23))
self.btnExecute.clicked.connect(self.update)
self.btnDTFigure = QPushButton("View Tree")
self.btnDTFigure.setGeometry(QRect(60, 500, 75, 23))
self.btnDTFigure.clicked.connect(self.view_tree)
# We create a checkbox for each feature
self.groupBox1Layout.addWidget(self.feature0, 0, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature1, 0, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature2, 1, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature3, 1, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature4, 2, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature5, 2, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature6, 3, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature7, 3, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature8, 4, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature9, 4, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature10, 5, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature11, 5, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature12, 6, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature13, 6, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature14, 7, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature15, 7, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature16, 8, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature17, 8, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature18, 9, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature19, 9, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature20, 10, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature21, 10, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature22, 11, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature23, 11, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature24, 12, 0, 1, 1)
self.groupBox1Layout.addWidget(self.lblPercentTest, 19, 0, 1, 1)
self.groupBox1Layout.addWidget(self.txtPercentTest, 19, 1, 1, 1)
self.groupBox1Layout.addWidget(self.lblMaxDepth, 20, 0, 1, 1)
self.groupBox1Layout.addWidget(self.txtMaxDepth, 20, 1, 1, 1)
self.groupBox1Layout.addWidget(self.btnExecute, 21, 0, 1, 1)
self.groupBox1Layout.addWidget(self.btnDTFigure, 21, 1, 1, 1)
self.groupBox2 = QGroupBox('Measurements:')
self.groupBox2Layout = QVBoxLayout()
self.groupBox2.setLayout(self.groupBox2Layout)
# self.groupBox2.setMinimumSize(400, 100)
self.current_model_summary = QWidget(self)
self.current_model_summary.layout = QFormLayout(self.current_model_summary)
self.txtCurrentAccuracy = QLineEdit()
self.txtCurrentPrecision = QLineEdit()
self.txtCurrentRecall = QLineEdit()
self.txtCurrentF1score = QLineEdit()
self.current_model_summary.layout.addRow('Accuracy:', self.txtCurrentAccuracy)
self.current_model_summary.layout.addRow('Precision:', self.txtCurrentPrecision)
self.current_model_summary.layout.addRow('Recall:', self.txtCurrentRecall)
self.current_model_summary.layout.addRow('F1 Score:', self.txtCurrentF1score)
self.groupBox2Layout.addWidget(self.current_model_summary)
self.groupBox3 = QGroupBox('Other Models Accuracy:')
self.groupBox3Layout = QVBoxLayout()
self.groupBox3.setLayout(self.groupBox3Layout)
self.other_models = QWidget(self)
self.other_models.layout = QFormLayout(self.other_models)
self.txtAccuracy_lr = QLineEdit()
self.txtAccuracy_gb = QLineEdit()
self.txtAccuracy_rf = QLineEdit()
self.other_models.layout.addRow('Logistic:', self.txtAccuracy_lr)
self.other_models.layout.addRow('Random Forest:', self.txtAccuracy_rf)
self.other_models.layout.addRow('Gradient Boosting:', self.txtAccuracy_gb)
self.groupBox3Layout.addWidget(self.other_models)
#::-------------------------------------
# Graphic 1 : Confusion Matrix
#::-------------------------------------
self.fig = Figure()
self.ax1 = self.fig.add_subplot(111)
self.axes=[self.ax1]
self.canvas = FigureCanvas(self.fig)
self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas.updateGeometry()
self.groupBoxG1 = QGroupBox('Confusion Matrix')
self.groupBoxG1Layout= QVBoxLayout()
self.groupBoxG1.setLayout(self.groupBoxG1Layout)
self.groupBoxG1Layout.addWidget(self.canvas)
#::---------------------------------------------
# Graphic 2 : ROC Curve
#::---------------------------------------------
self.fig2 = Figure()
self.ax2 = self.fig2.add_subplot(111)
self.axes2 = [self.ax2]
self.canvas2 = FigureCanvas(self.fig2)
self.canvas2.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas2.updateGeometry()
self.groupBoxG2 = QGroupBox('ROC Curve')
self.groupBoxG2Layout = QVBoxLayout()
self.groupBoxG2.setLayout(self.groupBoxG2Layout)
self.groupBoxG2Layout.addWidget(self.canvas2)
#::-------------------------------------------
# Graphic 3 : Importance of Features
#::-------------------------------------------
self.fig3 = Figure()
self.ax3 = self.fig3.add_subplot(111)
self.axes3 = [self.ax3]
self.canvas3 = FigureCanvas(self.fig3)
self.canvas3.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas3.updateGeometry()
self.groupBoxG3 = QGroupBox('Importance of Features')
self.groupBoxG3Layout = QVBoxLayout()
self.groupBoxG3.setLayout(self.groupBoxG3Layout)
self.groupBoxG3Layout.addWidget(self.canvas3)
#::--------------------------------------------
# Graphic 4 : ROC Curve by class
#::--------------------------------------------
self.fig4 = Figure()
self.ax4 = self.fig4.add_subplot(111)
self.axes4 = [self.ax4]
self.canvas4 = FigureCanvas(self.fig4)
self.canvas4.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas4.updateGeometry()
self.groupBoxG4 = QGroupBox('ROC Curve by Class')
self.groupBoxG4Layout = QVBoxLayout()
self.groupBoxG4.setLayout(self.groupBoxG4Layout)
self.groupBoxG4Layout.addWidget(self.canvas4)
#::-------------------------------------------------
# End of graphs
#::-------------------------------------------------
self.layout.addWidget(self.groupBox1, 0, 0, 3, 2)
self.layout.addWidget(self.groupBoxG1, 0, 2, 1, 1)
self.layout.addWidget(self.groupBoxG3, 0, 3, 1, 1)
self.layout.addWidget(self.groupBoxG2, 1, 2, 1, 1)
self.layout.addWidget(self.groupBoxG4, 1, 3, 1, 1)
self.layout.addWidget(self.groupBox2, 2, 2, 1, 1)
self.layout.addWidget(self.groupBox3, 2, 3, 1, 1)
self.setCentralWidget(self.main_widget)
self.resize(1800, 1200)
self.show()
def update(self):
'''
        Decision Tree Classifier
        We populate the dashboard using the parameters chosen by the user.
        The parameters are processed to execute the scikit-learn Decision Tree algorithm,
then the results are presented in graphics and reports in the canvas
:return:None
'''
# processing the parameters
self.list_corr_features = pd.DataFrame([])
if self.feature0.isChecked():
if len(self.list_corr_features)==0:
self.list_corr_features = df[features_list[0]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[0]]],axis=1)
if self.feature1.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[1]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[1]]],axis=1)
if self.feature2.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[2]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[2]]],axis=1)
if self.feature3.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[3]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[3]]],axis=1)
if self.feature4.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[4]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[4]]],axis=1)
if self.feature5.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[5]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[5]]],axis=1)
if self.feature6.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[6]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[6]]],axis=1)
if self.feature7.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[7]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[7]]],axis=1)
if self.feature8.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[8]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[8]]],axis=1)
if self.feature9.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[9]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[9]]],axis=1)
if self.feature10.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[10]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[10]]], axis=1)
if self.feature11.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[11]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[11]]], axis=1)
if self.feature12.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[12]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[12]]], axis=1)
if self.feature13.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[13]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[13]]], axis=1)
if self.feature14.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[14]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[14]]], axis=1)
if self.feature15.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[15]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[15]]], axis=1)
if self.feature16.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[16]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[16]]], axis=1)
if self.feature17.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[17]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[17]]], axis=1)
if self.feature18.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[18]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[18]]], axis=1)
if self.feature19.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[19]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[19]]], axis=1)
if self.feature20.isChecked():
if len(self.list_corr_features)==0:
self.list_corr_features = df[features_list[20]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[20]]],axis=1)
if self.feature21.isChecked():
            if len(self.list_corr_features) == 0:
                self.list_corr_features = df[features_list[21]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[21]]],axis=1)
if self.feature22.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[22]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[22]]],axis=1)
if self.feature23.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[23]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[23]]],axis=1)
if self.feature24.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[24]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[24]]],axis=1)
vtest_per = float(self.txtPercentTest.text())
vmax_depth = float(self.txtMaxDepth.text())
# Clear the graphs to populate them with the new information
self.ax1.clear()
self.ax2.clear()
self.ax3.clear()
self.ax4.clear()
# self.txtResults.clear()
# self.txtResults.setUndoRedoEnabled(False)
vtest_per = vtest_per / 100
# -----------------------------------------------------------------------
filename = 'dt_finalized_model.sav'
self.clf_entropy = pickle.load(open(filename, 'rb'))
y_test = y
X_test= X[features_list]
# predicton on test using entropy
y_pred_entropy = self.clf_entropy.predict(X_test)
# confusion matrix for RandomForest
conf_matrix = confusion_matrix(y_test, y_pred_entropy)
# accuracy score
self.ff_accuracy_score = accuracy_score(y_test, y_pred_entropy) * 100
self.txtCurrentAccuracy.setText(str(self.ff_accuracy_score))
# precision score
self.ff_precision_score = precision_score(y_test, y_pred_entropy) * 100
self.txtCurrentPrecision.setText(str(self.ff_precision_score))
# recall score
self.ff_recall_score = recall_score(y_test, y_pred_entropy) * 100
self.txtCurrentRecall.setText(str(self.ff_recall_score))
# f1_score
self.ff_f1_score = f1_score(y_test, y_pred_entropy)
self.txtCurrentF1score.setText(str(self.ff_f1_score))
#::------------------------------------
## Ghaph1 :
## Confusion Matrix
#::------------------------------------
class_names1 = ['', 'No', 'Yes']
self.ax1.matshow(conf_matrix, cmap=plt.cm.get_cmap('Blues', 14))
self.ax1.set_yticklabels(class_names1)
self.ax1.set_xticklabels(class_names1, rotation=90)
self.ax1.set_xlabel('Predicted label')
self.ax1.set_ylabel('True label')
for i in range(len(class_names)):
for j in range(len(class_names)):
y_pred_score = self.clf_entropy.predict_proba(X_test)
self.ax1.text(j, i, str(conf_matrix[i][j]))
self.fig.tight_layout()
self.fig.canvas.draw_idle()
#::----------------------------------------
## Graph 2 - ROC Curve
#::----------------------------------------
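        # binarize the labels (one indicator column per class) so per-class ROC curves can be computed below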
y_test_bin = pd.get_dummies(y_test).to_numpy()
n_classes = y_test_bin.shape[1]
# From the sckict learn site
# https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test_bin[:, i], y_pred_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test_bin.ravel(), y_pred_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
lw = 2
self.ax2.plot(fpr[1], tpr[1], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[1])
self.ax2.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
self.ax2.set_xlim([0.0, 1.0])
self.ax2.set_ylim([0.0, 1.05])
self.ax2.set_xlabel('False Positive Rate')
self.ax2.set_ylabel('True Positive Rate')
        self.ax2.set_title('ROC Curve Decision Tree')
self.ax2.legend(loc="lower right")
self.fig2.tight_layout()
self.fig2.canvas.draw_idle()
######################################
# Graph - 3 Feature Importances
#####################################
# get feature importances
importances = self.clf_entropy.feature_importances_
# convert the importances into one-dimensional 1darray with corresponding df column names as axis labels
f_importances = pd.Series(importances, self.list_corr_features.columns)
# sort the array in descending order of the importances, only show the first 10
f_importances.sort_values(ascending=False, inplace=True)
f_importances = f_importances[0:10]
X_Features = f_importances.index
y_Importance = list(f_importances)
self.ax3.barh(X_Features, y_Importance)
self.ax3.set_aspect('auto')
# show the plot
self.fig3.tight_layout()
self.fig3.canvas.draw_idle()
#::-----------------------------------------------------
# Graph 4 - ROC Curve by Class
#::-----------------------------------------------------
str_classes = ['No','Yes']
colors = cycle(['magenta', 'darkorange'])
for i, color in zip(range(n_classes), colors):
self.ax4.plot(fpr[i], tpr[i], color=color, lw=lw,
label='{0} (area = {1:0.2f})'
''.format(str_classes[i], roc_auc[i]))
self.ax4.plot([0, 1], [0, 1], 'k--', lw=lw)
self.ax4.set_xlim([0.0, 1.0])
self.ax4.set_ylim([0.0, 1.05])
self.ax4.set_xlabel('False Positive Rate')
self.ax4.set_ylabel('True Positive Rate')
self.ax4.set_title('ROC Curve by Class')
self.ax4.legend(loc="lower right")
# show the plot
self.fig4.tight_layout()
self.fig4.canvas.draw_idle()
#::-----------------------------------------------------
# Other Models Comparison
#::-----------------------------------------------------
filename2 = 'lr_finalized_model.sav'
self.other_clf_lr = pickle.load(open(filename2, 'rb'))
y_pred_lr = self.other_clf_lr.predict(X_test)
self.accuracy_lr = accuracy_score(y_test, y_pred_lr) * 100
self.txtAccuracy_lr.setText(str(self.accuracy_lr))
filename3 = 'rf_finalized_model.sav'
self.other_clf_rf = pickle.load(open(filename3, 'rb'))
y_pred_rf = self.other_clf_rf.predict(X_test)
self.accuracy_rf = accuracy_score(y_test, y_pred_rf) * 100
self.txtAccuracy_rf.setText(str(self.accuracy_rf))
filename4 = 'gb_finalized_model.sav'
self.other_clf_gb = pickle.load(open(filename4, 'rb'))
y_pred_gb = self.other_clf_gb.predict(X_test)
self.accuracy_gb = accuracy_score(y_test, y_pred_gb) * 100
self.txtAccuracy_gb.setText(str(self.accuracy_gb))
def view_tree(self):
'''
Executes the graphviz to create a tree view of the information
then it presents the graphic in a pdf formt using webbrowser
:return:None
'''
webbrowser.open_new(r'decision_tree_entropy.pdf')
class RandomForest(QMainWindow):
#::--------------------------------------------------------------------------------
# Implementation of Random Forest Classifier using the happiness dataset
# the methods in this class are
# _init_ : initialize the class
# initUi : creates the canvas and all the elements in the canvas
    # update : populates the elements of the canvas based on the parameters
# chosen by the user
#::---------------------------------------------------------------------------------
send_fig = pyqtSignal(str)
def __init__(self):
super(RandomForest, self).__init__()
self.Title = "Random Forest Classifier"
self.initUi()
def initUi(self):
#::-----------------------------------------------------------------
# Create the canvas and all the element to create a dashboard with
# all the necessary elements to present the results from the algorithm
# The canvas is divided using a grid loyout to facilitate the drawing
# of the elements
#::-----------------------------------------------------------------
self.setWindowTitle(self.Title)
self.setStyleSheet(font_size_window)
self.main_widget = QWidget(self)
self.layout = QGridLayout(self.main_widget)
self.groupBox1 = QGroupBox('Random Forest Features')
self.groupBox1Layout= QGridLayout()
self.groupBox1.setLayout(self.groupBox1Layout)
self.feature0 = QCheckBox(features_list[0],self)
self.feature1 = QCheckBox(features_list[1],self)
self.feature2 = QCheckBox(features_list[2], self)
self.feature3 = QCheckBox(features_list[3], self)
self.feature4 = QCheckBox(features_list[4],self)
self.feature5 = QCheckBox(features_list[5],self)
self.feature6 = QCheckBox(features_list[6], self)
self.feature7 = QCheckBox(features_list[7], self)
self.feature8 = QCheckBox(features_list[8], self)
self.feature9 = QCheckBox(features_list[9], self)
self.feature10 = QCheckBox(features_list[10], self)
self.feature11 = QCheckBox(features_list[11], self)
self.feature12 = QCheckBox(features_list[12], self)
self.feature13 = QCheckBox(features_list[13], self)
self.feature14 = QCheckBox(features_list[14], self)
self.feature15 = QCheckBox(features_list[15], self)
self.feature16 = QCheckBox(features_list[16], self)
self.feature17 = QCheckBox(features_list[17], self)
self.feature18 = QCheckBox(features_list[18], self)
self.feature19 = QCheckBox(features_list[19], self)
self.feature20 = QCheckBox(features_list[20], self)
self.feature21 = QCheckBox(features_list[21], self)
self.feature22 = QCheckBox(features_list[22], self)
self.feature23 = QCheckBox(features_list[23], self)
self.feature24 = QCheckBox(features_list[24], self)
self.feature0.setChecked(True)
self.feature1.setChecked(True)
self.feature2.setChecked(True)
self.feature3.setChecked(True)
self.feature4.setChecked(True)
self.feature5.setChecked(True)
self.feature6.setChecked(True)
self.feature7.setChecked(True)
self.feature8.setChecked(True)
self.feature9.setChecked(True)
self.feature10.setChecked(True)
self.feature11.setChecked(True)
self.feature12.setChecked(True)
self.feature13.setChecked(True)
self.feature14.setChecked(True)
self.feature15.setChecked(True)
self.feature16.setChecked(True)
self.feature17.setChecked(True)
self.feature18.setChecked(True)
self.feature19.setChecked(True)
self.feature20.setChecked(True)
self.feature21.setChecked(True)
self.feature22.setChecked(True)
self.feature23.setChecked(True)
self.feature24.setChecked(True)
self.lblPercentTest = QLabel('Percentage for Test :')
self.lblPercentTest.adjustSize()
self.txtPercentTest = QLineEdit(self)
self.txtPercentTest.setText("30")
self.lblMaxDepth = QLabel('Maximun Depth :')
self.txtMaxDepth = QLineEdit(self)
self.txtMaxDepth.setText("3")
self.btnExecute = QPushButton("Run Model")
self.btnExecute.setGeometry(QRect(60, 500, 75, 23))
self.btnExecute.clicked.connect(self.update)
# We create a checkbox for each feature
self.groupBox1Layout.addWidget(self.feature0, 0, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature1, 0, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature2, 1, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature3, 1, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature4, 2, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature5, 2, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature6, 3, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature7, 3, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature8, 4, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature9, 4, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature10, 5, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature11, 5, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature12, 6, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature13, 6, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature14, 7, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature15, 7, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature16, 8, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature17, 8, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature18, 9, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature19, 9, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature20, 10, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature21, 10, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature22, 11, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature23, 11, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature24, 12, 0, 1, 1)
self.groupBox1Layout.addWidget(self.lblPercentTest, 19, 0, 1, 1)
self.groupBox1Layout.addWidget(self.txtPercentTest, 19, 1, 1, 1)
self.groupBox1Layout.addWidget(self.lblMaxDepth, 20, 0, 1, 1)
self.groupBox1Layout.addWidget(self.txtMaxDepth, 20, 1, 1, 1)
self.groupBox1Layout.addWidget(self.btnExecute, 21, 0, 1, 1)
self.groupBox2 = QGroupBox('Measurements:')
self.groupBox2Layout = QVBoxLayout()
self.groupBox2.setLayout(self.groupBox2Layout)
# self.groupBox2.setMinimumSize(400, 100)
self.current_model_summary = QWidget(self)
self.current_model_summary.layout = QFormLayout(self.current_model_summary)
self.txtCurrentAccuracy = QLineEdit()
self.txtCurrentPrecision = QLineEdit()
self.txtCurrentRecall = QLineEdit()
self.txtCurrentF1score = QLineEdit()
self.current_model_summary.layout.addRow('Accuracy:', self.txtCurrentAccuracy)
self.current_model_summary.layout.addRow('Precision:', self.txtCurrentPrecision)
self.current_model_summary.layout.addRow('Recall:', self.txtCurrentRecall)
self.current_model_summary.layout.addRow('F1 Score:', self.txtCurrentF1score)
self.groupBox2Layout.addWidget(self.current_model_summary)
self.groupBox3 = QGroupBox('Other Models Accuracy:')
self.groupBox3Layout = QVBoxLayout()
self.groupBox3.setLayout(self.groupBox3Layout)
self.other_models = QWidget(self)
self.other_models.layout = QFormLayout(self.other_models)
self.txtAccuracy_lr = QLineEdit()
self.txtAccuracy_gb = QLineEdit()
self.txtAccuracy_dt = QLineEdit()
self.other_models.layout.addRow('Logistic:', self.txtAccuracy_lr)
self.other_models.layout.addRow('Gradient Boosting:', self.txtAccuracy_gb)
self.other_models.layout.addRow('Decision tree:', self.txtAccuracy_dt)
self.groupBox3Layout.addWidget(self.other_models)
#::-------------------------------------
# Graphic 1 : Confusion Matrix
#::-------------------------------------
self.fig = Figure()
self.ax1 = self.fig.add_subplot(111)
self.axes=[self.ax1]
self.canvas = FigureCanvas(self.fig)
self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas.updateGeometry()
self.groupBoxG1 = QGroupBox('Confusion Matrix')
self.groupBoxG1Layout= QVBoxLayout()
self.groupBoxG1.setLayout(self.groupBoxG1Layout)
self.groupBoxG1Layout.addWidget(self.canvas)
#::---------------------------------------------
# Graphic 2 : ROC Curve
#::---------------------------------------------
self.fig2 = Figure()
self.ax2 = self.fig2.add_subplot(111)
self.axes2 = [self.ax2]
self.canvas2 = FigureCanvas(self.fig2)
self.canvas2.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas2.updateGeometry()
self.groupBoxG2 = QGroupBox('ROC Curve')
self.groupBoxG2Layout = QVBoxLayout()
self.groupBoxG2.setLayout(self.groupBoxG2Layout)
self.groupBoxG2Layout.addWidget(self.canvas2)
#::-------------------------------------------
# Graphic 3 : Importance of Features
#::-------------------------------------------
self.fig3 = Figure()
self.ax3 = self.fig3.add_subplot(111)
self.axes3 = [self.ax3]
self.canvas3 = FigureCanvas(self.fig3)
self.canvas3.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas3.updateGeometry()
self.groupBoxG3 = QGroupBox('Importance of Features')
self.groupBoxG3Layout = QVBoxLayout()
self.groupBoxG3.setLayout(self.groupBoxG3Layout)
self.groupBoxG3Layout.addWidget(self.canvas3)
#::--------------------------------------------
# Graphic 4 : ROC Curve by class
#::--------------------------------------------
self.fig4 = Figure()
self.ax4 = self.fig4.add_subplot(111)
self.axes4 = [self.ax4]
self.canvas4 = FigureCanvas(self.fig4)
self.canvas4.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas4.updateGeometry()
self.groupBoxG4 = QGroupBox('ROC Curve by Class')
self.groupBoxG4Layout = QVBoxLayout()
self.groupBoxG4.setLayout(self.groupBoxG4Layout)
self.groupBoxG4Layout.addWidget(self.canvas4)
#::-------------------------------------------------
# End of graphs
#::-------------------------------------------------
self.layout.addWidget(self.groupBox1, 0, 0, 3, 2)
self.layout.addWidget(self.groupBoxG1, 0, 2, 1, 1)
self.layout.addWidget(self.groupBoxG3, 0, 3, 1, 1)
self.layout.addWidget(self.groupBoxG2, 1, 2, 1, 1)
self.layout.addWidget(self.groupBoxG4, 1, 3, 1, 1)
self.layout.addWidget(self.groupBox2, 2, 2, 1, 1)
self.layout.addWidget(self.groupBox3, 2, 3, 1, 1)
self.setCentralWidget(self.main_widget)
self.resize(1800, 1200)
self.show()
def update(self):
'''
Random Forest Classifier
        We populate the dashboard using the parameters chosen by the user
        The parameters are processed to execute the scikit-learn Random Forest algorithm
then the results are presented in graphics and reports in the canvas
:return:None
'''
# processing the parameters
self.list_corr_features = pd.DataFrame([])
if self.feature0.isChecked():
if len(self.list_corr_features)==0:
self.list_corr_features = df[features_list[0]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[0]]],axis=1)
if self.feature1.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[1]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[1]]],axis=1)
if self.feature2.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[2]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[2]]],axis=1)
if self.feature3.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[3]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[3]]],axis=1)
if self.feature4.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[4]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[4]]],axis=1)
if self.feature5.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[5]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[5]]],axis=1)
if self.feature6.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[6]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[6]]],axis=1)
if self.feature7.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[7]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[7]]],axis=1)
if self.feature8.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[8]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[8]]],axis=1)
if self.feature9.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[9]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[9]]],axis=1)
if self.feature10.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[10]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[10]]], axis=1)
if self.feature11.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[11]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[11]]], axis=1)
if self.feature12.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[12]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[12]]], axis=1)
if self.feature13.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[13]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[13]]], axis=1)
if self.feature14.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[14]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[14]]], axis=1)
if self.feature15.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[15]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[15]]], axis=1)
if self.feature16.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[16]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[16]]], axis=1)
if self.feature17.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[17]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[17]]], axis=1)
if self.feature18.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[18]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[18]]], axis=1)
if self.feature19.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[19]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[19]]], axis=1)
if self.feature20.isChecked():
if len(self.list_corr_features)==0:
self.list_corr_features = df[features_list[20]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[20]]],axis=1)
if self.feature21.isChecked():
            if len(self.list_corr_features) == 0:
                self.list_corr_features = df[features_list[21]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[21]]],axis=1)
if self.feature22.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[22]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[22]]],axis=1)
if self.feature23.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[23]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[23]]],axis=1)
if self.feature24.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[24]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[24]]],axis=1)
vtest_per = float(self.txtPercentTest.text())
vmax_depth = float(self.txtMaxDepth.text())
# Clear the graphs to populate them with the new information
self.ax1.clear()
self.ax2.clear()
self.ax3.clear()
self.ax4.clear()
# self.txtResults.clear()
# self.txtResults.setUndoRedoEnabled(False)
vtest_per = vtest_per / 100
filename = 'rf_finalized_model.sav'
self.clf_entropy = pickle.load(open(filename, 'rb'))
y_test = y
X_test = X[features_list]
# -----------------------------------------------------------------------
# predicton on test using entropy
y_pred_entropy = self.clf_entropy.predict(X_test)
# confusion matrix for RandomForest
conf_matrix = confusion_matrix(y_test, y_pred_entropy)
# accuracy score
self.ff_accuracy_score = accuracy_score(y_test, y_pred_entropy) * 100
self.txtCurrentAccuracy.setText(str(self.ff_accuracy_score))
# precision score
self.ff_precision_score = precision_score(y_test, y_pred_entropy) * 100
self.txtCurrentPrecision.setText(str(self.ff_precision_score))
# recall score
self.ff_recall_score = recall_score(y_test, y_pred_entropy) * 100
self.txtCurrentRecall.setText(str(self.ff_recall_score))
# f1_score
self.ff_f1_score = f1_score(y_test, y_pred_entropy)
self.txtCurrentF1score.setText(str(self.ff_f1_score))
#::------------------------------------
        ## Graph 1 :
## Confusion Matrix
#::------------------------------------
class_names1 = ['', 'No', 'Yes']
self.ax1.matshow(conf_matrix, cmap=plt.cm.get_cmap('Blues', 14))
self.ax1.set_yticklabels(class_names1)
self.ax1.set_xticklabels(class_names1, rotation=90)
self.ax1.set_xlabel('Predicted label')
self.ax1.set_ylabel('True label')
        # Class probabilities are needed later for the ROC curves; compute them once here
        y_pred_score = self.clf_entropy.predict_proba(X_test)
        # Annotate each cell of the confusion matrix with its count
        for i in range(conf_matrix.shape[0]):
            for j in range(conf_matrix.shape[1]):
                self.ax1.text(j, i, str(conf_matrix[i][j]))
self.fig.tight_layout()
self.fig.canvas.draw_idle()
#::----------------------------------------
## Graph 2 - ROC Curve
#::----------------------------------------
y_test_bin = pd.get_dummies(y_test).to_numpy()
n_classes = y_test_bin.shape[1]
        # From the scikit-learn site
# https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test_bin[:, i], y_pred_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test_bin.ravel(), y_pred_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
lw = 2
self.ax2.plot(fpr[1], tpr[1], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[1])
self.ax2.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
self.ax2.set_xlim([0.0, 1.0])
self.ax2.set_ylim([0.0, 1.05])
self.ax2.set_xlabel('False Positive Rate')
self.ax2.set_ylabel('True Positive Rate')
self.ax2.set_title('ROC Curve Random Forest')
self.ax2.legend(loc="lower right")
self.fig2.tight_layout()
self.fig2.canvas.draw_idle()
######################################
# Graph - 3 Feature Importances
#####################################
# get feature importances
importances = self.clf_entropy.feature_importances_
# convert the importances into one-dimensional 1darray with corresponding df column names as axis labels
f_importances = pd.Series(importances, self.list_corr_features.columns)
# sort the array in descending order of the importances, only show the first 10
f_importances.sort_values(ascending=False, inplace=True)
f_importances = f_importances[0:10]
X_Features = f_importances.index
y_Importance = list(f_importances)
self.ax3.barh(X_Features, y_Importance)
self.ax3.set_aspect('auto')
# show the plot
self.fig3.tight_layout()
self.fig3.canvas.draw_idle()
#::-----------------------------------------------------
# Graph 4 - ROC Curve by Class
#::-----------------------------------------------------
str_classes = ['No','Yes']
colors = cycle(['magenta', 'darkorange'])
for i, color in zip(range(n_classes), colors):
self.ax4.plot(fpr[i], tpr[i], color=color, lw=lw,
label='{0} (area = {1:0.2f})'
''.format(str_classes[i], roc_auc[i]))
self.ax4.plot([0, 1], [0, 1], 'k--', lw=lw)
self.ax4.set_xlim([0.0, 1.0])
self.ax4.set_ylim([0.0, 1.05])
self.ax4.set_xlabel('False Positive Rate')
self.ax4.set_ylabel('True Positive Rate')
self.ax4.set_title('ROC Curve by Class')
self.ax4.legend(loc="lower right")
# show the plot
self.fig4.tight_layout()
self.fig4.canvas.draw_idle()
#::-----------------------------------------------------
# Other Models Comparison
#::-----------------------------------------------------
filename2 = 'lr_finalized_model.sav'
self.other_clf_lr = pickle.load(open(filename2, 'rb'))
y_pred_lr = self.other_clf_lr.predict(X_test)
self.accuracy_lr = accuracy_score(y_test, y_pred_lr) * 100
self.txtAccuracy_lr.setText(str(self.accuracy_lr))
filename3 = 'dt_finalized_model.sav'
self.other_clf_dt = pickle.load(open(filename3, 'rb'))
y_pred_dt = self.other_clf_dt.predict(X_test)
self.accuracy_dt = accuracy_score(y_test, y_pred_dt) * 100
self.txtAccuracy_dt.setText(str(self.accuracy_dt))
filename4 = 'gb_finalized_model.sav'
self.other_clf_gb = pickle.load(open(filename4, 'rb'))
y_pred_gb = self.other_clf_gb.predict(X_test)
self.accuracy_gb = accuracy_score(y_test, y_pred_gb) * 100
self.txtAccuracy_gb.setText(str(self.accuracy_gb))
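# Note (added for clarity, not part of the original GUI wiring): the long chains of
# per-checkbox `if self.feature<N>.isChecked()` blocks in the update() methods above
# and below all implement the same pattern -- build a frame from the checked columns
# of the global `df`. A hedged sketch of an equivalent loop-based helper follows; the
# name `collect_checked_features` and its arguments are illustrative assumptions and
# nothing in the classes calls it.
def collect_checked_features(checkboxes, frame, column_names):
    # Keep the columns whose checkbox is ticked, preserving the on-screen order
    selected = [name for box, name in zip(checkboxes, column_names) if box.isChecked()]
    # Return an empty DataFrame when nothing is selected, mirroring the original logic
    return frame[selected] if selected else pd.DataFrame([])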
class LogisticReg(QMainWindow):
#::--------------------------------------------------------------------------------
    # Implementation of the Logistic Regression Classifier using the happiness dataset
# the methods in this class are
# _init_ : initialize the class
# initUi : creates the canvas and all the elements in the canvas
    #  update : populates the elements of the canvas based on the parameters
# chosen by the user
#::---------------------------------------------------------------------------------
send_fig = pyqtSignal(str)
def __init__(self):
super(LogisticReg, self).__init__()
self.Title = "Logistic Regression Classifier"
self.initUi()
def initUi(self):
#::-----------------------------------------------------------------
# Create the canvas and all the element to create a dashboard with
# all the necessary elements to present the results from the algorithm
        # The canvas is divided using a grid layout to facilitate the drawing
# of the elements
#::-----------------------------------------------------------------
self.setWindowTitle(self.Title)
self.setStyleSheet(font_size_window)
self.main_widget = QWidget(self)
self.layout = QGridLayout(self.main_widget)
self.groupBox1 = QGroupBox('Logistic Regression Features')
self.groupBox1Layout= QGridLayout()
self.groupBox1.setLayout(self.groupBox1Layout)
self.feature0 = QCheckBox(features_list[0],self)
self.feature1 = QCheckBox(features_list[1],self)
self.feature2 = QCheckBox(features_list[2], self)
self.feature3 = QCheckBox(features_list[3], self)
self.feature4 = QCheckBox(features_list[4],self)
self.feature5 = QCheckBox(features_list[5],self)
self.feature6 = QCheckBox(features_list[6], self)
self.feature7 = QCheckBox(features_list[7], self)
self.feature8 = QCheckBox(features_list[8], self)
self.feature9 = QCheckBox(features_list[9], self)
self.feature10 = QCheckBox(features_list[10], self)
self.feature11 = QCheckBox(features_list[11], self)
self.feature12 = QCheckBox(features_list[12], self)
self.feature13 = QCheckBox(features_list[13], self)
self.feature14 = QCheckBox(features_list[14], self)
self.feature15 = QCheckBox(features_list[15], self)
self.feature16 = QCheckBox(features_list[16], self)
self.feature17 = QCheckBox(features_list[17], self)
self.feature18 = QCheckBox(features_list[18], self)
self.feature19 = QCheckBox(features_list[19], self)
self.feature20 = QCheckBox(features_list[20], self)
self.feature21 = QCheckBox(features_list[21], self)
self.feature22 = QCheckBox(features_list[22], self)
self.feature23 = QCheckBox(features_list[23], self)
self.feature24 = QCheckBox(features_list[24], self)
self.feature0.setChecked(True)
self.feature1.setChecked(True)
self.feature2.setChecked(True)
self.feature3.setChecked(True)
self.feature4.setChecked(True)
self.feature5.setChecked(True)
self.feature6.setChecked(True)
self.feature7.setChecked(True)
self.feature8.setChecked(True)
self.feature9.setChecked(True)
self.feature10.setChecked(True)
self.feature11.setChecked(True)
self.feature12.setChecked(True)
self.feature13.setChecked(True)
self.feature14.setChecked(True)
self.feature15.setChecked(True)
self.feature16.setChecked(True)
self.feature17.setChecked(True)
self.feature18.setChecked(True)
self.feature19.setChecked(True)
self.feature20.setChecked(True)
self.feature21.setChecked(True)
self.feature22.setChecked(True)
self.feature23.setChecked(True)
self.feature24.setChecked(True)
self.lblPercentTest = QLabel('Percentage for Test :')
self.lblPercentTest.adjustSize()
self.txtPercentTest = QLineEdit(self)
self.txtPercentTest.setText("30")
self.btnExecute = QPushButton("Run Model")
self.btnExecute.setGeometry(QRect(60, 500, 75, 23))
self.btnExecute.clicked.connect(self.update)
# We create a checkbox for each feature
self.groupBox1Layout.addWidget(self.feature0, 0, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature1, 0, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature2, 1, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature3, 1, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature4, 2, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature5, 2, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature6, 3, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature7, 3, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature8, 4, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature9, 4, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature10, 5, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature11, 5, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature12, 6, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature13, 6, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature14, 7, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature15, 7, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature16, 8, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature17, 8, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature18, 9, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature19, 9, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature20, 10, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature21, 10, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature22, 11, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature23, 11, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature24, 12, 0, 1, 1)
self.groupBox1Layout.addWidget(self.lblPercentTest, 19, 0, 1, 1)
self.groupBox1Layout.addWidget(self.txtPercentTest, 19, 1, 1, 1)
self.groupBox1Layout.addWidget(self.btnExecute, 21, 0, 1, 1)
self.groupBox2 = QGroupBox('Measurements:')
self.groupBox2Layout = QVBoxLayout()
self.groupBox2.setLayout(self.groupBox2Layout)
# self.groupBox2.setMinimumSize(400, 100)
self.current_model_summary = QWidget(self)
self.current_model_summary.layout = QFormLayout(self.current_model_summary)
self.txtCurrentAccuracy = QLineEdit()
self.txtCurrentPrecision = QLineEdit()
self.txtCurrentRecall = QLineEdit()
self.txtCurrentF1score = QLineEdit()
self.current_model_summary.layout.addRow('Accuracy:', self.txtCurrentAccuracy)
self.current_model_summary.layout.addRow('Precision:', self.txtCurrentPrecision)
self.current_model_summary.layout.addRow('Recall:', self.txtCurrentRecall)
self.current_model_summary.layout.addRow('F1 Score:', self.txtCurrentF1score)
self.groupBox2Layout.addWidget(self.current_model_summary)
self.groupBox3 = QGroupBox('Other Models Accuracy:')
self.groupBox3Layout = QVBoxLayout()
self.groupBox3.setLayout(self.groupBox3Layout)
self.other_models = QWidget(self)
self.other_models.layout = QFormLayout(self.other_models)
self.txtAccuracy_dt = QLineEdit()
self.txtAccuracy_gb = QLineEdit()
self.txtAccuracy_rf = QLineEdit()
self.other_models.layout.addRow('Decision Tree:', self.txtAccuracy_dt)
self.other_models.layout.addRow('Gradient Boosting:', self.txtAccuracy_gb)
self.other_models.layout.addRow('Random Forest:', self.txtAccuracy_rf)
self.groupBox3Layout.addWidget(self.other_models)
#::-------------------------------------
# Graphic 1 : Confusion Matrix
#::-------------------------------------
self.fig = Figure()
self.ax1 = self.fig.add_subplot(111)
self.axes=[self.ax1]
self.canvas = FigureCanvas(self.fig)
self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas.updateGeometry()
self.groupBoxG1 = QGroupBox('Confusion Matrix')
self.groupBoxG1Layout= QVBoxLayout()
self.groupBoxG1.setLayout(self.groupBoxG1Layout)
self.groupBoxG1Layout.addWidget(self.canvas)
#::---------------------------------------------
# Graphic 2 : ROC Curve
#::---------------------------------------------
self.fig2 = Figure()
self.ax2 = self.fig2.add_subplot(111)
self.axes2 = [self.ax2]
self.canvas2 = FigureCanvas(self.fig2)
self.canvas2.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas2.updateGeometry()
self.groupBoxG2 = QGroupBox('ROC Curve')
self.groupBoxG2Layout = QVBoxLayout()
self.groupBoxG2.setLayout(self.groupBoxG2Layout)
self.groupBoxG2Layout.addWidget(self.canvas2)
#::-------------------------------------------
# Graphic 3 : k-fold Cross validation
#::-------------------------------------------
self.fig3 = Figure()
self.ax3 = self.fig3.add_subplot(111)
self.axes3 = [self.ax3]
self.canvas3 = FigureCanvas(self.fig3)
self.canvas3.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas3.updateGeometry()
self.groupBoxG3 = QGroupBox('K-fold cross validation')
self.groupBoxG3Layout = QVBoxLayout()
self.groupBoxG3.setLayout(self.groupBoxG3Layout)
self.groupBoxG3Layout.addWidget(self.canvas3)
#::--------------------------------------------
# Graphic 4 : ROC Curve by class
#::--------------------------------------------
self.fig4 = Figure()
self.ax4 = self.fig4.add_subplot(111)
self.axes4 = [self.ax4]
self.canvas4 = FigureCanvas(self.fig4)
self.canvas4.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas4.updateGeometry()
self.groupBoxG4 = QGroupBox('ROC Curve by Class')
self.groupBoxG4Layout = QVBoxLayout()
self.groupBoxG4.setLayout(self.groupBoxG4Layout)
self.groupBoxG4Layout.addWidget(self.canvas4)
#::-------------------------------------------------
# End of graphs
#::-------------------------------------------------
self.layout.addWidget(self.groupBox1, 0, 0, 3, 2)
self.layout.addWidget(self.groupBoxG1, 0, 2, 1, 1)
self.layout.addWidget(self.groupBoxG3, 0, 3, 1, 1)
self.layout.addWidget(self.groupBoxG2, 1, 2, 1, 1)
self.layout.addWidget(self.groupBoxG4, 1, 3, 1, 1)
self.layout.addWidget(self.groupBox2, 2, 2, 1, 1)
self.layout.addWidget(self.groupBox3, 2, 3, 1, 1)
self.setCentralWidget(self.main_widget)
self.resize(1800, 1200)
self.show()
def update(self):
'''
        Logistic Regression Classifier
        We populate the dashboard using the parameters chosen by the user
        The parameters are processed to execute the scikit-learn Logistic Regression algorithm
then the results are presented in graphics and reports in the canvas
:return:None
'''
# processing the parameters
self.list_corr_features = pd.DataFrame([])
if self.feature0.isChecked():
if len(self.list_corr_features)==0:
self.list_corr_features = df[features_list[0]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[0]]],axis=1)
if self.feature1.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[1]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[1]]],axis=1)
if self.feature2.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[2]]
else:
self.list_corr_features = | pd.concat([self.list_corr_features, df[features_list[2]]],axis=1) | pandas.concat |
import pandas as pd
df = pd.DataFrame({'X': [0, 1, 2], 'Y': [3, 4, 5]}, index=['A', 'B', 'C'])
print(df)
# X Y
# A 0 3
# B 1 4
# C 2 5
print(df.T)
# A B C
# X 0 1 2
# Y 3 4 5
print(df.transpose())
# A B C
# X 0 1 2
# Y 3 4 5
df = df.T
print(df)
# A B C
# X 0 1 2
# Y 3 4 5
df = pd.DataFrame({'X': [0, 1, 2], 'Y': [3, 4, 5]}, index=['A', 'B', 'C'])
print(df)
# X Y
# A 0 3
# B 1 4
# C 2 5
print(df.dtypes)
# X int64
# Y int64
# dtype: object
print(df.T)
# A B C
# X 0 1 2
# Y 3 4 5
print(df.T.dtypes)
# A int64
# B int64
# C int64
# dtype: object
df_mix = pd.DataFrame({'col_int': [0, 1, 2], 'col_float': [0.1, 0.2, 0.3]}, index=['A', 'B', 'C'])
print(df_mix)
# col_int col_float
# A 0 0.1
# B 1 0.2
# C 2 0.3
print(df_mix.dtypes)
# col_int int64
# col_float float64
# dtype: object
print(df_mix.T)
# A B C
# col_int 0.0 1.0 2.0
# col_float 0.1 0.2 0.3
print(df_mix.T.dtypes)
# A float64
# B float64
# C float64
# dtype: object
print(df_mix.T.T)
# col_int col_float
# A 0.0 0.1
# B 1.0 0.2
# C 2.0 0.3
print(df_mix.T.T.dtypes)
# col_int float64
# col_float float64
# dtype: object
df_mix2 = pd.DataFrame({'col_int': [0, 1, 2], 'col_float': [0.1, 0.2, 0.3], 'col_str': ['a', 'b', 'c']},
index=['A', 'B', 'C'])
print(df_mix2)
# col_int col_float col_str
# A 0 0.1 a
# B 1 0.2 b
# C 2 0.3 c
print(df_mix2.dtypes)
# col_int int64
# col_float float64
# col_str object
# dtype: object
print(df_mix2.T)
# A B C
# col_int 0 1 2
# col_float 0.1 0.2 0.3
# col_str a b c
print(df_mix2.T.dtypes)
# A object
# B object
# C object
# dtype: object
print(df_mix2.T.T)
# col_int col_float col_str
# A 0 0.1 a
# B 1 0.2 b
# C 2 0.3 c
print(df_mix2.T.T.dtypes)
# col_int object
# col_float object
# col_str object
# dtype: object
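# Added note (not part of the original walkthrough): the object dtypes produced by the
# mixed-type round trip above can usually be recovered with infer_objects(), which
# re-inspects each column; the string column necessarily stays object.
df_roundtrip = df_mix2.T.T.infer_objects()
print(df_roundtrip.dtypes)
# col_int        int64
# col_float    float64
# col_str       object
# dtype: object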
df = | pd.DataFrame({'X': [0, 1, 2], 'Y': [3, 4, 5]}, index=['A', 'B', 'C']) | pandas.DataFrame |
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
def plot_refinement_improvement(accuracy_df: pd.DataFrame, refinement_df: pd.DataFrame, reference_refinement_type: str="original"):
grouped_df = (
| pd.merge(refinement_df, accuracy_df, left_on="info_run_id", right_on="run_id") | pandas.merge |
"""
Backs up ToodleDo
"""
import sys
import os
import requests
import yaml
import pandas as pd
from getpass import getpass
from requests_oauthlib import OAuth2Session
import requests
import urllib
import json
import logging
# TODO modify redirection URI? Localhost is a bit weird, there might be something running there.
# So, just play around with possibilities and see what works.
# TODO create a dummy user account and try to restore info there
# TODO Add writing scope
# TODO Commons with constants? Make sure the script is runnable form anywhere
CUR_FILE_DIR = os.path.dirname(os.path.realpath(__file__))+os.path.sep
API_URL_PREFIX = "http://api.toodledo.com/3/"
GET_URL_POSTFIX = '/get.php'
# Tasks: http://api.toodledo.com/3/tasks/index.php
DEFAULT_TASK_FIELDS = ["id", "title", "modified", "completed"]
OPTIONAL_TASK_FIELDS = ["folder", "context", "goal", "location", "tag", "startdate", "duedate",
"duedatemod", "starttime", "duetime", "remind", "repeat", "status", "star", "priority",
"length", "timer", "added", "note", "parent", "children", "order", "meta", "previous",
"attachment", "shared", "addedby", "via", "attachments"]
DEFAULT_FOLDER_FIELDS = ["id","name","private","archived","ord"]
DEFAULT_CONTEXT_FIELDS = ["id","name","private"]
DEFAULT_GOAL_FIELDS = ["id","name","level","archived","contributes","note"]
DEFAULT_LOCATION_FIELDS = ["id","name","description","lat","lon"]
DEFAULT_NOTES_FIELDS = ["id","title","modified","added","folder","private","text"]
LIST_ROW_DEFAULT_FIELDS=["id","added","modified","version","list","cells"]
LIST_COL_DEFAULT_FIELDS=["id","title","type","sort","width"]
AUTHORIZATION_URL = "https://api.toodledo.com/3/account/authorize.php"
TOKEN_URL = 'https://api.toodledo.com/3/account/token.php'
TOKEN_FILENAME = CUR_FILE_DIR+"token.txt"
CONFIG_FILENAME = CUR_FILE_DIR+"config.yaml"
CLIENT_ID_FIELD = 'CLIENT_ID'
CLIENT_SECRET_FIELD = 'CLIENT_SECRET'
REDIRECT_URL_FIELD = 'REDIRECT_URL'
BACKUP_FOLDER_FIELD = 'BACKUP_FOLDER'
ALL_SCOPES = ["basic","folders", "tasks","notes","outlines","lists"]
def get_token_response(request_body):
access_token, refresh_token = None, None
token_response = requests.post(TOKEN_URL, data = request_body)
if token_response.status_code == 200:
token_dict = json.loads(token_response.text)
if "access_token" in token_dict:
access_token = token_dict["access_token"]
if "refresh_token" in token_dict:
refresh_token = token_dict["refresh_token"]
else:
logging.warning("Failed to refresh. Status: %d. Result:\n%s",
token_response.status_code, str(token_response.text))
return access_token, refresh_token
def get_authorization_response(config, oauth):
authorization_url, state = oauth.authorization_url(AUTHORIZATION_URL)
# Here print is intended. We are working with console.
    print('Please go to this URL and authorize access:')
print(authorization_url)
authorization_response = input('Enter the full callback URL: ')
return authorization_response
def refresh_tokens(config, access_token, refresh_token):
# If failed to refresh, we'll be OK anyway
body = {'client_id': config[CLIENT_ID_FIELD],
'client_secret': config[CLIENT_SECRET_FIELD],
'redirect_uri': config[REDIRECT_URL_FIELD],
'grant_type': 'refresh_token',
'refresh_token': refresh_token,
}
try:
new_access_token, new_refresh_token = get_token_response(body)
if new_access_token is None:
new_access_token = access_token
logging.info("Keeping old access token: %s", new_access_token)
else:
logging.info("New access token: %s", new_access_token)
if new_refresh_token is None:
new_refresh_token = refresh_token
logging.info("Keeping old refresh token: %s", new_refresh_token)
else:
logging.info("New refresh token: %s", new_refresh_token)
except Exception as e:
logging.warning("Failed to refresh. Might still be OK with old token.", str(e))
new_access_token, new_refresh_token = access_token, refresh_token
return new_access_token, new_refresh_token
def get_tokens_from_scratch(config):
oauth = OAuth2Session(config[CLIENT_ID_FIELD],
redirect_uri=config[REDIRECT_URL_FIELD],
scope=ALL_SCOPES)
authorization_response = get_authorization_response(config, oauth)
connection_success=False
first_time = True
while not connection_success:
try:
if not first_time:
logging.info("Trying to reconnect...")
authorization_response = get_authorization_response(config, oauth)
first_time = False
code = urllib.parse.parse_qs(
urllib.parse.urlsplit(authorization_response).query
)["code"][0]
# Just could not get in OAuth. It kept throwing
# "(missing_token) Missing access token parameter"
# Well, let's just get it working manually then.
body = {'client_id': config[CLIENT_ID_FIELD],
'client_secret': config[CLIENT_SECRET_FIELD],
'code': code,
'redirect_uri': config[REDIRECT_URL_FIELD],
'grant_type': 'authorization_code',
'authorization_response': authorization_response,
}
access_token, refresh_token = get_token_response(body)
connection_success = (access_token is not None)and(refresh_token is not None)
except Exception as e:
logging.warning("Token fetch failed: %s", str(e))
# TODO prevent infinite loop here? Prompt after error?
# Limit the number of retries? Parametrize?
return access_token, refresh_token
def save_tokens(access_token, refresh_token):
with open(TOKEN_FILENAME,"wt") as f:
f.write(access_token+"\n"+refresh_token)
logging.info("Saved tokens")
def get_tokens(config):
access_token = None
refresh_token = None
if os.path.isfile(TOKEN_FILENAME):
with open(TOKEN_FILENAME,"rt") as f:
s = f.read().split('\n')
if len(s) == 2:
access_token, refresh_token = s[0], s[1]
logging.info("Access token from file: %s", access_token)
logging.info("Refresh token from file: %s",refresh_token)
access_token, refresh_token = refresh_tokens(config, access_token, refresh_token)
if access_token is None or refresh_token is None:
access_token, refresh_token = get_tokens_from_scratch(config)
logging.info("Obtained tokens successfully")
logging.info("Final access token: %s", access_token)
logging.info("Final refresh token: %s",refresh_token)
return access_token, refresh_token
def generic_get_and_backup(access_token: str, parameter_name: str,
default_fields: list, optional_fields: list = [],
filename: str=None, readable_table_name: str=None,
url_additions: dict={}, start_from=0, return_json: bool=False):
result_df = pd.DataFrame(columns=default_fields+optional_fields)
readable_table_name = \
readable_table_name if readable_table_name is not None else parameter_name
url = API_URL_PREFIX + parameter_name + GET_URL_POSTFIX
try:
# TODO consider parameters: after=1234567890&f=xml
data = {'access_token': access_token}
if len(optional_fields)>0:
data['fields'] = ",".join(optional_fields)
for i in url_additions:
data[i] = url_additions[i]
response = requests.post(url, data = data)
if response.status_code == 200:
result_json_parsed = json.loads(response.text)
if type(result_json_parsed) == list:
if len(result_json_parsed) > start_from:
result_df = pd.DataFrame(result_json_parsed[start_from:]) # 0 is num and total
logging.info("Read %s successfully", readable_table_name)
else:
logging.info("List of %s is empty", readable_table_name)
else:
logging.warning("Failed to read %s. Response body: %s",
readable_table_name, result_json_parsed)
else:
logging.warning(
"Failed to read %s. Response status code: %d.\n Detailed response: %s",
readable_table_name, response.status_code, str(response.text))
except Exception as e:
logging.warning("Failed to list %s: %s", readable_table_name, str(e))
if filename is not None:
try:
result_df.to_csv(filename, index=False)
logging.info("Saved %s successfully", readable_table_name)
except Exception as e:
logging.warning("Failed to backup %s: %s", readable_table_name, str(e))
else:
logging.info("No filename provided. Not saving %s.", readable_table_name)
if return_json:
return result_df, result_json_parsed
return result_df
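# Illustrative usage of generic_get_and_backup (hedged example, not executed anywhere
# in this script; the token value and output path are placeholders):
#
#   access_token, _ = get_tokens(config)
#   folders_df = generic_get_and_backup(
#       access_token=access_token,
#       parameter_name='folders',
#       default_fields=DEFAULT_FOLDER_FIELDS,
#       filename='backup/folders.csv')
#
# The helper posts to http://api.toodledo.com/3/<parameter_name>/get.php and returns
# a DataFrame (plus the parsed JSON when return_json=True).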
def get_raw_tasks(access_token):
"""
Raw tasks contain some fields in human-unreadable form. For example, folder or context.
"""
return generic_get_and_backup(access_token=access_token, parameter_name='tasks',
default_fields=DEFAULT_TASK_FIELDS, optional_fields=OPTIONAL_TASK_FIELDS,
readable_table_name="raw tasks", start_from=1)
def get_and_backup_folders(access_token, filename):
return generic_get_and_backup(access_token=access_token, filename=filename,
parameter_name='folders', default_fields=DEFAULT_FOLDER_FIELDS)
def get_and_backup_contexts(access_token, filename):
return generic_get_and_backup(access_token=access_token, filename=filename,
parameter_name='contexts', default_fields=DEFAULT_CONTEXT_FIELDS)
def get_and_backup_goals(access_token, filename):
return generic_get_and_backup(access_token=access_token, filename=filename,
parameter_name='goals', default_fields=DEFAULT_GOAL_FIELDS)
def get_and_backup_locations(access_token, filename):
return generic_get_and_backup(access_token=access_token, filename=filename,
parameter_name='locations', default_fields=DEFAULT_LOCATION_FIELDS)
def get_and_backup_notes(access_token, filename):
return generic_get_and_backup(access_token=access_token, filename=filename,
parameter_name='notes', default_fields=DEFAULT_NOTES_FIELDS)
def backup_list_details(access_token, list_info, lists_path):
list_col_df = pd.DataFrame(list_info["cols"])
try:
list_col_df.to_csv(lists_path+"cols_list_"+str(list_info["id"])+".csv", index=False)
logging.info("Saved list %s columns successfully", list_info["id"])
except Exception as e:
logging.warning("Failed to backup list %s columns: %s", list_info["id"], str(e))
#http://api.toodledo.com/3/rows/get.php?access_token=yourtoken&after=1234567890&list=1234567890
list_row_df, row_json =generic_get_and_backup(
access_token=access_token,
parameter_name='rows',
default_fields=LIST_ROW_DEFAULT_FIELDS,
filename=lists_path+"rows_list_"+str(list_info["id"])+".csv",
url_additions={"list": list_info["id"]},
return_json=True)
row_ids = list()
col_ids = list()
values = list()
if len(list_row_df) > 0:
for i in range(len(row_json)):
for j in range(len(row_json[i]["cells"])):
if ("c"+str(j+1)) in row_json[i]["cells"]:
values.append(row_json[i]["cells"]["c"+str(j+1)])
else:
values.append(None)
col_ids.append(list_info["cols"][j]["id"])
row_ids.append(row_json[i]["id"])
list_cell_df = pd.DataFrame({"value": values, "row_id": row_ids, "column_ids": col_ids})
else:
list_cell_df = pd.DataFrame({"value": [], "row_id": [], "column_ids": []})
list_cell_df["list_id"] = list_info["id"]
list_row_df["list_id"] = list_info["id"]
list_col_df["list_id"] = list_info["id"]
return list_row_df, list_col_df, list_cell_df
def get_and_backup_lists(access_token, backup_path):
result_df = pd.DataFrame(columns=["id","added","modified","title","version","note","keywords","rows"])
url = API_URL_PREFIX + "lists" + GET_URL_POSTFIX
all_list_rows = None
all_list_cols = None
all_list_cells = None
try:
# TODO consider parameters: after=1234567890&f=xml
data = {'access_token': access_token}
response = requests.post(url, data = data)
if response.status_code == 200:
result_json_parsed = json.loads(response.text)
lists_path = backup_path+"Lists"+os.path.sep
if not os.path.isdir(lists_path):
logging.info("Lists directory did not exist. Creating...")
os.mkdir(lists_path)
if type(result_json_parsed) == list:
if len(result_json_parsed) > 0:
for i in result_json_parsed:
cur_list_rows, cur_list_cols, cur_list_cells = backup_list_details(access_token, i, lists_path)
                        if all_list_rows is None:
                            all_list_rows = cur_list_rows
                        else:
                            # DataFrame.append was removed in pandas 2.0; concat is the supported equivalent
                            all_list_rows = pd.concat([all_list_rows, cur_list_rows], ignore_index=True)
                        if all_list_cols is None:
                            all_list_cols = cur_list_cols
                        else:
                            all_list_cols = pd.concat([all_list_cols, cur_list_cols], ignore_index=True)
                        if all_list_cells is None:
                            all_list_cells = cur_list_cells
                        else:
                            all_list_cells = pd.concat([all_list_cells, cur_list_cells], ignore_index=True)
del i["cols"]
result_df = | pd.DataFrame(result_json_parsed) | pandas.DataFrame |
"""
Main bokeh server module
"""
import os
from math import pi
import pandas as pd
# Bokeh imports
from bokeh.plotting import figure
from bokeh.layouts import row, column, layout
from bokeh.models import (
ColumnDataSource,
DatetimeTickFormatter,
)
from bokeh.palettes import Category20c
from bokeh.transform import cumsum
from bokeh.models.widgets import (
TextInput,
DataTable,
TableColumn,
NumberFormatter,
Div,
DatePicker,
Tabs,
Panel,
PreText,
RadioButtonGroup,
)
# azure_usage imports
from .constants import (
CONST_COL_NAME_COST,
CONST_COL_NAME_SERVICE,
CONST_COL_NAME_ANGLE,
CONST_COL_NAME_PERC,
CONST_COL_NAME_DATE,
DATA_FOLDER,
README_FILE,
UPDATES_FILE,
URL_PARAM_REPORT,
URL_PARAM_SUB_IDS,
URL_PARAM_DT_FROM,
URL_PARAM_DT_TO,
CONST_RB_DEFAULT,
CONST_RB_LABEL_0,
CONST_RB_LABEL_1,
CONST_RB_LABEL_2,
CONST_RB_LABEL_3,
CONST_ENCODING,
DEFAULT_REPORT_NAME,
TIMESTAMP_FILE,
DEFAULT_TIMEZONE,
)
from .subs import get_data_for_subid, get_top_services, calc_top_services_perc
from .totals import group_day
from .utilities import prep_sub_ids, parse_url_params, read_timestamp
from .data_loader import create_dataframe
# Global variables
GLOBAL_RAW_USAGE = None
GLOBAL_LAST_UPDATE = None
GLOBAL_SUB_RAW_USAGE = None
GLOBAL_SUB_SERVICE_GRP = None
GLOBAL_TOTAL_SOURCE = None
GLOBAL_WIDGET_SUBID = TextInput()
GLOBAL_DATE_PICKER_FROM = None
GLOBAL_DATE_PICKER_TO = None
GLOBAL_WIDGET_SUBSCRIPTION_NAMES = None
GLOBAL_WIDGET_TOTAL_TEXT = None
GLOBAL_WIDGET_TOP_SERVICES_RB = None
GLOBAL_S1 = None
GLOBAL_S2 = None
GLOBAL_TOP_SERVICE_SOURCE = None
GLOBAL_PWD = os.path.dirname(os.path.realpath(__file__))
def readin_data(data_path=None):
"""Reads in raw Azure usage data.
Returns:
dataframe with the read in data
"""
if data_path is None:
_data_path = os.path.join(
os.path.join(GLOBAL_PWD, "..", ".."), DATA_FOLDER
)
else:
_data_path = data_path
raw_usage = create_dataframe(_data_path)
last_update = read_timestamp(os.path.join(_data_path, TIMESTAMP_FILE))
return raw_usage, last_update
def get_url_params(doc):
"""Reads in url parameters.
Arguments:
doc: a bokeh document to which elements can be added.
Returns:
A dictionary with the parsed url parameters
"""
doc_args = doc.session_context.request.arguments
return parse_url_params(doc_args)
def create_updates_tab():
"""
Creates updates tab
"""
file_path = os.path.join(
os.path.join(GLOBAL_PWD, "..", ".."), UPDATES_FILE
)
updates_text = ""
with open(file_path, "r", encoding=CONST_ENCODING) as myfile:
updates_text = myfile.read()
widget_updates = PreText(text="""%s""" % (updates_text))
updates_widgets = column(children=[row(children=[widget_updates])])
return Panel(child=updates_widgets, title="Release notes")
def create_about_tab():
"""
Creates about tab
"""
file_path = os.path.join(os.path.join(GLOBAL_PWD, "..", ".."), README_FILE)
# about widget
about_text = ""
line_cnt = 0
for line in open(file_path, "r", encoding=CONST_ENCODING):
if not line.startswith("[!["):
about_text += line
line_cnt += 1
widget_about = Div(text="""%s""" % (about_text), width=800)
about_widgets = column(children=[row(children=[widget_about])])
return Panel(child=about_widgets, title="About")
def create_usage_bar_plot():
"""Creates a bar plot to show daily usage"""
bar_plot = figure(
plot_height=400,
plot_width=1200,
title="",
tools="pan, reset, save, wheel_zoom, crosshair, hover",
x_axis_type="datetime",
tooltips=[
(CONST_COL_NAME_DATE, "@Date_str"),
(CONST_COL_NAME_COST, "@Cost{$0.2f}"),
],
min_border_bottom=75,
min_border_left=70,
)
bar_plot.xaxis.formatter = DatetimeTickFormatter(
hours=["%d %B %Y"],
days=["%d %B %Y"],
months=["%d %B %Y"],
years=["%d %B %Y"],
)
bar_plot.xaxis.major_label_orientation = pi / 4
bar_plot.yaxis.major_label_text_font_size = "12pt"
return bar_plot
def create_top_services_pie_plot():
"""Creates top services pie plot"""
pie_plot = figure(
plot_height=500,
plot_width=800,
x_range=(-0.5, 1.0),
tools="hover",
tooltips=[
(CONST_COL_NAME_SERVICE, "@Service"),
(CONST_COL_NAME_COST, "@Cost{$0.2f}"),
(CONST_COL_NAME_PERC, "@Perc{%0.2f}"),
],
)
return pie_plot
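# Illustrative sketch (not part of this module's call chain): a figure returned by
# create_top_services_pie_plot is typically filled with a wedge glyph driven by a
# precomputed angle column and the Category20c palette imported above, e.g.
#
#   pie_plot.wedge(x=0, y=1, radius=0.4,
#                  start_angle=cumsum(CONST_COL_NAME_ANGLE, include_zero=True),
#                  end_angle=cumsum(CONST_COL_NAME_ANGLE),
#                  line_color="white", fill_color="color",
#                  legend_field=CONST_COL_NAME_SERVICE, source=GLOBAL_TOP_SERVICE_SOURCE)
#
# The exact column names and palette handling here are assumptions based on the imports.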
# NO UNIT TESTS FOR ALL THE FOLLOWING FUNCTIONS
def modify_doc(doc):
"""Main method to create a bokeh document
Arguments:
doc: a bokeh document to which elements can be added.
"""
global GLOBAL_RAW_USAGE
global GLOBAL_LAST_UPDATE
# get the url parameters
url_params = get_url_params(doc)
# reads in the raw usage data
GLOBAL_RAW_USAGE, GLOBAL_LAST_UPDATE = readin_data()
# initialises data sources
initiliase_data_sources()
# sets the layout of the webapp
_set_layout(doc, url_params)
# plots the data
_update_data(doc, url_params)
# changing title
doc.title = DEFAULT_REPORT_NAME
def initiliase_data_sources():
"""
Initialises data sources
"""
global GLOBAL_RAW_USAGE
global GLOBAL_SUB_RAW_USAGE
global GLOBAL_SUB_SERVICE_GRP
global GLOBAL_TOTAL_SOURCE
global GLOBAL_TOP_SERVICE_SOURCE
default_subid = ""
prep_sub_ids_list = prep_sub_ids(default_subid)
GLOBAL_SUB_RAW_USAGE = get_data_for_subid(
GLOBAL_RAW_USAGE, prep_sub_ids_list
)
sub_raw_usage_grp = group_day(GLOBAL_SUB_RAW_USAGE, add_missing_days=True)
GLOBAL_TOTAL_SOURCE = ColumnDataSource(data=sub_raw_usage_grp)
# Generates a data frame of top services which costed more than min_cost,
# rest of the costs added as Other
GLOBAL_SUB_SERVICE_GRP = get_top_services(GLOBAL_SUB_RAW_USAGE)
# calculate percentages for the top services
GLOBAL_SUB_SERVICE_GRP = calc_top_services_perc(GLOBAL_SUB_SERVICE_GRP)
# set the data source object
GLOBAL_TOP_SERVICE_SOURCE = ColumnDataSource(data=GLOBAL_SUB_SERVICE_GRP)
def _update_data(*_):
"""
Updates data
"""
global GLOBAL_RAW_USAGE
global GLOBAL_TOTAL_SOURCE
global GLOBAL_TOP_SERVICE_SOURCE
global GLOBAL_WIDGET_SUBID
global GLOBAL_DATE_PICKER_FROM
global GLOBAL_DATE_PICKER_TO
global GLOBAL_WIDGET_SUBSCRIPTION_NAMES
global GLOBAL_WIDGET_TOTAL_TEXT
global GLOBAL_WIDGET_TOP_SERVICES_RB
all_mode = False
if GLOBAL_WIDGET_SUBID.value.upper() == "ALLMODE":
all_mode = True
new_sub_raw_usage = GLOBAL_RAW_USAGE.copy(deep=True)
prep_sub_ids_list = new_sub_raw_usage["SubscriptionGuid"].unique()
else:
prep_sub_ids_list = prep_sub_ids(GLOBAL_WIDGET_SUBID.value.upper())
new_sub_raw_usage = get_data_for_subid(
GLOBAL_RAW_USAGE, prep_sub_ids_list
)
# Adjusting date interval
if not new_sub_raw_usage.empty:
date_st = pd.Timestamp(GLOBAL_DATE_PICKER_FROM.value)
date_end = | pd.Timestamp(GLOBAL_DATE_PICKER_TO.value) | pandas.Timestamp |
import pandas as pd
from glob import glob
import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
from tensorpack import *
import sys
import os
import tensorflow as tf
from tensorpack.utils.viz import stack_patches
import cv2
import tensorpack.utils.viz as viz
from tensorpack.tfutils import get_tf_version_tuple
from keras import backend as K
from scipy.stats.stats import pearsonr
from skimage.metrics import structural_similarity
from sklearn.metrics import mean_absolute_error, mean_squared_error
import sys
import warnings
warnings.simplefilter("ignore")
def read_single_oct(fpath):
with open(fpath, 'rb') as f:
data = np.frombuffer(f.read(), 'uint8')
cube = data.reshape((200, 1024, 200), order='C')
if 'OS' in fpath:
cube = cube[:, ::-1, ::-1]
# plt.imshow(np.squeeze(np.sum(cube, axis=1)),cmap='gray')
# plt.show()
return cube
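# Hedged usage sketch (the file path is a placeholder): reading a cube and collapsing
# the depth axis gives the en-face projection hinted at in the commented-out lines above.
#
#   cube = read_single_oct('/path/to/scan_OD.img')   # (200, 1024, 200) uint8 volume
#   enface = cube.sum(axis=1)                        # 200 x 200 en-face image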
def plot_training_perf(path):
pd.options.plotting.backend = "plotly"
files_list = glob(path)
final_df = | pd.DataFrame() | pandas.DataFrame |
import os
import sys
import warnings
import glob
import logging
warnings.filterwarnings("ignore")
logging.basicConfig(level=logging.INFO)
import numpy as np
import pandas as pd
| pd.set_option('display.float_format', lambda x: '%.3f' % x) | pandas.set_option |
import numpy as np
import pandas
import matplotlib.pyplot as pl
import math
import matplotlib as mpl
from matplotlib import cm
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.utils import resample
from sklearn.metrics import roc_curve as auc
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import LeaveOneOut
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.decomposition import KernelPCA
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import mutual_info_classif
from scipy.stats import pearsonr
# Custom Script
import aims_analysis as aims
# Define some initial stuff and import analysis functions:
#AA_key_old=['<KEY>','I','C','Y','H','R','N','D','T']
AA_key=['<KEY>']
# So we've got 46 orthogonal (or at least not super correlated)
# dimensions. Add them in to the matrix
# From "Hot spot prediction in protein-protein interactions by an ensemble system"
# Liu et al., BMC Systems Biology
newnew=pandas.read_csv('app_data/new_props')
oldold=pandas.read_csv('app_data/old_props')
# Again, ugly to hard code in the number of properties (62) but
# For now no harm no foul
properties=np.zeros((62,20))
for i in np.arange(len(AA_key)):
properties[0:16,i]=oldold[AA_key[i]]
properties[16:,i]=newnew[AA_key[i]]
AA_num_key_new=properties[1]
AA_num_key=np.arange(20)+1
def apply_matrix(mono_PCA,max_diffs,mat_size=100,props=properties[1:],ridZero=False,win_size = 3):
# Try to maximize differences across the properties by looking at patterning...
# Re-normalize the properties for use in the matrix...
for i in np.arange(len(props)):
props[i] = props[i]-np.average(props[i])
props[i] = props[i]/np.linalg.norm(props[i])
# Since we'll be averaging shit, let's get rid of all the zeros...
# However, that's going to result in bleed-over across the loops... Is this good or bad?
# This is also going to have a strange effect based upon loop length...
# WHATEVER, Try both
if ridZero:
mono_pca_NEW=np.transpose(mono_PCA)[~np.all(np.transpose(mono_PCA) == 0,axis=1)]
mono_pca_NEW=np.transpose(mono_pca_NEW)
else:
mono_pca_NEW = mono_PCA
# So this is where we should be able to do the averaging
# Initialize the variable
new_mat_mono=np.zeros((mat_size,len(mono_pca_NEW)))
for j in np.arange(len(mono_pca_NEW)): # for every clone
for k in np.arange(len(max_diffs)):
for win_slide in np.arange(win_size):
# This really shouldn't happen, but just in case...
if max_diffs[k,2]+win_slide > len(mono_pca_NEW[0]):
print('Weird error, look closer at your data')
continue
for m in AA_num_key:
if mono_pca_NEW[j,int(max_diffs[k,2]+win_slide)]==m:
# So I think that 0 and 1 should correspond to charge and hydrophobicity...
new_mat_mono[k,j]=new_mat_mono[k,j] + props[int(max_diffs[k,1]),m-1]
return(new_mat_mono/win_size)
# CAN WE DO IT WITH ONE MATRIX???
def get_bigass_matrix(ALL_mono, OneChain = False, giveSize=[], onlyCen = False,
manuscript_arrange=False,special='', alignment = 'center', norm = True):
# THIS TERM IS ACTUALLY IRRELEVANT. NEED TO DEFINE A "GET MASK" function
if OneChain:
mono_PCA = aims.gen_1Chain_matrix(ALL_mono,key=AA_num_key_new/np.linalg.norm(AA_num_key_new),binary=False,giveSize=giveSize)
mono_MI = aims.gen_1Chain_matrix(ALL_mono,key=AA_num_key,binary=False,giveSize=giveSize)
else:
if special =='peptide':
mono_PCA = aims.gen_peptide_matrix(ALL_mono,key=AA_num_key_new/np.linalg.norm(AA_num_key_new),
binary=False)
mono_MI = aims.gen_peptide_matrix(ALL_mono,key=AA_num_key,binary=False)
else:
mono_PCA = aims.gen_tcr_matrix(ALL_mono,key=AA_num_key_new/np.linalg.norm(AA_num_key_new),
binary=False,giveSize=giveSize,manuscript_arrange = manuscript_arrange, alignment = alignment)
mono_MI = aims.gen_tcr_matrix(ALL_mono,key=AA_num_key,binary=False,giveSize=giveSize,
manuscript_arrange=manuscript_arrange, alignment = alignment)
if onlyCen:
mono_PCAF = mono_PCA[:,4:-4]
mono_MIF = mono_MI[:,4:-4]
else:
mono_PCAF = mono_PCA
mono_MIF = mono_MI
BIG_mono = aims.getBig(mono_MIF, norm = norm)
amono,bmono,cmono = np.shape(BIG_mono)
#SO WE CANT JUST USE NP.RESHAPE
#BECAUSE THAT COMMAND IS AGNOSTIC TO THE
#FACT THAT OUR DATA IS ARRANGED BY CLONE...
BIG_mono_final = np.zeros((bmono,amono*cmono))
for i in np.arange(bmono):
BIG_mono_final[i] = BIG_mono[:,i,:].reshape(amono*cmono)
mono_pca_stack = np.hstack([mono_PCAF,BIG_mono_final])
return(mono_pca_stack)
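# Hedged illustration of the reshape-by-clone step above: with a, b, c = np.shape(BIG)
# where b indexes clones, slicing BIG[:, i, :] and flattening keeps every feature of
# clone i contiguous in one row, whereas a plain np.reshape on the full array would
# interleave features from different clones. A tiny example with a=2, b=3, c=2:
#
#   BIG = np.arange(12).reshape(2, 3, 2)
#   row0 = BIG[:, 0, :].reshape(4)   # array([0, 1, 6, 7]) -- clone 0 only
#
# np.reshape(BIG, (3, 4)) would instead give rows mixing entries of several clones.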
def do_classy_mda(ALL_mono, ALL_poly, matsize = 100, OneChain = False, special= '',
xVal = 'kfold',ridCorr = False, feat_sel = 'none', classif = 'mda'):
mono_dim=np.shape(ALL_mono)[1]
poly_dim=np.shape(ALL_poly)[1]
y_mono_all = np.ones(mono_dim)
y_poly_all = 2*np.ones(poly_dim)
y_all = np.hstack((y_mono_all,y_poly_all))
seqs_all = np.hstack((ALL_mono,ALL_poly))
# Stupid to recreate this matrix every single time... Should do it BEFORE, then splitting
bigass_matrix = get_bigass_matrix(seqs_all, OneChain = OneChain, special = special)
# Alright so here is where the data is actually split into the test/train/etc.
if xVal == 'loo':
crosser = LeaveOneOut()
elif xVal == 'kfold':
crosser = KFold(n_splits=10,shuffle=True) # Lets do 10x cross validation
elif xVal == 'strat_kfold':
# Random State can be defined if we want a reproducible shuffle
crosser = StratifiedKFold(n_splits=10, random_state=None, shuffle=True)
cycle = 0
for train, test in crosser.split(bigass_matrix, y_all):
X_train_pre, X_test_pre, y_train, y_test = bigass_matrix[train,:], bigass_matrix[test,:], y_all[train], y_all[test]
# REMOVE HIGHLY CORRELATED FEATURES
if ridCorr:
pandaMatTrain = | pandas.DataFrame(X_train_pre) | pandas.DataFrame |
from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import collections
import numpy as np
import re
from numpy import array
from statistics import mode
import pandas as pd
import warnings
import copy
from joblib import Memory
from itertools import chain
import ast
import timeit
from sklearn.neighbors import KNeighborsClassifier # 1 neighbors
from sklearn.svm import SVC # 1 svm
from sklearn.naive_bayes import GaussianNB # 1 naive bayes
from sklearn.neural_network import MLPClassifier # 1 neural network
from sklearn.linear_model import LogisticRegression # 1 linear model
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis # 2 discriminant analysis
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier, GradientBoostingClassifier # 4 ensemble models
from joblib import Parallel, delayed
import multiprocessing
from sklearn.pipeline import make_pipeline
from sklearn import model_selection
from sklearn.manifold import MDS
from sklearn.manifold import TSNE
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import log_loss
from sklearn.metrics import fbeta_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from imblearn.metrics import geometric_mean_score
import umap
from sklearn.metrics import classification_report
from sklearn.preprocessing import scale
import eli5
from eli5.sklearn import PermutationImportance
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import RFE
from sklearn.decomposition import PCA
from mlxtend.classifier import StackingCVClassifier
from mlxtend.feature_selection import ColumnSelector
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ShuffleSplit
from scipy.spatial import procrustes
# This block of code is for the connection between the server, the database, and the client (plus routing).
# Access MongoDB
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb"
mongo = PyMongo(app)
cors = CORS(app, resources={r"/data/*": {"origins": "*"}})
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def Reset():
global DataRawLength
global DataResultsRaw
global previousState
previousState = []
global filterActionFinal
filterActionFinal = ''
global keySpecInternal
keySpecInternal = 1
global dataSpacePointsIDs
dataSpacePointsIDs = []
global previousStateActive
previousStateActive = []
global StanceTest
StanceTest = False
global status
status = True
global factors
factors = [1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,1,1,1]
global KNNModelsCount
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
global keyData
keyData = 0
KNNModelsCount = 0
SVCModelsCount = 576
GausNBModelsCount = 736
MLPModelsCount = 1236
LRModelsCount = 1356
LDAModelsCount = 1996
QDAModelsCount = 2196
RFModelsCount = 2446
ExtraTModelsCount = 2606
AdaBModelsCount = 2766
GradBModelsCount = 2926
global XData
XData = []
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global detailsParams
detailsParams = []
global algorithmList
algorithmList = []
global ClassifierIDsList
ClassifierIDsList = ''
# Initializing models
global resultsList
resultsList = []
global RetrieveModelsList
RetrieveModelsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 5
# models
global KNNModels
KNNModels = []
global RFModels
RFModels = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
return 'The reset was done!'
# Retrieve data from client and select the correct data set
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequest', methods=["GET", "POST"])
def RetrieveFileName():
global DataRawLength
global DataResultsRaw
global DataResultsRawTest
global DataRawLengthTest
fileName = request.get_data().decode('utf8').replace("'", '"')
global keySpecInternal
keySpecInternal = 1
global filterActionFinal
filterActionFinal = ''
global dataSpacePointsIDs
dataSpacePointsIDs = []
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global XData
XData = []
global previousState
previousState = []
global previousStateActive
previousStateActive = []
global status
status = True
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global filterDataFinal
filterDataFinal = 'mean'
global ClassifierIDsList
ClassifierIDsList = ''
global algorithmList
algorithmList = []
global detailsParams
detailsParams = []
# Initializing models
global RetrieveModelsList
RetrieveModelsList = []
global resultsList
resultsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
# models
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
KNNModels = []
SVCModels = []
GausNBModels = []
MLPModels = []
LRModels = []
LDAModels = []
QDAModels = []
RFModels = []
ExtraTModels = []
AdaBModels = []
GradBModels = []
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global StanceTest
StanceTest = False
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
DataRawLength = -1
DataRawLengthTest = -1
data = json.loads(fileName)
if data['fileName'] == 'HeartC':
CollectionDB = mongo.db.HeartC.find()
elif data['fileName'] == 'StanceC':
StanceTest = True
CollectionDB = mongo.db.StanceC.find()
CollectionDBTest = mongo.db.StanceCTest.find()
elif data['fileName'] == 'DiabetesC':
CollectionDB = mongo.db.diabetesC.find()
elif data['fileName'] == 'BreastC':
CollectionDB = mongo.db.breastC.find()
elif data['fileName'] == 'WineC':
CollectionDB = mongo.db.WineC.find()
elif data['fileName'] == 'ContraceptiveC':
CollectionDB = mongo.db.ContraceptiveC.find()
elif data['fileName'] == 'VehicleC':
CollectionDB = mongo.db.VehicleC.find()
elif data['fileName'] == 'BiodegC':
StanceTest = True
CollectionDB = mongo.db.biodegC.find()
CollectionDBTest = mongo.db.biodegCTest.find()
else:
CollectionDB = mongo.db.IrisC.find()
DataResultsRaw = []
for index, item in enumerate(CollectionDB):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRaw.append(item)
DataRawLength = len(DataResultsRaw)
DataResultsRawTest = []
if (StanceTest):
for index, item in enumerate(CollectionDBTest):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawTest.append(item)
DataRawLengthTest = len(DataResultsRawTest)
DataSetSelection()
return 'Everything is okay'
def Convert(lst):
it = iter(lst)
res_dct = dict(zip(it, it))
return res_dct
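# Convert pairs up consecutive elements of a flat list into a dictionary,
# e.g. Convert(['a', 1, 'b', 2]) -> {'a': 1, 'b': 2}.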
# Retrieve data set from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverDataSet', methods=["GET", "POST"])
def SendToServerData():
uploadedData = request.get_data().decode('utf8').replace("'", '"')
uploadedDataParsed = json.loads(uploadedData)
DataResultsRaw = uploadedDataParsed['uploadedData']
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary[target]
global AllTargets
global target_names
global target_namesLoc
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
return 'Processed uploaded data set'
# Send data to the client
@app.route('/data/ClientRequest', methods=["GET", "POST"])
def CollectionData():
json.dumps(DataResultsRaw)
response = {
'Collection': DataResultsRaw
}
return jsonify(response)
def DataSetSelection():
global XDataTest, yDataTest
XDataTest = pd.DataFrame()
global StanceTest
global AllTargets
global target_names
target_namesLoc = []
if (StanceTest):
DataResultsTest = copy.deepcopy(DataResultsRawTest)
for dictionary in DataResultsRawTest:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRawTest.sort(key=lambda x: x[target], reverse=True)
DataResultsTest.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResultsTest:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargetsTest = [o[target] for o in DataResultsRawTest]
AllTargetsFloatValuesTest = []
previous = None
Class = 0
for i, value in enumerate(AllTargetsTest):
if (i == 0):
previous = value
target_namesLoc.append(value)
if (value == previous):
AllTargetsFloatValuesTest.append(Class)
else:
Class = Class + 1
target_namesLoc.append(value)
AllTargetsFloatValuesTest.append(Class)
previous = value
ArrayDataResultsTest = pd.DataFrame.from_dict(DataResultsTest)
XDataTest, yDataTest = ArrayDataResultsTest, AllTargetsFloatValuesTest
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
warnings.simplefilter('ignore')
return 'Everything is okay'
def callPreResults():
global XData
global yData
global target_names
global impDataInst
DataSpaceResMDS = FunMDS(XData)
DataSpaceResTSNE = FunTsne(XData)
DataSpaceResTSNE = DataSpaceResTSNE.tolist()
DataSpaceUMAP = FunUMAP(XData)
XDataJSONEntireSetRes = XData.to_json(orient='records')
global preResults
preResults = []
preResults.append(json.dumps(target_names)) # Position: 0
preResults.append(json.dumps(DataSpaceResMDS)) # Position: 1
preResults.append(json.dumps(XDataJSONEntireSetRes)) # Position: 2
preResults.append(json.dumps(yData)) # Position: 3
preResults.append(json.dumps(AllTargets)) # Position: 4
preResults.append(json.dumps(DataSpaceResTSNE)) # Position: 5
preResults.append(json.dumps(DataSpaceUMAP)) # Position: 6
preResults.append(json.dumps(impDataInst)) # Position: 7
# Send the precomputed data space results (targets, MDS/t-SNE/UMAP projections, etc.) to the frontend
@app.route('/data/requestDataSpaceResults', methods=["GET", "POST"])
def SendDataSpaceResults():
global preResults
callPreResults()
response = {
'preDataResults': preResults,
}
return jsonify(response)
# Main entry point.
# NOTE: when the module is run as a script, app.run() blocks here before the routes defined
# further down are registered; it should be invoked after all route definitions
# (i.e. at the end of the file).
if __name__ == '__main__':
    app.run()
# Debugging and mirroring client
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
if app.debug:
return requests.get('http://localhost:8080/{}'.format(path)).text
return render_template("index.html")
# This block of code is for server computations
def column_index(df, query_cols):
cols = df.columns.values
sidx = np.argsort(cols)
return sidx[np.searchsorted(cols,query_cols,sorter=sidx)].tolist()
def class_feature_importance(X, Y, feature_importances):
    N, M = X.shape
    X = scale(X)
    out = {}
    for c in set(Y):
        # mean (scaled) value of every feature over the instances of class c, weighted by the
        # global feature importances; the keys are feature indices, hence range(M) not range(N)
        out[c] = dict(
            zip(range(M), np.mean(X[Y == c, :], axis=0) * feature_importances)
        )
    return out
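# Usage sketch (illustrative only, not called here): given a tree ensemble `rf` fitted on XData/yData,
#   per_class_imp = class_feature_importance(XData.values, np.array(yData), rf.feature_importances_)
# where `rf` is assumed to be e.g. a fitted RandomForestClassifier.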
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/EnsembleMode', methods=["GET", "POST"])
def EnsembleMethod():
global crossValidation
global RANDOM_SEED
global XData
RANDOM_SEED = 42
RetrievedStatus = request.get_data().decode('utf8').replace("'", '"')
RetrievedStatus = json.loads(RetrievedStatus)
modeMethod = RetrievedStatus['defaultModeMain']
if (modeMethod == 'blend'):
crossValidation = ShuffleSplit(n_splits=1, test_size=.20, random_state=RANDOM_SEED)
else:
crossValidation = 5
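    # 'blend' mode evaluates the models on a single 80/20 hold-out split (ShuffleSplit, n_splits=1);
    # any other mode falls back to plain 5-fold cross-validation.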
return 'Okay'
# Initialize every model for each algorithm
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestSelParameters', methods=["GET", "POST"])
def RetrieveModel():
    # get the selected algorithms and the toggle flag from the frontend
RetrievedModel = request.get_data().decode('utf8').replace("'", '"')
RetrievedModel = json.loads(RetrievedModel)
global algorithms
algorithms = RetrievedModel['Algorithms']
toggle = RetrievedModel['Toggle']
global crossValidation
global XData
global yData
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
# loop through the algorithms
global allParametersPerformancePerModel
start = timeit.default_timer()
print('CVorTT', crossValidation)
for eachAlgor in algorithms:
if (eachAlgor) == 'KNN':
clf = KNeighborsClassifier()
params = {'n_neighbors': list(range(1, 25)), 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'weights': ['uniform', 'distance']}
AlgorithmsIDsEnd = 0
elif (eachAlgor) == 'SVC':
clf = SVC(probability=True,random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.1,4.43,0.11)), 'kernel': ['rbf','linear', 'poly', 'sigmoid']}
AlgorithmsIDsEnd = SVCModelsCount
elif (eachAlgor) == 'GauNB':
clf = GaussianNB()
params = {'var_smoothing': list(np.arange(0.00000000001,0.0000001,0.0000000002))}
AlgorithmsIDsEnd = GausNBModelsCount
elif (eachAlgor) == 'MLP':
clf = MLPClassifier(random_state=RANDOM_SEED)
params = {'alpha': list(np.arange(0.00001,0.001,0.0002)), 'tol': list(np.arange(0.00001,0.001,0.0004)), 'max_iter': list(np.arange(100,200,100)), 'activation': ['relu', 'identity', 'logistic', 'tanh'], 'solver' : ['adam', 'sgd']}
AlgorithmsIDsEnd = MLPModelsCount
elif (eachAlgor) == 'LR':
clf = LogisticRegression(random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.5,2,0.075)), 'max_iter': list(np.arange(50,250,50)), 'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'], 'penalty': ['l2', 'none']}
AlgorithmsIDsEnd = LRModelsCount
elif (eachAlgor) == 'LDA':
clf = LinearDiscriminantAnalysis()
params = {'shrinkage': list(np.arange(0,1,0.01)), 'solver': ['lsqr', 'eigen']}
AlgorithmsIDsEnd = LDAModelsCount
elif (eachAlgor) == 'QDA':
clf = QuadraticDiscriminantAnalysis()
params = {'reg_param': list(np.arange(0,1,0.02)), 'tol': list(np.arange(0.00001,0.001,0.0002))}
AlgorithmsIDsEnd = QDAModelsCount
elif (eachAlgor) == 'RF':
clf = RandomForestClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = RFModelsCount
elif (eachAlgor) == 'ExtraT':
clf = ExtraTreesClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = ExtraTModelsCount
elif (eachAlgor) == 'AdaB':
clf = AdaBoostClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(40, 80)), 'learning_rate': list(np.arange(0.1,2.3,1.1)), 'algorithm': ['SAMME.R', 'SAMME']}
AlgorithmsIDsEnd = AdaBModelsCount
else:
clf = GradientBoostingClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(85, 115)), 'learning_rate': list(np.arange(0.01,0.23,0.11)), 'criterion': ['friedman_mse', 'mse', 'mae']}
AlgorithmsIDsEnd = GradBModelsCount
allParametersPerformancePerModel = GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossValidation)
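        # GridSearchForModels appends nine result entries per algorithm to the module-level
        # `results` list and returns that same list, so after the loop
        # allParametersPerformancePerModel holds the concatenated results of all searched algorithms.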
    # (disabled) export of the model-space metrics to modelSpace.csv for an alternative visualization
# header = "model_id,algorithm_id,mean_test_accuracy,mean_test_precision_micro,mean_test_precision_macro,mean_test_precision_weighted,mean_test_recall_micro,mean_test_recall_macro,mean_test_recall_weighted,mean_test_roc_auc_ovo_weighted,geometric_mean_score_micro,geometric_mean_score_macro,geometric_mean_score_weighted,matthews_corrcoef,f5_micro,f5_macro,f5_weighted,f1_micro,f1_macro,f1_weighted,f2_micro,f2_macro,f2_weighted,log_loss\n"
# dataReceived = []
# counter = 0
# for indx, el in enumerate(allParametersPerformancePerModel):
# dictFR = json.loads(el)
# frame = pd.DataFrame.from_dict(dictFR)
# for ind, elInside in frame.iterrows():
# counter = counter + 1
# dataReceived.append(str(counter))
# dataReceived.append(',')
# dataReceived.append(str(indx+1))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_accuracy']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_roc_auc_ovo_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['matthews_corrcoef']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['log_loss']))
# dataReceived.append("\n")
# dataReceivedItems = ''.join(dataReceived)
# csvString = header + dataReceivedItems
# fw = open ("modelSpace.csv","w+",encoding="utf-8")
# fw.write(csvString)
# fw.close()
# call the function that sends the results to the frontend
stop = timeit.default_timer()
print('Time GridSearch: ', stop - start)
SendEachClassifiersPerformanceToVisualize()
return 'Everything Okay'
location = './cachedir'
memory = Memory(location, verbose=0)
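# joblib's Memory memoizes GridSearchForModels on disk under ./cachedir, keyed on its arguments,
# so repeating an identical grid search loads the stored results instead of re-fitting the models.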
# Compute the performance metrics and related results for every algorithm and each of its models
@memory.cache
def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossVal):
print('loop')
# this is the grid we use to train the models
grid = GridSearchCV(
estimator=clf, param_grid=params,
cv=crossVal, refit='accuracy', scoring=scoring,
verbose=0, n_jobs=-1)
    # fit the grid search over the whole parameter grid
grid.fit(XData, yData)
# process the results
cv_results = []
cv_results.append(grid.cv_results_)
df_cv_results = pd.DataFrame.from_dict(cv_results)
# number of models stored
number_of_models = len(df_cv_results.iloc[0][0])
# initialize results per row
df_cv_results_per_row = []
# loop through number of models
modelsIDs = []
for i in range(number_of_models):
modelsIDs.append(AlgorithmsIDsEnd+i)
# initialize results per item
df_cv_results_per_item = []
for column in df_cv_results.iloc[0]:
df_cv_results_per_item.append(column[i])
df_cv_results_per_row.append(df_cv_results_per_item)
# store the results into a pandas dataframe
df_cv_results_classifiers = pd.DataFrame(data = df_cv_results_per_row, columns= df_cv_results.columns)
# copy and filter in order to get only the metrics
metrics = df_cv_results_classifiers.copy()
metrics = metrics.filter(['mean_test_accuracy','mean_test_precision_micro','mean_test_precision_macro','mean_test_precision_weighted','mean_test_recall_micro','mean_test_recall_macro','mean_test_recall_weighted','mean_test_roc_auc_ovo_weighted'])
# concat parameters and performance
parametersPerformancePerModel = pd.DataFrame(df_cv_results_classifiers['params'])
parametersPerformancePerModel = parametersPerformancePerModel.to_json()
parametersLocal = json.loads(parametersPerformancePerModel)['params'].copy()
Models = []
for index, items in enumerate(parametersLocal):
Models.append(str(index))
parametersLocalNew = [ parametersLocal[your_key] for your_key in Models ]
permList = []
PerFeatureAccuracy = []
PerFeatureAccuracyAll = []
PerClassMetric = []
perModelProb = []
perModelPrediction = []
resultsMicro = []
resultsMacro = []
resultsWeighted = []
resultsCorrCoef = []
resultsMicroBeta5 = []
resultsMacroBeta5 = []
resultsWeightedBeta5 = []
resultsMicroBeta1 = []
resultsMacroBeta1 = []
resultsWeightedBeta1 = []
resultsMicroBeta2 = []
resultsMacroBeta2 = []
resultsWeightedBeta2 = []
resultsLogLoss = []
resultsLogLossFinal = []
loop = 8
# influence calculation for all the instances
inputs = range(len(XData))
num_cores = multiprocessing.cpu_count()
#impDataInst = Parallel(n_jobs=num_cores)(delayed(processInput)(i,XData,yData,crossValidation,clf) for i in inputs)
for eachModelParameters in parametersLocalNew:
clf.set_params(**eachModelParameters)
if (toggle == 1):
perm = PermutationImportance(clf, cv = None, refit = True, n_iter = 25).fit(XData, yData)
permList.append(perm.feature_importances_)
n_feats = XData.shape[1]
PerFeatureAccuracy = []
for i in range(n_feats):
scores = model_selection.cross_val_score(clf, XData.values[:, i].reshape(-1, 1), yData, cv=5)
PerFeatureAccuracy.append(scores.mean())
PerFeatureAccuracyAll.append(PerFeatureAccuracy)
else:
permList.append(0)
PerFeatureAccuracyAll.append(0)
clf.fit(XData, yData)
yPredict = clf.predict(XData)
yPredict = np.nan_to_num(yPredict)
perModelPrediction.append(yPredict)
# retrieve target names (class names)
PerClassMetric.append(classification_report(yData, yPredict, target_names=target_names, digits=2, output_dict=True))
yPredictProb = clf.predict_proba(XData)
yPredictProb = np.nan_to_num(yPredictProb)
perModelProb.append(yPredictProb.tolist())
resultsMicro.append(geometric_mean_score(yData, yPredict, average='micro'))
resultsMacro.append(geometric_mean_score(yData, yPredict, average='macro'))
resultsWeighted.append(geometric_mean_score(yData, yPredict, average='weighted'))
resultsCorrCoef.append(matthews_corrcoef(yData, yPredict))
resultsMicroBeta5.append(fbeta_score(yData, yPredict, average='micro', beta=0.5))
resultsMacroBeta5.append(fbeta_score(yData, yPredict, average='macro', beta=0.5))
resultsWeightedBeta5.append(fbeta_score(yData, yPredict, average='weighted', beta=0.5))
resultsMicroBeta1.append(fbeta_score(yData, yPredict, average='micro', beta=1))
resultsMacroBeta1.append(fbeta_score(yData, yPredict, average='macro', beta=1))
resultsWeightedBeta1.append(fbeta_score(yData, yPredict, average='weighted', beta=1))
resultsMicroBeta2.append(fbeta_score(yData, yPredict, average='micro', beta=2))
resultsMacroBeta2.append(fbeta_score(yData, yPredict, average='macro', beta=2))
resultsWeightedBeta2.append(fbeta_score(yData, yPredict, average='weighted', beta=2))
resultsLogLoss.append(log_loss(yData, yPredictProb, normalize=True))
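    # Min-max normalise the raw log-loss values across this algorithm's models,
    # i.e. (loss - min) / (max - min), so 0 marks the best and 1 the worst model
    # (assumes at least two distinct loss values).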
maxLog = max(resultsLogLoss)
minLog = min(resultsLogLoss)
for each in resultsLogLoss:
resultsLogLossFinal.append((each-minLog)/(maxLog-minLog))
metrics.insert(loop,'geometric_mean_score_micro',resultsMicro)
metrics.insert(loop+1,'geometric_mean_score_macro',resultsMacro)
metrics.insert(loop+2,'geometric_mean_score_weighted',resultsWeighted)
metrics.insert(loop+3,'matthews_corrcoef',resultsCorrCoef)
metrics.insert(loop+4,'f5_micro',resultsMicroBeta5)
metrics.insert(loop+5,'f5_macro',resultsMacroBeta5)
metrics.insert(loop+6,'f5_weighted',resultsWeightedBeta5)
metrics.insert(loop+7,'f1_micro',resultsMicroBeta1)
metrics.insert(loop+8,'f1_macro',resultsMacroBeta1)
metrics.insert(loop+9,'f1_weighted',resultsWeightedBeta1)
metrics.insert(loop+10,'f2_micro',resultsMicroBeta2)
metrics.insert(loop+11,'f2_macro',resultsMacroBeta2)
metrics.insert(loop+12,'f2_weighted',resultsWeightedBeta2)
metrics.insert(loop+13,'log_loss',resultsLogLossFinal)
perModelPredPandas = pd.DataFrame(perModelPrediction)
perModelPredPandas = perModelPredPandas.to_json()
perModelProbPandas = pd.DataFrame(perModelProb)
perModelProbPandas = perModelProbPandas.to_json()
PerClassMetricPandas = pd.DataFrame(PerClassMetric)
del PerClassMetricPandas['accuracy']
del PerClassMetricPandas['macro avg']
del PerClassMetricPandas['weighted avg']
PerClassMetricPandas = PerClassMetricPandas.to_json()
perm_imp_eli5PD = pd.DataFrame(permList)
perm_imp_eli5PD = perm_imp_eli5PD.to_json()
PerFeatureAccuracyPandas = pd.DataFrame(PerFeatureAccuracyAll)
PerFeatureAccuracyPandas = PerFeatureAccuracyPandas.to_json()
bestfeatures = SelectKBest(score_func=chi2, k='all')
fit = bestfeatures.fit(XData,yData)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(XData.columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Specs','Score'] #naming the dataframe columns
featureScores = featureScores.to_json()
# gather the results and send them back
results.append(modelsIDs) # Position: 0 and so on
results.append(parametersPerformancePerModel) # Position: 1 and so on
results.append(PerClassMetricPandas) # Position: 2 and so on
results.append(PerFeatureAccuracyPandas) # Position: 3 and so on
results.append(perm_imp_eli5PD) # Position: 4 and so on
results.append(featureScores) # Position: 5 and so on
metrics = metrics.to_json()
results.append(metrics) # Position: 6 and so on
results.append(perModelProbPandas) # Position: 7 and so on
results.append(json.dumps(perModelPredPandas)) # Position: 8 and so on
return results
# Sending each model's results to frontend
@app.route('/data/PerformanceForEachModel', methods=["GET", "POST"])
def SendEachClassifiersPerformanceToVisualize():
response = {
'PerformancePerModel': allParametersPerformancePerModel,
}
return jsonify(response)
def Remove(duplicate):
final_list = []
for num in duplicate:
if num not in final_list:
if (isinstance(num, float)):
if np.isnan(num):
pass
else:
final_list.append(float(num))
else:
final_list.append(num)
return final_list
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendBrushedParam', methods=["GET", "POST"])
def RetrieveModelsParam():
RetrieveModelsPar = request.get_data().decode('utf8').replace("'", '"')
RetrieveModelsPar = json.loads(RetrieveModelsPar)
counterKNN = 0
counterSVC = 0
counterGausNB = 0
counterMLP = 0
counterLR = 0
counterLDA = 0
counterQDA = 0
counterRF = 0
counterExtraT = 0
counterAdaB = 0
counterGradB = 0
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
global algorithmsList
algorithmsList = RetrieveModelsPar['algorithms']
for index, items in enumerate(algorithmsList):
if (items == 'KNN'):
counterKNN += 1
KNNModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'SVC'):
counterSVC += 1
SVCModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'GauNB'):
counterGausNB += 1
GausNBModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'MLP'):
counterMLP += 1
MLPModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LR'):
counterLR += 1
LRModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LDA'):
counterLDA += 1
LDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'QDA'):
counterQDA += 1
QDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'RF'):
counterRF += 1
RFModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'ExtraT'):
counterExtraT += 1
ExtraTModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'AdaB'):
counterAdaB += 1
AdaBModels.append(int(RetrieveModelsPar['models'][index]))
else:
counterGradB += 1
GradBModels.append(int(RetrieveModelsPar['models'][index]))
return 'Everything Okay'
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/factors', methods=["GET", "POST"])
def RetrieveFactors():
global factors
global allParametersPerformancePerModel
Factors = request.get_data().decode('utf8').replace("'", '"')
FactorsInt = json.loads(Factors)
factors = FactorsInt['Factors']
# this is if we want to change the factors before running the search
#if (len(allParametersPerformancePerModel) == 0):
# pass
#else:
global sumPerClassifierSel
global ModelSpaceMDSNew
global ModelSpaceTSNENew
global metricsPerModel
sumPerClassifierSel = []
sumPerClassifierSel = preProcsumPerMetric(factors)
ModelSpaceMDSNew = []
ModelSpaceTSNENew = []
loopThroughMetrics = PreprocessingMetrics()
loopThroughMetrics = loopThroughMetrics.fillna(0)
metricsPerModel = preProcMetricsAllAndSel()
flagLocal = 0
countRemovals = 0
for l,el in enumerate(factors):
if el == 0:
loopThroughMetrics.drop(loopThroughMetrics.columns[[l-countRemovals]], axis=1, inplace=True)
countRemovals = countRemovals + 1
flagLocal = 1
if flagLocal == 1:
ModelSpaceMDSNew = FunMDS(loopThroughMetrics)
ModelSpaceTSNENew = FunTsne(loopThroughMetrics)
ModelSpaceTSNENew = ModelSpaceTSNENew.tolist()
return 'Everything Okay'
@app.route('/data/UpdateOverv', methods=["GET", "POST"])
def UpdateOverview():
ResultsUpdateOverview = []
ResultsUpdateOverview.append(sumPerClassifierSel)
ResultsUpdateOverview.append(ModelSpaceMDSNew)
ResultsUpdateOverview.append(ModelSpaceTSNENew)
ResultsUpdateOverview.append(metricsPerModel)
response = {
'Results': ResultsUpdateOverview
}
return jsonify(response)
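# allParametersPerformancePerModel is a flat list holding nine entries per algorithm, appended in
# the order KNN, SVC, GauNB, MLP, LR, LDA, QDA, RF, ExtraT, AdaB, GradB:
# offset 0 = model IDs, 1 = parameters, 2 = per-class metrics, 3 = per-feature accuracy,
# 4 = permutation importances, 5 = feature scores, 6 = CV metrics, 7 = predicted probabilities,
# 8 = predictions. This explains the hard-coded indices below (e.g. 6, 15, 24, ... for the metrics).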
def PreprocessingMetrics():
dicKNN = json.loads(allParametersPerformancePerModel[6])
dicSVC = json.loads(allParametersPerformancePerModel[15])
dicGausNB = json.loads(allParametersPerformancePerModel[24])
dicMLP = json.loads(allParametersPerformancePerModel[33])
dicLR = json.loads(allParametersPerformancePerModel[42])
dicLDA = json.loads(allParametersPerformancePerModel[51])
dicQDA = json.loads(allParametersPerformancePerModel[60])
dicRF = json.loads(allParametersPerformancePerModel[69])
dicExtraT = json.loads(allParametersPerformancePerModel[78])
dicAdaB = json.loads(allParametersPerformancePerModel[87])
dicGradB = json.loads(allParametersPerformancePerModel[96])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatMetrics = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_concatMetrics
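# PreprocessingPred averages the per-instance class probabilities across the currently selected
# models (a soft-voting style aggregation) and returns one averaged probability vector per instance.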
def PreprocessingPred():
dicKNN = json.loads(allParametersPerformancePerModel[7])
dicSVC = json.loads(allParametersPerformancePerModel[16])
dicGausNB = json.loads(allParametersPerformancePerModel[25])
dicMLP = json.loads(allParametersPerformancePerModel[34])
dicLR = json.loads(allParametersPerformancePerModel[43])
dicLDA = json.loads(allParametersPerformancePerModel[52])
dicQDA = json.loads(allParametersPerformancePerModel[61])
dicRF = json.loads(allParametersPerformancePerModel[70])
dicExtraT = json.loads(allParametersPerformancePerModel[79])
dicAdaB = json.loads(allParametersPerformancePerModel[88])
dicGradB = json.loads(allParametersPerformancePerModel[97])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatProbs = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
predictions = []
for column, content in df_concatProbs.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictions.append(el)
return predictions
def PreprocessingPredUpdate(Models):
Models = json.loads(Models)
ModelsList= []
for loop in Models['ClassifiersList']:
ModelsList.append(loop)
dicKNN = json.loads(allParametersPerformancePerModel[7])
dicSVC = json.loads(allParametersPerformancePerModel[16])
dicGausNB = json.loads(allParametersPerformancePerModel[25])
dicMLP = json.loads(allParametersPerformancePerModel[34])
dicLR = json.loads(allParametersPerformancePerModel[43])
dicLDA = json.loads(allParametersPerformancePerModel[52])
dicQDA = json.loads(allParametersPerformancePerModel[61])
dicRF = json.loads(allParametersPerformancePerModel[70])
dicExtraT = json.loads(allParametersPerformancePerModel[79])
dicAdaB = json.loads(allParametersPerformancePerModel[88])
dicGradB = json.loads(allParametersPerformancePerModel[97])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatProbs = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
listProbs = df_concatProbs.index.values.tolist()
deletedElements = 0
for index, element in enumerate(listProbs):
if element in ModelsList:
index = index - deletedElements
df_concatProbs = df_concatProbs.drop(df_concatProbs.index[index])
deletedElements = deletedElements + 1
df_concatProbsCleared = df_concatProbs
listIDsRemoved = df_concatProbsCleared.index.values.tolist()
predictionsAll = PreprocessingPred()
PredictionSpaceAll = FunMDS(predictionsAll)
PredictionSpaceAllComb = [list(a) for a in zip(PredictionSpaceAll[0], PredictionSpaceAll[1])]
predictionsSel = []
for column, content in df_concatProbsCleared.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictionsSel.append(el)
PredictionSpaceSel = FunMDS(predictionsSel)
PredictionSpaceSelComb = [list(a) for a in zip(PredictionSpaceSel[0], PredictionSpaceSel[1])]
mtx2PredFinal = []
    _, mtx2Pred, disparityPred = procrustes(PredictionSpaceAllComb, PredictionSpaceSelComb)
a1, b1 = zip(*mtx2Pred)
mtx2PredFinal.append(a1)
mtx2PredFinal.append(b1)
return [mtx2PredFinal,listIDsRemoved]
def PreprocessingParam():
dicKNN = json.loads(allParametersPerformancePerModel[1])
dicSVC = json.loads(allParametersPerformancePerModel[10])
dicGausNB = json.loads(allParametersPerformancePerModel[19])
dicMLP = json.loads(allParametersPerformancePerModel[28])
dicLR = json.loads(allParametersPerformancePerModel[37])
dicLDA = json.loads(allParametersPerformancePerModel[46])
dicQDA = json.loads(allParametersPerformancePerModel[55])
dicRF = json.loads(allParametersPerformancePerModel[64])
dicExtraT = json.loads(allParametersPerformancePerModel[73])
dicAdaB = json.loads(allParametersPerformancePerModel[82])
dicGradB = json.loads(allParametersPerformancePerModel[91])
dicKNN = dicKNN['params']
dicSVC = dicSVC['params']
dicGausNB = dicGausNB['params']
dicMLP = dicMLP['params']
dicLR = dicLR['params']
dicLDA = dicLDA['params']
dicQDA = dicQDA['params']
dicRF = dicRF['params']
dicExtraT = dicExtraT['params']
dicAdaB = dicAdaB['params']
dicGradB = dicGradB['params']
dicKNN = {int(k):v for k,v in dicKNN.items()}
dicSVC = {int(k):v for k,v in dicSVC.items()}
dicGausNB = {int(k):v for k,v in dicGausNB.items()}
dicMLP = {int(k):v for k,v in dicMLP.items()}
dicLR = {int(k):v for k,v in dicLR.items()}
dicLDA = {int(k):v for k,v in dicLDA.items()}
dicQDA = {int(k):v for k,v in dicQDA.items()}
dicRF = {int(k):v for k,v in dicRF.items()}
dicExtraT = {int(k):v for k,v in dicExtraT.items()}
dicAdaB = {int(k):v for k,v in dicAdaB.items()}
dicGradB = {int(k):v for k,v in dicGradB.items()}
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN = dfKNN.T
dfSVC = dfSVC.T
dfGausNB = dfGausNB.T
dfMLP = dfMLP.T
dfLR = dfLR.T
dfLDA = dfLDA.T
dfQDA = dfQDA.T
dfRF = dfRF.T
dfExtraT = dfExtraT.T
dfAdaB = dfAdaB.T
dfGradB = dfGradB.T
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_params = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_params
def PreprocessingParamSep():
dicKNN = json.loads(allParametersPerformancePerModel[1])
dicSVC = json.loads(allParametersPerformancePerModel[10])
dicGausNB = json.loads(allParametersPerformancePerModel[19])
dicMLP = json.loads(allParametersPerformancePerModel[28])
dicLR = json.loads(allParametersPerformancePerModel[37])
dicLDA = json.loads(allParametersPerformancePerModel[46])
dicQDA = json.loads(allParametersPerformancePerModel[55])
dicRF = json.loads(allParametersPerformancePerModel[64])
dicExtraT = json.loads(allParametersPerformancePerModel[73])
dicAdaB = json.loads(allParametersPerformancePerModel[82])
dicGradB = json.loads(allParametersPerformancePerModel[91])
dicKNN = dicKNN['params']
dicSVC = dicSVC['params']
dicGausNB = dicGausNB['params']
dicMLP = dicMLP['params']
dicLR = dicLR['params']
dicLDA = dicLDA['params']
dicQDA = dicQDA['params']
dicRF = dicRF['params']
dicExtraT = dicExtraT['params']
dicAdaB = dicAdaB['params']
dicGradB = dicGradB['params']
dicKNN = {int(k):v for k,v in dicKNN.items()}
dicSVC = {int(k):v for k,v in dicSVC.items()}
dicGausNB = {int(k):v for k,v in dicGausNB.items()}
dicMLP = {int(k):v for k,v in dicMLP.items()}
dicLR = {int(k):v for k,v in dicLR.items()}
dicLDA = {int(k):v for k,v in dicLDA.items()}
dicQDA = {int(k):v for k,v in dicQDA.items()}
dicRF = {int(k):v for k,v in dicRF.items()}
dicExtraT = {int(k):v for k,v in dicExtraT.items()}
dicAdaB = {int(k):v for k,v in dicAdaB.items()}
dicGradB = {int(k):v for k,v in dicGradB.items()}
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN = dfKNN.T
dfSVC = dfSVC.T
dfGausNB = dfGausNB.T
dfMLP = dfMLP.T
dfLR = dfLR.T
dfLDA = dfLDA.T
dfQDA = dfQDA.T
dfRF = dfRF.T
dfExtraT = dfExtraT.T
dfAdaB = dfAdaB.T
dfGradB = dfGradB.T
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
return [dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered]
def preProcessPerClassM():
dicKNN = json.loads(allParametersPerformancePerModel[2])
dicSVC = json.loads(allParametersPerformancePerModel[11])
dicGausNB = json.loads(allParametersPerformancePerModel[20])
dicMLP = json.loads(allParametersPerformancePerModel[29])
dicLR = json.loads(allParametersPerformancePerModel[38])
dicLDA = json.loads(allParametersPerformancePerModel[47])
dicQDA = json.loads(allParametersPerformancePerModel[56])
dicRF = json.loads(allParametersPerformancePerModel[65])
dicExtraT = json.loads(allParametersPerformancePerModel[74])
dicAdaB = json.loads(allParametersPerformancePerModel[83])
dicGradB = json.loads(allParametersPerformancePerModel[92])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatParams = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_concatParams
def preProcessFeatAcc():
dicKNN = json.loads(allParametersPerformancePerModel[3])
dicSVC = json.loads(allParametersPerformancePerModel[12])
dicGausNB = json.loads(allParametersPerformancePerModel[21])
dicMLP = json.loads(allParametersPerformancePerModel[30])
dicLR = json.loads(allParametersPerformancePerModel[39])
dicLDA = json.loads(allParametersPerformancePerModel[48])
dicQDA = json.loads(allParametersPerformancePerModel[57])
dicRF = json.loads(allParametersPerformancePerModel[66])
dicExtraT = json.loads(allParametersPerformancePerModel[75])
dicAdaB = json.loads(allParametersPerformancePerModel[84])
dicGradB = json.loads(allParametersPerformancePerModel[93])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_featAcc = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_featAcc
def preProcessPerm():
dicKNN = json.loads(allParametersPerformancePerModel[4])
dicSVC = json.loads(allParametersPerformancePerModel[13])
dicGausNB = json.loads(allParametersPerformancePerModel[22])
dicMLP = json.loads(allParametersPerformancePerModel[31])
dicLR = json.loads(allParametersPerformancePerModel[40])
dicLDA = json.loads(allParametersPerformancePerModel[49])
dicQDA = json.loads(allParametersPerformancePerModel[58])
dicRF = json.loads(allParametersPerformancePerModel[67])
dicExtraT = json.loads(allParametersPerformancePerModel[76])
dicAdaB = json.loads(allParametersPerformancePerModel[85])
dicGradB = json.loads(allParametersPerformancePerModel[94])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_perm = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_perm
def preProcessFeatSc():
dicKNN = json.loads(allParametersPerformancePerModel[5])
dfKNN = pd.DataFrame.from_dict(dicKNN)
return dfKNN
# TODO: this helper may be redundant; consider removing it
def preProcsumPerMetric(factors):
sumPerClassifier = []
loopThroughMetrics = PreprocessingMetrics()
loopThroughMetrics = loopThroughMetrics.fillna(0)
loopThroughMetrics.loc[:, 'log_loss'] = 1 - loopThroughMetrics.loc[:, 'log_loss']
for row in loopThroughMetrics.iterrows():
rowSum = 0
name, values = row
for loop, elements in enumerate(values):
rowSum = elements*factors[loop] + rowSum
if sum(factors) == 0:
sumPerClassifier = 0
else:
sumPerClassifier.append(rowSum/sum(factors) * 100)
return sumPerClassifier
def preProcMetricsAllAndSel():
loopThroughMetrics = PreprocessingMetrics()
loopThroughMetrics = loopThroughMetrics.fillna(0)
global factors
metricsPerModelColl = []
metricsPerModelColl.append(loopThroughMetrics['mean_test_accuracy'])
metricsPerModelColl.append(loopThroughMetrics['geometric_mean_score_micro'])
metricsPerModelColl.append(loopThroughMetrics['geometric_mean_score_macro'])
metricsPerModelColl.append(loopThroughMetrics['geometric_mean_score_weighted'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_precision_micro'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_precision_macro'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_precision_weighted'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_recall_micro'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_recall_macro'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_recall_weighted'])
metricsPerModelColl.append(loopThroughMetrics['f5_micro'])
metricsPerModelColl.append(loopThroughMetrics['f5_macro'])
metricsPerModelColl.append(loopThroughMetrics['f5_weighted'])
metricsPerModelColl.append(loopThroughMetrics['f1_micro'])
metricsPerModelColl.append(loopThroughMetrics['f1_macro'])
metricsPerModelColl.append(loopThroughMetrics['f1_weighted'])
metricsPerModelColl.append(loopThroughMetrics['f2_micro'])
metricsPerModelColl.append(loopThroughMetrics['f2_macro'])
metricsPerModelColl.append(loopThroughMetrics['f2_weighted'])
metricsPerModelColl.append(loopThroughMetrics['matthews_corrcoef'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_roc_auc_ovo_weighted'])
metricsPerModelColl.append(loopThroughMetrics['log_loss'])
    f = lambda a: (abs(a) + a) / 2  # clamp negative values (used for the Matthews corrcoef) to zero
for index, metric in enumerate(metricsPerModelColl):
if (index == 19):
metricsPerModelColl[index] = ((f(metric))*factors[index]) * 100
elif (index == 21):
metricsPerModelColl[index] = ((1 - metric)*factors[index] ) * 100
else:
metricsPerModelColl[index] = (metric*factors[index]) * 100
metricsPerModelColl[index] = metricsPerModelColl[index].to_json()
return metricsPerModelColl
def preProceModels():
models = KNNModels + SVCModels + GausNBModels + MLPModels + LRModels + LDAModels + QDAModels + RFModels + ExtraTModels + AdaBModels + GradBModels
return models
def FunMDS (data):
mds = MDS(n_components=2, random_state=RANDOM_SEED)
XTransformed = mds.fit_transform(data).T
XTransformed = XTransformed.tolist()
return XTransformed
def FunTsne (data):
tsne = TSNE(n_components=2, random_state=RANDOM_SEED).fit_transform(data)
tsne.shape
return tsne
def FunUMAP (data):
trans = umap.UMAP(n_neighbors=15, random_state=RANDOM_SEED).fit(data)
Xpos = trans.embedding_[:, 0].tolist()
Ypos = trans.embedding_[:, 1].tolist()
return [Xpos,Ypos]
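# FunMDS, FunTsne and FunUMAP each project the per-model metric vectors (or averaged probability
# vectors) down to two dimensions; the frontend uses these coordinates for the model-space and
# prediction-space scatterplots.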
def InitializeEnsemble():
XModels = PreprocessingMetrics()
global ModelSpaceMDS
global ModelSpaceTSNE
global allParametersPerformancePerModel
global impDataInst
XModels = XModels.fillna(0)
ModelSpaceMDS = FunMDS(XModels)
ModelSpaceTSNE = FunTsne(XModels)
ModelSpaceTSNE = ModelSpaceTSNE.tolist()
ModelSpaceUMAP = FunUMAP(XModels)
PredictionProbSel = PreprocessingPred()
PredictionSpaceMDS = FunMDS(PredictionProbSel)
PredictionSpaceTSNE = FunTsne(PredictionProbSel)
PredictionSpaceTSNE = PredictionSpaceTSNE.tolist()
PredictionSpaceUMAP = FunUMAP(PredictionProbSel)
ModelsIDs = preProceModels()
impDataInst = processDataInstance(ModelsIDs,allParametersPerformancePerModel)
callPreResults()
key = 0
EnsembleModel(ModelsIDs, key)
ReturnResults(ModelSpaceMDS,ModelSpaceTSNE,ModelSpaceUMAP,PredictionSpaceMDS,PredictionSpaceTSNE,PredictionSpaceUMAP)
def processDataInstance(ModelsIDs, allParametersPerformancePerModel):
dicKNN = json.loads(allParametersPerformancePerModel[8])
dicKNN = json.loads(dicKNN)
dicSVC = json.loads(allParametersPerformancePerModel[17])
dicSVC = json.loads(dicSVC)
dicGausNB = json.loads(allParametersPerformancePerModel[26])
dicGausNB = json.loads(dicGausNB)
dicMLP = json.loads(allParametersPerformancePerModel[35])
dicMLP = json.loads(dicMLP)
dicLR = json.loads(allParametersPerformancePerModel[44])
dicLR = json.loads(dicLR)
dicLDA = json.loads(allParametersPerformancePerModel[53])
dicLDA = json.loads(dicLDA)
dicQDA = json.loads(allParametersPerformancePerModel[62])
dicQDA = json.loads(dicQDA)
dicRF = json.loads(allParametersPerformancePerModel[71])
dicRF = json.loads(dicRF)
dicExtraT = json.loads(allParametersPerformancePerModel[80])
dicExtraT = json.loads(dicExtraT)
dicAdaB = json.loads(allParametersPerformancePerModel[89])
dicAdaB = json.loads(dicAdaB)
dicGradB = json.loads(allParametersPerformancePerModel[98])
dicGradB = json.loads(dicGradB)
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
    dfQDA = pd.DataFrame.from_dict(dicQDA)
import numpy as np
import pandas as pd
from aristote.feature_extraction.tfidf import Tfidf
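# Tfidf is aristote's TF-IDF feature extractor; judging from the expected values below it appears
# to wrap scikit-learn's TfidfVectorizer (an assumption, not verified here).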
class TestTfIdf(object):
def test_transform(self):
tfidf = Tfidf()
docs = ['where you from', 'where are you']
tfidf.fit_model(documents=docs)
data = tfidf.transform(document=docs)
t_data = np.array(
[[0., 0.70490949, 0.50154891, 0.50154891], [0.70490949, 0., 0.50154891, 0.50154891]]
)
t_data = [[round(x, 3) for x in xx] for xx in t_data]
data = [[round(x, 3) for x in xx] for xx in data]
assert data == t_data
def test_get_features_name(self):
tfidf = Tfidf()
docs = ['where you from', 'where are you']
tfidf.fit_model(documents=docs)
columns = tfidf.get_features_name
t_columns = ['are', 'from', 'where', 'you']
assert columns == t_columns
def test_end_to_end(self):
tfidf = Tfidf()
docs = ['where you from', 'where are you']
tfidf.fit_model(documents=docs)
data = tfidf.transform(document=docs)
columns = tfidf.get_features_name
data = [[round(x, 3) for x in xx] for xx in data]
df = pd.DataFrame(data)
df.columns = columns
t_data = np.array(
[[0., 0.70490949, 0.50154891, 0.50154891], [0.70490949, 0., 0.50154891, 0.50154891]]
)
t_columns = ['are', 'from', 'where', 'you']
t_data = [[round(x, 3) for x in xx] for xx in t_data]
t_df = | pd.DataFrame(t_data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""arima_btc_monthly.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1UD3t9q6t9vDNTdDfGC1NrS46XNE15xvi
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.graph_objects as go
from matplotlib.pyplot import figure
from datetime import datetime
from google.colab import drive
drive.mount('/content/drive')
BTC_monthly = pd.read_csv('/content/drive/Shareddrives/Crypto SP500 /data for arima/BTC-USD-monthly.csv')
BTC_monthly['Date'] = pd.to_datetime(BTC_monthly['Date'], infer_datetime_format=True)
BTC_monthly
line = plt.plot(BTC_monthly['Date'],BTC_monthly['Close'], 'green',label = 'BTC')
plt.title('BTC-usd price')
plt.legend()
plt.xticks(rotation = -45)
plt.xlabel("Date")
plt.ylabel("BTC price")
plt.show()
"""https://www.kaggle.com/freespirit08/time-series-for-beginners-with-arima
reference
"""
# Commented out IPython magic to ensure Python compatibility.
from datetime import datetime
import numpy as np #for numerical computations like log,exp,sqrt etc
import pandas as pd #for reading & storing data, pre-processing
import matplotlib.pylab as plt #for visualization
#for making sure matplotlib plots are generated in Jupyter notebook itself
# %matplotlib inline
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.stattools import acf, pacf
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.arima_model import ARIMA
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 10, 6
BTC_monthly.drop(columns=(['Open','High','Low','Adj Close','Volume']),axis=1,inplace=True)
indexedBTC_monthly=BTC_monthly.set_index(['Date'])
indexedBTC_monthly
# From the plot below, we can see that there is a trend component in the series.
# Hence, we now check the data for stationarity.
#Determine rolling statistics
rolmean = indexedBTC_monthly.rolling(window=12).mean() #window size 12 denotes 12 months, giving rolling mean at yearly level
rolstd = indexedBTC_monthly.rolling(window=12).std()
print(rolmean,rolstd)
#Plot rolling statistics
orig = plt.plot(indexedBTC_monthly, color='blue', label='Original Price')
mean = plt.plot(rolmean, color='red', label='Rolling Mean')
std = plt.plot(rolstd, color='black', label='Rolling Std')
plt.legend(loc='best') #upper left location
plt.title('Rolling Mean & Standard Deviation')
plt.show(block=False)
'''
"From the above graph, we see that rolling mean itself has a trend component even though
rolling standard deviation is fairly constant with time. For our time series to be stationary,
we need to ensure that both the rolling statistics ie: mean & std. dev. remain time invariant or constant with time.
Thus the curves for both of them have to be parallel to the x-axis, which in our case is not so.
To further support our hypothesis that the time series is not stationary, let us perform the ADF (Augmented Dickey-Fuller) test."
'''
#Perform Augmented Dickey–Fuller test:
print('Results of Dickey Fuller Test:')
dftest = adfuller(indexedBTC_monthly['Close'], autolag='AIC')
dfoutput = | pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used']) | pandas.Series |
import pandas as pd
import pyomo.environ as pe
import os
import shutil
class invsys:
def __init__(self,inp_folder='',dshed_cost=1000000,rshed_cost=500,vmin=0.8,vmax=1.2,sbase=100,ref_bus=0):
"""Initialise the investment problem.
:param str inp_folder: The input directory for the data. It expects to find several CSV files detailing the system input data (Default current folder)
:param float dshed_cost: Demand Shedding Price (Default 1000000)
:param float rshed_cost: Renewable Shedding Price (Default 500)
:param float vmin: Minimum node voltage (Default 0.8)
:param float vmax: Maximum node voltage (Default 1.2)
:param float sbase: Base Apparent Power (default 100 MVA)
:param int ref_bus: Reference node (Default 0)
:Example:
>>> import pyeplan
>>> sys_inv = pyeplan.invsys("3bus_inv")
"""
self.cgen = | pd.read_csv(inp_folder+os.sep+'cgen_dist.csv') | pandas.read_csv |
#! /usr/bin/python
import json
import altair
import pandas
import datetime
import bs4
import os
import csv
import statistics
import locale
#define constants
#TODO Clean up to remove duplicate calls for yesterday
workingDir = os.getcwd()
yesterdayDate = datetime.date.today() - datetime.timedelta(1)
yesterday = yesterdayDate.strftime('%Y-%m-%d')
yesterdayDay = yesterdayDate.day
yesterdayDayName = yesterdayDate.strftime("%A")
yesterdayMonth = yesterdayDate.month
yesterdayMonthName = yesterdayDate.strftime("%B")
yesterdayYear= yesterdayDate.year
yesterdayYearName = yesterdayDate.strftime("%Y")
locale.setlocale(locale.LC_ALL, 'en_CA')
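# ordinal(): map an integer to its ordinal string, e.g. ordinal(1) -> '1st', ordinal(2) -> '2nd', ordinal(11) -> '11th', ordinal(23) -> '23rd'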
ordinal = lambda n: "%d%s" % (n,"tsnrhtdd"[(n//10%10!=1)*(n%10<4)*n%10::4])
#load weather data
#weatherData = pandas.read_csv('weatherData'+counterList['VictoriaWeatherStation'][0]+'.csv',parse_dates=['Date']).set_index('Date')
weatherData = pandas.read_csv('weatherData114.csv',parse_dates=['Date']).set_index('Date')
#read in counters list
counterList = pandas.read_csv('countersToVisualize.csv',parse_dates=['FirstDate','FirstFullYear'], dtype={'VictoriaWeatherStation': str})
for index,row in counterList.iterrows():
print(row[['CounterID']][0])
counterName = row[['CounterName']][0]
counterStartDate = row[['FirstDate']][0]
#load data
countFile = "counts-" + str(row[['CounterID']][0]) + ".csv"
countExportFile = "counts-" + str(row[['CounterID']][0]) + "-export.csv"
countData = pandas.read_csv(countFile,parse_dates=['Date'])
specialDateFile = "specialDates.csv"
specialDateData = pandas.read_csv(specialDateFile,parse_dates=['Date'])
#setup counter map
mapHTML ="var countmap = L.map('counterMap').setView([" + row[['Location']][0] + "], 15);var marker = L.marker([" + row[['Location']][0] + "]).addTo(countmap);var background = L.tileLayer.provider('Stamen.Toner').addTo(countmap);"
#resample to daily count
dailyCount = countData.resample('D',on='Date').sum()
#determine total rides
totalRides = dailyCount['Count'].sum()
#remove all data from partial years
dailyCount = dailyCount.loc[dailyCount.index >= row[['FirstFullYear']][0]]
#add columns for weekly, monthly, and yearly cumulative total, plus weekday and day of the year
YearlyCumSum = dailyCount.groupby(dailyCount.index.to_period('y')).cumsum()
YearlyCumSum.rename(columns={'Count':'YearlyCumSum'}, inplace=True)
MonthlyCumSum = dailyCount.groupby(dailyCount.index.to_period('m')).cumsum()
MonthlyCumSum.rename(columns={'Count':'MonthlyCumSum'}, inplace=True)
WeeklyCumSum = dailyCount.groupby(dailyCount.index.strftime('%y%W')).cumsum()
WeeklyCumSum.rename(columns={'Count':'WeeklyCumSum'}, inplace=True)
dailyCount = pandas.merge(dailyCount,YearlyCumSum,on='Date')
dailyCount = pandas.merge(dailyCount,MonthlyCumSum,on='Date')
dailyCount = pandas.merge(dailyCount,WeeklyCumSum,on='Date')
dailyCount['WeekNum']=dailyCount.index.strftime('%W')
dailyCount['Weekday']=dailyCount.index.dayofweek
dailyCount['DayOfYear'] = dailyCount.index.dayofyear
#append weather data to dailyCount
dailyCount = pandas.merge(dailyCount,weatherData,on=['Date'])
#write that list out to a csv file
dailyCount.to_csv(countExportFile)
#get yesterdays count
yesterdayCount = dailyCount.loc[dailyCount.index==yesterday]['Count'][0]
yesterdayCountString = locale.format_string("%d",yesterdayCount, grouping=True)
#determine daily rank
dailyRankAll = dailyCount.loc[dailyCount['Count']>yesterdayCount]['Count'].size+1
dailyRankThisYear=dailyCount.loc[dailyCount.index.year==datetime.datetime.now().year].loc[dailyCount['Count']>yesterdayCount]['Count'].size+1
dailyRankDayOnly=dailyCount.loc[dailyCount.index.dayofweek==yesterdayDate.weekday()].loc[dailyCount['Count']>yesterdayCount]['Count'].size+1
dailyRankMonthOnly=dailyCount.loc[dailyCount.index.month==yesterdayMonth].loc[dailyCount['Count']>yesterdayCount]['Count'].size+1
#TODO = genericize
if dailyRankAll==1:
dailyRankAll=None
else:
dailyRankAll = str(ordinal(dailyRankAll)) + " "
if dailyRankThisYear==1:
dailyRankThisYear=None
else:
dailyRankThisYear = str(ordinal(dailyRankThisYear)) + " "
if dailyRankDayOnly==1:
dailyRankDayOnly=None
else:
dailyRankDayOnly = str(ordinal(dailyRankDayOnly)) + " "
if dailyRankMonthOnly==1:
dailyRankMonthOnly=None
else:
dailyRankMonthOnly = str(ordinal(dailyRankMonthOnly)) + " "
#Check if yesterday was anything special
try:
specialDateStringYesterday = "Yesterday was " + specialDateData[specialDateData['Date']==yesterdayDate].Event.iloc[0] + ""
except IndexError as error:
specialDateStringYesterday = None
#craft the string for yesterday
countString ="Yesterday saw " + yesterdayCountString + " bike rides,"
countStringYearlyRank = "".join(filter(None,("...", dailyRankThisYear,"busiest day of ",yesterdayYearName)))
countStringMonthlyRank = "".join(filter(None,("...", dailyRankMonthOnly,"busiest day in ",yesterdayMonthName)))
countStringDayRank = "".join(filter(None,("...", dailyRankDayOnly,"busiest ",yesterdayDayName)))
countStringOverallRank = "".join(filter(None,("...", dailyRankAll,"busiest day overall")))
#resample to monthly count
monthlyCount = countData.resample('M',on='Date').sum()
monthlyCount['Month'] = monthlyCount.index
monthlyCount = monthlyCount.loc[monthlyCount['Month'] >= '2015-01-31']
#Pull out the current month
currentMonth = pandas.DataFrame(dailyCount.loc[dailyCount.index.month==yesterdayMonth])
#check if we are ahead or behind the average monthly cumulative sum
#TODO genericize the function to deal with both yearly & monthly
yesterdayMonthCumSumMean=statistics.mean(dailyCount.loc[(dailyCount.index.month==yesterdayMonth) & (dailyCount.index.day==yesterdayDay)].MonthlyCumSum)
yesterdayMonthlyCumSum= | pandas.DataFrame(dailyCount.loc[dailyCount.index==yesterday]) | pandas.DataFrame |
# input files: train_image.csv, train_label.csv, test_image.csv
# output file: test_predictions.csv
# training: 10000 images
# Hyperparameters
NUM_INPUT = 784
NUM_H1 = 256
NUM_H2 = 128
NUM_H3 = 64
NUM_OUTPUT = 10
LEARNING = 0.01
BATCH_SIZE = 30
NUM_EPOCH = 30
import numpy as np
import pandas as pd
import sys
import matplotlib.pyplot as plt
import time
'''
Neural Network Model:
Input Layer: 784 (every pixel in 28x28 input image)
Hidden Layer1 (h1): NUM_H1 (relu activation function)
Hidden Layer2 (h2): NUM_H2 (relu activation function)
Hidden Layer3 (h3): NUM_H3 (relu activation function)
Output Layer: 10 (softmax function)
crossentropy error function
mini-batch SGD learning, lr = LEARNING (0.01)
'''
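# Shape sketch for a mini-batch of B samples: X (B, 784) -> h1 (B, 256) -> h2 (B, 128) -> h3 (B, 64) -> softmax output (B, 10)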
class NN:
def __init__(self):
# initialize and save weights using Xavier weight initialization
self.w1, self.b1 = self._init_weights(NUM_INPUT, NUM_H1)
self.w2, self.b2 = self._init_weights(NUM_H1, NUM_H2)
self.w3, self.b3 = self._init_weights(NUM_H2, NUM_H3)
self.w4, self.b4 = self._init_weights(NUM_H3, NUM_OUTPUT)
def _init_weights(self, num_input, num_output):
w = np.random.normal(0.0, 0.01, (num_input, num_output))
b = np.zeros([1,num_output])
return w,b
# num_samples -> num_samples x num_output
def one_hot_encoded(self, labels):
n = len(labels)
output = np.zeros((n, NUM_OUTPUT), dtype=float)
for i in range(n):
output[i][labels[i]] = 1.0
return output
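# e.g. labels [2, 0] with NUM_OUTPUT=10 become rows [0,0,1,0,0,0,0,0,0,0] and [1,0,0,0,0,0,0,0,0,0]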
# apply the function to each element in a matrix
def _sigmoid(self, x):
return np.piecewise(x,[x > 0],[lambda i: 1. / (1. + np.exp(-i)), lambda i: np.exp(i) / (1. + np.exp(i))],)
# 1d array
def _d_sigmoid(self, sigma):
return sigma * (1.0 - sigma)
def _relu(self, x):
return np.maximum(0.,x)
def _d_relu(self,x,z):
x[z<0] = 0.0
return x
# apply softmax to each instance
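# (subtracting the row-wise max before exponentiating keeps np.exp from overflowing; the softmax result is unchanged)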
def _softmax(self, x):
x_max = np.max(x, axis=1, keepdims=True)
exp = np.exp(x-x_max)
return exp / np.sum(exp, axis=1, keepdims=True)
# average cross loss
def cross_entropy_loss(self, a3, y):
m = y.shape[0]
a3 = np.clip(a3, 1e-12, None)
return -(1.0 / m) * np.sum(y * np.log(a3))
# forward propagation
def forward_propagation(self, X):
z1 = np.matmul(X, self.w1) + self.b1
a1 = self._relu(z1)
z2 = np.matmul(a1, self.w2) + self.b2
a2 = self._relu(z2)
z3 = np.matmul(a2, self.w3) + self.b3
a3 = self._relu(z3)
z4 = np.matmul(a3, self.w4) + self.b4
a4 = self._softmax(z4)
a = [a1, a2, a3, a4]
z = [z1, z2, z3, z4]
return a4, a, z
# back propagation
def back_propagation(self, X, Y, a, z):
m = X.shape[0]
a1, a2, a3, a4 = a[0], a[1], a[2], a[3]
z1, z2, z3, z4 = z[0], z[1], z[2], z[3]
dz4 = (1.0 / m)*(a4 - Y)
dw4 = np.dot(a3.T, dz4)
db4 = np.sum(dz4, axis=0, keepdims = True)
dz3 = np.dot(dz4, self.w4.T)
dz3 = self._d_relu(dz3, z3)
dw3 = np.dot(a2.T, dz3)
db3 = np.sum(dz3, axis=0, keepdims = True)
dz2 = np.dot(dz3, self.w3.T)
dz2 = self._d_relu(dz2, z2)
dw2 = np.dot(a1.T, dz2)
db2 = np.sum(dz2, axis=0, keepdims = True)
dz1 = np.dot(dz2, self.w2.T)
dz1 = self._d_relu(dz1, z1)
dw1 = np.dot(X.T, dz1)
db1 = np.sum(dz1, axis=0, keepdims = True)
dw = [dw1, dw2, dw3, dw4]
db = [db1, db2, db3, db4]
return dw, db
# update weights
def update(self, dw, db, learning_rate):
self.w1 = self.w1 - learning_rate * dw[0]
self.b1 = self.b1 - learning_rate* db[0]
self.w2 = self.w2 - learning_rate * dw[1]
self.b2 = self.b2 - learning_rate* db[1]
self.w3 = self.w3 - learning_rate * dw[2]
self.b3 = self.b3 - learning_rate* db[2]
self.w4 = self.w4 - learning_rate * dw[3]
self.b4 = self.b4 - learning_rate* db[3]
return
def train(self, inputs, labels, learning_rate=LEARNING, num_epoch=NUM_EPOCH, batch_size=BATCH_SIZE):
targets = self.one_hot_encoded(labels)
# normalize inputs
# inputs = inputs/np.linalg.norm(inputs, ord=2, axis=1, keepdims=True)
N = len(inputs)
indexes = [i for i in range(N)]
cost_list = []
for e in range(num_epoch):
# shuffle training data
np.random.shuffle(indexes)
Y_hat_epoch = np.zeros_like(targets)
predictions = np.zeros_like(labels)
curr_start = 0
while curr_start < N:
curr_end = min(curr_start + batch_size, N)
X = inputs[indexes[curr_start:curr_end]]
Y = targets[indexes[curr_start:curr_end]]
# forward
y_hat, a, z = self.forward_propagation(X)
Y_hat_epoch[curr_start:curr_end] = y_hat
predictions[curr_start:curr_end] = self.output_class(y_hat).flatten()
# back
dw, db = self.back_propagation(X, Y, a, z)
# update parameter with average gradients in a mini-batch
self.update(dw, db, learning_rate)
curr_start += batch_size
Y_epoch = targets[indexes]
cost = self.cross_entropy_loss(Y_hat_epoch, Y_epoch)
accurate = 0.
for i in range(len(labels)):
if labels[indexes[i]] == predictions[i]:
accurate += 1.
accuracy = accurate / len(labels)
print("At {}th epoch, cost is {}, train accuracy is {}".format(e + 1, cost, accuracy))
return
# num_output*10 --> num_output
def output_class(self, nn_output):
return np.argmax(nn_output, axis=1)
# given list of inputs output list of predicted classes
def test(self, X):
# normalize inputs
# X = X/np.linalg.norm(X, ord=2, axis=1, keepdims=True)
nn_output, a,z = self.forward_propagation(X)
return self.output_class(nn_output)
# read image
def read_image_file(filename):
df = pd.read_csv(filename, header = None)
return df.values
#return df.to_numpy()
# read labels
def read_label_file(filename):
df = pd.read_csv(filename, header = None)
return df.values.flatten()
#return df.to_numpy().flatten()
# write output, output is an array of classes
def write_output_file(filename, output):
output_2d = output.reshape((len(output),-1))
df = | pd.DataFrame(output_2d) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Tanh Kernel Approximation
"""
import os
import random
import pandas as pd
import math
import datetime
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import mean_squared_error
from scipy.stats import norm
from QMC import halton
def baseline_eval_third(x, G, m):
"""Calculate the result of baseline random feature mapping
Parameters
----------
x: array, dimension = d
The data point to input to the baseline mapping
G: matrix, dimension = m*d
The matrix in the baseline random feature mapping
m: integer
The number of dimension that we want to reduce to
"""
return (1/m**0.5) * np.tanh(np.dot(G, x))
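# Usage sketch with hypothetical shapes: for data dimension d and target dimension m,
# draw a Gaussian matrix G of shape (m, d) and map a point x of length d to an m-dimensional feature.
#   d, m = 10, 64
#   G = np.random.normal(size=(m, d))
#   x = np.random.normal(size=d)
#   z = baseline_eval_third(x, G, m)   # z.shape == (m,)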
def gram_schmidt_columns(X):
'''
Using QR decomposition to obtain an orthogonal matrix.
Parameters
----------
X : matrix, dimension = m * d, where m <= d
Random feature matrix with l2 normalized row.
Returns
-------
Q : matrix, dimension = m * d, where m <= d
Orthogonal random feature matrix with l2 normalized row.
'''
Q, R = np.linalg.qr(X)
return Q
def orthgonalize(V):
'''
Generate matrix with multiple orthogonal blocks
Parameters
----------
V : matrix, dimension = m * d, where m > d
Random feature matrix with l2 normalized row.
Returns
-------
V_ : matrix, dimension = m * d
Random feature matrix with l2 normalized row and multiple
blocks.
'''
N = V.shape[0]
d = V.shape[1]
turns = int(N/d)
remainder = N%d
V_ = np.zeros_like(V)
for i in range(turns):
v = gram_schmidt_columns(V[i*d:(i+1)*d, :].T).T
V_[i*d:(i+1)*d, :] = v
if remainder != 0:
V_[turns*d:,:] = gram_schmidt_columns(V[turns*d:,:].T).T
return V_
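# e.g. for V of shape (30, 10): turns = 3, remainder = 0, and the rows are orthogonalized in three separate 10x10 blocks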
def find_sigma(random_sample):
'''
Find an appropriate scaling parameter for the kernel value.
Parameters
----------
random_sample : list
Store some samples from the dataset.
Returns
-------
float
Average of all 50th smallest distances.
'''
all_distances = []
for i in range(len(random_sample)):
#print(f'Calculating the distance of {i}th samples')
distances = []
for j in range(len(random_sample)):
if j!=i:
distances.append(np.linalg.norm(random_sample[i] - random_sample[j]))
distances.sort()
all_distances.append(distances[50])
return np.mean(all_distances)
def random_rotation(matrix, G_ortho):
'''
Perform random rotation.
Parameters
----------
matrix : matrix
The matrix to rotate.
G_ortho : matrix
The matrix for .
Returns
-------
result : TYPE
DESCRIPTION.
'''
result = np.zeros_like(matrix)
m = matrix.shape[0]
d = matrix.shape[1]
for i in range(m):
result[i, :] = np.dot(G_ortho[:d], matrix[i, :])
return result
def read_in_nomc_matrices(d, d_):
'''
Read in pre-calculated near orthogonal random matrices.
Parameters
----------
d : int
Dimension of data features.
d_ : int
Number of multipliers.
Returns
-------
all_V : list
A list of NOMC matrices.
'''
all_V = []
for N in [d*i for i in range(1,d_+1)]:
V_s = []
for m in range(500):
try:
with open(f'{os.getcwd()}/data Apr13/N={N}/V_d=10_N={N}_iter=20000_m={m}.npy', 'rb') as f:
# The address above is subject to change
V = np.load(f)
V_s.append(V)
except:
pass
all_V.append(V_s)
return all_V
def generate_data(d):
'''
Data generator. Generate data for kernel approximation
Parameters
----------
d : int
Dimension of data features.
Returns
-------
data : list
A list of features.
'''
letters = pd.read_csv('letter-recognition.csv')
letters = np.asarray(letters)
data = np.zeros((letters.shape[0], d))
for i in range(letters.shape[0]):
for j in range(1, data.shape[1]+1):
data[i,j-1] = letters[i,j]
data = list(data)
return data
def generate_halton_sequences(episode, epoch):
'''
Generate halton sequences
Parameters
----------
episode: int
Number of outer experiments.
epoch: int
Number of inner experiments.
Returns
-------
all_halton : list
A list of halton sequences.
'''
all_halton = []
for i in tqdm(range(1, epoch * episode + 1), position = 0, leave = True):
all_halton.append(halton(i, 10))
return all_halton
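# halton(i, 10) is assumed (from the local QMC module) to return the i-th point of a 10-dimensional Halton sequence in (0, 1)^10;
# norm.ppf later maps these low-discrepancy points to Gaussian quasi-random directions in generate_qmc_features.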
def generate_qmc_features(d, d_, epoch, all_halton):
'''
Generate random features using Quasi Monte Carlo, which leverages
the halton sequences
Parameters
----------
d : int
Dimension of data features.
d_ : int
Number of multipliers.
epoch : int
Number of inner experiments.
all_halton : list
A list of halton sequences.
Returns
-------
all_V_qmc : list
A list (one entry per multiplier) of lists of QMC random feature matrices.
'''
all_V_qmc = []
for n in [d*i for i in range(1,d_+1)]:
V_qmc = []
for i in range(epoch):
V_qmc.append(norm.ppf(all_halton[n*i: n*(i+1)]))
all_V_qmc.append(V_qmc)
return all_V_qmc
def plot(episode, d_, MSE_iid_, MSE_qmc_, MSE_orthog_, MSE_max_ang_):
'''
Plot the MSE at each multiplier of all methods.
Parameters
----------
episode : int
Number of outer experiments.
d_ : int
Number of multipliers.
MSE_iid_ : list
MSEs of MC in all experiments.
MSE_qmc_ : list
MSEs of QMC in all experiments.
MSE_orthog_ : list
MSEs of BOMC in all experiments.
MSE_max_ang_ : list
MSEs of NOMC in all experiments.
Returns
-------
None.
'''
x1 = range(1, d_+1)
x1 = x1 + np.zeros((episode, d_))
x1 = np.sort(x1.reshape(episode*d_,))
mse_iid_ = np.asarray(MSE_iid_).reshape(-1)
category = ['iid' for i in range(episode*d_)]
df1 = pd.DataFrame({'D/d': x1, 'MSE':mse_iid_, 'Category':category})
mse_qmc_ = np.asarray(MSE_qmc_).reshape(-1)
category = ['QMC' for i in range(episode*d_)]
df2 = pd.DataFrame({'D/d': x1, 'MSE':mse_qmc_, 'Category':category})
mse_ortho_ = np.asarray(MSE_orthog_).reshape(-1)
category = ['OG' for i in range(episode*d_)]
df3 = | pd.DataFrame({'D/d': x1, 'MSE':mse_ortho_, 'Category':category}) | pandas.DataFrame |
import math
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from opacus import PrivacyEngine, utils, autograd_grad_sample
try:
from .dpctgan import DPCTGAN
from .patectgan import PATECTGAN
except:
import logging
logger = logging.getLogger(__name__)
logger.warning('Requires "pip install ctgan" for DPCTGAN')
from .privacy_utils import weights_init, pate, moments_acc
class Generator(nn.Module):
def __init__(self, latent_dim, output_dim, binary=True):
super(Generator, self).__init__()
def block(in_, out, Activation):
return nn.Sequential(
nn.Linear(in_, out, bias=False),
nn.LayerNorm(out),
Activation(),
)
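# each block is Linear -> LayerNorm -> activation; forward() below adds skip connections around the first two blocks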
self.layer_0 = block(latent_dim, latent_dim, nn.Tanh if binary else lambda: nn.LeakyReLU(0.2))
self.layer_1 = block(latent_dim, latent_dim, nn.Tanh if binary else lambda: nn.LeakyReLU(0.2))
self.layer_2 = block(latent_dim, output_dim, nn.Tanh if binary else lambda: nn.LeakyReLU(0.2))
def forward(self, noise):
noise = self.layer_0(noise) + noise
noise = self.layer_1(noise) + noise
noise = self.layer_2(noise)
return noise
class Discriminator(nn.Module):
def __init__(self, input_dim, wasserstein=False):
super(Discriminator, self).__init__()
self.model = nn.Sequential(
nn.Linear(input_dim, 2*input_dim // 3),
nn.LeakyReLU(0.2),
nn.Linear(2*input_dim // 3, input_dim // 3),
nn.LeakyReLU(0.2),
nn.Linear(input_dim // 3, 1)
)
if not wasserstein:
self.model.add_module("activation", nn.Sigmoid())
def forward(self, x):
return self.model(x)
class DPGAN:
def __init__(self,
binary=False,
latent_dim=64,
batch_size=64,
epochs=1000,
delta=1e-5,
epsilon=1.0):
self.binary = binary
self.latent_dim = latent_dim
self.batch_size = batch_size
self.epochs = epochs
self.delta = delta
self.epsilon = epsilon
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.pd_cols = None
self.pd_index = None
def train(self, data, categorical_columns=None, ordinal_columns=None, update_epsilon=None):
if update_epsilon:
self.epsilon = update_epsilon
if isinstance(data, pd.DataFrame):
for col in data.columns:
data[col] = | pd.to_numeric(data[col], errors='ignore') | pandas.to_numeric |
# DGCNN classification of microbial correlation networks and
# node importance calculation
import stellargraph as sg
try:
sg.utils.validate_notebook_version("1.2.1")
except AttributeError:
raise ValueError(
f"This notebook requires StellarGraph version 1.2.1, but a different version {sg.__version__} is installed. Please see <https://github.com/stellargraph/stellargraph/issues/1172>."
) from None
import pandas as pd
import numpy as np
from stellargraph.mapper import PaddedGraphGenerator
from stellargraph.layer import DeepGraphCNN
from stellargraph import StellarGraph
from sklearn import model_selection
from tensorflow.keras import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Dense, Conv1D, MaxPool1D, Dropout, Flatten
from tensorflow.keras.losses import binary_crossentropy, categorical_crossentropy
from tensorflow.keras.metrics import Precision, Recall, AUC
import tensorflow as tf
import pickle
def read_graphs2(W, node_features=None):
"""Read graphs into list of StellarGraph instances
Args:
W: dataframe of graphs with 4 columns: graph_id, source, target, weight
node_features: node feature matrix, if None will use identity
"""
out = list()
gid0 = W.iloc[0].at['graph_id']
g0 = W[W.graph_id == gid0]
nodes = list(set(g0.source).union(set(g0.target)))
if not node_features:
node_features = sg.IndexedArray(np.identity(len(nodes)), index=list(nodes))
for _, g in W.groupby('graph_id'):
out.append(StellarGraph(nodes=node_features,
edges=g.drop(columns='graph_id'),
node_type_default='microbe',
edge_type_default='correlation'))
# Check all graphs have the same number of nodes
nn = [g.number_of_nodes() for g in out]
if not all(nn[0] == x for x in nn):
raise ValueError(
"Not all graphs have same number of nodes."
)
return out
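# Usage sketch (mirrors main() below): graphs = read_graphs2(pd.read_csv('W_mbqc_ctrl.csv', dtype={'graph_id': 'int'}))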
def train_dgcnn(graphs, graph_labels, n_epochs=50):
""" Build and train DGCNN model """
generator = PaddedGraphGenerator(graphs=graphs)
k = graphs[0].number_of_nodes() # the number of rows for the output tensor, no truncation
# done here because all graphs have same number of nodes
layer_sizes = [32, 32, 32, 1]
dgcnn_model = DeepGraphCNN(
layer_sizes=layer_sizes,
activations=["tanh", "tanh", "tanh", "tanh"],
k=k,
bias=False,
generator=generator,
)
x_inp, x_out = dgcnn_model.in_out_tensors()
x_out = Conv1D(filters=16, kernel_size=sum(layer_sizes), strides=sum(layer_sizes))(x_out)
x_out = MaxPool1D(pool_size=2)(x_out)
x_out = Conv1D(filters=32, kernel_size=5, strides=1)(x_out)
x_out = Flatten()(x_out)
x_out = Dense(units=128, activation="relu")(x_out)
x_out = Dropout(rate=0.5)(x_out)
predictions = Dense(units=1, activation="sigmoid")(x_out)
model = Model(inputs=x_inp, outputs=predictions)
model.compile(
optimizer=Adam(learning_rate=0.0001), loss=binary_crossentropy,
metrics=["accuracy", Precision(name='precision'), Recall(name='recall'), AUC(name='auc')],
)
train_graphs, test_graphs = model_selection.train_test_split(
graph_labels, train_size=0.8, test_size=None, stratify=graph_labels
)
gen = PaddedGraphGenerator(graphs=graphs)
# If symmetric normalization is used, problems arise with negative degree values (because of negative correlations),
# since the square root of a negative degree cannot be taken.
train_gen = gen.flow(
list(train_graphs.index),
targets=train_graphs.values,
batch_size=50,
symmetric_normalization=False,
weighted=True,
)
test_gen = gen.flow(
list(test_graphs.index),
targets=test_graphs.values,
batch_size=1,
symmetric_normalization=False,
weighted=True,
)
history = model.fit(
train_gen, epochs=n_epochs, verbose=0, validation_data=test_gen, shuffle=True
)
# Print test set metrics
test_metrics = model.evaluate(test_gen, verbose=0)
print(f'Test Set Metrics: ')
for name, val in zip(model.metrics_names, test_metrics):
print("\t{}: {:0.4f}".format(name, val))
return model, test_metrics, history
class ImportanceDGCNN:
def __init__(self, W, model, node_features=None):
"""Initialize object for computing importance in the DGCNN graph classification model
Args:
W: dataframe of graphs with 4 columns: graph_id, source, target, weight
node_features: used to build StellarGraph graph instance, same as used in read_graphs
model: the trained keras model of DGCNN
"""
# Take any graph from W to find its nodes and edges
gid0 = W.iloc[0].at['graph_id']
g0 = W[W['graph_id'] == gid0]
self.nodes = list(set(g0.source).union(set(g0.target)))
self.edges = list(zip(g0.source, g0.target))
self.ngraphs = W.groupby('graph_id').ngroups
self.model = model
if not node_features:
node_features = sg.IndexedArray(np.identity(len(self.nodes)), index=list(self.nodes))
self._W = W
self._node_features = node_features
# Check if all graphs have same set of edges
for _, g in W.groupby('graph_id'):
if set(zip(g.source, g.target)) != set(self.edges):
raise ValueError("Not all graphs have the same set of edges. This case is not implemented.")
def _null_edge_graphs(self, val=0):
""" Generator of StellarGraph graphs with exactly one edge set to 'val' (default 0)
"""
for src, tar in self.edges:
cond = (self._W['source'] == src) & (self._W['target'] == tar)
W2 = self._W.copy()
W2['weight'].mask(cond, val, inplace=True) # set the weight of the selected edge to 'val' (default 0)
for _, g in W2.groupby('graph_id'):
yield StellarGraph(nodes=self._node_features, edges=g.drop(columns='graph_id'),
node_type_default='microbe', edge_type_default='correlation')
def _null_node_graphs(self):
""" Generator of StellarGraph graphs with all edges incident to a node set to 0
"""
for n in self.nodes:
cond = (self._W['source'] == n) | (self._W['target'] == n)
W2 = self._W.copy()
W2['weight'].mask(cond, 0, inplace=True)
for _, g in W2.groupby('graph_id'):
yield StellarGraph(nodes=self._node_features, edges=g.drop(columns='graph_id'),
node_type_default='microbe', edge_type_default='correlation')
def _null_2nodes_graphs(self):
"""Generator of StellarGraph graphs with all edges incident to two nodes set to 0
"""
for n1, n2 in self.edges:
cond1 = (self._W['source'] == n1) | (self._W['target'] == n1)
cond2 = (self._W['source'] == n2) | (self._W['target'] == n2)
W2 = self._W.copy()
W2['weight'].mask(cond1 | cond2, 0, inplace=True)
for _, g in W2.groupby('graph_id'):
yield StellarGraph(nodes=self._node_features, edges=g.drop(columns='graph_id'),
node_type_default='microbe', edge_type_default='correlation')
def _null_nnodes_graphs(self, nlist):
""" Generator of StellarGraph graphs with all edges incident to n nodes set to 0,
Assume the first n-1 nodes are given as nlist, the generator then generates
graphs where each of the remaining len(self.nodes) - len(nlist) is added to
the given n-1 nodes, and edges linked to the resulting n nodes are set to 0.
"""
from functools import reduce
import operator
if not set(nlist).issubset(self.nodes):
raise ValueError("Not all provided nodes are found in the graph")
conds = [(self._W['source'] == nd) | (self._W['target'] == nd) for nd in nlist]
for n in self.nodes:
if n in nlist:
continue
combined_cond = conds + [(self._W['source'] == n) | (self._W['target'] == n)]
reduced_cond = reduce(operator.or_, combined_cond)
W2 = self._W.copy()
W2['weight'].mask(reduced_cond, 0, inplace=True)
for _, g in W2.groupby('graph_id'):
yield StellarGraph(nodes=self._node_features, edges=g.drop(columns='graph_id'),
node_type_default='microbe', edge_type_default='correlation')
@staticmethod
def _batch(iterable, n):
""" Generate prediction batch of size n, using the grouper idiom """
iters = [iter(iterable)] * n
return zip(*iters)
@staticmethod
def compute_lor(pred, P_new):
""" Compute log-odds ratio between new and original predicted probs
Args:
pred: prediction on the original graphs, output of model.predict(),
shape N-by-1, where N number of graph instances
P_new: prediction on new graphs, shape N-by-K, where K = number of
edges/nodes depending on edge or node importance
Returns:
numpy array same shape as P_new
"""
eps = 1e-6
lo1 = np.log(P_new+eps) - np.log(1-P_new+eps)
lo2 = np.log(pred+eps) - np.log(1-pred+eps)
return lo1 - lo2
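# Worked example with hypothetical probabilities: pred = 0.8 and P_new = 0.6 give
# log(0.6/0.4) - log(0.8/0.2) ~= 0.405 - 1.386 ~= -0.98, i.e. the knock-out lowers the predicted odds of the positive class.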
def read_sg(self):
""" Read graphs into list of StellarGraph instances """
out = list()
for _,g in self._W.groupby('graph_id'):
out.append(StellarGraph(nodes=self._node_features,
edges=g.drop(columns='graph_id'),
node_type_default='microbe',
edge_type_default='correlation'))
return out
def predict_graph(self, graphs):
"""Use the model to predict the probability of positive class
Args:
graphs: list of StellarGraph graph instances
"""
fl = PaddedGraphGenerator(graphs=graphs).flow(range(len(graphs)),
batch_size=len(graphs),
symmetric_normalization=False,
weighted=True)
return self.model.predict(fl)
def edge_imp(self, set_wt=0):
"""Calclate edge importance by change in log-odds between the original graph and one
where an edge weight is set to 'set_wt' (default 0)
"""
sg = self.read_sg()
pred = self.predict_graph(sg)
P_new = np.empty((self.ngraphs, len(self.edges)))
gen = self._null_edge_graphs(set_wt)
for i, bch in enumerate(ImportanceDGCNN._batch(gen, self.ngraphs)):
pred_new = self.predict_graph(list(bch)).reshape(-1)
P_new[:,i] = pred_new
print(f'{i}: EDGE {self.edges[i]} DONE.')
LR = ImportanceDGCNN.compute_lor(pred, P_new)
stats = self.summary_stats(LR, 'edge')
self.LR_edge, self.LR_edge_stats = LR, stats
return stats, LR
def node_imp(self):
"""Calclate node importance by change in log-odds between the original graph and one
where all edges linked to a node is set to 0.
"""
sg = self.read_sg()
pred = self.predict_graph(sg)
P_new = np.empty((self.ngraphs, len(self.nodes)))
gen = self._null_node_graphs()
for i, bch in enumerate(ImportanceDGCNN._batch(gen, self.ngraphs)):
pred_new = self.predict_graph(list(bch)).reshape(-1)
P_new[:,i] = pred_new
print(f'{i}: NODE {self.nodes[i]} DONE.')
LR = ImportanceDGCNN.compute_lor(pred, P_new)
stats = self.summary_stats(LR, 'node')
self.LR_node, self.LR_node_stats = LR, stats
return stats, LR
def node_pair_imp(self):
"""Calculate node pair importance by knocking out each pair of nodes
"""
sg = self.read_sg()
pred = self.predict_graph(sg)
P_new = np.empty((self.ngraphs, len(self.edges)))
gen = self._null_2nodes_graphs()
for i, bch in enumerate(ImportanceDGCNN._batch(gen, self.ngraphs)):
pred_new = self.predict_graph(list(bch)).reshape(-1)
P_new[:,i] = pred_new
print(f'{i}: NODES {self.edges[i][0], self.edges[i][1]} DONE.')
LR = ImportanceDGCNN.compute_lor(pred, P_new)
stats = self.summary_stats(LR, 'node2')
self.LR_node2, self.LR_node2_stats = LR, stats
return stats, LR
def nnode_imp(self, n):
"""Calculate n-node importance by knocking out n nodes, using a greedy search
strategy where after the first node resulting in maximum change in log-odds
is found, the node from the remaining node set resulting in maximum change
in log-odds is added to form a 2-node set, and so on until n nodes are added
Returns:
List of tuples. The first component is the names of knocked-out nodes,
the second component is LR of shape (n_graphs, n_nodes-k+1),
with n_nodes-k+1 equal to the length of the first component.
Return all k-node importance from k = 1 to n
"""
sg = self.read_sg()
pred = self.predict_graph(sg)
n_full = list(self.nodes)
nlist = []
out = []
for k in range(1, n+1):
P_new = np.empty((self.ngraphs, len(self.nodes)-k+1))
if k == 1:
gen = self._null_node_graphs()
else:
gen = self._null_nnodes_graphs(nlist)
for i, bch in enumerate(ImportanceDGCNN._batch(gen, self.ngraphs)):
pred_new = self.predict_graph(list(bch)).reshape(-1)
P_new[:,i] = pred_new
# Find which node to add to nlist
LR = ImportanceDGCNN.compute_lor(pred, P_new)
maxi = np.argmax(np.median(np.abs(LR), axis=0)) # index of node with max median absolute LR
n_remain = [nn for nn in n_full if nn not in nlist]
out.append(([tuple(nlist + [x]) for x in n_remain], LR))
nlist = nlist + [n_remain[maxi]]
return out
def summary_stats(self, LR, which):
""" Get mean, median and std err of log-odds ratio """
lor_mean, lor_med = np.mean(LR, axis=0), np.median(LR, axis=0)
lor_std = np.std(LR, axis=0)
df = pd.DataFrame({'lor_mean': lor_mean,
'lor_med': lor_med,
'std_err': lor_std/np.sqrt(LR.shape[0])})
if which == 'edge':
df['source'] = [e[0] for e in self.edges]
df['target'] = [e[1] for e in self.edges]
if which == 'node':
df['node'] = self.nodes
if which == 'node2':
df['node1'] = [e[0] for e in self.edges]
df['node2'] = [e[1] for e in self.edges]
return df
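# Usage sketch, assuming W and a model as produced by read_graphs2/train_dgcnn above:
#   imp = ImportanceDGCNN(W, model)
#   node_stats, node_LR = imp.node_imp()
#   edge_stats, edge_LR = imp.edge_imp()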
def save_imp_res(node_res, edge_res=None, runid='run'):
with open('imp_' + runid + '.pkl', 'wb') as pickle_out:
pickle.dump(node_res, pickle_out) # serialize node importance result
if edge_res:
pickle.dump(edge_res, pickle_out)
# Main script
def main():
K = 10 # number of DGCNN runs
N_EPOCH = 100 # number of epochs per run
IMP_SUBSAMPLE = 0.3 # fraction of graphs sampled per DGCNN run used to calculate importance,
# decrease this if out-of-memory
N = 21 # up to N-node importance
W_ctrl = pd.read_csv('W_mbqc_ctrl.csv', dtype={'graph_id': 'int'})
W_case = pd.read_csv('W_mbqc_case.csv', dtype={'graph_id': 'int'})
g_ctrl = read_graphs2(W_ctrl)
g_case = read_graphs2(W_case)
graphs = g_ctrl + g_case
graph_labels = pd.Series(len(g_ctrl) * ['0'] + len(g_case) * ['1'])
graph_labels = | pd.get_dummies(graph_labels, drop_first=True) | pandas.get_dummies |
# coding: utf-8
# ### Import
# In[1]:
from bs4 import BeautifulSoup
import requests
import numpy as np
import pandas as pd
import xgboost
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
from sklearn.metrics import *
from IPython.core.display import Image
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix
from sklearn.tree import export_graphviz
import io
from sklearn.preprocessing import Imputer
import pydot
from sklearn import preprocessing
import lightgbm as lgb
from scipy.stats import mode
import re
from datetime import datetime
from lightgbm import plot_importance
import warnings
warnings.filterwarnings('ignore')
# ---
# ### Date read
# In[12]:
age_gender_bkts = pd.read_csv("age_gender_bkts.csv")
countries = pd.read_csv("countries.csv")
sessions = pd.read_csv("sessions.csv")
test_users = pd.read_csv("test_users.csv")
train_users_2 = pd.read_csv("train_users_2.csv")
sample_submission_NDF = | pd.read_csv("sample_submission_NDF.csv") | pandas.read_csv |
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import missingno as ms
import re
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix,f1_score
from sklearn.metrics import accuracy_score
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
train_data = pd.read_csv('dataset.csv')
train_data.info()
print(train_data['label'].value_counts())
def drop_features(features,data):
data.drop(features,inplace=True,axis=1)
a = re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])", " ","Hahahahahaa chal janu zaroor 😂😂😂😂😂")
print(a)
def process_text(text):
return " ".join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])", " ", str(text).lower()).split())
train_data['processed_text'] = train_data['text'].apply(process_text)
print(train_data.head(10))
drop_features(['id','text'],train_data)
print(train_data.head(10))
train_data.info()
file = open("rusp.txt","r")
a = file.readlines()
aa = []
for w in a:
aa.append(process_text(w))
stop_words = set(aa)
file.close()
def process_stopwords(text):
stop_words = set(aa)
word_tokens = word_tokenize(text)
filtered_sentence = [w for w in word_tokens if w not in stop_words]
return " ".join(filtered_sentence)
train_data['processed_text2'] = train_data['processed_text'].apply(process_stopwords)
drop_features(['processed_text'],train_data)
print(train_data.head(10))
train_data.info()
x_train, x_test, y_train, y_test = train_test_split(train_data["processed_text2"],train_data["label"], test_size = 0.2, random_state = 42)
count_vect = CountVectorizer()
transformer = TfidfTransformer(norm='l2',sublinear_tf=True)
x_train_counts = count_vect.fit_transform(x_train)
x_train_tfidf = transformer.fit_transform(x_train_counts)
print(x_train_counts.shape)
print(x_train_tfidf.shape)
x_test_counts = count_vect.transform(x_test)
x_test_tfidf = transformer.transform(x_test_counts)
print(x_test_counts.shape)
print(x_test_tfidf.shape)
model = RandomForestClassifier(n_estimators=200)
model.fit(x_train_tfidf,y_train)
predictions = model.predict(x_test_tfidf)
print(confusion_matrix(y_test,predictions))
f1_score(y_test,predictions)
print(accuracy_score(y_test,predictions))
#Preparing test data
test_data = | pd.read_csv('test.csv') | pandas.read_csv |
def count_used_modules_per_team(pipelines):
from collections import defaultdict
import pandas as pd
team_module_count = defaultdict(lambda: defaultdict(int))
for pipeline in pipelines:
source_name = pipeline['pipeline_source']['name']
for module in pipeline['steps']:
module_id = '.'.join(module['primitive']['python_path'].split('.')[2:])
team_module_count[module_id][source_name] += 1
df = pd.DataFrame(team_module_count).fillna(0)
df = df.reset_index().melt(id_vars="index")
return df
df = count_used_modules_per_team(pipelines_problem["185_baseball_problem_TRAIN"])
alt.Chart(df).mark_circle().encode(x = "variable",
y="value",
color='index',
tooltip=['index','variable', 'value']
).properties(
title='Module usage per team'
)
def count_hyperparameters_per_team(pipelines):
from collections import defaultdict
import pandas as pd
team_module_param_count = defaultdict(lambda: defaultdict(int))
for pipeline in pipelines:
source_name = pipeline['pipeline_source']['name']
for module in pipeline['steps']:
module_id = '.'.join(module['primitive']['python_path'].split('.')[2:])
if ('hyperparams' in module):
team_module_param_count[module_id][source_name] += 1
else:
team_module_param_count[module_id][source_name] += 0
df = | pd.DataFrame(team_module_param_count) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from typing import Dict, Tuple, List, Tuple, Union
from typing import NewType, Callable, Iterable
from typing import Mapping, Sequence, TypeVar, Generic, Any
import multiprocessing as mp
from subprocess import Popen, PIPE, call
import fiona # type: ignore
from lcmodel_typed import LC_Initialize, compute_simple_statistics
import pandas as pd # type: ignore
import numpy as np # type: ignore
import logging
import datetime
from operator import itemgetter
import sys
import os
log_level = logging.INFO
# create logger
logger = logging.getLogger(__name__)
logger.setLevel(log_level)
fh = logging.FileHandler('script_output-para_stats_forest_loss_no_class_single.log')
console = logging.StreamHandler(sys.stdout)
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
formatter = logging.Formatter('%(asctime)s - %(message)s')
fh.setFormatter(formatter)
# add the handlers to the logger
# logger.addHandler(console)
logger.addHandler(fh)
# these are class/year based
single_metrics = ['Edge density',
'Mean patch area',
'Median patch area',
'Euclidean Nearest-Neighbor Distance',
'Like adjacencies',
'Overall Core area',
'Patch cohesion index']
# these are landscape based without class
land_metrics = ["LC_Mean",
"LC_Min",
"LC_Sum",
"LC_Max",
"LC_SD",
"LC_LQua",
"LC_Med",
"LC_UQua",
"DIV_SI"]
def yield_features(filename) -> Iterable[str]:
with fiona.open(path=filename) as fh:
for feat in fh:
# print(feat)
if str(feat.get("id")) == "32956":
pass
yield str(feat.get("id"))
def calculate_for_feature(featureid: str) -> Dict:
# logger.info("run lcmodel stats over the tile and store result table")
folder = "../forest_loss_all"
raster = os.path.join(
folder, "amz_prode_NA_utm_corrected_tile_{}.tif".format(featureid))
# initialise per tile values empty dict
results_dict: Dict[str, Union[str, float]] = {}
results_dict['tile_id'] = featureid
try:
lc_calc = LC_Initialize(raster)
lc_calc.create_cl_array_for_class(None)
lc_calc.f_ccl(lc_calc.cl_array)
for smt in single_metrics:
met_tup = lc_calc.execSingleMetric(smt, None)
results_dict[smt] = met_tup[1]
except Exception as ex:
logger.error(ex)
for smt in single_metrics:
results_dict[smt] = 0.0
logger.info("For tile {} -> {}".format(featureid, results_dict))
return results_dict
if __name__ == '__main__':
mp.freeze_support()
num_cores: int = int(mp.cpu_count()/4)
start = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
logger.info(
"initialising at ... {} with num cores: {}".format(start, num_cores))
fishnet = "calc_grid_10km_basic.shp"
########################################
# initialise empty dict for single metrics
########################################
# initialise empty dict
data_dict: Dict[str, List] = {}
data_dict['tile_id'] = []
for smt in single_metrics:
data_dict[smt] = []
# Run processes for single metrics
pool = mp.Pool(processes=num_cores)
results: List[Dict] = [pool.apply(
calculate_for_feature, args=(x, )) for x in yield_features(fishnet)]
pool.close()
sorted_results = sorted(results, key=itemgetter('tile_id'))
for res in sorted_results:
data_dict['tile_id'].append(res.get("tile_id"))
for smt in single_metrics:
data_dict[smt].append(res.get(smt))
# d = {'tile_id': [1, 2], 'patch_area': [3, 4]} ...
df = | pd.DataFrame(data=data_dict) | pandas.DataFrame |
import requests
import pandas as pd
import numpy as np
import arviz as az
idx = pd.IndexSlice
def get_raw_covidtracking_data():
""" Gets the current daily CSV from COVIDTracking """
url = "https://covidtracking.com/api/v1/states/daily.csv"
data = pd.read_csv(url)
return data
def process_covidtracking_data(data: pd.DataFrame, run_date: pd.Timestamp):
""" Processes raw COVIDTracking data to be in a form for the GenerativeModel.
In many cases, we need to correct data errors or obvious outliers."""
data = data.rename(columns={"state": "region"})
data["date"] = pd.to_datetime(data["date"], format="%Y%m%d")
data = data.set_index(["region", "date"]).sort_index()
data = data[["positive", "total"]]
# Too little data or unreliable reporting in the data source.
data = data.drop(["MP", "GU", "AS", "PR", "VI"])
# On Jun 5 Covidtracking started counting probable cases too
# which increases the amount by 5014.
# https://covidtracking.com/screenshots/MI/MI-20200605-184320.png
data.loc[idx["MI", pd.Timestamp("2020-06-05") :], "positive"] -= 5014
# From CT: On June 19th, LDH removed 1666 duplicate and non-resident cases
# after implementing a new de-duplication process.
data.loc[idx["LA", pd.Timestamp("2020-06-19") :], :] += 1666
# Now work with daily counts
data = data.diff().dropna().clip(0, None).sort_index()
# Michigan missed 6/18 totals and lumped them into 6/19 so we've
# divided the totals in two and equally distributed to both days.
data.loc[idx["MI", pd.Timestamp("2020-06-18")], "total"] = 14871
data.loc[idx["MI", pd.Timestamp("2020-06-19")], "total"] = 14871
# Note that when we set total to zero, the model ignores that date. See
# the likelihood function in GenerativeModel.build
# Huge outlier in NJ causing sampling issues.
data.loc[idx["NJ", pd.Timestamp("2020-05-11")], :] = 0
# Huge outlier in CA causing sampling issues.
data.loc[idx["CA", pd.Timestamp("2020-04-22")], :] = 0
# Huge outlier in CA causing sampling issues.
# TODO: generally should handle when # tests == # positives and that
# is not an indication of positive rate.
data.loc[idx["SC", pd.Timestamp("2020-06-26")], :] = 0
# Two days of no new data then lumped sum on third day with lack of new total tests
data.loc[idx["OR", pd.Timestamp("2020-06-26") : pd.Timestamp("2020-06-28")], 'positive'] = 174
data.loc[idx["OR", pd.Timestamp("2020-06-26") : pd.Timestamp("2020-06-28")], 'total'] = 3296
#https://twitter.com/OHdeptofhealth/status/1278768987292209154
data.loc[idx["OH", pd.Timestamp("2020-07-01")], :] = 0
# Nevada didn't report total tests this day
data.loc[idx["NV", pd.Timestamp("2020-07-02")], :] = 0
# A bunch of incorrect values for WA data so nulling them out.
data.loc[idx["WA", pd.Timestamp("2020-06-05") : | pd.Timestamp("2020-06-07") | pandas.Timestamp |
from datetime import datetime
from io import StringIO
import itertools
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Period,
Series,
Timedelta,
date_range,
)
import pandas._testing as tm
class TestDataFrameReshape:
def test_stack_unstack(self, float_frame):
df = float_frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({"foo": stacked, "bar": stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
tm.assert_frame_equal(unstacked, df)
tm.assert_frame_equal(unstacked_df["bar"], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
tm.assert_frame_equal(unstacked_cols.T, df)
tm.assert_frame_equal(unstacked_cols_df["bar"].T, df)
def test_stack_mixed_level(self):
# GH 18310
levels = [range(3), [3, "a", "b"], [1, 2]]
# flat columns:
df = DataFrame(1, index=levels[0], columns=levels[1])
result = df.stack()
expected = Series(1, index=MultiIndex.from_product(levels[:2]))
tm.assert_series_equal(result, expected)
# MultiIndex columns:
df = DataFrame(1, index=levels[0], columns=MultiIndex.from_product(levels[1:]))
result = df.stack(1)
expected = DataFrame(
1, index=MultiIndex.from_product([levels[0], levels[2]]), columns=levels[1]
)
tm.assert_frame_equal(result, expected)
# as above, but used labels in level are actually of homogeneous type
result = df[["a", "b"]].stack(1)
expected = expected[["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_unstack_not_consolidated(self, using_array_manager):
# Gh#34708
df = DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]})
df2 = df[["x"]]
df2["y"] = df["y"]
if not using_array_manager:
assert len(df2._mgr.blocks) == 2
res = df2.unstack()
expected = df.unstack()
tm.assert_series_equal(res, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack(fill_value=-1)
expected = DataFrame(
{"a": [1, -1, 5], "b": [2, 4, -1]}, index=["x", "y", "z"], dtype=np.int16
)
tm.assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame(
{"a": [1, 0.5, 5], "b": [2, 4, 0.5]}, index=["x", "y", "z"], dtype=float
)
tm.assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame(
{"x": ["a", "a", "b"], "y": ["j", "k", "j"], "z": [0, 1, 2], "w": [0, 1, 2]}
).set_index(["x", "y", "z"])
unstacked = df.unstack(["x", "y"], fill_value=0)
key = ("<KEY>")
expected = unstacked[key]
result = Series([0, 0, 2], index=unstacked.index, name=key)
tm.assert_series_equal(result, expected)
stacked = unstacked.stack(["x", "y"])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
tm.assert_frame_equal(result, df)
# From a series
s = df["w"]
result = s.unstack(["x", "y"], fill_value=0)
expected = unstacked["w"]
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list("AB"), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list("xyz"), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
# From a mixed type dataframe
df["A"] = df["A"].astype(np.int16)
df["B"] = df["B"].astype(np.float64)
result = df.unstack(fill_value=-1)
expected["A"] = expected["A"].astype(np.int16)
expected["B"] = expected["B"].astype(np.float64)
tm.assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list("xyz"), dtype=float)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = date_range("2012-01-01", periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [dv[0], pd.NaT, dv[3]], "b": [dv[1], dv[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame(
{"a": [dv[0], dv[0], dv[3]], "b": [dv[1], dv[2], dv[0]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [td[0], pd.NaT, td[3]], "b": [td[1], td[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame(
{"a": [td[0], td[1], td[3]], "b": [td[1], td[2], td[1]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [
Period("2012-01"),
Period("2012-02"),
Period("2012-03"),
Period("2012-04"),
]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [periods[0], None, periods[3]], "b": [periods[1], periods[2], None]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame(
{
"a": [periods[0], periods[1], periods[3]],
"b": [periods[1], periods[2], periods[1]],
},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = Series(["a", "b", "c", "a"], dtype="category")
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame(
{
"a": pd.Categorical(list("axa"), categories=list("abc")),
"b": pd.Categorical(list("bcx"), categories=list("abc")),
},
index=list("xyz"),
)
tm.assert_frame_equal(result, expected)
# Fill with non-category results in a ValueError
msg = r"'fill_value=d' is not present in"
with pytest.raises(TypeError, match=msg):
data.unstack(fill_value="d")
# Fill with category value replaces missing values as expected
result = data.unstack(fill_value="c")
expected = DataFrame(
{
"a": pd.Categorical(list("aca"), categories=list("abc")),
"b": pd.Categorical(list("bcc"), categories=list("abc")),
},
index=list("xyz"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_tuplename_in_multiindex(self):
# GH 19966
idx = MultiIndex.from_product(
[["a", "b", "c"], [1, 2, 3]], names=[("A", "a"), ("B", "b")]
)
df = DataFrame({"d": [1] * 9, "e": [2] * 9}, index=idx)
result = df.unstack(("A", "a"))
expected = DataFrame(
[[1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2]],
columns=MultiIndex.from_tuples(
[
("d", "a"),
("d", "b"),
("d", "c"),
("e", "a"),
("e", "b"),
("e", "c"),
],
names=[None, ("A", "a")],
),
index=Index([1, 2, 3], name=("B", "b")),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"unstack_idx, expected_values, expected_index, expected_columns",
[
(
("A", "a"),
[[1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2]],
MultiIndex.from_tuples(
[(1, 3), (1, 4), (2, 3), (2, 4)], names=["B", "C"]
),
MultiIndex.from_tuples(
[("d", "a"), ("d", "b"), ("e", "a"), ("e", "b")],
names=[None, ("A", "a")],
),
),
(
(("A", "a"), "B"),
[[1, 1, 1, 1, 2, 2, 2, 2], [1, 1, 1, 1, 2, 2, 2, 2]],
Index([3, 4], name="C"),
MultiIndex.from_tuples(
[
("d", "a", 1),
("d", "a", 2),
("d", "b", 1),
("d", "b", 2),
("e", "a", 1),
("e", "a", 2),
("e", "b", 1),
("e", "b", 2),
],
names=[None, ("A", "a"), "B"],
),
),
],
)
def test_unstack_mixed_type_name_in_multiindex(
self, unstack_idx, expected_values, expected_index, expected_columns
):
# GH 19966
idx = MultiIndex.from_product(
[["a", "b"], [1, 2], [3, 4]], names=[("A", "a"), "B", "C"]
)
df = DataFrame({"d": [1] * 8, "e": [2] * 8}, index=idx)
result = df.unstack(unstack_idx)
expected = DataFrame(
expected_values, columns=expected_columns, index=expected_index
)
tm.assert_frame_equal(result, expected)
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = DataFrame(
{
"state": ["IL", "MI", "NC"],
"index": ["a", "b", "c"],
"some_categories": Series(["a", "b", "c"]).astype("category"),
"A": np.random.rand(3),
"B": 1,
"C": "foo",
"D": pd.Timestamp("20010102"),
"E": Series([1.0, 50.0, 100.0]).astype("float32"),
"F": Series([3.0, 4.0, 5.0]).astype("float64"),
"G": False,
"H": Series([1, 200, 923442], dtype="int8"),
}
)
def unstack_and_compare(df, column_name):
unstacked1 = df.unstack([column_name])
unstacked2 = df.unstack(column_name)
tm.assert_frame_equal(unstacked1, unstacked2)
df1 = df.set_index(["state", "index"])
unstack_and_compare(df1, "index")
df1 = df.set_index(["state", "some_categories"])
unstack_and_compare(df1, "some_categories")
df1 = df.set_index(["F", "C"])
unstack_and_compare(df1, "F")
df1 = df.set_index(["G", "B", "state"])
unstack_and_compare(df1, "B")
df1 = df.set_index(["E", "A"])
unstack_and_compare(df1, "E")
df1 = df.set_index(["state", "index"])
s = df1["A"]
unstack_and_compare(s, "index")
def test_stack_ints(self):
columns = MultiIndex.from_tuples(list(itertools.product(range(3), repeat=3)))
df = DataFrame(np.random.randn(30, 27), columns=columns)
tm.assert_frame_equal(df.stack(level=[1, 2]), df.stack(level=1).stack(level=1))
tm.assert_frame_equal(
df.stack(level=[-2, -1]), df.stack(level=1).stack(level=1)
)
df_named = df.copy()
return_value = df_named.columns.set_names(range(3), inplace=True)
assert return_value is None
tm.assert_frame_equal(
df_named.stack(level=[1, 2]), df_named.stack(level=1).stack(level=1)
)
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[
("A", "cat", "long"),
("B", "cat", "long"),
("A", "dog", "short"),
("B", "dog", "short"),
],
names=["exp", "animal", "hair_length"],
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=["animal", "hair_length"])
exp_hair_stacked = df.stack(level=["exp", "hair_length"])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ["exp", "animal", 1]
tm.assert_frame_equal(
df2.stack(level=["animal", 1]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=["exp", 1]), exp_hair_stacked, check_names=False
)
# When mixed types are passed and the ints are not level
# names, raise
msg = (
"level should contain all level names or all level numbers, not "
"a mixture of the two"
)
with pytest.raises(ValueError, match=msg):
df2.stack(level=["animal", 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ["exp", "animal", 0]
tm.assert_frame_equal(
df3.stack(level=["animal", 0]), animal_hair_stacked, check_names=False
)
def test_stack_int_level_names(self):
columns = MultiIndex.from_tuples(
[
("A", "cat", "long"),
("B", "cat", "long"),
("A", "dog", "short"),
("B", "dog", "short"),
],
names=["exp", "animal", "hair_length"],
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
exp_animal_stacked = df.stack(level=["exp", "animal"])
animal_hair_stacked = df.stack(level=["animal", "hair_length"])
exp_hair_stacked = df.stack(level=["exp", "hair_length"])
df2 = df.copy()
df2.columns.names = [0, 1, 2]
tm.assert_frame_equal(
df2.stack(level=[1, 2]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=[0, 1]), exp_animal_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=[0, 2]), exp_hair_stacked, check_names=False
)
# Out-of-order int column names
df3 = df.copy()
df3.columns.names = [2, 0, 1]
tm.assert_frame_equal(
df3.stack(level=[0, 1]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df3.stack(level=[2, 0]), exp_animal_stacked, check_names=False
)
tm.assert_frame_equal(
df3.stack(level=[2, 1]), exp_hair_stacked, check_names=False
)
def test_unstack_bool(self):
df = DataFrame(
[False, False],
index=MultiIndex.from_arrays([["a", "b"], ["c", "l"]]),
columns=["col"],
)
rs = df.unstack()
xp = DataFrame(
np.array([[False, np.nan], [np.nan, False]], dtype=object),
index=["a", "b"],
columns=MultiIndex.from_arrays([["col", "col"], ["c", "l"]]),
)
tm.assert_frame_equal(rs, xp)
def test_unstack_level_binding(self):
# GH9856
mi = MultiIndex(
levels=[["foo", "bar"], ["one", "two"], ["a", "b"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
names=["first", "second", "third"],
)
s = Series(0, index=mi)
result = s.unstack([1, 2]).stack(0)
expected_mi = MultiIndex(
levels=[["foo", "bar"], ["one", "two"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=["first", "second"],
)
expected = DataFrame(
np.array(
[[np.nan, 0], [0, np.nan], [np.nan, 0], [0, np.nan]], dtype=np.float64
),
index=expected_mi,
columns=Index(["a", "b"], name="third"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_to_series(self, float_frame):
# check reversibility
data = float_frame.unstack()
assert isinstance(data, Series)
undo = data.unstack().T
tm.assert_frame_equal(undo, float_frame)
# check NA handling
data = DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]})
data.index = Index(["a", "b", "c"])
result = data.unstack()
midx = MultiIndex(
levels=[["x", "y"], ["a", "b", "c"]],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
)
expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)
tm.assert_series_equal(result, expected)
# check composability of unstack
old_data = data.copy()
for _ in range(4):
data = data.unstack()
tm.assert_frame_equal(old_data, data)
def test_unstack_dtypes(self):
# GH 2929
rows = [[1, 1, 3, 4], [1, 2, 3, 4], [2, 1, 3, 4], [2, 2, 3, 4]]
df = DataFrame(rows, columns=list("ABCD"))
result = df.dtypes
expected = Series([np.dtype("int64")] * 4, index=list("ABCD"))
tm.assert_series_equal(result, expected)
# single dtype
df2 = df.set_index(["A", "B"])
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("int64")] * 4,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
# mixed
df2 = df.set_index(["A", "B"])
df2["C"] = 3.0
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("float64")] * 2 + [np.dtype("int64")] * 2,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
df2["D"] = "foo"
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("float64")] * 2 + [np.dtype("object")] * 2,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
# GH7405
for c, d in (
(np.zeros(5), np.zeros(5)),
(np.arange(5, dtype="f8"), np.arange(5, 10, dtype="f8")),
):
df = DataFrame(
{
"A": ["a"] * 5,
"C": c,
"D": d,
"B": date_range("2012-01-01", periods=5),
}
)
right = df.iloc[:3].copy(deep=True)
df = df.set_index(["A", "B"])
df["D"] = df["D"].astype("int64")
left = df.iloc[:3].unstack(0)
right = right.set_index(["A", "B"]).unstack(0)
right[("D", "a")] = right[("D", "a")].astype("int64")
assert left.shape == (3, 2)
tm.assert_frame_equal(left, right)
def test_unstack_non_unique_index_names(self):
idx = MultiIndex.from_tuples([("a", "b"), ("c", "d")], names=["c1", "c1"])
df = DataFrame([1, 2], index=idx)
msg = "The name c1 occurs multiple times, use a level number"
with pytest.raises(ValueError, match=msg):
df.unstack("c1")
with pytest.raises(ValueError, match=msg):
df.T.stack("c1")
def test_unstack_unused_levels(self):
# GH 17845: unused codes in index make unstack() cast int to float
idx = MultiIndex.from_product([["a"], ["A", "B", "C", "D"]])[:-1]
df = DataFrame([[1, 0]] * 3, index=idx)
result = df.unstack()
exp_col = MultiIndex.from_product([[0, 1], ["A", "B", "C"]])
expected = DataFrame([[1, 1, 1, 0, 0, 0]], index=["a"], columns=exp_col)
tm.assert_frame_equal(result, expected)
assert (result.columns.levels[1] == idx.levels[1]).all()
# Unused items on both levels
levels = [[0, 1, 7], [0, 1, 2, 3]]
codes = [[0, 0, 1, 1], [0, 2, 0, 2]]
idx = MultiIndex(levels, codes)
block = np.arange(4).reshape(2, 2)
df = DataFrame(np.concatenate([block, block + 4]), index=idx)
result = df.unstack()
expected = DataFrame(
np.concatenate([block * 2, block * 2 + 1], axis=1), columns=idx
)
tm.assert_frame_equal(result, expected)
assert (result.columns.levels[1] == idx.levels[1]).all()
# With mixed dtype and NaN
levels = [["a", 2, "c"], [1, 3, 5, 7]]
codes = [[0, -1, 1, 1], [0, 2, -1, 2]]
idx = MultiIndex(levels, codes)
data = np.arange(8)
df = DataFrame(data.reshape(4, 2), index=idx)
cases = (
(0, [13, 16, 6, 9, 2, 5, 8, 11], [np.nan, "a", 2], [np.nan, 5, 1]),
(1, [8, 11, 1, 4, 12, 15, 13, 16], [np.nan, 5, 1], [np.nan, "a", 2]),
)
for level, idces, col_level, idx_level in cases:
result = df.unstack(level=level)
exp_data = np.zeros(18) * np.nan
exp_data[idces] = data
cols = MultiIndex.from_product([[0, 1], col_level])
expected = DataFrame(exp_data.reshape(3, 6), index=idx_level, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("cols", [["A", "C"], slice(None)])
def test_unstack_unused_level(self, cols):
# GH 18562 : unused codes on the unstacked level
df = DataFrame([[2010, "a", "I"], [2011, "b", "II"]], columns=["A", "B", "C"])
ind = df.set_index(["A", "B", "C"], drop=False)
selection = ind.loc[(slice(None), slice(None), "I"), cols]
result = selection.unstack()
expected = ind.iloc[[0]][cols]
expected.columns = MultiIndex.from_product(
[expected.columns, ["I"]], names=[None, "C"]
)
expected.index = expected.index.droplevel("C")
tm.assert_frame_equal(result, expected)
def test_unstack_long_index(self):
        # GH 32624: Error when using a lot of indices to unstack.
        # The error occurred only if a lot of indices are used.
df = DataFrame(
[[1]],
columns= | MultiIndex.from_tuples([[0]], names=["c1"]) | pandas.MultiIndex.from_tuples |
import pandas as pd
import time
tic = time.time()
fullpred1 = '/input/compare/recommend.csv'
mapping_file_user = '/input/data_validation/userdict.csv'
mapping_file_item = '/input/data_validation/itemdict.csv'
top_count = 5
fullpred = pd.read_csv(fullpred1)
mapping_item = pd.read_csv(mapping_file_item)
mapping_user = pd.read_csv(mapping_file_user)
def predict(data):
# TODO: convert y to dataframe
y = pd.DataFrame({'user_id': [data["user_id"]]})
#y = pd.DataFrame([y],columns=['user_id'])
mapping_user['originaluser_id'] = mapping_user['originaluser_id'].astype(
str)
mapping_item['originalitem_id'] = mapping_item['originalitem_id'].astype(
str)
# convert input user ids to strings and rename the userid column to originaluser_id
user_id_input = pd.DataFrame(
list(y['user_id'].astype('str')), columns=['originaluser_id'])
# convert user ids to internal ids
converted_user_id = mapping_user.merge(
user_id_input, on='originaluser_id', how='inner')
# fullpred.dtypes
# get all the predictions for all items for the users requested
results = fullpred.merge(converted_user_id, on='user_id', how='inner')
print(results.columns)
groups = results.groupby('user_id', sort=True)
finaloutput = pd.DataFrame(columns=['originaluser_id', 'originalitem_id'])
for group in groups.groups.keys():
# get top k recommendations for a user in the results
group_length = round(len(groups.get_group(group))*0.25)
recommendations = groups.get_group(group).sort_values(
'score', ascending=False).head(group_length)
# convert top k recommendations from internal to external
recommendations = recommendations.merge(mapping_item, on='item_id', how='inner')[
['originaluser_id', 'originalitem_id']]
finaloutput = pd.concat(
[finaloutput, recommendations], ignore_index=True)
finaloutput = finaloutput.rename(
{'originaluser_id': 'user_id', 'originalitem_id': 'item_id'}, axis=1)
finaloutput_2 = | pd.DataFrame(columns=['user_id', 'item_id']) | pandas.DataFrame |
from typing import Optional
from dataclasses import dataclass
import pandas as pd
from poker.base import unique_values, native_mean, running_mean, running_std, running_median, running_percentile
from poker.document_filter_class import DocumentFilter
pd.set_option('use_inf_as_na', True)
def _ts_concat(dic: dict, index_lst: list) -> pd.DataFrame:
"""Concat a dict of dicts or pd.DataFrames"""
lst_df = []
for key, val in dic.items():
if type(val) != pd.DataFrame:
val = pd.DataFrame(val, index=index_lst)
val.columns = [key + ' ' + col if col != '' else key for col in val.columns]
else:
val.columns = [key]
lst_df.append(val)
final_df = pd.concat(lst_df, axis=1).reset_index()
return final_df
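# Illustrative note on _ts_concat above (call shape assumed from the callers below, not
# part of the original file): a DataFrame value becomes a single column named after its
# key, while a dict-of-lists value becomes one column per inner key prefixed with the
# outer key, e.g.
# _ts_concat({'Position Bet': p_bet, 'Game Id': {'': game_id_lst}}, index_lst=ind_lst)
# -> columns 'Position Bet Pre Flop', ..., 'Game Id'.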
def _ts_hand(data: pd.DataFrame) -> pd.DataFrame:
"""Build Hand related data"""
pos_dic = {'Pre Flop': 0.25, 'Post Flop': 0.50, 'Post Turn': 0.75, 'Post River': 1.0}
# Game Id
g_i_df = pd.DataFrame(data.groupby('Start Time')['Game Id'].last())
g_i_df.columns = ['']
# Time in Hand
t_h_df = pd.DataFrame(data.groupby('Start Time')['Seconds into Hand'].last())
t_h_df.columns = ['']
# Last Position
last_position = data.groupby('Start Time')['Position'].last().tolist()
l_p_df = pd.DataFrame([pos_dic[item] for item in last_position], index=t_h_df.index, columns=[''])
# Win
r_w_p = data.groupby('Start Time')['Win'].last().tolist()
r_w_p = [1 if item is True else 0 for item in r_w_p]
r_w_p_df = pd.DataFrame(running_mean(data=r_w_p, num=5), index=t_h_df.index, columns=[''])
ind_lst = data.groupby('Start Time').last().index.tolist()
lst_dic = {'Seconds per Hand': t_h_df, 'Last Position in Hand': l_p_df, 'Rolling Win Percent': r_w_p_df,
'Game Id': g_i_df}
return _ts_concat(dic=lst_dic, index_lst=ind_lst)
def _ts_position(data: pd.DataFrame) -> pd.DataFrame:
"""Build position related data"""
temp_df = data[(data['Class'] == 'Calls') | (data['Class'] == 'Raises') | (data['Class'] == 'Checks')]
p_bet = {'Pre Flop': [], 'Post Flop': [], 'Post Turn': [], 'Post River': []}
t_p_bet = {'Pre Flop': 0, 'Post Flop': 0, 'Post Turn': 0, 'Post River': 0}
prev_ind, len_temp_df, game_id_lst = temp_df['Start Time'].iloc[0], len(temp_df), []
for ind, row in temp_df.iterrows():
if row['Start Time'] != prev_ind:
prev_ind = row['Start Time']
game_id_lst.append(row['Game Id'])
for key, val in t_p_bet.items():
p_bet[key].append(val)
t_p_bet = {'Pre Flop': 0, 'Post Flop': 0, 'Post Turn': 0, 'Post River': 0}
t_p_bet[row['Position']] += row['Bet Amount']
if ind == len_temp_df:
game_id_lst.append(row['Game Id'])
for key, val in t_p_bet.items():
p_bet[key].append(val)
ind_lst = unique_values(data=temp_df['Start Time'].tolist())
lst_dic = {'Position Bet': p_bet, 'Game Id': {'': game_id_lst}}
return _ts_concat(dic=lst_dic, index_lst=ind_lst)
def _ts_class_counts_seconds(data: pd.DataFrame) -> pd.DataFrame:
"""Build class, counts, and seconds data"""
# Bet, Count, and Time Per Position
temp_df = data[(data['Class'] == 'Calls') | (data['Class'] == 'Raises') | (data['Class'] == 'Checks')]
pos_lst = ['Pre Flop', 'Post Flop', 'Post Turn', 'Post River']
class_lst, short_class_lst = ['Checks', 'Calls', 'Raises'], ['Calls', 'Raises']
c_count = {item1 + ' ' + item: [] for item in class_lst for item1 in pos_lst}
c_seconds = {item1 + ' ' + item: [] for item in class_lst for item1 in pos_lst}
c_bet = {item1 + ' ' + item: [] for item in short_class_lst for item1 in pos_lst}
c_bet_per_pot = {item1 + ' ' + item: [] for item in short_class_lst for item1 in pos_lst}
c_bet_per_chips = {item1 + ' ' + item: [] for item in short_class_lst for item1 in pos_lst}
t_c_count = {item1 + ' ' + item: 0 for item in class_lst for item1 in pos_lst}
t_c_seconds = {item1 + ' ' + item: None for item in class_lst for item1 in pos_lst}
t_c_bet = {item1 + ' ' + item: None for item in short_class_lst for item1 in pos_lst}
t_c_bet_per_pot = {item1 + ' ' + item: None for item in short_class_lst for item1 in pos_lst}
t_c_bet_per_chips = {item1 + ' ' + item: None for item in short_class_lst for item1 in pos_lst}
prev_ind, len_temp_df, game_id_lst = temp_df['Start Time'].iloc[0], len(temp_df), []
for ind, row in temp_df.iterrows():
if row['Start Time'] != prev_ind:
prev_ind = row['Start Time']
game_id_lst.append(row['Game Id'])
for item in class_lst:
for item1 in pos_lst:
c_count[item1 + ' ' + item].append(t_c_count[item1 + ' ' + item])
c_seconds[item1 + ' ' + item].append(t_c_seconds[item1 + ' ' + item])
if item != 'Checks':
c_bet[item1 + ' ' + item].append(t_c_bet[item1 + ' ' + item])
c_bet_per_pot[item1 + ' ' + item].append(t_c_bet_per_pot[item1 + ' ' + item])
c_bet_per_chips[item1 + ' ' + item].append(t_c_bet_per_chips[item1 + ' ' + item])
t_c_count = {item1 + ' ' + item: 0 for item in class_lst for item1 in pos_lst}
t_c_seconds = {item1 + ' ' + item: None for item in class_lst for item1 in pos_lst}
t_c_bet = {item1 + ' ' + item: None for item in short_class_lst for item1 in pos_lst}
t_c_bet_per_pot = {item1 + ' ' + item: None for item in short_class_lst for item1 in pos_lst}
t_c_bet_per_chips = {item1 + ' ' + item: None for item in short_class_lst for item1 in pos_lst}
t_pos, t_bet, t_class, t_second = row['Position'], row['Bet Amount'], row['Class'], row['Seconds']
t_key = t_pos + ' ' + t_class
t_c_count[t_key] += 1
if t_c_seconds[t_key] is not None:
t_c_seconds[t_key] = native_mean(data=[t_c_seconds[t_key]] + [t_second])
else:
t_c_seconds[t_key] = t_second
if t_class != 'Checks':
if t_c_bet[t_key] is not None:
t_c_bet[t_key] = native_mean(data=[t_c_bet[t_key]] + [t_bet])
else:
t_c_bet[t_key] = t_bet
bet_pot_per = t_bet / (row['Pot Size'] - t_bet)
if t_c_bet_per_pot[t_key] is not None:
t_c_bet_per_pot[t_key] = native_mean(data=[t_c_bet_per_pot[t_key]] + [bet_pot_per])
else:
t_c_bet_per_pot[t_key] = bet_pot_per
bet_chip_per = t_bet / (row['Player Current Chips'] + t_bet)
if t_c_bet_per_chips[t_key] is not None:
t_c_bet_per_chips[t_key] = native_mean(data=[t_c_bet_per_chips[t_key]] + [bet_chip_per])
else:
t_c_bet_per_chips[t_key] = bet_chip_per
if ind == len_temp_df:
game_id_lst.append(row['Game Id'])
for item in class_lst:
for item1 in pos_lst:
c_count[item1 + ' ' + item].append(t_c_count[item1 + ' ' + item])
c_seconds[item1 + ' ' + item].append(t_c_seconds[item1 + ' ' + item])
if item != 'Checks':
c_bet[item1 + ' ' + item].append(t_c_bet[item1 + ' ' + item])
c_bet_per_pot[item1 + ' ' + item].append(t_c_bet_per_pot[item1 + ' ' + item])
c_bet_per_chips[item1 + ' ' + item].append(t_c_bet_per_chips[item1 + ' ' + item])
ind_lst = unique_values(data=temp_df['Start Time'].tolist())
lst_dic = {'Class Count': c_count, 'Class Seconds': c_seconds, 'Class Bet': c_bet,
'Class Bet Percent of Pot': c_bet_per_pot, 'Class Bet Percent of Chips': c_bet_per_chips,
'Game Id': {'': game_id_lst}}
return _ts_concat(dic=lst_dic, index_lst=ind_lst)
@dataclass
class TSanalysis:
"""
Calculate Time Series stats for a player.
:param data: Input DocumentFilter.
:type data: DocumentFilter
:param upper_q: Upper Quantile percent, default is 0.841. *Optional*
:type upper_q: float
:param lower_q: Lower Quantile percent, default is 0.159. *Optional*
:type lower_q: float
:param window: Rolling window, default is 5. *Optional*
:type window: int
:example:
>>> from poker.time_series_class import TSanalysis
>>> docu_filter = DocumentFilter(data=poker, player_index_lst=['DZy-22KNBS'])
>>> TSanalysis(data=docu_filter)
:note: This class expects a DocumentFilter with only one player_index used.
"""
def __init__(self, data: DocumentFilter, upper_q: Optional[float] = 0.841, lower_q: Optional[float] = 0.159,
window: Optional[int] = 5):
self._docu_filter = data
self._window = window
self._upper_q = upper_q
self._lower_q = lower_q
self._df = data.df
hand_df = _ts_hand(data=self._df)
self._hand = hand_df.copy()
position_df = _ts_position(data=self._df)
self._position = position_df.copy()
class_df = _ts_class_counts_seconds(data=self._df)
self._class = class_df.copy()
hand_cols, hand_ind = hand_df.columns, hand_df.index
self._hand_mean = pd.DataFrame(columns=hand_cols, index=hand_ind)
self._hand_std = pd.DataFrame(columns=hand_cols, index=hand_ind)
self._hand_median = pd.DataFrame(columns=hand_cols, index=hand_ind)
self._hand_upper_q = pd.DataFrame(columns=hand_cols, index=hand_ind)
self._hand_lower_q = pd.DataFrame(columns=hand_cols, index=hand_ind)
for col in hand_cols:
if col not in ['Game Id', 'index', 'Start Time']:
self._hand_mean[col] = running_mean(data=hand_df[col], num=self._window)
self._hand_std[col] = running_std(data=hand_df[col], num=self._window)
self._hand_median[col] = running_median(data=hand_df[col], num=self._window)
self._hand_upper_q[col] = running_percentile(data=hand_df[col], num=self._window, q=upper_q)
self._hand_lower_q[col] = running_percentile(data=hand_df[col], num=self._window, q=lower_q)
pos_cols, pos_ind = position_df.columns, position_df.index
self._position_mean = pd.DataFrame(columns=pos_cols, index=pos_ind)
self._position_std = | pd.DataFrame(columns=pos_cols, index=pos_ind) | pandas.DataFrame |
import numpy as np
import pandas as pd
from symuviapy.symfunc import updatelist
DT = 0.1 # Sample time
KC = 0.16 # CAV max density
KH = 0.0896 # HDV max density
VF = 25.0 # Speed free flow
W = 6.25 # Congestion speed
E = 25.0*0.3 # Speed drop for relaxation
GCAV = 1/(KC*W) # Time headway CAV
GHDV = 1/(KH*W) # Time headway HDV
SCAV = VF/(KC*W)+1/KC # Desired space headway CAV
SHDV = VF/(KH*W)+1/KH # Desired space headway HDV
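# Sanity check of the headway constants above (plain arithmetic, not from the original file):
# KC*W = 0.16*6.25 = 1.0   -> GCAV = 1.0 s,   SCAV = 25/1.0 + 1/0.16 = 31.25 m
# KH*W = 0.0896*6.25 = 0.56 -> GHDV ~ 1.79 s, SHDV ~ 25/0.56 + 1/0.0896 ~ 55.8 m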
dveh_twy = {'CAV': GCAV, 'HDV': GHDV}
dveh_dwy = {'CAV': 1/KC, 'HDV': 1/KH}
U_MAX = 1.5 # Max. Acceleration
U_MIN = -1.5 # Min. Acceleration
# Imposed leadership
dveh_ldr = {0: 0, 1: 0, 2: 1, 3: 2, 5: 3, 6: 5, 8: 6, 9: 8}
dveh_idx = {0: 0, 1: 1, 2: 2, 3: 3, 5: 4, 6: 5, 8: 6, 9: 7}
def reversedEnumerate(*args):
""" Inverse enumeration iterator"""
revArg = [np.flip(x, axis=0) for x in args]
return zip(range(len(args[0])-1, -1, -1), *revArg)
def find_idx_ldr(results): # , lPlatoon = None):
# """ From dbQuery finds idx or leader for CAVs"""
# Frozen network (A-priori)
# if lPlatoon is not None:
# key = lPlatoon
# idx = list(range(len(key)))
# ldr = [idx[0]]+idx[:-1]
# dveh_ldr = dict(zip(key,ldr))
# dveh_idx = dict(zip(key,idx))
ldrl = [dveh_ldr[x[1]] for x in results if x[2] == 'CAV']
idx_ldr = [dveh_idx[x] for x in ldrl]
return idx_ldr, ldrl
def initial_setup_mpc(results, h_ref):
""" Initialize variables for controller
"""
TGref = h_ref # format_reference(h_ref)
h = TGref.shape[0]
n_CAV = len([ty[2] for ty in results if ty[2] == 'CAV'])
dCAVu = [h, n_CAV]
# print(f'Dimensions control: {dCAVu}')
Sref = np.zeros(dCAVu)
S = np.zeros(dCAVu)
V = np.zeros(dCAVu)
DV = np.zeros(dCAVu)
Lv = np.zeros(dCAVu)
Ls = np.zeros(dCAVu)
return (Sref, TGref, S, V, DV, Ls, Lv)
def format_reference(h_ref):
""" Convert query from a reference into a
numpy array
"""
# Rearrange
refDf = | pd.DataFrame(h_ref, columns=['ti', 'id', 'gapt']) | pandas.DataFrame |
# -*- coding: UTF-8 -*-
import os
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from root_project import ROOT_DIR
FONTSIZE = 20
if __name__ == "__main__":
# Set the working directory to the root of the project
print(os.getcwd())
os.chdir(ROOT_DIR)
print(os.getcwd())
pv_solution = pd.read_csv('data1/uliege/uliege_15min.csv', index_col=0, parse_dates=True)['Pm']
pv_forecast = pd.read_csv('data1/uliege/uliege_pv_prediction_python_15min.csv', parse_dates=True, index_col=0)
    # day 1 -> average (moyen)
# day 2 -> sunny
# day 3 -> cloudy
day = "3"
day_list = ['2019-08-24', '2019-11-16', '2019-12-02']
day_names = ['soleil_08-24-19', 'moyen_16-11-19', 'nuage_02-12-19']
df_quantiles = []
for name, day in zip(day_names, day_list):
df_input = pd.read_csv('data1/uliege/quantiles/'+name+'.csv', index_col=0, parse_dates=True)
time_range = pd.date_range(start=pd.Timestamp(day+' 03:30:00'), periods=63, freq='15T')
df = | pd.DataFrame(data=df_input.values, index=time_range) | pandas.DataFrame |
import matplotlib.pyplot as plt
import csv
from collections import defaultdict
import numpy as np
#from scipy.signal import *
from numpy.fft import *
#from scipy import *
from pylab import *
#import pywt
import pandas as pd
import statistics
###############----This function averages data points for each second----##################
def data_processing():
chan = ['Timestamp','POW.AF3.Theta', 'POW.AF3.Alpha', 'POW.AF3.BetaL', 'POW.AF3.BetaH', 'POW.AF4.Theta', 'POW.AF4.Alpha', 'POW.AF4.BetaL', 'POW.AF4.BetaH']
time, af3_theta, af3_alpha, af3_betal, af3_betah, af4_theta, af4_alpha, af4_betal, af4_betah = ([] for i in range(9))
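    # nine parallel buffers: one per entry in `chan` (timestamp plus the eight band-power channels)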
frequency=0 #counts the frequency
count=0 #to let me know then the progtam will end. basically counts number of seconds in a video
i=1 #for number of participants
j=0 #for number of videos
while i<=6:
print("i", i)
j=0
while j<4:
print("j",j)
# ====================================== YOUR PATH GOES HERE ==================================================
path_out = "/Your Path/Project/datasets/intermediate_files/video"+str(j)+"_p"+str(i)+".csv"
fout_data = open(path_out, 'w')
# ====================================== YOUR PATH GOES HERE ==================================================
path_in= "/Your Path/Project/datasets/p"+str(i)+"/p"+str(i)+"_vid_"+str(j)+".csv"
df = pd.read_csv(path_in, header=1)
df1=df[['Timestamp', 'POW.AF3.Theta', 'POW.AF3.Alpha', 'POW.AF3.BetaL', 'POW.AF3.BetaH', 'POW.AF4.Theta', 'POW.AF4.Alpha', 'POW.AF4.BetaL', 'POW.AF4.BetaH']]
for ch in chan:
if ch=="POW.AF4.BetaH":
fout_data.write(ch)
else:
fout_data.write(ch+",")
fout_data.write("\n")
m=0
while m < (len(df1)):
frequency=0
# print("m", m)
# print("\n")
for n in range(m, len(df1)):
if int(df1.loc[m, "Timestamp"])== int(df1.loc[n, "Timestamp"]):
frequency=frequency+1
time.append(df1.loc[n,"Timestamp"])
af3_theta.append(df1.loc[n, "POW.AF3.Theta"])
af3_alpha.append(df1.loc[n, "POW.AF3.Alpha"])
af3_betal.append(df1.loc[n, "POW.AF3.BetaL"])
af3_betah.append(df1.loc[n, "POW.AF3.BetaH"])
af4_theta.append(df1.loc[n, "POW.AF4.Theta"])
af4_alpha.append(df1.loc[n, "POW.AF4.Alpha"])
af4_betal.append(df1.loc[n, "POW.AF4.BetaL"])
af4_betah.append(df1.loc[n, "POW.AF4.BetaH"])
else:
break
count=count+1
time_m=statistics.mean(time)
af3_theta_m=statistics.mean(af3_theta)
af3_alpha_m=statistics.mean(af3_alpha)
af3_betal_m=statistics.mean(af3_betal)
af3_betah_m=statistics.mean(af3_betah)
af4_theta_m=statistics.mean(af4_theta)
af4_alpha_m=statistics.mean(af4_alpha)
af4_betal_m=statistics.mean(af4_betal)
af4_betah_m=statistics.mean(af4_betah)
del time[:]
del af3_theta[:]
del af3_alpha[:]
del af3_betal[:]
del af3_betah[:]
del af4_theta[:]
del af4_alpha[:]
del af4_betal[:]
del af4_betah[:]
fout_data.write(str(time_m)+",")
fout_data.write(str(af3_theta_m)+",")
fout_data.write(str(af3_alpha_m)+",")
fout_data.write(str(af3_betal_m)+",")
fout_data.write(str(af3_betah_m)+",")
fout_data.write(str(af4_theta_m)+",")
fout_data.write(str(af4_alpha_m)+",")
fout_data.write(str(af4_betal_m)+",")
fout_data.write(str(af4_betah_m))
fout_data.write("\n")
m=m+frequency
#print(count)
fout_data.close()
j=j+1
i=i+1
###############----This function gives graphs for each video and for alpha calibration----##################
def engagement_compositegraph():
i = 1 # for participants
j = 0 # for videos
s = 2
d = 5
c1 = 1 + (s / (1 + d))
c2 = 1 - (s / (1 + d))
ema_value = []
ema_time = []
l_norm=[]
x_limit = [185, 185, 80, 160]
df3 = pd.DataFrame(columns=["Time", "p1", "p2", "p3", "p4", "p5", "p6"])
while j < 4:
print("j", j)
i = 1
while i <= 6:
print("i", i)
# ====================================== YOUR PATH GOES HERE ==================================================
path_in = "/Your Path/Project/datasets/intermediate_files/video" + str(j) + "_p" + str(i) + ".csv"
df = | pd.read_csv(path_in, header=0) | pandas.read_csv |
'''
Script used to pull voting information from VoxCharta.org.
Accessed via the tamu.voxcharta.org platform.
Have added code accounting for the few posts that aren't
searchable, but DO exist on VoxCharta. Also added code to
choose the original post, should replacement posts also exist.
'''
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from datetime import datetime
import threading, time, getpass, sys, subprocess
import pandas as pd
import numpy as np
from datetime import datetime as dt
from datetime import timedelta
__author__ = '<NAME>'
__email__ = '<EMAIL>'
# amount of time to wait
timeit = input('\nTime before closing browser (in seconds): ') # seconds
timeit = int(timeit)
# ------------------------ #
# -- creating dataframe -- #
# ------------------------ #
df_dtypes = {'order':int,'id':str,'date':str}
main_df = pd.read_csv('arXiv_posts.txt',sep='\t',dtype=df_dtypes) # main table of data
# total votes will be counted and, if possible, will track # of votes per day/week
df = pd.DataFrame({'id':[],'total_votes':[],'vote_rate':[]}) # dataframe to be created in script
# arXiv IDs to run through; note that you can query specific dates or other sorting criteria
#arXiv_ids = main_df.id.values # runs through full list
# unless uncommented above, will be specifying to only look at posts within the last 2 weeks
times = [dt.strptime(t,'%d %b %y') for t in main_df.date.values]
times = np.asarray(times) # useful for the sorting we'll do
# reference date of 2 weeks ago
ref_date = dt.now() - timedelta(days=14) # oof
print(f"Looking at posts from {dt.strftime(ref_date,'%a, %d %b %y')} and onwards.",end='\n\n')
indexing = np.arange(len(main_df))
indexing = indexing[times > ref_date] # only listing entries in the dataframe that are in the last 2 weeks
arXiv_ids = main_df.loc[indexing,'id'].values # IDs of posts within the last 2 weeks!
# ------------------------ #
logmein = False # option to log into VoxCharta account (note: currently broken)
# adding a timer to this to see how long it takes
start_it = dt.now()
print('Starting timer', start_it,end='\n\n')
# opening browser & going to VoxCharta
driver = webdriver.Firefox()
if logmein == True:
# pulling information to access arXiv account
username = input('\nVoxCharta username: ')
assert len(username) > 0, "Need to provide a username"
password = input('<PASSWORD>: ')
assert len(password) > 0, "Need to provide account password"
driver.get("https://tamu.voxcharta.org/wp-login.php")
assert "Log In" in driver.title
# finding log in cells
usern = driver.find_element_by_name("log")
passw = driver.find_element_by_name("pwd")
usern.clear()
passw.clear()
# adding log in info
usern.send_keys(username)
passw.send_keys(password)
# locating "Log In" button
login = driver.find_element_by_name("wp-submit")
login.click()
else:
driver.get("https://tamu.voxcharta.org/")
# not necessary but keeping anyway
#otherints = driver.find_element_by_name("show_everyone") # so can see all upvotes
#if otherints.is_selected() == False:
# otherints.click()
# print("Now showing votes from all institutions.")
for arXiv_id in arXiv_ids:
assert len(arXiv_id) == 10, f"Incorrect arXiv ID: {arXiv_id}."
print(f'\nSearching for {arXiv_id}.')
# now, finding the search bar and searching for paper title
try: search = driver.find_element_by_id("searchbox")
except: time.sleep(8); search = driver.find_element_by_id("searchbox")
search.clear()
search.send_keys(arXiv_id)
submit = driver.find_element_by_class_name("go")
submit.click()
time.sleep(6) # have to pause so the code doesn't try to search on previous page
# finding and clicking on result title
try: results = driver.find_elements_by_tag_name("h3") # looks at all h3 tags (b/c replacement posts)
except:
time.sleep(5) # waiting a little longer for it to load
results = driver.find_elements_by_tag_name("h3") # looks at all h3 tags (b/c replacement posts)
if len(results) > 1: # if there's no search result, this will be length == 1
# choosing the first posted, corrects for [REPLACEMENT] posts that will be top of the search results
result = results[-2] # This 2nd to last <h3> tag is the first post
# first checking that there isn't just 1 post that is the replacement
try: replacement_only = result.find_element_by_tag_name("a")
except: # for some reason this has been failing recently
try: time.sleep(5); replacement_only = result.find_element_by_tag_name("a")
except: # if for some reason it decides it can't find it, we'll start over
driver.get("https://tamu.voxcharta.org/")
time.sleep(3)
# locating search bar and inputting arXiv_id
search = driver.find_element_by_id("searchbox")
search.clear()
search.send_keys(arXiv_id)
submit = driver.find_element_by_class_name("go")
submit.click()
time.sleep(6) # have to pause so the code doesn't try to search on previous page
results = driver.find_elements_by_tag_name("h3") # looks at all h3 tags (b/c replacement posts)
if len(results) > 1:
result = results[-2]
replacement_only = result.find_element_by_tag_name("a")
else: replacement_only = results[0].find_element_by_tag_name("a")
if replacement_only.text[-13:] == '[Replacement]':
print('Original post not searchable?')
no_votes = -99 # so I know which ones don't come up in the search
# new filler df to log total votes
filler_df = pd.DataFrame({'id':[arXiv_id],'total_votes':[no_votes],\
'vote_rate':[f'']})
df = df.append(filler_df,ignore_index=True)
else:
result.click()
time.sleep(6) # have to pause so the code doesn't try to search on previous page
# finding total votes
try:
votes = driver.find_element_by_class_name("votedon")
total = votes.find_element_by_tag_name("b")
total = total.text.split(" '")[0]
print(f'Total votes: {total}')
votes_df = | pd.DataFrame({'cast_date':[],'cast_time':[]}) | pandas.DataFrame |
"""Tests for the sdv.constraints.tabular module."""
import uuid
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomConstraint, GreaterThan, Negative, OneHotEncoding, Positive,
Rounding, UniqueCombinations)
def dummy_transform():
pass
def dummy_reverse_transform():
pass
def dummy_is_valid():
pass
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and revert_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid'
# Run
instance = CustomConstraint(
transform=dummy_transform,
reverse_transform=dummy_reverse_transform,
is_valid=is_valid_fqn
)
# Assert
assert instance.transform == dummy_transform
assert instance.reverse_transform == dummy_reverse_transform
assert instance.is_valid == dummy_is_valid
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
        It is expected to create a new Constraint instance, receiving the names of
the columns that need to produce unique combinations.
Side effects:
        - instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_true(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_false(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [6, 7, 8],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = | pd.Series([False, False, False], name='b#c#d') | pandas.Series |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# TODO(lpe): Include in package
import pandas as pd
import os
from sklearn import preprocessing
import numpy as np
from scipy.stats import beta
import pathlib
import pickle
from clonesig.estimator import Estimator
from clonesig import mixin_init_parameters
try:
rows, columns = os.popen('stty size', 'r').read().split()
pd.set_option('display.width', int(columns))
| pd.set_option('display.max_columns', 200) | pandas.set_option |
import numpy as np
import pandas as pd
import time
def mean_user(x):
user_count=np.bincount(x[0])
zeros_train = np.array(np.where(user_count[1:len(user_count)] == 0))
non_zero_train = np.array([np.where(user_count[1:len(user_count)] != 0)])
times_user_train_correct = np.delete(user_count[1:len(user_count)], zeros_train)
mean_user = np.array(x.groupby([0])[2].mean())
full = np.repeat(mean_user,times_user_train_correct)
return np.array(full)
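# Minimal usage sketch for mean_user (illustrative; rows assumed sorted by user id,
# columns 0=user, 1=item, 2=rating):
# ratings = pd.DataFrame([[1, 10, 4.0], [1, 11, 2.0], [2, 10, 5.0]])
# mean_user(ratings)  # -> array([3., 3., 5.]): each row repeats its user's mean rating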
def user_avg(fn):
df = pd.DataFrame(fn)
ratings_user=pd.DataFrame(fn)
ratings_user=ratings_user.append(ratings_user)
user_average = df.groupby(by=0, as_index=False)[2].mean()
user_average = user_average.append(user_average)
global_average = np.mean(fn[:,2])
nfolds = 5
err_train=np.zeros(nfolds)
err_test=np.zeros(nfolds)
mae_train=np.zeros(nfolds)
mae_test=np.zeros(nfolds)
np.random.seed(1)
seqs=[x%nfolds for x in range(len(fn))]
np.random.shuffle(seqs)
start_time = time.time()
print ('Recommendations from all user averages:')
for fold in range(nfolds):
train_set=np.array([x!=fold for x in seqs])
test_set=np.array([x==fold for x in seqs])
train_user= | pd.DataFrame(ratings_user.iloc[train_set],columns=[0, 1, 2],dtype=int) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 29 20:28:41 2021
@author: Gunardilin
"""
import pandas as pd
from get_statement import open_in_excel
from datetime import datetime
def last_row(symbol, name):
requirements = [symbol.lower()=="total", name.isdigit()]
return all(requirements)
def get_foreigncompanies_info():
df_list = []
links = ["https://stockmarketmba.com/nonuscompaniesonusexchanges.php",
"https://stockmarketmba.com/listofadrs.php"]
for i in links:
df = pd.read_html(i)[0][['Symbol', 'Name', 'GICS Sector']]
if last_row(df.iloc[-1]['Symbol'], df.iloc[-1]['Name']):
df_list.append(df.iloc[:-1])
else:
df_list.append(df)
return | pd.concat(df_list) | pandas.concat |
import math
import gc
import os
import pickle
from math import pi
import numpy as np
import pandas as pd
import pytorch_lightning as pl
from omegaconf import DictConfig
from scipy.fft import fft
from scipy.signal import blackman
from scipy.signal import hilbert
from sklearn.model_selection import GroupKFold
from sklearn.preprocessing import RobustScaler
from torch.utils.data import DataLoader
from src.utils.technical_utils import load_obj
class VentilatorDataModule(pl.LightningDataModule):
def __init__(self, cfg: DictConfig):
super().__init__()
self.cfg = cfg
def prepare_data(self):
pass
def make_features(self, data):
if "pressure" not in data.columns:
data['pressure'] = 0
data['RC_sum'] = data['R'] + data['C']
data['RC_div'] = data['R'] / data['C']
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['RC'] = data['R'] + data['C']
data = pd.get_dummies(data)
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
# data['time_step_lag'] = data.groupby('breath_id')['time_step'].shift(1)
data['time_step_lag'] = data.groupby('breath_id')['time_step'].shift(2)
data['u_in_lag'] = data.groupby('breath_id')['u_in'].shift(1)
data['u_out_lag'] = data.groupby('u_out')['u_in'].shift(1)
return data.fillna(0)
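    # Note on make_features above (illustrative; R/C values assumed from the Kaggle
    # Ventilator Pressure Prediction data, not defined in this file): after casting R and C
    # to str, get_dummies yields indicator columns such as R_5/R_20/R_50, C_10/C_20/C_50
    # and the combined RC_* categories used as model inputs.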
def make_features1(self, data):
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['u_in_lag1'] = data.groupby('breath_id')['u_in'].shift(1)
data['u_out_lag1'] = data.groupby('breath_id')['u_out'].shift(1)
data['u_in_lag_back1'] = data.groupby('breath_id')['u_in'].shift(-1)
data['u_out_lag_back1'] = data.groupby('breath_id')['u_out'].shift(-1)
data['u_in_lag2'] = data.groupby('breath_id')['u_in'].shift(2)
data['u_out_lag2'] = data.groupby('breath_id')['u_out'].shift(2)
data['u_in_lag_back2'] = data.groupby('breath_id')['u_in'].shift(-2)
data['u_out_lag_back2'] = data.groupby('breath_id')['u_out'].shift(-2)
data['u_in_lag3'] = data.groupby('breath_id')['u_in'].shift(3)
data['u_out_lag3'] = data.groupby('breath_id')['u_out'].shift(3)
data['u_in_lag_back3'] = data.groupby('breath_id')['u_in'].shift(-3)
data['u_out_lag_back3'] = data.groupby('breath_id')['u_out'].shift(-3)
data['u_in_lag4'] = data.groupby('breath_id')['u_in'].shift(4)
data['u_out_lag4'] = data.groupby('breath_id')['u_out'].shift(4)
data['u_in_lag_back4'] = data.groupby('breath_id')['u_in'].shift(-4)
data['u_out_lag_back4'] = data.groupby('breath_id')['u_out'].shift(-4)
data = data.fillna(0)
data['breath_id__u_in__max'] = data.groupby(['breath_id'])['u_in'].transform('max')
data['breath_id__u_out__max'] = data.groupby(['breath_id'])['u_out'].transform('max')
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['u_in_diff3'] = data['u_in'] - data['u_in_lag3']
data['u_out_diff3'] = data['u_out'] - data['u_out_lag3']
data['u_in_diff4'] = data['u_in'] - data['u_in_lag4']
data['u_out_diff4'] = data['u_out'] - data['u_out_lag4']
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['R__C'] = data["R"].astype(str) + '__' + data["C"].astype(str)
data = pd.get_dummies(data)
data.drop(['id', 'breath_id'], axis=1, inplace=True)
if 'pressure' in data.columns:
data.drop('pressure', axis=1, inplace=True)
return data
def make_features2(self, data):
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['u_in_lag1'] = data.groupby('breath_id')['u_in'].shift(1)
data['u_out_lag1'] = data.groupby('breath_id')['u_out'].shift(1)
data['u_in_lag_back1'] = data.groupby('breath_id')['u_in'].shift(-1)
data['u_out_lag_back1'] = data.groupby('breath_id')['u_out'].shift(-1)
data['u_in_lag2'] = data.groupby('breath_id')['u_in'].shift(2)
data['u_out_lag2'] = data.groupby('breath_id')['u_out'].shift(2)
data['u_in_lag_back2'] = data.groupby('breath_id')['u_in'].shift(-2)
data['u_out_lag_back2'] = data.groupby('breath_id')['u_out'].shift(-2)
data['u_in_lag3'] = data.groupby('breath_id')['u_in'].shift(3)
data['u_out_lag3'] = data.groupby('breath_id')['u_out'].shift(3)
data['u_in_lag_back3'] = data.groupby('breath_id')['u_in'].shift(-3)
data['u_out_lag_back3'] = data.groupby('breath_id')['u_out'].shift(-3)
data['u_in_lag4'] = data.groupby('breath_id')['u_in'].shift(4)
data['u_out_lag4'] = data.groupby('breath_id')['u_out'].shift(4)
data['u_in_lag_back4'] = data.groupby('breath_id')['u_in'].shift(-4)
data['u_out_lag_back4'] = data.groupby('breath_id')['u_out'].shift(-4)
data = data.fillna(0)
data['breath_id__u_in__max'] = data.groupby(['breath_id'])['u_in'].transform('max')
data['breath_id__u_out__max'] = data.groupby(['breath_id'])['u_out'].transform('max')
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['u_in_diff3'] = data['u_in'] - data['u_in_lag3']
data['u_out_diff3'] = data['u_out'] - data['u_out_lag3']
data['u_in_diff4'] = data['u_in'] - data['u_in_lag4']
data['u_out_diff4'] = data['u_out'] - data['u_out_lag4']
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
data['breath_id_lag'] = data['breath_id'].shift(1).fillna(0)
data['breath_id_lag2'] = data['breath_id'].shift(2).fillna(0)
data['breath_id_lagsame'] = np.select([data['breath_id_lag'] == data['breath_id']], [1], 0)
data['breath_id_lag2same'] = np.select([data['breath_id_lag2'] == data['breath_id']], [1], 0)
data['breath_id__u_in_lag'] = data['u_in'].shift(1).fillna(0)
data['breath_id__u_in_lag'] = data['breath_id__u_in_lag'] * data['breath_id_lagsame']
data['breath_id__u_in_lag2'] = data['u_in'].shift(2).fillna(0)
data['breath_id__u_in_lag2'] = data['breath_id__u_in_lag2'] * data['breath_id_lag2same']
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['R__C'] = data["R"].astype(str) + '__' + data["C"].astype(str)
data = pd.get_dummies(data)
data.drop(['id', 'breath_id', 'one', 'count', 'breath_id_lag', 'breath_id_lag2', 'breath_id_lagsame',
'breath_id_lag2same'], axis=1, inplace=True)
if 'pressure' in data.columns:
data.drop('pressure', axis=1, inplace=True)
return data
def make_features3(self, data):
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['u_in_lag1'] = data.groupby('breath_id')['u_in'].shift(1)
data['u_out_lag1'] = data.groupby('breath_id')['u_out'].shift(1)
data['u_in_lag_back1'] = data.groupby('breath_id')['u_in'].shift(-1)
data['u_out_lag_back1'] = data.groupby('breath_id')['u_out'].shift(-1)
data['u_in_lag2'] = data.groupby('breath_id')['u_in'].shift(2)
data['u_out_lag2'] = data.groupby('breath_id')['u_out'].shift(2)
data['u_in_lag_back2'] = data.groupby('breath_id')['u_in'].shift(-2)
data['u_out_lag_back2'] = data.groupby('breath_id')['u_out'].shift(-2)
data['u_in_lag3'] = data.groupby('breath_id')['u_in'].shift(3)
data['u_out_lag3'] = data.groupby('breath_id')['u_out'].shift(3)
data['u_in_lag_back3'] = data.groupby('breath_id')['u_in'].shift(-3)
data['u_out_lag_back3'] = data.groupby('breath_id')['u_out'].shift(-3)
data['u_in_lag4'] = data.groupby('breath_id')['u_in'].shift(4)
data['u_out_lag4'] = data.groupby('breath_id')['u_out'].shift(4)
data['u_in_lag_back4'] = data.groupby('breath_id')['u_in'].shift(-4)
data['u_out_lag_back4'] = data.groupby('breath_id')['u_out'].shift(-4)
data = data.fillna(0)
data['breath_id__u_in__max'] = data.groupby(['breath_id'])['u_in'].transform('max')
data['breath_id__u_out__max'] = data.groupby(['breath_id'])['u_out'].transform('max')
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['u_in_diff3'] = data['u_in'] - data['u_in_lag3']
data['u_out_diff3'] = data['u_out'] - data['u_out_lag3']
data['u_in_diff4'] = data['u_in'] - data['u_in_lag4']
data['u_out_diff4'] = data['u_out'] - data['u_out_lag4']
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
data['breath_id_lag'] = data['breath_id'].shift(1).fillna(0)
data['breath_id_lag2'] = data['breath_id'].shift(2).fillna(0)
data['breath_id_lagsame'] = np.select([data['breath_id_lag'] == data['breath_id']], [1], 0)
data['breath_id_lag2same'] = np.select([data['breath_id_lag2'] == data['breath_id']], [1], 0)
data['breath_id__u_in_lag'] = data['u_in'].shift(1).fillna(0)
data['breath_id__u_in_lag'] = data['breath_id__u_in_lag'] * data['breath_id_lagsame']
data['breath_id__u_in_lag2'] = data['u_in'].shift(2).fillna(0)
data['breath_id__u_in_lag2'] = data['breath_id__u_in_lag2'] * data['breath_id_lag2same']
data['R_sum_c'] = (data['R'] + data['C']).astype(str)
data['R_mult_c'] = (data['R'] * data['C']).astype(str)
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['R__C'] = data["R"].astype(str) + '__' + data["C"].astype(str)
data = pd.get_dummies(data)
data[["15_in_sum", "15_in_min", "15_in_max", "15_in_mean"]] = (data \
.groupby('breath_id')['u_in'] \
.rolling(window=15, min_periods=1) \
.agg({"15_in_sum": "sum",
"15_in_min": "min",
"15_in_max": "max",
"15_in_mean": "mean"
# "15_in_std":"std"
}) \
.reset_index(level=0, drop=True))
data['u_in_lagback_diff1'] = data['u_in'] - data['u_in_lag_back1']
data['u_out_lagback_diff1'] = data['u_out'] - data['u_out_lag_back1']
data['u_in_lagback_diff2'] = data['u_in'] - data['u_in_lag_back2']
data['u_out_lagback_diff2'] = data['u_out'] - data['u_out_lag_back2']
data['ewm_u_in_mean'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).mean().reset_index(level=0, drop=True)
data['ewm_u_in_std'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).std().reset_index(level=0, drop=True)
data['ewm_u_in_corr'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).corr().reset_index(level=0, drop=True)
data['rolling_10_mean'] = data.groupby('breath_id')['u_in'].rolling(window=10, min_periods=1).mean().reset_index(
level=0, drop=True)
data['rolling_10_max'] = data.groupby('breath_id')['u_in'].rolling(window=10, min_periods=1).max().reset_index(level=0,
drop=True)
data['rolling_10_std'] = data.groupby('breath_id')['u_in'].rolling(window=10, min_periods=1).std().reset_index(level=0,
drop=True)
data['expand_mean'] = data.groupby('breath_id')['u_in'].expanding(2).mean().reset_index(level=0, drop=True)
data['expand_max'] = data.groupby('breath_id')['u_in'].expanding(2).max().reset_index(level=0, drop=True)
data['expand_std'] = data.groupby('breath_id')['u_in'].expanding(2).std().reset_index(level=0, drop=True)
data["u_in_rolling_mean2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).mean()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_mean4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).mean()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_mean10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).mean()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_max2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).max()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_max4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).max()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_max10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).max()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_min2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).min()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_min4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).min()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_min10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).min()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_std2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).std()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_std4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).std()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_std10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).std()[
"u_in"].reset_index(drop=True)
g = data.groupby('breath_id')['u_in']
data['ewm_u_in_mean'] = g.ewm(halflife=10).mean() \
.reset_index(level=0, drop=True)
data['ewm_u_in_std'] = g.ewm(halflife=10).std() \
.reset_index(level=0, drop=True)
data['ewm_u_in_corr'] = g.ewm(halflife=10).corr() \
.reset_index(level=0, drop=True)
data['rolling_10_mean'] = g.rolling(window=10, min_periods=1).mean() \
.reset_index(level=0, drop=True)
data['rolling_10_max'] = g.rolling(window=10, min_periods=1).max() \
.reset_index(level=0, drop=True)
data['rolling_10_std'] = g.rolling(window=10, min_periods=1).std() \
.reset_index(level=0, drop=True)
data['expand_mean'] = g.expanding(2).mean() \
.reset_index(level=0, drop=True)
data['expand_max'] = g.expanding(2).max() \
.reset_index(level=0, drop=True)
data['expand_std'] = g.expanding(2).std() \
.reset_index(level=0, drop=True)
data['u_in_lag_back10'] = data.groupby('breath_id')['u_in'].shift(-10)
data['u_out_lag_back10'] = data.groupby('breath_id')['u_out'].shift(-10)
data['time_step_diff'] = data.groupby('breath_id')['time_step'].diff().fillna(0)
### rolling window ts feats
data['ewm_u_in_mean'] = data.groupby('breath_id')['u_in'].ewm(halflife=9).mean().reset_index(level=0,
drop=True)
data['ewm_u_in_std'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).std().reset_index(level=0,
drop=True) ## could add covar?
data['ewm_u_in_corr'] = data.groupby('breath_id')['u_in'].ewm(halflife=15).corr().reset_index(level=0,
drop=True) # self umin corr
# data['ewm_u_in_corr'] = data.groupby('breath_id')['u_in'].ewm(halflife=6).corr(data.groupby('breath_id')["u_out"]).reset_index(level=0,drop=True) # corr with u_out # error
## rolling window of 15 periods
data[["15_in_sum", "15_in_min", "15_in_max", "15_in_mean", "15_out_std"]] = data.groupby('breath_id')[
'u_in'].rolling(window=15, min_periods=1).agg(
{"15_in_sum": "sum", "15_in_min": "min", "15_in_max": "max", "15_in_mean": "mean",
"15_in_std": "std"}).reset_index(level=0, drop=True)
# data[["45_in_sum","45_in_min","45_in_max","45_in_mean","45_out_std"]] = data.groupby('breath_id')['u_in'].rolling(window=45,min_periods=1).agg({"45_in_sum":"sum","45_in_min":"min","45_in_max":"max","45_in_mean":"mean","45_in_std":"std"}).reset_index(level=0,drop=True)
data[["45_in_sum", "45_in_min", "45_in_max", "45_in_mean", "45_out_std"]] = data.groupby('breath_id')[
'u_in'].rolling(window=45, min_periods=1).agg(
{"45_in_sum": "sum", "45_in_min": "min", "45_in_max": "max", "45_in_mean": "mean",
"45_in_std": "std"}).reset_index(level=0, drop=True)
data[["15_out_mean"]] = data.groupby('breath_id')['u_out'].rolling(window=15, min_periods=1).agg(
{"15_out_mean": "mean"}).reset_index(level=0, drop=True)
return data.fillna(0)
def make_features32(self, data):
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['u_in_lag1'] = data.groupby('breath_id')['u_in'].shift(1)
data['u_out_lag1'] = data.groupby('breath_id')['u_out'].shift(1)
data['u_in_lag_back1'] = data.groupby('breath_id')['u_in'].shift(-1)
data['u_out_lag_back1'] = data.groupby('breath_id')['u_out'].shift(-1)
data['u_in_lag2'] = data.groupby('breath_id')['u_in'].shift(2)
data['u_out_lag2'] = data.groupby('breath_id')['u_out'].shift(2)
data['u_in_lag_back2'] = data.groupby('breath_id')['u_in'].shift(-2)
data['u_out_lag_back2'] = data.groupby('breath_id')['u_out'].shift(-2)
data['u_in_lag3'] = data.groupby('breath_id')['u_in'].shift(3)
data['u_out_lag3'] = data.groupby('breath_id')['u_out'].shift(3)
data['u_in_lag_back3'] = data.groupby('breath_id')['u_in'].shift(-3)
data['u_out_lag_back3'] = data.groupby('breath_id')['u_out'].shift(-3)
data['u_in_lag4'] = data.groupby('breath_id')['u_in'].shift(4)
data['u_out_lag4'] = data.groupby('breath_id')['u_out'].shift(4)
data['u_in_lag_back4'] = data.groupby('breath_id')['u_in'].shift(-4)
data['u_out_lag_back4'] = data.groupby('breath_id')['u_out'].shift(-4)
data = data.fillna(0)
data['breath_id__u_in__max'] = data.groupby(['breath_id'])['u_in'].transform('max')
data['breath_id__u_out__max'] = data.groupby(['breath_id'])['u_out'].transform('max')
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['u_in_diff3'] = data['u_in'] - data['u_in_lag3']
data['u_out_diff3'] = data['u_out'] - data['u_out_lag3']
data['u_in_diff4'] = data['u_in'] - data['u_in_lag4']
data['u_out_diff4'] = data['u_out'] - data['u_out_lag4']
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
data['breath_id_lag'] = data['breath_id'].shift(1).fillna(0)
data['breath_id_lag2'] = data['breath_id'].shift(2).fillna(0)
data['breath_id_lagsame'] = np.select([data['breath_id_lag'] == data['breath_id']], [1], 0)
data['breath_id_lag2same'] = np.select([data['breath_id_lag2'] == data['breath_id']], [1], 0)
data['breath_id__u_in_lag'] = data['u_in'].shift(1).fillna(0)
data['breath_id__u_in_lag'] = data['breath_id__u_in_lag'] * data['breath_id_lagsame']
data['breath_id__u_in_lag2'] = data['u_in'].shift(2).fillna(0)
data['breath_id__u_in_lag2'] = data['breath_id__u_in_lag2'] * data['breath_id_lag2same']
data['R_sum_c'] = (data['R'] + data['C']).astype(str)
data['R_mult_c'] = (data['R'] * data['C']).astype(str)
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['R__C'] = data["R"].astype(str) + '__' + data["C"].astype(str)
data = | pd.get_dummies(data) | pandas.get_dummies |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from fbprophet import Prophet
from scipy import optimize, stats
from sklearn import preprocessing, svm
import statsmodels.api as sm
import pmdarima
import statsmodels.tsa.api as smt
import arch
## for deep learning
#from tensorflow.keras import models, layers, preprocessing as kprocessing
pd.plotting.register_matplotlib_converters()
###############################################################################
# TS ANALYSIS #
###############################################################################
'''
Plot ts with rolling mean and 95% confidence interval with rolling std.
:parameter
:param ts: pandas Series
:param window: num for rolling stats
'''
def plot_ts(ts, plot_ma=True, plot_intervals=True, window=30, figsize=(15,5)):
rolling_mean = ts.rolling(window=window).mean()
rolling_std = ts.rolling(window=window).std()
plt.figure(figsize=figsize)
plt.title(ts.name)
plt.plot(ts[window:], label='Actual values', color="black")
if plot_ma:
plt.plot(rolling_mean, 'g', label='MA'+str(window), color="red")
if plot_intervals:
lower_bound = rolling_mean - (1.96 * rolling_std)
upper_bound = rolling_mean + (1.96 * rolling_std)
plt.fill_between(x=ts.index, y1=lower_bound, y2=upper_bound, color='lightskyblue', alpha=0.4)
plt.legend(loc='best')
plt.grid(True)
plt.show()
'''
Test stationarity by:
    - running Augmented Dickey-Fuller test with 95% confidence
    - plotting mean and variance of a sample from data
    - plotting autocorrelation and partial autocorrelation
'''
def test_stationarity_acf_pacf(ts, sample=0.20, maxlag=30, figsize=(15,10)):
with plt.style.context(style='bmh'):
## set figure
fig = plt.figure(figsize=figsize)
ts_ax = plt.subplot2grid(shape=(2,2), loc=(0,0), colspan=2)
pacf_ax = plt.subplot2grid(shape=(2,2), loc=(1,0))
acf_ax = plt.subplot2grid(shape=(2,2), loc=(1,1))
## plot ts with mean/std of a sample from the first x%
dtf_ts = ts.to_frame(name="ts")
sample_size = int(len(ts)*sample)
dtf_ts["mean"] = dtf_ts["ts"].head(sample_size).mean()
dtf_ts["lower"] = dtf_ts["ts"].head(sample_size).mean() + dtf_ts["ts"].head(sample_size).std()
dtf_ts["upper"] = dtf_ts["ts"].head(sample_size).mean() - dtf_ts["ts"].head(sample_size).std()
dtf_ts["ts"].plot(ax=ts_ax, color="black", legend=False)
dtf_ts["mean"].plot(ax=ts_ax, legend=False, color="red", linestyle="--", linewidth=0.7)
ts_ax.fill_between(x=dtf_ts.index, y1=dtf_ts['lower'], y2=dtf_ts['upper'], color='lightskyblue', alpha=0.4)
dtf_ts["mean"].head(sample_size).plot(ax=ts_ax, legend=False, color="red", linewidth=0.9)
ts_ax.fill_between(x=dtf_ts.head(sample_size).index, y1=dtf_ts['lower'].head(sample_size), y2=dtf_ts['upper'].head(sample_size), color='lightskyblue')
## test stationarity (Augmented Dickey-Fuller)
adfuller_test = sm.tsa.stattools.adfuller(ts, maxlag=maxlag, autolag="AIC")
adf, p, critical_value = adfuller_test[0], adfuller_test[1], adfuller_test[4]["5%"]
p = round(p, 3)
conclusion = "Stationary" if p < 0.05 else "Non-Stationary"
ts_ax.set_title('Dickey-Fuller Test 95%: '+conclusion+' (p-value: '+str(p)+')')
## pacf (for AR) e acf (for MA)
smt.graphics.plot_pacf(ts, lags=maxlag, ax=pacf_ax, title="Partial Autocorrelation (for AR component)")
smt.graphics.plot_acf(ts, lags=maxlag, ax=acf_ax, title="Autocorrelation (for MA component)")
plt.tight_layout()
'''
Difference the ts.
:parameter
:param ts: pandas Series
:param lag: num - diff[t] = y[t] - y[t-lag]
    :param order: num - how many times it has to difference: diff[t]^order = diff[t] - diff[t-lag]
:param drop_na: logic - if True Na are dropped, else are filled with last observation
'''
def diff_ts(ts, lag=1, order=1, drop_na=True):
for i in range(order):
ts = ts - ts.shift(lag)
ts = ts[(pd.notnull(ts))] if drop_na is True else ts.fillna(method="bfill")
return ts
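# Added usage sketch (not part of the original module): shows how diff_ts is meant to be
# combined with the stationarity check above. `ts` is whatever pandas Series the caller supplies.
def _example_difference_and_check(ts):
    ts_diff = diff_ts(ts, lag=1, order=1, drop_na=True)  # first difference: y[t] - y[t-1]
    test_stationarity_acf_pacf(ts_diff, sample=0.20, maxlag=30)
    return ts_diff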
'''
'''
def undo_diff(ts, first_y, lag=1, order=1):
for i in range(order):
ts = np.r_[ts, ts[lag:]].cumsum()
return ts
'''
Run Granger test on 2 series
'''
def test_2ts_casuality(ts1, ts2, maxlag=30, figsize=(15,5)):
## prepare
dtf = ts1.to_frame(name=ts1.name)
dtf[ts2.name] = ts2
dtf.plot(figsize=figsize, grid=True, title=ts1.name+" vs "+ts2.name)
plt.show()
    ## test causality (Granger test)
granger_test = sm.tsa.stattools.grangercausalitytests(dtf, maxlag=maxlag, verbose=False)
for lag,tupla in granger_test.items():
p = np.mean([tupla[0][k][1] for k in tupla[0].keys()])
p = round(p, 3)
if p < 0.05:
conclusion = "Casuality with lag "+str(lag)+" (p-value: "+str(p)+")"
print(conclusion)
'''
Decompose ts into
- trend component = moving average
- seasonality
- residuals = y - (trend + seasonality)
:parameter
:param s: num - number of observations per season (ex. 7 for weekly seasonality with daily data, 12 for yearly seasonality with monthly data)
'''
def decompose_ts(ts, s=250, figsize=(20,13)):
decomposition = smt.seasonal_decompose(ts, freq=s)
trend = decomposition.trend
seasonal = decomposition.seasonal
residual = decomposition.resid
fig, ax = plt.subplots(nrows=4, ncols=1, sharex=True, sharey=False, figsize=figsize)
ax[0].plot(ts)
ax[0].set_title('Original')
ax[0].grid(True)
ax[1].plot(trend)
ax[1].set_title('Trend')
ax[1].grid(True)
ax[2].plot(seasonal)
ax[2].set_title('Seasonality')
ax[2].grid(True)
ax[3].plot(residual)
ax[3].set_title('Residuals')
ax[3].grid(True)
return {"trend":trend, "seasonal":seasonal, "residual":residual}
'''
Find outliers using sklearn unsupervised support vector machine.
:parameter
:param ts: pandas Series
:param perc: float - percentage of outliers to look for
:return
dtf with raw ts, outlier 1/0 (yes/no), numeric index
'''
def find_outliers(ts, perc=0.01, figsize=(15,5)):
## fit svm
scaler = preprocessing.StandardScaler()
ts_scaled = scaler.fit_transform(ts.values.reshape(-1,1))
model = svm.OneClassSVM(nu=perc, kernel="rbf", gamma=0.01)
model.fit(ts_scaled)
## dtf output
dtf_outliers = ts.to_frame(name="ts")
dtf_outliers["index"] = range(len(ts))
dtf_outliers["outlier"] = model.predict(ts_scaled)
dtf_outliers["outlier"] = dtf_outliers["outlier"].apply(lambda x: 1 if x==-1 else 0)
## plot
fig, ax = plt.subplots(figsize=figsize)
ax.set(title="Outliers detection: found "+str(sum(dtf_outliers["outlier"]==1)))
ax.plot(dtf_outliers["index"], dtf_outliers["ts"], color="black")
ax.scatter(x=dtf_outliers[dtf_outliers["outlier"]==1]["index"], y=dtf_outliers[dtf_outliers["outlier"]==1]['ts'], color='red')
ax.grid(True)
plt.show()
return dtf_outliers
'''
Interpolate outliers in a ts.
'''
def remove_outliers(ts, outliers_idx, figsize=(15,5)):
ts_clean = ts.copy()
ts_clean.loc[outliers_idx] = np.nan
ts_clean = ts_clean.interpolate(method="linear")
ax = ts.plot(figsize=figsize, color="red", alpha=0.5, title="Remove outliers", label="original", legend=True)
ts_clean.plot(ax=ax, grid=True, color="black", label="interpolated", legend=True)
plt.show()
return ts_clean
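# Added usage sketch chaining the two helpers above: flag ~1% of points as outliers, then interpolate them.
def _example_clean_outliers(ts, perc=0.01):
    dtf_outliers = find_outliers(ts, perc=perc)
    outliers_idx = dtf_outliers[dtf_outliers["outlier"] == 1].index
    return remove_outliers(ts, outliers_idx=outliers_idx)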
###############################################################################
# MODEL DESIGN & TESTING - FORECASTING #
###############################################################################
'''
Split train/test from any given data point.
:parameter
:param ts: pandas Series
:param exog: array len(ts) x n regressors
:param test: num or str - test size (ex. 0.20) or index position (ex. "yyyy-mm-dd", 1000)
:return
ts_train, ts_test, exog_train, exog_test
'''
def split_train_test(ts, exog=None, test=0.20, plot=True, figsize=(15,5)):
## define splitting point
if type(test) is float:
split = int(len(ts)*(1-test))
perc = test
elif type(test) is str:
split = ts.reset_index()[ts.reset_index().iloc[:,0]==test].index[0]
perc = round(len(ts[split:])/len(ts), 2)
else:
split = test
perc = round(len(ts[split:])/len(ts), 2)
print("--- splitting at index: ", split, "|", ts.index[split], "| test size:", perc, " ---")
## split ts
ts_train = ts.head(split)
ts_test = ts.tail(len(ts)-split)
if plot is True:
fig, ax = plt.subplots(nrows=1, ncols=2, sharex=False, sharey=True, figsize=figsize)
ts_train.plot(ax=ax[0], grid=True, title="Train", color="black")
ts_test.plot(ax=ax[1], grid=True, title="Test", color="black")
ax[0].set(xlabel=None)
ax[1].set(xlabel=None)
plt.show()
## split exog
if exog is not None:
exog_train = exog[0:split]
exog_test = exog[split:]
return ts_train, ts_test, exog_train, exog_test
else:
return ts_train, ts_test
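# Added usage sketch: hold out the last 20% of observations; `test` can also be a date string or an integer position.
def _example_split(ts):
    return split_train_test(ts, exog=None, test=0.20, plot=False)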
'''
Compute the confidence interval for predictions:
[y[t+h] +- (c*σ*√h)]
:parameter
:param lst_values: list or array
:param error_std: σ (standard dev of residuals)
:param conf: num - confidence level (90%, 95%, 99%)
:return
array with 2 columns (upper and lower bounds)
'''
def utils_conf_int(lst_values, error_std, conf=0.95):
lst_values = list(lst_values) if type(lst_values) != list else lst_values
c = round( stats.norm.ppf(1-(1-conf)/2), 2)
lst_ci = []
for x in lst_values:
lst_x = lst_values[:lst_values.index(x)+1]
h = len(lst_x)
ci = [x - (c*error_std*np.sqrt(h)), x + (c*error_std*np.sqrt(h))]
lst_ci.append(ci)
return np.array(lst_ci)
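# Added worked example of the interval formula [y[t+h] +- c*sigma*sqrt(h)] with made-up numbers:
# with conf=0.95, c=1.96, so the first value 100 gets 100 +- 1.96*2.0*sqrt(1) = [96.08, 103.92].
def _example_conf_int():
    return utils_conf_int([100, 102, 101], error_std=2.0, conf=0.95)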
'''
Evaluation metrics for predictions.
:parameter
:param dtf: DataFrame with columns "ts", "model", "forecast", and "lower"/"upper" (if available)
:return
dtf with columns "ts", "model", "residuals", "lower", "forecast", "upper", "error"
'''
def utils_evaluate_ts_model(dtf, conf=0.95, title=None, plot=True, figsize=(20,13)):
try:
## residuals from fitting
### add column
dtf["residuals"] = dtf["ts"] - dtf["model"]
### kpi
residuals_mean = dtf["residuals"].mean()
residuals_std = dtf["residuals"].std()
## forecasting error
### add column
dtf["error"] = dtf["ts"] - dtf["forecast"]
dtf["error_pct"] = dtf["error"] / dtf["ts"]
### kpi
error_mean = dtf["error"].mean()
error_std = dtf["error"].std()
mae = dtf["error"].apply(lambda x: np.abs(x)).mean() #mean absolute error
mape = dtf["error_pct"].apply(lambda x: np.abs(x)).mean() #mean absolute error %
mse = dtf["error"].apply(lambda x: x**2).mean() #mean squared error
rmse = np.sqrt(mse) #root mean squared error
## interval
if "upper" not in dtf.columns:
print("--- computing confidence interval ---")
dtf["lower"], dtf["upper"] = [np.nan, np.nan]
dtf.loc[dtf["forecast"].notnull(), ["lower","upper"]] = utils_conf_int(
dtf[dtf["forecast"].notnull()]["forecast"], residuals_std, conf)
## plot
if plot is True:
fig = plt.figure(figsize=figsize)
fig.suptitle(title, fontsize=20)
ax1 = fig.add_subplot(2,2, 1)
ax2 = fig.add_subplot(2,2, 2, sharey=ax1)
ax3 = fig.add_subplot(2,2, 3)
ax4 = fig.add_subplot(2,2, 4)
### training
dtf[pd.notnull(dtf["model"])][["ts","model"]].plot(color=["black","green"], title="Model", grid=True, ax=ax1)
ax1.set(xlabel=None)
### test
dtf[pd.isnull(dtf["model"])][["ts","forecast"]].plot(color=["black","red"], title="Forecast", grid=True, ax=ax2)
ax2.fill_between(x=dtf.index, y1=dtf['lower'], y2=dtf['upper'], color='b', alpha=0.2)
ax2.set(xlabel=None)
### residuals
dtf[["residuals","error"]].plot(ax=ax3, color=["green","red"], title="Residuals", grid=True)
ax3.set(xlabel=None)
### residuals distribution
dtf[["residuals","error"]].plot(ax=ax4, color=["green","red"], kind='kde', title="Residuals Distribution", grid=True)
ax4.set(ylabel=None)
plt.show()
print("Training --> Residuals mean:", np.round(residuals_mean), " | std:", np.round(residuals_std))
print("Test --> Error mean:", np.round(error_mean), " | std:", np.round(error_std),
" | mae:",np.round(mae), " | mape:",np.round(mape*100), "% | mse:",np.round(mse), " | rmse:",np.round(rmse))
return dtf[["ts", "model", "residuals", "lower", "forecast", "upper", "error"]]
except Exception as e:
print("--- got error ---")
print(e)
'''
Generate dates to index predictions.
:parameter
:param start: str - "yyyy-mm-dd"
:param end: str - "yyyy-mm-dd"
:param n: num - length of index
:param freq: None or str - 'B' business day, 'D' daily, 'W' weekly, 'M' monthly, 'A' annual, 'Q' quarterly
'''
def utils_generate_indexdate(start, end=None, n=None, freq="D"):
if end is not None:
index = pd.date_range(start=start, end=end, freq=freq)
else:
index = pd.date_range(start=start, periods=n, freq=freq)
index = index[1:]
# print("start ", start)
# print("end ", end)
# print("index --", index)
print("--- generating index date --> start:", index[0], "| end:", index[-1], "| len:", len(index), "---")
return index
'''
Plot unknown future forecast and produce conf_int with residual_std and pred_int if an error_std is given.
:parameter
:param dtf: DataFrame with columns "ts", "model", "forecast", and "lower"/"upper" (if available)
:param conf: num - confidence level (90%, 95%, 99%)
:param zoom: int - plots the focus on the last zoom days
:return
dtf with columns "ts", "model", "residuals", "lower", "forecast", "upper" (No error)
'''
def utils_add_forecast_int(dtf, conf=0.95, plot=True, zoom=30, figsize=(15,5)):
## residuals from fitting
### add column
dtf["residuals"] = dtf["ts"] - dtf["model"]
### kpi
residuals_std = dtf["residuals"].std()
## interval
if "upper" not in dtf.columns:
print("--- computing confidence interval ---")
dtf["lower"], dtf["upper"] = [np.nan, np.nan]
dtf.loc[dtf["forecast"].notnull(), ["lower","upper"]] = utils_conf_int(
dtf[dtf["forecast"].notnull()]["forecast"], residuals_std, conf)
## plot
if plot is True:
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize)
### entire series
dtf[["ts","forecast"]].plot(color=["black","red"], grid=True, ax=ax[0], title="History + Future")
ax[0].fill_between(x=dtf.index, y1=dtf['lower'], y2=dtf['upper'], color='b', alpha=0.2)
### focus on last
first_idx = dtf[pd.notnull(dtf["forecast"])].index[0]
first_loc = dtf.index.tolist().index(first_idx)
zoom_idx = dtf.index[first_loc-zoom]
dtf.loc[zoom_idx:][["ts","forecast"]].plot(color=["black","red"], grid=True, ax=ax[1], title="Zoom on the last "+str(zoom)+" observations")
ax[1].fill_between(x=dtf.loc[zoom_idx:].index, y1=dtf.loc[zoom_idx:]['lower'], y2=dtf.loc[zoom_idx:]['upper'], color='b', alpha=0.2)
plt.show()
return dtf[["ts", "model", "residuals", "lower", "forecast", "upper"]]
###############################################################################
# RANDOM WALK #
###############################################################################
'''
Generate a Random Walk process.
:parameter
:param y0: num - starting value
:param n: num - length of process
:param ymin: num - limit
:param ymax: num - limit
'''
def utils_generate_rw(y0, n, sigma, ymin=None, ymax=None):
rw = [y0]
for t in range(1, n):
yt = rw[t-1] + np.random.normal(0,sigma)
if (ymax is not None) and (yt > ymax):
yt = rw[t-1] - abs(np.random.normal(0,sigma))
elif (ymin is not None) and (yt < ymin):
yt = rw[t-1] + abs(np.random.normal(0,sigma))
rw.append(yt)
return rw
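# Added usage sketch: a 100-step random walk starting at 50, kept inside [0, 100] (arbitrary illustrative numbers).
def _example_random_walk():
    return utils_generate_rw(y0=50, n=100, sigma=1.0, ymin=0, ymax=100)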
'''
Simulate Random Walk from params of a given ts:
y[t+1] = y[t] + wn~(0,σ)
:return
dtf with columns "ts", "model", "residuals", "lower", "forecast", "upper", "error"
'''
def simulate_rw(ts_train, ts_test, conf=0.95, figsize=(15,10)):
## simulate train
diff_ts = ts_train - ts_train.shift(1)
rw = utils_generate_rw(y0=ts_train[0], n=len(ts_train), sigma=diff_ts.std(), ymin=ts_train.min(), ymax=ts_train.max())
dtf_train = ts_train.to_frame(name="ts").merge(pd.DataFrame(rw, index=ts_train.index, columns=["model"]), how='left', left_index=True, right_index=True)
## test
rw = utils_generate_rw(y0=ts_train[-1], n=len(ts_test), sigma=diff_ts.std(), ymin=ts_train.min(), ymax=ts_train.max())
dtf_test = ts_test.to_frame(name="ts").merge(pd.DataFrame(rw, index=ts_test.index, columns=["forecast"]),
how='left', left_index=True, right_index=True)
## evaluate
dtf = dtf_train.append(dtf_test)
dtf = utils_evaluate_ts_model(dtf, conf=conf, figsize=figsize, title="Random Walk Simulation")
return dtf
'''
Forecast unknown future.
:parameter
:param ts: pandas series
:param pred_ahead: number of observations to forecast (ex. pred_ahead=30)
:param end: string - date to forecast (ex. end="2016-12-31")
:param freq: None or str - 'B' business day, 'D' daily, 'W' weekly, 'M' monthly, 'A' annual, 'Q' quarterly
:param zoom: for plotting
:return
dtf with columns "ts", "model", "residuals", "lower", "forecast", "upper" (No error)
'''
def forecast_rw(ts, pred_ahead=None, end=None, freq="D", conf=0.95, zoom=30, figsize=(15,5)):
## fit
diff_ts = ts - ts.shift(1)
sigma = diff_ts.std()
rw = utils_generate_rw(y0=ts[0], n=len(ts), sigma=sigma, ymin=ts.min(), ymax=ts.max())
dtf = ts.to_frame(name="ts").merge(pd.DataFrame(rw, index=ts.index, columns=["model"]),
how='left', left_index=True, right_index=True)
## index
index = utils_generate_indexdate(start=ts.index[-1], end=end, n=pred_ahead, freq=freq)
## forecast
preds = utils_generate_rw(y0=ts[-1], n=len(index), sigma=sigma, ymin=ts.min(), ymax=ts.max())
dtf = dtf.append(pd.DataFrame(data=preds, index=index, columns=["forecast"]))
## add intervals and plot
dtf = utils_add_forecast_int(dtf, conf=conf, zoom=zoom)
return dtf
###############################################################################
# AUTOREGRESSIVE #
###############################################################################
'''
Fits Holt-Winters Exponential Smoothing:
y[t+i] = (level[t] + i*trend[t]) * seasonality[t]
:parameter
:param ts_train: pandas timeseries
:param ts_test: pandas timeseries
:param trend: str - "additive" (linear), "multiplicative" (non-linear)
:param seasonal: str - "additive" (ex. +100 every 7 days), "multiplicative" (ex. x10 every 7 days)
:param s: num - number of observations per seasonal (ex. 7 for weekly seasonality with daily data, 12 for yearly seasonality with monthly data)
:param alpha: num - the alpha value of the simple exponential smoothing (ex 0.94)
:return
    dtf with predictions and the model
'''
def fit_expsmooth(ts_train, ts_test, trend="additive", seasonal="multiplicative", s=None, alpha=0.94, conf=0.95, figsize=(15,10)):
## checks
check_seasonality = "Seasonal parameters: No Seasonality" if (seasonal is None) & (s is None) else "Seasonal parameters: "+str(seasonal)+" Seasonality every "+str(s)+" observations"
print(check_seasonality)
## train
#alpha = alpha if s is None else 2/(s+1)
model = smt.ExponentialSmoothing(ts_train, trend=trend, seasonal=seasonal, seasonal_periods=s).fit(smoothing_level=alpha)
dtf_train = ts_train.to_frame(name="ts")
dtf_train["model"] = model.fittedvalues
## test
dtf_test = ts_test.to_frame(name="ts")
dtf_test["forecast"] = model.predict(start=len(ts_train), end=len(ts_train)+len(ts_test)-1)
## evaluate
dtf = dtf_train.append(dtf_test)
dtf = utils_evaluate_ts_model(dtf, conf=conf, figsize=figsize, title="Holt-Winters ("+str(alpha)+")")
return dtf, model
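# Added usage sketch (assumptions: daily data with weekly seasonality, hence s=7, and strictly
# positive values, which multiplicative seasonality requires).
def _example_expsmooth(ts_train, ts_test):
    return fit_expsmooth(ts_train, ts_test, trend="additive", seasonal="multiplicative", s=7, alpha=0.94)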
'''
Fits SARIMAX (Seasonal ARIMA with External Regressors):
y[t+1] = (c + a0*y[t] + a1*y[t-1] +...+ ap*y[t-p]) + (e[t] + b1*e[t-1] + b2*e[t-2] +...+ bq*e[t-q]) + (B*X[t])
:parameter
:param ts_train: pandas timeseries
:param ts_test: pandas timeseries
:param order: tuple - ARIMA(p,d,q) --> p: lag order (AR), d: degree of differencing (to remove trend), q: order of moving average (MA)
:param seasonal_order: tuple - (P,D,Q,s) --> s: number of observations per seasonal (ex. 7 for weekly seasonality with daily data, 12 for yearly seasonality with monthly data)
:param exog_train: pandas dataframe or numpy array
:param exog_test: pandas dataframe or numpy array
:return
    dtf with predictions and the model
'''
def fit_sarimax(ts_train, ts_test, order=(1,0,1), seasonal_order=(0,0,0,0), exog_train=None, exog_test=None, conf=0.95, figsize=(15,10)):
## checks
check_trend = "Trend parameters: No differencing" if order[1] == 0 else "Trend parameters: d="+str(order[1])
print(check_trend)
check_seasonality = "Seasonal parameters: No Seasonality" if (seasonal_order[3] == 0) & (np.sum(seasonal_order[0:3]) == 0) else "Seasonal parameters: Seasonality every "+str(seasonal_order[3])+" observations"
print(check_seasonality)
check_exog = "Exog parameters: Not given" if (exog_train is None) & (exog_test is None) else "Exog parameters: number of regressors="+str(exog_train.shape[1])
print(check_exog)
## train
model = smt.SARIMAX(ts_train, order=order, seasonal_order=seasonal_order, exog=exog_train, enforce_stationarity=False, enforce_invertibility=False).fit()
dtf_train = ts_train.to_frame(name="ts")
dtf_train["model"] = model.fittedvalues
## test
dtf_test = ts_test.to_frame(name="ts")
dtf_test["forecast"] = model.predict(start=len(ts_train), end=len(ts_train)+len(ts_test)-1, exog=exog_test)
## add conf_int
ci = model.get_forecast(len(ts_test)).conf_int(1-conf).values
dtf_test["lower"], dtf_test["upper"] = ci[:,0], ci[:,1]
## evaluate
dtf = dtf_train.append(dtf_test)
title = "ARIMA "+str(order) if exog_train is None else "ARIMAX "+str(order)
title = "S"+title+" x "+str(seasonal_order) if np.sum(seasonal_order) > 0 else title
dtf = utils_evaluate_ts_model(dtf, conf=conf, figsize=figsize, title=title)
return dtf, model
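# Added usage sketch: SARIMA(1,1,1)x(1,0,1,7), i.e. one difference plus weekly seasonality (daily data assumed).
def _example_sarimax(ts_train, ts_test):
    return fit_sarimax(ts_train, ts_test, order=(1,1,1), seasonal_order=(1,0,1,7))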
'''
Find best Seasonal-ARIMAX parameters.
:parameter
:param ts: pandas timeseries
:param exog: pandas dataframe or numpy array
:param s: num - number of observations per seasonal (ex. 7 for weekly seasonality with daily data, 12 for yearly seasonality with monthly data)
:return
    summary of the best model found
'''
def find_best_sarimax(ts, seasonal=True, stationary=False, s=1, exog=None,
max_p=10, max_d=3, max_q=10,
max_P=10, max_D=3, max_Q=10):
best_model = pmdarima.auto_arima(ts, exogenous=exog,
seasonal=seasonal, stationary=stationary, m=s,
information_criterion='aic', max_order=20,
max_p=max_p, max_d=max_d, max_q=max_q,
max_P=max_P, max_D=max_D, max_Q=max_Q,
error_action='ignore')
print("best model --> (p, d, q):", best_model.order, " and (P, D, Q, s):", best_model.seasonal_order)
return best_model.summary()
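# Added usage sketch (note: the function above returns the model summary, not the fitted model object).
def _example_autoarima(ts_train):
    return find_best_sarimax(ts_train, seasonal=True, stationary=False, s=7, max_p=3, max_d=2, max_q=3)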
'''
Fits GARCH (Generalized Autoregressive Conditional Heteroskedasticity):
y[t+1] = m + e[t+1]
e[t+1] = σ[t+1] * wn~(0,1)
    σ²[t+1] = c + (a0*σ²[t] + a1*σ²[t-1] +...+ ap*σ²[t-p]) + (b0*e²[t] + b1*e²[t-1] + b2*e²[t-2] +...+ bq*e²[t-q])
:parameter
:param ts: pandas timeseries
:param order: tuple - ARIMA(p,d,q) --> p:lag order (AR), d:degree of differencing (to remove trend), q:order of moving average (MA)
'''
def fit_garch(ts_train, ts_test, order=(1,0,1), seasonal_order=(0,0,0,0), exog_train=None, exog_test=None, conf=0.95, figsize=(15,10)):
## train
arima = smt.SARIMAX(ts_train, order=order, seasonal_order=seasonal_order, exog=exog_train, enforce_stationarity=False, enforce_invertibility=False).fit()
garch = arch.arch_model(arima.resid, p=order[0], o=order[1], q=order[2], x=exog_train, dist='StudentsT', power=2.0, mean='Constant', vol='GARCH')
model = garch.fit(update_freq=seasonal_order[3])
dtf_train = ts_train.to_frame(name="ts")
dtf_train["model"] = model.conditional_volatility
## test
dtf_test = ts_test.to_frame(name="ts")
dtf_test["forecast"] = model.forecast(horizon=len(ts_test))
## evaluate
dtf = dtf_train.append(dtf_test)
title = "GARCH ("+str(order[0])+","+str(order[2])+")" if order[0] != 0 else "ARCH ("+str(order[2])+")"
dtf = utils_evaluate_ts_model(dtf, conf=conf, figsize=figsize, title=title)
return dtf, model
'''
Forecast unknown future.
:parameter
:param ts: pandas series
:param model: model object
:param pred_ahead: number of observations to forecast (ex. pred_ahead=30)
:param end: string - date to forecast (ex. end="2016-12-31")
:param freq: None or str - 'B' business day, 'D' daily, 'W' weekly, 'M' monthly, 'A' annual, 'Q' quarterly
:param zoom: for plotting
'''
def forecast_arima(ts, model, pred_ahead=None, end=None, freq="D", conf=0.95, zoom=30, figsize=(15,5)):
## fit
model = model.fit()
dtf = ts.to_frame(name="ts")
dtf["model"] = model.fittedvalues
dtf["residuals"] = dtf["ts"] - dtf["model"]
## index
index = utils_generate_indexdate(start=ts.index[-1], end=end, n=pred_ahead, freq=freq)
## forecast
preds = model.get_forecast(len(index))
dtf_preds = preds.predicted_mean.to_frame(name="forecast")
## add conf_int
ci = preds.conf_int(1-conf).values
dtf_preds["lower"], dtf_preds["upper"] = ci[:,0], ci[:,1]
## add intervals and plot
dtf = dtf.append(dtf_preds)
dtf = utils_add_forecast_int(dtf, conf=conf, zoom=zoom)
return dtf
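# Added usage sketch: forecast 30 unknown future days; forecast_arima fits the (still unfitted) SARIMAX it receives.
def _example_forecast_future(ts):
    arima = smt.SARIMAX(ts, order=(1,1,1), seasonal_order=(1,0,1,7))
    return forecast_arima(ts, arima, pred_ahead=30, freq="D")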
###############################################################################
# RNN #
###############################################################################
'''
Plot loss and metrics of keras training.
'''
def utils_plot_keras_training(training):
metrics = [k for k in training.history.keys() if ("loss" not in k) and ("val" not in k)]
fig, ax = plt.subplots(nrows=1, ncols=2, sharey=True, figsize=(15,3))
## training
ax[0].set(title="Training")
ax11 = ax[0].twinx()
ax[0].plot(training.history['loss'], color='black')
ax[0].set_xlabel('Epochs')
ax[0].set_ylabel('Loss', color='black')
for metric in metrics:
ax11.plot(training.history[metric], label=metric)
ax11.set_ylabel("Score", color='steelblue')
ax11.legend()
## validation
ax[1].set(title="Validation")
ax22 = ax[1].twinx()
ax[1].plot(training.history['val_loss'], color='black')
ax[1].set_xlabel('Epochs')
ax[1].set_ylabel('Loss', color='black')
for metric in metrics:
ax22.plot(training.history['val_'+metric], label=metric)
ax22.set_ylabel("Score", color="steelblue")
plt.show()
'''
Preprocess a ts partitioning into X and y.
:parameter
:param ts: pandas timeseries
:param s: num - number of observations per seasonal (ex. 7 for weekly seasonality with daily data, 12 for yearly seasonality with monthly data)
:param scaler: sklearn scaler object - if None is fitted
:param exog: pandas dataframe or numpy array
:return
X, y, scaler
'''
def utils_preprocess_ts(ts, s, scaler=None, exog=None):
## scale
if scaler is None:
scaler = preprocessing.MinMaxScaler(feature_range=(0,1))
ts_preprocessed = scaler.fit_transform(ts.values.reshape(-1,1)).reshape(-1)
## create X,y for train
ts_preprocessed = kprocessing.sequence.TimeseriesGenerator(data=ts_preprocessed,
targets=ts_preprocessed,
length=s, batch_size=1)
lst_X, lst_y = [], []
for i in range(len(ts_preprocessed)):
xi, yi = ts_preprocessed[i]
lst_X.append(xi)
lst_y.append(yi)
X = np.array(lst_X)
y = np.array(lst_y)
return X, y, scaler
'''
Get fitted values.
'''
def utils_fitted_lstm(ts, model, scaler, exog=None):
## scale
ts_preprocessed = scaler.fit_transform(ts.values.reshape(-1,1)).reshape(-1)
## create Xy, predict = fitted
s = model.input_shape[-1]
lst_fitted = [np.nan]*s
for i in range(len(ts_preprocessed)):
end_ix = i + s
if end_ix > len(ts_preprocessed)-1:
break
X = ts_preprocessed[i:end_ix]
X = np.array(X)
X = np.reshape(X, (1,1,X.shape[0]))
fit = model.predict(X)
fit = scaler.inverse_transform(fit)[0][0]
lst_fitted.append(fit)
return np.array(lst_fitted)
'''
Predict ts using previous predictions.
'''
def utils_predict_lstm(ts, model, scaler, pred_ahead, exog=None):
## scale
s = model.input_shape[-1]
ts_preprocessed = list(scaler.fit_transform(ts[-s:].values.reshape(-1,1)))
## predict, append, re-predict
lst_preds = []
for i in range(pred_ahead):
X = np.array(ts_preprocessed[len(ts_preprocessed)-s:])
X = np.reshape(X, (1,1,X.shape[0]))
pred = model.predict(X)
ts_preprocessed.append(pred)
pred = scaler.inverse_transform(pred)[0][0]
lst_preds.append(pred)
return np.array(lst_preds)
'''
Fit Long short-term memory neural network.
:parameter
:param ts: pandas timeseries
:param exog: pandas dataframe or numpy array
:param s: num - number of observations per seasonal (ex. 7 for weekly seasonality with daily data, 12 for yearly seasonality with monthly data)
:return
generator, scaler
'''
def fit_lstm(ts_train, ts_test, model, exog=None, s=20, epochs=100, conf=0.95, figsize=(15,5)):
## check
print("Seasonality: using the last", s, "observations to predict the next 1")
## preprocess train
X_train, y_train, scaler = utils_preprocess_ts(ts_train, scaler=None, exog=exog, s=s)
## lstm
if model is None:
model = models.Sequential()
model.add( layers.LSTM(input_shape=X_train.shape[1:], units=50, activation='relu', return_sequences=False) )
model.add( layers.Dense(1) )
model.compile(optimizer='adam', loss='mean_absolute_error')
print(model.summary())
## train
verbose = 0 if epochs > 1 else 1
training = model.fit(x=X_train, y=y_train, batch_size=1, epochs=epochs, shuffle=True, verbose=verbose, validation_split=0.3)
dtf_train = ts_train.to_frame(name="ts")
dtf_train["model"] = utils_fitted_lstm(ts_train, training.model, scaler, exog)
dtf_train["model"] = dtf_train["model"].fillna(method='bfill')
## test
preds = utils_predict_lstm(ts_train[-s:], training.model, scaler, pred_ahead=len(ts_test), exog=None)
dtf_test = ts_test.to_frame(name="ts").merge( | pd.DataFrame(data=preds, index=ts_test.index, columns=["forecast"]) | pandas.DataFrame |
########################################################
# <NAME> - drigols #
# Last update: 21/09/2021 #
########################################################
if __name__ =="__main__":
from matplotlib import pyplot as plt
import pandas as pd
students_dic = {
'Grade':[50, 50, 46, 95, 50, 5, 57, 42, 26, 72, 78, 60, 40, 17, 85],
'Salary':[50000, 54000, 50000, 189000, 55000, 40000, 59000, 42000, 47000, 78000, 119000, 95000, 49000, 29000, 130000]
}
df = | pd.DataFrame(students_dic) | pandas.DataFrame |
import pandas as pd
import numpy as np
import fileinput
import itertools
from .PrintUtil import *
from .KBaseObjUtil import *
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 50)
pd.set_option('display.width', 1000)
pd.set_option('display.max_colwidth', 20)
class HTMLBuilder:
html_template_filepath = '/kb/module/lib/AD_VINA/util/index.html'
TAG_L = [tag + '_TAG' for tag in ['PROTEIN', 'WARNINGS', 'CMDS', 'JSON', 'COLUMNS']]
def __init__(self, ps: ProteinStructure, cs: CompoundSet, cmd_l: list):
self.ps = ps
self.cs = cs
self.cmd_l = cmd_l
# html dir and file
self.html_dir = os.path.join(VarStash.shared_folder, 'html_dir' + VarStash.suffix)
os.mkdir(self.html_dir)
self.html_filepath = os.path.join(self.html_dir, os.path.basename(self.html_template_filepath))
shutil.copyfile(self.html_template_filepath, self.html_filepath)
# html file content
self._build()
def _build(self):
self.replacements = {}
self._build_protein()
self._build_cmds()
self._build_table()
self._build_warnings()
for line in fileinput.input(self.html_filepath, inplace=True):
tag_hit = None
for tag in self.TAG_L:
if tag in line and tag in self.replacements:
tag_hit = tag
if tag_hit:
print(line.replace(tag_hit, self.replacements[tag_hit]), end='')
else:
print(line, end='')
def _build_protein(self):
self.replacements['PROTEIN_TAG'] = self.ps.name
def _build_cmds(self):
if len(self.cmd_l) > 0:
self.replacements['CMDS_TAG'] = '\n'.join([f"<p><code>{cmd}</code></p>" for cmd in self.cmd_l])
else:
self.replacements['CMDS_TAG'] = (
"<p><i>No <code>vina</code> commands were run. "
"This could be because no MOL2 files were found in ZINC for any of the compounds, "
"no MOL2 files successfully converted to PDBQT, "
"or there are no compounds in the CompoundSet object.</i></p>"
)
def _build_table(self):
"""
(1) Build docking table
(2) Merge with compound table
(3) Dump
"""
col_title_compound = ['(User Entered) Id', '(User Entered) Name', 'Formula', 'SMILES', 'InChIKey', 'Mass (g/mol)', 'Charge (C)', 'ΔG<sub>f</sub>° (kJ/mol)']
col_title_dock = ['Mode', 'Affinity (kcal/mol)', 'RMSD L.B.', 'RMSD U.B.']
#-------------------------------------------------------------------------------------------
##
## docking table
attr_dock = ['mode', 'affinity', 'rmsd_lb', 'rmsd_ub']
df_l = []
for id, log_filepath in zip(self.cs.get_attr_l('id'), self.cs.get_attr_l('log_filepath')):
if isinstance(log_filepath, float) and np.isnan(log_filepath):
continue
df = pd.DataFrame(columns=attr_dock)
df = self._parse_log(log_filepath, df, id)
df_l.append(df)
df_dock = | pd.concat(df_l) | pandas.concat |
import numpy as np
from nodevectors import Node2Vec
import gensim
import csrgraph as cg
import time
import pandas as pd
class Node2VecResizable(Node2Vec):
def _deepwalk(self, G, node_names):
# Adjacency matrix
walks_t = time.time()
if self.verbose:
print("Making walks...", end=" ")
self.walks = G.random_walks(
walklen=self.walklen,
epochs=self.epochs,
return_weight=self.return_weight,
neighbor_weight=self.neighbor_weight,
)
if self.verbose:
print(f"Done, T={time.time() - walks_t:.2f}")
print("Mapping Walk Names...", end=" ")
map_t = time.time()
self.walks = | pd.DataFrame(self.walks) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
"""
import os
from datapackage import Package
import json
import pandas as pd
from oemof.tabular.datapackage import building
def _get_hydro_inflow(inflow_dir=None):
""" Adapted from:
https://github.com/FRESNA/vresutils/blob/master/vresutils/hydro.py
"""
def read_inflow(country):
return pd.read_csv(
os.path.join(inflow_dir, "Hydro_Inflow_{}.csv".format(country)),
parse_dates={"date": [0, 1, 2]},
).set_index("date")["Inflow [GWh]"]
europe = [
"AT",
"BA",
"BE",
"BG",
"CH",
"CZ",
"DE",
"ES",
"FI",
"FR",
"HR",
"HU",
"IE",
"IT",
"KV",
"LT",
"LV",
"ME",
"MK",
"NL",
"NO",
"PL",
"PT",
"RO",
"RS",
"SE",
"SI",
"SK",
"UK",
]
hyd = pd.DataFrame({cname: read_inflow(cname) for cname in europe})
hyd.rename(columns={"UK": "GB"}, inplace=True) # for ISO country code
hydro = hyd.resample("H").interpolate("cubic")
# add last day of the dataset that is missing from resampling
last_day = pd.DataFrame(
index=pd.DatetimeIndex(start="20121231", freq="H", periods=24),
columns=hydro.columns,
)
data = hyd.loc["2012-12-31"]
for c in last_day:
last_day.loc[:, c] = data[c]
# need to drop last day because it comes in last day...
hydro = pd.concat([hydro.drop(hydro.tail(1).index), last_day])
# remove last day in Feb for leap years
hydro = hydro[~((hydro.index.month == 2) & (hydro.index.day == 29))]
if True: # default norm
normalization_factor = hydro.index.size / float(
hyd.index.size
) # normalize to new sampling frequency
# else:
# # conserve total inflow for each country separately
# normalization_factor = hydro.sum() / hyd.sum()
hydro /= normalization_factor
return hydro
def generation(config, scenario_year, datapackage_dir, raw_data_path):
"""
"""
countries, scenario_year = (
config["buses"]["electricity"],
config["scenario"]["year"],
)
building.download_data(
"https://zenodo.org/record/804244/files/Hydro_Inflow.zip?download=1",
directory=raw_data_path,
unzip_file="Hydro_Inflow/",
)
technologies = pd.DataFrame(
Package(
"https://raw.githubusercontent.com/ZNES-datapackages/"
"angus-input-data/master/technology/datapackage.json"
)
.get_resource("technology")
.read(keyed=True)
).set_index(["year", "parameter", "carrier", "tech"])
hydro_data = pd.DataFrame(
Package(
"https://raw.githubusercontent.com/ZNES-datapackages/"
"angus-input-data/master/hydro/datapackage.json"
)
.get_resource("hydro")
.read(keyed=True)
).set_index(["year", "country"])
hydro_data.rename(index={"UK": "GB"}, inplace=True) # for iso code
inflows = _get_hydro_inflow(
inflow_dir=os.path.join(raw_data_path, "Hydro_Inflow")
)
inflows = inflows.loc[
inflows.index.year == config["scenario"]["weather_year"], :
]
inflows["DK"], inflows["LU"] = 0, inflows["BE"]
for c in hydro_data.columns:
if c != "source":
hydro_data[c] = hydro_data[c].astype(float)
capacities = hydro_data.loc[scenario_year].loc[countries][
["ror", "rsv", "phs"]
]
ror_shares = hydro_data.loc[scenario_year].loc[countries]["ror-share"]
max_hours = hydro_data.loc[scenario_year].loc[countries][
["rsv-max-hours", "phs-max-hours"]
]
rsv_factor = hydro_data.loc[scenario_year].loc[countries]["rsv-factor"]
# ror
elements = {}
for country in countries:
name = country + "-hydro-ror"
capacity = capacities.loc[country, "ror"]
# eta = technologies.loc[
# (scenario_year, "efficiency", "hydro", "ror"), "value"
# ]
if capacity > 0:
elements[name] = {
"type": "volatile",
"tech": "ror",
"carrier": "hydro",
"bus": country + "-electricity",
"capacity": capacity,
"profile": country + "-ror-profile",
"efficiency": 1, # as already included in inflow profile
}
building.write_elements(
"ror.csv",
| pd.DataFrame.from_dict(elements, orient="index") | pandas.DataFrame.from_dict |
"""
Copyright (c) 2018-2021 Qualcomm Technologies, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the
limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
* The origin of this software must not be misrepresented; you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment is required by displaying the trademark/log as per the details
provided here: https://www.qualcomm.com/documents/dirbs-logo-and-brand-guidelines
* Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
* This notice may not be removed or altered from any source distribution.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY
THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import os
import json
import time
import string
import requests
import pandas as pd
from math import trunc
from time import strftime
from random import choice
from celery import shared_task
from django.core.mail import send_mail
from device_notification_subsystem.settings import conf
from .models import Sms, SmsContent, Email, EmailContent, UniqueEmail, UniqueMsisdn
@shared_task()
def celery_email(subject, content, sender, recipient):
recipients = [recipient]
send_mail(subject, content, sender, recipients, fail_silently=False)
return "Email has been sent"
@shared_task()
def jasmine_sms(msisdn, content, sender_no, operator):
url = conf['jasmine_single_sms_url']
sms_body = {
"to": msisdn,
"from": sender_no,
# "coding": 0,
"content": content
}
auth = jasmin_auth(operator)
headers = {'content-type': 'application/json', 'Authorization': auth}
response = requests.post(url=url, data=json.dumps(sms_body), headers=headers)
if response:
if response.status_code == 200:
return "SMS sent to {m} : {c}".format(m=msisdn, c=content)
else:
return "SMS delivery to {m} is failed".format(m=msisdn)
@shared_task
def bulk_email_db(subject, content, subsystem, sender_email):
t1 = time.time()
qry = UniqueEmail.objects.order_by('email').values_list('email', flat=True).distinct()
campaign_id = "Email_DB_" + strftime("%Y-%m-%d_%H-%M-%S")
for q in qry:
print(f"sending email to: {q}")
celery_email.apply_async(args=[subject, content, sender_email, q], queue='email_que') # calling celery task
write_email_db(q, sender_email, subsystem, subject, content, campaign_id)
t2 = time.time()
return "Email campaign completed in: {time} secs".format(time=(t2 - t1))
@shared_task
def bulk_sms_db(content, operator, subsystem, sender_no, sms_rate):
t1 = time.time()
qry = UniqueMsisdn.objects.order_by('msisdn').values_list('msisdn', flat=True).distinct()
start = 0
total_msisdns = len(qry)
sms_batch_size = sms_rate
total_chunks = (total_msisdns / sms_batch_size)
if total_chunks.is_integer(): total_chunks = trunc(total_chunks)
else: total_chunks = trunc(total_chunks) + 1
print("total chunks = ", total_chunks)
end = sms_batch_size
for chunk in range(0, total_chunks):
if chunk != total_chunks - 1:
msisdn_list = qry[start:end]
start = end
end = end + sms_batch_size
else:
msisdn_list = qry[start:]
print("processing chunk-", chunk + 1)
# res = send_sms_batch(msisdn_list, content)
send_sms_batch(msisdn_list, operator, content)
print("DB insertion started ")
campaign_id = "SMS_DB_" + strftime("%Y-%m-%d_%H-%M-%S")
# result = [write_sms_db(q, sender_no, operator, subsystem, content, campaign_id) for q in qry]
[write_sms_db(q, sender_no, operator, subsystem, content, campaign_id) for q in qry]
t2 = time.time()
return "SMS campaign completed in: {time} sec".format(time=(t2 - t1))
# noinspection PyUnboundLocalVariable,PyUnusedLocal
@shared_task
def bulk_email_file(file, subject, content, subsystem, sender_email):
"""Function to create celery task of processing bulk file for sending Email campaigns """
t1 = time.time()
# extracting file path & file name
file_path, file_name = os.path.split(file)
try:
# read the file into DataFrame
df_csv = pd.read_csv(file, usecols=range(2), dtype={"imei": str, "imsi": str, "email": str},
chunksize=conf['df_big_chunksize'])
except Exception as e:
if e:
error = {"Error": "File content is not Correct"}
return json.dumps(error)
df = pd.concat(df_csv)
# removing white spaces from Column 'email'
df['email'] = df['email'].str.strip()
# removing Email-IDs with wrong format
df = df[(df.email.astype(str).str.match(conf['validation_regex']['email']))]
rows, cols = df.shape
print(rows, cols)
start = 0
# generating random string for file-name
all_char = string.ascii_letters + string.digits
rand_str = "".join(choice(all_char) for x in range(8))
if rows >= 10:
chunk_size = trunc(rows / 10)
end = chunk_size
email_files, all_files = [], []
for i in range(1, 11):
print(start, end)
f_all = "Email_" + rand_str + "_chunk_" + str(i) + ".csv"
file_all = os.path.join(file_path, f_all)
print(file_all)
all_files.append(file_all)
if i != 10:
df[start:end].to_csv(file_all, index=False)
start = end
end = end + chunk_size
else:
df[start:].to_csv(file_all, index=False)
else:
return "File must contain more than 10 Email-IDs"
for i in range(1, 11):
print("Processing File-", i)
all_file = all_files[i - 1]
que = 'que' + str(i)
process_email_file.apply_async(args=[all_file, subject, content, subsystem, file_name, sender_email],
queue=que)
t2 = time.time()
return "File chunking completed in: {time} sec".format(time=(t2 - t1))
# noinspection PyUnboundLocalVariable,PyUnusedLocal
@shared_task
def bulk_sms_file(file, content, operator, subsystem, sender_no, sms_rate):
"""Function to create celery task of processing bulk file for sending SMS campaign """
t1 = time.time()
# extracting file path & file name
file_path, file_name = os.path.split(file)
try:
# read the file into DataFrame
df_csv = pd.read_csv(file, usecols=range(5), dtype={"imei": str, "imsi": str, "msisdn": str, "block_date": str,
"reasons": str}, chunksize=conf['df_big_chunksize'])
except Exception as e:
if e:
error = {"Error": "File content is not Correct"}
return json.dumps(error)
df = pd.concat(df_csv)
# removing white spaces from Column 'msisdn'
df['msisdn'] = df['msisdn'].str.strip()
# removing MSISDN with wrong format
df = df[(df.msisdn.astype(str).str.match(conf['validation_regex']['msisdn']))]
# Copying "MSISDN" column to new DataFrame
df_new = pd.DataFrame()
df_new['msisdn'] = df['msisdn']
rows, cols = df_new.shape
print(rows, cols)
start = 0
# generating random string for file-name
all_char = string.ascii_letters + string.digits
rand_str = "".join(choice(all_char) for x in range(8))
if rows >= 10:
chunk_size = trunc(rows / 10)
end = chunk_size
msisdn_files, all_files = [], []
for i in range(1, 11):
print(start, end)
f_msisdn = "MSISDN_only_" + rand_str + "_chunk_" + str(i) + ".csv"
f_all = "File_all_" + rand_str + "_chunk_" + str(i) + ".csv"
file_msisdn = os.path.join(file_path, f_msisdn)
file_all = os.path.join(file_path, f_all)
print(file_msisdn)
msisdn_files.append(file_msisdn)
all_files.append(file_all)
if i != 10:
df_new[start:end].to_csv(file_msisdn, index=False)
df[start:end].to_csv(file_all, index=False)
start = end
end = end + chunk_size
else:
df_new[start:].to_csv(file_msisdn, index=False)
df[start:].to_csv(file_all, index=False)
else:
return "File must contain more than 10 MSISDNs"
for i in range(1, 11):
print("Processing File-", i)
msisdn_file = msisdn_files[i-1]
all_file = all_files[i-1]
que = 'que' + str(i)
process_sms_file.apply_async(args=[msisdn_file, all_file, content, operator, subsystem, file_name,
sender_no, sms_rate], queue=que)
t2 = time.time()
return "File chunking completed in: {time} sec".format(time=(t2 - t1))
@shared_task
def process_email_file(file_all, subject, content, subsystem, file_name, sender_email):
t1 = time.time()
df_t2 = | pd.read_csv(file_all, chunksize=conf['df_small_chunksize']) | pandas.read_csv |
import pandas as pd
def read_filter_lbmp(path):
"""
Read one csv file with pandas. Convert the Time Stamp field to pandas
datetime format. Filter out non-NYC nodes and only keep required columns.
Change column names to snake case for easier access.
Parameters
----------
path : str or other object for read_csv filepath parameter
Path to csv file with LBMP data
Returns
-------
DataFrame
df with 3 columns (time stamp, name of node, LBMP)
"""
df = | pd.read_csv(path, parse_dates=['Time Stamp']) | pandas.read_csv |
import sys
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
import os
class data:
def __init__(self):
self.categories= | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
from pandas.compat import range
import pandas.util.testing as tm
from pandas import read_csv
import os
import nose
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
import pandas.tools.rplot as rplot
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def between(a, b, x):
"""Check if x is in the somewhere between a and b.
Parameters:
-----------
a: float, interval start
b: float, interval end
x: float, value to test for
Returns:
--------
True if x is between a and b, False otherwise
"""
if a < b:
return x >= a and x <= b
else:
return x <= a and x >= b
@tm.mplskip
class TestUtilityFunctions(tm.TestCase):
"""
Tests for RPlot utility functions.
"""
def setUp(self):
path = os.path.join(curpath(), 'data/iris.csv')
self.data = read_csv(path, sep=',')
def test_make_aes1(self):
aes = rplot.make_aes()
self.assertTrue(aes['x'] is None)
self.assertTrue(aes['y'] is None)
self.assertTrue(aes['size'] is None)
self.assertTrue(aes['colour'] is None)
self.assertTrue(aes['shape'] is None)
self.assertTrue(aes['alpha'] is None)
self.assertTrue(isinstance(aes, dict))
def test_make_aes2(self):
self.assertRaises(ValueError, rplot.make_aes,
size=rplot.ScaleShape('test'))
self.assertRaises(ValueError, rplot.make_aes,
colour=rplot.ScaleShape('test'))
self.assertRaises(ValueError, rplot.make_aes,
shape=rplot.ScaleSize('test'))
self.assertRaises(ValueError, rplot.make_aes,
alpha=rplot.ScaleShape('test'))
def test_dictionary_union(self):
dict1 = {1 : 1, 2 : 2, 3 : 3}
dict2 = {1 : 1, 2 : 2, 4 : 4}
union = rplot.dictionary_union(dict1, dict2)
self.assertEqual(len(union), 4)
keys = list(union.keys())
self.assertTrue(1 in keys)
self.assertTrue(2 in keys)
self.assertTrue(3 in keys)
self.assertTrue(4 in keys)
self.assertEqual(rplot.dictionary_union(dict1, {}), dict1)
self.assertEqual(rplot.dictionary_union({}, dict1), dict1)
self.assertEqual(rplot.dictionary_union({}, {}), {})
def test_merge_aes(self):
layer1 = rplot.Layer(size=rplot.ScaleSize('test'))
layer2 = rplot.Layer(shape=rplot.ScaleShape('test'))
rplot.merge_aes(layer1, layer2)
self.assertTrue(isinstance(layer2.aes['size'], rplot.ScaleSize))
self.assertTrue(isinstance(layer2.aes['shape'], rplot.ScaleShape))
self.assertEqual(layer2.aes['size'], layer1.aes['size'])
for key in layer2.aes.keys():
if key != 'size' and key != 'shape':
self.assertTrue(layer2.aes[key] is None)
def test_sequence_layers(self):
layer1 = rplot.Layer(self.data)
layer2 = rplot.GeomPoint(x='SepalLength', y='SepalWidth',
size=rplot.ScaleSize('PetalLength'))
layer3 = rplot.GeomPolyFit(2)
result = rplot.sequence_layers([layer1, layer2, layer3])
self.assertEqual(len(result), 3)
last = result[-1]
self.assertEqual(last.aes['x'], 'SepalLength')
self.assertEqual(last.aes['y'], 'SepalWidth')
self.assertTrue(isinstance(last.aes['size'], rplot.ScaleSize))
self.assertTrue(self.data is last.data)
self.assertTrue(rplot.sequence_layers([layer1])[0] is layer1)
@tm.mplskip
class TestTrellis(tm.TestCase):
def setUp(self):
path = os.path.join(curpath(), 'data/tips.csv')
self.data = read_csv(path, sep=',')
layer1 = rplot.Layer(self.data)
layer2 = rplot.GeomPoint(x='total_bill', y='tip')
layer3 = rplot.GeomPolyFit(2)
self.layers = rplot.sequence_layers([layer1, layer2, layer3])
self.trellis1 = rplot.TrellisGrid(['sex', 'smoker'])
self.trellis2 = rplot.TrellisGrid(['sex', '.'])
self.trellis3 = rplot.TrellisGrid(['.', 'smoker'])
self.trellised1 = self.trellis1.trellis(self.layers)
self.trellised2 = self.trellis2.trellis(self.layers)
self.trellised3 = self.trellis3.trellis(self.layers)
def test_grid_sizes(self):
self.assertEqual(len(self.trellised1), 3)
self.assertEqual(len(self.trellised2), 3)
self.assertEqual(len(self.trellised3), 3)
self.assertEqual(len(self.trellised1[0]), 2)
self.assertEqual(len(self.trellised1[0][0]), 2)
self.assertEqual(len(self.trellised2[0]), 2)
self.assertEqual(len(self.trellised2[0][0]), 1)
self.assertEqual(len(self.trellised3[0]), 1)
self.assertEqual(len(self.trellised3[0][0]), 2)
self.assertEqual(len(self.trellised1[1]), 2)
self.assertEqual(len(self.trellised1[1][0]), 2)
self.assertEqual(len(self.trellised2[1]), 2)
self.assertEqual(len(self.trellised2[1][0]), 1)
self.assertEqual(len(self.trellised3[1]), 1)
self.assertEqual(len(self.trellised3[1][0]), 2)
self.assertEqual(len(self.trellised1[2]), 2)
self.assertEqual(len(self.trellised1[2][0]), 2)
self.assertEqual(len(self.trellised2[2]), 2)
self.assertEqual(len(self.trellised2[2][0]), 1)
self.assertEqual(len(self.trellised3[2]), 1)
self.assertEqual(len(self.trellised3[2][0]), 2)
def test_trellis_cols_rows(self):
self.assertEqual(self.trellis1.cols, 2)
self.assertEqual(self.trellis1.rows, 2)
self.assertEqual(self.trellis2.cols, 1)
self.assertEqual(self.trellis2.rows, 2)
self.assertEqual(self.trellis3.cols, 2)
self.assertEqual(self.trellis3.rows, 1)
@tm.mplskip
class TestScaleGradient(tm.TestCase):
def setUp(self):
path = os.path.join(curpath(), 'data/iris.csv')
self.data = read_csv(path, sep=',')
self.gradient = rplot.ScaleGradient("SepalLength", colour1=(0.2, 0.3,
0.4),
colour2=(0.8, 0.7, 0.6))
def test_gradient(self):
for index in range(len(self.data)):
row = self.data.iloc[index]
r, g, b = self.gradient(self.data, index)
r1, g1, b1 = self.gradient.colour1
r2, g2, b2 = self.gradient.colour2
self.assertTrue(between(r1, r2, r))
self.assertTrue(between(g1, g2, g))
self.assertTrue(between(b1, b2, b))
@tm.mplskip
class TestScaleGradient2(tm.TestCase):
def setUp(self):
path = os.path.join(curpath(), 'data/iris.csv')
self.data = read_csv(path, sep=',')
self.gradient = rplot.ScaleGradient2("SepalLength", colour1=(0.2, 0.3, 0.4), colour2=(0.8, 0.7, 0.6), colour3=(0.5, 0.5, 0.5))
def test_gradient2(self):
for index in range(len(self.data)):
row = self.data.iloc[index]
r, g, b = self.gradient(self.data, index)
r1, g1, b1 = self.gradient.colour1
r2, g2, b2 = self.gradient.colour2
r3, g3, b3 = self.gradient.colour3
value = row[self.gradient.column]
a_ = min(self.data[self.gradient.column])
b_ = max(self.data[self.gradient.column])
scaled = (value - a_) / (b_ - a_)
if scaled < 0.5:
self.assertTrue(between(r1, r2, r))
self.assertTrue(between(g1, g2, g))
self.assertTrue(between(b1, b2, b))
else:
self.assertTrue(between(r2, r3, r))
self.assertTrue(between(g2, g3, g))
self.assertTrue(between(b2, b3, b))
@tm.mplskip
class TestScaleRandomColour(tm.TestCase):
def setUp(self):
path = os.path.join(curpath(), 'data/iris.csv')
self.data = read_csv(path, sep=',')
self.colour = rplot.ScaleRandomColour('SepalLength')
def test_random_colour(self):
for index in range(len(self.data)):
colour = self.colour(self.data, index)
self.assertEqual(len(colour), 3)
r, g, b = colour
self.assertTrue(r >= 0.0)
self.assertTrue(g >= 0.0)
self.assertTrue(b >= 0.0)
self.assertTrue(r <= 1.0)
self.assertTrue(g <= 1.0)
self.assertTrue(b <= 1.0)
@tm.mplskip
class TestScaleConstant(tm.TestCase):
def test_scale_constant(self):
scale = rplot.ScaleConstant(1.0)
self.assertEqual(scale(None, None), 1.0)
scale = rplot.ScaleConstant("test")
self.assertEqual(scale(None, None), "test")
class TestScaleSize(tm.TestCase):
def setUp(self):
path = os.path.join(curpath(), 'data/iris.csv')
self.data = read_csv(path, sep=',')
self.scale1 = rplot.ScaleShape('Name')
self.scale2 = rplot.ScaleShape('PetalLength')
def test_scale_size(self):
for index in range(len(self.data)):
marker = self.scale1(self.data, index)
self.assertTrue(marker in ['o', '+', 's', '*', '^', '<', '>', 'v', '|', 'x'])
def test_scale_overflow(self):
def f():
for index in range(len(self.data)):
self.scale2(self.data, index)
self.assertRaises(ValueError, f)
@tm.mplskip
class TestRPlot(tm.TestCase):
def test_rplot1(self):
import matplotlib.pyplot as plt
path = os.path.join(curpath(), 'data/tips.csv')
plt.figure()
self.data = read_csv(path, sep=',')
self.plot = rplot.RPlot(self.data, x='tip', y='total_bill')
self.plot.add(rplot.TrellisGrid(['sex', 'smoker']))
self.plot.add(rplot.GeomPoint(colour=rplot.ScaleRandomColour('day'), shape=rplot.ScaleShape('size')))
self.fig = plt.gcf()
self.plot.render(self.fig)
def test_rplot2(self):
import matplotlib.pyplot as plt
path = os.path.join(curpath(), 'data/tips.csv')
plt.figure()
self.data = read_csv(path, sep=',')
self.plot = rplot.RPlot(self.data, x='tip', y='total_bill')
self.plot.add(rplot.TrellisGrid(['.', 'smoker']))
self.plot.add(rplot.GeomPoint(colour= | rplot.ScaleRandomColour('day') | pandas.tools.rplot.ScaleRandomColour |
import time, datetime, os, collections
from dateutil import relativedelta
import numpy as np # for numerical computation
import pandas as pd # for data processing
import yfinance as yf
import pandas_datareader
#import requests # for web crawling
#import bs4 # for web scraping
pd.set_option('display.unicode.east_asian_width', True) # display taking full-width character width into account
# fetch stock (ticker) information
def get_stock_code(stock_dict, data_type='list', show=False):
database = stock_dict['database']
group_type = stock_dict['group_type']
group_name = stock_dict['group_name']
stock_list = []
    # TSE (Tokyo Stock Exchange) listed stocks
if (database=='tosho_jpx'):
file_name = 'https://www.jpx.co.jp/markets/statistics-equities/misc/tvdivq0000001vg2-att/data_j.xls'
df_stock = pd.read_excel(file_name)
df_stock['17業種区分'] = df_stock['17業種区分'].str.replace(' ', '') #remove stray whitespace
df_stock['コード'] = df_stock['コード'].astype(str).str[:4] +'.T'
#TOPIX constituent stocks
if (database=='topix_jpx'):
file_name = 'https://www.jpx.co.jp/markets/indices/topix/tvdivq00000030ne-att/TOPIX_weight_jp.xlsx'
df_stock = pd.read_excel(file_name)
df_stock['日付'] = df_stock['日付'].replace(r'(.*/D.*)', np.nan, regex=True) #replace invalid values
df_stock = df_stock.drop(['調整係数対象銘柄'], axis=1).dropna() #drop rows containing NaN
df_stock['日付'] = pd.to_datetime(df_stock['日付'], format='%Y%m%d') #type conversion
df_stock['コード'] = df_stock['コード'].astype(str).str[:4] +'.T'
#Nikkei 225 constituent stocks
if (database=='n225_nikkei'):
file_name = 'https://indexes.nikkei.co.jp/nkave/archives/file/nikkei_stock_average_weight_jp.csv'
df_stock = pd.read_csv(file_name, encoding='shift-jis').dropna()
df_stock = df_stock.rename(columns={"社名":"銘柄名"}) #rename column
df_stock['日付'] = pd.to_datetime(df_stock['日付'], format='%Y/%m/%d') #type conversion
df_stock['コード'] = df_stock['コード'].astype(str).str[:4] +'.T'
#ETF constituent issues
if (database=='tosho_etf'):
file_name = 'https://www.jpx.co.jp/equities/products/etfs/investors/tvdivq0000005cdd-att/nlsgeu000000vx9t.xlsx'
df_stock = pd.read_excel(file_name, header=3)
df_stock = df_stock[['コード', '略称', '連動指標名', '連動対象カテゴリー', '売買単位\n(口)']]
df_stock = df_stock.dropna() #drop rows containing NaN
df_stock = df_stock.rename(columns={"略称":"銘柄名", "売買単位\n(口)":"売買単位"}) #rename columns
df_stock['コード'] = df_stock['コード'].astype(str).str[:4] +'.T'
if (group_type!='全銘柄'):
if (show==True):
print(df_stock)
counter = collections.Counter(df_stock[group_type].values.tolist()) #count how often each value appears
print(counter)
#Extract only the rows belonging to the specified group
df_stock = df_stock[df_stock[group_type].isin(group_name)]
#stock_list = list(set(df_stock['コード'].values.tolist())) #ticker codes
#Information on marginable (loanable) issues
file_name = 'https://www.jpx.co.jp/listing/others/margin/tvdivq0000000od2-att/list.xls'
df_margin_stock = pd.read_excel(file_name, sheet_name='一覧', header=1)
df_margin_stock = df_margin_stock.rename(columns={"銘柄コード":"コード"}) #rename column
df_margin_stock = df_margin_stock[df_margin_stock['貸借区分'].isin(['貸借銘柄'])] #issues that can be sold short
df_margin_stock['コード'] = df_margin_stock['コード'].astype(str).str[:4] +'.T'
#margin_stock_list = list(set(df_margin_stock['コード'].values.tolist())) #ticker codes
#stock_list = list(set(stock_list) & set(margin_stock_list))
df_stock = pd.merge(df_stock, df_margin_stock.drop(['銘柄名'], axis=1), on='コード', how='inner')
stock_list = sorted(df_stock['コード'].values.tolist()) #ticker codes
if (show==True):
print(stock_list)
print()
if(data_type=='dataframe'):
return df_stock
if(data_type=='list'):
return stock_list
#Fetch stock price data (roughly 5 years for training data, about 0.5 years for test data)
def get_stock_price(stock_list, date_min, date_max, show=False):
if(datetime.datetime(2000, 4, 1)<date_min and date_max<datetime.datetime(2021, 1, 2)):
df_price_all = pd.DataFrame()
for code in stock_list:
try:
file_relative_path = os.path.dirname(__file__)
csv_relative_path = r'../../../inout_data/price_data/' +code +r'.csv'
df_price = pd.read_csv(os.path.join(file_relative_path, csv_relative_path), index_col=0)
df_price_all['Open', code] = df_price['Open']
df_price_all['High', code] = df_price['High']
df_price_all['Low', code] = df_price['Low']
df_price_all['Close', code] = df_price['Close']
df_price_all['Adj Close', code] = df_price['Adj Close']
df_price_all['Volume', code] = df_price['Volume']
except Exception as e:
#import traceback
#print("エラー情報\n" + traceback.format_exc())
df_price_all['Open', code] = np.nan
df_price_all['High', code] = np.nan
df_price_all['Low', code] = np.nan
df_price_all['Close', code] = np.nan
df_price_all['Adj Close', code] = np.nan
df_price_all['Volume', code] = np.nan
df_price_all.columns = pd.MultiIndex.from_tuples(df_price_all.columns) #promote the tuple keys to MultiIndex columns
df_price_all.index = | pd.to_datetime(df_price_all.index, format='%Y-%m-%d') | pandas.to_datetime |
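The completion for this row is pd.to_datetime(df_price_all.index, format='%Y-%m-%d'), i.e. parsing the CSV index into timestamps once the per-ticker columns have been promoted to a MultiIndex. A small self-contained sketch of that (field, ticker) column pattern is shown below; the ticker codes and random numbers are made up and stand in for the local CSV files.
import numpy as np
import pandas as pd

df_all = pd.DataFrame(index=['2020-01-06', '2020-01-07', '2020-01-08'])
for code in ['7203.T', '9984.T']:                                    # hypothetical ticker codes
    df_all['Close', code] = np.random.rand(len(df_all)) * 1000       # tuple key -> one column per (field, ticker)
    df_all['Volume', code] = np.random.randint(100_000, 1_000_000, len(df_all))
df_all.columns = pd.MultiIndex.from_tuples(df_all.columns)           # tuple keys -> MultiIndex columns
df_all.index = pd.to_datetime(df_all.index, format='%Y-%m-%d')       # the row's completion
print(df_all['Close'])                                               # slice one field across all tickers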
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pymysql
import pandas as pd
import datetime
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
import matplotlib.colors as colors
import netCDF4 as nc
from netCDF4 import Dataset
#------------------------------------------------------------------------------
# Motivation for code, section 1 ------------------------------------------------
"Code for plotting and computing the hourly frequency histograms of rainfall at selected measurement points. The data to plot for each"
"measurement point is read as a pandas object, and the histograms of the accumulated rainfall and of the hours with accumulation are then computed."
"It was initially created to estimate the distribution of the accumulated rainfall at the measurement points of the experimental panels."
"It is done with the 2018 data."
Pluvio = 'si' ##--> Must be 'si' so that the rainfall from the two rain gauges is averaged
#-----------------------------------------------------------------------------
# Paths for the fonts -----------------------------------------------------
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
##########################################################################
## ----------------READING THE ACCUMULATED-RAINFALL FILES----------------##
##########################################################################
df_Acum_JV = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Pluvio/AcumH211.csv', sep=',', index_col =0)
df_Acum_CI = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Pluvio/AcumH206.csv', sep=',', index_col =0)
df_Acum_TS = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Pluvio/AcumH201.csv', sep=',', index_col =0)
df_Acum_JV.index = pd.to_datetime(df_Acum_JV.index, format="%Y-%m-%d %H:%M", errors='coerce')
df_Acum_CI.index = pd.to_datetime(df_Acum_CI.index, format="%Y-%m-%d %H:%M", errors='coerce')
df_Acum_TS.index = pd.to_datetime(df_Acum_TS.index, format="%Y-%m-%d %H:%M", errors='coerce')
df_Acum_JV = df_Acum_JV.between_time('06:00', '17:59')
df_Acum_CI = df_Acum_CI.between_time('06:00', '17:59')
df_Acum_TS = df_Acum_TS.between_time('06:00', '17:59')
########################################################################
## ----------------ADJUSTING THE RAIN-GAUGE DATA----------------##
########################################################################
"Si uno de los archivos leidos tiene infomación de pluviometro, se deben promediar los acumulados horarios de P1 y P2 para tener un solo estimado."
if Pluvio == 'si':
df_Acum_JV['Precip'] = df_Acum_JV[['P1', 'P2']].mean(axis=1)
df_Acum_JV = df_Acum_JV.drop(['P1', 'P2'], axis=1)
########################################################################
## ----------------HOURLY RAINFALL HISTOGRAMS----------------##
########################################################################
df_Acum_JV_rain = df_Acum_JV[df_Acum_JV['Precip']>0]
df_Acum_CI_rain = df_Acum_CI[df_Acum_CI['Precip']>0]
df_Acum_TS_rain = df_Acum_TS[df_Acum_TS['Precip']>0]
## -------------------------GET THE RAINY HOURS AND DATES---------------------------- ##
Hora_JV = df_Acum_JV_rain.index.hour
Fecha_JV = df_Acum_JV_rain.index.date
Hora_CI = df_Acum_CI_rain.index.hour
Fecha_CI = df_Acum_CI_rain.index.date
Hora_TS = df_Acum_TS_rain.index.hour
Fecha_TS = df_Acum_TS_rain.index.date
## -----------------------------PLOT THE HISTOGRAMS OF THE RAINY HOURS----------------------------- #
fig = plt.figure(figsize=[10, 6])
plt.rc('axes', edgecolor='gray')
ax1 = fig.add_subplot(1, 3, 1)
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.hist(Hora_JV, bins='auto', alpha = 0.5, color = 'orange', label = 'H_Lluvia')
ax1.set_title(u'Distribución de horas lluviosas en JV', fontproperties=prop, fontsize = 8)
ax1.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax1.set_xlabel(u'Horas', fontproperties=prop_1)
ax1.legend()
ax2 = fig.add_subplot(1, 3, 2)
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.hist(Hora_CI, bins='auto', alpha = 0.5, color = 'orange', label = 'H_Lluvia')
ax2.set_title(u'Distribución de horas lluviosas en CI', fontproperties=prop, fontsize = 8)
ax2.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax2.set_xlabel(u'Horas', fontproperties=prop_1)
ax2.legend()
ax3 = fig.add_subplot(1, 3, 3)
ax3.spines['top'].set_visible(False)
ax3.spines['right'].set_visible(False)
ax3.hist(Hora_TS, bins='auto', alpha = 0.5, color = 'orange', label = 'H_Lluvia')
ax3.set_title(u'Distribución de horas lluviosas en TS', fontproperties=prop, fontsize = 8)
ax3.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax3.set_xlabel(u'Horas', fontproperties=prop_1)
ax3.legend()
plt.savefig('/home/nacorreasa/Escritorio/Figuras/HistoHorasLluvia_2018.png')
plt.close('all')
os.system('scp /home/nacorreasa/Escritorio/Figuras/HistoHorasLluvia_2018.png [email protected]:/var/www/nacorreasa/Graficas_Resultados/Estudio')
#------------------------------------------------------------------------------
# Motivation for code, section 2 ------------------------------------------------
"In this section of the code the aim is to find the lagged correlation between the hours with accumulated precipitation and the cloudy"
"hours, in order to verify the information on the GOES CH2 thresholds for clouds."
################################################################################################
## -------------------------------READING GOES CH02 DATA------------------------------ ##
################################################################################################
#ds = Dataset('/home/nacorreasa/Maestria/Datos_Tesis/GOES/GOES_nc_CREADOS/GOES_VA_C2_2019_0320_0822.nc')
ds = Dataset('/home/nacorreasa/Maestria/Datos_Tesis/GOES/GOES_nc_CREADOS/GOES_VA_C22018.nc')
## ---------------------------------ADJUSTING THE GOES CH2 DATA----------------------------------------- ##
lat = ds.variables['lat'][:, :]
lon = ds.variables['lon'][:, :]
Rad = ds.variables['Radiancias'][:, :, :]
## -- Get the timestamp for each value
tiempo = ds.variables['time']
fechas_horas = nc.num2date(tiempo[:], units=tiempo.units)
for i in range(len(fechas_horas)):
fechas_horas[i] = pd.to_datetime(fechas_horas[i] , format="%Y-%m-%d %H:%M", errors='coerce')
################################################################################################
##-------------------INCORPORATING THE ZENITH ARRAY FOR EACH HOUR--------------------------##
################################################################################################
def Aclarado_visible(Path_Zenith, Path_Fechas, Rad, fechas_horas):
Z = np.load(Path_Zenith)
Fechas_Z = np.load(Path_Fechas)
daily_hours = np.arange(5, 19, 1)
Zenith = []
Fechas_Zenith = []
for i in range(len(Fechas_Z)):
if Fechas_Z[i].hour in daily_hours:
Zenith.append(Z[i, :, :])
Fechas_Zenith.append(Fechas_Z[i])
elif Fechas_Z[i].hour not in daily_hours:
pass
Zenith = np.array(Zenith)
Rad_clear = []
for i in range(len(Fechas_Zenith)):
for j in range(len(fechas_horas)):
if Fechas_Zenith[i].hour == fechas_horas[j].hour and Fechas_Zenith[i].day == fechas_horas[j].day:
Rad_clear.append(Rad[j, :, :]/np.cos(Zenith[i, :, :]))
else:
pass
Rad_clear = np.array(Rad_clear)
return Rad
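# NOTE: as written, Aclarado_visible builds Rad_clear (the zenith-corrected cube) but
# returns the original Rad unchanged, so the correction is discarded downstream; returning
# Rad_clear instead would also change the time dimension, because only daytime hours are kept.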
Rad_Z = Aclarado_visible('/home/nacorreasa/Maestria/Datos_Tesis/hourlyZenith2018.npy', '/home/nacorreasa/Maestria/Datos_Tesis/DatesZenith.npy', Rad, fechas_horas)
del Rad
Rad = Rad_Z
## -- Select the TS pixel and build its DataFrame
lat_index_975 = np.where((lat[:, 0] > 6.25) & (lat[:, 0] < 6.26))[0][0]
lon_index_975 = np.where((lon[0, :] < -75.58) & (lon[0, :] > -75.59))[0][0]
Rad_pixel_975 = Rad[:, lat_index_975, lon_index_975]
Rad_df_975 = pd.DataFrame()
Rad_df_975['Fecha_Hora'] = fechas_horas
Rad_df_975['Radiacias'] = Rad_pixel_975
Rad_df_975['Fecha_Hora'] = pd.to_datetime(Rad_df_975['Fecha_Hora'], format="%Y-%m-%d %H:%M", errors='coerce')
Rad_df_975.index = Rad_df_975['Fecha_Hora']
Rad_df_975 = Rad_df_975.drop(['Fecha_Hora'], axis=1)
## -- Select the CI pixel
lat_index_350 = np.where((lat[:, 0] > 6.16) & (lat[:, 0] < 6.17))[0][0]
lon_index_350 = np.where((lon[0, :] < -75.64) & (lon[0, :] > -75.65))[0][0]
Rad_pixel_350 = Rad[:, lat_index_350, lon_index_350]
Rad_df_350 = pd.DataFrame()
Rad_df_350['Fecha_Hora'] = fechas_horas
Rad_df_350['Radiacias'] = Rad_pixel_350
Rad_df_350['Fecha_Hora'] = pd.to_datetime(Rad_df_350['Fecha_Hora'], format="%Y-%m-%d %H:%M", errors='coerce')
Rad_df_350.index = Rad_df_350['Fecha_Hora']
Rad_df_350 = Rad_df_350.drop(['Fecha_Hora'], axis=1)
## -- Select the JV pixel
lat_index_348 = np.where((lat[:, 0] > 6.25) & (lat[:, 0] < 6.26))[0][0]
lon_index_348 = np.where((lon[0, :] < -75.54) & (lon[0, :] > -75.55))[0][0]
Rad_pixel_348 = Rad[:, lat_index_348, lon_index_348]
Rad_df_348 = pd.DataFrame()
Rad_df_348['Fecha_Hora'] = fechas_horas
Rad_df_348['Radiacias'] = Rad_pixel_348
Rad_df_348['Fecha_Hora'] = | pd.to_datetime(Rad_df_348['Fecha_Hora'], format="%Y-%m-%d %H:%M", errors='coerce') | pandas.to_datetime |
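The three pixel blocks above repeat one pattern: find the grid indices nearest a target latitude/longitude, slice the radiance cube there, and wrap the series in a time-indexed DataFrame (the recorded completion is the pd.to_datetime call). A compact, self-contained sketch of that pattern is shown below; the grids, radiance cube, and timestamps are synthetic stand-ins for the GOES file.
import numpy as np
import pandas as pd

lat = np.repeat(np.linspace(5.9, 6.6, 128)[:, None], 128, axis=1)      # synthetic latitude grid
lon = np.repeat(np.linspace(-76.0, -75.2, 128)[None, :], 128, axis=0)  # synthetic longitude grid
rad = np.random.rand(24, 128, 128)                                     # synthetic radiance cube
fechas_horas = pd.date_range('2018-01-01 06:00', periods=24, freq='H')

def pixel_dataframe(target_lat, target_lon):
    i = np.abs(lat[:, 0] - target_lat).argmin()      # nearest row index
    j = np.abs(lon[0, :] - target_lon).argmin()      # nearest column index
    df = pd.DataFrame({'Radiancias': rad[:, i, j]})
    df.index = pd.to_datetime(fechas_horas, format="%Y-%m-%d %H:%M", errors='coerce')  # the row's completion
    return df

print(pixel_dataframe(6.25, -75.58).head())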
import os
import numpy as np
import pandas as pd
import pytest
from janitor.testing_utils import date_data
TEST_DATA_DIR = "tests/test_data"
EXAMPLES_DIR = "examples/"
@pytest.fixture
def dataframe():
data = {
"a": [1, 2, 3] * 3,
"Bell__Chart": [1.234_523_45, 2.456_234, 3.234_612_5] * 3,
"decorated-elephant": [1, 2, 3] * 3,
"animals@#$%^": ["rabbit", "leopard", "lion"] * 3,
"cities": ["Cambridge", "Shanghai", "Basel"] * 3,
}
df = pd.DataFrame(data)
return df
@pytest.fixture
def date_dataframe():
df = | pd.DataFrame(date_data.date_list, columns=["AMOUNT", "DATE"]) | pandas.DataFrame |
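The completion here simply wraps a module-level list of records in pd.DataFrame inside a pytest fixture. A self-contained version of the same idea is sketched below; the inline date_list is a hypothetical stand-in for janitor.testing_utils.date_data.date_list, whose exact contents are not shown here.
import pandas as pd
import pytest

date_list = [                      # hypothetical stand-in for date_data.date_list
    [1000, "02/28/2019"],
    [2000, "03/01/2019"],
]

@pytest.fixture
def date_dataframe():
    return pd.DataFrame(date_list, columns=["AMOUNT", "DATE"])   # the row's completion

def test_date_dataframe(date_dataframe):
    assert list(date_dataframe.columns) == ["AMOUNT", "DATE"]
    assert len(date_dataframe) == 2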
import time
import datetime as dt
import pandas as pd
import numpy as np
import logging
import coloredlogs
import pytz
from typing import List, Dict, Tuple, Any
from polygon import RESTClient
from trader.common.helpers import dateify
class PolygonFinancials():
def __init__(self, financials: pd.DataFrame, dividends: pd.DataFrame, splits: pd.DataFrame):
self.financials = financials
self.splits = splits
self.dividends = dividends
class PolygonListener():
def __init__(self,
key: str = '<KEY>',
limit: int = 50000,
request_sleep: int = 0):
self.client: RESTClient = RESTClient(key)
self.request_sleep = request_sleep
self.limit = limit
def date(self, date_time: dt.datetime) -> str:
return dt.datetime.strftime(date_time, '%Y-%m-%d')
def date_from_ts(self, unixtimestamp: int, zone: str = 'America/New_York') -> dt.datetime:
return dt.datetime.fromtimestamp(unixtimestamp / 1000.0, tz=pytz.timezone(zone))
def date_from_nanots(self, unixtimestamp: int, zone: str = 'America/New_York') -> dt.datetime:
return dt.datetime.fromtimestamp(unixtimestamp / 1000000000.0, tz=pytz.timezone(zone))
def round_to_second(self, unixtimestamp: int, zone: str = 'America/New_York') -> np.int64:
d = self.date_from_nanots(unixtimestamp, zone)
date_time = dt.datetime(d.year, d.month, d.day, d.hour, d.minute, d.second)
return np.int64(date_time.timestamp())
def get_all_equity_symbols(self):
yesterday = dt.datetime.now() - dt.timedelta(days=1)
result = self.client.stocks_equities_grouped_daily('US', 'STOCKS', self.date(yesterday))
symbols = [r['T'] for r in result.results] # type: ignore
return symbols
def get_grouped_daily(self, market: str, date_time: dt.datetime) -> pd.DataFrame:
resp = self.client.stocks_equities_grouped_daily('US', market, date=self.date(date_time))
columns = {
'T': 'symbol',
'v': 'volume',
'o': 'open',
'c': 'close',
'h': 'high',
'l': 'low',
't': 'date',
'vw': 'vwap',
'n': 'items'
}
df = pd.DataFrame(resp.results) # type: ignore
df = df.rename(columns=columns) # type: ignore
return df
def get_aggregates(self,
symbol: str,
multiplier: int,
timespan: str,
start_date: dt.datetime,
end_date: dt.datetime) -> pd.DataFrame:
timespans = ['day', 'minute', 'hour', 'week', 'month', 'quarter', 'year']
if timespan not in timespans:
raise ValueError('incorrect timespan, must be {}'.format(timespans))
start_date = dateify(start_date, timezone='America/New_York')
end_date = dateify(end_date + dt.timedelta(days=1), timezone='America/New_York')
logging.info('get_aggregates {} mul: {} timespan: {} {} {}'.format(symbol,
multiplier,
timespan,
start_date,
end_date))
result = self.client.stocks_equities_aggregates(symbol,
multiplier,
timespan,
self.date(start_date),
self.date(end_date),
**{'limit': self.limit})
time.sleep(self.request_sleep)
columns = {
'T': 'symbol',
'v': 'volume',
'o': 'open',
'c': 'close',
'h': 'high',
'l': 'low',
't': 'date',
'n': 'items',
'vw': 'vwap'
}
df = pd.DataFrame(result.results) # type: ignore
df = df.rename(columns=columns) # type: ignore
df['symbol'] = symbol
df['date'] = df['date'].apply(self.date_from_ts) # convert to proper timezone
df.index = df['date']
df.drop(columns=['date'], inplace=True)
df['volume'] = df['volume'].astype(int)
df = df.reindex(['symbol', 'open', 'close', 'high', 'low', 'volume', 'vwap', 'items'], axis=1)
# we have more work to do
if len(df) == 50000:
last_date = df.index[-1].to_pydatetime()
combined = df.append(self.get_aggregates(symbol, multiplier, timespan, last_date, end_date))
result = combined[~combined.index.duplicated()]
return result[(result.index >= start_date) & (result.index <= end_date)]
return df[(df.index >= start_date) & (df.index <= end_date)]
def get_aggregates_as_ib(self,
symbol: str,
multiplier: int,
timespan: str,
start_date: dt.datetime,
end_date: dt.datetime) -> pd.DataFrame:
result = self.get_aggregates(symbol, multiplier, timespan, start_date, end_date)
# interactive brokers history mapping
mapping = {
'open_trades': 'open',
'high_trades': 'high',
'low_trades': 'low',
'close_trades': 'close',
'volume_trades': 'volume',
'average_trades': 'vwap',
'open_bid': 'open',
'high_bid': 'high',
'low_bid': 'low',
'close_bid': 'close',
'open_ask': 'open',
'high_ask': 'high',
'low_ask': 'low',
'close_ask': 'close',
'barCount_trades': 'items',
}
for key, value in mapping.items():
result[key] = result[value]
result.rename_axis(None, inplace=True)
result.drop(columns=['symbol', 'open', 'close', 'high', 'low', 'volume', 'vwap', 'items'], inplace=True)
result = result.reindex(
['high_ask', 'high_trades', 'close_trades', 'low_bid', 'average_trades',
'open_trades', 'low_trades', 'barCount_trades', 'open_bid', 'volume_trades',
'low_ask', 'high_bid', 'close_ask', 'close_bid', 'open_ask'], axis=1) # type: ignore
return result
def get_financials(self, symbol: str) -> PolygonFinancials:
financials = self.client.reference_stock_financials(symbol, **{'limit': self.limit}).results
dividends = self.client.reference_stock_dividends(symbol, **{'limit': self.limit}).results
splits = self.client.reference_stock_splits(symbol, **{'limit': self.limit}).results
result = PolygonFinancials(pd.DataFrame(financials), | pd.DataFrame(dividends) | pandas.DataFrame |
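Each of the three reference_stock_* calls above exposes a .results payload that is passed straight to pd.DataFrame, so the recorded completion is just the second of three DataFrame wrappers. A stand-alone sketch with mocked payloads follows; the field names are illustrative, not polygon's exact schema, and the dataclass is a stand-in for the PolygonFinancials container defined above.
from dataclasses import dataclass
import pandas as pd

@dataclass
class Financials:                      # stand-in for the PolygonFinancials container
    financials: pd.DataFrame
    dividends: pd.DataFrame
    splits: pd.DataFrame

financials = [{"ticker": "MSFT", "period": "Q", "netIncome": 1.68e10}]   # mocked .results
dividends = [{"ticker": "MSFT", "exDate": "2021-02-17", "amount": 0.56}]
splits = [{"ticker": "MSFT", "exDate": "2003-02-18", "ratio": 0.5}]

bundle = Financials(pd.DataFrame(financials),
                    pd.DataFrame(dividends),   # the row's completion
                    pd.DataFrame(splits))
print(bundle.dividends.dtypes)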
import random
from pathlib import Path
from urllib.parse import urljoin
import pandas as pd
import requests
import pytest
from dwdbulk.api import observations
from dwdbulk.api.observations import (
__gather_resource_files,
get_measurement_data_from_url,
get_measurement_data_urls,
get_measurement_parameters,
get_resolutions,
get_stations,
get_stations_list_from_url,
)
from dwdbulk.util import (
germany_climate_url,
get_resource_index,
get_stations_lookup,
parse_htmllist,
station_metadata,
y2k_date_parser,
)
measurement_parameters_10_minutes = [
"air_temperature",
"extreme_temperature",
"extreme_wind",
"precipitation",
"solar",
"wind",
]
measurement_parameters_hourly = [
"air_temperature",
"cloud_type",
"cloudiness",
"dew_point",
"precipitation",
"pressure",
"soil_temperature",
"solar",
"sun",
"visibility",
"wind",
"wind_synop",
]
measurement_parameters_daily = [
"kl",
"more_precip",
"soil_temperature",
"solar",
"water_equiv",
"weather_phenomena",
]
# TODO: Fill in measurement parameters for other resolutions
resolution_and_measurement_standards = {
"10_minutes": measurement_parameters_10_minutes,
"1_minute": ["precipitation"],
# TODO: Below data series have different format (zipped raw & metadata); need to adapt parser
# "annual": ["more_precip", "weather_phenomena", "kl"],
# "daily": measurement_parameters_daily,
# "hourly": measurement_parameters_hourly,
# "monthly": ["more_precip", "weather_phenomena", "kl"],
# "multi_annual": [],
# "subdaily": [],
}
@pytest.mark.parametrize(
"resolution",
[k for k, v in resolution_and_measurement_standards.items() if v != []],
)
def test_parse_htmllist(resolution):
url = urljoin(
"https://opendata.dwd.de/climate_environment/CDC/observations_germany/climate/",
resolution,
)
r = requests.get(url)
extracted_links = parse_htmllist(url, r.text)
expected_links = resolution_and_measurement_standards[resolution]
expected_links = [
urljoin(germany_climate_url, str(Path(resolution) / link) + "/")
for link in expected_links
]
assert sorted(extracted_links) == sorted(expected_links)
def test_get_resource_index():
url = urljoin(germany_climate_url, "10_minutes/")
extracted_links = get_resource_index(url, "/")
expected_links = resolution_and_measurement_standards["10_minutes"]
expected_links = [urljoin(url, link + "/") for link in expected_links]
assert sorted(extracted_links) == sorted(expected_links)
@pytest.mark.parametrize(
"resolution,parameter",
[(k, v_i) for k, v in resolution_and_measurement_standards.items() for v_i in v],
)
def test_gather_resource_files_helper(resolution, parameter):
files = __gather_resource_files(resolution, parameter)
assert len([x for x in files if "Beschreibung_Stationen.txt" in x]) > 0
def test_get_resource_all():
"""
Test that all links are returned when extension is not specified.
"""
extracted_links = get_resource_index(germany_climate_url)
expected_links = [
urljoin(germany_climate_url, link + "/")
for link in resolution_and_measurement_standards.keys()
]
assert set(expected_links).issubset(extracted_links)
def test_get_all_resolutions():
resolutions = get_resolutions()
assert set(resolution_and_measurement_standards.keys()).issubset(resolutions)
@pytest.mark.parametrize(
"resolution,expected_measurement_parameters",
[(k, v) for k, v in resolution_and_measurement_standards.items()],
)
def test_get_measurement_parameters(resolution, expected_measurement_parameters):
# If measurement parameters not currently specified, then skip test.
if expected_measurement_parameters == []:
return
expected_measurement_parameters = [
{"resolution": resolution, "parameter": i}
for i in expected_measurement_parameters
]
extracted_measurement_parameters = get_measurement_parameters(resolution)
for i in extracted_measurement_parameters:
assert i in expected_measurement_parameters
for j in expected_measurement_parameters:
assert j in extracted_measurement_parameters
@pytest.mark.parametrize(
"resolution,parameter",
[(k, v_i) for k, v in resolution_and_measurement_standards.items() for v_i in v],
)
def test_get_stations(resolution, parameter):
"Test fetching station data. Test randomly chooses a measurement parameter for each of the three supported time frames."
# If measurement parameters not currently specified, then skip test.
if parameter == []:
return
df = get_stations(resolution, parameter)
assert df.date_start.min() > pd.Timestamp("1700-01-01", tz="UTC")
assert df.date_start.max() < pd.Timestamp("2200-01-01", tz="UTC")
states = [
"Baden-Württemberg",
"Nordrhein-Westfalen",
"Hessen",
"Bayern",
"Niedersachsen",
"Sachsen-Anhalt",
"Rheinland-Pfalz",
"Sachsen",
"Mecklenburg-Vorpommern",
"Schleswig-Holstein",
"Brandenburg",
"Thüringen",
"Saarland",
"Berlin",
"Bremen",
"Hamburg",
"Tirol",
]
assert set(df.state.unique()).issubset(set(states))
assert df.height.min() >= 0
assert df.geo_lat.min() >= -90
assert df.geo_lat.max() <= 90
assert df.geo_lon.min() >= -180
assert df.geo_lon.max() <= 180
expected_colnames = [v["name"] for k, v in station_metadata.items()]
assert sorted(df.columns) == sorted(expected_colnames)
assert df.shape[0] > 5
@pytest.mark.parametrize(
"resolution,parameter",
[(k, v_i) for k, v in resolution_and_measurement_standards.items() for v_i in v],
)
def test_get_measurement_data_urls_and_data(resolution, parameter):
files = get_measurement_data_urls(resolution, parameter)
assert len(files) > 0
files_sample = random.sample(files, 2)
for url in files_sample:
df = get_measurement_data_from_url(url)
df.head()
assert set(["station_id", "date_start"]).issubset(df.columns)
assert df.date_start.min() > pd.Timestamp("1700-01-01", tz="UTC")
assert df.date_start.max() < pd.Timestamp("2200-01-01", tz="UTC")
def test_observations_stations_available_in_lookup():
"""Test all stations available in lookup are also available in observations data."""
# NOTE: Different stations available for different resolutions and parameters; need to adjust this...
resolution = "10_minutes"
parameter = "air_temperature"
df_stations = get_stations_lookup()
df_stations.observations_station_id
available_stations = (
get_stations(resolution, parameter)["station_id"].unique().tolist()
)
assert set(df_stations.observations_station_id.unique().tolist()).issubset(
set(available_stations)
)
def test_observations_get_data_all():
"""Test that get_data returns reasonable results for all data, for a single station."""
resolution = "10_minutes"
parameter = "air_temperature"
df_stations = get_stations_lookup()
station_ids = df_stations.observations_station_id.head(1).tolist()
df = observations.get_data(
parameter,
station_ids=station_ids,
date_start=None,
date_end=None,
resolution=resolution,
)
date_end = pd.Timestamp.now(tz="UTC").floor("h") - pd.Timedelta("1.5h")
assert sorted(station_ids), sorted(df.station_id.unique().tolist())
assert df.duplicated(subset=["station_id", "date_start"]).sum() == 0
assert df.date_start.min() < pd.Timestamp("2000-01-01", tz="UTC")
assert df.date_start.max() >= date_end
def test_observations_get_data_recent():
"""Test that get_data returns reasonable results for recent data, for a subset of stations."""
resolution = "10_minutes"
parameter = "air_temperature"
date_end = pd.Timestamp.now(tz="UTC").floor("h")
date_start = date_end - pd.Timedelta("5 days")
df_stations = get_stations_lookup()
station_ids = df_stations.observations_station_id.sample(10).unique().tolist()
df = observations.get_data(
parameter,
station_ids=station_ids,
date_start=date_start,
date_end=date_end,
resolution=resolution,
)
assert sorted(station_ids), sorted(df.station_id.unique().tolist())
assert df.duplicated(subset=["station_id", "date_start"]).sum() == 0
assert df.date_start.min() == date_start
assert df.date_start.max() >= date_end - | pd.Timedelta("120 minutes") | pandas.Timedelta |
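The completion is the pd.Timedelta("120 minutes") slack term in the final assertion. The timestamp arithmetic these tests rely on is plain pandas and easy to check in isolation, as in this small sketch:
import pandas as pd

date_end = pd.Timestamp.now(tz="UTC").floor("h")          # truncate to the hour, tz-aware
date_start = date_end - pd.Timedelta("5 days")            # window used in the recent-data test
slack = pd.Timedelta("120 minutes")                       # the row's completion

assert date_end - date_start == pd.Timedelta(days=5)
assert slack == pd.Timedelta(hours=2)
print(date_start, date_end - slack)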